Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 48f1781..afe0033 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -168,4 +168,6 @@
 
 source "drivers/usb/otg/Kconfig"
 
+source "drivers/usb/function/Kconfig"
+
 endif # USB_SUPPORT
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 30ddf8d..ef69048 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -8,6 +8,7 @@
 
 obj-$(CONFIG_USB_MON)		+= mon/
 
+obj-$(CONFIG_USB_OTG_UTILS)	+= otg/
 obj-$(CONFIG_PCI)		+= host/
 obj-$(CONFIG_USB_EHCI_HCD)	+= host/
 obj-$(CONFIG_USB_ISP116X_HCD)	+= host/
@@ -23,6 +24,7 @@
 obj-$(CONFIG_USB_ISP1760_HCD)	+= host/
 obj-$(CONFIG_USB_IMX21_HCD)	+= host/
 obj-$(CONFIG_USB_FSL_MPH_DR_OF)	+= host/
+obj-$(CONFIG_USB_PEHCI_HCD)	+= host/
 
 obj-$(CONFIG_USB_C67X00_HCD)	+= c67x00/
 
@@ -49,5 +51,4 @@
 
 obj-$(CONFIG_USB_MUSB_HDRC)	+= musb/
 obj-$(CONFIG_USB_RENESAS_USBHS)	+= renesas_usbhs/
-obj-$(CONFIG_USB_OTG_UTILS)	+= otg/
 obj-$(CONFIG_USB_GADGET)	+= gadget/
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 34e3da5..97c5690 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1278,6 +1278,44 @@
 	return status;
 }
 
+#ifdef CONFIG_USB_OTG
+void usb_hnp_polling_work(struct work_struct *work)
+{
+	int ret;
+	struct usb_bus *bus =
+		container_of(work, struct usb_bus, hnp_polling.work);
+	struct usb_device *udev = bus->root_hub->children[bus->otg_port - 1];
+	u8 *status = kmalloc(sizeof(*status), GFP_KERNEL);
+
+	if (!status)
+		return;
+
+	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+		USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE,
+		0, OTG_STATUS_SELECTOR, status, sizeof(*status),
+		USB_CTRL_GET_TIMEOUT);
+	if (ret < 0) {
+		/* Peripheral may not be supporting HNP polling */
+		dev_info(&udev->dev, "HNP polling failed. status %d\n", ret);
+		goto out;
+	}
+
+	/* Spec says host must suspend the bus within 2 sec. */
+	if (*status & (1 << HOST_REQUEST_FLAG)) {
+		do_unbind_rebind(udev, DO_UNBIND);
+		udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+		ret = usb_suspend_both(udev, PMSG_USER_SUSPEND);
+		if (ret)
+			dev_info(&udev->dev, "suspend failed\n");
+	} else {
+		schedule_delayed_work(&bus->hnp_polling,
+			msecs_to_jiffies(THOST_REQ_POLL));
+	}
+out:
+	kfree(status);
+}
+#endif
+
 static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
 {
 	int	w;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ace9f84..54338fc 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -872,6 +872,9 @@
 	bus->bandwidth_isoc_reqs = 0;
 
 	INIT_LIST_HEAD (&bus->bus_list);
+#ifdef CONFIG_USB_OTG
+	INIT_DELAYED_WORK(&bus->hnp_polling, usb_hnp_polling_work);
+#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -901,6 +904,11 @@
 	/* Add it to the local list of buses */
 	list_add (&bus->bus_list, &usb_bus_list);
 	mutex_unlock(&usb_bus_list_lock);
+#ifdef CONFIG_USB_OTG
+	/* Obviously HNP is supported on B-host */
+	if (bus->is_b_host)
+		bus->hnp_support = 1;
+#endif
 
 	usb_notify_add_bus(bus);
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a428aa0..5442297 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -24,12 +24,38 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
+#include <linux/usb/otg.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
 
 #include "usb.h"
 
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+
+int portno;
+int No_Data_Phase;
+EXPORT_SYMBOL(No_Data_Phase);
+int No_Status_Phase;
+EXPORT_SYMBOL(No_Status_Phase);
+unsigned char hub_tier;
+
+#define PDC_HOST_NOTIFY		0x8001	/*completion from core */
+#define UNSUPPORTED_DEVICE	0x8099
+#define UNWANTED_SUSPEND	0x8098
+#define PDC_POWERMANAGEMENT	0x8097
+
+int Unwanted_SecondReset;
+EXPORT_SYMBOL(Unwanted_SecondReset);
+int HostComplianceTest;
+EXPORT_SYMBOL(HostComplianceTest);
+int HostTest;
+EXPORT_SYMBOL(HostTest);
+#endif
+
+
 /* if we are in debug mode, always announce new devices */
 #ifdef DEBUG
 #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES
@@ -356,8 +382,11 @@
 {
 	int i, status = -ETIMEDOUT;
 
+	/* ISP1763A HUB sometimes returns 2 bytes instead of 4 bytes, retry
+	 * if this happens
+	 */
 	for (i = 0; i < USB_STS_RETRIES &&
-			(status == -ETIMEDOUT || status == -EPIPE); i++) {
+			(status == -ETIMEDOUT || status == -EPIPE || status == 2); i++) {
 		status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
 			USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
 			data, sizeof(*data), USB_STS_TIMEOUT);
@@ -724,6 +753,10 @@
 		 */
 		if (type == HUB_INIT) {
 			delay = hub_power_on(hub, false);
+#ifdef CONFIG_USB_OTG
+			if (hdev->bus->is_b_host)
+				goto init2;
+#endif
 			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
 			schedule_delayed_work(&hub->init_work,
 					msecs_to_jiffies(delay));
@@ -858,6 +891,11 @@
 	 * will see them later and handle them normally.
 	 */
 	if (need_debounce_delay) {
+#ifdef CONFIG_USB_OTG
+		if (hdev->bus->is_b_host && type == HUB_INIT)
+			goto init3;
+#endif
+
 		delay = HUB_DEBOUNCE_STABLE;
 
 		/* Don't do a long sleep inside a workqueue routine */
@@ -1301,6 +1339,7 @@
 #ifdef	CONFIG_USB_OTG_BLACKLIST_HUB
 	if (hdev->parent) {
 		dev_warn(&intf->dev, "ignoring external hub\n");
+		otg_send_event(OTG_EVENT_HUB_NOT_SUPPORTED);
 		return -ENODEV;
 	}
 #endif
@@ -1649,6 +1688,13 @@
 	dev_info(&udev->dev, "USB disconnect, device number %d\n",
 			udev->devnum);
 
+#ifdef CONFIG_USB_OTG
+	if (udev->bus->hnp_support && udev->portnum == udev->bus->otg_port) {
+		cancel_delayed_work(&udev->bus->hnp_polling);
+		udev->bus->hnp_support = 0;
+	}
+#endif
+
 	usb_lock_device(udev);
 
 	/* Free up all the children before we remove this device */
@@ -1755,15 +1801,30 @@
 					(port1 == bus->otg_port)
 						? "" : "non-");
 
+				/* a_alt_hnp_support is obsolete */
+				if (port1 != bus->otg_port)
+					goto out;
+
+				bus->hnp_support = 1;
+
+				/* a_hnp_support is not required for devices
+				 * compliant to revision 2.0 or subsequent
+				 * versions.
+				 */
+				if (le16_to_cpu(desc->bcdOTG) >= 0x0200)
+					goto out;
+
+				/* A legacy B-device, i.e. one compliant to
+				 * spec revision 1.3, expects the A-device to
+				 * set a_hnp_support or b_hnp_enable before
+				 * selecting a configuration.
+				 */
+
 				/* enable HNP before suspend, it's simpler */
-				if (port1 == bus->otg_port)
-					bus->b_hnp_enable = 1;
 				err = usb_control_msg(udev,
 					usb_sndctrlpipe(udev, 0),
 					USB_REQ_SET_FEATURE, 0,
-					bus->b_hnp_enable
-						? USB_DEVICE_B_HNP_ENABLE
-						: USB_DEVICE_A_ALT_HNP_SUPPORT,
+					USB_DEVICE_A_HNP_SUPPORT,
 					0, NULL, 0, USB_CTRL_SET_TIMEOUT);
 				if (err < 0) {
 					/* OTG MESSAGE: report errors here,
@@ -1772,26 +1833,35 @@
 					dev_info(&udev->dev,
 						"can't set HNP mode: %d\n",
 						err);
-					bus->b_hnp_enable = 0;
+					bus->hnp_support = 0;
 				}
 			}
 		}
 	}
-
+out:
 	if (!is_targeted(udev)) {
 
+		otg_send_event(OTG_EVENT_DEV_NOT_SUPPORTED);
+
 		/* Maybe it can talk to us, though we can't talk to it.
 		 * (Includes HNP test device.)
 		 */
-		if (udev->bus->b_hnp_enable || udev->bus->is_b_host) {
+		if (udev->bus->hnp_support) {
 			err = usb_port_suspend(udev, PMSG_SUSPEND);
 			if (err < 0)
 				dev_dbg(&udev->dev, "HNP fail, %d\n", err);
 		}
 		err = -ENOTSUPP;
-		goto fail;
+	} else if (udev->bus->hnp_support &&
+		udev->portnum == udev->bus->otg_port) {
+		/* HNP polling was introduced in OTG supplement Rev 2.0
+		 * and older devices may not support it. The work is not
+		 * re-armed if the device returns STALL. A B-Host also
+		 * performs HNP polling.
+		 */
+		schedule_delayed_work(&udev->bus->hnp_polling,
+			msecs_to_jiffies(THOST_REQ_POLL));
 	}
-fail:
 #endif
 	return err;
 }
@@ -2346,6 +2416,22 @@
 				return status;
 		}
 	}
+#ifdef CONFIG_USB_OTG
+	if (!udev->bus->is_b_host && udev->bus->hnp_support &&
+		udev->portnum == udev->bus->otg_port) {
+		status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				USB_REQ_SET_FEATURE, 0,
+				USB_DEVICE_B_HNP_ENABLE,
+				0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+		if (status < 0) {
+			otg_send_event(OTG_EVENT_NO_RESP_FOR_HNP_ENABLE);
+			dev_dbg(&udev->dev, "can't enable HNP on port %d, "
+					"status %d\n", port1, status);
+		} else {
+			udev->bus->b_hnp_enable = 1;
+		}
+	}
+#endif
 
 	/* see 7.1.7.6 */
 	if (hub_is_superspeed(hub->hdev))
@@ -2949,14 +3035,22 @@
 					buf->bMaxPacketSize0;
 			kfree(buf);
 
-			retval = hub_port_reset(hub, port1, udev, delay);
-			if (retval < 0)		/* error or disconnect */
-				goto fail;
-			if (oldspeed != udev->speed) {
-				dev_dbg(&udev->dev,
-					"device reset changed speed!\n");
-				retval = -ENODEV;
-				goto fail;
+			/*
+			 * If it is a HSET Test device, we don't issue a
+			 * second reset which results in failure due to
+			 * speed change.
+			 */
+			if (le16_to_cpu(buf->idVendor) != 0x1a0a) {
+				retval = hub_port_reset(hub, port1, udev,
+							 delay);
+				if (retval < 0)	/* error or disconnect */
+					goto fail;
+				if (oldspeed != udev->speed) {
+					dev_dbg(&udev->dev,
+					       "device reset changed speed!\n");
+					retval = -ENODEV;
+					goto fail;
+				}
 			}
 			if (r) {
 				dev_err(&udev->dev,
@@ -3199,6 +3293,9 @@
 			(portchange & USB_PORT_STAT_C_CONNECTION))
 		clear_bit(port1, hub->removed_bits);
 
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+	if (Unwanted_SecondReset == 0)   /*stericsson*/
+#endif
 	if (portchange & (USB_PORT_STAT_C_CONNECTION |
 				USB_PORT_STAT_C_ENABLE)) {
 		status = hub_port_debounce(hub, port1);
@@ -3337,7 +3434,32 @@
 		status = hub_power_remaining(hub);
 		if (status)
 			dev_dbg(hub_dev, "%dmA power budget left\n", status);
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+		if (HostComplianceTest == 1 && udev->devnum > 1) {
+			if (HostTest == 7) {	/*SINGLE_STEP_GET_DEV_DESC */
+				dev_info(hub_dev, "Testing "
+						"SINGLE_STEP_GET_DEV_DESC\n");
+				/* Test the Single Step Get Device Descriptor ,
+				 * take care it should not get status phase
+				 */
+				No_Data_Phase = 1;
+				No_Status_Phase = 1;
 
+				usb_get_device_descriptor(udev, 8);
+				No_Data_Phase = 0;
+				No_Status_Phase = 0;
+			}
+
+			if (HostTest == 8) {
+				dev_info(hub_dev, "Testing "
+						"SINGLE_STEP_SET_FEATURE\n");
+				/* Test Single Step Set Feature */
+				No_Status_Phase = 1;
+				usb_get_device_descriptor(udev, 8);
+				No_Status_Phase = 0;
+			}
+		}
+#endif
 		return;
 
 loop_disable:
@@ -3375,7 +3497,11 @@
 	u16 portchange;
 	int i, ret;
 	int connect_change;
-
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+	int j;
+	int otgport = 0;
+	struct usb_port_status port_status;
+#endif
 	/*
 	 *  We restart the list every time to avoid a deadlock with
 	 * deleting hubs downstream from this one. This should be
@@ -3450,6 +3576,171 @@
 
 		/* deal with port status changes */
 		for (i = 1; i <= hub->descriptor->bNbrPorts; i++) {
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+			struct usb_port_status portsts;
+
+			/* if we have something to do on
+			 * the OTG port
+			 */
+			if ((hdev->otgstate & USB_OTG_SUSPEND) ||
+			    (hdev->otgstate & USB_OTG_ENUMERATE) ||
+			    (hdev->otgstate & USB_OTG_DISCONNECT) ||
+			    (hdev->otgstate & USB_OTG_RESUME)) {
+				otgport = 1;
+			}
+
+
+			if (hdev->otgstate & USB_OTG_RESUME) {
+				ret = clear_port_feature(hdev, i,
+							 USB_PORT_FEAT_SUSPEND);
+				if (ret < 0) {
+					dev_err(hub_dev, "usb otg port Resume"
+						" fails, %d\n", ret);
+				}
+				hdev->otgstate &= ~USB_OTG_RESUME;
+			}
+			if ((hdev->otgstate & USB_OTG_SUSPEND)
+			    && (hdev->children[0])) {
+				hdev->otgstate &= ~USB_OTG_SUSPEND;
+
+				ret = set_port_feature(hdev, 1,
+						       USB_PORT_FEAT_SUSPEND);
+				if (ret < 0) {
+					dev_err(hub_dev, "usb otg port suspend"
+						" fails, %d\n", ret);
+					break;
+				}
+				msleep(1);
+				ret = get_port_status(hdev, i, &portsts);
+				if (ret < 0) {
+					dev_err(hub_dev, "usb otg get port"
+						" status fails, %d\n", ret);
+					break;
+				}
+				portchange = le16_to_cpu(portsts.wPortChange);
+				if (portchange & USB_PORT_STAT_C_SUSPEND) {
+					clear_port_feature(hdev, i,
+						USB_PORT_FEAT_C_SUSPEND);
+				}
+				break;
+			}
+
+			if (hdev->otgstate & USB_OTG_REMOTEWAKEUP) {
+
+				for (j = 1; j <= hub->descriptor->bNbrPorts;
+				     j++) {
+					if (hdev->children[j - 1]) {
+						dev_dbg(hub_dev, "child"
+						     " found at port %d\n", j);
+						ret = usb_control_msg(hdev->
+						      children[j - 1],
+						      usb_sndctrlpipe(hdev->
+								children[j - 1],
+								0),
+						      USB_REQ_SET_FEATURE,
+						      USB_RECIP_DEVICE,
+						      USB_DEVICE_REMOTE_WAKEUP,
+						      0, NULL,
+						      0,
+						      USB_CTRL_SET_TIMEOUT);
+						if (ret < 0) {
+							dev_err(hub_dev, "Port"
+							  " %d doesn't support"
+							  "remote wakeup\n", j);
+						} else {
+							dev_dbg(hub_dev, "Port"
+							  " %d supports"
+							  "remote wakeup\n", j);
+						}
+						ret = set_port_feature(hdev, j,
+							USB_PORT_FEAT_SUSPEND);
+						if (ret < 0) {
+							dev_err(hub_dev, "Port"
+							  " %d NOT ABLE TO"
+							  " SUSPEND\n", j);
+						} else {
+							dev_dbg(hub_dev, "Port"
+							  " %d is ABLE TO"
+							  " SUSPEND\n", j);
+						}
+					}
+				}
+				ret = usb_control_msg(hdev,
+						      usb_sndctrlpipe(hdev, 0),
+						      USB_REQ_SET_FEATURE,
+						      USB_RECIP_DEVICE,
+						      USB_DEVICE_REMOTE_WAKEUP,
+						      0, NULL, 0,
+						      USB_CTRL_SET_TIMEOUT);
+				if (ret < 0) {
+					dev_err(hub_dev, "HUB doesn't support"
+							" REMOTE WAKEUP\n");
+				} else {
+					dev_dbg(hub_dev, "HUB supports"
+							" REMOTE WAKEUP\n");
+				}
+				ret = 0;
+				msleep(10);
+				if (hdev->parent == hdev->bus->root_hub) {
+					if (hdev->hcd_suspend &&
+					    hdev->hcd_priv) {
+						dev_dbg(hub_dev, "calling"
+						  " suspend after remote wakeup"
+						  " command is issued\n");
+						hdev->hcd_suspend(hdev->
+								   hcd_priv);
+					}
+					if (hdev->otg_notif)
+						hdev->otg_notif(hdev->otgpriv,
+						       PDC_POWERMANAGEMENT, 10);
+				}
+			}
+
+			if (hdev->otgstate & USB_OTG_WAKEUP_ALL) {
+				(void) usb_control_msg(hdev,
+						       usb_sndctrlpipe(hdev, 0),
+						       USB_REQ_CLEAR_FEATURE,
+						       USB_RECIP_DEVICE,
+						       USB_DEVICE_REMOTE_WAKEUP,
+						       0, NULL, 0,
+						       USB_CTRL_SET_TIMEOUT);
+				dev_dbg(hub_dev, "Hub CLEARED REMOTE WAKEUP\n");
+				for (j = 1; j <= hub->descriptor->bNbrPorts;
+				     j++) {
+					if (hdev->children[j - 1]) {
+						dev_dbg(hub_dev, "PORT %d"
+						   " SUSPEND IS CLEARD\n", j);
+						clear_port_feature(hdev, j,
+						   USB_PORT_FEAT_C_SUSPEND);
+						msleep(50);
+						(void) usb_control_msg(hdev->
+						       children[j - 1],
+						       usb_sndctrlpipe(
+							  hdev->children[j - 1],
+							  0),
+						       USB_REQ_CLEAR_FEATURE,
+						       USB_RECIP_DEVICE,
+						       USB_DEVICE_REMOTE_WAKEUP,
+						       0, NULL,
+						       0,
+						       USB_CTRL_SET_TIMEOUT);
+						dev_dbg(hub_dev, "PORT %d "
+							"REMOTE WAKEUP IS "
+							"CLEARD\n", j);
+						msleep(10);
+					}
+				}
+
+
+			}
+
+
+			/*
+			 * reset the OTG state of the hub device,
+			 * regardless of the attached OTG device
+			 */
+			hdev->otgstate = 0;
+#endif
 			if (test_bit(i, hub->busy_bits))
 				continue;
 			connect_change = test_bit(i, hub->change_bits);
@@ -3573,9 +3864,19 @@
 				hub_port_warm_reset(hub, i);
 			}
 
-			if (connect_change)
+			if (connect_change) {
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+				if (hdev->parent == hdev->bus->root_hub)
+					if (hdev->otg_notif
+					    && (HostComplianceTest == 0))
+						hdev->otg_notif(hdev->otgpriv,
+								PDC_HOST_NOTIFY,
+								5);
+				portno = i;
+#endif
 				hub_port_connect_change(hub, i,
 						portstatus, portchange);
+				}
 		} /* end for i */
 
 		/* deal with hub status changes */
@@ -3607,7 +3908,105 @@
 						"condition\n");
 			}
 		}
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+		/* if we have something on otg */
+		if (otgport) {
+			otgport = 0;
+			/* notify otg controller about it */
+			if (hdev->parent == hdev->bus->root_hub)
+				if (hdev->otg_notif)
+					hdev->otg_notif(hdev->otgpriv,
+							PDC_HOST_NOTIFY, 0);
+		}
 
+		if (HostComplianceTest && hdev->devnum > 1) {
+			/* TEST_SE0_NAK */
+			if (HostTest == 1) {
+				dev_info(hub_dev, "Testing for TEST_SE0_NAK\n");
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						       USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x300,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			/*TEST_J*/
+			if (HostTest == 2) {
+				dev_info(hub_dev, "Testing TEST_J\n");
+				ret = clear_port_feature(hdev, portno,
+						USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x100,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			if (HostTest == 3) {
+				dev_info(hub_dev, "Testing TEST_K\n");
+				ret = clear_port_feature(hdev, portno,
+						USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						       USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x200,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			if (HostTest == 4) {
+				dev_info(hub_dev, "Testing TEST_PACKET at Port"
+						  " %d\n", portno);
+				ret = clear_port_feature(hdev, portno,
+						USB_PORT_FEAT_C_CONNECTION);
+				if (ret < 0)
+					dev_err(hub_dev, "Clear port feature"
+						" C_CONNECTION failed\n");
+
+				ret = set_port_feature(hdev, portno,
+						       USB_PORT_FEAT_SUSPEND);
+				if (ret < 0)
+					dev_err(hub_dev, "Clear port feature"
+						" SUSPEND failed\n");
+
+				ret = set_port_feature(hdev, portno | 0x400,
+						       USB_PORT_FEAT_TEST);
+				if (ret < 0)
+					dev_err(hub_dev, "Clear port feature"
+						" TEST failed\n");
+
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+				if (ret < 0)
+					dev_err(hub_dev, "Get port status"
+						" failed\n");
+			}
+			if (HostTest == 5) {
+				dev_info(hub_dev, "Testing TEST_FORCE_ENBLE\n");
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						 USB_PORT_FEAT_SUSPEND);
+				ret = set_port_feature(hdev, portno | 0x500,
+						       USB_PORT_FEAT_TEST);
+				ret = get_port_status(hdev, portno,
+						      &port_status);
+			}
+			if (HostTest == 6) {
+				dev_info(hub_dev, "Testing "
+					 "HS_HOST_PORT_SUSPEND_RESUME\n");
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_C_CONNECTION);
+				ret = set_port_feature(hdev, portno,
+						     USB_PORT_FEAT_SUSPEND);
+				msleep(3000);
+				ret = clear_port_feature(hdev, portno,
+						 USB_PORT_FEAT_SUSPEND);
+				HostTest = 0;
+			}
+		}
+#endif
  loop_autopm:
 		/* Balance the usb_autopm_get_interface() above */
 		usb_autopm_put_interface_no_suspend(intf);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 0b5ec23..415593c 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1769,6 +1769,9 @@
 		goto free_interfaces;
 	}
 
+	dev->actconfig = cp;
+	if (cp)
+		usb_notify_config_device(dev);
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			      USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
 			      NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1776,11 +1779,11 @@
 		/* All the old state is gone, so what else can we do?
 		 * The device is probably useless now anyway.
 		 */
-		cp = NULL;
+		dev->actconfig = cp = NULL;
 	}
 
-	dev->actconfig = cp;
 	if (!cp) {
+		usb_notify_config_device(dev);
 		usb_set_device_state(dev, USB_STATE_ADDRESS);
 		usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
 		mutex_unlock(hcd->bandwidth_mutex);
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7542dce..15311d8 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -57,6 +57,12 @@
 	mutex_unlock(&usbfs_mutex);
 }
 
+void usb_notify_config_device(struct usb_device *udev)
+{
+	blocking_notifier_call_chain(&usb_notifier_list,
+			USB_DEVICE_CONFIG, udev);
+}
+
 void usb_notify_add_bus(struct usb_bus *ubus)
 {
 	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index e8cdce5..cec4167 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -92,7 +92,30 @@
 		if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) &&
 		    (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol))
 			continue;
+#if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
+		/* Hub is a targeted device, so code execution should reach here */
+		if (USB_CLASS_HUB == dev->descriptor.bDeviceClass) {
+			/* count the tiers and if it is more than 6, return 0 */
+			unsigned char tier = 0;
+			struct usb_device *root_hub;
 
+			root_hub = dev->bus->root_hub;
+			while ((dev->parent != NULL) && /* root hub not counted */
+				(dev->parent != root_hub) &&
+				(tier != 6))  {/* internal hub not counted */
+				tier++;
+				dev = dev->parent;
+			}
+
+			if (tier == 6) {
+				dev_err(&dev->dev, "5 tier of hubs reached,"
+					" newly added hub will not be"
+					" supported!\n");
+				hub_tier = 1;
+				return 0;
+			}
+		}
+#endif
 		return 1;
 	}
 
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index d44d4b7..c36c72a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -96,6 +96,10 @@
 
 #endif
 
+#ifdef CONFIG_USB_OTG
+extern void usb_hnp_polling_work(struct work_struct *work);
+#endif
+
 extern struct bus_type usb_bus_type;
 extern struct device_type usb_device_type;
 extern struct device_type usb_if_device_type;
@@ -159,6 +163,7 @@
 /* internal notify stuff */
 extern void usb_notify_add_device(struct usb_device *udev);
 extern void usb_notify_remove_device(struct usb_device *udev);
+extern void usb_notify_config_device(struct usb_device *udev);
 extern void usb_notify_add_bus(struct usb_bus *ubus);
 extern void usb_notify_remove_bus(struct usb_bus *ubus);
 
diff --git a/drivers/usb/function/Kconfig b/drivers/usb/function/Kconfig
new file mode 100644
index 0000000..90d776c
--- /dev/null
+++ b/drivers/usb/function/Kconfig
@@ -0,0 +1,163 @@
+menu "USB Function Support"
+	depends on !USB_GADGET
+
+config USB_MSM_OTG
+	bool "OTG support for Qualcomm on-chip USB controller"
+	depends on USB && USB_FUNCTION && USB_EHCI_MSM
+	help
+	  USB OTG driver.
+	  This driver is required if you want to use USB in
+	  Host mode and Device mode.
+
+config USB_FUNCTION
+	boolean "Support for USB Function Drivers"
+	help
+	   The USB Function framework is similar to the Gadget framework
+	   but a little simpler and a little more pluggable.  It trades
+	   some flexibility in the framework for smaller and simpler
+	   function drivers that can be combined into a composite driver.
+
+choice
+	prompt "USB Peripheral Controller"
+	depends on USB_FUNCTION
+	help
+	  USB devices interfaces with the host using a controller.
+	  Many controller drivers are platform-specific; these
+	  often need board-specific hooks.
+
+config USB_FUNCTION_MSM_HSUSB
+	boolean "MSM Highspeed USB Peripheral Controller"
+	depends on ARCH_MSM
+	help
+	  High speed USB device controller for Qualcomm chipsets using
+	  USB Function framework. Controller supports IAD and
+	  32 endpoints(16 IN and 16 OUT).
+
+endchoice
+
+config USB_FUNCTION_NULL
+	boolean "Null Function -- eats packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_ZERO
+	boolean "Zero Function -- generates packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_LOOPBACK
+	boolean "Loopback Function -- returns packets"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_ADB
+	tristate "ADB Transport Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Function Driver for the Android ADB Protocol
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "adb"
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_UMS
+	boolean "USB Mass Storage Function (userspace)"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+
+config USB_FUNCTION_MASS_STORAGE
+	tristate "USB Mass Storage Function (kernel based)"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB && SWITCH
+	help
+	  The File-backed Storage function driver acts as a USB Mass Storage
+	  disk drive.  As its storage repository it can use a regular
+	  file or a block device specified as a module parameter. Initial
+	  driver version is derived from Gadget framework and ported to
+	  Function driver framework.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "file_storage".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_CSW_HACK
+	boolean "USB Mass storage csw hack Feature"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MASS_STORAGE
+	help
+	 This csw hack feature is for increasing the performance of the mass
+	 storage
+
+	default n
+
+config USB_FUNCTION_DIAG
+	tristate "USB MSM Diag Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Simple bridge driver between smd and debug client(host side)
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "diag".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_ETHER
+	tristate "USB Ethernet Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Implements the Ethernet style communication using CDC/ECM.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ether".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_SERIAL
+	tristate "USB Serial Function"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	help
+	  Implements serial communication using single interface; uses
+	  two endpoints(bulk-in and bulk out) for data transfer and a
+	  interrupt endpoint for control data transfer.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "serial".
+
+	default USB_FUNCTION_MSM_HSUSB
+
+config USB_FUNCTION_RMNET
+	bool "RmNet function driver"
+	depends on USB_FUNCTION
+	depends on USB_FUNCTION_MSM_HSUSB
+	default n
+	help
+	  Implements Rmnet function.
+	  Rmnet is an alternative to CDC-ECM and Windows RNDIS. It uses
+	  QUALCOMM MSM Interface for control transfers. It acts like a
+	  bridge between Host and modem found in MSM chipsets.
+
+config RMNET_SMD_CTL_CHANNEL
+	string "control SMD channel name"
+	depends on USB_FUNCTION_RMNET
+	default ""
+	help
+	  Control SMD channel for transferring QMI messages
+
+config RMNET_SMD_DATA_CHANNEL
+	string "Data SMD channel name"
+	depends on USB_FUNCTION_RMNET
+	default ""
+	help
+	  Data SMD channel for transferring network data
+
+endmenu
diff --git a/drivers/usb/function/Makefile b/drivers/usb/function/Makefile
new file mode 100644
index 0000000..7614d3b
--- /dev/null
+++ b/drivers/usb/function/Makefile
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_USB_MSM_OTG)		+= msm_otg.o
+obj-$(CONFIG_USB_FUNCTION_MSM_HSUSB)	+= msm_hsusb.o
+obj-$(CONFIG_USB_FUNCTION_NULL)		+= null.o
+obj-$(CONFIG_USB_FUNCTION_NULL)		+= zero.o
+obj-$(CONFIG_USB_FUNCTION_LOOPBACK)	+= loopback.o
+obj-$(CONFIG_USB_FUNCTION_ADB)		+= adb.o
+obj-$(CONFIG_USB_FUNCTION_UMS)		+= ums.o
+obj-$(CONFIG_USB_FUNCTION_MASS_STORAGE)	+= mass_storage.o
+obj-$(CONFIG_USB_FUNCTION_DIAG)		+= diag.o
+obj-$(CONFIG_USB_FUNCTION_SERIAL)       += serial.o
+obj-$(CONFIG_USB_FUNCTION_ETHER)	+= ether_cdc_ecm.o
+obj-$(CONFIG_USB_FUNCTION_RMNET)	+= rmnet.o
diff --git a/drivers/usb/function/adb.c b/drivers/usb/function/adb.c
new file mode 100644
index 0000000..dd91be3
--- /dev/null
+++ b/drivers/usb/function/adb.c
@@ -0,0 +1,624 @@
+/* drivers/usb/function/adb.c
+ *
+ * Function Device for the Android ADB Protocol
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "usb_function.h"
+
+#if 1
+#define DBG(x...) do {} while (0)
+#else
+#define DBG(x...) printk(x)
+#endif
+
+#define TXN_MAX 4096
+
+/* number of rx and tx requests to allocate */
+#define RX_REQ_MAX 4
+#define TX_REQ_MAX 4
+
+#define ADB_FUNCTION_NAME "adb"
+
+struct adb_context
+{
+	int online;
+	int error;
+
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+	atomic_t enable_excl;
+	spinlock_t lock;
+
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head tx_idle;
+	struct list_head rx_idle;
+	struct list_head rx_done;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	/* the request we're currently reading from */
+	struct usb_request *read_req;
+	unsigned char *read_buf;
+	unsigned read_count;
+	unsigned bound;
+};
+
+static struct adb_context _context;
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength =		sizeof intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	0xff,
+	.bInterfaceSubClass =	0x42,
+	.bInterfaceProtocol =	0x01,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+	.bInterval =		0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(64),
+	.bInterval =		0,
+};
+
+static struct usb_function usb_func_adb;
+
+static inline int _lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void _unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+void req_put(struct adb_context *ctxt, struct list_head *head, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* remove a request from the head of a list */
+struct usb_request *req_get(struct adb_context *ctxt, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return req;
+}
+
+static void adb_complete_in(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct adb_context *ctxt = req->context;
+
+	if (req->status != 0)
+		ctxt->error = 1;
+
+	req_put(ctxt, &ctxt->tx_idle, req);
+
+	wake_up(&ctxt->write_wq);
+}
+
+static void adb_complete_out(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct adb_context *ctxt = req->context;
+
+	if (req->status != 0) {
+		ctxt->error = 1;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	} else {
+		req_put(ctxt, &ctxt->rx_done, req);
+	}
+
+	wake_up(&ctxt->read_wq);
+}
+
+static ssize_t adb_read(struct file *fp, char __user *buf,
+			size_t count, loff_t *pos)
+{
+	struct adb_context *ctxt = &_context;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	DBG("adb_read(%d)\n", count);
+
+	if (_lock(&ctxt->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while (!(ctxt->online || ctxt->error)) {
+		DBG("adb_read: waiting for online state\n");
+		ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
+		if (ret < 0) {
+			_unlock(&ctxt->read_excl);
+			return ret;
+		}
+	}
+
+	while (count > 0) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* if we have idle read requests, get them queued */
+		while ((req = req_get(ctxt, &ctxt->rx_idle))) {
+requeue_req:
+			req->length = TXN_MAX;
+			ret = usb_ept_queue_xfer(ctxt->out, req);
+			if (ret < 0) {
+				DBG("adb_read: failed to queue req %p (%d)\n", req, ret);
+				r = -EIO;
+				ctxt->error = 1;
+				req_put(ctxt, &ctxt->rx_idle, req);
+				goto fail;
+			} else {
+				DBG("rx %p queue\n", req);
+			}
+		}
+
+		/* if we have data pending, give it to userspace */
+		if (ctxt->read_count > 0) {
+			xfer = (ctxt->read_count < count) ? ctxt->read_count : count;
+
+			if (copy_to_user(buf, ctxt->read_buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+			ctxt->read_buf += xfer;
+			ctxt->read_count -= xfer;
+			buf += xfer;
+			count -= xfer;
+
+			/* if we've emptied the buffer, release the request */
+			if (ctxt->read_count == 0) {
+				req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+				ctxt->read_req = 0;
+			}
+			continue;
+		}
+
+		/* wait for a request to complete */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->read_wq,
+					       ((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
+
+		if (req != 0) {
+			/* if we got a 0-len one we need to put it back into
+			** service.  if we made it the current read req we'd
+			** be stuck forever
+			*/
+			if (req->actual == 0)
+				goto requeue_req;
+
+			ctxt->read_req = req;
+			ctxt->read_count = req->actual;
+			ctxt->read_buf = req->buf;
+			DBG("rx %p %d\n", req, req->actual);
+		}
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+	}
+
+fail:
+	_unlock(&ctxt->read_excl);
+	return r;
+}
+
+static ssize_t adb_write(struct file *fp, const char __user *buf,
+			 size_t count, loff_t *pos)
+{
+	struct adb_context *ctxt = &_context;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	DBG("adb_write(%d)\n", count);
+
+	if (_lock(&ctxt->write_excl))
+		return -EBUSY;
+
+	while (count > 0) {
+		if (ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->write_wq,
+					       ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
+
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if (req != 0) {
+			xfer = count > TXN_MAX ? TXN_MAX : count;
+			if (copy_from_user(req->buf, buf, xfer)) {
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ept_queue_xfer(ctxt->in, req);
+			if (ret < 0) {
+				DBG("adb_write: xfer error %d\n", ret);
+				ctxt->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+		}
+	}
+
+
+	if (req)
+		req_put(ctxt, &ctxt->tx_idle, req);
+
+	_unlock(&ctxt->write_excl);
+	return r;
+}
+
+static int adb_open(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	if (_lock(&ctxt->open_excl))
+		return -EBUSY;
+
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+static int adb_release(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	_unlock(&ctxt->open_excl);
+	return 0;
+}
+
+static struct file_operations adb_fops = {
+	.owner =   THIS_MODULE,
+	.read =    adb_read,
+	.write =   adb_write,
+	.open =    adb_open,
+	.release = adb_release,
+};
+
+static struct miscdevice adb_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_adb",
+	.fops = &adb_fops,
+};
+
+static int adb_enable_open(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	if (_lock(&ctxt->enable_excl))
+		return -EBUSY;
+
+	printk(KERN_INFO "enabling adb function\n");
+	usb_function_enable(ADB_FUNCTION_NAME, 1);
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+static int adb_enable_release(struct inode *ip, struct file *fp)
+{
+	struct adb_context *ctxt = &_context;
+
+	printk(KERN_INFO "disabling adb function\n");
+	usb_function_enable(ADB_FUNCTION_NAME, 0);
+	_unlock(&ctxt->enable_excl);
+	return 0;
+}
+
+static struct file_operations adb_enable_fops = {
+	.owner =   THIS_MODULE,
+	.open =    adb_enable_open,
+	.release = adb_enable_release,
+};
+
+static struct miscdevice adb_enable_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_adb_enable",
+	.fops = &adb_enable_fops,
+};
+
+static void adb_unbind(void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	if (!ctxt->bound)
+		return;
+
+	while ((req = req_get(ctxt, &ctxt->rx_idle))) {
+		usb_ept_free_req(ctxt->out, req);
+	}
+	while ((req = req_get(ctxt, &ctxt->tx_idle))) {
+		usb_ept_free_req(ctxt->in, req);
+	}
+	if (ctxt->in) {
+		usb_ept_fifo_flush(ctxt->in);
+		usb_ept_enable(ctxt->in,  0);
+		usb_free_endpoint(ctxt->in);
+	}
+	if (ctxt->out) {
+		usb_ept_fifo_flush(ctxt->out);
+		usb_ept_enable(ctxt->out,  0);
+		usb_free_endpoint(ctxt->out);
+	}
+
+	ctxt->online = 0;
+	ctxt->error = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+	ctxt->bound = 0;
+}
+
+static void adb_bind(void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+	int n;
+
+	intf_desc.bInterfaceNumber =
+		usb_msm_get_next_ifc_number(&usb_func_adb);
+
+	ctxt->in = usb_alloc_endpoint(USB_DIR_IN);
+	if (ctxt->in) {
+		hs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ctxt->in->num;
+		fs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ctxt->in->num;
+	}
+
+	ctxt->out = usb_alloc_endpoint(USB_DIR_OUT);
+	if (ctxt->out) {
+		hs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT|ctxt->out->num;
+		fs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT|ctxt->out->num;
+	}
+
+	for (n = 0; n < RX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 4096);
+		if (req == 0) {
+			ctxt->bound = 1;
+			goto fail;
+		}
+		req->context = ctxt;
+		req->complete = adb_complete_out;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	for (n = 0; n < TX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 4096);
+		if (req == 0) {
+			ctxt->bound = 1;
+			goto fail;
+		}
+		req->context = ctxt;
+		req->complete = adb_complete_in;
+		req_put(ctxt, &ctxt->tx_idle, req);
+	}
+	ctxt->bound = 1;
+	return;
+
+fail:
+	printk(KERN_ERR "adb_bind() could not allocate requests\n");
+	adb_unbind(ctxt);
+}
+
+static void adb_configure(int configured, void *_ctxt)
+{
+	struct adb_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	if (configured) {
+		ctxt->online = 1;
+
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(ctxt->in, &hs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->out, &hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(ctxt->in, &fs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->out, &fs_bulk_out_desc);
+		}
+		usb_ept_enable(ctxt->in,  1);
+		usb_ept_enable(ctxt->out, 1);
+
+		/* if we have a stale request being read, recycle it */
+		ctxt->read_buf = 0;
+		ctxt->read_count = 0;
+		if (ctxt->read_req) {
+			req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+			ctxt->read_req = 0;
+		}
+
+		/* retire any completed rx requests from previous session */
+		while ((req = req_get(ctxt, &ctxt->rx_done)))
+			req_put(ctxt, &ctxt->rx_idle, req);
+
+	} else {
+		ctxt->online = 0;
+		ctxt->error = 1;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+}
+
+static struct usb_function usb_func_adb = {
+	.bind = adb_bind,
+	.unbind = adb_unbind,
+	.configure = adb_configure,
+
+	.name = ADB_FUNCTION_NAME,
+	.context = &_context,
+
+};
+
+struct usb_descriptor_header *adb_hs_descriptors[4];
+struct usb_descriptor_header *adb_fs_descriptors[4];
+static int __init adb_init(void)
+{
+	int ret = 0;
+	struct adb_context *ctxt = &_context;
+	DBG("adb_init()\n");
+
+	init_waitqueue_head(&ctxt->read_wq);
+	init_waitqueue_head(&ctxt->write_wq);
+
+	atomic_set(&ctxt->open_excl, 0);
+	atomic_set(&ctxt->read_excl, 0);
+	atomic_set(&ctxt->write_excl, 0);
+	atomic_set(&ctxt->enable_excl, 0);
+
+	spin_lock_init(&ctxt->lock);
+
+	INIT_LIST_HEAD(&ctxt->rx_idle);
+	INIT_LIST_HEAD(&ctxt->rx_done);
+	INIT_LIST_HEAD(&ctxt->tx_idle);
+
+	adb_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	adb_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&hs_bulk_in_desc;
+	adb_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&hs_bulk_out_desc;
+	adb_hs_descriptors[3] = NULL;
+
+	adb_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	adb_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&fs_bulk_in_desc;
+	adb_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&fs_bulk_out_desc;
+	adb_fs_descriptors[3] = NULL;
+
+	usb_func_adb.hs_descriptors = adb_hs_descriptors;
+	usb_func_adb.fs_descriptors = adb_fs_descriptors;
+
+	ret = misc_register(&adb_device);
+	if (ret) {
+		printk(KERN_ERR "adb Can't register misc device  %d \n",
+						MISC_DYNAMIC_MINOR);
+		return ret;
+	}
+	ret = misc_register(&adb_enable_device);
+	if (ret) {
+		printk(KERN_ERR "adb Can't register misc enable device  %d \n",
+						MISC_DYNAMIC_MINOR);
+		misc_deregister(&adb_device);
+		return ret;
+	}
+
+	ret = usb_function_register(&usb_func_adb);
+	if (ret) {
+		misc_deregister(&adb_device);
+		misc_deregister(&adb_enable_device);
+	}
+	return ret;
+}
+
+module_init(adb_init);
+
+static void __exit adb_exit(void)
+{
+	misc_deregister(&adb_device);
+	misc_deregister(&adb_enable_device);
+
+	usb_function_unregister(&usb_func_adb);
+}
+module_exit(adb_exit);
diff --git a/drivers/usb/function/diag.c b/drivers/usb/function/diag.c
new file mode 100644
index 0000000..94c32e7
--- /dev/null
+++ b/drivers/usb/function/diag.c
@@ -0,0 +1,567 @@
+/* drivers/usb/function/diag.c
+ *
+ * Diag Function Device - Route DIAG frames between SMD and USB
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/err.h>
+
+#include <mach/msm_smd.h>
+#include <mach/usbdiag.h>
+
+#include "usb_function.h"
+
+#define WRITE_COMPLETE 0
+#define READ_COMPLETE  0
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof intf_desc,
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0xFF,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength 			=	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType 	=	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes 		=	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize 	=	__constant_cpu_to_le16(512),
+	.bInterval 			=	0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+/* list of requests */
+struct diag_req_entry {
+	struct list_head re_entry;
+	struct usb_request *usb_req;
+	void *diag_request;
+};
+struct diag_context {
+	struct usb_endpoint *epout;
+	struct usb_endpoint *epin;
+	spinlock_t dev_lock;
+	/* linked list of read requests */
+	struct list_head dev_read_req_list;
+	/* linked list of write requests */
+	struct list_head dev_write_req_list;
+	struct diag_operations *operations;
+	struct workqueue_struct *diag_wq;
+	struct work_struct usb_config_work;
+	unsigned configured;
+	unsigned bound;
+	int diag_opened;
+};
+
+static struct usb_function usb_func_diag;
+static struct diag_context _context;
+static void diag_write_complete(struct usb_endpoint *,
+		struct usb_request *);
+static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *,
+		unsigned len, gfp_t);
+static void diag_free_req_entry(struct usb_endpoint *, struct diag_req_entry *);
+static void diag_read_complete(struct usb_endpoint *, struct usb_request *);
+
+
+static void diag_unbind(void *context)
+{
+
+	struct diag_context *ctxt = context;
+
+	if (!ctxt)
+		return;
+	if (!ctxt->bound)
+		return;
+	if (ctxt->epin) {
+		usb_ept_fifo_flush(ctxt->epin);
+		usb_ept_enable(ctxt->epin, 0);
+		usb_free_endpoint(ctxt->epin);
+		}
+	if (ctxt->epout) {
+		usb_ept_fifo_flush(ctxt->epout);
+		usb_ept_enable(ctxt->epout, 0);
+		usb_free_endpoint(ctxt->epout);
+		}
+	ctxt->bound = 0;
+}
+static void diag_bind(void *context)
+{
+	struct diag_context *ctxt = context;
+
+	if (!ctxt)
+		return;
+
+	intf_desc.bInterfaceNumber =
+		usb_msm_get_next_ifc_number(&usb_func_diag);
+
+	ctxt->epin = usb_alloc_endpoint(USB_DIR_IN);
+	if (ctxt->epin) {
+		hs_bulk_in_desc.bEndpointAddress =
+			USB_DIR_IN | ctxt->epin->num;
+		fs_bulk_in_desc.bEndpointAddress =
+			USB_DIR_IN | ctxt->epin->num;
+	}
+
+	ctxt->epout = usb_alloc_endpoint(USB_DIR_OUT);
+	if (ctxt->epout) {
+		hs_bulk_out_desc.bEndpointAddress =
+			USB_DIR_OUT | ctxt->epout->num;
+		fs_bulk_out_desc.bEndpointAddress =
+			USB_DIR_OUT | ctxt->epout->num;
+	}
+
+	ctxt->bound = 1;
+}
+static void diag_configure(int configured, void *_ctxt)
+
+{
+	struct diag_context *ctxt = _ctxt;
+
+	if (!ctxt)
+		return;
+	if (configured) {
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(ctxt->epin, &hs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->epout, &hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(ctxt->epin, &fs_bulk_in_desc);
+			usb_configure_endpoint(ctxt->epout, &fs_bulk_out_desc);
+		}
+		usb_ept_enable(ctxt->epin,  1);
+		usb_ept_enable(ctxt->epout, 1);
+		ctxt->configured = 1;
+		queue_work(_context.diag_wq, &(_context.usb_config_work));
+	} else {
+		/* all pending requests will be canceled */
+		ctxt->configured = 0;
+		if (ctxt->epin) {
+			usb_ept_fifo_flush(ctxt->epin);
+			usb_ept_enable(ctxt->epin, 0);
+		}
+		if (ctxt->epout) {
+			usb_ept_fifo_flush(ctxt->epout);
+			usb_ept_enable(ctxt->epout, 0);
+		}
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_disconnect))
+				ctxt->operations->diag_disconnect();
+	}
+
+}
+static struct usb_function usb_func_diag = {
+	.bind = diag_bind,
+	.configure = diag_configure,
+	.unbind = diag_unbind,
+
+
+	.name = "diag",
+	.context = &_context,
+};
+int diag_usb_register(struct diag_operations *func)
+{
+	struct diag_context *ctxt = &_context;
+
+	if (func == NULL) {
+		printk(KERN_ERR "diag_usb_register: registering "
+				"diag char operations NULL\n");
+		return -1;
+	}
+	ctxt->operations = func;
+	if (ctxt->configured == 1)
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_connect))
+				ctxt->operations->diag_connect();
+	return 0;
+}
+EXPORT_SYMBOL(diag_usb_register);
+
+int diag_usb_unregister(void)
+{
+	struct diag_context *ctxt = &_context;
+
+	ctxt->operations = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(diag_usb_unregister);
+
+int diag_open(int num_req)
+{
+	struct diag_context *ctxt = &_context;
+	struct diag_req_entry *write_entry;
+	struct diag_req_entry *read_entry;
+	int i = 0;
+
+	for (i = 0; i < num_req; i++) {
+		write_entry = diag_alloc_req_entry(ctxt->epin, 0, GFP_KERNEL);
+		if (write_entry) {
+			write_entry->usb_req->complete = diag_write_complete;
+			write_entry->usb_req->device = (void *)ctxt;
+			list_add(&write_entry->re_entry,
+					&ctxt->dev_write_req_list);
+		} else
+			goto write_error;
+	}
+
+	for (i = 0; i < num_req ; i++) {
+		read_entry = diag_alloc_req_entry(ctxt->epout, 0 , GFP_KERNEL);
+		if (read_entry) {
+			read_entry->usb_req->complete = diag_read_complete;
+			read_entry->usb_req->device = (void *)ctxt;
+			list_add(&read_entry->re_entry ,
+					&ctxt->dev_read_req_list);
+		} else
+			goto read_error;
+		}
+	ctxt->diag_opened = 1;
+	return 0;
+read_error:
+	printk(KERN_ERR "%s:read requests allocation failure\n", __func__);
+	while (!list_empty(&ctxt->dev_read_req_list)) {
+		read_entry = list_entry(ctxt->dev_read_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&read_entry->re_entry);
+		diag_free_req_entry(ctxt->epout, read_entry);
+	}
+write_error:
+	printk(KERN_ERR "%s: write requests allocation failure\n", __func__);
+	while (!list_empty(&ctxt->dev_write_req_list)) {
+		write_entry = list_entry(ctxt->dev_write_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&write_entry->re_entry);
+		diag_free_req_entry(ctxt->epin, write_entry);
+	}
+	ctxt->diag_opened = 0;
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(diag_open);
+
+void diag_close(void)
+{
+	struct diag_context *ctxt = &_context;
+	struct diag_req_entry *req_entry;
+	/* free write requests */
+
+	while (!list_empty(&ctxt->dev_write_req_list)) {
+		req_entry = list_entry(ctxt->dev_write_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		diag_free_req_entry(ctxt->epin, req_entry);
+	}
+
+	/* free read requests */
+	while (!list_empty(&ctxt->dev_read_req_list)) {
+		req_entry = list_entry(ctxt->dev_read_req_list.next,
+				struct diag_req_entry, re_entry);
+		list_del(&req_entry->re_entry);
+		diag_free_req_entry(ctxt->epout, req_entry);
+	}
+	return;
+}
+EXPORT_SYMBOL(diag_close);
+
+static void diag_free_req_entry(struct usb_endpoint *ep,
+		struct diag_req_entry *req)
+{
+	if (ep != NULL && req != NULL) {
+		if (req->usb_req != NULL)
+			usb_ept_free_req(ep, req->usb_req);
+		kfree(req);
+	}
+}
+
+
+static struct diag_req_entry *diag_alloc_req_entry(struct usb_endpoint *ep,
+		unsigned len, gfp_t kmalloc_flags)
+{
+	struct diag_req_entry *req;
+
+	req = kmalloc(sizeof(struct diag_req_entry), kmalloc_flags);
+	if (req == NULL)
+		return ERR_PTR(-ENOMEM);
+
+
+	req->usb_req = usb_ept_alloc_req(ep , 0);
+	if (req->usb_req == NULL) {
+		kfree(req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	req->usb_req->context = req;
+	return req;
+}
+
+int diag_read(struct diag_request *d_req)
+{
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct diag_req_entry *req_entry = NULL;
+	struct diag_context *ctxt = &_context;
+
+
+	if (ctxt->diag_opened != 1)
+		return -EIO;
+	spin_lock_irqsave(&ctxt->dev_lock , flags);
+	if (!list_empty(&ctxt->dev_read_req_list)) {
+		req_entry = list_entry(ctxt->dev_read_req_list.next ,
+				struct diag_req_entry , re_entry);
+		req_entry->diag_request = d_req;
+		req = req_entry->usb_req;
+		list_del(&req_entry->re_entry);
+	}
+	spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+	if (req) {
+		req->buf = d_req->buf;
+		req->length = d_req->length;
+		req->device = ctxt;
+		if (usb_ept_queue_xfer(ctxt->epout, req)) {
+			/* If error add the link to the linked list again. */
+			spin_lock_irqsave(&ctxt->dev_lock , flags);
+			list_add_tail(&req_entry->re_entry ,
+					&ctxt->dev_read_req_list);
+			spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+			printk(KERN_ERR "diag_read:can't queue the request\n");
+			return -EIO;
+		}
+	} else {
+		printk(KERN_ERR
+				"diag_read: no requests available\n");
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(diag_read);
+
+int diag_write(struct diag_request *d_req)
+{
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct diag_req_entry *req_entry = NULL;
+	struct diag_context *ctxt = &_context;
+
+	if (ctxt->diag_opened != 1)
+		return -EIO;
+	spin_lock_irqsave(&ctxt->dev_lock , flags);
+	if (!list_empty(&ctxt->dev_write_req_list)) {
+		req_entry = list_entry(ctxt->dev_write_req_list.next ,
+				struct diag_req_entry , re_entry);
+		req_entry->diag_request = d_req;
+		req = req_entry->usb_req;
+		list_del(&req_entry->re_entry);
+	}
+	spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+	if (req) {
+		req->buf = d_req->buf;
+		req->length = d_req->length;
+		req->device = ctxt;
+		if (usb_ept_queue_xfer(ctxt->epin, req)) {
+			/* If error add the link to linked list again*/
+			spin_lock_irqsave(&ctxt->dev_lock, flags);
+			list_add_tail(&req_entry->re_entry ,
+					&ctxt->dev_write_req_list);
+			spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+			printk(KERN_ERR "diag_write: cannot queue"
+					" write request\n");
+			return -EIO;
+		}
+	} else {
+		printk(KERN_ERR	"diag_write: no requests available\n");
+		return -EIO;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(diag_write);
+
+static void diag_write_complete(struct usb_endpoint *ep ,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = (struct diag_context *)req->device;
+	struct diag_req_entry *diag_req = req->context;
+	struct diag_request *d_req = (struct diag_request *)
+						diag_req->diag_request;
+	unsigned long flags;
+
+	if (ctxt == NULL) {
+		printk(KERN_ERR "diag_write_complete: requesting "
+				"NULL device pointer\n");
+		return;
+	}
+	if (req->status == WRITE_COMPLETE) {
+		if ((req->length >= ep->max_pkt) &&
+				((req->length % ep->max_pkt) == 0)) {
+			req->length = 0;
+			req->device = ctxt;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			usb_ept_queue_xfer(ctxt->epin, req);
+			return;
+		}
+			/* normal completion*/
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_write_req_list);
+		if (req->length != 0) {
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+		}
+		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_write_complete))
+				ctxt->operations->diag_char_write_complete(
+					d_req);
+	} else {
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+			&ctxt->dev_write_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock , flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_write_complete))
+				ctxt->operations->diag_char_write_complete(
+					d_req);
+	}
+}
+static void diag_read_complete(struct usb_endpoint *ep ,
+		struct usb_request *req)
+{
+	 struct diag_context *ctxt = (struct diag_context *)req->device;
+	 struct diag_req_entry *diag_req = req->context;
+	 struct diag_request *d_req = (struct diag_request *)
+							diag_req->diag_request;
+	 unsigned long flags;
+
+	if (ctxt == NULL) {
+		printk(KERN_ERR "diag_read_complete: requesting "
+				"NULL device pointer\n");
+		return;
+	}
+	if (req->status == READ_COMPLETE) {
+			/* normal completion*/
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_read_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_read_complete))
+				ctxt->operations->diag_char_read_complete(
+					d_req);
+	} else {
+		spin_lock_irqsave(&ctxt->dev_lock, flags);
+		list_add_tail(&diag_req->re_entry ,
+				&ctxt->dev_read_req_list);
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+		spin_unlock_irqrestore(&ctxt->dev_lock, flags);
+		if ((ctxt->operations) &&
+			(ctxt->operations->diag_char_read_complete))
+				ctxt->operations->diag_char_read_complete(
+					d_req);
+	}
+}
+void usb_config_work_func(struct work_struct *work)
+{
+	struct diag_context *ctxt = &_context;
+	if ((ctxt->operations) &&
+		(ctxt->operations->diag_connect))
+			ctxt->operations->diag_connect();
+}
+
+struct usb_descriptor_header *diag_hs_descriptors[4];
+struct usb_descriptor_header *diag_fs_descriptors[4];
+
+static int __init diag_init(void)
+{
+	int r;
+	struct diag_context *ctxt = &_context;
+
+	diag_hs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	diag_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&hs_bulk_in_desc;
+	diag_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&hs_bulk_out_desc;
+	diag_hs_descriptors[3] = NULL;
+
+	diag_fs_descriptors[0] = (struct usb_descriptor_header *)&intf_desc;
+	diag_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&fs_bulk_in_desc;
+	diag_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&fs_bulk_out_desc;
+	diag_fs_descriptors[3] = NULL;
+	INIT_LIST_HEAD(&ctxt->dev_read_req_list);
+	INIT_LIST_HEAD(&ctxt->dev_write_req_list);
+	ctxt->diag_wq  = create_singlethread_workqueue("diag");
+	if (ctxt->diag_wq == NULL)
+		return -1;
+	INIT_WORK(&_context.usb_config_work , usb_config_work_func);
+
+	usb_func_diag.hs_descriptors = diag_hs_descriptors;
+	usb_func_diag.fs_descriptors = diag_fs_descriptors;
+	spin_lock_init(&_context.dev_lock);
+	r = usb_function_register(&usb_func_diag);
+	if (r < 0)
+		destroy_workqueue(ctxt->diag_wq);
+	return r;
+}
+
+module_init(diag_init);
+static void __exit diag_exit(void)
+{
+	struct diag_context *ctxt = &_context;
+	/*
+	 * ctxt points at the static _context object, so it can never
+	 * be NULL; no check is needed before teardown.
+	 */
+
+	usb_function_unregister(&usb_func_diag);
+	destroy_workqueue(ctxt->diag_wq);
+}
+module_exit(diag_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/ether.c b/drivers/usb/function/ether.c
new file mode 100644
index 0000000..f31032e9
--- /dev/null
+++ b/drivers/usb/function/ether.c
@@ -0,0 +1,327 @@
+/* drivers/usb/function/ether.c
+ *
+ * Simple Ethernet Function Device
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Implements the "cdc_subset" bulk-only protocol supported by Linux.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "usb_function.h"
+
+/* Ethernet frame is 1514 + FCS, but round up to 512 * 3 so we
+ * always queue a multiple of the USB max packet size (64 or 512)
+ */
+#define USB_MTU 1536
+
+#define MAX_TX 8
+#define MAX_RX 8
+
+/* Per-device state for the simple (cdc_subset style) ethernet function. */
+struct ether_context {
+	spinlock_t lock;		/* protects rx_reqs and tx_reqs */
+	struct net_device *dev;
+	struct usb_endpoint *out;	/* bulk-out: host -> device (rx) */
+	struct usb_endpoint *in;	/* bulk-in: device -> host (tx) */
+
+	struct list_head rx_reqs;	/* idle rx usb_requests */
+	struct list_head tx_reqs;	/* idle tx usb_requests */
+
+	struct net_device_stats stats;
+};
+
+static int ether_queue_out(struct ether_context *ctxt,
+			   struct usb_request *req);
+static void ether_in_complete(struct usb_endpoint *ept,
+			      struct usb_request *req);
+static void ether_out_complete(struct usb_endpoint *ept,
+			       struct usb_request *req);
+
+/* Function-driver bind callback: record the endpoint pair and
+ * pre-allocate the idle rx/tx request pools (MAX_RX/MAX_TX entries).
+ * Allocation failures simply leave the pools short.
+ */
+static void ether_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct ether_context *ctxt = _ctxt;
+	struct usb_request *req;
+	unsigned long flags;
+	int n;
+
+	/* order matches usb_func_ether.ifc_ept_type: OUT first, then IN */
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	for (n = 0; n < MAX_RX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 0);
+		if (!req)
+			break;
+		req->complete = ether_out_complete;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+	for (n = 0; n < MAX_TX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 0);
+		if (!req)
+			break;
+		req->complete = ether_in_complete;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->tx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+}
+
+/* Bulk-in (tx) completion: account the frame, free the skb, and return
+ * the request to the idle pool.  If the pool was empty the netif queue
+ * had been stopped by usb_ether_xmit(), so restart it here.
+ */
+static void ether_in_complete(struct usb_endpoint *ept,
+			      struct usb_request *req)
+{
+	unsigned long flags;
+	struct sk_buff *skb = req->context;
+	struct ether_context *ctxt = *((void **) skb->cb);
+
+	if (req->status == 0) {
+		ctxt->stats.tx_packets++;
+		ctxt->stats.tx_bytes += req->actual;
+	} else {
+		ctxt->stats.tx_errors++;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	/* empty list means xmit stopped the queue; we just freed a slot */
+	if (list_empty(&ctxt->tx_reqs))
+		netif_start_queue(ctxt->dev);
+	list_add_tail(&req->list, &ctxt->tx_reqs);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* Bulk-out (rx) completion: hand a good frame to the network stack,
+ * or drop it on error, then recycle the request.  On -ENODEV the
+ * request is parked on rx_reqs instead of being resubmitted.
+ */
+static void ether_out_complete(struct usb_endpoint *ept,
+			       struct usb_request *req)
+{
+	struct sk_buff *skb = req->context;
+	struct ether_context *ctxt = *((void **) skb->cb);
+
+	if (req->status == 0) {
+		skb_put(skb, req->actual);
+		skb->protocol = eth_type_trans(skb, ctxt->dev);
+		ctxt->stats.rx_packets++;
+		ctxt->stats.rx_bytes += req->actual;
+		netif_rx(skb);
+	} else {
+		dev_kfree_skb_any(skb);
+		ctxt->stats.rx_errors++;
+	}
+
+	/* don't bother requeuing if we just went offline */
+	if (req->status == -ENODEV) {
+		unsigned long flags;
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	} else {
+		/* allocates a fresh skb and resubmits this request */
+		if (ether_queue_out(ctxt, req))
+			pr_err("ether_out: cannot requeue\n");
+	}
+}
+
+/* Attach a fresh USB_MTU-sized skb to @req and queue it on the
+ * bulk-out endpoint.  On any failure (skb allocation or queueing) the
+ * request is parked back on rx_reqs and a negative value is returned.
+ */
+static int ether_queue_out(struct ether_context *ctxt,
+			   struct usb_request *req)
+{
+	unsigned long flags;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = alloc_skb(USB_MTU + NET_IP_ALIGN, GFP_ATOMIC);
+	if (!skb) {
+		pr_err("ether_queue_out: failed to alloc skb\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	/* stash the context pointer in skb->cb for the completion handler */
+	*((void **) skb->cb) = ctxt;
+	req->buf = skb->data;
+	req->length = USB_MTU;
+	req->context = skb;
+
+	ret = usb_ept_queue_xfer(ctxt->out, req);
+	if (ret) {
+/* note: "goto fail" above jumps into this if-body, which is legal C;
+ * both error paths share the requeue below.
+ */
+fail:
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->rx_reqs);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+	}
+
+	return ret;
+}
+
+/* Function-driver configure callback.  On configuration, drain the
+ * idle rx pool and submit every request so receive can start; on
+ * deconfiguration nothing is done here because the controller cancels
+ * all pending requests itself.
+ */
+static void ether_configure(int configured, void *_ctxt)
+{
+	unsigned long flags;
+	struct ether_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	pr_info("ether_configure() %d\n", configured);
+
+	if (configured) {
+		/* we're online -- get all rx requests queued */
+		for (;;) {
+			spin_lock_irqsave(&ctxt->lock, flags);
+			if (list_empty(&ctxt->rx_reqs)) {
+				req = 0;
+			} else {
+				req = list_first_entry(&ctxt->rx_reqs,
+						       struct usb_request,
+						       list);
+				list_del(&req->list);
+			}
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+			if (!req)
+				break;
+			if (ether_queue_out(ctxt, req))
+				break;
+		}
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+/* Function registration: CDC Communications class (0x02), subclass
+ * 0x0a (mobile direct line / cdc_subset style), with one bulk-out and
+ * one bulk-in endpoint in that order (see ether_bind()).
+ */
+static struct usb_function usb_func_ether = {
+	.bind = ether_bind,
+	.configure = ether_configure,
+
+	.name = "ether",
+
+	.ifc_class = 0x02,
+	.ifc_subclass = 0x0a,
+	.ifc_protocol = 0x00,
+
+	.ifc_name = "ether",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+/* net_device hard_start_xmit: take an idle tx request, attach the skb
+ * and queue it on the bulk-in endpoint.  The netif queue is stopped
+ * when the last idle request is consumed and restarted from
+ * ether_in_complete().  Returns 0 (consumed) or 1 (busy, retry).
+ */
+static int usb_ether_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+	struct usb_request *req;
+	unsigned long flags;
+	unsigned len;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if (list_empty(&ctxt->tx_reqs)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(&ctxt->tx_reqs,
+				       struct usb_request, list);
+		list_del(&req->list);
+		if (list_empty(&ctxt->tx_reqs))
+			netif_stop_queue(dev);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (!req) {
+		pr_err("usb_ether_xmit: could not obtain tx request\n");
+		return 1;
+	}
+
+	/* Pad by one byte when the frame is an exact multiple of the max
+	 * packet size so the transfer always ends with a short packet.
+	 * Any multiple of 512 (high speed) is also a multiple of 64
+	 * (full speed), so testing the low six bits covers both speeds;
+	 * the former "|| !(len & 511)" clause was redundant.
+	 */
+	len = skb->len;
+	if (!(len & 63))
+		len++;
+
+	*((void **) skb->cb) = ctxt;
+	req->context = skb;
+	req->buf = skb->data;
+	req->length = len;
+
+	if (usb_ept_queue_xfer(ctxt->in, req)) {
+		/* return the request to the idle pool and drop the frame */
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->tx_reqs);
+		netif_start_queue(dev);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+
+		dev_kfree_skb_any(skb);
+		ctxt->stats.tx_dropped++;
+
+		pr_err("usb_ether_xmit: could not queue tx request\n");
+	}
+
+	return 0;
+}
+
+/* net_device open callback: nothing to do, link is driven by USB state. */
+static int usb_ether_open(struct net_device *dev)
+{
+	return 0;
+}
+
+/* net_device stop callback: nothing to tear down. */
+static int usb_ether_stop(struct net_device *dev)
+{
+	return 0;
+}
+
+/* Return the stats block kept in the netdev private area. */
+static struct net_device_stats *usb_ether_get_stats(struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+	return &ctxt->stats;
+}
+
+/* alloc_netdev() setup callback: initialize the private context,
+ * install the (pre-net_device_ops era) callbacks, apply the standard
+ * ethernet defaults and assign a random MAC address.
+ */
+static void __init usb_ether_setup(struct net_device *dev)
+{
+	struct ether_context *ctxt = netdev_priv(dev);
+
+	pr_info("usb_ether_setup()\n");
+
+	INIT_LIST_HEAD(&ctxt->rx_reqs);
+	INIT_LIST_HEAD(&ctxt->tx_reqs);
+	spin_lock_init(&ctxt->lock);
+	ctxt->dev = dev;
+
+	dev->open = usb_ether_open;
+	dev->stop = usb_ether_stop;
+	dev->hard_start_xmit = usb_ether_xmit;
+	dev->get_stats = usb_ether_get_stats;
+	dev->watchdog_timeo = 20;	/* jiffies before tx watchdog fires */
+
+	ether_setup(dev);
+
+	random_ether_addr(dev->dev_addr);
+}
+
+/* Module init: allocate and register the usb%d net_device, then
+ * register the USB function.  The usb_function_register() return value
+ * was previously ignored, leaving the netdev registered and reporting
+ * success on failure; it is now checked and unwound.
+ */
+static int __init ether_init(void)
+{
+	struct net_device *dev;
+	int ret;
+
+	dev = alloc_netdev(sizeof(struct ether_context),
+			   "usb%d", usb_ether_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = register_netdev(dev);
+	if (ret) {
+		free_netdev(dev);
+		return ret;
+	}
+
+	usb_func_ether.context = netdev_priv(dev);
+	ret = usb_function_register(&usb_func_ether);
+	if (ret) {
+		unregister_netdev(dev);
+		free_netdev(dev);
+	}
+	return ret;
+}
+
+module_init(ether_init);
diff --git a/drivers/usb/function/ether_cdc_ecm.c b/drivers/usb/function/ether_cdc_ecm.c
new file mode 100644
index 0000000..8fa5af1
--- /dev/null
+++ b/drivers/usb/function/ether_cdc_ecm.c
@@ -0,0 +1,1337 @@
+/*
+ * ether_cdc_ecm.c -- Ethernet Function driver, with CDC
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This file has been derived from gadget/ether.c
+ *
+ * Copyright (C) 2003-2005 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/cdc.h>
+
+#include "usb_function.h"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Ethernet function driver -- with CDC options
+ * Builds on hardware support for a full duplex link.
+ *
+ * CDC Ethernet is the standard USB solution for sending Ethernet frames
+ * using USB.  Real hardware tends to use the same framing protocol but look
+ * different for control features.  This driver strongly prefers to use
+ * this USB-IF standard as its open-systems interoperability solution;
+ * most host side USB stacks (except from Microsoft) support it.
+ */
+
+#define DRIVER_DESC		"Ethernet Function CDC ECM"
+#define DRIVER_VERSION		"1.0"
+
+static const char shortname[] = "ether";
+static const char driver_desc[] = DRIVER_DESC;
+
+static unsigned int string_data;
+static unsigned int string_control;
+static unsigned int string_ethaddr;
+#define RX_EXTRA	20		/* guard against rx overflows */
+
+
+
+/* outgoing packet filters. */
+#define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
+			| USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+			| USB_CDC_PACKET_TYPE_PROMISCUOUS \
+			| USB_CDC_PACKET_TYPE_DIRECTED)
+
+/*-------------------------------------------------------------------------*/
+
+struct eth_dev {
+	spinlock_t		lock;
+	struct usb_request	*req;		/* for control responses */
+	struct usb_request	*stat_req;	/* for cdc status */
+
+	unsigned		configured:1;
+	struct usb_endpoint	*in_ep, *out_ep, *status_ep;
+
+	spinlock_t		req_lock;
+	struct list_head	tx_reqs, rx_reqs;
+
+	struct net_device	*net;
+	struct net_device_stats	stats;
+	atomic_t		tx_qlen;
+
+	struct work_struct	work;
+	unsigned		zlp:1;
+	unsigned		suspended:1;
+	u16			cdc_filter;
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+	u8			host_mac[ETH_ALEN];
+
+	int alt_set;
+};
+
+static struct usb_function usb_func_ether;
+
+/* Ethernet function descriptors */
+#define USB_DT_IAD_SIZE		8
+struct usb_interface_assoc_descriptor	eth_IAD = {
+	.bLength           = USB_DT_IAD_SIZE,
+	.bDescriptorType   = USB_DT_INTERFACE_ASSOCIATION,
+	.bInterfaceCount   = 2,
+	.bFunctionClass    = USB_CLASS_COMM,
+	.bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol = USB_CDC_PROTO_NONE,
+	.iFunction         = 0,
+};
+
+struct usb_interface_descriptor		eth_control_intf = {
+	.bLength =  USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+};
+
+struct usb_cdc_header_desc		eth_header_desc = {
+	.bLength =		sizeof(struct usb_cdc_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		__constant_cpu_to_le16(0x0110),
+};
+
+struct usb_cdc_union_desc		eth_union_desc = {
+	.bLength =		sizeof(struct usb_cdc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+};
+
+struct usb_cdc_ether_desc 		eth_ether_desc = {
+	.bLength =		sizeof(struct usb_cdc_ether_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+	/* this descriptor actually adds value, surprise! */
+	.bmEthernetStatistics =	__constant_cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	__constant_cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	__constant_cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+struct usb_endpoint_descriptor 		eth_control_intf_hs_int_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.bInterval =           4,
+	.wMaxPacketSize =       64,
+};
+
+struct usb_endpoint_descriptor 		eth_control_intf_fs_int_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.bInterval =           4,
+	.wMaxPacketSize =       64,
+};
+
+struct usb_interface_descriptor 	eth_data_alt_zero_intf = {
+	.bLength =  USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bAlternateSetting =    0,
+	.bNumEndpoints =        0,
+	.bInterfaceClass =      USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =   0,
+	.bInterfaceProtocol =   0,
+};
+
+struct usb_interface_descriptor 	eth_data_alt_one_intf = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bAlternateSetting =    1,
+	.bNumEndpoints =        2,
+	.bInterfaceClass =      USB_CLASS_CDC_DATA ,
+	.bInterfaceSubClass =   0,
+	.bInterfaceProtocol =   USB_CDC_PROTO_NONE,
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_hs_bulk_out_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(512),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_fs_bulk_out_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_hs_bulk_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(512),
+};
+
+struct usb_endpoint_descriptor 		eth_data_intf_fs_bulk_in_ep_desc = {
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =       __constant_cpu_to_le16(64),
+};
+
+struct eth_dev *eth_device;
+
+/* Some systems will want different product identifers published in the
+ * device descriptor, either numbers or strings or both.  These string
+ * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
+static char ethaddr[2 * ETH_ALEN + 1];
+static int eth_bound;
+
+#define DEFAULT_QLEN	2	/* double buffering by default */
+
+/* peak bulk transfer bits-per-second */
+#define	HS_BPS		(13 * 512 * 8 * 1000 * 8)
+
+/* for dual-speed hardware, use deeper queues at highspeed */
+#define qlen (DEFAULT_QLEN * 5) /* High Speed */
+
+/*-------------------------------------------------------------------------*/
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt, (d)->net->name, ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DEBUG(dev, fmt, args...) \
+	xprintk(dev, KERN_DEBUG, fmt, ## args)
+#else
+#define DEBUG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDEBUG	DEBUG
+#else
+#define VDEBUG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev, KERN_ERR, fmt, ## args)
+#ifdef WARN
+#undef WARN
+#endif
+#define WARN(dev, fmt, args...) \
+	xprintk(dev, KERN_WARNING, fmt, ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev, KERN_INFO, fmt, ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* include the status endpoint if we can, even where it's optional.
+ * use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real ether
+ * can provide.  more advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+#define STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags);
+static int alloc_requests(struct eth_dev *dev, unsigned n, gfp_t gfp_flags);
+
+/* Enable the optional status (notification) endpoint and pre-allocate
+ * the rx/tx request pools.  Returns 0 on success; the caller is
+ * responsible for cleanup on error.
+ */
+static int set_ether_config(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	int result = 0;
+
+	if (dev->status_ep)
+		usb_ept_enable(dev->status_ep, 1);
+
+	result = alloc_requests(dev, qlen , gfp_flags);
+	if (result == 0)
+		DEBUG(dev, "qlen %d\n", qlen);
+
+	/* caller is responsible for cleanup on error */
+	return result;
+}
+
+/* Tear down the active configuration: stop the netif queue, disable
+ * both bulk endpoints (forcing synchronous completion of pending i/o)
+ * and free every pooled request.  Safe to call when not configured.
+ */
+static void eth_reset_config(struct eth_dev *dev)
+{
+	struct usb_request	*req;
+	unsigned long  flags;
+
+	DEBUG(dev, "%s\n", __func__);
+
+	if (!dev)
+		return;
+	if (!dev->net)
+		return;
+
+	if (dev->configured == 0)
+		return;
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	/* disable endpoints, forcing (synchronous) completion of
+	 * pending i/o.  then free the requests.
+	 */
+	if (dev->in_ep) {
+		usb_ept_enable(dev->in_ep, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		while (likely(!list_empty(&dev->tx_reqs))) {
+			req = container_of(dev->tx_reqs.next,
+						struct usb_request, list);
+			list_del(&req->list);
+			/* drop the lock around the (possibly sleeping) free */
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			usb_ept_free_req(dev->in_ep, req);
+			spin_lock_irqsave(&dev->req_lock, flags);
+		}
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	if (dev->out_ep) {
+		usb_ept_enable(dev->out_ep, 0);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		while (likely(!list_empty(&dev->rx_reqs))) {
+			req = container_of(dev->rx_reqs.next,
+						struct usb_request, list);
+			list_del(&req->list);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
+			usb_ept_free_req(dev->out_ep, req);
+			spin_lock_irqsave(&dev->req_lock, flags);
+		}
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+
+	/* NOTE(review): passes a NULL request; presumably dev->stat_req
+	 * was intended here -- confirm against usb_ept_free_req() semantics.
+	 */
+	if (dev->status_ep)
+		usb_ept_free_req(dev->status_ep, 0);
+	dev->cdc_filter = 0;
+	dev->configured = 0;
+}
+
+/* change our operational config.  must agree with the code
+ * that returns config descriptors, and altsetting code.
+ */
+/* Switch to the ethernet configuration: reset any previous state,
+ * apply the new config, and roll back on failure.  Sets
+ * dev->configured on success.
+ */
+static int eth_set_config(struct eth_dev *dev,  gfp_t gfp_flags)
+{
+	int result = 0;
+
+	eth_reset_config(dev);
+	result = set_ether_config(dev, gfp_flags);
+	if (result)
+		eth_reset_config(dev);
+	else
+		dev->configured = 1;
+	return result;
+}
+
+/* Function-driver configure callback: program the three endpoints with
+ * the speed-appropriate descriptors (picked via usb_msm_get_speed())
+ * and bring the ethernet configuration up; on deconfigure, tear it
+ * down.  No-op until eth_bound is set and while already configured.
+ */
+static void eth_configure(int configured, void *_ctxt)
+{
+	int                     result = 0;
+	struct eth_dev *dev = (struct eth_dev *) _ctxt;
+	if (!dev)
+		return ;
+	if (!eth_bound)
+		return;
+
+	if (!configured) {
+		eth_reset_config(dev);
+		return ;
+	}
+	if (dev->configured == 1)
+		return ;
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->status_ep,
+					&eth_control_intf_hs_int_in_ep_desc);
+		usb_configure_endpoint(dev->in_ep,
+					&eth_data_intf_hs_bulk_in_ep_desc);
+		usb_configure_endpoint(dev->out_ep,
+					&eth_data_intf_hs_bulk_out_ep_desc);
+	} else {
+		usb_configure_endpoint(dev->status_ep,
+					&eth_control_intf_fs_int_in_ep_desc);
+		usb_configure_endpoint(dev->in_ep,
+					&eth_data_intf_fs_bulk_in_ep_desc);
+		usb_configure_endpoint(dev->out_ep,
+					&eth_data_intf_fs_bulk_out_ep_desc);
+	}
+	result = eth_set_config(dev, GFP_ATOMIC);
+}
+/* The interrupt endpoint is used in CDC networking models (Ethernet, ATM)
+ * only to notify the host about link status changes (which we support)
+ * Since we want this CDC Ethernet code to be vendor-neutral, only one
+ * status request is ever queued.
+ */
+
+/* Status (interrupt) endpoint completion: after the host reads the
+ * NETWORK_CONNECTION notification, reuse the same request to send the
+ * follow-up SPEED_CHANGE notification (CDC spec 3.8.1).
+ *
+ * NOTE(review): the DEBUG() calls below reference a local 'dev' that
+ * does not exist in this function; this compiles only because DEBUG
+ * is a no-op unless the DEBUG macro is enabled -- confirm/fix before
+ * enabling debug builds.
+ */
+static void
+eth_status_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct usb_cdc_notification	*event = req->buf;
+	int				value = req->status;
+
+	/* issue the second notification if host reads the first */
+	if (event->bNotificationType == USB_CDC_NOTIFY_NETWORK_CONNECTION
+			&& value == 0) {
+		__le32	*data = req->buf + sizeof *event;
+
+		event->bmRequestType = 0xA1;
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = __constant_cpu_to_le16(0);
+		event->wIndex =	__constant_cpu_to_le16(
+				eth_data_alt_one_intf.bInterfaceNumber);
+		event->wLength = __constant_cpu_to_le16(8);
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data[0] = data[1] = cpu_to_le32(HS_BPS);
+
+		req->length = STATUS_BYTECOUNT;
+		value = usb_ept_queue_xfer(ep, req);
+		DEBUG(dev, "send SPEED_CHANGE --> %d\n", value);
+		if (value == 0)
+			return;
+	} else if (value != -ECONNRESET)
+		DEBUG(dev, "event %02x --> %d\n",
+			event->bNotificationType, value);
+	req->context = NULL;
+}
+
+/* Queue the first CDC notification (NETWORK_CONNECTION, "connected")
+ * on the status endpoint; its completion handler then sends the
+ * SPEED_CHANGE follow-up.  The endpoint is bounced (disable/enable)
+ * first to flush any stale status transfer.
+ */
+static void issue_start_status(struct eth_dev *dev)
+{
+	struct usb_request		*req = dev->stat_req;
+	struct usb_cdc_notification	*event;
+	int				value;
+
+	DEBUG(dev, "%s, flush old status first\n", __func__);
+
+	/* flush old status
+	 *
+	 * FIXME ugly idiom, maybe we'd be better with just
+	 * a "cancel the whole queue" primitive since any
+	 * unlink-one primitive has way too many error modes.
+	 * here, we "know" toggle is already clear...
+	 *
+	 * FIXME iff req->context != null just dequeue it
+	 */
+	usb_ept_enable(dev->status_ep,  0);
+	usb_ept_enable(dev->status_ep, 1);
+
+	/* 3.8.1 says to issue first NETWORK_CONNECTION, then
+	 * a SPEED_CHANGE.  could be useful in some configs.
+	 */
+	event = req->buf;
+	event->bmRequestType = 0xA1;
+	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+	event->wValue = __constant_cpu_to_le16(1);	/* connected */
+	event->wIndex = __constant_cpu_to_le16(
+				eth_data_alt_one_intf.bInterfaceNumber);
+	event->wLength = 0;
+
+	req->length = sizeof *event;
+	req->complete = eth_status_complete;
+	req->context = dev;
+
+	value = usb_ept_queue_xfer(dev->status_ep, req);
+	if (value < 0)
+		DEBUG(dev, "status buf queue --> %d\n", value);
+}
+
+/* SET_INTERFACE handler for the data interface.  Alt setting 1 brings
+ * the link up: enable the bulk endpoints, restore the default packet
+ * filter, raise carrier, send the connection notifications and start
+ * receiving.  Anything else (alt 0) takes the link down.
+ * Returns 0 on success, 1 if the device is not initialized.
+ */
+static int  eth_set_interface(int  wIndex, int wValue, void *_ctxt)
+{
+	struct eth_dev *dev = eth_device;
+	unsigned long		flags;
+
+	if (dev == NULL)
+		return 1;
+
+	if ((wIndex == eth_data_alt_one_intf.bInterfaceNumber)
+			&& (wValue == 1)) {
+		dev->alt_set = 1;
+		usb_ept_enable(dev->in_ep, 1);
+		usb_ept_enable(dev->out_ep, 1);
+		dev->cdc_filter = DEFAULT_FILTER;
+		netif_carrier_on(dev->net);
+		issue_start_status(dev);
+		if (netif_running(dev->net)) {
+			spin_lock_irqsave(&dev->lock, flags);
+			eth_start(dev, GFP_ATOMIC);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	} else {
+		dev->alt_set = 0;
+		netif_stop_queue(dev->net);
+		netif_carrier_off(dev->net);
+	}
+	return 0;
+}
+
+/* GET_INTERFACE handler: report the current alt setting. */
+static int eth_get_interface(int wIndex, void *_ctxt)
+{
+	struct eth_dev *dev = eth_device;
+
+	return dev->alt_set;
+}
+
+/*
+ * The setup() callback implements all the ep0 functionality that's not
+ * handled lower down.  CDC has a number of less-common features:
+ *
+ *  - class-specific descriptors for the control interface
+ *  - class-specific control requests
+ */
+/* ep0 class-request handler.  Only SET_ETHERNET_PACKET_FILTER is
+ * implemented; all other CDC requests fall through to -EOPNOTSUPP.
+ * Returns the number of bytes to transfer (0 here) or a negative errno.
+ */
+static int
+eth_setup(struct usb_ctrlrequest *ctrl, void *buf, int len, void *_ctxt)
+{
+	struct eth_dev	*dev = (struct eth_dev *) _ctxt;
+	int		value = -EOPNOTSUPP;
+	u16		wIndex = le16_to_cpu(ctrl->wIndex);
+	u16		wValue = le16_to_cpu(ctrl->wValue);
+	u16		wLength = le16_to_cpu(ctrl->wLength);
+	u16		data_int = eth_data_alt_one_intf.bInterfaceNumber;
+	u16		ctrl_int = eth_control_intf.bInterfaceNumber;
+	switch (ctrl->bRequest) {
+	case USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (ctrl->bRequestType != (USB_TYPE_CLASS|USB_RECIP_INTERFACE)
+			|| wLength != 0
+			|| ((wIndex != data_int) && (wIndex != ctrl_int)))
+			break;
+		DEBUG(dev, "packet filter %02x\n", wValue);
+		dev->cdc_filter = wValue;
+		value = 0;
+		break;
+
+	/* and potentially:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+		VDEBUG(dev,
+			"unknown control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			wValue, wIndex, wLength);
+	}
+	return value;
+}
+
+
+/* Function-driver disconnect callback: take the interface down and
+ * reset the configuration under the device lock.
+ */
+static void eth_disconnect(void *_ctxt)
+{
+	struct eth_dev		*dev = (struct eth_dev *) _ctxt;
+	unsigned long		flags;
+
+	printk(KERN_INFO "eth_disconnect()\n");
+	spin_lock_irqsave(&dev->lock, flags);
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+	eth_reset_config(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+/* net_device change_mtu callback.  Rejects out-of-range MTUs, and MTUs
+ * whose full frame size is an exact multiple of the endpoint max
+ * packet, since that would require zero-length packets to delimit
+ * transfers.  (Checks the IN endpoint although the comment talks
+ * about reads -- presumably intentional for this controller; confirm.)
+ */
+static int usb_eth_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+
+	if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		return -ERANGE;
+	/* no zero-length packet read wanted after mtu-sized packets */
+	if (((new_mtu + sizeof(struct ethhdr)) %
+			(usb_ept_get_max_packet(dev->in_ep))) == 0)
+		return -EDOM;
+	net->mtu = new_mtu;
+	return 0;
+}
+
+/* net_device get_stats callback: stats live in the netdev priv area. */
+static struct net_device_stats *eth_get_stats(struct net_device *net)
+{
+	return &((struct eth_dev *)netdev_priv(net))->stats;
+}
+
+/* ethtool: report driver name/version; no real firmware version. */
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	strlcpy(p->driver, shortname, sizeof p->driver);
+	strlcpy(p->version, DRIVER_VERSION, sizeof p->version);
+	strlcpy(p->fw_version, "ethernet", sizeof p->fw_version);
+}
+
+/* ethtool: link state is always reported up. */
+static u32 eth_get_link(struct net_device *net)
+{
+	return 1;
+}
+
+static struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = eth_get_link
+};
+
+/* Schedule eth_work() for @flag unless it is already pending; the
+ * test_and_set_bit() makes repeated requests for the same flag idempotent.
+ */
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
+	if (test_and_set_bit(flag, &dev->todo))
+		return;
+	if (!schedule_work(&dev->work))
+		ERROR(dev, "kevent %d may have been dropped\n", flag);
+	else
+		DEBUG(dev, "kevent %d scheduled\n", flag);
+}
+
+static void rx_complete(struct usb_endpoint *ep, struct usb_request *req);
+
+/* Allocate a max-packet-aligned rx skb for @req and queue it on the
+ * bulk-out endpoint.  On -ENOMEM the WORK_RX_MEMORY kevent is deferred
+ * so rx_fill() retries later; on any failure the request goes back on
+ * rx_reqs.  Returns 0 or a negative errno.
+ */
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff		*skb;
+	int			retval = -ENOMEM;
+	size_t			size;
+	unsigned long		flags;
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*max_pkt), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*max_pkt), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 */
+	size = (sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA);
+	size += usb_ept_get_max_packet(dev->out_ep) - 1;
+	size -= size % usb_ept_get_max_packet(dev->out_ep);
+	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+	if (skb  == NULL) {
+		DEBUG(dev, "no rx skb\n");
+		goto enomem;
+	}
+
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	req->buf = skb->data;
+	req->length = size;
+	req->complete = rx_complete;
+	req->context = skb;
+
+	retval = usb_ept_queue_xfer(dev->out_ep, req);
+	/* the enomem label sits on the if's statement: both the skb
+	 * failure (via goto, retval still -ENOMEM) and a queueing -ENOMEM
+	 * defer a retry, then fall into the shared error path below.
+	 */
+	if (retval == -ENOMEM)
+enomem:
+		defer_kevent(dev, WORK_RX_MEMORY);
+	if (retval) {
+		DEBUG(dev, "rx submit --> %d\n", retval);
+		if (skb)
+			dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return retval;
+}
+
+/* Bulk-out (rx) completion: deliver good frames via netif_rx(); on
+ * shutdown-style errors park the request, otherwise account the error
+ * and resubmit the same request (when the interface is still running).
+ */
+static void rx_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = eth_device;
+	int		status = req->status;
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+		/* we know MaxPacketsPerTransfer == 1 here */
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > ETH_FRAME_LEN) {
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			DEBUG(dev, "rx length %d\n", skb->len);
+			break;
+		}
+
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+
+		/* no buffer copies needed, unless hardware can't
+		 * use skb buffers.
+		 */
+		status = netif_rx(skb);
+		skb = NULL;	/* ownership passed to the stack */
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDEBUG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DEBUG(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		dev->stats.rx_errors++;
+		DEBUG(dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+	/* note: quiesce paths goto clean inside this if, skipping the
+	 * netif_running() test and always parking the request.
+	 */
+	if (!netif_running(dev->net)) {
+clean:
+		spin_lock(&dev->req_lock);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock(&dev->req_lock);
+		req = NULL;
+	}
+	if (req)
+		rx_submit(dev, req, GFP_ATOMIC);
+}
+
+/* Size the request pool on @list to exactly @n entries: allocate the
+ * shortfall, or free the surplus.  Returns 0 on success (or if at
+ * least one request exists after a failed grow), -ENOMEM otherwise.
+ */
+static int prealloc(struct list_head *list, struct usb_endpoint *ep,
+			unsigned n, gfp_t gfp_flags)
+{
+	unsigned		i;
+	struct usb_request	*req;
+
+	if (!n)
+		return -ENOMEM;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry(req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
+	while (i--) {
+		/* CDC ECM uses skb buffer pointer for requests */
+		req = usb_ept_alloc_req(ep, 0);
+		if (!req)
+			return list_empty(list) ? -ENOMEM : 0;
+		list_add(&req->list, list);
+	}
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del(&req->list);
+		usb_ept_free_req(ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of(next, struct usb_request, list);
+	}
+	return 0;
+}
+
+/* Pre-size both the tx and rx request pools to @n entries under the
+ * request lock.  Returns 0 or the first prealloc() error.
+ */
+static int alloc_requests(struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
+{
+	int status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	status = prealloc(&dev->tx_reqs, dev->in_ep, n, gfp_flags);
+	if (status < 0)
+		goto fail;
+	status = prealloc(&dev->rx_reqs, dev->out_ep, n, gfp_flags);
+	if (status < 0)
+		goto fail;
+	goto done;
+fail:
+	DEBUG(dev, "can't alloc requests\n");
+done:
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+	return status;
+}
+
+/* Submit every idle rx request.  The lock is dropped around each
+ * rx_submit() call (it may allocate/sleep); on failure a retry is
+ * deferred via WORK_RX_MEMORY and filling stops.
+ */
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		if (rx_submit(dev, req, gfp_flags) < 0) {
+			defer_kevent(dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+}
+
+/*
+ * Workqueue handler: services deferred events recorded in dev->todo.
+ * Currently only WORK_RX_MEMORY (retry RX buffer allocation) is
+ * handled; any other bit left set is merely logged.
+ */
+static void eth_work(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
+
+	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+		if (netif_running(dev->net))
+			rx_fill(dev, GFP_KERNEL);
+	}
+
+	if (dev->todo)
+		DEBUG(dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
+/*
+ * IN (TX) completion handler: update stats, recycle the request onto
+ * the TX freelist, free the skb and re-wake the netdev queue.
+ * Runs in completion (atomic) context — hence the plain spin_lock.
+ * NOTE(review): tx_packets is incremented on every completion,
+ * including error/unlink paths — verify that is the intended
+ * accounting.
+ */
+static void tx_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = eth_device;
+
+	switch (req->status) {
+	default:
+		dev->stats.tx_errors++;
+		VDEBUG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		dev->stats.tx_bytes += skb->len;
+	}
+	dev->stats.tx_packets++;
+
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->tx_reqs);
+	spin_unlock(&dev->req_lock);
+	dev_kfree_skb_any(skb);
+
+	atomic_dec(&dev->tx_qlen);
+	if (netif_carrier_ok(dev->net))
+		netif_wake_queue(dev->net);
+}
+
+/* Nonzero when the host enabled promiscuous mode via the CDC packet
+ * filter (SET_ETHERNET_PACKET_FILTER); returns the raw filter bit. */
+static inline int eth_is_promisc(struct eth_dev *dev)
+{
+	return dev->cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+/*
+ * hard_start_xmit handler: apply CDC outgoing packet filters, grab a
+ * free TX request, point it at the skb data (zero-copy) and queue it
+ * on the bulk-IN endpoint.
+ * Returns 0 when the skb was consumed (sent or dropped); returns 1
+ * (the legacy NETDEV_TX_BUSY value) only when no TX request was
+ * available, telling the stack to requeue the skb.
+ */
+static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			length = skb->len;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+
+	/* apply outgoing CDC filters */
+	if (!eth_is_promisc(dev)) {
+		u8		*dest = skb->data;
+
+		if (is_multicast_ether_addr(dest)) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (is_broadcast_ether_addr(dest))
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(dev->cdc_filter & type)) {
+				/* filtered out: silently drop, count as sent */
+				dev_kfree_skb_any(skb);
+				return 0;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	/*
+	 * this freelist can be empty if an interrupt triggered disconnect()
+	 * and reconfigured the function (shutting down this queue) after the
+	 * network stack decided to xmit but before we got the spinlock.
+	 */
+	if (list_empty(&dev->tx_reqs)) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		return 1;
+	}
+
+	req = container_of(dev->tx_reqs.next, struct usb_request, list);
+	list_del(&req->list);
+
+	/* temporarily stop TX queue when the freelist empties */
+	if (list_empty(&dev->tx_reqs))
+		netif_stop_queue(net);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	/* no buffer copies needed, unless the network stack did it
+	 * or the hardware can't use skb buffers.
+	 */
+	req->buf = skb->data;
+	req->context = skb;
+	req->complete = tx_complete;
+
+	/* use zlp framing on tx for strict CDC-Ether conformance,
+	 * though any robust network rx path ignores extra padding.
+	 * and some hardware doesn't like to write zlps.
+	 */
+	if (!dev->zlp && (length % usb_ept_get_max_packet(dev->in_ep)) == 0)
+		length++;	/* pad one byte so the transfer ends short */
+
+	req->length = length;
+
+	retval = usb_ept_queue_xfer(dev->in_ep, req);
+	switch (retval) {
+	default:
+		DEBUG(dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+		net->trans_start = jiffies;
+		atomic_inc(&dev->tx_qlen);
+	}
+	if (retval) {
+		/* queueing failed: drop the skb, recycle the request and
+		 * restart the netdev queue if we had just stopped it */
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (list_empty(&dev->tx_reqs))
+			netif_start_queue(net);
+		list_add(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return 0;
+}
+
+
+/* Bring the data path up: prime the RX queue and enable TX. Called
+ * when the link becomes active (open with carrier, or configuration). */
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	DEBUG(dev, "%s\n", __func__);
+
+	/* fill the rx queue */
+	rx_fill(dev, gfp_flags);
+
+	/* and open the tx floodgates */
+	atomic_set(&dev->tx_qlen, 0);
+	netif_wake_queue(dev->net);
+}
+
+/* net_device open handler: start the data path only if the USB link
+ * (carrier) is already up; otherwise eth_configure() starts it later. */
+static int eth_open(struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	DEBUG(dev, "%s\n", __func__);
+	if (netif_carrier_ok(dev->net))
+		eth_start(dev, GFP_KERNEL);
+	return 0;
+}
+
+/*
+ * net_device stop handler: halt the TX queue and, if the function is
+ * configured, bounce the endpoints (disable then re-enable) to flush
+ * any requests still in flight.  The re-enable keeps the interface
+ * usable for the host while the local netdev is down.
+ */
+static int eth_stop(struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+
+	VDEBUG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	DEBUG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->stats.rx_packets, dev->stats.tx_packets,
+		dev->stats.rx_errors, dev->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	if (dev->configured) {
+		usb_ept_enable(dev->in_ep, 0);
+		usb_ept_enable(dev->out_ep, 0);
+		if (netif_carrier_ok(dev->net)) {
+			DEBUG(dev, "host still using in/out endpoints\n");
+			/* FIXME idiom may leave toggle wrong here */
+			usb_ept_enable(dev->in_ep, 1);
+			usb_ept_enable(dev->out_ep, 1);
+		}
+		if (dev->status_ep) {
+			usb_ept_enable(dev->status_ep, 0);
+			usb_ept_enable(dev->status_ep,  1);
+		}
+	}
+
+	return 0;
+}
+
+
+/* Convert one hex digit to its value 0..15.  Invalid characters map
+ * silently to 0, indistinguishable from '0' — get_ether_addr() relies
+ * on the later is_valid_ether_addr() check to reject garbage. */
+static u8 __devinit nibble(unsigned char c)
+{
+	if (likely(isdigit(c)))
+		return c - '0';
+	c = toupper(c);
+	if (likely(isxdigit(c)))
+		return 10 + c - 'A';
+	return 0;
+}
+
+/*
+ * Parse a 12-hex-digit MAC address (optionally '.' or ':' separated)
+ * from @str into @dev_addr.  Returns 0 when a valid unicast address
+ * was parsed; otherwise fills @dev_addr with a random address and
+ * returns 1 so the caller can warn.
+ */
+static int __devinit get_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			/* at most one separator before each byte */
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = nibble(*str++) << 4;
+			num |= (nibble(*str++));
+			dev_addr[i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	random_ether_addr(dev_addr);
+	return 1;
+}
+
+/*
+ * Function-driver unbind: flush/disable/free all three endpoints and
+ * tear down the network device.  Guarded by the eth_bound flag so a
+ * bind that never completed is not torn down twice.
+ */
+static void  eth_unbind(void *_ctxt)
+{
+	struct eth_dev   *dev = (struct eth_dev *)_ctxt ;
+
+	pr_debug("%s ()\n", __func__);
+	if (!dev)
+		return ;
+	if (!eth_bound)
+		return;
+
+	if (dev->in_ep) {
+		usb_ept_fifo_flush(dev->in_ep);
+		usb_ept_enable(dev->in_ep, 0);
+		usb_free_endpoint(dev->in_ep);
+	}
+	if (dev->out_ep) {
+		usb_ept_fifo_flush(dev->out_ep);
+		usb_ept_enable(dev->out_ep, 0);
+		usb_free_endpoint(dev->out_ep);
+	}
+	if (dev->status_ep) {
+		usb_ept_fifo_flush(dev->status_ep);
+		usb_ept_enable(dev->status_ep, 0);
+		usb_free_endpoint(dev->status_ep);
+	}
+
+
+	/* unregister_netdev() also frees dev (netdev_priv) — do not touch
+	 * dev after free_netdev() */
+	if (dev->net) {
+		unregister_netdev(dev->net);
+		free_netdev(dev->net);
+	}
+	eth_bound = 0;
+	return ;
+}
+
+/*
+ * Function-driver bind: claim interface numbers and string IDs, set up
+ * the CDC ECM descriptors, allocate the three endpoints, then create
+ * and register the "usb%d" network device.
+ *
+ * NOTE(review): usb_alloc_endpoint() results are dereferenced without
+ * NULL checks — confirm the core guarantees success here.
+ * NOTE(review): failure paths deliberately set eth_bound = 1,
+ * apparently so eth_unbind() will still run its cleanup — verify.
+ */
+static void  eth_bind(void *_ctxt)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	u8			zlp = 1;
+	struct usb_endpoint     *in_ep, *out_ep, *status_ep = NULL;
+	int			status = -ENOMEM;
+	int			ret;
+	struct device		*get_dev;
+
+	get_dev = usb_get_device();
+
+	/* first interface: CDC control (master in the union descriptor) */
+	ret = usb_msm_get_next_ifc_number(&usb_func_ether);
+	eth_control_intf.bInterfaceNumber = ret;
+	eth_control_intf.iInterface = string_control;
+	eth_IAD.bFirstInterface = ret;
+	eth_union_desc.bMasterInterface0 = ret;
+
+	/* second interface: CDC data (slave), alt 0 = idle, alt 1 = active */
+	ret = usb_msm_get_next_ifc_number(&usb_func_ether);
+	eth_data_alt_zero_intf.bInterfaceNumber = ret;
+	eth_data_alt_zero_intf.iInterface = 0;
+	eth_data_alt_one_intf.bInterfaceNumber = ret;
+	eth_data_alt_one_intf.iInterface = string_data;
+	eth_union_desc.bSlaveInterface0 = ret;
+
+	/* Enable IAD */
+	usb_msm_enable_iad();
+
+	/* Configuring STATUS endpoint */
+	status_ep = usb_alloc_endpoint(USB_DIR_IN);
+	status_ep->max_pkt = 64;
+
+	eth_control_intf_hs_int_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | status_ep->num;
+	eth_control_intf_hs_int_in_ep_desc.wMaxPacketSize =
+						status_ep->max_pkt;
+	eth_control_intf_fs_int_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | status_ep->num;
+	eth_control_intf_hs_int_in_ep_desc.bInterval = 4;
+
+	/* Configuring OUT endpoint */
+	out_ep = usb_alloc_endpoint(USB_DIR_OUT);
+	out_ep->max_pkt = 512;
+	eth_data_intf_hs_bulk_out_ep_desc.bEndpointAddress =
+						USB_DIR_OUT | out_ep->num;
+	eth_data_intf_hs_bulk_out_ep_desc.wMaxPacketSize = out_ep->max_pkt;
+	eth_data_intf_fs_bulk_out_ep_desc.bEndpointAddress =
+						USB_DIR_OUT | out_ep->num;
+
+	/*Configuring IN Endpoint*/
+	in_ep = usb_alloc_endpoint(USB_DIR_IN);
+	in_ep->max_pkt = 512;
+	eth_data_intf_hs_bulk_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | in_ep->num;
+	eth_data_intf_hs_bulk_in_ep_desc.wMaxPacketSize = in_ep->max_pkt;
+	eth_data_intf_fs_bulk_in_ep_desc.bEndpointAddress =
+						USB_DIR_IN | in_ep->num;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net) {
+		printk(KERN_DEBUG "eth_bind: alloc_etherdev failed \n");
+		return ;
+	}
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+
+	/* network device setup */
+	dev->net = net;
+	strcpy(net->name, "usb%d");
+	dev->zlp = zlp;
+	dev->in_ep = in_ep;
+	dev->out_ep = out_ep;
+	dev->status_ep = status_ep;
+
+	eth_device = dev;
+	usb_func_ether.context = eth_device;
+
+	/* Module params for these addresses should come from ID proms.
+	 * The host side address is used with CDC, and commonly
+	 * ends up in a persistent config database.  It's not clear if
+	 * host side code for the SAFE thing cares -- its original BLAN
+	 * thing didn't, Sharp never assigned those addresses on Zaurii.
+	 */
+	if (get_ether_addr(dev_addr, net->dev_addr))
+		dev_warn(get_dev,
+			"using random %s ethernet address\n", "self");
+	if (get_ether_addr(host_addr, dev->host_mac))
+		dev_warn(get_dev,
+			"using random %s ethernet address\n", "host");
+	snprintf(ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
+		dev->host_mac[0], dev->host_mac[1],
+		dev->host_mac[2], dev->host_mac[3],
+		dev->host_mac[4], dev->host_mac[5]);
+
+	/* legacy (pre-net_device_ops) netdev callbacks */
+	net->change_mtu = usb_eth_change_mtu;
+	net->get_stats = eth_get_stats;
+	net->hard_start_xmit = eth_start_xmit;
+	net->open = eth_open;
+	net->stop = eth_stop;
+	/* watchdog_timeo, tx_timeout ...
+	 * set_multicast_list */
+	SET_ETHTOOL_OPS(net, &ops);
+	/* ... and maybe likewise for status transfer */
+	if (dev->status_ep) {
+		dev->stat_req = usb_ept_alloc_req(dev->status_ep,
+					STATUS_BYTECOUNT);
+		if (!dev->stat_req) {
+			/* NOTE(review): frees dev->req, not dev->stat_req —
+			 * looks like the wrong field; confirm. */
+			usb_ept_free_req(dev->status_ep, dev->req);
+			goto fail;
+		}
+		dev->stat_req->context = NULL;
+	}
+	/* finish hookup to lower layer ... */
+	/* two kinds of host-initiated state changes:
+	 *  - iff DATA transfer is active, carrier is "on"
+	 *  - tx queueing enabled if open *and* carrier is "on"
+	 */
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	SET_NETDEV_DEV(dev->net, get_dev);
+	status = register_netdev(dev->net);
+	if (status < 0)
+		goto fail1;
+
+	INFO(dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
+	INFO(dev, "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		net->dev_addr[0], net->dev_addr[1],
+		net->dev_addr[2], net->dev_addr[3],
+		net->dev_addr[4], net->dev_addr[5]);
+
+	INFO(dev, "HOST MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		dev->host_mac[0], dev->host_mac[1],
+		dev->host_mac[2], dev->host_mac[3],
+		dev->host_mac[4], dev->host_mac[5]);
+
+	/* allocate string descriptor IDs; success exits via the deepest if */
+	string_data = usb_msm_get_next_strdesc_id("Ethernet Data");
+	if (string_data != 0) {
+		string_control = usb_msm_get_next_strdesc_id
+				 ("CDC Communications Control");
+		if (string_control != 0) {
+			string_ethaddr = usb_msm_get_next_strdesc_id(ethaddr);
+			if (string_ethaddr != 0) {
+				eth_ether_desc.iMACAddress = string_ethaddr;
+				eth_bound = 1;
+				return ;
+			}
+		}
+	}
+fail1:
+	/* NOTE(review): message is misleading when we got here from a
+	 * string-descriptor failure rather than register_netdev() */
+	dev_dbg(get_dev, "register_netdev failed, %d\n", status);
+fail:
+	eth_bound = 1;
+	printk(KERN_INFO"eth_bind: returning from eth_bind\n");
+	return ;
+}
+
+
+/* CDC ECM function registration: callbacks invoked by the MSM usb
+ * function core (bind/unbind at registration, the rest per-session). */
+static struct usb_function usb_func_ether = {
+	.name		= "ethernet",
+	.bind		= eth_bind,
+	.unbind		= eth_unbind,
+	.configure	= eth_configure,
+	.disconnect	= eth_disconnect,
+	.setup		= eth_setup,
+	.set_interface	= eth_set_interface,
+	.get_interface	= eth_get_interface,
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+
+#define TOTAL_ETH_DESCRIPTORS 11
+struct usb_descriptor_header *eth_hs_descriptors[TOTAL_ETH_DESCRIPTORS];
+struct usb_descriptor_header *eth_fs_descriptors[TOTAL_ETH_DESCRIPTORS];
+
+/*
+ * Module init: build the high- and full-speed descriptor tables
+ * (IAD, control interface + class descriptors + interrupt EP, data
+ * interface alt settings + bulk EPs, NULL terminator) and register
+ * the function with the MSM usb core.
+ */
+static int __init init(void)
+{
+	int rc;
+
+	eth_hs_descriptors[0] = (struct usb_descriptor_header *)
+				&eth_IAD;
+	eth_hs_descriptors[1] = (struct usb_descriptor_header *)
+				&eth_control_intf;
+	eth_hs_descriptors[2] = (struct usb_descriptor_header *)
+				&eth_header_desc;
+	eth_hs_descriptors[3] = (struct usb_descriptor_header *)
+				&eth_union_desc;
+	eth_hs_descriptors[4] = (struct usb_descriptor_header *)
+				&eth_ether_desc;
+	eth_hs_descriptors[5] = (struct usb_descriptor_header *)
+				&eth_control_intf_hs_int_in_ep_desc;
+	eth_hs_descriptors[6] = (struct usb_descriptor_header *)
+				&eth_data_alt_zero_intf;
+	eth_hs_descriptors[7] = (struct usb_descriptor_header *)
+				&eth_data_alt_one_intf;
+	eth_hs_descriptors[8] = (struct usb_descriptor_header *)
+				&eth_data_intf_hs_bulk_out_ep_desc;
+	eth_hs_descriptors[9] = (struct usb_descriptor_header *)
+				&eth_data_intf_hs_bulk_in_ep_desc;
+	eth_hs_descriptors[10] = NULL;
+
+	eth_fs_descriptors[0] = (struct usb_descriptor_header *)&eth_IAD;
+	eth_fs_descriptors[1] = (struct usb_descriptor_header *)
+				&eth_control_intf;
+	eth_fs_descriptors[2] = (struct usb_descriptor_header *)
+				&eth_header_desc;
+	eth_fs_descriptors[3] = (struct usb_descriptor_header *)&eth_union_desc;
+	eth_fs_descriptors[4] = (struct usb_descriptor_header *)&eth_ether_desc;
+	eth_fs_descriptors[5] = (struct usb_descriptor_header *)
+				&eth_control_intf_fs_int_in_ep_desc;
+	eth_fs_descriptors[6] = (struct usb_descriptor_header *)
+				&eth_data_alt_zero_intf;
+	eth_fs_descriptors[7] = (struct usb_descriptor_header *)
+				&eth_data_alt_one_intf;
+	eth_fs_descriptors[8] = (struct usb_descriptor_header *)
+				&eth_data_intf_fs_bulk_out_ep_desc;
+	eth_fs_descriptors[9] = (struct usb_descriptor_header *)
+				&eth_data_intf_fs_bulk_in_ep_desc;
+	eth_fs_descriptors[10] = NULL;
+
+	usb_func_ether.hs_descriptors = eth_hs_descriptors;
+	usb_func_ether.fs_descriptors = eth_fs_descriptors;
+	rc = usb_function_register(&usb_func_ether);
+
+	if (rc < 0)
+		printk(KERN_INFO "cdcecm init:usb function register failed \n");
+	return rc;
+}
+module_init(init);
+
+/*
+ * Module exit: unregister the function (which triggers eth_unbind()
+ * and the netdev teardown).  The trailing pointer writes only clear a
+ * local/stale reference; the actual memory is freed by free_netdev()
+ * in eth_unbind().  NOTE(review): `dev = NULL` is a dead store.
+ */
+static void __exit eth_cleanup(void)
+{
+	struct eth_dev          *dev = eth_device;
+
+	usb_function_unregister(&usb_func_ether);
+	if (dev) {
+		dev->net = NULL;
+		dev = NULL;
+	}
+}
+module_exit(eth_cleanup);
diff --git a/drivers/usb/function/loopback.c b/drivers/usb/function/loopback.c
new file mode 100644
index 0000000..d7c93a3
--- /dev/null
+++ b/drivers/usb/function/loopback.c
@@ -0,0 +1,128 @@
+/* drivers/usb/function/loopback.c
+ *
+ * Simple Loopback Function Device
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+/* Per-function state: one bulk OUT + one bulk IN endpoint and a single
+ * pre-allocated request for each direction (strictly ping-pong, so no
+ * request pool is needed). */
+struct loopback_context
+{
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+	struct usb_request *req_out;
+	struct usb_request *req_in;
+};
+
+/* Bind callback: the core hands us our endpoints in declaration order
+ * (ept[0] = BULK_OUT, ept[1] = BULK_IN, per usb_func_loopback below).
+ * NOTE(review): usb_ept_alloc_req() results are not NULL-checked. */
+static void loopback_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct loopback_context *ctxt = _ctxt;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	printk(KERN_INFO "loopback_bind() %p, %p\n", ctxt->out, ctxt->in);
+
+	ctxt->req_out = usb_ept_alloc_req(ctxt->out, 4096);
+	ctxt->req_in = usb_ept_alloc_req(ctxt->in, 4096);
+}
+
+static void loopback_queue_in(struct loopback_context *ctxt, void *data, unsigned len);
+static void loopback_queue_out(struct loopback_context *ctxt);
+
+/* IN (device->host) transfer finished: re-arm the OUT side to receive
+ * the next packet to loop back.
+ * Fix: the log label said "loopback_out_complete" — the two completion
+ * handlers had their printk labels swapped. */
+static void loopback_in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct loopback_context *ctxt = req->context;
+	printk(KERN_INFO "loopback_in_complete (%d)\n", req->actual);
+	loopback_queue_out(ctxt);
+}
+
+/* OUT (host->device) transfer finished: on success, echo the received
+ * bytes back on the IN endpoint; on error just re-arm the OUT side.
+ * Fix: the log label said "loopback_in_complete" — the two completion
+ * handlers had their printk labels swapped. */
+static void loopback_out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct loopback_context *ctxt = req->context;
+	printk(KERN_INFO "loopback_out_complete (%d)\n", req->actual);
+
+	if (req->status == 0) {
+		loopback_queue_in(ctxt, req->buf, req->actual);
+	} else {
+		loopback_queue_out(ctxt);
+	}
+}
+
+/* Arm the OUT endpoint for the next host packet (up to the 4096-byte
+ * request buffer allocated in loopback_bind()). */
+static void loopback_queue_out(struct loopback_context *ctxt)
+{
+	struct usb_request *req = ctxt->req_out;
+
+	req->complete = loopback_out_complete;
+	req->context = ctxt;
+	req->length = 4096;
+
+	usb_ept_queue_xfer(ctxt->out, req);
+}
+
+/* Copy @len received bytes into the IN request buffer and queue the
+ * echo.  @len is bounded by the OUT request's 4096-byte length, so the
+ * memcpy into the equally-sized IN buffer cannot overflow. */
+static void loopback_queue_in(struct loopback_context *ctxt, void *data, unsigned len)
+{
+	struct usb_request *req = ctxt->req_in;
+
+	memcpy(req->buf, data, len);
+	req->complete = loopback_in_complete;
+	req->context = ctxt;
+	req->length = len;
+
+	usb_ept_queue_xfer(ctxt->in, req);
+}
+
+/* Configuration change: start listening for host data once configured;
+ * on deconfigure the core cancels pending requests for us. */
+static void loopback_configure(int configured, void *_ctxt)
+{
+	struct loopback_context *ctxt = _ctxt;
+	printk(KERN_INFO "loopback_configure() %d\n", configured);
+
+	if (configured) {
+		loopback_queue_out(ctxt);
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+/* Vendor-specific (0xff/0xff/0xff) loopback function: one bulk OUT and
+ * one bulk IN endpoint, bound to the static _context above. */
+static struct usb_function usb_func_loopback = {
+	.bind = loopback_bind,
+	.configure = loopback_configure,
+
+	.name = "loopback",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xff,
+	.ifc_protocol = 0xff,
+
+	.ifc_name = "loopback",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+/* Module init: register the loopback function with the usb core.
+ * NOTE(review): no module_exit/unregister — the module cannot be
+ * cleanly unloaded; presumably built-in only. */
+static int __init loopback_init(void)
+{
+	printk(KERN_INFO "loopback_init()\n");
+	return usb_function_register(&usb_func_loopback);
+}
+
+module_init(loopback_init);
diff --git a/drivers/usb/function/mass_storage.c b/drivers/usb/function/mass_storage.c
new file mode 100644
index 0000000..f679cd0
--- /dev/null
+++ b/drivers/usb/function/mass_storage.c
@@ -0,0 +1,3009 @@
+/* drivers/usb/function/mass_storage.c
+ *
+ * Function Driver for USB Mass Storage
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * Based heavily on the file_storage gadget driver in
+ * drivers/usb/gadget/file_storage.c and licensed under the same terms:
+ *
+ * Copyright (C) 2003-2007 Alan Stern
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define DEBUG
+//#define VERBOSE_DEBUG
+//#define DUMP_MSGS
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/switch.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/mass_storage_function.h>
+#include <linux/usb_usual.h>
+#include <linux/platform_device.h>
+#include <linux/wakelock.h>
+
+#include "usb_function.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_NAME		"usb_mass_storage"
+#define MAX_LUNS		8
+
+#ifdef DEBUG
+#define LDBG(lun, fmt, args...) \
+	dev_dbg(&(lun)->dev , fmt , ## args)
+#define MDBG(fmt,args...) \
+	printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
+#else
+#define LDBG(lun, fmt, args...) \
+	do { } while (0)
+#define MDBG(fmt,args...) \
+	do { } while (0)
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG	LDBG
+#else
+#define VLDBG(lun, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LERROR(lun, fmt, args...) \
+	dev_err(&(lun)->dev , fmt , ## args)
+#define LWARN(lun, fmt, args...) \
+	dev_warn(&(lun)->dev , fmt , ## args)
+#define LINFO(lun, fmt, args...) \
+	dev_info(&(lun)->dev , fmt , ## args)
+
+#define MINFO(fmt,args...) \
+	printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
+
+#define DBG(d, fmt, args...) \
+	dev_dbg(&(d)->pdev->dev , fmt , ## args)
+#define VDBG(d, fmt, args...) \
+	dev_vdbg(&(d)->pdev->dev , fmt , ## args)
+#define ERROR(d, fmt, args...) \
+	dev_err(&(d)->pdev->dev , fmt , ## args)
+#define MS_WARN(d, fmt, args...) \
+	dev_warn(&(d)->pdev->dev , fmt , ## args)
+#define INFO(d, fmt, args...) \
+	dev_info(&(d)->pdev->dev , fmt , ## args)
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk-only data structures */
+
+/* Command Block Wrapper */
+/* Command Block Wrapper (31 bytes, little-endian on the wire).
+ * Tag is deliberately plain u32: it is opaque and echoed back verbatim
+ * in the CSW, so no byte-order conversion is needed. */
+struct bulk_cb_wrap {
+	__le32	Signature;		/* Contains 'USBC' */
+	u32	Tag;			/* Unique per command id */
+	__le32	DataTransferLength;	/* Size of the data */
+	u8	Flags;			/* Direction in bit 7 */
+	u8	Lun;			/* LUN (normally 0) */
+	u8	Length;			/* Of the CDB, <= MAX_COMMAND_SIZE */
+	u8	CDB[16];		/* Command Data Block */
+};
+
+#define USB_BULK_CB_WRAP_LEN	31
+#define USB_BULK_CB_SIG		0x43425355	/* Spells out USBC */
+#define USB_BULK_IN_FLAG	0x80
+
+/* Command Status Wrapper */
+/* Command Status Wrapper (13 bytes) returned after every command;
+ * Tag must match the CBW's, Residue = requested minus transferred. */
+struct bulk_cs_wrap {
+	__le32	Signature;		/* Should = 'USBS' */
+	u32	Tag;			/* Same as original command */
+	__le32	Residue;		/* Amount not transferred */
+	u8	Status;			/* See below */
+};
+
+#define USB_BULK_CS_WRAP_LEN	13
+#define USB_BULK_CS_SIG		0x53425355	/* Spells out 'USBS' */
+#define USB_STATUS_PASS		0
+#define USB_STATUS_FAIL		1
+#define USB_STATUS_PHASE_ERROR	2
+
+/* Bulk-only class specific requests */
+#define USB_BULK_RESET_REQUEST		0xff
+#define USB_BULK_GET_MAX_LUN_REQUEST	0xfe
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE	16
+
+/* SCSI commands that we recognize */
+#define SC_FORMAT_UNIT			0x04
+#define SC_INQUIRY			0x12
+#define SC_MODE_SELECT_6		0x15
+#define SC_MODE_SELECT_10		0x55
+#define SC_MODE_SENSE_6			0x1a
+#define SC_MODE_SENSE_10		0x5a
+#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL	0x1e
+#define SC_READ_6			0x08
+#define SC_READ_10			0x28
+#define SC_READ_12			0xa8
+#define SC_READ_CAPACITY		0x25
+#define SC_READ_FORMAT_CAPACITIES	0x23
+#define SC_RELEASE			0x17
+#define SC_REQUEST_SENSE		0x03
+#define SC_RESERVE			0x16
+#define SC_SEND_DIAGNOSTIC		0x1d
+#define SC_START_STOP_UNIT		0x1b
+#define SC_SYNCHRONIZE_CACHE		0x35
+#define SC_TEST_UNIT_READY		0x00
+#define SC_VERIFY			0x2f
+#define SC_WRITE_6			0x0a
+#define SC_WRITE_10			0x2a
+#define SC_WRITE_12			0xaa
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	/* Sense Key byte, etc. */
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
+
+
+/*-------------------------------------------------------------------------*/
+
+/* One logical unit: the backing file plus SCSI sense state.  Fields
+ * are protected by fsg->filesem (backing file) / updated from the
+ * worker thread. */
+struct lun {
+	struct file	*filp;			/* backing file, NULL = no medium */
+	loff_t		file_length;		/* size of backing file in bytes */
+	loff_t		num_sectors;		/* file_length / 512 */
+
+	unsigned int	ro : 1;			/* write-protected medium */
+	unsigned int	prevent_medium_removal : 1;
+	unsigned int	registered : 1;		/* device_register() done */
+	unsigned int	info_valid : 1;		/* sense_data_info is meaningful */
+
+	u32		sense_data;		/* SK/ASC/ASCQ packed (see SS_*) */
+	u32		sense_data_info;	/* LBA for the sense data */
+	u32		unit_attention_data;
+
+	struct device	dev;
+};
+
+#define backing_file_is_open(curlun)	((curlun)->filp != NULL)
+
+
+/* Map the embedded sysfs device back to its enclosing struct lun. */
+static struct lun *dev_to_lun(struct device *dev)
+{
+	return container_of(dev, struct lun, dev);
+}
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
+
+/* Number of buffers for CBW, DATA and CSW */
+#ifdef CONFIG_USB_CSW_HACK
+#define NUM_BUFFERS	4
+#else
+#define NUM_BUFFERS	2
+#endif
+
+/* Per-buffer pipeline state: EMPTY = owned by us, FULL = data pending,
+ * BUSY = a USB request is in flight on it. */
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
+
+/* One entry of the circular buffer pipeline between the USB side and
+ * the backing-file worker thread. */
+struct fsg_buffhd {
+	void				*buf;
+	enum fsg_buffer_state		state;
+	struct fsg_buffhd		*next;
+
+	/* The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here. */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	int				inreq_busy;
+	struct usb_request		*outreq;
+	int				outreq_busy;
+};
+
+/* Exception priorities: anything > FSG_STATE_IDLE preempts normal
+ * processing; higher values preempt lower (see raise_exception()). */
+enum fsg_state {
+	/* This one isn't used anywhere */
+	FSG_STATE_COMMAND_PHASE = -10,
+
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+/* Direction of the data phase as derived from the CBW. */
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+/* NOTE(review): global and non-static; looks like it should be a
+ * static module parameter — confirm no other file references it. */
+int can_stall = 1;
+
+/* Main mass-storage gadget state, shared between the worker thread,
+ * USB completion handlers and sysfs; see per-field lock annotations. */
+struct fsg_dev {
+	/* lock protects: state and all the req_busy's */
+	spinlock_t		lock;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* reference counting: wait until all LUNs are released */
+	struct kref		ref;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		/* For exception handling */
+
+	u8			config, new_config;
+
+	unsigned int		running : 1;
+	unsigned int		phase_error : 1;
+	unsigned int		short_packet_received : 1;
+	unsigned int		bad_lun_okay : 1;
+
+	unsigned long		atomic_bitflags;
+#define REGISTERED		0
+#define CLEAR_BULK_HALTS	1
+#define SUSPENDED		2
+
+	struct usb_endpoint		*bulk_in;
+	struct usb_endpoint		*bulk_out;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	buffhds[NUM_BUFFERS];
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	/* current command, decoded from the CBW */
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	unsigned int		lun;
+	u32			residue;
+	u32			usb_amount_left;
+
+	unsigned int		nluns;
+	struct lun		*luns;
+	struct lun		*curlun;
+
+	u32				buf_size;
+	const char		*vendor;
+	const char		*product;
+	int				release;
+
+	struct platform_device *pdev;
+	struct switch_dev sdev;
+	int	bound;
+	struct wake_lock wake_lock, wake_lock_idle;
+};
+static int send_status(struct fsg_dev *fsg);
+
+/* Nonzero while an exception (reset, abort, config change, ...) is
+ * pending; normal command processing must yield to it. */
+static int exception_in_progress(struct fsg_dev *fsg)
+{
+	return (fsg->state > FSG_STATE_IDLE);
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+/* Make bulk-out requests be divisible by the maxpacket size: record
+ * the intended @length in the buffhd, then round the actual request
+ * length up to a maxpacket multiple (some controllers mishandle short
+ * read requests — see the fsg_buffhd comment). */
+static void set_bulk_out_req_length(struct fsg_dev *fsg,
+		struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % fsg->bulk_out_maxpacket;
+	if (rem > 0)
+		length += fsg->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
+
+static struct fsg_dev			*the_fsg;
+
+static void	close_backing_file(struct fsg_dev *fsg, struct lun *curlun);
+static void	close_all_backing_files(struct fsg_dev *fsg);
+
+
+static struct usb_function		fsg_function;
+/*-------------------------------------------------------------------------*/
+
+/* Debug helpers: dump_msg() hex-dumps bulk payloads when DUMP_MSGS is
+ * defined; dump_cdb() prints the SCSI CDB under VERBOSE_DEBUG only.
+ * Every variant compiles away to an empty stub when its option is off,
+ * so call sites need no conditionals. */
+#ifdef DUMP_MSGS
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{
+	/* cap at 512 bytes so data-phase payloads don't flood the log */
+	if (length < 512) {
+		DBG(fsg, "%s, length %u:\n", label, length);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+				16, 1, buf, length, 0);
+	}
+}
+
+static void dump_cdb(struct fsg_dev *fsg)
+{}
+
+#else
+
+static void dump_msg(struct fsg_dev *fsg, const char *label,
+		const u8 *buf, unsigned int length)
+{}
+
+#ifdef VERBOSE_DEBUG
+
+static void dump_cdb(struct fsg_dev *fsg)
+{
+	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
+			16, 1, fsg->cmnd, fsg->cmnd_size, 0);
+}
+
+#else
+
+static void dump_cdb(struct fsg_dev *fsg)
+{}
+
+#endif /* VERBOSE_DEBUG */
+#endif /* DUMP_MSGS */
+
+/* Halt (STALL) one of our two bulk endpoints, logging which one.
+ * Returns usb_ept_set_halt()'s result, or -1 for an unknown endpoint. */
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_endpoint *ep)
+{
+	const char  *name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		return -1;
+
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ept_set_halt(ep);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Routines for unaligned data access */
+
+/* Read a big-endian 16-bit value from an unaligned buffer. */
+static u16 get_be16(u8 *buf)
+{
+	u16 hi = buf[0];
+
+	return (hi << 8) | buf[1];
+}
+
+/* Read a big-endian 32-bit value from an unaligned buffer. */
+static u32 get_be32(u8 *buf)
+{
+	u32 val = 0;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		val = (val << 8) | buf[i];
+	return val;
+}
+
+/* Store a 16-bit value big-endian into an unaligned buffer. */
+static void put_be16(u8 *buf, u16 val)
+{
+	buf[0] = (u8)(val >> 8);
+	buf[1] = (u8)val;
+}
+
+/* Store a 32-bit value big-endian into an unaligned buffer. */
+static void put_be32(u8 *buf, u32 val)
+{
+	int i;
+
+	for (i = 3; i >= 0; i--) {
+		buf[i] = (u8)val;
+		val >>= 8;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* There is only one interface. */
+#define USB_SC_SCSI     0x06            /* Transparent SCSI */
+#define USB_PR_BULK     0x50            /* Bulk-only */
+static struct usb_interface_descriptor
+intf_desc = {
+	.bLength 		= sizeof intf_desc,
+	.bDescriptorType 	= USB_DT_INTERFACE,
+	.bNumEndpoints 		= 2,
+	.bInterfaceClass 	= USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass 	= USB_SC_SCSI,
+	.bInterfaceProtocol 	= USB_PR_BULK,
+};
+
+
+static struct usb_endpoint_descriptor
+hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
/* High-speed bulk-out endpoint: 512-byte max packet, no NAK-rate
 * polling interval (bInterval = 0). */
static struct usb_endpoint_descriptor
hs_bulk_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
	.bInterval =		0,
};
+
/* Full-speed bulk-in endpoint: 64-byte max packet per USB 1.1. */
static struct usb_endpoint_descriptor
fs_bulk_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};
+
/* Full-speed bulk-out endpoint: 64-byte max packet. */
static struct usb_endpoint_descriptor
fs_bulk_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
	.bInterval =		0,
};
+
+
/* NULL-terminated descriptor list advertised at high speed. */
static struct usb_descriptor_header *hs_function[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &hs_bulk_in_desc,
	(struct usb_descriptor_header *) &hs_bulk_out_desc,
	NULL,
};
/* NULL-terminated descriptor list advertised at full speed. */
static struct usb_descriptor_header *fs_function[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &fs_bulk_in_desc,
	(struct usb_descriptor_header *) &fs_bulk_out_desc,
	NULL,
};
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
/* Caller must hold fsg->lock.
 * Set the wakeup flag and kick the worker thread; safe to call from
 * interrupt context. */
static void wakeup_thread(struct fsg_dev *fsg)
{
	/* Tell the main thread that something has happened */
	fsg->thread_wakeup_needed = 1;
	if (fsg->thread_task)
		wake_up_process(fsg->thread_task);
}
+
+
/*
 * Move the device into exception state @new_state and interrupt the
 * worker thread with SIGUSR1 so it breaks out of any blocking call.
 * States are ordered by priority: a numerically higher state wins.
 */
static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
{
	unsigned long		flags;

	DBG(fsg, "raise_exception %d\n", (int)new_state);
	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	spin_lock_irqsave(&fsg->lock, flags);
	if (fsg->state <= new_state) {
		fsg->state = new_state;
		if (fsg->thread_task)
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
					fsg->thread_task);
	}
	spin_unlock_irqrestore(&fsg->lock, flags);
}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
/* Completion handler for bulk-in (device-to-host) requests; runs in_irq.
 * On success, marks the buffer empty again and wakes the worker thread. */
static void bulk_in_complete(struct usb_endpoint *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = the_fsg;
	struct fsg_buffhd	*bh = req->context;
	unsigned long		flags;

	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);

	/* Hold the lock while we update the request and buffer states */
	if (req->status == 0) {
		smp_wmb();
		spin_lock_irqsave(&fsg->lock, flags);
		bh->inreq_busy = 0;
		bh->state = BUF_STATE_EMPTY;
		wakeup_thread(fsg);
		spin_unlock_irqrestore(&fsg->lock, flags);
	} else
		/* NOTE(review): error path clears inreq_busy without taking
		 * fsg->lock, unlike the success path — confirm this is safe. */
		bh->inreq_busy = 0;
}
+
/* Completion handler for bulk-out (host-to-device) requests; runs in_irq.
 * On success, marks the buffer full and wakes the worker thread to
 * drain it. */
static void bulk_out_complete(struct usb_endpoint *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = the_fsg;
	struct fsg_buffhd	*bh = req->context;
	unsigned long		flags;

	dump_msg(fsg, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual,
				bh->bulk_out_intended_length);

	/* Hold the lock while we update the request and buffer states */
	if (req->status == 0) {
		smp_wmb();
		spin_lock_irqsave(&fsg->lock, flags);
		bh->outreq_busy = 0;
		bh->state = BUF_STATE_FULL;
		wakeup_thread(fsg);
		spin_unlock_irqrestore(&fsg->lock, flags);
	} else
		/* NOTE(review): error path clears outreq_busy without taking
		 * fsg->lock, unlike the success path — confirm this is safe. */
		bh->outreq_busy = 0;
}
+
/*
 * Handle Bulk-Only Transport class-specific control requests
 * (Bulk-Only Mass Storage Reset and Get Max LUN) addressed to our
 * interface.  Returns the number of bytes placed in @buf for IN
 * requests, 0 for handled OUT requests, or a negative errno.
 */
static int fsg_setup(struct usb_ctrlrequest *ctrl, void *buf,
			int len, void *context)
{
	struct fsg_dev		*fsg = context;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* Not configured yet: refuse everything */
	if (!fsg->config)
		return value;

	/* Requests must target our interface */
	if (w_index != intf_desc.bInterfaceNumber)
		return value;

	/* Handle Bulk-only class-specific requests */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		switch (ctrl->bRequest) {
		case USB_BULK_RESET_REQUEST:
			if (ctrl->bRequestType != (USB_DIR_OUT |
					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
				break;
			if (w_value != 0) {
				value = -EDOM;
				break;
			}

			/* Raise an exception to stop the current operation
			 * and reinitialize our state.
			 * NOTE(review): no raise_exception() call here —
			 * presumably the reset is acted on elsewhere;
			 * verify against the caller. */
			DBG(fsg, "bulk reset request\n");
			value = 0;
			break;

		case USB_BULK_GET_MAX_LUN_REQUEST:
			if (ctrl->bRequestType != (USB_DIR_IN |
					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
				break;
			if (w_value != 0) {
				value = -EDOM;
				break;
			}
			VDBG(fsg, "get max LUN\n");
			/* Get Max LUN returns the highest LUN index,
			 * i.e. count - 1 */
			*(u8 *) buf = fsg->nluns - 1;
			value = 1;
			break;
		}
	}

	if (value == -EOPNOTSUPP)
		VDBG(fsg,
			"unknown class-specific control req "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			le16_to_cpu(ctrl->wValue), w_index, w_length);
	return value;
}
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
/* Use this for bulk or interrupt transfers, not ep0.
 * Mark the buffer busy (under the lock) and queue @req on @ep.
 * On queue failure the busy/state flags are rolled back and, except
 * for expected shutdown/zero-length cases, a warning is logged. */
static void start_transfer(struct fsg_dev *fsg, struct usb_endpoint *ep,
		struct usb_request *req, int *pbusy,
		enum fsg_buffer_state *state)
{
	int	rc;
	unsigned long		flags;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	spin_lock_irqsave(&fsg->lock, flags);
	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	spin_unlock_irqrestore(&fsg->lock, flags);
	rc = usb_ept_queue_xfer(ep, req);
	if (rc != 0) {
		/* NOTE(review): rollback happens outside the lock —
		 * confirm no completion can race here. */
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
						req->length == 0))
			MS_WARN(fsg, "error in submission: %s --> %d\n",
				(ep == fsg->bulk_in ? "bulk-in" : "bulk-out"),
				rc);
	}
}
+
+
+static int sleep_thread(struct fsg_dev *fsg)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (fsg->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	fsg->thread_wakeup_needed = 0;
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
/*
 * Handle SCSI READ(6)/READ(10): read fsg->data_size_from_cmnd bytes
 * from the backing file starting at the CDB's LBA and stream them to
 * the host through the circular buffer-head pipeline.  Sector size is
 * fixed at 512 bytes (hence the << 9 / >> 9 shifts).  Always returns
 * a non-zero value so the caller does not send a default reply.
 */
static int do_read(struct fsg_dev *fsg)
{
	struct lun		*curlun = fsg->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset, file_offset_tmp;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (fsg->cmnd[0] == SC_READ_6)
		/* READ(6): 21-bit LBA in bytes 1-3 of the CDB */
		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
	else {
		lba = get_be32(&fsg->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((fsg->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << 9;

	/* Carry out the file reads */
	amount_left = fsg->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 *	the next page.
		 * If this means reading 0 then we were asked to read past
		 *	the end of file. */
		amount = min((unsigned int) amount_left,
				(unsigned int)fsg->buf_size);
		amount = min((loff_t) amount,
				curlun->file_length - file_offset);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
					partial_page);

		/* Wait for the next buffer to become available */
		bh = fsg->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				(char __user *) bh->buf,
				amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
				(unsigned long long) file_offset,
				(int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
					(int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
					(int) nread, amount);
			nread -= (nread & 511);	/* Round down to a block */
		}
		file_offset  += nread;
		amount_left  -= nread;
		fsg->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
				&bh->inreq_busy, &bh->state);
		fsg->next_buffhd_to_fill = bh->next;
	}

	/* The final (possibly short or empty) buffer is left FULL here;
	 * finish_reply() is responsible for sending it. */
	return -EIO;		/* No default reply */
}
+
+
+/*-------------------------------------------------------------------------*/
+
/*
 * Handle SCSI WRITE(6)/WRITE(10): receive data from the host through
 * the buffer pipeline and write it to the backing file at the CDB's
 * LBA.  Requests from the host and writes to the file are interleaved
 * in a single loop.  FUA is implemented by setting O_SYNC on the
 * backing file.  With CONFIG_USB_CSW_HACK the CSW may be sent before
 * the final write completes (optimistically assuming success).
 * Always returns non-zero so the caller sends no default reply.
 */
static int do_write(struct fsg_dev *fsg)
{
	struct lun		*curlun = fsg->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset, file_offset_tmp;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nwritten;
	int			rc;

#ifdef CONFIG_USB_CSW_HACK
	int			csw_hack_sent = 0;
	int			i;
#endif
	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (fsg->cmnd[0] == SC_WRITE_6)
		/* WRITE(6): 21-bit LBA in bytes 1-3 of the CDB */
		lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
	else {
		lba = get_be32(&fsg->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if ((fsg->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (fsg->cmnd[1] & 0x08)	/* FUA */
			curlun->filp->f_flags |= O_SYNC;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes.  usb_offset tracks how far we have
	 * requested data from the host; file_offset tracks how far we
	 * have actually written to the file. */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << 9;
	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = fsg->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 *	don't go past the next page.
			 * If this means getting 0, then we were asked
			 *	to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, (u32)fsg->buf_size);
			amount = min((loff_t) amount, curlun->file_length -
					usb_offset);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
	(unsigned int) PAGE_CACHE_SIZE - partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info = usb_offset >> 9;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & 511);
			if (amount == 0) {

				/* Why were we were asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			fsg->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = bh->bulk_out_intended_length =
					amount;
			start_transfer(fsg, fsg->bulk_out, bh->outreq,
					&bh->outreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = fsg->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
#ifdef CONFIG_USB_CSW_HACK
		/*
		 * If the csw packet is already submmitted to the DCD,
		 * by marking the state of buffer as full, then by checking
		 * the residue, we make sure that this csw packet is not
		 * written on to the storage media.
		 */
		if (bh->state == BUF_STATE_FULL && fsg->residue) {
#else
		if (bh->state == BUF_STATE_FULL) {
#endif
			smp_rmb();
			fsg->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info = file_offset >> 9;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			/* Clamp the write so it never runs past EOF */
			if (curlun->file_length - file_offset < amount) {
				LERROR(curlun,
	"write %u @ %llu beyond end %llu\n",
	amount, (unsigned long long) file_offset,
	(unsigned long long) curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					(char __user *) bh->buf,
					amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
					(unsigned long long) file_offset,
					(int) nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
						(int) nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
						(int) nwritten, amount);
				nwritten -= (nwritten & 511);
						/* Round down to a block */
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			fsg->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
#ifdef CONFIG_USB_CSW_HACK
				/*
				 * If csw is already sent & write failure
				 * occured, then detach the storage media
				 * from the corresponding lun, and cable must
				 * be disconnected to recover fom this error.
				 */
				if (csw_hack_sent) {
					if (backing_file_is_open(curlun)) {
						close_backing_file(fsg, curlun);
						curlun->unit_attention_data =
							SS_MEDIUM_NOT_PRESENT;
					}
					break;
				}
#endif
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info = file_offset >> 9;
				curlun->info_valid = 1;
				break;
			}

#ifdef CONFIG_USB_CSW_HACK
			if ((nwritten == amount) && !csw_hack_sent) {
				/*
				 * Check if any of the buffer is in the
				 * busy state, if any buffer is in busy state,
				 * means the complete data is not received
				 * yet from the host. So there is no point in
				 * csw right away without the complete data.
				 */
				for (i = 0; i < NUM_BUFFERS; i++) {
					if (fsg->buffhds[i].state ==
							BUF_STATE_BUSY)
						break;
				}
				/* Check whether we received the complete
				 * data from the host, before sending csw */
				if (!amount_left_to_req && i == NUM_BUFFERS) {
					csw_hack_sent = 1;
					send_status(fsg);
				}
			}
#endif
			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				fsg->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}
+
+
+/*-------------------------------------------------------------------------*/
+
/* Sync the file data, don't bother with the metadata.
 * The caller must own fsg->filesem.
 * This code was copied from fs/buffer.c:sys_fdatasync().
 * Returns 0 for read-only/unbacked LUNs, -EINVAL if the filesystem
 * has no fsync op, otherwise the first error from writeback. */
static int fsync_sub(struct lun *curlun)
{
	struct file	*filp = curlun->filp;
	struct inode	*inode;
	int		rc, err;

	if (curlun->ro || !filp)
		return 0;
	if (!filp->f_op->fsync)
		return -EINVAL;

	inode = filp->f_path.dentry->d_inode;
	mutex_lock(&inode->i_mutex);
	/* Start writeback, fsync, then wait for completion; the first
	 * failure wins. */
	rc = filemap_fdatawrite(inode->i_mapping);
	err = filp->f_op->fsync(filp, filp->f_path.dentry, 1);
	if (!rc)
		rc = err;
	err = filemap_fdatawait(inode->i_mapping);
	if (!rc)
		rc = err;
	mutex_unlock(&inode->i_mutex);
	VLDBG(curlun, "fdatasync -> %d\n", rc);
	return rc;
}
+
+static void fsync_all(struct fsg_dev *fsg)
+{
+	int	i;
+
+	for (i = 0; i < fsg->nluns; ++i)
+		fsync_sub(&fsg->luns[i]);
+}
+
+static int do_synchronize_cache(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all file's
+	 * dirty data buffers. */
+	rc = fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+}
+
/*
 * Handle SCSI VERIFY: flush and invalidate the cache, then read the
 * requested blocks back from the medium, discarding the data.  Any
 * read failure is reported via sense data.  Uses a single buffer head
 * since the data never goes to the host.
 */
static int do_verify(struct fsg_dev *fsg)
{
	struct lun		*curlun = fsg->curlun;
	u32			lba;
	u32			verification_length;
	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
	loff_t			file_offset, file_offset_tmp;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	lba = get_be32(&fsg->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it. */
	if ((fsg->cmnd[1] & ~0x10) != 0) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_be16(&fsg->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << 9;
	file_offset = ((loff_t) lba) << 9;

	/* Write out all the dirty buffers before invalidating them */
	fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 * If this means reading 0 then we were asked to read
		 * past the end of file. */
		amount = min((unsigned int) amount_left,
				(unsigned int)fsg->buf_size);
		amount = min((loff_t) amount,
				curlun->file_length - file_offset);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				(char __user *) bh->buf,
				amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
				(unsigned long long) file_offset,
				(int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n",
					(int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
					(int) nread, amount);
			nread -= (nread & 511);	/* Round down to a sector */
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}
+
+
+/*-------------------------------------------------------------------------*/
+
/*
 * Handle SCSI INQUIRY: fill @bh->buf with a 36-byte standard INQUIRY
 * response (direct-access device, removable, SCSI-2) and return its
 * length.  For an unsupported LUN, report peripheral qualifier 0x7f.
 */
static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	u8	*buf = (u8 *) bh->buf;

	if (!fsg->curlun) {		/* Unsupported LUNs are okay */
		fsg->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		return 36;
	}

	memset(buf, 0, 8);	/* Direct-access device (type 0) */

	buf[1] = 0x80;	/* set removable bit */
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
				/* No special options */
	/* Bytes 8-35: vendor (8), product (16), revision (4) */
	sprintf(buf + 8, "%-8s%-16s%04x", fsg->vendor,
			fsg->product, fsg->release);
	return 36;
}
+
+
/*
 * Handle SCSI REQUEST SENSE: build an 18-byte fixed-format sense
 * response from the current LUN's stored sense data, then clear it.
 * Returns the response length (18).
 */
static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct lun	*curlun = fsg->curlun;
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		fsg->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		/* Report and clear the LUN's sense state */
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_be32(&buf[3], sdinfo);		/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
+
+
/*
 * Handle SCSI READ CAPACITY(10): return the last LBA and the fixed
 * 512-byte block length.  PMI > 1, or a non-zero LBA with PMI == 0,
 * is rejected as an invalid CDB.  Returns the 8-byte response length.
 */
static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct lun	*curlun = fsg->curlun;
	u32		lba = get_be32(&fsg->cmnd[2]);
	int		pmi = fsg->cmnd[8];
	u8		*buf = (u8 *) bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_be32(&buf[0], curlun->num_sectors - 1);	/* Max logical block */
	put_be32(&buf[4], 512);				/* Block length */
	return 8;
}
+
+
/*
 * Handle SCSI MODE SENSE(6)/(10): build a mode parameter header with
 * only the WriteProtect bit variable.  The Caching mode page is
 * compiled out (see the #if 0 below) to work around USB reset
 * problems with a Vista host, so every page request yields just the
 * header.  Returns the response length or -EINVAL with sense set.
 */
static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct lun	*curlun = fsg->curlun;
	int		mscmnd = fsg->cmnd[0];
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((fsg->cmnd[1] & ~0x08) != 0) {		/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	/* PC = page control (2 bits), then 6-bit page code */
	pc = fsg->cmnd[2] >> 6;
	page_code = fsg->cmnd[2] & 0x3f;
	if (pc == 3) {
		/* Saved values are not supported */
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/* Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later. */
	memset(buf, 0, 8);
	if (mscmnd == SC_MODE_SENSE_6) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* SC_MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
		buf += 8;
		limit = 65535;
	}

	/* No block descriptors */

	/* Disabled to workaround USB reset problems with a Vista host.
	 */
#if 0
	/* The mode pages, in numerical order.  The only page we support
	 * is the Caching page. */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_be16(&buf[4], 0xffff);  /* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_be16(&buf[8], 0xffff);  /* Maximum prefetch */
			/* Maximum prefetch ceiling */
			put_be16(&buf[10], 0xffff);
		}
		buf += 12;
	}
#else
	valid_page = 1;
#endif

	/* Check that a valid page was requested and the mode data length
	 * isn't too long. */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/*  Store the mode data length */
	if (mscmnd == SC_MODE_SENSE_6)
		buf0[0] = len - 1;
	else
		put_be16(buf0, len - 2);
	return len;
}
+
+static int do_start_stop(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		loej, start;
+
+	/* int immed = fsg->cmnd[1] & 0x01; */
+	loej = fsg->cmnd[4] & 0x02;
+	start = fsg->cmnd[4] & 0x01;
+
+	if (loej) {
+		/* eject request from the host */
+		if (backing_file_is_open(curlun)) {
+			close_backing_file(fsg, curlun);
+			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+		}
+	}
+
+	return 0;
+}
+
+static int do_prevent_allow(struct fsg_dev *fsg)
+{
+	struct lun	*curlun = fsg->curlun;
+	int		prevent;
+
+	prevent = fsg->cmnd[4] & 0x01;
+	if ((fsg->cmnd[4] & ~0x01) != 0) {		/* Mask away Prevent */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+
/*
 * Handle SCSI READ FORMAT CAPACITIES: return a capacity list header
 * plus a single Current/Maximum Capacity Descriptor.  Returns the
 * 12-byte response length.
 */
static int do_read_format_capacities(struct fsg_dev *fsg,
			struct fsg_buffhd *bh)
{
	struct lun	*curlun = fsg->curlun;
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_be32(&buf[0], curlun->num_sectors);	/* Number of blocks */
	/* Descriptor bytes 4-7: put_be32 writes the 512-byte block
	 * length into bytes 5-7 (512 fits in 24 bits), then byte 4 is
	 * deliberately overwritten with the descriptor code. */
	put_be32(&buf[4], 512);				/* Block length */
	buf[4] = 0x02;					/* Current capacity */
	return 12;
}
+
+
/* MODE SELECT is not supported: always fail with INVALID COMMAND. */
static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct lun	*curlun = fsg->curlun;

	/* We don't support MODE SELECT */
	curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}
+
+
/*
 * Halt the bulk-in endpoint, retrying every 100 ms while the
 * controller reports -EAGAIN.  Other errors are logged and treated as
 * success (rc forced to 0); an interrupted sleep returns -EINTR.
 */
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int     rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		DBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			DBG(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}
		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ept_set_halt(fsg->bulk_in);
	}
	return rc;
}
+/*-------------------------------------------------------------------------*/
#if 0
/* Dead code (see the note in finish_reply): queue a zero-length
 * bulk-in packet.  Disabled because it caused problems with MacOS. */
static int write_zero(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	int			rc;

	DBG(fsg, "write_zero\n");
	/* Wait for the next buffer to become available */
	bh = fsg->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}

	bh->inreq->length = 0;
	start_transfer(fsg, fsg->bulk_in, bh->inreq,
			&bh->inreq_busy, &bh->state);

	fsg->next_buffhd_to_fill = bh->next;
	return 0;
}
#endif
+
/*
 * Read and discard the remaining bulk-out data the host is going to
 * send (fsg->usb_amount_left bytes), since we cannot stall.  A short
 * packet or transfer error aborts via FSG_STATE_ABORT_BULK_OUT and
 * returns -EINTR; returns 0 when all excess data has been drained.
 */
static int throw_away_data(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	DBG(fsg, "throw_away_data\n");
	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
			fsg->usb_amount_left > 0) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			bh->state = BUF_STATE_EMPTY;
			fsg->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual != bh->outreq->length ||
					bh->outreq->status != 0) {
				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = fsg->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
			amount = min(fsg->usb_amount_left, (u32) fsg->buf_size);

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = bh->bulk_out_intended_length =
					amount;
			start_transfer(fsg, fsg->bulk_out, bh->outreq,
					&bh->outreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			fsg->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}
	return 0;
}
+
+
/*
 * Finish the data phase of the current command.  For device-to-host
 * transfers, send the final buffer (stalling the bulk-in endpoint
 * first when there is residue and stalls are allowed).  For
 * host-to-device transfers, drain any excess data the host will still
 * send.  Returns 0, -EINVAL for an unknown direction, or -EINTR when
 * an abort exception was raised.
 */
static int finish_reply(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
	int			rc = 0;
	int			i;

	switch (fsg->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	case DATA_DIR_UNKNOWN:
		rc = -EINVAL;
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (fsg->data_size == 0)
			;		/* Nothing to send */

		/* If there's no residue, simply send the last buffer */
		else if (fsg->residue == 0) {
			start_transfer(fsg, fsg->bulk_in, bh->inreq,
					&bh->inreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
		} else {
			if (can_stall) {
				/* Wait for every buffer to empty, then
				 * halt bulk-in so the host sees the
				 * short transfer */
				bh->state = BUF_STATE_EMPTY;
				for (i = 0; i < NUM_BUFFERS; ++i) {
					struct fsg_buffhd
							*bh = &fsg->buffhds[i];
					while (bh->state != BUF_STATE_EMPTY) {
						rc = sleep_thread(fsg);
						if (rc)
							return rc;
					}
				}
				rc = halt_bulk_in_endpoint(fsg);
			} else {
			start_transfer(fsg, fsg->bulk_in, bh->inreq,
					&bh->inreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			}
#if 0
	/* this is unnecessary, and was causing problems with MacOS */
			if (length > 0)
				write_zero(fsg);
#endif
		}
		break;

	/* We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests. */
	case DATA_DIR_FROM_HOST:
		if (fsg->residue == 0)
			;		/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		else if (fsg->short_packet_received) {
			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
		}

		/* We haven't processed all the incoming data.  Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on. */
#if 0
		fsg_set_halt(fsg, fsg->bulk_out);
		raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
		rc = -EINTR;
#endif

		/* We can't stall.  Read in the excess data and throw it
		 * all away. */
		else
			rc = throw_away_data(fsg);
		break;
	}
	return rc;
}
+
+
+/*
+ * Build and queue the Bulk-only Command Status Wrapper for the command
+ * just processed.  Status is derived from the current LUN's sense data
+ * (or the bad-LUN flag when no LUN is selected) and from phase_error.
+ * Returns 0, or an error from sleep_thread() while waiting for a free
+ * buffer.
+ */
+static int send_status(struct fsg_dev *fsg)
+{
+	struct lun		*curlun = fsg->curlun;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u8			status = USB_STATUS_PASS;
+	u32			sd, sdinfo = 0;
+	struct bulk_cs_wrap	*csw;
+
+	DBG(fsg, "send_status\n");
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	/* Pick the sense data to report: the LUN's own, or a canned code
+	 * when the CBW addressed a nonexistent LUN. */
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (fsg->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (fsg->phase_error) {
+		DBG(fsg, "sending phase-error status\n");
+		status = USB_STATUS_PHASE_ERROR;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(fsg, "sending command-failure status\n");
+		status = USB_STATUS_FAIL;
+		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	csw = bh->buf;
+
+	/* Store and send the Bulk-only CSW */
+	csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
+	csw->Tag = fsg->tag;
+#ifdef CONFIG_USB_CSW_HACK
+	/* Since csw is being sent early, before
+	 * writing on to storage media, need to set
+	 * residue to zero,assuming that write will succeed.
+	 */
+	csw->Residue = 0;
+#else
+	csw->Residue = cpu_to_le32(fsg->residue);
+#endif
+	csw->Status = status;
+
+	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
+	start_transfer(fsg, fsg->bulk_in, bh->inreq,
+			&bh->inreq_busy, &bh->state);
+
+	fsg->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have.
+ *
+ * @cmnd_size:    expected CDB length for this opcode
+ * @data_dir:     transfer direction implied by the command
+ * @mask:         bitmap of CDB bytes (beyond byte 0) allowed non-zero
+ * @needs_medium: non-zero if the command requires an open backing file
+ * @name:         command name, used only for debug logging
+ *
+ * Returns 0 if the command may proceed, -EINVAL otherwise (setting the
+ * LUN's sense data where one is available). */
+static int check_command(struct fsg_dev *fsg, int cmnd_size,
+		enum data_direction data_dir, unsigned int mask,
+		int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = fsg->cmnd[1] >> 5;	/* LUN bits 7..5 of CDB byte 1 */
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
+	char			hdlen[20];
+	struct lun		*curlun;
+
+	hdlen[0] = 0;
+	if (fsg->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
+				fsg->data_size);
+	VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+			name, cmnd_size, dirletter[(int) data_dir],
+			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
+
+	/* We can't reply at all until we know the correct data direction
+	 * and size. */
+	if (fsg->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	/* CB or CBI */
+		fsg->data_dir = data_dir;
+		fsg->data_size = fsg->data_size_from_cmnd;
+
+	} else {					/* Bulk-only */
+		if (fsg->data_size < fsg->data_size_from_cmnd) {
+
+			/* Host data size < Device data size is a phase error.
+			 * Carry out the command, but only transfer as much
+			 * as we are allowed. */
+			DBG(fsg, "phase error 1\n");
+			fsg->data_size_from_cmnd = fsg->data_size;
+			fsg->phase_error = 1;
+		}
+	}
+	fsg->residue = fsg->usb_amount_left = fsg->data_size;
+
+	/* Conflicting data directions is a phase error */
+	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
+		fsg->phase_error = 1;
+		DBG(fsg, "phase error 2\n");
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != fsg->cmnd_size) {
+
+		/* Special case workaround: MS-Windows issues REQUEST SENSE/
+		 * INQUIRY with cbw->Length == 12 (it should be 6). */
+		if ((fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
+		 || (fsg->cmnd[0] == SC_INQUIRY && fsg->cmnd_size == 12))
+			cmnd_size = fsg->cmnd_size;
+		else {
+			fsg->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (fsg->lun != lun)
+		DBG(fsg, "using LUN %d from CBW, "
+				"not LUN %d from CDB\n",
+				fsg->lun, lun);
+
+	/* Check the LUN */
+	if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
+		fsg->curlun = curlun = &fsg->luns[fsg->lun];
+		/* Any command except REQUEST SENSE clears stale sense data */
+		if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		fsg->curlun = curlun = NULL;
+		fsg->bad_lun_okay = 0;
+
+		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not. */
+		if (fsg->cmnd[0] != SC_INQUIRY &&
+				fsg->cmnd[0] != SC_REQUEST_SENSE) {
+			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
+			return -EINVAL;
+		}
+	}
+
+	/* If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+			fsg->cmnd[0] != SC_INQUIRY &&
+			fsg->cmnd[0] != SC_REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	fsg->cmnd[1] &= 0x1f;			/* Mask away the LUN */
+	for (i = 1; i < cmnd_size; ++i) {
+		if (fsg->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			DBG(fsg, "SS_INVALID_FIELD_IN_CDB\n");
+			return -EINVAL;
+		}
+	}
+
+	/* If the medium isn't mounted and the command needs to access
+	 * it, return an error. */
+	if (curlun && !backing_file_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		DBG(fsg, "SS_MEDIUM_NOT_PRESENT\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Decode and dispatch the SCSI command held in fsg->cmnd.  Each case
+ * sets data_size_from_cmnd from the CDB, validates the command with
+ * check_command() (whose bitmask marks which CDB bytes may be non-zero)
+ * and then invokes the matching do_*() handler.  On return, any
+ * host-bound reply has been staged in the buffer for finish_reply().
+ * Returns 0, or -EINTR if an exception/signal interrupted processing.
+ */
+static int do_scsi_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(fsg);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	fsg->phase_error = 0;
+	fsg->short_packet_received = 0;
+
+	down_read(&fsg->filesem);	/* We're using the backing file */
+	switch (fsg->cmnd[0]) {
+
+	case SC_INQUIRY:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"INQUIRY")) == 0)
+			reply = do_inquiry(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(1<<1) | (1<<4), 0,
+				"MODE SELECT(6)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SELECT_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (3<<7), 0,
+				"MODE SELECT(10)")) == 0)
+			reply = do_mode_select(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_6:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (1<<4), 0,
+				"MODE SENSE(6)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_MODE_SENSE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (1<<2) | (3<<7), 0,
+				"MODE SENSE(10)")) == 0)
+			reply = do_mode_sense(fsg, bh);
+		break;
+
+	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<4), 0,
+				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
+			reply = do_prevent_allow(fsg);
+		break;
+
+	case SC_READ_6:
+		/* Transfer length 0 means 256 blocks; <<9 converts
+		 * 512-byte sectors to bytes. */
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(7<<1) | (1<<4), 1,
+				"READ(6)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"READ(10)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"READ(12)")) == 0)
+			reply = do_read(fsg);
+		break;
+
+	case SC_READ_CAPACITY:
+		fsg->data_size_from_cmnd = 8;
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(0xf<<2) | (1<<8), 1,
+				"READ CAPACITY")) == 0)
+			reply = do_read_capacity(fsg, bh);
+		break;
+
+	case SC_READ_FORMAT_CAPACITIES:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+				(3<<7), 1,
+				"READ FORMAT CAPACITIES")) == 0)
+			reply = do_read_format_capacities(fsg, bh);
+		break;
+
+	case SC_REQUEST_SENSE:
+		fsg->data_size_from_cmnd = fsg->cmnd[4];
+		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+				(1<<4), 0,
+				"REQUEST SENSE")) == 0)
+			reply = do_request_sense(fsg, bh);
+		break;
+
+	case SC_START_STOP_UNIT:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
+				(1<<1) | (1<<4), 0,
+				"START-STOP UNIT")) == 0)
+			reply = do_start_stop(fsg);
+		break;
+
+	case SC_SYNCHRONIZE_CACHE:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(0xf<<2) | (3<<7), 1,
+				"SYNCHRONIZE CACHE")) == 0)
+			reply = do_synchronize_cache(fsg);
+		break;
+
+	case SC_TEST_UNIT_READY:
+		fsg->data_size_from_cmnd = 0;
+		reply = check_command(fsg, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/* Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0. */
+	case SC_VERIFY:
+		fsg->data_size_from_cmnd = 0;
+		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"VERIFY")) == 0)
+			reply = do_verify(fsg);
+		break;
+
+	case SC_WRITE_6:
+		/* Transfer length 0 means 256 blocks, as with READ(6). */
+		i = fsg->cmnd[4];
+		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+				(7<<1) | (1<<4), 1,
+				"WRITE(6)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_10:
+		fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (3<<7), 1,
+				"WRITE(10)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	case SC_WRITE_12:
+		fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+		if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+				(1<<1) | (0xf<<2) | (0xf<<6), 1,
+				"WRITE(12)")) == 0)
+			reply = do_write(fsg);
+		break;
+
+	/* Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of Posix locks. */
+	case SC_FORMAT_UNIT:
+	case SC_RELEASE:
+	case SC_RESERVE:
+	case SC_SEND_DIAGNOSTIC:
+		/* Fall through */
+
+	default:
+		fsg->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
+		if ((reply = check_command(fsg, fsg->cmnd_size,
+				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+			fsg->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&fsg->filesem);
+
+	VDBG(fsg, "reply: %d, fsg->data_size_from_cmnd: %d\n",
+			reply, fsg->data_size_from_cmnd);
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		/* Error reply length */
+	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32) reply, fsg->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		fsg->residue -= reply;
+	}				/* Otherwise it's already set */
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Validate a just-received Command Block Wrapper and latch its fields
+ * (CDB bytes, transfer direction and length, LUN, tag) into fsg for
+ * the command machinery.  Returns 0 on success or -EINVAL when the
+ * packet is not a well-formed, meaningful CBW.
+ */
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = req->buf;
+
+	/* Was this a real packet? */
+	if (req->status)
+		return -EINVAL;
+
+	/* Is the CBW valid?  Must be exactly 31 bytes with the right
+	 * little-endian signature. */
+	if (req->actual != USB_BULK_CB_WRAP_LEN ||
+			cbw->Signature != __constant_cpu_to_le32(
+				USB_BULK_CB_SIG)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	fsg->cmnd_size = cbw->Length;
+	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
+	if (cbw->Flags & USB_BULK_IN_FLAG)
+		fsg->data_dir = DATA_DIR_TO_HOST;
+	else
+		fsg->data_dir = DATA_DIR_FROM_HOST;
+	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
+	/* A zero-length data phase overrides the direction flag */
+	if (fsg->data_size == 0)
+		fsg->data_dir = DATA_DIR_NONE;
+	fsg->lun = cbw->Lun;
+	fsg->tag = cbw->Tag;
+	return 0;
+}
+
+
+/*
+ * Queue a bulk-out read for the next Command Block Wrapper, block the
+ * main thread until it arrives, then parse it with received_cbw().
+ * Returns 0 on a good CBW, -EINVAL for a bad one, or an error from
+ * sleep_thread() if an exception interrupted the wait.
+ */
+static int get_next_command(struct fsg_dev *fsg)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = fsg->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+
+	/* Queue a request to read a Bulk-only CBW */
+	set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
+	start_transfer(fsg, fsg->bulk_out, bh->outreq,
+			&bh->outreq_busy, &bh->state);
+
+	/* We will drain the buffer in software, which means we
+	 * can reuse it for the next filling.  No need to advance
+	 * next_buffhd_to_fill. */
+
+	/* Wait for the CBW to arrive */
+	while (bh->state != BUF_STATE_FULL) {
+		rc = sleep_thread(fsg);
+		if (rc)
+			return rc;
+	}
+	/* Pair with the completion handler's write to bh->state */
+	smp_rmb();
+	rc = received_cbw(fsg, bh);
+	bh->state = BUF_STATE_EMPTY;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate a usb_request on @ep and store it in *@preq (NULL on
+ * failure).  Returns 0 on success or -ENOMEM after logging which
+ * bulk direction could not be served. */
+static int alloc_request(struct fsg_dev *fsg, struct usb_endpoint *ep,
+		struct usb_request **preq)
+{
+	struct usb_request *req = usb_ept_alloc_req(ep, 0);
+
+	if (!req) {
+		ERROR(fsg, "can't allocate request for bulk %s\n",
+				(ep == fsg->bulk_in ? "IN" : "OUT"));
+		*preq = NULL;
+		return -ENOMEM;
+	}
+	*preq = req;
+	return 0;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface.  The only other
+ * available altsetting is 0, which enables the interface.
+ *
+ * Teardown always runs first (the "reset" label); enabling then falls
+ * through to allocate fresh requests.  An allocation failure jumps
+ * back to "reset" with rc != 0, which frees everything and returns
+ * the error.  All LUNs get a RESET unit attention on success.
+ */
+static int do_set_interface(struct fsg_dev *fsg, int altsetting)
+{
+	int	rc = 0;
+	int	i;
+
+	if (fsg->running)
+		DBG(fsg, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd *bh = &fsg->buffhds[i];
+
+		if (bh->inreq) {
+			usb_ept_cancel_xfer(fsg->bulk_in, bh->inreq);
+			usb_ept_free_req(fsg->bulk_in, bh->inreq);
+			bh->inreq = NULL;
+		}
+		if (bh->outreq) {
+			usb_ept_cancel_xfer(fsg->bulk_out, bh->outreq);
+			usb_ept_free_req(fsg->bulk_out, bh->outreq);
+			bh->outreq = NULL;
+		}
+	}
+
+	fsg->running = 0;
+	/* Disabling, or arriving here after an allocation failure */
+	if (altsetting < 0 || rc != 0)
+		return rc;
+
+	DBG(fsg, "set interface %d\n", altsetting);
+
+	fsg->bulk_out_maxpacket = usb_ept_get_max_packet(fsg->bulk_out);
+
+	/* Allocate the requests */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq);
+		if (rc != 0)
+			goto reset;
+		rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq);
+		if (rc != 0)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+
+	fsg->running = 1;
+	for (i = 0; i < fsg->nluns; ++i)
+		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+
+	return rc;
+}
+
+/* Hold the wake lock iff the gadget is configured and at least one
+ * LUN has an open backing file; release it otherwise.  Serialized by
+ * fsg->lock so the decision and the lock operation stay consistent. */
+static void adjust_wake_lock(struct fsg_dev *fsg)
+{
+	unsigned long	flags;
+	int		want_lock = 0;
+	int		lun;
+
+	spin_lock_irqsave(&fsg->lock, flags);
+
+	if (fsg->config)
+		for (lun = 0; lun < fsg->nluns; ++lun)
+			if (backing_file_is_open(&fsg->luns[lun]))
+				want_lock = 1;
+
+	if (want_lock)
+		wake_lock(&fsg->wake_lock);
+	else
+		wake_unlock(&fsg->wake_lock);
+
+	spin_unlock_irqrestore(&fsg->lock, flags);
+}
+
+/*
+ * Change our operational configuration.  This code must agree with the
+ * code that returns config descriptors, and with interface altsetting
+ * code.  Tears down the old interface (if any), brings up the new one,
+ * then reports the result to the switch device and re-evaluates the
+ * wake lock.  A request for the current config is a no-op.
+ *
+ * It's also responsible for power management interactions.  Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_dev *fsg, u8 new_config)
+{
+	int ret = 0;
+
+	if (fsg->config == new_config)
+		return 0;
+
+	if (fsg->config) {
+		/* Disable the single interface */
+		DBG(fsg, "reset config\n");
+		fsg->config = 0;
+		ret = do_set_interface(fsg, -1);
+	}
+
+	if (new_config) {
+		/* Enable the interface */
+		fsg->config = new_config;
+		ret = do_set_interface(fsg, 0);
+		if (ret)
+			fsg->config = 0;	/* Reset on errors */
+		else
+			INFO(fsg, "config #%d\n", fsg->config);
+	}
+
+	switch_set_state(&fsg->sdev, new_config);
+	adjust_wake_lock(fsg);
+	return ret;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Central exception handler, run from the main thread whenever an
+ * exception is raised or a signal is pending.  Drains pending signals
+ * (anything but SIGUSR1 escalates to EXIT), resets all buffer and SCSI
+ * state under the lock, then performs the action the old state calls
+ * for: abort cleanup, reset, config change, or thread termination.
+ */
+static void handle_exception(struct fsg_dev *fsg)
+{
+	siginfo_t		info;
+	int			sig;
+	int			i;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	u8			new_config;
+	struct lun		*curlun;
+	int			rc;
+	unsigned long		flags;
+
+	DBG(fsg, "handle_exception state: %d\n", (int)fsg->state);
+	/* Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception. */
+	for (;;) {
+		sig = dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (fsg->state < FSG_STATE_EXIT)
+				DBG(fsg, "Main thread exiting on signal\n");
+			raise_exception(fsg, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler. */
+	spin_lock_irqsave(&fsg->lock, flags);
+
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		bh = &fsg->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
+			&fsg->buffhds[0];
+
+	new_config = fsg->new_config;
+	old_state = fsg->state;
+
+	/* An aborted bulk-out still needs its status phase; anything
+	 * else clears per-LUN state and returns to idle. */
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		fsg->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < fsg->nluns; ++i) {
+			curlun = &fsg->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = curlun->unit_attention_data =
+					SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		fsg->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irqrestore(&fsg->lock, flags);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	default:
+		break;
+
+	case FSG_STATE_ABORT_BULK_OUT:
+		DBG(fsg, "FSG_STATE_ABORT_BULK_OUT\n");
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (fsg->state == FSG_STATE_STATUS_PHASE)
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		break;
+
+	case FSG_STATE_RESET:
+		/* really not much to do here */
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		rc = do_set_config(fsg, new_config);
+		if (new_config == 0) {
+			/* We're using the backing file */
+			down_read(&fsg->filesem);
+			fsync_all(fsg);
+			up_read(&fsg->filesem);
+		}
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_config(fsg, 0);			/* Free resources */
+		spin_lock_irqsave(&fsg->lock, flags);
+		fsg->state = FSG_STATE_TERMINATED;	/* Stop the thread */
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The driver's kernel thread.  Loops pulling CBWs off the wire,
+ * executing the SCSI command, finishing the data phase and sending
+ * status, handling exceptions/signals between phases, until the state
+ * machine reaches FSG_STATE_TERMINATED.  Cleans up after itself and
+ * signals thread_notifier on exit.
+ */
+static int fsg_main_thread(void *fsg_)
+{
+	struct fsg_dev		*fsg = fsg_;
+	unsigned long		flags;
+
+	/* Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1. */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/* Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay. */
+	set_fs(get_ds());
+
+	/* The main loop */
+	while (fsg->state != FSG_STATE_TERMINATED) {
+		/* Exceptions and signals preempt normal command flow */
+		if (exception_in_progress(fsg) || signal_pending(current)) {
+			handle_exception(fsg);
+			continue;
+		}
+
+		/* Idle until the gadget is configured and running */
+		if (!fsg->running) {
+			sleep_thread(fsg);
+			continue;
+		}
+
+		if (get_next_command(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+
+		if (do_scsi_command(fsg) || finish_reply(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+
+#ifdef CONFIG_USB_CSW_HACK
+		/* Since status is already sent for write scsi command,
+		 * need to skip sending status once again if it is a
+		 * write scsi command.
+		 */
+		if (fsg->cmnd[0] == SC_WRITE_6  || fsg->cmnd[0] == SC_WRITE_10
+					|| fsg->cmnd[0] == SC_WRITE_12)
+			continue;
+#endif
+		if (send_status(fsg))
+			continue;
+
+		spin_lock_irqsave(&fsg->lock, flags);
+		if (!exception_in_progress(fsg))
+			fsg->state = FSG_STATE_IDLE;
+		spin_unlock_irqrestore(&fsg->lock, flags);
+		}
+
+	spin_lock_irqsave(&fsg->lock, flags);
+	fsg->thread_task = NULL;
+	spin_unlock_irqrestore(&fsg->lock, flags);
+
+	/* In case we are exiting because of a signal, unregister the
+	 * gadget driver and close the backing file. */
+	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
+		close_all_backing_files(fsg);
+
+	/* Let the unbind and cleanup routines know the thread has exited */
+	complete_and_exit(&fsg->thread_notifier, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing. */
+
+/*
+ * Open @filename as the backing store for @curlun: R/W if possible,
+ * falling back to R/O, validating that the target is a readable
+ * regular file or block device with a non-zero 512-byte-sector size.
+ * On success the LUN keeps its own reference to the file (get_file)
+ * and the wake lock is re-evaluated.  Returns 0 or a negative errno.
+ */
+static int open_backing_file(struct fsg_dev *fsg, struct lun *curlun, const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (-EROFS == PTR_ERR(filp))
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	if (filp->f_path.dentry)
+		inode = filp->f_path.dentry->d_inode;
+	if (inode && S_ISBLK(inode->i_mode)) {
+		if (bdev_read_only(inode->i_bdev))
+			ro = 1;
+	} else if (!inode || !S_ISREG(inode->i_mode)) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/* If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only. */
+	if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+	num_sectors = size >> 9;	/* File size in 512-byte sectors */
+	if (num_sectors == 0) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	get_file(filp);
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	LDBG(curlun, "open backing file: %s size: %lld num_sectors: %lld\n",
+			filename, size, num_sectors);
+	rc = 0;
+	adjust_wake_lock(fsg);
+
+out:
+	/* Runs on success too: get_file() above took the LUN's own
+	 * reference, so this close only drops filp_open's reference. */
+	filp_close(filp, current->files);
+	return rc;
+}
+
+
+/*
+ * Release @curlun's backing file, if any: fsync its data to disk,
+ * drop the file reference, and re-evaluate the wake lock.  Caller
+ * must hold fsg->filesem for writing (see note above these routines).
+ */
+static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun)
+{
+	if (curlun->filp) {
+		int rc;
+
+		/*
+		 * XXX: San: Ugly hack here added to ensure that
+		 * our pages get synced to disk.
+		 * Also drop caches here just to be extra-safe
+		 */
+		rc = vfs_fsync(curlun->filp, curlun->filp->f_path.dentry, 1);
+		if (rc < 0)
+			printk(KERN_ERR "ums: Error syncing data (%d)\n", rc);
+		/* drop_pagecache and drop_slab are no longer available */
+		/* drop_pagecache(); */
+		/* drop_slab(); */
+
+		LDBG(curlun, "close backing file\n");
+		fput(curlun->filp);
+		curlun->filp = NULL;
+		adjust_wake_lock(fsg);
+	}
+}
+
+/* Close the backing file of every LUN on this device.  Caller must
+ * hold fsg->filesem for writing while the gadget is registered. */
+static void close_all_backing_files(struct fsg_dev *fsg)
+{
+	int	lun;
+
+	for (lun = 0; lun < fsg->nluns; ++lun)
+		close_backing_file(fsg, &fsg->luns[lun]);
+}
+
+/*
+ * sysfs "file" show handler: writes the LUN's backing-file pathname
+ * plus a trailing newline into @buf, or an empty string when no file
+ * is open.  Returns the string length (including the newline) or a
+ * negative errno from d_path().
+ */
+static ssize_t show_file(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {	/* Get the complete pathname */
+		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
+			/* d_path builds the string at the END of buf;
+			 * move it to the front for sysfs. */
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		/* Add a newline */
+			buf[++rc] = 0;
+		}
+	} else {				/* No file, return 0 bytes */
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(&fsg->filesem);
+	return rc;
+}
+
+/*
+ * sysfs "file" store handler: ejects the current medium (if any) and,
+ * when a non-empty pathname is written, opens it as the new backing
+ * file, raising the appropriate unit-attention condition either way.
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t store_file(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct lun	*curlun = dev_to_lun(dev);
+	struct fsg_dev	*fsg = dev_get_drvdata(dev);
+	int		rc = 0;
+
+	DBG(fsg, "store_file: \"%s\"\n", buf);
+#if 0
+	/* disabled because we need to allow closing the backing file if the media was removed */
+	if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
+		LDBG(curlun, "eject attempt prevented\n");
+		return -EBUSY;				/* "Door is locked" */
+	}
+#endif
+
+	/* Remove a trailing newline.
+	 * NOTE(review): this casts away const and mutates the sysfs
+	 * buffer in place — works because sysfs passes a writable page,
+	 * but a local copy would be cleaner; confirm before relying on
+	 * this elsewhere. */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;
+
+	/* Eject current medium */
+	down_write(&fsg->filesem);
+	if (backing_file_is_open(curlun)) {
+		close_backing_file(fsg, curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+
+	/* Load new medium */
+	if (count > 0 && buf[0]) {
+		rc = open_backing_file(fsg, curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	}
+	up_write(&fsg->filesem);
+	return (rc < 0 ? rc : count);
+}
+
+
+/* Declared 0444 here, but fsg_bind() rewrites attr.mode to 0644 before
+ * device_create_file(), so store_file() is reachable through sysfs. */
+static DEVICE_ATTR(file, 0444, show_file, store_file);
+
+/*-------------------------------------------------------------------------*/
+
+/* Final kref release callback: free the LUN array, then the device. */
+static void fsg_release(struct kref *ref)
+{
+	struct fsg_dev *dev = container_of(ref, struct fsg_dev, ref);
+
+	kfree(dev->luns);
+	kfree(dev);
+}
+
+/* LUN device release callback: drop the reference this LUN held on
+ * the fsg_dev (taken with kref_get() in fsg_bind()). */
+static void lun_release(struct device *dev)
+{
+	struct fsg_dev *owner = dev_get_drvdata(dev);
+
+	kref_put(&owner->ref, fsg_release);
+}
+
+/*
+ * Tear down everything fsg_bind() set up: unregister the per-LUN sysfs
+ * devices, stop the main thread (waiting for it to exit), free the
+ * data buffers, and flush/disable/free both bulk endpoints.  Safe to
+ * call on a device that never bound (fsg->bound guards re-entry).
+ */
+static void /* __init_or_exit */ fsg_unbind(void *_ctxt)
+{
+	struct fsg_dev		*fsg = _ctxt;
+	int			i;
+	struct lun		*curlun;
+
+	pr_debug("%s ()\n", __func__);
+	if (!fsg)
+		return;
+	if (!fsg->bound)
+		return;
+
+	fsg->running = 0;
+	clear_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Unregister the sysfs attribute files and the LUNs */
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (curlun->registered) {
+			device_remove_file(&curlun->dev, &dev_attr_file);
+			device_unregister(&curlun->dev);
+			curlun->registered = 0;
+		}
+	}
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (fsg->state != FSG_STATE_TERMINATED) {
+		raise_exception(fsg, FSG_STATE_EXIT);
+		wait_for_completion(&fsg->thread_notifier);
+
+	}
+
+	/* Free the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		kfree(fsg->buffhds[i].buf);
+		fsg->buffhds[i].buf = NULL;
+	}
+
+	/* Quiesce and release the endpoints allocated in fsg_bind() */
+	if (fsg->bulk_in) {
+		usb_ept_fifo_flush(fsg->bulk_in);
+		usb_ept_enable(fsg->bulk_in,  0);
+		usb_free_endpoint(fsg->bulk_in);
+	}
+	if (fsg->bulk_out) {
+		usb_ept_fifo_flush(fsg->bulk_out);
+		usb_ept_enable(fsg->bulk_out,  0);
+		usb_free_endpoint(fsg->bulk_out);
+	}
+	fsg->bound = 0;
+}
+
+/*
+ * Bind callback: create the LUN devices and their sysfs attributes,
+ * claim an interface number and two bulk endpoints from the MSM
+ * gadget layer, allocate the data buffers, and start the main thread.
+ * On any failure, marks the device bound and runs fsg_unbind() to
+ * release whatever was set up.
+ */
+static void fsg_bind(void *_ctxt)
+{
+	/* NOTE(review): _ctxt is ignored; this uses the global the_fsg. */
+	struct fsg_dev		*fsg = the_fsg;
+	int			rc;
+	int			i;
+	unsigned int 		ret;
+	struct lun		*curlun;
+	char			*pathbuf, *p;
+	struct usb_function	*usb_func = &fsg_function;
+	struct usb_endpoint *ep;
+
+
+	/* Override the 0444 in DEVICE_ATTR so store_file() is usable */
+	dev_attr_file.attr.mode = 0644;
+	fsg->running = 0;
+
+	/* Find out how many LUNs there should be */
+	i = fsg->nluns;
+	if (i == 0)
+		i = 1;
+	if (i > MAX_LUNS) {
+		ERROR(fsg, "invalid number of LUNs: %d\n", i);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs. */
+	fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
+	if (!fsg->luns) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	fsg->nluns = i;
+
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		curlun->ro = 0;
+		curlun->dev.release = lun_release;
+		curlun->dev.parent = &fsg->pdev->dev;
+		dev_set_drvdata(&curlun->dev, fsg);
+		snprintf(curlun->dev.bus_id, BUS_ID_SIZE,
+				"lun%d", i);
+
+		rc = device_register(&curlun->dev);
+		if (rc != 0) {
+			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
+			goto out;
+		}
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc != 0) {
+			ERROR(fsg, "device_create_file failed: %d\n", rc);
+			device_unregister(&curlun->dev);
+			goto out;
+		}
+		curlun->registered = 1;
+		/* Each LUN holds a reference; dropped in lun_release() */
+		kref_get(&fsg->ref);
+	}
+	ret = usb_msm_get_next_ifc_number(usb_func);
+	intf_desc.bInterfaceNumber = ret;
+	pr_debug("%s: interface number = %d\n", __func__, ret);
+
+	/* NOTE(review): usb_alloc_endpoint() results are dereferenced
+	 * without a NULL check here and below — confirm it cannot fail
+	 * or add checks. */
+	ep = fsg->bulk_in = usb_alloc_endpoint(USB_DIR_IN);
+	hs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	fs_bulk_in_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: bulk in endpoint number = %d\n",
+						__func__, ep->num);
+
+	ep = fsg->bulk_out = usb_alloc_endpoint(USB_DIR_OUT);
+	hs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	fs_bulk_out_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	pr_debug("%s: bulk out endpoint number = %d\n",
+						__func__, ep->num);
+
+	/* Allocate the data buffers */
+	for (i = 0; i < NUM_BUFFERS; ++i) {
+		struct fsg_buffhd	*bh = &fsg->buffhds[i];
+
+		/* Allocate for the bulk-in endpoint.  We assume that
+		 * the buffer will also work with the bulk-out (and
+		 * interrupt-in) endpoint. */
+		bh->buf = kmalloc(fsg->buf_size, GFP_KERNEL);
+		if (!bh->buf)
+			goto out;
+		bh->next = bh + 1;
+	}
+	/* Close the ring of buffer heads */
+	fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+
+	fsg->state = FSG_STATE_IDLE;
+	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
+			"USB mass_storage");
+	if (IS_ERR(fsg->thread_task)) {
+		rc = PTR_ERR(fsg->thread_task);
+		ERROR(fsg, "kthread_create failed: %d\n", rc);
+		goto out;
+	}
+
+	DBG(fsg, "Number of LUNs=%d\n", fsg->nluns);
+
+	/* Log each LUN's backing file (best-effort; pathbuf may be NULL) */
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0; i < fsg->nluns; ++i) {
+		curlun = &fsg->luns[i];
+		if (backing_file_is_open(curlun)) {
+			p = NULL;
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = NULL;
+			}
+			LINFO(curlun, "ro=%d, file: %s\n",
+					curlun->ro, (p ? p : "(error)"));
+		}
+	}
+	kfree(pathbuf);
+
+	set_bit(REGISTERED, &fsg->atomic_bitflags);
+
+	/* Tell the thread to start working */
+	wake_up_process(fsg->thread_task);
+	fsg->bound = 1;
+	return;
+
+out:
+	fsg->state = FSG_STATE_TERMINATED;	/* The thread is dead */
+	/* Deliberately mark bound so fsg_unbind() will do the cleanup */
+	fsg->bound = 1;
+	fsg_unbind(fsg);
+	close_all_backing_files(fsg);
+}
+
+/*
+ * fsg_configure() - (de)configuration callback from the USB function core.
+ * @configured: non-zero when the host selected a configuration, 0 on unconfigure.
+ * @_ctxt: opaque context, the struct fsg_dev registered at bind time.
+ *
+ * Flushes stale bulk FIFOs left over from a previous configuration, programs
+ * the bulk endpoints for the negotiated speed, and signals the worker thread
+ * via FSG_STATE_CONFIG_CHANGE.  Holds an idle wake lock while configured.
+ */
+static void fsg_configure(int configured, void *_ctxt)
+{
+	struct fsg_dev *fsg = _ctxt;
+
+	if (!fsg)
+		return;
+	/* ignore events that arrive before fsg_bind() completed */
+	if (!fsg->bound)
+		return;
+
+	/* Clear out the controller's fifos */
+	if ((fsg->new_config) && (fsg->bulk_in))
+		usb_ept_fifo_flush(fsg->bulk_in);
+	if ((fsg->new_config) && (fsg->bulk_out))
+		usb_ept_fifo_flush(fsg->bulk_out);
+
+	if (configured) {
+		/* pick HS or FS endpoint descriptors based on enumerated speed */
+		if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+			usb_configure_endpoint(fsg->bulk_in, &hs_bulk_in_desc);
+			usb_configure_endpoint(fsg->bulk_out,
+						&hs_bulk_out_desc);
+		} else {
+			usb_configure_endpoint(fsg->bulk_in, &fs_bulk_in_desc);
+			usb_configure_endpoint(fsg->bulk_out,
+						&fs_bulk_out_desc);
+		}
+
+		usb_ept_enable(fsg->bulk_in, 1);
+		usb_ept_enable(fsg->bulk_out, 1);
+		/* keep the CPU out of idle while the host may talk to us */
+		wake_lock(&fsg->wake_lock_idle);
+	} else
+		wake_unlock(&fsg->wake_lock_idle);
+
+	fsg->new_config = configured;
+	/* wake the main thread so it re-reads the configuration state */
+	raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Registration record for the mass-storage function with the MSM
+ * usb_function framework; hs/fs descriptor tables and the context
+ * pointer are filled in by fsg_probe() before registration. */
+static struct usb_function		fsg_function = {
+	.bind		= fsg_bind,
+	.unbind		= fsg_unbind,
+	.configure  = fsg_configure,
+	.setup		= fsg_setup,
+
+	.name = "mass_storage",
+
+};
+
+
+/*
+ * fsg_alloc() - allocate and minimally initialize the singleton fsg_dev.
+ *
+ * Sets up the spinlock, filesem, refcount and thread-notifier completion,
+ * then publishes the device through the file-scope 'the_fsg' pointer.
+ * Returns 0 on success, -ENOMEM if the allocation fails.
+ */
+static int __init fsg_alloc(void)
+{
+	struct fsg_dev		*fsg;
+
+	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+	if (!fsg)
+		return -ENOMEM;
+	spin_lock_init(&fsg->lock);
+	init_rwsem(&fsg->filesem);
+	kref_init(&fsg->ref);
+	init_completion(&fsg->thread_notifier);
+
+	the_fsg = fsg;
+	return 0;
+}
+
+/* switch_dev 'name' attribute: reports the driver name to userspace. */
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+/* switch_dev 'state' attribute: "online" once a configuration is set,
+ * "offline" otherwise (derived from fsg->config). */
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	struct fsg_dev	*fsg = container_of(sdev, struct fsg_dev, sdev);
+	return sprintf(buf, "%s\n", (fsg->config ? "online" : "offline"));
+}
+/*
+ * fsg_remove() - platform-driver remove: tear down everything fsg_probe()
+ * created, in reverse order.  Closing the backing files and dropping the
+ * final kref releases the fsg_dev itself.
+ *
+ * NOTE(review): marked __exit, so this is only callable at module unload;
+ * unbinding the device via sysfs at runtime would not find this handler —
+ * confirm that is the intended restriction.
+ */
+static int __exit fsg_remove(struct platform_device *pdev)
+{
+	struct fsg_dev  *fsg = the_fsg;
+
+	usb_function_unregister(&fsg_function);
+	wake_lock_destroy(&fsg->wake_lock_idle);
+	switch_dev_unregister(&fsg->sdev);
+	test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags);
+	close_all_backing_files(fsg);
+	kref_put(&fsg->ref, fsg_release);
+
+	return 0;
+}
+
+/*
+ * fsg_probe() - platform-driver probe for the mass-storage function.
+ *
+ * Allocates the singleton fsg_dev, copies board configuration (LUN count,
+ * buffer size, USB IDs) from platform data, registers the android switch
+ * device, initializes suspend/idle wake locks, and finally registers the
+ * function with the MSM usb_function framework.
+ *
+ * NOTE(review): pdev->dev.platform_data is dereferenced without a NULL
+ * check — a board file that registers this device without pdata would
+ * oops here; verify all callers supply it.
+ * NOTE(review): the two wake locks initialized below are not destroyed
+ * on the err_usb_function_register path (only in fsg_remove), which
+ * leaks them if usb_function_register() fails.
+ */
+static int __init fsg_probe(struct platform_device *pdev)
+{
+	struct usb_mass_storage_platform_data *pdata = pdev->dev.platform_data;
+	int		rc;
+
+	rc = fsg_alloc();
+	if (rc != 0)
+		return rc;
+
+	the_fsg->pdev = pdev;
+	the_fsg->sdev.name = DRIVER_NAME;
+	the_fsg->nluns = pdata->nluns;
+	the_fsg->buf_size = pdata->buf_size;
+	the_fsg->vendor = pdata->vendor;
+	the_fsg->product = pdata->product;
+	the_fsg->release = pdata->release;
+	the_fsg->sdev.print_name = print_switch_name;
+	the_fsg->sdev.print_state = print_switch_state;
+	rc = switch_dev_register(&the_fsg->sdev);
+	if (rc < 0)
+		goto err_switch_dev_register;
+
+	wake_lock_init(&the_fsg->wake_lock, WAKE_LOCK_SUSPEND,
+		       "usb_mass_storage");
+	wake_lock_init(&the_fsg->wake_lock_idle, WAKE_LOCK_IDLE,
+		       "mass_storage_hold_idle");
+
+	fsg_function.hs_descriptors = hs_function;
+	fsg_function.fs_descriptors = fs_function;
+	fsg_function.context = the_fsg;
+	rc = usb_function_register(&fsg_function);
+	if (rc != 0)
+		goto err_usb_function_register;
+
+	return 0;
+
+err_usb_function_register:
+	switch_dev_unregister(&the_fsg->sdev);
+err_switch_dev_register:
+	kref_put(&the_fsg->ref, fsg_release);
+
+	return rc;
+}
+
+/* Platform driver glue; matched by name against the board-registered
+ * mass-storage platform device. */
+static struct platform_driver fsg_driver = {
+	.probe = fsg_probe,
+	.remove = __exit_p(fsg_remove),
+	.driver = { .name = DRIVER_NAME, },
+};
+
+/* Module entry point: just registers the platform driver; all real
+ * setup happens in fsg_probe() when the device is matched. */
+static int __init fsg_init(void)
+{
+	return platform_driver_register(&fsg_driver);
+}
+module_init(fsg_init);
+
+/* Module exit point: unregistering the driver triggers fsg_remove()
+ * for the bound device. */
+static void __exit fsg_cleanup(void)
+{
+	platform_driver_unregister(&fsg_driver);
+
+}
+module_exit(fsg_cleanup);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/msm_hsusb.c b/drivers/usb/function/msm_hsusb.c
new file mode 100644
index 0000000..eebd9d4
--- /dev/null
+++ b/drivers/usb/function/msm_hsusb.c
@@ -0,0 +1,3948 @@
+/* drivers/usb/function/msm_hsusb.c
+ *
+ * Driver for HighSpeed USB Client Controller in MSM7K
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+#include <linux/switch.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <mach/vreg.h>
+#include <mach/board.h>
+#include <mach/msm_hsusb.h>
+#include <mach/rpc_hsusb.h>
+#include <mach/rpc_pmapp.h>
+#include <mach/gpio.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm_otg.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos_params.h>
+#include <mach/clk.h>
+
+#define MSM_USB_BASE ((unsigned) ui->addr)
+
+#include "usb_function.h"
+
+#define EPT_FLAG_IN	0x0001
+#define USB_DIR_MASK	USB_DIR_IN
+#define SETUP_BUF_SIZE	4096
+
+/* IDs for string descriptors */
+#define STRING_LANGUAGE_ID      0
+#define STRING_SERIAL           1
+#define STRING_PRODUCT          2
+#define STRING_MANUFACTURER     3
+
+#define LANGUAGE_ID             0x0409 /* en-US */
+#define SOC_ROC_2_0		0x10002 /* ROC 2.0 */
+
+#define TRUE			1
+#define FALSE			0
+#define USB_LINK_RESET_TIMEOUT	(msecs_to_jiffies(10))
+#define USB_CHG_DET_DELAY	msecs_to_jiffies(1000)
+
+#define is_phy_45nm()     (PHY_MODEL(ui->phy_info) == USB_PHY_MODEL_45NM)
+#define is_phy_external() (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL)
+
+static int pid = 0x9018;
+
+/* Pairing of an allocated endpoint with the descriptor it was
+ * configured from. */
+struct usb_fi_ept {
+	struct usb_endpoint *ept;
+	struct usb_endpoint_descriptor desc;
+};
+
+/* Per-registered-function bookkeeping kept by the controller core. */
+struct usb_function_info {
+	struct list_head list;		/* (unused here; presumably links registered functions) */
+	unsigned enabled;		/* non-zero when the function is part of the composition */
+	struct usb_function *func;	/* the driver's registration record */
+};
+
+/* Controller-private wrapper around a usb_request: links requests into a
+ * per-endpoint singly-linked queue and owns the DMA-able transfer
+ * descriptor (ept_queue_item) handed to the hardware. */
+struct msm_request {
+	struct usb_request req;		/* public part; see to_msm_request() */
+
+	struct usb_info *ui;
+	struct msm_request *next;	/* next request on the same endpoint */
+
+	unsigned busy:1;		/* queued and not yet completed */
+	unsigned live:1;		/* handed to the hardware (primed) */
+	unsigned alloced:1;		/* req.buf was kmalloc'd by us and must be freed */
+	unsigned dead:1;		/* free deferred until completion (see usb_ept_free_req) */
+
+	dma_addr_t dma;			/* mapped address of req.buf */
+
+	struct ept_queue_item *item;	/* hardware transfer descriptor */
+	dma_addr_t item_dma;		/* its DMA address, linked into the TD chain */
+};
+/* String descriptor 0: the supported-language table, advertising
+ * LANGUAGE_ID (en-US) in little-endian order per USB 2.0 ch. 9.6.7. */
+static unsigned char str_lang_desc[] = {4,
+				USB_DT_STRING,
+				(unsigned char)LANGUAGE_ID,
+				(unsigned char)(LANGUAGE_ID >> 8)};
+
+#define to_msm_request(r) container_of(r, struct msm_request, req)
+static int usb_hw_reset(struct usb_info *ui);
+static void usb_vbus_online(struct usb_info *);
+static void usb_vbus_offline(struct usb_info *ui);
+static void usb_lpm_exit(struct usb_info *ui);
+static void usb_lpm_wakeup_phy(struct work_struct *);
+static void usb_exit(void);
+static int usb_is_online(struct usb_info *ui);
+static void usb_do_work(struct work_struct *w);
+static int usb_lpm_enter(struct usb_info *ui);
+int (*usb_lpm_config_gpio)(int);
+static void usb_enable_pullup(struct usb_info *ui);
+static void usb_disable_pullup(struct usb_info *ui);
+
+static struct workqueue_struct *usb_work;
+static void usb_chg_stop(struct work_struct *w);
+
+#define USB_STATE_IDLE    0
+#define USB_STATE_ONLINE  1
+#define USB_STATE_OFFLINE 2
+
+#define USB_FLAG_START          0x0001
+#define USB_FLAG_VBUS_ONLINE    0x0002
+#define USB_FLAG_VBUS_OFFLINE   0x0004
+#define USB_FLAG_RESET          0x0008
+#define USB_FLAG_SUSPEND	0x0010
+#define USB_FLAG_CONFIGURE	0x0020
+#define USB_FLAG_RESUME	0x0040
+#define USB_FLAG_REG_OTG 0x0080
+
+#define USB_MSC_ONLY_FUNC_MAP	0x10
+#define DRIVER_NAME		"msm_hsusb_peripheral"
+
+/* Deferred-work container for waking the PHY out of low-power mode. */
+struct lpm_info {
+	struct work_struct wakeup_phy;
+};
+
+/* Detected charger attached to the port; drives how much current the
+ * charging subsystem is told it may draw. */
+enum charger_type {
+	USB_CHG_TYPE__SDP,		/* standard downstream port (normal host) */
+	USB_CHG_TYPE__CARKIT,
+	USB_CHG_TYPE__WALLCHARGER,	/* dedicated charger; no enumeration */
+	USB_CHG_TYPE__INVALID		/* nothing detected yet */
+};
+
+/*
+ * Central device-controller state for the MSM high-speed USB peripheral.
+ * A single instance is allocated at probe time and published through
+ * 'the_usb_info'.  All register, queue and state transitions are
+ * serialized by 'lock'.
+ */
+struct usb_info {
+	/* lock for register/queue/device state changes */
+	spinlock_t lock;
+
+	/* single request used for handling setup transactions */
+	struct usb_request *setup_req;
+	struct usb_request *ep0out_req;
+
+	struct platform_device *pdev;
+	struct msm_hsusb_platform_data *pdata;
+	int irq;
+	int gpio_irq[2];
+	void *addr;			/* ioremapped controller base; see MSM_USB_BASE */
+
+	unsigned state;			/* USB_STATE_IDLE/ONLINE/OFFLINE state machine */
+	unsigned flags;			/* USB_FLAG_* work-queue event bits */
+
+	unsigned online;
+	unsigned running;
+	unsigned bound;
+
+	struct dma_pool *pool;		/* pool of ept_queue_item transfer descriptors */
+
+	/* dma page to back the queue heads and items */
+	unsigned char *buf;
+	dma_addr_t dma;
+
+	struct ept_queue_head *head;
+
+	/* used for allocation */
+	unsigned next_item;
+	unsigned next_ifc_num;		/* next free interface number to hand out */
+	unsigned stopped:1;
+	unsigned remote_wakeup:1;
+	unsigned configured:1;
+	unsigned selfpowered:1;
+	unsigned iad:1;			/* composition uses an Interface Association Descriptor */
+	unsigned char maxpower;		/* config bMaxPower units (x2 mA; see usb_get_max_power) */
+	enum usb_device_speed speed;
+	unsigned phy_info;		/* PHY model/type bits, see is_phy_45nm()/is_phy_external() */
+
+	/* endpoints are ordered based on their status bits,
+	** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15
+	*/
+	struct usb_endpoint ept[32];
+
+	struct delayed_work work;
+	struct delayed_work chg_legacy_det;	/* legacy charger-type detection */
+	unsigned phy_status;
+	unsigned phy_fail_count;
+	struct usb_composition *composition;	/* active function bitmap / product id */
+
+	struct usb_function_info **func;
+	unsigned num_funcs;
+	struct usb_function_map *functions_map;
+
+#define MAX_INTERFACE_NUM	15
+	struct usb_function *func2ifc_map[MAX_INTERFACE_NUM];
+
+#define ep0out ept[0]
+#define ep0in  ept[16]
+
+	struct clk *clk;
+	struct clk *pclk;
+	struct clk *cclk;		/* core clock; absent (NULL) on pre-7x30 targets */
+	unsigned int clk_enabled;
+
+	struct vreg *vreg;
+	unsigned int vreg_enabled;
+
+	unsigned in_lpm;		/* controller is in low-power mode (PHY suspended) */
+	struct lpm_info li;
+
+	enum charger_type chg_type;
+	struct work_struct chg_stop;
+#define MAX_STRDESC_NUM		100
+	char **strdesc;			/* dynamically registered string descriptors */
+	int strdesc_index;
+
+	u16 test_mode;			/* pending USB electrical test mode selector */
+	struct wake_lock wlock;
+	struct msm_otg_transceiver *xceiv;
+	int active;
+	enum usb_device_state usb_state;
+	int vbus_sn_notif;
+	struct switch_dev sdev;		/* android switch reporting online/offline */
+};
+static struct usb_info *the_usb_info;
+
+static unsigned short usb_validate_product_id(unsigned short pid);
+static unsigned short usb_get_product_id(unsigned long enabled_functions);
+static void usb_switch_composition(unsigned short pid);
+static unsigned short usb_set_composition(unsigned short pid);
+static void usb_configure_device_descriptor(struct usb_info *ui);
+static void usb_uninit(struct usb_info *ui);
+
+static unsigned ulpi_read(struct usb_info *ui, unsigned reg);
+static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg);
+
+
+
+/* Template device descriptor (USB 2.0, 64-byte ep0).  Class triple of
+ * zeros defers class information to the interface descriptors.  ID and
+ * string fields are populated later by usb_configure_device_descriptor(). */
+struct usb_device_descriptor desc_device = {
+	.bLength = USB_DT_DEVICE_SIZE,
+	.bDescriptorType = USB_DT_DEVICE,
+	.bcdUSB = 0x0200,
+	.bDeviceClass = 0,
+	.bDeviceSubClass = 0,
+	.bDeviceProtocol = 0,
+	.bMaxPacketSize0 = 64,
+	/* the following fields are filled in by usb_probe */
+	.idVendor = 0,
+	.idProduct = 0,
+	.bcdDevice = 0,
+	.iManufacturer = 0,
+	.iProduct = 0,
+	.iSerialNumber = 0,
+	.bNumConfigurations = 1,
+};
+
+static void flush_endpoint(struct usb_endpoint *ept);
+static void msm_hsusb_suspend_locks_acquire(struct usb_info *, int);
+
+/* switch_dev 'name' attribute for the peripheral controller. */
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+/* switch_dev 'state' attribute: "online" while the device is configured
+ * by a host, "offline" otherwise. */
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return sprintf(buf, "%s\n", (ui->online ? "online" : "offline"));
+}
+
+#define USB_WALLCHARGER_CHG_CURRENT 1800
+/*
+ * usb_get_max_power() - current (mA) we may draw from the attached source.
+ *
+ * Returns -ENODEV before charger detection has run, the wall-charger
+ * limit for dedicated chargers, 0 while suspended or unconfigured, and
+ * otherwise bMaxPower * 2 (descriptor units are 2 mA).
+ */
+static int usb_get_max_power(struct usb_info *ui)
+{
+	unsigned long flags;
+	enum charger_type temp;
+	int suspended;
+	int configured;
+
+	/* snapshot the state under the lock, compute outside it */
+	spin_lock_irqsave(&ui->lock, flags);
+	temp = ui->chg_type;
+	suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0;
+	configured = ui->configured;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__INVALID)
+		return -ENODEV;
+
+	if (temp == USB_CHG_TYPE__WALLCHARGER)
+		return USB_WALLCHARGER_CHG_CURRENT;
+
+	if (suspended || !configured)
+		return 0;
+
+	return ui->maxpower * 2;
+}
+
+/*
+ * usb_chg_legacy_detect() - delayed-work charger detection for PHYs
+ * without dedicated charger-detect hardware.
+ *
+ * Classifies the attachment by reading the PORTSC line-state bits: both
+ * D+/D- high (PORTSC_LS) means a dedicated wall charger, anything else a
+ * standard downstream port.  Reports the result and available current to
+ * the charging subsystem.
+ */
+static void usb_chg_legacy_detect(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	enum charger_type temp = USB_CHG_TYPE__INVALID;
+	int maxpower;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	/* cable may already be gone by the time the work runs */
+	if (ui->usb_state == USB_STATE_NOTATTACHED) {
+		ret = -ENODEV;
+		goto chg_legacy_det_out;
+	}
+
+	if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) {
+		ui->chg_type = temp = USB_CHG_TYPE__WALLCHARGER;
+		goto chg_legacy_det_out;
+	}
+
+	ui->chg_type = temp = USB_CHG_TYPE__SDP;
+chg_legacy_det_out:
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (ret)
+		return;
+
+	msm_chg_usb_charger_connected(temp);
+	maxpower = usb_get_max_power(ui);
+	if (maxpower > 0)
+		msm_chg_usb_i_is_available(maxpower);
+
+	/* USB driver prevents idle and suspend power collapse(pc)
+	 * while usb cable is connected. But when dedicated charger is
+	 * connected, driver can vote for idle and suspend pc. In order
+	 * to allow pc, driver has to initiate low power mode which it
+	 * cannot do as phy cannot be accessed when dedicated charger
+	 * is connected due to phy lockup issues. Just to allow idle &
+	 * suspend pc when dedicated charger is connected, release the
+	 * wakelock, set driver latency to default and act as if we are
+	 * in low power mode so that, driver will re-acquire wakelocks
+	 * for any sub-sequent usb interrupts.
+	 */
+	if (temp == USB_CHG_TYPE__WALLCHARGER) {
+		pr_info("\n%s: WALL-CHARGER\n", __func__);
+		spin_lock_irqsave(&ui->lock, flags);
+		/* re-check under the lock: detach may have raced us */
+		if (ui->usb_state == USB_STATE_NOTATTACHED) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			return;
+		}
+		ui->in_lpm = 1;
+		spin_unlock_irqrestore(&ui->lock, flags);
+
+		msm_hsusb_suspend_locks_acquire(ui, 0);
+	} else
+		pr_info("\n%s: Standard Downstream Port\n", __func__);
+}
+
+/*
+ * usb_msm_get_next_strdesc_id() - register a string for a string
+ * descriptor and return its descriptor index.
+ * @str: NUL-terminated string to register (copied).
+ *
+ * Returns the allocated id (>= 0), or -EPERM on an empty string, on
+ * exhaustion of the MAX_STRDESC_NUM table, or on allocation failure.
+ * Called by function drivers while building their descriptors.
+ */
+int usb_msm_get_next_strdesc_id(char *str)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned id;
+	unsigned long flags;
+	int len;
+
+	len = strlen(str);
+	if (!len) {
+		printk(KERN_ERR "usb next_strdesc_id(); null string\n");
+		return -EPERM;
+	}
+	/* for null character */
+	len = len + 1;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	id = ui->strdesc_index;
+	if (id >= MAX_STRDESC_NUM) {
+		id = -EPERM;
+		printk(KERN_ERR "reached max strdesc number\n");
+		goto get_strd_id_exit;
+	}
+
+	/* GFP_ATOMIC: we are inside a spinlock with IRQs off */
+	ui->strdesc[id] = kmalloc(len, GFP_ATOMIC);
+	if (ui->strdesc[id]) {
+		memcpy(ui->strdesc[id], str, len);
+		ui->strdesc_index++;
+	} else {
+		id = -EPERM;
+		printk(KERN_ERR "usb next_strdesc_id(); Out of memory:(%s)\n",
+			str);
+	}
+
+get_strd_id_exit:
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return id;
+}
+EXPORT_SYMBOL(usb_msm_get_next_strdesc_id);
+
+
+/* Whether the active composition uses an Interface Association Descriptor. */
+inline int usb_msm_is_iad(void)
+{
+	return the_usb_info->iad;
+}
+EXPORT_SYMBOL(usb_msm_is_iad);
+
+/* Mark the active composition as using an IAD (sticky; never cleared here). */
+inline void usb_msm_enable_iad(void)
+{
+	the_usb_info->iad = 1;
+}
+EXPORT_SYMBOL(usb_msm_enable_iad);
+
+/* Enumerated bus speed (enum usb_device_speed) of the current connection.
+ * NOTE(review): empty parens are an old-style unspecified-args prototype;
+ * (void) would be the correct modern form. */
+int usb_msm_get_speed()
+{
+	return the_usb_info->speed;
+}
+EXPORT_SYMBOL(usb_msm_get_speed);
+
+/*
+ * usb_msm_get_next_ifc_number() - allocate the next interface number for
+ * a function driver and record the driver in the ifc->function map used
+ * for routing class/vendor setup requests.
+ * @driver: the requesting function (matched by name against the board's
+ *          function map, and gated by the active composition bitmap).
+ *
+ * Returns the interface number, or -1 if the function is not part of the
+ * current composition.
+ */
+int usb_msm_get_next_ifc_number(struct usb_function *driver)
+{
+	struct usb_info *ui = the_usb_info;
+	int ifc_num = -1;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	for (i = 0; i < ui->pdata->num_functions; i++) {
+		if (strcmp(ui->functions_map[i].name, driver->name))
+			continue;
+		/* skip functions not enabled in the current composition */
+		if (!(ui->composition->functions & (1 << i)))
+			continue;
+		ifc_num = ui->next_ifc_num++;
+		ui->func2ifc_map[ifc_num] = driver;
+		break;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return ifc_num;
+}
+EXPORT_SYMBOL(usb_msm_get_next_ifc_number);
+
+/* Current self-powered status, as reported in GET_STATUS(device). */
+static inline int usb_msm_get_selfpowered(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return ui->selfpowered;
+}
+/* Whether the host enabled the remote-wakeup feature on us. */
+static inline int usb_msm_get_remotewakeup(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return ui->remote_wakeup;
+}
+
+/* Enable the controller's AHB clock (and core clock where present),
+ * idempotently via the clk_enabled flag. */
+static void usb_clk_enable(struct usb_info *ui)
+{
+	if (!ui->clk_enabled) {
+		clk_enable(ui->pclk);
+		if (ui->cclk)
+			clk_enable(ui->cclk);
+		ui->clk_enabled = 1;
+	}
+}
+
+/* Counterpart of usb_clk_enable(); no-op if clocks are already off. */
+static void usb_clk_disable(struct usb_info *ui)
+{
+	if (ui->clk_enabled) {
+		clk_disable(ui->pclk);
+		if (ui->cclk)
+			clk_disable(ui->cclk);
+		ui->clk_enabled = 0;
+	}
+}
+
+/* Enable the PHY voltage regulator if one was obtained at probe time;
+ * idempotent via vreg_enabled. */
+static void usb_vreg_enable(struct usb_info *ui)
+{
+	if (ui->vreg && !IS_ERR(ui->vreg) && !ui->vreg_enabled) {
+		vreg_enable(ui->vreg);
+		ui->vreg_enabled = 1;
+	}
+}
+
+/* Counterpart of usb_vreg_enable(). */
+static void usb_vreg_disable(struct usb_info *ui)
+{
+	if (ui->vreg && !IS_ERR(ui->vreg) && ui->vreg_enabled) {
+		vreg_disable(ui->vreg);
+		ui->vreg_enabled = 0;
+	}
+}
+
+/*
+ * ulpi_read() - read a ULPI PHY register through the viewport.
+ * @reg: ULPI register address.
+ *
+ * Busy-waits (bounded by a spin count, not wall time) for the RUN bit to
+ * clear.  Returns the register value, or 0xffffffff on timeout — callers
+ * must treat that value as an error sentinel.
+ */
+static unsigned ulpi_read(struct usb_info *ui, unsigned reg)
+{
+	unsigned timeout = 100000;
+
+	/* initiate read operation */
+	writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ;
+
+	if (timeout == 0) {
+		printk(KERN_ERR "ulpi_read: timeout %08x\n",
+			readl(USB_ULPI_VIEWPORT));
+		return 0xffffffff;
+	}
+	return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+}
+
+/*
+ * ulpi_write() - write a ULPI PHY register through the viewport.
+ * @val: value to write, @reg: ULPI register address.
+ *
+ * Returns 0 on success, -1 if the RUN bit never cleared within the
+ * spin-count budget.
+ */
+static int ulpi_write(struct usb_info *ui, unsigned val, unsigned reg)
+{
+	unsigned timeout = 10000;
+
+	/* initiate write operation */
+	writel(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) ;
+
+	if (timeout == 0) {
+		printk(KERN_ERR "ulpi_write: timeout\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_hsusb_suspend_locks_acquire() - take or release the votes that keep
+ * the system out of suspend / power collapse while the bus is active.
+ * @acquire: non-zero to hold the wake lock and tighten pm_qos latency,
+ *           zero to drop them (the wake lock is released with a HZ/2
+ *           grace timeout so pending work can finish).
+ */
+static void msm_hsusb_suspend_locks_acquire(struct usb_info *ui, int acquire)
+{
+	if (acquire) {
+		wake_lock(&ui->wlock);
+		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+				DRIVER_NAME, ui->pdata->swfi_latency);
+		/* targets like 7x30 have introduced core clock
+		 * to remove the dependency on max axi frequency
+		 */
+		if (!ui->cclk)
+			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
+					DRIVER_NAME, MSM_AXI_MAX_FREQ);
+	} else {
+		wake_lock_timeout(&ui->wlock, HZ / 2);
+		pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+					DRIVER_NAME,
+					PM_QOS_DEFAULT_VALUE);
+		if (!ui->cclk)
+			pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ,
+					DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
+	}
+}
+
+/*
+ * msm_hsusb_suspend_locks_init() - create (@init non-zero) or destroy
+ * (@init zero) the wake lock and pm_qos requirements used by
+ * msm_hsusb_suspend_locks_acquire().
+ */
+static void msm_hsusb_suspend_locks_init(struct usb_info *ui, int init)
+{
+	if (init) {
+		wake_lock_init(&ui->wlock, WAKE_LOCK_SUSPEND,
+				"usb_bus_active");
+		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
+				DRIVER_NAME,
+				PM_QOS_DEFAULT_VALUE);
+		pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ,
+				DRIVER_NAME, PM_QOS_DEFAULT_VALUE);
+	} else {
+		wake_lock_destroy(&ui->wlock);
+		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME);
+		pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME);
+	}
+}
+
+/*
+ * init_endpoints() - wire up the 32 software endpoint structures to
+ * their hardware queue heads.
+ *
+ * Index n encodes the hardware status-bit position: 0..15 are OUT0..OUT15,
+ * 16..31 are IN0..IN15.  Queue heads are laid out interleaved
+ * (OUTn at 2n, INn at 2n+1) in the dQH page.
+ */
+static void init_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++) {
+		struct usb_endpoint *ept = ui->ept + n;
+
+		ept->ui = ui;
+		ept->bit = n;
+		ept->num = n & 15;
+		ept->alloced = 0;
+
+		if (ept->bit > 15) {
+			/* IN endpoint */
+			ept->head = ui->head + (ept->num << 1) + 1;
+			ept->flags = EPT_FLAG_IN;
+		} else {
+			/* OUT endpoint */
+			ept->head = ui->head + (ept->num << 1);
+			ept->flags = 0;
+		}
+	}
+}
+
+/*
+ * usb_configure_endpoint() - program an endpoint's queue head from a
+ * descriptor.
+ * @ep: endpoint previously obtained from usb_alloc_endpoint().
+ * @ep_desc: descriptor supplying wMaxPacketSize; may be NULL to re-apply
+ *           the previously stored descriptor/max-packet value.
+ *
+ * Writes the max-packet size and zero-length-termination bit into the
+ * hardware queue head; ep0-out additionally gets interrupt-on-setup.
+ * Silently refuses a zero max-packet configuration.
+ */
+void usb_configure_endpoint(struct usb_endpoint *ep,
+			struct usb_endpoint_descriptor *ep_desc)
+{
+	unsigned cfg = 0;
+	unsigned long flags;
+	struct usb_info *ui = ep->ui;
+
+	if (!ui)
+		return;
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (ep_desc) {
+		ep->max_pkt = ep_desc->wMaxPacketSize;
+		ep->ep_descriptor = ep_desc;
+	}
+
+	if (!ep->max_pkt) {
+		printk(KERN_ERR "cannot configure zero length max pkt\n");
+		goto cfg_ept_end;
+	}
+
+	cfg = CONFIG_MAX_PKT(ep->max_pkt) | CONFIG_ZLT;
+	/* ep0 out needs interrupt-on-setup */
+	if (ep->bit == 0)
+		cfg |= CONFIG_IOS;
+	ep->head->config = cfg;
+	ep->head->next = TERMINATE;
+
+	pr_debug("ept #%d %s max:%d head:%p bit:%d\n",
+		       ep->num,
+		       (ep->flags & EPT_FLAG_IN) ? "in" : "out",
+		       ep->max_pkt, ep->head, ep->bit);
+
+cfg_ept_end:
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+EXPORT_SYMBOL(usb_configure_endpoint);
+
+#define NUM_EPTS 15	/* number of in or out non-ctrl endpoints */
+/*
+ * usb_alloc_endpoint() - claim the first free non-control endpoint in
+ * the requested direction.
+ * @direction: USB_DIR_IN or USB_DIR_OUT.
+ *
+ * Scans eps 1..15 of the chosen direction starting just past ep0.
+ * Returns the endpoint, or NULL when all are taken — callers must check
+ * (some call sites in this file do not).
+ */
+struct usb_endpoint *usb_alloc_endpoint(unsigned direction)
+{
+	struct usb_info *ui = the_usb_info;
+	struct usb_endpoint *ept = NULL;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	/* start at ep0 of the direction; the loop pre-increments past it */
+	if (direction & USB_DIR_IN)
+		ept = (&ui->ep0in);
+	else
+		ept = (&ui->ep0out);
+
+	for (i = 0; i < NUM_EPTS; i++) {
+		ept++;
+		if (!ept->alloced) {
+			ept->alloced = 1;
+			ept->ui = ui;
+			spin_unlock_irqrestore(&ui->lock, flags);
+			return ept;
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return NULL;
+}
+EXPORT_SYMBOL(usb_alloc_endpoint);
+
+/*
+ * usb_free_endpoint() - return an endpoint claimed by usb_alloc_endpoint().
+ *
+ * Clears the allocation mark and the back-pointer.  Returns 0, or
+ * -EINVAL for a NULL endpoint.  Does not flush or disable the hardware
+ * endpoint; callers are expected to have done so.
+ */
+int usb_free_endpoint(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	if (!ept)
+		return -EINVAL;
+	spin_lock_irqsave(&ui->lock, flags);
+	ept->alloced = 0;
+	ept->ui = 0;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_free_endpoint);
+
+/*
+ * usb_ept_alloc_req() - allocate a request plus its hardware transfer
+ * descriptor for an endpoint.
+ * @ept: target endpoint (must have a valid ui back-pointer).
+ * @bufsize: if non-zero, also kmalloc a data buffer of this size and
+ *           mark it owned by the request (freed in do_free_req()).
+ *
+ * Returns the public usb_request embedded in a msm_request, or NULL on
+ * any allocation failure (all partial allocations are unwound).
+ */
+struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept,
+			unsigned bufsize)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req;
+
+	if (!ui)
+		return NULL;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		goto fail1;
+
+	/* transfer descriptor must come from the DMA pool */
+	req->item = dma_pool_alloc(ui->pool, GFP_ATOMIC, &req->item_dma);
+	if (!req->item)
+		goto fail2;
+
+	if (bufsize) {
+		req->req.buf = kmalloc(bufsize, GFP_ATOMIC);
+		if (!req->req.buf)
+			goto fail3;
+		req->alloced = 1;
+	}
+
+	return &req->req;
+
+fail3:
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+fail2:
+	kfree(req);
+fail1:
+	return NULL;
+}
+EXPORT_SYMBOL(usb_ept_alloc_req);
+
+/* Release a request's resources: the data buffer (only if we allocated
+ * it), the DMA-pool transfer descriptor, and the request itself. */
+static void do_free_req(struct usb_info *ui, struct msm_request *req)
+{
+	if (req->alloced)
+		kfree(req->req.buf);
+
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+	kfree(req);
+}
+
+/*
+ * usb_ept_free_req() - free a request allocated by usb_ept_alloc_req().
+ *
+ * If the request is still queued (busy), it is only marked 'dead' and
+ * the actual release is deferred to the completion path.  Otherwise the
+ * request is unlinked from the endpoint's singly-linked queue and freed
+ * immediately.
+ */
+void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	struct msm_request *req, *temp_req, *prev_req;
+	struct usb_info *ui;
+	unsigned long flags;
+	int dead = 0;
+	if (!ept || !_req)
+		return;
+
+	ui = ept->ui;
+	if (!ui)
+		return;
+
+	req = to_msm_request(_req);
+	spin_lock_irqsave(&ui->lock, flags);
+	/* defer freeing resources if request is still busy */
+	if (req->busy)
+		dead = req->dead = 1;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	/* if req->dead, then we will clean up when the request finishes */
+	if (!dead) {
+		/* unlink from the middle of the queue, if present there */
+		temp_req = ept->req;
+		prev_req = temp_req;
+		while (temp_req != NULL) {
+			if (req == temp_req && ept->req != temp_req)
+				prev_req->next = temp_req->next;
+
+			prev_req = temp_req;
+			temp_req = temp_req->next;
+		}
+		/* or pop it from the head */
+		if (ept->req == req)
+			ept->req = req->next;
+		req->req.complete = NULL;
+		do_free_req(ui, req);
+	} else
+		pr_err("%s: req is busy, can't free req\n", __func__);
+}
+EXPORT_SYMBOL(usb_ept_free_req);
+
+/*
+ * usb_ept_enable() - enable or disable a hardware endpoint.
+ * @ept: endpoint; must have been configured with a descriptor first.
+ * @yes: non-zero to enable (also toggles data-toggle reset), 0 to disable.
+ *
+ * Programs the transfer type (bulk/interrupt) and enable bits in the
+ * per-endpoint ENDPTCTRL register.  Refuses to touch the hardware while
+ * the controller is in low-power mode.
+ */
+void usb_ept_enable(struct usb_endpoint *ept, int yes)
+{
+	struct usb_info *ui;
+	int in;
+	unsigned n;
+	unsigned char xfer;
+
+	if (!ept || !ept->ui)
+		return;
+	ui = ept->ui;
+	in = ept->flags & EPT_FLAG_IN;
+	if (!ept->ep_descriptor)
+		return;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return;
+	}
+
+	xfer = ept->ep_descriptor->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in) {
+		if (xfer == USB_ENDPOINT_XFER_BULK)
+			n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_BULK;
+		else if (xfer == USB_ENDPOINT_XFER_INT)
+			n = (n & (~CTRL_TXT_MASK)) | CTRL_TXT_INT;
+		if (yes)
+			n |= CTRL_TXE | CTRL_TXR;	/* enable + reset data toggle */
+		else
+			n &= (~CTRL_TXE);
+	} else {
+		if (xfer == USB_ENDPOINT_XFER_BULK)
+			n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_BULK;
+		else if (xfer == USB_ENDPOINT_XFER_INT)
+			n = (n & (~CTRL_RXT_MASK)) | CTRL_RXT_INT;
+		if (yes)
+			n |= CTRL_RXE | CTRL_RXR;	/* enable + reset data toggle */
+		else
+			n &= ~(CTRL_RXE);
+	}
+	/* complete all the updates to ept->head before enabling endpoint*/
+	dma_coherent_pre_ops();
+	writel(n, USB_ENDPTCTRL(ept->num));
+}
+EXPORT_SYMBOL(usb_ept_enable);
+
+/*
+ * usb_ept_start() - prime the endpoint with the queued request chain.
+ *
+ * Links the hardware queue head to the first request's transfer
+ * descriptor, primes the endpoint, and marks every descriptor in the
+ * linked chain as 'live'.  Caller holds ui->lock; the head request must
+ * not already be live (BUG otherwise).
+ */
+static void usb_ept_start(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req = ept->req;
+
+	BUG_ON(req->live);
+
+	/* link the hw queue head to the request's transaction item */
+	ept->head->next = req->item_dma;
+	ept->head->info = 0;
+
+	/* memory barrier to flush the data before priming endpoint*/
+	dma_coherent_pre_ops();
+	/* start the endpoint */
+	writel(1 << ept->bit, USB_ENDPTPRIME);
+
+	/* mark this chain of requests as live */
+	while (req) {
+		req->live = 1;
+		if (req->item->next == TERMINATE)
+			break;
+		req = req->next;
+	}
+}
+
+/*
+ * usb_ept_queue_xfer() - queue a transfer on an endpoint.
+ *
+ * Maps the request buffer for DMA, fills in the transfer descriptor
+ * (up to 16 KiB / four 4 KiB pages per request), and appends it to the
+ * endpoint's queue.  The hardware is only primed when the queue was
+ * empty; otherwise the completion path chains it in, to avoid hardware
+ * race issues.  If the controller is in LPM, attempts remote wakeup
+ * first.
+ *
+ * Returns 0 on success, -EMSGSIZE for oversized transfers, -EBUSY if
+ * the request is already queued, -ENODEV when offline (non-ep0), or a
+ * remote-wakeup error.
+ */
+int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	unsigned long flags;
+	struct msm_request *req = to_msm_request(_req);
+	struct msm_request *last;
+	struct usb_info *ui = ept->ui;
+	struct ept_queue_item *item = req->item;
+	unsigned length = req->req.length;
+
+	/* one TD addresses at most 16KB (4 x 4K pages) */
+	if (length > 0x4000)
+		return -EMSGSIZE;
+
+	if (ui->in_lpm) {
+		req->req.status = usb_remote_wakeup();
+		if (req->req.status) {
+			pr_debug("%s:RWakeup generation failed, EP = %x\n",
+							__func__, ept->bit);
+			return req->req.status;
+		}
+	}
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (req->busy) {
+		req->req.status = -EBUSY;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		printk(KERN_INFO
+		       "usb_ept_queue_xfer() tried to queue busy request\n");
+		return -EBUSY;
+	}
+
+	/* only ep0 traffic is allowed while not configured */
+	if (!ui->online && (ept->num != 0)) {
+		req->req.status = -ENODEV;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		printk(KERN_INFO "usb_ept_queue_xfer() tried to queue request"
+				"while offline; ept->bit: %x\n", ept->bit);
+		return -ENODEV;
+	}
+
+	req->busy = 1;
+	req->live = 0;
+	req->next = 0;
+	req->req.status = -EBUSY;
+
+	req->dma = dma_map_single(NULL, req->req.buf, length,
+				  (ept->flags & EPT_FLAG_IN) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	/* prepare the transaction descriptor item for the hardware */
+	item->next = TERMINATE;
+	item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE;
+	item->page0 = req->dma;
+	item->page1 = (req->dma + 0x1000) & 0xfffff000;
+	item->page2 = (req->dma + 0x2000) & 0xfffff000;
+	item->page3 = (req->dma + 0x3000) & 0xfffff000;
+
+	/* Add the new request to the end of the queue */
+	last = ept->last;
+	if (last) {
+		/* Already requests in the queue. add us to the
+		 * end, but let the completion interrupt actually
+		 * start things going, to avoid hw issues
+		 */
+		last->next = req;
+
+		/* only modify the hw transaction next pointer if
+		 * that request is not live
+		 */
+		if (!last->live)
+			last->item->next = req->item_dma;
+	} else {
+		/* queue was empty -- kick the hardware */
+		ept->req = req;
+		usb_ept_start(ept);
+	}
+	ept->last = req;
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_queue_xfer);
+
+/* Cancel all pending transfers on an endpoint (thin wrapper around
+ * flush_endpoint()); always returns 0. */
+int usb_ept_flush(struct usb_endpoint *ept)
+{
+	printk("usb_ept_flush \n");
+	flush_endpoint(ept);
+	return 0;
+}
+
+/* Currently configured wMaxPacketSize of the endpoint. */
+int usb_ept_get_max_packet(struct usb_endpoint *ept)
+{
+	return ept->max_pkt;
+}
+EXPORT_SYMBOL(usb_ept_get_max_packet);
+
+/*
+ * usb_remote_wakeup() - signal resume to a suspended host.
+ *
+ * Preconditions checked under the lock: the host must have enabled the
+ * remote-wakeup feature and the device must be configured.  Brings the
+ * controller out of LPM (synchronously finishing any deferred PHY
+ * wakeup work), then sets the force-port-resume bit if the port is
+ * still suspended.  Returns 0 on success, -ENOTSUPP or -ENODEV on the
+ * failures above.
+ */
+int usb_remote_wakeup(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (!ui->remote_wakeup) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_err("%s: remote wakeup not supported\n", __func__);
+		return -ENOTSUPP;
+	}
+
+	if (!ui->online) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_err("%s: device is not configured\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ui->in_lpm)
+		usb_lpm_exit(ui);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	/* if usb_lpm_exit is unable to set PHCD,
+	 * it would initiate workthread to set the PHCD
+	 */
+	if (cancel_work_sync(&ui->li.wakeup_phy))
+		usb_lpm_wakeup_phy(NULL);
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_err("%s: cannot bring controller out of lpm\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!usb_is_online(ui)) {
+		pr_debug("%s: enabling force resume\n", __func__);
+		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+	} else
+		pr_debug("%s: controller seems to be out of suspend already\n",
+				__func__);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_remote_wakeup);
+
+/* --- endpoint 0 handling --- */
+
+/*
+ * set_configuration() - propagate SET_CONFIGURATION to every function
+ * in the active composition.
+ * @yes: non-zero when a configuration was selected, 0 on deconfigure.
+ *
+ * Updates ui->online, then invokes each enabled function's configure()
+ * callback with its registered context.
+ */
+static void set_configuration(struct usb_info *ui, int yes)
+{
+	unsigned i;
+
+	ui->online = !!yes;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		/* skip empty slots and functions outside the composition */
+		if (!fi || !(ui->composition->functions & (1 << i)))
+			continue;
+		if (fi->func->configure)
+			fi->func->configure(yes, fi->func->context);
+	}
+}
+
+/* Final stage of the ep0 control-IN chain: the host's zero-length ACK
+ * has been received; clear the completion handler. */
+static void ep0out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	req->complete = 0;
+}
+
+/* Data stage of control-IN finished: queue the OUT status stage to
+ * receive the host's zero-length ACK. */
+static void ep0in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	/* queue up the receive of the ACK response from the host */
+	if (req->status == 0) {
+		struct usb_info *ui = ept->ui;
+		req->length = 0;
+		req->complete = ep0out_complete;
+		usb_ept_queue_xfer(&ui->ep0out, req);
+	}
+}
+
+/* Variant used when the IN data stage ended exactly on a packet
+ * boundary short of wLength: send an extra zero-length packet first,
+ * then fall through to the normal status-stage handling. */
+static void ep0in_complete_sendzero(
+		struct usb_endpoint *ept, struct usb_request *req)
+{
+	if (req->status == 0) {
+		struct usb_info *ui = ept->ui;
+		req->length = 0;
+		req->complete = ep0in_complete;
+		usb_ept_queue_xfer(&ui->ep0in, req);
+	}
+}
+
+/*
+ * ep0_status_complete() - status-stage completion used for SET_FEATURE
+ * TEST_MODE requests.
+ *
+ * Per USB 2.0 the test mode must only be entered after the status stage
+ * completes; this handler programs the selected test pattern into the
+ * PORTSC PTC field.  No-op when no test mode is pending.
+ */
+static void ep0_status_complete(
+		struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned int i;
+
+	if (!ui->test_mode)
+		return;
+
+	switch (ui->test_mode) {
+	case J_TEST:
+		pr_info("usb electrical test mode: (J)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_J_STATE, USB_PORTSC);
+		break;
+
+	case K_TEST:
+		pr_info("usb electrical test mode: (K)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_K_STATE, USB_PORTSC);
+		break;
+
+	case SE0_NAK_TEST:
+		pr_info("usb electrical test mode: (SE0-NAK)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_SE0_NAK, USB_PORTSC);
+		break;
+
+	case TST_PKT_TEST:
+		pr_info("usb electrical test mode: (TEST_PKT)\n");
+		i = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(i | PORTSC_PTC_TST_PKT, USB_PORTSC);
+		break;
+	default:
+		pr_err("usb:%s: undefined test mode: (%x)\n",
+				__func__, ui->test_mode);
+	}
+
+}
+
+/* Send a zero-length IN status packet to acknowledge a setup request;
+ * completion runs ep0_status_complete() for pending test modes. */
+static void ep0_setup_ack(struct usb_info *ui)
+{
+	struct usb_request *req = ui->setup_req;
+	req->length = 0;
+	req->complete = ep0_status_complete;
+	usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+/* Protocol-stall ep0 in both directions (STALL bits of ENDPTCTRL0) to
+ * reject an unsupported or malformed setup request. */
+static void ep0_setup_stall(struct usb_info *ui)
+{
+	writel((1<<16) | (1<<0), USB_ENDPTCTRL(0));
+}
+
+/* Queue the ep0-out request to receive 'len' bytes of control-OUT data. */
+static void ep0_setup_receive(struct usb_info *ui, unsigned len)
+{
+	ui->ep0out_req->length = len;
+	usb_ept_queue_xfer(&ui->ep0out, ui->ep0out_req);
+}
+
+/*
+ * ep0_setup_send() - start the IN data stage of a control read.
+ * @wlen: wLength from the setup packet; the response is clipped to it.
+ *
+ * Chooses the completion handler so that a short response ending on a
+ * 64-byte packet boundary is followed by a zero-length packet, as the
+ * protocol requires.
+ */
+static void ep0_setup_send(struct usb_info *ui, unsigned wlen)
+{
+	struct usb_request *req = ui->setup_req;
+	struct usb_endpoint *ept = &ui->ep0in;
+
+	/* never send more data than the host requested */
+	if (req->length > wlen)
+		req->length = wlen;
+
+	/* if we are sending a short response that ends on
+	 * a packet boundary, we'll need to send a zero length
+	 * packet as well.
+	 */
+	if ((req->length != wlen) && ((req->length & 63) == 0)) {
+		req->complete = ep0in_complete_sendzero;
+	} else {
+		req->complete = ep0in_complete;
+	}
+
+	usb_ept_queue_xfer(ept, req);
+}
+
+
+static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl,
+				struct usb_request *req);
+
/*
 * Decode and dispatch a SETUP packet received on endpoint 0.
 *
 * Copies the setup data out of the ep0-out queue head, acknowledges it
 * in ENDPTSETUPSTAT, flushes any stale ep0 transfers, then either
 * forwards the request to the owning function driver (class/vendor
 * requests) or services the USB chapter-9 standard requests inline.
 * Anything unhandled falls through to a protocol STALL.
 */
static void handle_setup(struct usb_info *ui)
{
	struct usb_ctrlrequest ctl;

	memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
	/* ack the setup-received bit for ep0 so hw can latch the next one */
	writel(EPT_RX(0), USB_ENDPTSETUPSTAT);

	/* any pending ep0 transactions must be canceled */
	flush_endpoint(&ui->ep0out);
	flush_endpoint(&ui->ep0in);

	/* let functions handle vendor and class requests */
	if ((ctl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		struct usb_function *func;

		/* Send stall if received interface number is invalid */
		if (ctl.wIndex >= ui->next_ifc_num)
			goto stall;

		func = ui->func2ifc_map[ctl.wIndex];
		if (func && func->setup) {
			if (ctl.bRequestType & USB_DIR_IN) {
				/* IN: the function fills setup_req's buffer
				 * and returns the byte count to send */
				struct usb_request *req = ui->setup_req;
				int ret = func->setup(&ctl,
						req->buf, SETUP_BUF_SIZE,
						func->context);
				if (ret >= 0) {
					req->length = ret;
					ep0_setup_send(ui, ctl.wLength);
					return;
				}
			} else {
				/* OUT: 0 = ack now, >0 = receive that many
				 * bytes of data stage first */
				int ret = func->setup(&ctl, NULL, 0,
							func->context);
				if (ret == 0) {
					ep0_setup_ack(ui);
					return;
				} else if (ret > 0) {
					ep0_setup_receive(ui, ret);
					return;
				}
			}
		}
		goto stall;
		/* not reached — the goto above always fires */
		return;
	}

	switch (ctl.bRequest) {
	case USB_REQ_GET_STATUS:
	{
		struct usb_request *req = ui->setup_req;
		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
			break;
		if (ctl.wLength != 2)
			break;
		req->length = 2;
		switch (ctl.bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_ENDPOINT:
		{
			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
			struct usb_endpoint *ept;

			if (num == 0)
				break;
			/* IN endpoints occupy the upper half of the
			 * 32-entry ept[] array */
			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
				num += 16;
			ept = ui->ept + num;
			memcpy(req->buf, &ept->ept_halted, 2);
			break;
		}

		case USB_RECIP_DEVICE:
		{
			unsigned short temp = 0;
			if (usb_msm_get_selfpowered())
				temp = 1 << USB_DEVICE_SELF_POWERED;
			if (usb_msm_get_remotewakeup())
				temp |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			memcpy(req->buf, &temp, 2);
			break;
		}

		case USB_RECIP_INTERFACE:
			/* interface status is always zero per USB 2.0 */
			memset(req->buf, 0, 2);
			break;
		default:
			printk(KERN_ERR "Unreconginized recipient\n");
			break;
		}

		ep0_setup_send(ui, 2);
		return;
	}

	case USB_REQ_GET_DESCRIPTOR:
	{
		struct usb_request *req;

		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_IN))
			break;

		req = ui->setup_req;
		if (!usb_find_descriptor(ui, &ctl, req)) {
			if (req->length > ctl.wLength)
				req->length = ctl.wLength;
			ep0_setup_send(ui, ctl.wLength);
			return;
		}
		break;
	}

	case USB_REQ_SET_FEATURE:
		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
			break;
		if (ctl.wLength != 0)
			break;
		switch (ctl.bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			if (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP) {
				ui->remote_wakeup = 1;
				ep0_setup_ack(ui);
				return;
			} else if (ctl.wValue == USB_DEVICE_TEST_MODE) {
				/* low byte of wIndex must be zero; the test
				 * selector lives in the high byte, and the
				 * J_TEST/K_TEST/... constants presumably
				 * encode the shifted value — the PORTSC
				 * write happens in ep0_status_complete()
				 * after the status stage */
				if (ctl.wIndex & 0x0f)
					break;
				ui->test_mode = ctl.wIndex;
				ep0_setup_ack(ui);
				return;
			}
			break;

		case USB_RECIP_ENDPOINT:
		{
			unsigned num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
			if ((num == 0) || (ctl.wValue != 0))
				break;
			if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
				num += 16;
			usb_ept_set_halt(ui->ept + num);
			ep0_setup_ack(ui);
			return;
		}

		default:
			pr_err("usb: %s: set_feature: unrecognized recipient\n",
					__func__);
			break;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
	{
		if ((ctl.bRequestType & (USB_DIR_MASK)) != (USB_DIR_OUT))
			break;
		if (ctl.wLength != 0)
			break;

		switch (ctl.bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			if (ctl.wValue != USB_DEVICE_REMOTE_WAKEUP)
				break;
			ui->remote_wakeup = 0;
			ep0_setup_ack(ui);
			return;
		case USB_RECIP_ENDPOINT:
		{
			unsigned num;
			if (ctl.wValue != USB_ENDPOINT_HALT)
				break;
			num = ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
			if (num != 0) {
				if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
					num += 16;
				usb_ept_clear_halt(ui->ept + num);
			}
			ep0_setup_ack(ui);
			return;
		}
		default:
			pr_info("unsupported clear feature command\n");
			pr_info("Request-type:(%08x) wValue:(%08x) "
					"wIndex:(%08x) wLength:(%08x)\n",
						ctl.bRequestType, ctl.wValue,
						ctl.wIndex, ctl.wLength);
			break;
		}
		break;
	}

	case USB_REQ_SET_INTERFACE:
		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
			!= (USB_DIR_OUT | USB_RECIP_INTERFACE))
			break;
		/* NOTE(review): wIndex is not validated against
		 * ui->next_ifc_num here (the class/vendor path above does),
		 * so a hostile host can index func2ifc_map out of bounds */
		if (ui->func2ifc_map[ctl.wIndex]->set_interface) {
			ui->func2ifc_map[ctl.wIndex]->set_interface(ctl.wIndex,
					ctl.wValue,
					ui->func2ifc_map[ctl.wIndex]->context);
			ep0_setup_ack(ui);
			return;
		}
		break;
	case USB_REQ_GET_INTERFACE:
		{
		struct usb_function *f;
		struct usb_request *req = ui->setup_req;
		int ifc_num = ctl.wIndex;
		int ret = 0;

		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
					!= (USB_DIR_IN | USB_RECIP_INTERFACE))
			break;

		/* NOTE(review): ifc_num is not bounds-checked against
		 * next_ifc_num before indexing func2ifc_map */
		f = ui->func2ifc_map[ifc_num];
		if (!f->get_interface)
			break;
		ret = f->get_interface(ifc_num,
				ui->func2ifc_map[ifc_num]->context);
		if (ret < 0)
			break;
		/* NOTE(review): copies wLength bytes out of a 4-byte int —
		 * overreads 'ret' (and could overrun req->buf) if the host
		 * sends wLength > sizeof(int); spec-conformant hosts use
		 * wLength == 1 here */
		req->length = ctl.wLength;
		memcpy(req->buf, &ret, req->length);
		ep0_setup_send(ui, ctl.wLength);
		return;
		}
	case USB_REQ_SET_CONFIGURATION:
		if ((ctl.bRequestType & USB_DIR_MASK) != USB_DIR_OUT)
			break;
		ui->configured = ctl.wValue;
		pr_info("hsusb set_configuration wValue = %d usbcmd = %x\n",
						ctl.wValue, readl(USB_USBCMD));
		set_configuration(ui, ctl.wValue);
		ep0_setup_ack(ui);
		/* hand the rest of the configure work to the usb_do_work
		 * thread outside interrupt context */
		ui->flags = USB_FLAG_CONFIGURE;
		if (ui->configured)
			ui->usb_state = USB_STATE_CONFIGURED;
		queue_delayed_work(usb_work, &ui->work, 0);
		return;

	case USB_REQ_GET_CONFIGURATION:
	{
		unsigned conf;
		struct usb_request *req = ui->setup_req;
		req->length = 1;
		/* only the low byte of 'configured' is sent (length == 1) */
		conf = ui->configured;
		memcpy(req->buf, &conf, req->length);
		ep0_setup_send(ui, ctl.wLength);
		return;
	}

	case USB_REQ_SET_ADDRESS:
		if ((ctl.bRequestType & (USB_DIR_MASK | USB_RECIP_MASK))
			!= (USB_DIR_OUT | USB_RECIP_DEVICE))
			break;
		ui->usb_state = USB_STATE_ADDRESS;
		/* address in bits 31:25; bit 24 presumably tells the
		 * controller to apply it after the status stage — confirm
		 * against the DEVICEADDR register description */
		writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
		ep0_setup_ack(ui);
		return;
	}

stall:
	ep0_setup_stall(ui);
	return;

}
+
+static void handle_endpoint(struct usb_info *ui, unsigned bit)
+{
+	struct usb_endpoint *ept = ui->ept + bit;
+	struct msm_request *req;
+	unsigned long flags;
+	unsigned info;
+
+#if 0
+	printk(KERN_INFO "handle_endpoint() %d %s req=%p(%08x)\n",
+	       ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+	       ept->req, ept->req ? ept->req->item_dma : 0);
+#endif
+	if (!ept) {
+		pr_err("%s: ept is null: ep bit = %d\n", __func__, bit);
+		return;
+	}
+
+	/* expire all requests that are no longer active */
+	spin_lock_irqsave(&ui->lock, flags);
+	while ((req = ept->req)) {
+		/* clean speculative fetches on req->item->info */
+		dma_coherent_post_ops();
+		info = req->item->info;
+
+		/* if we've processed all live requests, time to
+		 * restart the hardware on the next non-live request
+		 */
+		if (!req->live) {
+			usb_ept_start(ept);
+			break;
+		}
+
+		/* if the transaction is still in-flight, stop here */
+		if (info & INFO_ACTIVE)
+			break;
+
+		/* advance ept queue to the next request */
+		ept->req = req->next;
+		if (ept->req == 0)
+			ept->last = 0;
+
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
+			/* XXX pass on more specific error code */
+			req->req.status = -EIO;
+			req->req.actual = 0;
+			printk(KERN_INFO "hsusb: ept %d %s error. info=%08x\n",
+				ept->num,
+				(ept->flags & EPT_FLAG_IN) ? "in" : "out",
+			       info);
+		} else {
+			req->req.status = 0;
+			req->req.actual = req->req.length - ((info >> 16) & 0x7FFF);
+		}
+		req->busy = 0;
+		req->live = 0;
+		if (req->dead)
+			do_free_req(ui, req);
+
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(ept, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
/* Cancel in-flight transactions for the endpoints in @bits via the
 * hardware flush register, retrying until ENDPTSTAT reports them idle.
 */
static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
{
	/* flush endpoint, canceling transactions
	** - this can take a "large amount of time" (per databook)
	** - the flush can fail in some cases, thus we check STAT
	**   and repeat if we're still operating
	**   (does the fact that this doesn't use the tripwire matter?!)
	*/

	/* registers are unreachable while the link is in low-power mode */
	if (ui->in_lpm) {
		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
		return;
	}

	/* NOTE(review): both loops are unbounded — wedged hardware would
	 * spin here forever; the databook's "large amount of time" comment
	 * above suggests this was accepted deliberately */
	do {
		writel(bits, USB_ENDPTFLUSH);
		while (readl(USB_ENDPTFLUSH) & bits)
			udelay(100);
	} while (readl(USB_ENDPTSTAT) & bits);
}
+
+static void flush_endpoint_sw(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req, *next;
+	unsigned long flags;
+
+	/* inactive endpoints have nothing to do here */
+	if (!ui || !ept->alloced || !ept->max_pkt)
+		return;
+
+	/* put the queue head in a sane state */
+	ept->head->info = 0;
+	ept->head->next = TERMINATE;
+
+	/* cancel any pending requests */
+	spin_lock_irqsave(&ui->lock, flags);
+	req = ept->req;
+	ept->req = 0;
+	ept->last = 0;
+	while (req != 0) {
+		next = req->next;
+
+		req->busy = 0;
+		req->live = 0;
+		req->req.status = -ENODEV;
+		req->req.actual = 0;
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(ept, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+		if (req->dead)
+			do_free_req(ui, req);
+		req = req->next;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void flush_endpoint(struct usb_endpoint *ept)
+{
+	if (!ept->ui)
+		return;
+
+	flush_endpoint_hw(ept->ui, (1 << ept->bit));
+	flush_endpoint_sw(ept);
+}
+
+static void flush_all_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	flush_endpoint_hw(ui, 0xffffffff);
+
+	for (n = 0; n < 32; n++)
+		flush_endpoint_sw(ui->ept + n);
+}
+
/* delays used by the LPM / VBUS handling paths elsewhere in this file */
#define HW_DELAY_FOR_LPM msecs_to_jiffies(1000)
#define DELAY_FOR_USB_VBUS_STABILIZE msecs_to_jiffies(500)
/*
 * Main controller interrupt handler.
 *
 * Handles, in order: wake from low-power mode, port change (resume /
 * speed detection), bus reset, bus suspend, transfer completion and
 * setup packets (STS_UI), and OTG B-session-valid changes (VBUS
 * connect/disconnect).  Heavier work is deferred to the usb_do_work
 * workqueue via ui->flags.
 */
static irqreturn_t usb_interrupt(int irq, void *data)
{
	struct usb_info *ui = data;
	unsigned n;
	unsigned speed;

	/* controller not started (or being torn down) — nothing to do */
	if (!ui->active)
		return IRQ_HANDLED;

	/* an interrupt while in LPM means remote activity: exit LPM */
	if (ui->in_lpm) {
		usb_lpm_exit(ui);
		return IRQ_HANDLED;
	}

	/* read-and-ack the status bits we are about to service */
	n = readl(USB_USBSTS);
	writel(n, USB_USBSTS);

	/* somehow we got an IRQ while in the reset sequence: ignore it */
	if (ui->running == 0) {
		pr_err("%s: ui->running is zero\n", __func__);
		return IRQ_HANDLED;
	}

	if (n & STS_PCI) {
		/* port change: latch the negotiated speed once the port
		 * is out of reset */
		if (!(readl(USB_PORTSC) & PORTSC_PORT_RESET)) {
			speed = (readl(USB_PORTSC) & PORTSC_PORT_SPEED_MASK);
			switch (speed) {
			case PORTSC_PORT_SPEED_HIGH:
				pr_info("hsusb resume: speed = HIGH\n");
				ui->speed = USB_SPEED_HIGH;
				break;

			case PORTSC_PORT_SPEED_FULL:
				pr_info("hsusb resume: speed = FULL\n");
				ui->speed = USB_SPEED_FULL;
				break;

			default:
				pr_err("hsusb resume: Unknown Speed\n");
				ui->speed = USB_SPEED_UNKNOWN;
				break;
			}
		}

		/* pci interrupt would also be generated when resuming
		 * from bus suspend, following check would avoid kick
		 * starting usb main thread in case of pci interrupts
		 * during enumeration
		 */
		if (ui->configured && ui->chg_type == USB_CHG_TYPE__SDP) {
			ui->usb_state = USB_STATE_CONFIGURED;
			ui->flags = USB_FLAG_RESUME;
			queue_delayed_work(usb_work, &ui->work, 0);
		}
	}

	if (n & STS_URI) {
		pr_info("hsusb reset interrupt\n");
		ui->usb_state = USB_STATE_DEFAULT;
		ui->configured = 0;
		schedule_work(&ui->chg_stop);

		/* ack pending setup/complete bits and flush everything */
		writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
		writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
		writel(0xffffffff, USB_ENDPTFLUSH);
		/* NOTE(review): only endpoint 1's control register is
		 * cleared here — confirm whether the remaining endpoints
		 * are reset elsewhere on the reset path */
		writel(0, USB_ENDPTCTRL(1));

		if (ui->online != 0) {
			/* marking us offline will cause ept queue attempts to fail */
			ui->online = 0;

			flush_all_endpoints(ui);

			/* XXX: we can't seem to detect going offline, so deconfigure
			 * XXX: on reset for the time being
			 */
			set_configuration(ui, 0);
		}
	}

	if (n & STS_SLI) {
		pr_info("hsusb suspend interrupt\n");
		ui->usb_state = USB_STATE_SUSPENDED;

		/* stop usb charging */
		schedule_work(&ui->chg_stop);
	}

	if (n & STS_UI) {
		/* setup packet pending on ep0? */
		n = readl(USB_ENDPTSETUPSTAT);
		if (n & EPT_RX(0))
			handle_setup(ui);

		/* retire completed transfers, one endpoint bit at a time */
		n = readl(USB_ENDPTCOMPLETE);
		writel(n, USB_ENDPTCOMPLETE);
		while (n) {
			unsigned bit = __ffs(n);
			handle_endpoint(ui, bit);
			n = n & (~(1 << bit));
		}
	}

	/* read-and-ack OTG status for VBUS session change detection */
	n = readl(USB_OTGSC);
	writel(n, USB_OTGSC);

	if (n & OTGSC_BSVIS) {
		/*Verify B Session Valid Bit to verify vbus status*/
		if (B_SESSION_VALID & n)	{
			pr_info("usb cable connected\n");
			ui->usb_state = USB_STATE_POWERED;
			ui->flags = USB_FLAG_VBUS_ONLINE;
			/* Wait for 100ms to stabilize VBUS before initializing
			 * USB and detecting charger type
			 */
			queue_delayed_work(usb_work, &ui->work, 0);
		} else {
			int i;

			usb_disable_pullup(ui);

			printk(KERN_INFO "usb cable disconnected\n");
			ui->usb_state = USB_STATE_NOTATTACHED;
			ui->configured = 0;
			/* notify every enabled function in the current
			 * composition about the disconnect */
			for (i = 0; i < ui->num_funcs; i++) {
				struct usb_function_info *fi = ui->func[i];
				if (!fi ||
				!(ui->composition->functions & (1 << i)))
					continue;
				if (fi->func->disconnect)
					fi->func->disconnect
						(fi->func->context);
			}
			ui->flags = USB_FLAG_VBUS_OFFLINE;
			queue_delayed_work(usb_work, &ui->work, 0);
		}
	}

	return IRQ_HANDLED;
}
+
/* One-time software setup of the driver state: endpoint-list page,
 * endpoint table, the two ep0 endpoints with their pre-allocated
 * requests, and the work items used by the state machine.
 */
static void usb_prepare(struct usb_info *ui)
{
	/* ui->buf is the 4K DMA-coherent page holding the endpoint queue
	 * heads; the hardware's endpoint list pointer is set to its DMA
	 * address in usb_hw_reset() */
	memset(ui->buf, 0, 4096);
	ui->head = (void *) (ui->buf + 0);

	/* only important for reset/reinit */
	memset(ui->ept, 0, sizeof(ui->ept));
	ui->next_item = 0;
	ui->speed = USB_SPEED_UNKNOWN;

	init_endpoints(ui);

	/* ep0 is always 64-byte max-packet, owned by this driver */
	ui->ep0in.max_pkt = 64;
	ui->ep0in.ui = ui;
	ui->ep0in.alloced = 1;
	ui->ep0out.max_pkt = 64;
	ui->ep0out.ui = ui;
	ui->ep0out.alloced = 1;

	/* NOTE(review): allocation results are not checked — a NULL here
	 * would fault later in ep0_setup_ack()/handle_setup() */
	ui->setup_req = usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE);
	ui->ep0out_req = usb_ept_alloc_req(&ui->ep0out, ui->ep0out.max_pkt);

	INIT_WORK(&ui->chg_stop, usb_chg_stop);
	INIT_WORK(&ui->li.wakeup_phy, usb_lpm_wakeup_phy);
	INIT_DELAYED_WORK(&ui->work, usb_do_work);
	INIT_DELAYED_WORK(&ui->chg_legacy_det, usb_chg_legacy_detect);
}
+
+static int usb_is_online(struct usb_info *ui)
+{
+	/* continue lpm if bus is suspended or disconnected or stopped*/
+	if (((readl(USB_PORTSC) & PORTSC_SUSP) == PORTSC_SUSP) ||
+			((readl(USB_PORTSC) & PORTSC_CCS) == 0) ||
+			((readl(USB_USBCMD) & USBCMD_RS) == 0))
+		return 0;
+
+	pr_debug("usb is online\n");
+	pr_debug("usbcmd:(%08x) usbsts:(%08x) portsc:(%08x)\n",
+			readl(USB_USBCMD),
+			readl(USB_USBSTS),
+			readl(USB_PORTSC));
+	return -1;
+}
+
/* Bring the PHY out of low-power mode: re-enable the ULPI STP signal
 * and clear the PORTSC PHCD (phy clock disable) bit, retrying a few
 * times.  Returns 0 on success, -1 if PHCD refuses to clear.
 */
static int usb_wakeup_phy(struct usb_info *ui)
{
	int i;

	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);

	/* some circuits automatically clear PHCD bit */
	for (i = 0; i < 5 && (readl(USB_PORTSC) & PORTSC_PHCD); i++) {
		writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
		msleep(1);
	}

	if ((readl(USB_PORTSC) & PORTSC_PHCD)) {
		pr_err("%s: cannot clear phcd bit\n", __func__);
		return -1;
	}

	return 0;
}
+
/* Put the PHY into low-power mode by setting PORTSC PHCD, after
 * configuring the ULPI comparators appropriately for the phy type.
 * Returns 0 on success, -1 if the bus came back online or PHCD could
 * not be set (in which case the hardware is reset to recover).
 */
static int usb_suspend_phy(struct usb_info *ui)
{
	int i;
	unsigned long flags;

	/* bail out early if the bus is still active */
	if (usb_is_online(ui))
		return -1;

	/* spec talks about following bits in LPM for external phy.
	 * But they are ignored because
	 * 1. disabling interface protection circuit: by disabling
	 * interface protection circuit we cannot come out
	 * of lpm as async interrupts would be disabled
	 * 2. setting the suspendM bit: this bit would be set by usb
	 * controller once we set phcd bit.
	 */
	switch (PHY_TYPE(ui->phy_info)) {
	case USB_PHY_INTEGRATED:
		/* NOTE(review): ULPI register 0x14 read result is discarded
		 * — presumably the read itself has a side effect on pre-45nm
		 * phys; confirm against the phy documentation */
		if (!is_phy_45nm())
			ulpi_read(ui, 0x14);

		/* turn on/off otg comparators */
		if (ui->vbus_sn_notif &&
			ui->usb_state == USB_STATE_NOTATTACHED)
			ulpi_write(ui, 0x00, 0x30);
		else
			ulpi_write(ui, 0x01, 0x30);

		if (!is_phy_45nm())
			ulpi_write(ui, 0x08, 0x09);

		break;

	case USB_PHY_UNDEFINED:
		pr_err("%s: undefined phy type\n", __func__);
		return -1;
	}

	/* loop for large amount of time */
	for (i = 0; i < 500; i++) {
		/* re-check under the lock that the bus stayed idle before
		 * each attempt to set PHCD */
		spin_lock_irqsave(&ui->lock, flags);
		if (usb_is_online(ui)) {
			spin_unlock_irqrestore(&ui->lock, flags);
			return -1;
		}
		/* set phy to be in lpm */
		writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
		spin_unlock_irqrestore(&ui->lock, flags);

		msleep(1);
		if (readl(USB_PORTSC) & PORTSC_PHCD)
			goto blk_stp_sig;
	}

	if (!(readl(USB_PORTSC) & PORTSC_PHCD)) {
		pr_err("unable to set phcd of portsc reg\n");
		pr_err("Reset HW link and phy to recover from phcd error\n");
		usb_hw_reset(ui);
		return -1;
	}

	/* we have to set this bit again to work-around h/w bug */
	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);

blk_stp_sig:
	/* block the stop signal */
	writel(readl(USB_USBCMD) | ULPI_STP_CTRL, USB_USBCMD);

	return 0;
}
+
/* SW workarounds
Issue #2	- Integrated PHY calibration
Symptom		- Electrical compliance failure in eye-diagram tests
SW workaround	- Raise the PHY amplitude to 400mV

Issue #3	- AHB posted writes
Symptom		- USB instability
SW workaround	- Program the AHB transactor ON, with BURST disabled and
		  unspecified-length INCR bursts enabled
*/
/* Full hardware reset of the PHY and link controller: wake/reset the
 * PHY, reset the link, select device mode and the ULPI phy interface,
 * apply the SW workarounds documented above, program the ULPI interrupt
 * masks, and point the controller at the endpoint list.  Always
 * returns 0.
 */
static int usb_hw_reset(struct usb_info *ui)
{
	unsigned i;
	struct msm_hsusb_platform_data *pdata;
	unsigned long timeout;
	unsigned val = 0;

	pdata = ui->pdev->dev.platform_data;

	clk_enable(ui->clk);
	/* reset the phy before resetting link */
	if (readl(USB_PORTSC) & PORTSC_PHCD)
		usb_wakeup_phy(ui);
	/* rpc call for phy_reset */
	if (ui->pdata->phy_reset)
		ui->pdata->phy_reset(ui->addr);
	else
		msm_hsusb_phy_reset();
	/* Give some delay to settle phy after reset */
	msleep(100);

	/* RESET */
	writel(USBCMD_RESET, USB_USBCMD);
	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
	/* the controller clears USBCMD_RESET when the reset completes */
	while (readl(USB_USBCMD) & USBCMD_RESET) {
		if (time_after(jiffies, timeout)) {
			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
			break;
		}
		msleep(1);
	}

	/* select DEVICE mode with SDIS active */
	writel((USBMODE_SDIS | USBMODE_DEVICE), USB_USBMODE);
	msleep(1);

	/* select ULPI phy */
	i = (readl(USB_PORTSC) & ~PORTSC_PTS);
	writel(i | PORTSC_PTS_ULPI, USB_PORTSC);
	/* set usb controller interrupt latency to zero*/
	writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
							USB_USBCMD);

	/* If the target is 7x01 and roc version is > 1.2, set
	 * the AHB mode to 2 for maximum performance, else set
	 * it to 1, to bypass the AHB transactor for stability.
	 */
	if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
		if (pdata->soc_version >= SOC_ROC_2_0)
			writel(0x02, USB_ROC_AHB_MODE);
		else
			writel(0x01, USB_ROC_AHB_MODE);
	} else {
		unsigned cfg_val;

		/* Raise  amplitude to 400mV
		 * SW workaround, Issue#2
		 */
		cfg_val = ulpi_read(ui, ULPI_CONFIG_REG);
		cfg_val |= ULPI_AMPLITUDE_MAX;
		ulpi_write(ui, cfg_val, ULPI_CONFIG_REG);

		/* SW workaround, Issue#3: AHB posted writes */
		writel(0x0, USB_AHB_BURST);
		writel(0x00, USB_AHB_MODE);
	}

	/* TBD: do we have to add DpRise, ChargerRise and
	 * IdFloatRise for 45nm
	 */
	/* Disable VbusValid and SessionEnd comparators */
	val = ULPI_VBUS_VALID | ULPI_SESS_END;

	/* enable id interrupt only when transceiver is available */
	if (ui->xceiv)
		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE, USB_OTGSC);
	else {
		/* no OTG transceiver: disable the ID pull-up and mask the
		 * related ULPI interrupt sources as well */
		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
							USB_OTGSC);
		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
	}
	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
	ulpi_write(ui, val, ULPI_INT_FALL_CLR);

	/* we are just setting the pointer in the hwblock. Since the
	 * endpoint isnt enabled the hw block doenst read the contents
	 * of ui->dma - so we dont need a barrier here
	 * */
	writel(ui->dma, USB_ENDPOINTLISTADDR);

	clk_disable(ui->clk);

	return 0;
}
+
/* Software-level reset: stop the state machine, reset the hardware,
 * re-initialize ep0, cancel all outstanding transfers, deconfigure,
 * and mark the state machine running again.
 */
static void usb_reset(struct usb_info *ui)
{
	unsigned long flags;

	/* stop interrupt processing (usb_interrupt bails when !running) */
	spin_lock_irqsave(&ui->lock, flags);
	ui->running = 0;
	spin_unlock_irqrestore(&ui->lock, flags);

#if 0
	/* we should flush and shutdown cleanly if already running */
	writel(0xffffffff, USB_ENDPTFLUSH);
	msleep(2);
#endif

	if (usb_hw_reset(ui)) {
		pr_info("%s: h/w reset failed\n", __func__);
		return;
	}

	/* re-program the ep0 queue heads after the hardware reset */
	usb_configure_endpoint(&ui->ep0in, NULL);
	usb_configure_endpoint(&ui->ep0out, NULL);

	/* marking us offline will cause ept queue attempts to fail */
	ui->online = 0;

	/* terminate any pending transactions */
	flush_all_endpoints(ui);

	set_configuration(ui, 0);

	spin_lock_irqsave(&ui->lock, flags);
	ui->running = 1;
	spin_unlock_irqrestore(&ui->lock, flags);
}
+
+static void usb_enable(void *handle, int enable)
+{
+	struct usb_info *ui = handle;
+	unsigned long flags;
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (enable) {
+		ui->flags |= USB_FLAG_RESET;
+		ui->active = 1;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_do_work(&ui->work.work);
+	} else {
+		ui->active = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_clk_disable(ui);
+		msm_hsusb_suspend_locks_acquire(ui, 0);
+	}
+}
+
/* callbacks handed to the OTG driver; usb_enable() is invoked by the
 * OTG core to switch the peripheral controller on or off */
static struct msm_otg_ops dcd_ops = {
	.request = usb_enable,
};
+
/* Start the device controller once all function drivers are bound
 * (called from usb_try_to_bind()): bind the enabled functions, then
 * either register with the OTG transceiver (if present) or set up the
 * PMIC VBUS notification path and kick the state machine directly.
 */
void usb_start(struct usb_info *ui)
{
	int i, ret;

	/* bind every enabled function of the active composition */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];
		if (!fi || !(ui->composition->functions & (1<<i)))
			continue;
		if (fi->enabled) {
			pr_info("usb_bind_func() (%s)\n", fi->func->name);
			fi->func->bind(fi->func->context);
		}
	}

	ui->clk_enabled = 0;
	ui->vreg_enabled = 0;

	ui->xceiv = msm_otg_get_transceiver();
	if (ui->xceiv) {
		/* OTG present: defer registration to the work thread */
		ui->flags = USB_FLAG_REG_OTG;
		queue_delayed_work(usb_work, &ui->work, 0);
	} else {
		/*Initialize pm app RPC */
		ret = msm_pm_app_rpc_init();
		if (ret) {
			pr_err("%s: pm_app_rpc connect failed\n", __func__);
			goto out;
		}
		pr_info("%s: pm_app_rpc connect success\n", __func__);

		ret = msm_pm_app_register_vbus_sn(&msm_hsusb_set_vbus_state);
		if (ret) {
			pr_err("%s:PMIC VBUS SN notif not supported\n", \
					__func__);
			msm_pm_app_rpc_deinit();
			goto out;
		}
		pr_info("%s:PMIC VBUS SN notif supported\n", \
					__func__);

		ret = msm_pm_app_enable_usb_ldo(1);
		if (ret) {
			pr_err("%s: unable to turn on internal LDO", \
					__func__);
			msm_pm_app_unregister_vbus_sn(
					&msm_hsusb_set_vbus_state);
			msm_pm_app_rpc_deinit();
			goto out;
		}
		ui->vbus_sn_notif = 1;
out:
		/* deliberate best-effort: even if the PMIC notification
		 * path failed, start the controller anyway */
		ui->active = 1;
		ui->flags |= (USB_FLAG_START | USB_FLAG_RESET);
		queue_delayed_work(usb_work, &ui->work, 0);
	}

}
+
/* registry of every function driver that has called
 * usb_function_register(); guarded by usb_function_list_lock */
static LIST_HEAD(usb_function_list);
static DEFINE_MUTEX(usb_function_list_lock);
+
+
+static struct usb_function_info *usb_find_function(const char *name)
+{
+	struct list_head *entry;
+	list_for_each(entry, &usb_function_list) {
+		struct usb_function_info *fi =
+			list_entry(entry, struct usb_function_info, list);
+		if (fi) {
+			if (!strcmp(name, fi->func->name))
+				return fi;
+		}
+	}
+
+	return NULL;
+}
+
+static void usb_try_to_bind(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long enabled_functions = 0;
+	int i;
+
+	if (!ui || ui->bound || !ui->pdev || !ui->composition)
+		return;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		if (ui->func[i])
+			enabled_functions |= (1 << i);
+	}
+	if ((enabled_functions & ui->composition->functions)
+					!= ui->composition->functions)
+		return;
+
+	usb_set_composition(ui->composition->product_id);
+	usb_configure_device_descriptor(ui);
+
+	/* we have found all the needed functions */
+	ui->bound = 1;
+	printk(KERN_INFO "msm_hsusb: functions bound. starting.\n");
+	usb_start(ui);
+}
+
+static int usb_get_function_index(const char *name)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	for (i = 0; i < ui->num_funcs; i++) {
+		if (!strcmp(name, ui->functions_map[i].name))
+			return i;
+	}
+	return -1;
+}
+
+int usb_function_register(struct usb_function *driver)
+{
+	struct usb_info *ui = the_usb_info;
+	struct usb_function_info *fi;
+	int ret = 0;
+	int index;
+
+	mutex_lock(&usb_function_list_lock);
+
+	index = usb_get_function_index(driver->name);
+	if (index < 0) {
+		pr_err("%s: unsupported function = %s\n",
+				__func__, driver->name);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	fi = kzalloc(sizeof(*fi), GFP_KERNEL);
+	if (!fi) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fi->func = driver;
+	list_add(&fi->list, &usb_function_list);
+	ui->func[index] = fi;
+	fi->func->ep0_out_req = ui->ep0out_req;
+	fi->func->ep0_in_req = ui->setup_req;
+	fi->func->ep0_out = &ui->ep0out;
+	fi->func->ep0_in = &ui->ep0in;
+	pr_info("%s: name = '%s',  map = %d\n", __func__, driver->name, index);
+
+	usb_try_to_bind();
+fail:
+	mutex_unlock(&usb_function_list_lock);
+	return ret;
+}
+EXPORT_SYMBOL(usb_function_register);
+
+static unsigned short usb_validate_product_id(unsigned short pid)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!ui || !ui->pdata)
+		return -1;
+
+	/* set idProduct based on which functions are enabled */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].product_id == pid)
+			break;
+	}
+
+	if (i < ui->pdata->num_compositions) {
+		struct usb_composition *comp = &ui->pdata->compositions[i];
+		for (i = 0; i < ui->num_funcs; i++) {
+			if (comp->functions & (1 << i)) {
+				if (!ui->func[i]) {
+					pr_err("%s: func(%d) not available\n",
+								__func__, i);
+					return 0;
+				}
+			}
+		}
+		return comp->product_id;
+	} else
+		pr_err("%s: Product id (%x) is not supported\n", __func__, pid);
+	return 0;
+}
+
+static unsigned short usb_get_product_id(unsigned long enabled_functions)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+
+	if (!(ui && ui->pdata))
+		return -1;
+
+	/* set idProduct based on which functions are enabled */
+	for (i = 0; i < ui->pdata->num_compositions; i++) {
+		if (ui->pdata->compositions[i].functions == enabled_functions)
+			return ui->pdata->compositions[i].product_id;
+	}
+	return 0;
+}
+
/* Release per-composition state ahead of a composition switch: free
 * the allocated string descriptors and reset the descriptor/interface
 * allocators.
 *
 * NOTE(review): the loop frees from index 0, yet strdesc_index is
 * reset to 1 — confirm slot 0 (typically the LANGID descriptor) is
 * heap-allocated like the others, or the loop should start at 1.
 */
static void usb_uninit(struct usb_info *ui)
{
	int i;

	for (i = 0; i < ui->strdesc_index; i++)
		kfree(ui->strdesc[i]);
	ui->strdesc_index = 1;
	ui->next_ifc_num = 0;
}
+
/* Select the composition matching @pid as the active one and update
 * each registered function's enabled flag from the composition's
 * function bitmask.  Returns the pid on success, 0 on failure.
 */
static unsigned short usb_set_composition(unsigned short pid)
{
	struct usb_info *ui = the_usb_info;
	int i;

	if (!(ui && ui->pdata))
		return 0;

	/* Retrieve product id on enabled functions */
	for (i = 0; i < ui->pdata->num_compositions; i++) {
		if (ui->pdata->compositions[i].product_id == pid) {
			ui->composition = &ui->pdata->compositions[i];
			/* 'i' is reused for the inner loop — safe only
			 * because every path below returns rather than
			 * continuing the outer loop */
			for (i = 0; i < ui->num_funcs; i++) {
				struct usb_function_info *fi = ui->func[i];
				/* NOTE(review): "ui->func" tests the array
				 * pointer, not the element — the per-slot
				 * check is the 'fi' test */
				if (ui->func && fi && fi->func) {
					fi->enabled = (ui->composition->
							functions >> i) & 1;
				}
			}
			pr_info("%s: composition set to product id = %x\n",
				__func__, ui->composition->product_id);
			return ui->composition->product_id;
		}
	}
	pr_err("%s: product id (%x) not supported\n", __func__, pid);
	return 0;
}
+
/* Switch the device to a different composition (product id) at
 * runtime: stop the controller (soft-disconnecting from the host),
 * unbind the current functions, select and bind the new composition,
 * and restart via the work thread.
 */
static void usb_switch_composition(unsigned short pid)
{
	struct usb_info *ui = the_usb_info;
	int i;
	unsigned long flags;


	if (!ui->active)
		return;
	/* reject pids whose functions are not all registered */
	if (!usb_validate_product_id(pid))
		return;

	disable_irq(ui->irq);
	if (cancel_delayed_work_sync(&ui->work))
		pr_info("%s: Removed work successfully\n", __func__);
	if (ui->running) {
		/* stop the state machine before touching hardware */
		spin_lock_irqsave(&ui->lock, flags);
		ui->running = 0;
		ui->online = 0;
		ui->bound = 0;
		spin_unlock_irqrestore(&ui->lock, flags);
		/* we should come out of lpm to access registers */
		if (ui->in_lpm) {
			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
				disable_irq(ui->gpio_irq[0]);
				disable_irq(ui->gpio_irq[1]);
			}

			if (ui->usb_state == USB_STATE_NOTATTACHED
						&& ui->vbus_sn_notif)
				msm_pm_app_enable_usb_ldo(1);

			usb_lpm_exit(ui);
			/* if a phy wakeup was queued, run it synchronously
			 * now instead */
			if (cancel_work_sync(&ui->li.wakeup_phy))
				usb_lpm_wakeup_phy(NULL);
			ui->in_lpm = 0;
		}
		/* disable usb and session valid interrupts */
		writel(0, USB_USBINTR);
		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);

		/* stop the controller */
		usb_disable_pullup(ui);
		ui->usb_state = USB_STATE_NOTATTACHED;
		switch_set_state(&ui->sdev, 0);
		/* Before starting again, wait for 300ms
		 * to make sure host detects soft disconnection
		 **/
		msleep(300);
	}

	/* tear down the functions of the outgoing composition */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];
		if (!fi || !fi->func || !fi->enabled)
			continue;
		if (fi->func->configure)
			fi->func->configure(0, fi->func->context);
		if (fi->func->unbind)
			fi->func->unbind(fi->func->context);
	}

	usb_uninit(ui);
	usb_set_composition(pid);
	usb_configure_device_descriptor(ui);

	/* initialize functions */
	for (i = 0; i < ui->num_funcs; i++) {
		struct usb_function_info *fi = ui->func[i];
		if (!fi || !(ui->composition->functions & (1 << i)))
			continue;
		if (fi->enabled) {
			if (fi->func->bind)
				fi->func->bind(fi->func->context);
		}
	}

	/* restart through the work thread with a full reset */
	ui->bound = 1;
	ui->flags = USB_FLAG_RESET;
	queue_delayed_work(usb_work, &ui->work, 0);
	enable_irq(ui->irq);
}
+
+void usb_function_enable(const char *function, int enable)
+{
+	struct usb_function_info *fi;
+	struct usb_info *ui = the_usb_info;
+	unsigned long functions_mask;
+	int curr_enable;
+	unsigned short pid;
+	int i;
+
+	if (!ui)
+		return;
+
+	pr_info("%s: name = %s, enable = %d\n", __func__, function, enable);
+
+	fi = usb_find_function(function);
+	if (!fi) {
+		pr_err("%s: function (%s) not registered with DCD\n",
+							__func__, function);
+		return;
+	}
+	if (fi->enabled == enable) {
+		pr_err("%s: function (%s) state is same\n",
+						__func__, function);
+		return;
+	}
+	functions_mask = 0;
+	curr_enable = fi->enabled;
+	fi->enabled = enable;
+	for (i = 0; i < ui->num_funcs; i++) {
+		struct usb_function_info *fi = ui->func[i];
+		if (fi && fi->enabled)
+			functions_mask |= (1 << i);
+	}
+
+	pid = usb_get_product_id(functions_mask);
+	if (!pid) {
+		fi->enabled = curr_enable;
+		pr_err("%s: mask (%lx) not matching with any products\n",
+						__func__, functions_mask);
+		pr_err("%s: continuing with current composition\n", __func__);
+		return;
+	}
+	usb_switch_composition(pid);
+}
+EXPORT_SYMBOL(usb_function_enable);
+
/* Tear down everything usb_probe() set up (IRQs, DMA pool and buffer,
 * tables, register mapping, clocks, wakelocks) and free @ui.
 * Returns @ret unchanged so probe error paths can "return
 * usb_free(ui, err);".
 */
static int usb_free(struct usb_info *ui, int ret)
{
	disable_irq_wake(ui->irq);
	free_irq(ui->irq, ui);
	if (ui->gpio_irq[0])
		free_irq(ui->gpio_irq[0], NULL);
	if (ui->gpio_irq[1])
		free_irq(ui->gpio_irq[1], NULL);

	dma_pool_destroy(ui->pool);
	dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
	kfree(ui->func);
	kfree(ui->strdesc);
	iounmap(ui->addr);
	clk_put(ui->clk);
	clk_put(ui->pclk);
	clk_put(ui->cclk);
	/* NOTE(review): init with '0' presumably destroys the suspend
	 * wakelocks — confirm against msm_hsusb_suspend_locks_init() */
	msm_hsusb_suspend_locks_init(ui, 0);
	kfree(ui);

	return ret;
}
+
/* Sample the VBUS (B-session valid) state from the ULPI interrupt
 * status register.  The session-valid rise/fall interrupt sources are
 * masked around the read and then re-enabled.  Returns 1 when VBUS is
 * present, else 0.
 */
static int usb_vbus_is_on(struct usb_info *ui)
{
	unsigned tmp;

	/* disable session valid raising and falling interrupts */
	ulpi_write(ui, ULPI_SESSION_VALID_RAISE, ULPI_USBINTR_ENABLE_RASING_C);
	ulpi_write(ui, ULPI_SESSION_VALID_FALL, ULPI_USBINTR_ENABLE_FALLING_C);

	tmp = ulpi_read(ui, ULPI_USBINTR_STATUS);

	/* enable session valid raising and falling interrupts */
	ulpi_write(ui, ULPI_SESSION_VALID_RAISE, ULPI_USBINTR_ENABLE_RASING_S);
	ulpi_write(ui, ULPI_SESSION_VALID_FALL, ULPI_USBINTR_ENABLE_FALLING_S);

	/* bit 2 — presumably the session-valid status bit; confirm
	 * against the ULPI interrupt status register definition */
	if (tmp & (1 << 2))
		return 1;
	return 0;
}
+/* Driver state machine, executed on the usb_work workqueue.
+ *
+ * ui->flags is a bitmask of pending events that is atomically consumed
+ * once per loop iteration; ui->state tracks IDLE/ONLINE/OFFLINE.  Any
+ * flag a state does not recognise falls through to the shared `reset:`
+ * tail, which forces a full hardware reset via the IDLE state. */
+static void usb_do_work(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, work.work);
+	unsigned long iflags;
+	unsigned long flags, ret;
+
+	for (;;) {
+		/* atomically take ownership of all pending event flags */
+		spin_lock_irqsave(&ui->lock, iflags);
+		flags = ui->flags;
+		ui->flags = 0;
+		spin_unlock_irqrestore(&ui->lock, iflags);
+
+		/* give up if we have nothing to do */
+		if (flags == 0)
+			break;
+
+		switch (ui->state) {
+		case USB_STATE_IDLE:
+			if (flags & USB_FLAG_REG_OTG) {
+				/* hand the device controller over to the
+				 * OTG transceiver driver */
+				dcd_ops.handle = (void *) ui;
+				ret = ui->xceiv->set_peripheral(ui->xceiv,
+								&dcd_ops);
+				if (ret)
+					pr_err("%s: Can't register peripheral"
+						"driver with OTG", __func__);
+				break;
+			}
+			if ((flags & USB_FLAG_START) ||
+					(flags & USB_FLAG_RESET)) {
+				disable_irq(ui->irq);
+				if (ui->vbus_sn_notif)
+					msm_pm_app_enable_usb_ldo(1);
+				usb_clk_enable(ui);
+				usb_vreg_enable(ui);
+				usb_vbus_online(ui);
+
+				/* if VBUS is present move to ONLINE state
+				 * otherwise move to OFFLINE state
+				 */
+				if (usb_vbus_is_on(ui)) {
+					ui->usb_state = USB_STATE_POWERED;
+					msm_hsusb_suspend_locks_acquire(ui, 1);
+					ui->state = USB_STATE_ONLINE;
+					usb_enable_pullup(ui);
+					schedule_delayed_work(
+							&ui->chg_legacy_det,
+							USB_CHG_DET_DELAY);
+					pr_info("hsusb: IDLE -> ONLINE\n");
+				} else {
+					ui->usb_state = USB_STATE_NOTATTACHED;
+					ui->state = USB_STATE_OFFLINE;
+
+					msleep(500);
+					usb_lpm_enter(ui);
+					pr_info("hsusb: IDLE -> OFFLINE\n");
+					if (ui->vbus_sn_notif)
+						msm_pm_app_enable_usb_ldo(0);
+				}
+				enable_irq(ui->irq);
+				break;
+			}
+			goto reset;
+
+		case USB_STATE_ONLINE:
+			/* If at any point when we were online, we received
+			 * the signal to go offline, we must honor it
+			 */
+			if (flags & USB_FLAG_VBUS_OFFLINE) {
+				enum charger_type temp;
+				unsigned long f;
+
+				cancel_delayed_work_sync(&ui->chg_legacy_det);
+
+				/* snapshot and invalidate the charger type
+				 * under the lock */
+				spin_lock_irqsave(&ui->lock, f);
+				temp = ui->chg_type;
+				ui->chg_type = USB_CHG_TYPE__INVALID;
+				spin_unlock_irqrestore(&ui->lock, f);
+
+				if (temp != USB_CHG_TYPE__INVALID) {
+					/* re-acquire wakelock and restore axi
+					 * freq if they have been reduced by
+					 * charger work item
+					 */
+					msm_hsusb_suspend_locks_acquire(ui, 1);
+
+					msm_chg_usb_i_is_not_available();
+					msm_chg_usb_charger_disconnected();
+				}
+
+				/* reset usb core and usb phy */
+				disable_irq(ui->irq);
+				if (ui->in_lpm)
+					usb_lpm_exit(ui);
+				usb_vbus_offline(ui);
+				usb_lpm_enter(ui);
+				if ((ui->vbus_sn_notif) &&
+				(ui->usb_state == USB_STATE_NOTATTACHED))
+					msm_pm_app_enable_usb_ldo(0);
+				ui->state = USB_STATE_OFFLINE;
+				enable_irq(ui->irq);
+				switch_set_state(&ui->sdev, 0);
+				pr_info("hsusb: ONLINE -> OFFLINE\n");
+				break;
+			}
+			if (flags & USB_FLAG_SUSPEND) {
+				ui->usb_state = USB_STATE_SUSPENDED;
+				usb_lpm_enter(ui);
+				msm_hsusb_suspend_locks_acquire(ui, 1);
+				break;
+			}
+			if ((flags & USB_FLAG_RESUME) ||
+					(flags & USB_FLAG_CONFIGURE)) {
+				int maxpower = usb_get_max_power(ui);
+
+				if (maxpower > 0)
+					msm_chg_usb_i_is_available(maxpower);
+
+				if (flags & USB_FLAG_CONFIGURE)
+					switch_set_state(&ui->sdev, 1);
+
+				break;
+			}
+			goto reset;
+
+		case USB_STATE_OFFLINE:
+			/* If we were signaled to go online and vbus is still
+			 * present when we received the signal, go online.
+			 */
+			if ((flags & USB_FLAG_VBUS_ONLINE)) {
+				msm_hsusb_suspend_locks_acquire(ui, 1);
+				disable_irq(ui->irq);
+				ui->state = USB_STATE_ONLINE;
+				if (ui->in_lpm)
+					usb_lpm_exit(ui);
+				usb_vbus_online(ui);
+				/* VBUS dropped again before we got here:
+				 * ack the BSV change and force a reset */
+				if (!(B_SESSION_VALID & readl(USB_OTGSC))) {
+					writel(((readl(USB_OTGSC) &
+						~OTGSC_INTR_STS_MASK) |
+						OTGSC_BSVIS), USB_OTGSC);
+					enable_irq(ui->irq);
+					goto reset;
+				}
+				usb_enable_pullup(ui);
+				schedule_delayed_work(
+						&ui->chg_legacy_det,
+						USB_CHG_DET_DELAY);
+				pr_info("hsusb: OFFLINE -> ONLINE\n");
+				enable_irq(ui->irq);
+				break;
+			}
+			if (flags & USB_FLAG_SUSPEND) {
+				usb_lpm_enter(ui);
+				wake_unlock(&ui->wlock);
+				break;
+			}
+		default:
+reset:
+			/* For RESET or any unknown flag in a particular state
+			 * go to IDLE state and reset HW to bring to known state
+			 */
+			ui->flags = USB_FLAG_RESET;
+			ui->state = USB_STATE_IDLE;
+		}
+	}
+}
+
+/* Callback invoked on a VBUS state-change notification (registered via
+ * msm_pm_app_register_vbus_sn elsewhere in this file).  Only the online
+ * transition is handled here: power the USB LDO, leave low power mode
+ * and make sure the PHY comparators are enabled so session-valid can be
+ * detected.  The offline case is driven by controller interrupts. */
+void msm_hsusb_set_vbus_state(int online)
+{
+	struct usb_info *ui = the_usb_info;
+
+	if (ui && online) {
+		msm_pm_app_enable_usb_ldo(1);
+		usb_lpm_exit(ui);
+		/* Turn on PHY comparators */
+		if (!(ulpi_read(ui, 0x30) & 0x01))
+				ulpi_write(ui, 0x01, 0x30);
+	}
+}
+
+/* ISR for the VBUS/ID GPIO wakeup interrupts armed during low power
+ * mode.  The line exists only to wake the system, so it is disabled
+ * immediately; usb_lpm_enter() re-enables it on the next LPM entry. */
+static irqreturn_t usb_lpm_gpio_isr(int irq, void *data)
+{
+	disable_irq(irq);
+
+	return IRQ_HANDLED;
+}
+
+/* Bring the controller out of low power mode: restore GPIO muxing,
+ * grab the wake lock, re-enable clocks/regulator and clear the async
+ * interrupt / ULPI STP controls.  If the PHY is still in PHCD (phy low
+ * power clock disable), waking it requires ULPI access that cannot be
+ * done here, so the wakeup is deferred to the wakeup_phy work item
+ * (ui->irq stays disabled until that work completes). */
+static void usb_lpm_exit(struct usb_info *ui)
+{
+	if (ui->in_lpm == 0)
+		return;
+
+	if (usb_lpm_config_gpio)
+		usb_lpm_config_gpio(0);
+
+	wake_lock(&ui->wlock);
+	usb_clk_enable(ui);
+	usb_vreg_enable(ui);
+
+	writel(readl(USB_USBCMD) & ~ASYNC_INTR_CTRL, USB_USBCMD);
+	writel(readl(USB_USBCMD) & ~ULPI_STP_CTRL, USB_USBCMD);
+
+	if (readl(USB_PORTSC) & PORTSC_PHCD) {
+		/* PHY still suspended: wake it from process context */
+		disable_irq(ui->irq);
+		schedule_work(&ui->li.wakeup_phy);
+	} else {
+		ui->in_lpm = 0;
+		if (ui->xceiv)
+			ui->xceiv->set_suspend(ui->xceiv, 0);
+	}
+	pr_info("%s(): USB exited from low power mode\n", __func__);
+}
+
+/* Put the controller and PHY into low power mode.
+ *
+ * Aborts (returning -1) when the bus is active, when the PHY refuses
+ * to suspend, or when a B-session became valid during the attempt — in
+ * that case USB_FLAG_VBUS_ONLINE is left pending so the state machine
+ * re-connects.  On success clocks and the regulator are off, wakeup
+ * GPIO irqs are armed, and the suspend locks are released.
+ * Returns 0 once in LPM (or already there), -1 on abort. */
+static int usb_lpm_enter(struct usb_info *ui)
+{
+	unsigned long flags;
+	unsigned connected;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_debug("already in lpm, nothing to do\n");
+		return 0;
+	}
+
+	if (usb_is_online(ui)) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: lpm procedure aborted\n", __func__);
+		return -1;
+	}
+
+	/* commit to LPM before dropping the lock so concurrent callers
+	 * see in_lpm and bail out above */
+	ui->in_lpm = 1;
+	if (ui->xceiv)
+		ui->xceiv->set_suspend(ui->xceiv, 1);
+	disable_irq(ui->irq);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (usb_suspend_phy(ui)) {
+		ui->in_lpm = 0;
+		ui->flags = USB_FLAG_RESET;
+		enable_irq(ui->irq);
+		pr_err("%s: phy suspend failed, lpm procedure aborted\n",
+				__func__);
+		return -1;
+	}
+
+	/* VBUS appeared while we were suspending: undo and reconnect */
+	if ((B_SESSION_VALID & readl(USB_OTGSC)) &&
+				(ui->usb_state == USB_STATE_NOTATTACHED)) {
+		ui->in_lpm = 0;
+		writel(((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
+						OTGSC_BSVIS), USB_OTGSC);
+		ui->flags = USB_FLAG_VBUS_ONLINE;
+		ui->usb_state = USB_STATE_POWERED;
+		usb_wakeup_phy(ui);
+		enable_irq(ui->irq);
+		return -1;
+	}
+
+	/* enable async interrupt */
+	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL, USB_USBCMD);
+	connected = readl(USB_USBCMD) & USBCMD_RS;
+
+	usb_vreg_disable(ui);
+	usb_clk_disable(ui);
+
+	if (usb_lpm_config_gpio) {
+		if (usb_lpm_config_gpio(1)) {
+			spin_lock_irqsave(&ui->lock, flags);
+			usb_lpm_exit(ui);
+			spin_unlock_irqrestore(&ui->lock, flags);
+			enable_irq(ui->irq);
+			return -1;
+		}
+		/* arm the GPIO wakeup sources disabled by their ISR */
+		enable_irq(ui->gpio_irq[0]);
+		enable_irq(ui->gpio_irq[1]);
+	}
+
+	enable_irq(ui->irq);
+	msm_hsusb_suspend_locks_acquire(ui, 0);
+	pr_info("%s: usb in low power mode\n", __func__);
+	return 0;
+}
+
+/* Connect to the host: unmask the core interrupts we care about
+ * (reset/suspend/usb/port-change) and set Run/Stop to pull up D+. */
+static void usb_enable_pullup(struct usb_info *ui)
+{
+	disable_irq(ui->irq);
+	writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
+	writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+	enable_irq(ui->irq);
+}
+
+/* SW workarounds
+Issue #1	- USB Spoof Disconnect Failure
+Symptom	- Writing 0 to run/stop bit of USBCMD doesn't cause disconnect
+SW workaround	- Making opmode non-driving and SuspendM set in function
+		register of SMSC phy
+*/
+/* Disconnect from the host: mask the core interrupts and clear
+ * Run/Stop.  Integrated (non-external, non-45nm) PHYs additionally need
+ * the spoof-disconnect workaround described above. */
+static void usb_disable_pullup(struct usb_info *ui)
+{
+	disable_irq(ui->irq);
+	writel(readl(USB_USBINTR) & ~(STS_URI | STS_SLI | STS_UI | STS_PCI),
+			USB_USBINTR);
+	writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+
+	/* S/W workaround, Issue#1 */
+	if (!is_phy_external() && !is_phy_45nm())
+		ulpi_write(ui, 0x48, 0x04);
+
+	enable_irq(ui->irq);
+}
+
+/* Deferred work: when charger detection classified the port as a
+ * standard downstream port (SDP), tell the charging stack that bus
+ * current is no longer available. */
+static void usb_chg_stop(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	enum charger_type chg;
+	unsigned long flags;
+
+	/* snapshot the charger type under the lock, act on it outside */
+	spin_lock_irqsave(&ui->lock, flags);
+	chg = ui->chg_type;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (chg != USB_CHG_TYPE__SDP)
+		return;
+
+	msm_chg_usb_i_is_not_available();
+}
+
+/* VBUS appeared: if we were parked in low power mode, restore GPIOs,
+ * power and clocks and wake the PHY first, then reset the controller
+ * so it starts from a known state. */
+static void usb_vbus_online(struct usb_info *ui)
+{
+	if (ui->in_lpm) {
+		if (usb_lpm_config_gpio)
+			usb_lpm_config_gpio(0);
+		usb_vreg_enable(ui);
+		usb_clk_enable(ui);
+		usb_wakeup_phy(ui);
+		ui->in_lpm = 0;
+	}
+
+	usb_reset(ui);
+}
+
+/* Handle cable disconnect: drop the active configuration and flush all
+ * endpoints, then hard-reset the link and PHY (works around known h/w
+ * bugs and flushes any state the hardware may still hold), and finally
+ * re-arm the interrupt sources needed to detect the next attach. */
+static void usb_vbus_offline(struct usb_info *ui)
+{
+	unsigned long timeout;
+	unsigned val = 0;
+
+	if (ui->online != 0) {
+		ui->online = 0;
+		flush_all_endpoints(ui);
+		set_configuration(ui, 0);
+	}
+
+	/* reset h/w at cable disconnetion becasuse
+	 * of h/w bugs and to flush any resource that
+	 * h/w might be holding
+	 */
+	clk_enable(ui->clk);
+
+	if (readl(USB_PORTSC) & PORTSC_PHCD)
+		usb_wakeup_phy(ui);
+
+	if (ui->pdata->phy_reset)
+		ui->pdata->phy_reset(ui->addr);
+	else
+		msm_hsusb_phy_reset();
+	/* Give some delay to settle phy after reset */
+	msleep(100);
+
+	/* link controller reset; poll until the bit self-clears */
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	while (readl(USB_USBCMD) & USBCMD_RESET) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ui->pdev->dev, "usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	}
+
+	/* Disable VbusValid and SessionEnd comparators */
+	val = ULPI_VBUS_VALID | ULPI_SESS_END;
+
+	/* enable id interrupt only when transceiver is available */
+	if (ui->xceiv)
+		writel(readl(USB_OTGSC) | OTGSC_BSVIE | OTGSC_IDIE, USB_OTGSC);
+	else {
+		writel((readl(USB_OTGSC) | OTGSC_BSVIE) & ~OTGSC_IDPU,
+							USB_OTGSC);
+		ulpi_write(ui, ULPI_IDPU, ULPI_OTG_CTRL_CLR);
+		val |= ULPI_HOST_DISCONNECT | ULPI_ID_GND;
+	}
+	ulpi_write(ui, val, ULPI_INT_RISE_CLR);
+	ulpi_write(ui, val, ULPI_INT_FALL_CLR);
+
+	clk_disable(ui->clk);
+}
+
+/* Work item scheduled by usb_lpm_exit() when the PHY was still in PHCD:
+ * wake the PHY from process context.  On failure the device is
+ * disconnected (pullup off) and a full controller reset is queued.
+ * ui->irq was disabled before this work was scheduled and is re-enabled
+ * here on both paths. */
+static void usb_lpm_wakeup_phy(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+
+	if (usb_wakeup_phy(ui)) {
+		pr_err("fatal error: cannot bring phy out of lpm\n");
+		pr_err("%s: resetting controller\n", __func__);
+
+		spin_lock_irqsave(&ui->lock, flags);
+		usb_disable_pullup(ui);
+		ui->flags = USB_FLAG_RESET;
+		queue_delayed_work(usb_work, &ui->work, 0);
+		enable_irq(ui->irq);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return;
+	}
+
+	ui->in_lpm = 0;
+	if (ui->xceiv)
+		ui->xceiv->set_suspend(ui->xceiv, 0);
+	enable_irq(ui->irq);
+}
+
+/* Force the host to re-enumerate this device by dropping the D+ pullup
+ * for 10 ms and re-asserting it. */
+void usb_function_reenumerate(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	/* disable and re-enable the D+ pullup */
+	pr_info("hsusb: disable pullup\n");
+	usb_disable_pullup(ui);
+
+	msleep(10);
+
+	pr_info("hsusb: enable pullup\n");
+	usb_enable_pullup(ui);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/* Scratch buffer for debugfs reads; access is serialized by ui->lock
+ * while the snapshot is built. */
+static char debug_buffer[PAGE_SIZE];
+
+/* debugfs "status": dump the endpoint/interrupt related controller
+ * registers, every active endpoint's queue head and pending request
+ * chain, and the PHY failure count, all captured atomically under
+ * ui->lock into a page-sized snapshot. */
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	char *buf = debug_buffer;
+	unsigned long flags;
+	struct usb_endpoint *ept;
+	struct msm_request *req;
+	int n;
+	int i = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		       "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
+		       readl(USB_ENDPTSETUPSTAT),
+		       readl(USB_ENDPTPRIME),
+		       readl(USB_ENDPTSTAT),
+		       readl(USB_ENDPTCOMPLETE));
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		       "regs:   cmd=%08x   sts=%08x intr=%08x port=%08x\n\n",
+		       readl(USB_USBCMD),
+		       readl(USB_USBSTS),
+		       readl(USB_USBINTR),
+		       readl(USB_PORTSC));
+
+
+	for (n = 0; n < 32; n++) {
+		ept = ui->ept + n;
+		/* max_pkt == 0 means the endpoint slot is unused */
+		if (ept->max_pkt == 0)
+			continue;
+
+		i += scnprintf(buf + i, PAGE_SIZE - i,
+			       "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
+			       ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
+			       ept->head->config, ept->head->active,
+			       ept->head->next, ept->head->info);
+
+		for (req = ept->req; req; req = req->next)
+			i += scnprintf(buf + i, PAGE_SIZE - i,
+				       "  req @%08x next=%08x info=%08x page0=%08x %c %c\n",
+				       req->item_dma, req->item->next,
+				       req->item->info, req->item->page0,
+				       req->busy ? 'B' : ' ',
+				       req->live ? 'L' : ' '
+				);
+	}
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		       "phy failure count: %d\n", ui->phy_fail_count);
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
+}
+
+
+/* debugfs "reset": any write flags a controller reset and kicks the
+ * state-machine worker. */
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&ui->lock, irq_flags);
+	ui->flags |= USB_FLAG_RESET;
+	queue_delayed_work(usb_work, &ui->work, 0);
+	spin_unlock_irqrestore(&ui->lock, irq_flags);
+
+	return count;
+}
+
+
+/* debugfs "cycle": any write forces a disconnect/reconnect cycle. */
+static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	usb_function_reenumerate();
+	return count;
+}
+
+/* Stash the usb_info pointer (the debugfs file's i_private) for the
+ * read/write handlers. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* read-only register/endpoint status dump */
+const struct file_operations debug_stat_ops = {
+	.open = debug_open,
+	.read = debug_read_status,
+};
+
+
+
+/* write-only: trigger a controller reset */
+const struct file_operations debug_reset_ops = {
+	.open = debug_open,
+	.write = debug_write_reset,
+};
+
+/* write-only: trigger re-enumeration */
+const struct file_operations debug_cycle_ops = {
+	.open = debug_open,
+	.write = debug_write_cycle,
+};
+
+/* debugfs entries, created in usb_debugfs_init() and torn down in
+ * usb_debugfs_uninit() */
+static struct dentry *debugfs_dent;
+static struct dentry *debugfs_status;
+static struct dentry *debugfs_reset;
+static struct dentry *debugfs_cycle;
+/* Create the "usb" debugfs directory with status/reset/cycle entries.
+ * debugfs_create_dir() returns NULL on allocation failure and an
+ * ERR_PTR when debugfs is not mounted into the kernel, so both cases
+ * must be rejected before using the dentry as a parent (a bare IS_ERR
+ * check let NULL through, silently creating the files in the debugfs
+ * root). */
+static void usb_debugfs_init(struct usb_info *ui)
+{
+	debugfs_dent = debugfs_create_dir("usb", 0);
+	if (IS_ERR_OR_NULL(debugfs_dent))
+		return;
+
+	debugfs_status = debugfs_create_file("status", 0444,
+				debugfs_dent, ui, &debug_stat_ops);
+	debugfs_reset = debugfs_create_file("reset", 0222,
+				debugfs_dent, ui, &debug_reset_ops);
+	debugfs_cycle = debugfs_create_file("cycle", 0222,
+				debugfs_dent, ui, &debug_cycle_ops);
+}
+
+/* Remove the debugfs entries; children must go before the directory. */
+static void usb_debugfs_uninit(void)
+{
+	debugfs_remove(debugfs_status);
+	debugfs_remove(debugfs_reset);
+	debugfs_remove(debugfs_cycle);
+	debugfs_remove(debugfs_dent);
+}
+
+#else
+static void usb_debugfs_init(struct usb_info *ui) {}
+static void usb_debugfs_uninit(void) {}
+#endif
+
+/* Populate the global device descriptor from platform data and the
+ * active composition, register the serial/product/manufacturer string
+ * descriptors, and push the serial number and product id to the modem
+ * (A9) side, which uses them for software download. */
+static void usb_configure_device_descriptor(struct usb_info *ui)
+{
+	desc_device.idVendor = ui->pdata->vendor_id;
+	desc_device.idProduct = ui->composition->product_id;
+	desc_device.bcdDevice = ui->pdata->version;
+
+	if (ui->pdata->serial_number)
+		desc_device.iSerialNumber =
+			usb_msm_get_next_strdesc_id(ui->pdata->serial_number);
+	if (ui->pdata->product_name)
+		desc_device.iProduct =
+			usb_msm_get_next_strdesc_id(ui->pdata->product_name);
+	if (ui->pdata->manufacturer_name)
+		desc_device.iManufacturer =
+			usb_msm_get_next_strdesc_id(
+				ui->pdata->manufacturer_name);
+
+	/* Send Serial number to A9 for software download */
+	if (ui->pdata->serial_number) {
+		msm_hsusb_is_serial_num_null(FALSE);
+		msm_hsusb_send_serial_number(ui->pdata->serial_number);
+	} else
+		msm_hsusb_is_serial_num_null(TRUE);
+
+	msm_hsusb_send_productID(desc_device.idProduct);
+
+}
+/* sysfs "func_enable" store: parse "name=0|1" and toggle that function.
+ * Input with no '=' separator, a name that does not fit the local
+ * buffer (previously an unbounded copy into name[20] — stack buffer
+ * overflow), or a value other than 0/1 is ignored; @size is always
+ * consumed so userspace does not loop on a short write. */
+static ssize_t msm_hsusb_store_func_enable(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	char name[20];
+	int enable = 0;
+	int i;
+
+	/* copy the function name, bounded by the local buffer */
+	for (i = 0; buf[i] != 0 && i < (int)sizeof(name) - 1; i++) {
+		if (buf[i] == '=')
+			break;
+		name[i] = buf[i];
+	}
+	name[i] = 0;
+	/* require the '=' separator; this also rejects over-long names
+	 * and avoids reading past the terminator on malformed input */
+	if (buf[i] != '=')
+		return size;
+	i++;
+	if (buf[i] == '0' || buf[i] == '1')
+		enable = buf[i] - '0';
+	else
+		return size;
+
+	pr_info("%s: name = %s, enable = %d\n", __func__, name, enable);
+	usb_function_enable(name, enable);
+	return size;
+}
+/* sysfs "composition" show: report the active composition's product id
+ * (0 when no composition is selected). */
+static ssize_t msm_hsusb_show_compswitch(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned int prod_id = 0;
+
+	if (ui->composition)
+		prod_id = ui->composition->product_id;
+
+	return scnprintf(buf, PAGE_SIZE,
+			"composition product id = %x\n", prod_id);
+}
+
+/* sysfs "composition" store: parse a hex product id and switch the
+ * active composition to it.  Parse failures are logged and ignored. */
+static ssize_t msm_hsusb_store_compswitch(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	unsigned long pid;
+
+	if (strict_strtoul(buf, 16, &pid)) {
+		pr_info("%s: strict_strtoul conversion failed\n", __func__);
+		return size;
+	}
+
+	pr_info("%s: Requested New Product id = %lx\n", __func__, pid);
+	usb_switch_composition((unsigned short)pid);
+
+	return size;
+}
+/* sysfs "autoresume" store: any write triggers a remote wakeup toward
+ * the host. */
+static ssize_t msm_hsusb_store_autoresume(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t size)
+{
+	usb_remote_wakeup();
+
+	return size;
+}
+
+/* sysfs "state" show: print ui->usb_state as a name.
+ * The table is indexed by enum usb_device_state (linux/usb/ch9.h), in
+ * which USB_STATE_RECONNECTING precedes USB_STATE_UNAUTHENTICATED —
+ * the original table had those two swapped and so mislabeled both
+ * states.  Out-of-range values are reported as UNKNOWN instead of
+ * reading past the table. */
+static ssize_t msm_hsusb_show_state(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	static const char * const state[] = {
+			"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED",
+			"USB_STATE_POWERED", "USB_STATE_RECONNECTING",
+			"USB_STATE_UNAUTHENTICATED", "USB_STATE_DEFAULT",
+			"USB_STATE_ADDRESS", "USB_STATE_CONFIGURED",
+			"USB_STATE_SUSPENDED"
+	};
+
+	if (ui->usb_state >= ARRAY_SIZE(state))
+		return scnprintf(buf, PAGE_SIZE, "UNKNOWN\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", state[ui->usb_state]);
+}
+
+/* sysfs "lpm" show: 1 when the controller is in low power mode. */
+static ssize_t msm_hsusb_show_lpm(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ui->in_lpm);
+}
+
+/* sysfs "speed" show: print the negotiated bus speed as a name.
+ * enum usb_device_speed also defines USB_SPEED_WIRELESS/SUPER beyond
+ * the four entries listed here, so guard the index instead of reading
+ * past the table; anything unlisted is reported as UNKNOWN. */
+static ssize_t msm_hsusb_show_speed(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	static const char * const speed[] = {
+			"USB_SPEED_UNKNOWN", "USB_SPEED_LOW",
+			"USB_SPEED_FULL", "USB_SPEED_HIGH"
+	};
+
+	if (ui->speed >= ARRAY_SIZE(speed))
+		return scnprintf(buf, PAGE_SIZE, "USB_SPEED_UNKNOWN\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->speed]);
+}
+
+/* Top-level sysfs controls:
+ *   composition (rw) - show/switch the active product id
+ *   func_enable (w)  - "name=0|1" toggles a single function
+ *   autoresume  (w)  - trigger remote wakeup
+ *   state, lpm, speed (r) - diagnostics
+ */
+static DEVICE_ATTR(composition, 0664,
+		msm_hsusb_show_compswitch, msm_hsusb_store_compswitch);
+static DEVICE_ATTR(func_enable, S_IWUSR,
+		NULL, msm_hsusb_store_func_enable);
+static DEVICE_ATTR(autoresume, 0222,
+		NULL, msm_hsusb_store_autoresume);
+static DEVICE_ATTR(state, 0664, msm_hsusb_show_state, NULL);
+static DEVICE_ATTR(lpm, 0664, msm_hsusb_show_lpm, NULL);
+static DEVICE_ATTR(speed, 0664, msm_hsusb_show_speed, NULL);
+
+static struct attribute *msm_hsusb_attrs[] = {
+	&dev_attr_composition.attr,
+	&dev_attr_func_enable.attr,
+	&dev_attr_autoresume.attr,
+	&dev_attr_state.attr,
+	&dev_attr_lpm.attr,
+	&dev_attr_speed.attr,
+	NULL,
+};
+static struct attribute_group msm_hsusb_attr_grp = {
+	.attrs = msm_hsusb_attrs,
+};
+
+/* Generates a read-only sysfs attribute reporting whether the function
+ * at the fixed @index in ui->func[] is enabled.  The index values used
+ * below must match the platform data function ordering — NOTE(review):
+ * confirm against the board file's function table. */
+#define msm_hsusb_func_attr(function, index)				\
+static ssize_t  show_##function(struct device *dev,			\
+		struct device_attribute *attr, char *buf)		\
+{									\
+	struct usb_info *ui = the_usb_info;				\
+	struct usb_function_info *fi = ui->func[index];			\
+									\
+	return sprintf(buf, "%d", fi->enabled);				\
+									\
+}									\
+									\
+static DEVICE_ATTR(function, S_IRUGO, show_##function, NULL);
+
+msm_hsusb_func_attr(diag, 0);
+msm_hsusb_func_attr(adb, 1);
+msm_hsusb_func_attr(modem, 2);
+msm_hsusb_func_attr(nmea, 3);
+msm_hsusb_func_attr(mass_storage, 4);
+msm_hsusb_func_attr(ethernet, 5);
+msm_hsusb_func_attr(rmnet, 6);
+
+static struct attribute *msm_hsusb_func_attrs[] = {
+	&dev_attr_diag.attr,
+	&dev_attr_adb.attr,
+	&dev_attr_modem.attr,
+	&dev_attr_nmea.attr,
+	&dev_attr_mass_storage.attr,
+	&dev_attr_ethernet.attr,
+	&dev_attr_rmnet.attr,
+	NULL,
+};
+
+/* exposed under <device>/functions/ */
+static struct attribute_group msm_hsusb_func_attr_grp = {
+	.name  = "functions",
+	.attrs = msm_hsusb_func_attrs,
+};
+
+/* Bind the MSM HS-USB peripheral controller.
+ *
+ * Acquires, in order: main sysfs group, workqueue, state struct,
+ * string-descriptor table, function table, function sysfs group,
+ * register mapping, DMA buffer and pool, clocks, optional 5V regulator,
+ * IRQs, switch device.  Every failure unwinds in reverse through the
+ * goto ladder at the bottom.
+ *
+ * Fixes folded in: the vreg failure path used to NULL the handle
+ * before PTR_ERR() (returning 0, i.e. success, on failure); the two
+ * platform_get_irq_byname() failures used to `return -ENODEV` and leak
+ * every resource acquired so far; and the error ladder used to call
+ * free_irq(0, NULL) when config_gpio was absent. */
+static int __init usb_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct usb_info *ui;
+	int irq;
+	int ulpi_irq1 = 0;
+	int ulpi_irq2 = 0;
+	int i;
+	int ret = 0;
+
+	if (!pdev || !pdev->dev.platform_data) {
+		pr_err("%s:pdev or platform data is null\n", __func__);
+		return -ENODEV;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		pr_err("%s: failed to get irq num from platform_get_irq\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("%s: failed to get mem resource\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_attr_grp);
+	if (ret) {
+		pr_err("%s: unable to create sysfs group\n", __func__);
+		return ret;
+	}
+
+	usb_work = create_singlethread_workqueue("usb_work");
+	if (!usb_work) {
+		pr_err("%s: unable to create work queue\n", __func__);
+		ret = -ENOMEM;
+		goto free_sysfs_grp;
+	}
+
+	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
+	if (!ui) {
+		pr_err("%s: unable to allocate memory for ui\n", __func__);
+		ret = -ENOMEM;
+		goto free_workqueue;
+	}
+
+	ui->pdev = pdev;
+	ui->pdata = pdev->dev.platform_data;
+
+	/* select the composition matching the "pid" module parameter */
+	for (i = 0; i < ui->pdata->num_compositions; i++)
+		if (ui->pdata->compositions[i].product_id == pid) {
+			ui->composition = &ui->pdata->compositions[i];
+			break;
+		}
+	if (!ui->composition) {
+		pr_err("%s: unable to find the composition with pid:(%d)\n",
+				__func__, pid);
+		ret = -ENODEV;
+		goto free_ui;
+	}
+
+	ui->phy_info = ui->pdata->phy_info;
+	if (ui->phy_info == USB_PHY_UNDEFINED) {
+		pr_err("undefined phy_info: (%d)\n", ui->phy_info);
+		ret = -ENOMEM;
+		goto free_ui;
+	}
+
+	/* zero is reserved for language id */
+	ui->strdesc_index = 1;
+	ui->strdesc = kzalloc(sizeof(char *) * MAX_STRDESC_NUM, GFP_KERNEL);
+	if (!ui->strdesc) {
+		pr_err("%s: unable allocate mem for string descriptors\n",
+				__func__);
+		ret = -ENOMEM;
+		goto free_ui;
+	}
+
+	ui->num_funcs = ui->pdata->num_functions;
+	ui->func = kzalloc(sizeof(struct usb_function *) * ui->num_funcs,
+				GFP_KERNEL);
+	if (!ui->func) {
+		pr_err("%s: unable allocate mem for functions\n", __func__);
+		ret = -ENOMEM;
+		goto free_str_desc;
+	}
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+	if (ret) {
+		pr_err("%s: unable to create functions sysfs group\n",
+				__func__);
+		goto free_func;
+	}
+
+	ui->addr = ioremap(res->start, resource_size(res));
+	if (!ui->addr) {
+		pr_err("%s: unable ioremap\n", __func__);
+		ret = -ENOMEM;
+		goto free_func_sysfs_grp;
+	}
+
+	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
+	if (!ui->buf) {
+		pr_err("%s: failed allocate dma coherent memory\n", __func__);
+		ret = -ENOMEM;
+		goto free_iounmap;
+	}
+
+	ui->pool = dma_pool_create("hsusb", NULL, 32, 32, 0);
+	if (!ui->pool) {
+		pr_err("%s: unable to allocate dma pool\n", __func__);
+		ret = -ENOMEM;
+		goto free_dma_coherent;
+	}
+
+	ui->clk = clk_get(&pdev->dev, "usb_hs_clk");
+	if (IS_ERR(ui->clk)) {
+		pr_err("%s: unable get usb_hs_clk\n", __func__);
+		ret = PTR_ERR(ui->clk);
+		goto free_dma_pool;
+	}
+
+	ui->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
+	if (IS_ERR(ui->pclk)) {
+		pr_err("%s: unable get usb_hs_pclk\n", __func__);
+		ret = PTR_ERR(ui->pclk);
+		goto free_hs_clk;
+	}
+
+	if (ui->pdata->core_clk) {
+		ui->cclk = clk_get(&pdev->dev, "usb_hs_core_clk");
+		if (IS_ERR(ui->cclk)) {
+			pr_err("%s: unable get usb_hs_core_clk\n", __func__);
+			ret = PTR_ERR(ui->cclk);
+			goto free_hs_pclk;
+		}
+	}
+
+	if (ui->pdata->vreg5v_required) {
+		ui->vreg = vreg_get(NULL, "boost");
+		if (IS_ERR(ui->vreg)) {
+			pr_err("%s: vreg get failed\n", __func__);
+			/* capture the error BEFORE clearing the handle;
+			 * the previous order returned PTR_ERR(NULL) == 0,
+			 * reporting success on failure */
+			ret = PTR_ERR(ui->vreg);
+			ui->vreg = NULL;
+			goto free_hs_cclk;
+		}
+	}
+
+	/* disable interrupts before requesting irq */
+	usb_clk_enable(ui);
+	writel(0, USB_USBINTR);
+	writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC);
+	usb_clk_disable(ui);
+
+	ret = request_irq(irq, usb_interrupt, IRQF_SHARED, pdev->name, ui);
+	if (ret) {
+		pr_err("%s: request_irq failed\n", __func__);
+		goto free_vreg5v;
+	}
+	ui->irq = irq;
+
+	if (ui->pdata->config_gpio) {
+		usb_lpm_config_gpio = ui->pdata->config_gpio;
+
+		ulpi_irq1 = platform_get_irq_byname(pdev, "vbus_interrupt");
+		if (ulpi_irq1 < 0) {
+			pr_err("%s: failed to get vbus gpio interrupt\n",
+					__func__);
+			/* unwind instead of returning: irq, clocks and
+			 * sysfs groups are already held here */
+			ret = -ENODEV;
+			ulpi_irq1 = 0;
+			goto free_irq;
+		}
+
+		ulpi_irq2 = platform_get_irq_byname(pdev, "id_interrupt");
+		if (ulpi_irq2 < 0) {
+			pr_err("%s: failed to get id gpio interrupt\n",
+					__func__);
+			ret = -ENODEV;
+			ulpi_irq2 = 0;
+			goto free_irq;
+		}
+
+		ret = request_irq(ulpi_irq1,
+				&usb_lpm_gpio_isr,
+				IRQF_TRIGGER_HIGH,
+				"vbus_interrupt", NULL);
+		if (ret) {
+			pr_err("%s: failed to request vbus interrupt:(%d)\n",
+					__func__, ulpi_irq1);
+			goto free_irq;
+		}
+
+		ret = request_irq(ulpi_irq2,
+				&usb_lpm_gpio_isr,
+				IRQF_TRIGGER_RISING,
+				"usb_ulpi_data3", NULL);
+		if (ret) {
+			pr_err("%s: failed to request irq ulpi_data_3:(%d)\n",
+							__func__, ulpi_irq2);
+			goto free_ulpi_irq1;
+		}
+
+		ui->gpio_irq[0] = ulpi_irq1;
+		ui->gpio_irq[1] = ulpi_irq2;
+	}
+
+	ui->sdev.name = DRIVER_NAME;
+	ui->sdev.print_name = print_switch_name;
+	ui->sdev.print_state = print_switch_state;
+
+	ret = switch_dev_register(&ui->sdev);
+	if (ret < 0) {
+		pr_err("%s(): switch_dev_register failed ret = %d\n",
+				__func__, ret);
+		goto free_ulpi_irq2;
+	}
+
+	the_usb_info = ui;
+	ui->functions_map = ui->pdata->function_map;
+	ui->selfpowered = 0;
+	ui->remote_wakeup = 0;
+	ui->maxpower = 0xFA;
+	ui->chg_type = USB_CHG_TYPE__INVALID;
+	/* to allow swfi latency, driver latency
+	 * must be above listed swfi latency
+	 */
+	ui->pdata->swfi_latency += 1;
+
+	spin_lock_init(&ui->lock);
+	msm_hsusb_suspend_locks_init(ui, 1);
+	enable_irq_wake(irq);
+
+	/* memory barrier initialization in non-interrupt context */
+	dmb();
+
+	usb_debugfs_init(ui);
+	usb_prepare(ui);
+
+	pr_info("%s: io=%p, irq=%d, dma=%p(%x)\n",
+			__func__, ui->addr, ui->irq, ui->buf, ui->dma);
+	return 0;
+
+free_ulpi_irq2:
+	/* the GPIO irqs are only requested when config_gpio was given */
+	if (ulpi_irq2)
+		free_irq(ulpi_irq2, NULL);
+free_ulpi_irq1:
+	if (ulpi_irq1)
+		free_irq(ulpi_irq1, NULL);
+free_irq:
+	free_irq(ui->irq, ui);
+free_vreg5v:
+	if (ui->pdata->vreg5v_required)
+		vreg_put(ui->vreg);
+free_hs_cclk:
+	clk_put(ui->cclk);
+free_hs_pclk:
+	clk_put(ui->pclk);
+free_hs_clk:
+	clk_put(ui->clk);
+free_dma_pool:
+	dma_pool_destroy(ui->pool);
+free_dma_coherent:
+	dma_free_coherent(&pdev->dev, 4096, ui->buf, ui->dma);
+free_iounmap:
+	iounmap(ui->addr);
+free_func_sysfs_grp:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+free_func:
+	kfree(ui->func);
+free_str_desc:
+	kfree(ui->strdesc);
+free_ui:
+	kfree(ui);
+free_workqueue:
+	destroy_workqueue(usb_work);
+free_sysfs_grp:
+	sysfs_remove_group(&pdev->dev.kobj, &msm_hsusb_attr_grp);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+/* Platform (system) suspend hook: push the controller into low power
+ * mode unless peripheral mode is inactive or it is already in LPM.
+ * NOTE(review): the active/in_lpm checks are made under the lock but
+ * acted upon after it is dropped; a VBUS event in that window relies on
+ * usb_lpm_enter()'s own re-checks to stay safe. */
+static int usb_platform_suspend(struct platform_device *pdev,
+		pm_message_t state)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (!ui->active) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: peripheral mode is not active"
+				"nothing to be done\n", __func__);
+		return 0;
+	}
+
+	if (ui->in_lpm) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		pr_info("%s: we are already in lpm, nothing to be done\n",
+					__func__);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	ret = usb_lpm_enter(ui);
+	if (ret)
+		pr_err("%s: failed to enter lpm\n", __func__);
+
+	return ret;
+}
+#endif
+
+/* Platform driver glue; no .remove — teardown happens in usb_exit(). */
+static struct platform_driver usb_driver = {
+	.probe = usb_probe,
+#ifdef CONFIG_PM
+	.suspend = usb_platform_suspend,
+#endif
+	.driver = { .name = DRIVER_NAME, },
+};
+
+/* Module entry point: bring up the RPC channels used for PHY reset and
+ * charging before registering the platform driver. */
+static int __init usb_module_init(void)
+{
+	int ret;
+
+	/* rpc connect for phy_reset */
+	msm_hsusb_rpc_connect();
+	/* rpc connect for charging */
+	msm_chg_rpc_connect();
+
+	ret = platform_driver_register(&usb_driver);
+	return ret;
+}
+
+/* Release all driver state at module exit: function registrations,
+ * descriptors, the ep0 setup request, then clocks/regulator and
+ * finally the usb_info itself via usb_free().  No-op when probe never
+ * completed (the_usb_info is NULL). */
+static void free_usb_info(void)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long flags;
+	int i;
+	if (ui) {
+		/* detach all registered functions from the global list */
+		INIT_LIST_HEAD(&usb_function_list);
+
+		for (i = 0; i < ui->num_funcs; i++)
+			kfree(ui->func[i]);
+		ui->num_funcs = 0;
+		usb_uninit(ui);
+		kfree(ui->strdesc);
+		usb_ept_free_req(&ui->ep0in, ui->setup_req);
+		/* only flush if the endpoints were ever initialized */
+		if (ui->ept[0].ui == ui)
+			flush_all_endpoints(ui);
+		spin_lock_irqsave(&ui->lock, flags);
+		usb_clk_disable(ui);
+		usb_vreg_disable(ui);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_free(ui, 0);
+		the_usb_info = NULL;
+	}
+}
+/* Module teardown: detach from the OTG transceiver, stop pending work,
+ * unregister every interface that references the usb_info, and only
+ * then free the state.  The original freed ui (via free_usb_info())
+ * first and then called switch_dev_unregister(&ui->sdev) and
+ * sysfs_remove_group(&ui->pdev->dev.kobj, ...) — a use-after-free;
+ * the unregister calls now precede the free. */
+static void usb_exit(void)
+{
+	struct usb_info *ui = the_usb_info;
+	/* free the dev state structure */
+	if (!ui)
+		return;
+
+	if (ui->xceiv) {
+		ui->xceiv->set_peripheral(ui->xceiv, NULL);
+		msm_otg_put_transceiver(ui->xceiv);
+	}
+
+	cancel_work_sync(&ui->li.wakeup_phy);
+
+	destroy_workqueue(usb_work);
+
+	/* unregister everything that dereferences ui before freeing it */
+	switch_dev_unregister(&ui->sdev);
+	sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_func_attr_grp);
+	sysfs_remove_group(&ui->pdev->dev.kobj, &msm_hsusb_attr_grp);
+	usb_debugfs_uninit();
+
+	/* free the usb_info structure */
+	free_usb_info();
+
+	platform_driver_unregister(&usb_driver);
+	msm_hsusb_rpc_close();
+	msm_chg_rpc_close();
+	msm_pm_app_unregister_vbus_sn(&msm_hsusb_set_vbus_state);
+	msm_pm_app_rpc_deinit();
+}
+
+/* Module exit point; all real work is in usb_exit(). */
+static void __exit usb_module_exit(void)
+{
+	usb_exit();
+}
+
+/* "pid" selects the initial composition: matched against the platform
+ * data composition table in usb_probe(). */
+module_param(pid, int, 0);
+MODULE_PARM_DESC(pid, "Product ID of the desired composition");
+
+module_init(usb_module_init);
+module_exit(usb_module_exit);
+
+/* Encode @string as a USB string descriptor (UTF-16LE, assuming the
+ * source is ASCII) into @buffer.  bLength is a single byte covering
+ * the 2-byte header plus 2 bytes per character, so the string is
+ * clamped to 126 characters — previously a longer string silently
+ * overflowed the byte (e.g. 127 chars -> bLength 0) while the loop
+ * still wrote all the data.  @buffer is left untouched when @string
+ * is NULL (caller pre-sets buffer[0] = 0). */
+static void copy_string_descriptor(char *string, char *buffer)
+{
+	int length, i;
+
+	if (string) {
+		length = strlen(string);
+		/* clamp so 2*length + 2 fits in the one-byte bLength */
+		if (length > 126)
+			length = 126;
+		buffer[0] = 2 * length + 2;
+		buffer[1] = USB_DT_STRING;
+		for (i = 0; i < length; i++) {
+			buffer[2 * i + 2] = string[i];
+			buffer[2 * i + 3] = 0;
+		}
+	}
+}
+/* Fill @dq with a USB 2.0 device-qualifier descriptor and return its
+ * size.  The qualifier mirrors the device descriptor for the "other"
+ * speed; packet size and configuration count are fixed for this core.
+ * (The original chained half the assignments with comma operators —
+ * harmless to the compiler but an editing hazard; now plain
+ * statements.) */
+static int get_qualifier_descriptor(struct usb_qualifier_descriptor *dq)
+{
+	struct usb_qualifier_descriptor *dev_qualifier = dq;
+	dev_qualifier->bLength = sizeof(struct usb_qualifier_descriptor);
+	dev_qualifier->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+	dev_qualifier->bcdUSB =  __constant_cpu_to_le16(0x0200);
+	dev_qualifier->bDeviceClass = USB_CLASS_PER_INTERFACE;
+	dev_qualifier->bDeviceSubClass = 0;
+	dev_qualifier->bDeviceProtocol = 0;
+	dev_qualifier->bMaxPacketSize0 = 64;
+	dev_qualifier->bNumConfigurations = 1;
+	dev_qualifier->bRESERVED = 0;
+	return sizeof(struct usb_qualifier_descriptor);
+}
+
+/* Copy a NULL-terminated array of descriptors verbatim into @ptr,
+ * back to back, and return the total number of bytes written. */
+static int usb_fill_descriptors(void *ptr,
+		struct usb_descriptor_header **descriptors)
+{
+	unsigned char *dst = ptr;
+	unsigned n;
+
+	for (n = 0; descriptors[n] != NULL; n++) {
+		memcpy(dst, descriptors[n], descriptors[n]->bLength);
+		dst += descriptors[n]->bLength;
+	}
+
+	return dst - (u8 *)ptr;
+}
+
+/* Handle GET_DESCRIPTOR: build the requested descriptor into req->buf
+ * and set req->length.  wValue encodes type (high byte) and index (low
+ * byte).  Supports DEVICE, DEVICE_QUALIFIER, CONFIG and
+ * OTHER_SPEED_CONFIG (which shares the CONFIG path via the get_config
+ * label — note the goto jumps into the if-body, relying on all locals
+ * being assigned after the label), and STRING.
+ * Returns 0 on success, -1 for anything unsupported. */
+static int usb_find_descriptor(struct usb_info *ui, struct usb_ctrlrequest *ctl,
+				struct usb_request *req)
+{
+	int i;
+	unsigned short id = ctl->wValue;
+	unsigned short type = id >> 8;
+	id &= 0xff;
+
+	if ((type == USB_DT_DEVICE) && (id == 0)) {
+		req->length = sizeof(desc_device);
+		/* composite device with IAD: use the EF/02/01 triple */
+		if (usb_msm_is_iad()) {
+			desc_device.bDeviceClass = 0xEF;
+			desc_device.bDeviceSubClass = 0x02;
+			desc_device.bDeviceProtocol = 0x01;
+		}
+		memcpy(req->buf, &desc_device, req->length);
+		return 0;
+	}
+	if ((type == USB_DT_DEVICE_QUALIFIER) && (id == 0)) {
+		struct usb_qualifier_descriptor dq;
+		req->length = get_qualifier_descriptor(&dq);
+		if (usb_msm_is_iad()) {
+			dq.bDeviceClass = 0xEF;
+			dq.bDeviceSubClass = 0x02;
+			dq.bDeviceProtocol = 0x01;
+		}
+		memcpy(req->buf, &dq, req->length);
+		return 0;
+	}
+
+	if ((type == USB_DT_OTHER_SPEED_CONFIG) && (id == 0))
+		goto get_config;
+
+	if ((type == USB_DT_CONFIG) && (id == 0)) {
+		struct usb_config_descriptor cfg;
+		unsigned ifc_count = 0;
+		char *ptr, *start;
+get_config:
+		ifc_count = 0;
+		start = req->buf;
+		/* function descriptors land after the config header */
+		ptr = start + USB_DT_CONFIG_SIZE;
+		ifc_count = ui->next_ifc_num;
+
+		for (i = 0; i < ui->num_funcs; i++) {
+			struct usb_function_info *fi = ui->func[i];
+			struct usb_descriptor_header **dh = NULL;
+
+			if (!fi || !(ui->composition->functions & (1 << i)))
+				continue;
+			/* OTHER_SPEED_CONFIG reports the descriptors of
+			 * the speed we are NOT currently running at */
+			switch (ui->speed) {
+			case USB_SPEED_HIGH:
+				if (type == USB_DT_OTHER_SPEED_CONFIG)
+					dh = fi->func->fs_descriptors;
+				else
+					dh = fi->func->hs_descriptors;
+				break;
+
+			case USB_SPEED_FULL:
+				if (type == USB_DT_OTHER_SPEED_CONFIG)
+					dh = fi->func->hs_descriptors;
+				else
+					dh = fi->func->fs_descriptors;
+				break;
+
+			default:
+				printk(KERN_ERR "Unsupported speed(%x)\n",
+						ui->speed);
+				return -1;
+			}
+			ptr += usb_fill_descriptors(ptr, dh);
+		}
+
+#define	USB_REMOTE_WAKEUP_SUPPORT	1
+		cfg.bLength = USB_DT_CONFIG_SIZE;
+		if (type == USB_DT_OTHER_SPEED_CONFIG)
+			cfg.bDescriptorType =  USB_DT_OTHER_SPEED_CONFIG;
+		else
+			cfg.bDescriptorType = USB_DT_CONFIG;
+		cfg.wTotalLength = ptr - start;
+		cfg.bNumInterfaces = ifc_count;
+		cfg.bConfigurationValue = 1;
+		cfg.iConfiguration = 0;
+		cfg.bmAttributes = USB_CONFIG_ATT_ONE |
+			ui->selfpowered << USB_CONFIG_ATT_SELFPOWER_POS |
+			USB_REMOTE_WAKEUP_SUPPORT << USB_CONFIG_ATT_WAKEUP_POS;
+		cfg.bMaxPower = ui->maxpower;
+
+		memcpy(start, &cfg, USB_DT_CONFIG_SIZE);
+
+		req->length = ptr - start;
+		return 0;
+	}
+
+	if (type == USB_DT_STRING) {
+		char *buffer = req->buf;
+
+		buffer[0] = 0;
+		if (id > ui->strdesc_index)
+			return -1;
+		 if (id == STRING_LANGUAGE_ID)
+			memcpy(buffer, str_lang_desc, str_lang_desc[0]);
+		 else
+			copy_string_descriptor(ui->strdesc[id], buffer);
+
+		if (buffer[0]) {
+			req->length = buffer[0];
+			return 0;
+		} else
+			return -1;
+	}
+	return -1;
+}
+
+/*****Gadget Framework Functions***/
+/* Return the platform device backing the USB controller, or NULL if
+ * the driver has not been probed yet.
+ */
+struct device *usb_get_device(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	if (ui && ui->pdev)
+		return &ui->pdev->dev;
+	return NULL;
+}
+EXPORT_SYMBOL(usb_get_device);
+
+/* Remove @_req from @ept's queue, flushing the hardware if the request
+ * is live, and reprime the endpoint for any remaining requests.
+ * Returns 0 on success, -EINVAL if the request is not currently queued.
+ */
+int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req)
+{
+	struct usb_info 	*ui = the_usb_info;
+	struct msm_request      *req = to_msm_request(_req);
+	struct msm_request 	*temp_req, *prev_req;
+	unsigned long		flags;
+
+	if (!(ui && req && ept->req))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (req->busy) {
+		req->req.status = 0;
+		req->busy = 0;
+
+		/* See if the request is the first request in the ept queue */
+		if (ept->req == req) {
+			/* Stop the transfer */
+			do {
+				writel((1 << ept->bit), USB_ENDPTFLUSH);
+				while (readl(USB_ENDPTFLUSH) & (1 << ept->bit))
+					udelay(100);
+			} while (readl(USB_ENDPTSTAT) & (1 << ept->bit));
+			if (!req->next)
+				ept->last = NULL;
+			ept->req = req->next;
+			/* unlink the hardware TD list head as well */
+			ept->head->next = req->item->next;
+			goto cancel_req;
+		}
+		/* Request could be in the middle of ept queue */
+		prev_req = temp_req = ept->req;
+		do {
+			if (req == temp_req) {
+				if (req->live) {
+					/* Stop the transfer */
+					do {
+						writel((1 << ept->bit),
+							USB_ENDPTFLUSH);
+						while (readl(USB_ENDPTFLUSH) &
+							(1 << ept->bit))
+							udelay(100);
+					} while (readl(USB_ENDPTSTAT) &
+						(1 << ept->bit));
+				}
+				/* unlink from both software and TD lists */
+				prev_req->next = temp_req->next;
+				prev_req->item->next = temp_req->item->next;
+				if (!req->next)
+					ept->last = prev_req;
+				goto cancel_req;
+			}
+			prev_req = temp_req;
+			temp_req = temp_req->next;
+		} while (temp_req != NULL);
+		goto error;
+/* NOTE(review): this label and block are still inside the
+ * `if (req->busy)` braces despite the outdented formatting.
+ */
+cancel_req:
+	if (req->live) {
+		/* prepare the transaction descriptor item for the hardware */
+		req->item->next = TERMINATE;
+		req->item->info = 0;
+		req->live = 0;
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		/* Reprime the endpoint for the remaining transfers */
+		if (ept->req) {
+			temp_req = ept->req;
+			while (temp_req != NULL) {
+				temp_req->live = 0;
+				temp_req = temp_req->next;
+			}
+			usb_ept_start(ept);
+		}
+	} else
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				(ept->flags & EPT_FLAG_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+	}
+error:
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(usb_ept_cancel_xfer);
+
+/* Force a STALL handshake on @ept until it is explicitly cleared.
+ * Fails (-1) while the controller is in low-power mode, since the
+ * endpoint control registers are not accessible then.
+ */
+int usb_ept_set_halt(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned ctrl;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return -1;
+	}
+
+	ept->ept_halted = 1;
+
+	ctrl = readl(USB_ENDPTCTRL(ept->num));
+	ctrl |= (ept->flags & EPT_FLAG_IN) ? CTRL_TXS : CTRL_RXS;
+	writel(ctrl, USB_ENDPTCTRL(ept->num));
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_set_halt);
+
+/* Clear a STALL on @ept and reset its data toggle.  Fails (-1) while
+ * the controller is in low-power mode.
+ */
+int usb_ept_clear_halt(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned ctrl;
+
+	if (ui->in_lpm) {
+		pr_err("%s: controller is in lpm, cannot proceed\n", __func__);
+		return -1;
+	}
+
+	ept->ept_halted = 0;
+
+	/* drop the stall bit, raise the data-toggle-reset bit */
+	ctrl = readl(USB_ENDPTCTRL(ept->num));
+	if (ept->flags & EPT_FLAG_IN)
+		ctrl = (ctrl & ~CTRL_TXS) | CTRL_TXR;
+	else
+		ctrl = (ctrl & ~CTRL_RXS) | CTRL_RXR;
+	writel(ctrl, USB_ENDPTCTRL(ept->num));
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_ept_clear_halt);
+
+/* Return 1 if @ept currently has its STALL bit set, else 0.
+ * Fix: the original fell through to the RX stall bit for IN endpoints
+ * whose TX stall bit was clear, so an unstalled IN endpoint was
+ * reported stalled whenever the paired OUT direction was stalled.
+ */
+int usb_ept_is_stalled(struct usb_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	/* check only the stall bit for this endpoint's direction */
+	if (in)
+		return (n & CTRL_TXS) ? 1 : 0;
+
+	return (n & CTRL_RXS) ? 1 : 0;
+}
+
+/* Flush any primed/queued transfers on @ept (thin wrapper around the
+ * driver-internal flush_endpoint()).
+ */
+void usb_ept_fifo_flush(struct usb_endpoint *ept)
+{
+	flush_endpoint(ept);
+}
+EXPORT_SYMBOL(usb_ept_fifo_flush);
+
+/* Stub: endpoint-to-function lookup is not implemented; always NULL. */
+struct usb_function *usb_ept_get_function(struct usb_endpoint *ept)
+{
+	return NULL;
+}
+EXPORT_SYMBOL(usb_ept_get_function);
+
+
+/* Walk @ep's request list and free every request on it.
+ * Fix: the original guarded the free with `if (&req->req)`, which takes
+ * the address of an embedded member and is therefore always true -- the
+ * condition was dead and has been removed.
+ */
+void usb_free_endpoint_all_req(struct usb_endpoint *ep)
+{
+	struct msm_request *temp;
+	struct msm_request *req;
+
+	if (!ep)
+		return;
+
+	req = ep->req;
+	while (req) {
+		/* grab the link first; the node is gone after the free */
+		temp = req->next;
+		req->busy = 0;
+		usb_ept_free_req(ep, &req->req);
+		req = temp;
+	}
+}
+EXPORT_SYMBOL(usb_free_endpoint_all_req);
+
+/* Detach @func from the controller.  If the controller is running it
+ * is stopped first: brought out of LPM, interrupts masked and the
+ * D+ pullup dropped, before the function's callbacks are invoked.
+ * Returns 0, or -EINVAL if @func is NULL or was never registered.
+ */
+int usb_function_unregister(struct usb_function *func)
+{
+	struct usb_info *ui = the_usb_info;
+	int i;
+	struct usb_function_info *fi;
+	unsigned long flags;
+
+	if (!func)
+		return -EINVAL;
+
+	fi = usb_find_function(func->name);
+	if (!fi)
+		return -EINVAL;
+
+	if (ui->running) {
+		disable_irq(ui->irq);
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->running = 0;
+		ui->online = 0;
+		ui->bound = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_uninit(ui);
+		/* we should come out of lpm to access registers */
+		if (ui->in_lpm) {
+			if (PHY_TYPE(ui->phy_info) == USB_PHY_EXTERNAL) {
+				disable_irq(ui->gpio_irq[0]);
+				disable_irq(ui->gpio_irq[1]);
+			}
+			usb_lpm_exit(ui);
+			/* if a PHY wakeup was already queued, run it now */
+			if (cancel_work_sync(&ui->li.wakeup_phy))
+				usb_lpm_wakeup_phy(NULL);
+			ui->in_lpm = 0;
+		}
+		/* disable usb and session valid interrupts */
+		writel(0, USB_USBINTR);
+		writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+
+		/* stop the controller */
+		usb_disable_pullup(ui);
+		/* give the host time to observe the disconnect */
+		msleep(100);
+		enable_irq(ui->irq);
+	}
+
+	pr_info("%s: func->name = %s\n", __func__, func->name);
+
+	ui->composition = NULL;
+
+	/* deconfigure the function, then unbind it */
+	if (func->configure)
+		func->configure(0, func->context);
+	if (func->unbind)
+		func->unbind(func->context);
+
+	list_del(&fi->list);
+	for (i = 0; i < ui->num_funcs; i++)
+		if (fi == ui->func[i])
+			ui->func[i] = NULL;
+	kfree(fi);
+	return 0;
+}
+EXPORT_SYMBOL(usb_function_unregister);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/function/msm_hsusb_hw.h b/drivers/usb/function/msm_hsusb_hw.h
new file mode 100644
index 0000000..c016c3f
--- /dev/null
+++ b/drivers/usb/function/msm_hsusb_hw.h
@@ -0,0 +1,163 @@
+/* drivers/usb/function/msm_hsusb_hw.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _USB_FUNCTION_MSM_HSUSB_HW_H
+#define _USB_FUNCTION_MSM_HSUSB_HW_H
+
+#define USB_ID               (MSM_USB_BASE + 0x0000)
+#define USB_HWGENERAL        (MSM_USB_BASE + 0x0004)
+#define USB_HWHOST           (MSM_USB_BASE + 0x0008)
+#define USB_HWDEVICE         (MSM_USB_BASE + 0x000C)
+#define USB_HWTXBUF          (MSM_USB_BASE + 0x0010)
+#define USB_HWRXBUF          (MSM_USB_BASE + 0x0014)
+#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
+
+#define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
+#define USB_HCIVERSION       (MSM_USB_BASE + 0x0102) /* 16 bit */
+#define USB_HCSPARAMS        (MSM_USB_BASE + 0x0104)
+#define USB_HCCPARAMS        (MSM_USB_BASE + 0x0108)
+#define USB_DCIVERSION       (MSM_USB_BASE + 0x0120) /* 16 bit */
+#define USB_USBCMD           (MSM_USB_BASE + 0x0140)
+#define USB_USBSTS           (MSM_USB_BASE + 0x0144)
+#define USB_USBINTR          (MSM_USB_BASE + 0x0148)
+#define USB_FRINDEX          (MSM_USB_BASE + 0x014C)
+#define USB_DEVICEADDR       (MSM_USB_BASE + 0x0154)
+#define USB_ENDPOINTLISTADDR (MSM_USB_BASE + 0x0158)
+#define USB_BURSTSIZE        (MSM_USB_BASE + 0x0160)
+#define USB_TXFILLTUNING     (MSM_USB_BASE + 0x0164)
+#define USB_ULPI_VIEWPORT    (MSM_USB_BASE + 0x0170)
+#define USB_ENDPTNAK         (MSM_USB_BASE + 0x0178)
+#define USB_ENDPTNAKEN       (MSM_USB_BASE + 0x017C)
+#define USB_PORTSC           (MSM_USB_BASE + 0x0184)
+#define USB_OTGSC            (MSM_USB_BASE + 0x01A4)
+#define USB_USBMODE          (MSM_USB_BASE + 0x01A8)
+#define USB_ENDPTSETUPSTAT   (MSM_USB_BASE + 0x01AC)
+#define USB_ENDPTPRIME       (MSM_USB_BASE + 0x01B0)
+#define USB_ENDPTFLUSH       (MSM_USB_BASE + 0x01B4)
+#define USB_ENDPTSTAT        (MSM_USB_BASE + 0x01B8)
+#define USB_ENDPTCOMPLETE    (MSM_USB_BASE + 0x01BC)
+#define USB_ENDPTCTRL(n)     (MSM_USB_BASE + 0x01C0 + (4 * (n)))
+
+
+#define USBCMD_RESET   2
+#define USBCMD_ATTACH  1
+#define USBCMD_ATDTW   (1 << 14)
+
+#define USBMODE_DEVICE 2
+#define USBMODE_HOST   3
+
+/* Per-endpoint queue head consumed by the controller (the base of this
+ * array is programmed via USB_ENDPOINTLISTADDR).  NOTE(review): the
+ * field layout is presumed hardware-defined -- do not reorder.
+ */
+struct ept_queue_head
+{
+    unsigned config;
+    unsigned active; /* read-only */
+
+    unsigned next;
+    unsigned info;
+    unsigned page0;
+    unsigned page1;
+    unsigned page2;
+    unsigned page3;
+    unsigned page4;
+    unsigned reserved_0;
+
+    /* hardware-written copy of the 8-byte SETUP packet */
+    unsigned char setup_data[8];
+
+    unsigned reserved_1;
+    unsigned reserved_2;
+    unsigned reserved_3;
+    unsigned reserved_4;
+};
+
+#define CONFIG_MAX_PKT(n)     ((n) << 16)
+#define CONFIG_ZLT            (1 << 29)    /* stop on zero-len xfer */
+#define CONFIG_IOS            (1 << 15)    /* IRQ on setup */
+
+/* One transfer descriptor element; `next` chains items, with the list
+ * ended by TERMINATE (see usb_ept_cancel_xfer()).
+ */
+struct ept_queue_item
+{
+    unsigned next;
+    unsigned info;
+    unsigned page0;
+    unsigned page1;
+    unsigned page2;
+    unsigned page3;
+    unsigned page4;
+    unsigned reserved;
+};
+
+#define TERMINATE 1
+
+#define INFO_BYTES(n)         ((n) << 16)
+#define INFO_IOC              (1 << 15)
+#define INFO_ACTIVE           (1 << 7)
+#define INFO_HALTED           (1 << 6)
+#define INFO_BUFFER_ERROR     (1 << 5)
+#define INFO_TXN_ERROR        (1 << 3)
+
+
+#define STS_NAKI              (1 << 16)  /* */
+#define STS_SLI               (1 << 8)   /* R/WC - suspend state entered */
+#define STS_SRI               (1 << 7)   /* R/WC - SOF recv'd */
+#define STS_URI               (1 << 6)   /* R/WC - RESET recv'd - write to clear */
+#define STS_FRI               (1 << 3)   /* R/WC - Frame List Rollover */
+#define STS_PCI               (1 << 2)   /* R/WC - Port Change Detect */
+#define STS_UEI               (1 << 1)   /* R/WC - USB Error */
+#define STS_UI                (1 << 0)   /* R/WC - USB Transaction Complete */
+
+
+/* bits used in all the endpoint status registers */
+#define EPT_TX(n) (1 << ((n) + 16))
+#define EPT_RX(n) (1 << (n))
+
+
+#define CTRL_TXE              (1 << 23)
+#define CTRL_TXR              (1 << 22)
+#define CTRL_TXI              (1 << 21)
+#define CTRL_TXD              (1 << 17)
+#define CTRL_TXS              (1 << 16)
+#define CTRL_RXE              (1 << 7)
+#define CTRL_RXR              (1 << 6)
+#define CTRL_RXI              (1 << 5)
+#define CTRL_RXD              (1 << 1)
+#define CTRL_RXS              (1 << 0)
+
+#define CTRL_TXT_MASK         (3 << 18)
+#define CTRL_TXT_CTRL         (0 << 18)
+#define CTRL_TXT_ISOCH        (1 << 18)
+#define CTRL_TXT_BULK         (2 << 18)
+#define CTRL_TXT_INT          (3 << 18)
+
+#define CTRL_RXT_MASK         (3 << 2)
+#define CTRL_RXT_CTRL         (0 << 2)
+#define CTRL_RXT_ISOCH        (1 << 2)
+#define CTRL_RXT_BULK         (2 << 2)
+#define CTRL_RXT_INT          (3 << 2)
+
+#define ULPI_WAKEUP           (1 << 31)
+#define ULPI_RUN              (1 << 30)
+#define ULPI_WRITE            (1 << 29)
+#define ULPI_READ             (0 << 29)
+#define ULPI_STATE_NORMAL     (1 << 27)
+#define ULPI_ADDR(n)          (((n) & 255) << 16)
+#define ULPI_DATA(n)          ((n) & 255)
+#define ULPI_DATA_READ(n)     (((n) >> 8) & 255)
+
+/* USB_PORTSC bits for determining port speed */
+#define PORTSC_PSPD_FS        (0 << 26)
+#define PORTSC_PSPD_LS        (1 << 26)
+#define PORTSC_PSPD_HS        (2 << 26)
+#define PORTSC_PSPD_MASK      (3 << 26)
+
+#endif
diff --git a/drivers/usb/function/msm_otg.c b/drivers/usb/function/msm_otg.c
new file mode 100644
index 0000000..c931290
--- /dev/null
+++ b/drivers/usb/function/msm_otg.c
@@ -0,0 +1,368 @@
+/* drivers/usb/otg/msm_otg.c
+ *
+ * OTG Driver for HighSpeed USB
+ *
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/msm_otg.h>
+#include <mach/msm_hsusb.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/board.h>
+
+#define MSM_USB_BASE (xceiv->regs)
+
+#define A_HOST 0
+#define B_DEVICE 1
+#define A_TO_B 0
+#define B_TO_A 1
+
+static struct msm_otg_transceiver *xceiv;
+
+/* Take a reference on the OTG transceiver's device and return it, or
+ * NULL if the OTG driver has not been probed.
+ */
+struct msm_otg_transceiver *msm_otg_get_transceiver(void)
+{
+	if (!xceiv)
+		return NULL;
+	get_device(xceiv->dev);
+	return xceiv;
+}
+EXPORT_SYMBOL(msm_otg_get_transceiver);
+
+/* Drop the reference taken by msm_otg_get_transceiver(). */
+void msm_otg_put_transceiver(struct msm_otg_transceiver *xceiv)
+{
+	if (!xceiv)
+		return;
+	put_device(xceiv->dev);
+}
+EXPORT_SYMBOL(msm_otg_put_transceiver);
+
+/* Gate the core and peripheral-bus clocks together (1 = on, 0 = off). */
+static void msm_otg_set_clk(int on)
+{
+	if (!on) {
+		clk_disable(xceiv->clk);
+		clk_disable(xceiv->pclk);
+		return;
+	}
+	clk_enable(xceiv->clk);
+	clk_enable(xceiv->pclk);
+}
+
+/* ID pin low (OTGSC_ID clear) means A-device, i.e. host role. */
+static inline int is_host(void)
+{
+	return (readl(USB_OTGSC) & OTGSC_ID) ? 0 : 1;
+}
+
+/* Begin OTG operation once both role drivers are registered: enable
+ * ID-pin change interrupts and start whichever role the ID pin
+ * currently selects, then unmask the OTG IRQ.
+ */
+static void msm_otg_enable(void)
+{
+	msm_otg_set_clk(1);
+	/* Enable ID interrupts */
+	writel(readl(USB_OTGSC) | OTGSC_IDIE, USB_OTGSC);
+
+	if (is_host()) {
+		pr_info("%s: configuring USB in host mode\n", __func__);
+		xceiv->hcd_ops->request(xceiv->hcd_ops->handle, REQUEST_START);
+		xceiv->state = A_HOST;
+	} else {
+		pr_info("%s: configuring USB in device mode\n", __func__);
+		xceiv->dcd_ops->request(xceiv->dcd_ops->handle, REQUEST_START);
+		xceiv->state = B_DEVICE;
+	}
+	msm_otg_set_clk(0);
+	xceiv->active = 1;
+	/* hold a short wakelock while the role driver starts up */
+	wake_lock_timeout(&xceiv->wlock, HZ/2);
+	enable_irq(xceiv->irq);
+}
+
+/* Called when one role driver deregisters.  Marks the transceiver
+ * inactive and, if the departing role currently owns the bus, hands
+ * the bus to the remaining role driver.
+ * @mode: role being torn down (A_HOST or B_DEVICE)
+ *
+ * Fixes: the inner `xceiv->state == mode` checks were provably
+ * redundant after the early return; the remaining role's ops pointer
+ * is now NULL-checked so deregistering one role before the other ever
+ * registered cannot dereference NULL.
+ */
+static void msm_otg_disable(int mode)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&xceiv->lock, flags);
+	xceiv->active = 0;
+	spin_unlock_irqrestore(&xceiv->lock, flags);
+
+	pr_info("%s: OTG is disabled\n", __func__);
+
+	/* nothing to hand over if the departing role is not current */
+	if (mode != xceiv->state)
+		return;
+	switch (mode) {
+	case A_HOST:
+		if (xceiv->dcd_ops) {
+			pr_info("%s: configuring USB in device mode\n",
+					__func__);
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = B_DEVICE;
+		}
+		break;
+	case B_DEVICE:
+		if (xceiv->hcd_ops) {
+			pr_info("%s: configuring USB in host mode\n",
+					__func__);
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = A_HOST;
+		}
+		break;
+	}
+
+}
+
+/* Deferred role switch scheduled from the ID-pin interrupt: stop the
+ * current role driver, start the other, then re-enable the OTG IRQ
+ * that the interrupt handler left disabled.
+ */
+static void msm_otg_do_work(struct work_struct *w)
+{
+	switch (xceiv->state) {
+	case A_HOST:
+		if (xceiv->flags == A_TO_B) {
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_STOP);
+			pr_info("%s: configuring USB in device mode\n",
+					__func__);
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = B_DEVICE;
+		}
+		break;
+	case B_DEVICE:
+		if (xceiv->flags == B_TO_A) {
+			xceiv->dcd_ops->request(xceiv->dcd_ops->handle,
+							REQUEST_STOP);
+			pr_info("%s: configuring USB in host mode\n",
+					__func__);
+			xceiv->hcd_ops->request(xceiv->hcd_ops->handle,
+							REQUEST_START);
+			xceiv->state = A_HOST;
+		}
+		break;
+	}
+	/* keep the system awake while the new role spins up */
+	wake_lock_timeout(&xceiv->wlock, HZ/2);
+	enable_irq(xceiv->irq);
+}
+
+/* ID-pin change interrupt: latch the direction of the role switch in
+ * xceiv->flags, defer the actual switch to msm_otg_do_work(), and
+ * leave the IRQ disabled until that work item completes.
+ */
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	u32 otgsc;
+	u32 temp;
+
+	if (!xceiv->active)
+		return IRQ_HANDLED;
+
+	if (xceiv->in_lpm)
+		return IRQ_HANDLED;
+
+	otgsc = readl(USB_OTGSC);
+	/* drop the write-1-to-clear status bits so the write-back below
+	 * only acknowledges the interrupt we actually handled */
+	temp = otgsc & ~OTGSC_INTR_STS_MASK;
+	if (otgsc & OTGSC_IDIS) {
+		wake_lock(&xceiv->wlock);
+		if (is_host()) {
+			xceiv->flags = B_TO_A;
+			schedule_work(&xceiv->work);
+		} else {
+			xceiv->flags = A_TO_B;
+			schedule_work(&xceiv->work);
+		}
+		disable_irq(xceiv->irq);
+		writel(temp | OTGSC_IDIS, USB_OTGSC);
+	}
+
+	return IRQ_HANDLED;
+
+}
+
+static DEFINE_MUTEX(otg_register_lock);
+
+/* Register (@ops != NULL) or deregister (@ops == NULL) the peripheral
+ * controller driver with OTG.  OTG operation begins once both host and
+ * peripheral drivers are present.  NOTE(review): the @xceiv parameter
+ * shadows the file-scope xceiv used by msm_otg_enable/disable; both
+ * appear to refer to the same object -- confirm.
+ */
+static int msm_otg_set_peripheral(struct msm_otg_transceiver *xceiv,
+					struct msm_otg_ops *ops)
+{
+	int ret = 0;
+
+	mutex_lock(&otg_register_lock);
+	if (!xceiv) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!ops) {
+		xceiv->dcd_ops = NULL;
+		pr_info("%s: Peripheral driver is deregistered with OTG\n",
+				__func__);
+		msm_otg_disable(B_DEVICE);
+		goto unlock;
+	}
+	if (xceiv->dcd_ops) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	xceiv->dcd_ops = ops;
+	/* keep the new driver stopped until OTG decides the role */
+	xceiv->dcd_ops->request(xceiv->dcd_ops->handle, REQUEST_STOP);
+	if (xceiv->hcd_ops)
+		msm_otg_enable();
+unlock:
+	mutex_unlock(&otg_register_lock);
+	return ret;
+}
+
+/* Register (@hcd_ops != NULL) or deregister (@hcd_ops == NULL) the
+ * host controller driver with OTG; mirrors msm_otg_set_peripheral().
+ * NOTE(review): @xceiv shadows the file-scope xceiv -- confirm both
+ * refer to the same object.
+ */
+static int msm_otg_set_host(struct msm_otg_transceiver *xceiv,
+				struct msm_otg_ops *hcd_ops)
+{
+	int ret = 0;
+
+	mutex_lock(&otg_register_lock);
+	if (!xceiv) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!hcd_ops) {
+		xceiv->hcd_ops = NULL;
+		pr_info("%s: Host driver is deregistered with OTG\n",
+				__func__);
+		msm_otg_disable(A_HOST);
+		goto unlock;
+	}
+	if (xceiv->hcd_ops) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	xceiv->hcd_ops = hcd_ops;
+	/* keep the new driver stopped until OTG decides the role */
+	xceiv->hcd_ops->request(xceiv->hcd_ops->handle, REQUEST_STOP);
+	if (xceiv->dcd_ops)
+		msm_otg_enable();
+
+unlock:
+	mutex_unlock(&otg_register_lock);
+	return ret;
+}
+
+/* Record the low-power-mode state; the IRQ handler bails out while it
+ * is set.  NOTE(review): the @otg argument is unused -- the file-scope
+ * xceiv is updated instead; confirm this is intentional.
+ */
+static int msm_otg_set_suspend(struct msm_otg_transceiver *otg, int suspend)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&xceiv->lock, flags);
+	xceiv->in_lpm = suspend;
+	spin_unlock_irqrestore(&xceiv->lock, flags);
+	return 0;
+}
+
+/* Probe: acquire clocks, map the OTG register block, and install the
+ * set_host/set_peripheral/set_suspend hooks.  The OTG IRQ is left
+ * disabled until both role drivers register (see msm_otg_enable()).
+ *
+ * Fixes: the pclk error check tested xceiv->clk instead of the just-
+ * acquired xceiv->pclk; platform_get_irq() reports failure with a
+ * negative value, not zero.
+ */
+static int __init msm_otg_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	xceiv = kzalloc(sizeof(struct msm_otg_transceiver), GFP_KERNEL);
+	if (!xceiv)
+		return -ENOMEM;
+
+	xceiv->clk = clk_get(NULL, "usb_hs_clk");
+	if (IS_ERR(xceiv->clk)) {
+		ret = PTR_ERR(xceiv->clk);
+		goto free_xceiv;
+	}
+	xceiv->pclk = clk_get(NULL, "usb_hs_pclk");
+	/* bug fix: check the clock we just acquired, not usb_hs_clk */
+	if (IS_ERR(xceiv->pclk)) {
+		ret = PTR_ERR(xceiv->pclk);
+		goto put_clk;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		goto put_pclk;
+	}
+
+	xceiv->regs = ioremap(res->start, resource_size(res));
+	if (!xceiv->regs) {
+		ret = -ENOMEM;
+		goto put_pclk;
+	}
+	xceiv->irq = platform_get_irq(pdev, 0);
+	/* bug fix: platform_get_irq() returns a negative errno on error */
+	if (xceiv->irq < 0) {
+		ret = -ENODEV;
+		goto free_regs;
+	}
+
+	/* disable interrupts before requesting irq */
+	msm_otg_set_clk(1);
+	writel(0, USB_USBINTR);
+	writel(readl(USB_OTGSC) & ~OTGSC_INTR_MASK, USB_OTGSC);
+	msm_otg_set_clk(0);
+
+	ret = request_irq(xceiv->irq, msm_otg_irq, IRQF_SHARED,
+					"msm_otg", pdev);
+	if (ret)
+		goto free_regs;
+	disable_irq(xceiv->irq);
+
+	INIT_WORK(&xceiv->work, msm_otg_do_work);
+	spin_lock_init(&xceiv->lock);
+	wake_lock_init(&xceiv->wlock, WAKE_LOCK_SUSPEND, "usb_otg");
+	wake_lock(&xceiv->wlock);
+
+	xceiv->set_host = msm_otg_set_host;
+	xceiv->set_peripheral = msm_otg_set_peripheral;
+	xceiv->set_suspend = msm_otg_set_suspend;
+
+	return 0;
+free_regs:
+	iounmap(xceiv->regs);
+put_pclk:
+	clk_put(xceiv->pclk);
+put_clk:
+	clk_put(xceiv->clk);
+free_xceiv:
+	kfree(xceiv);
+	return ret;
+
+}
+
+/* Teardown: quiesce the role-switch work, then release the IRQ,
+ * mapping and clocks in reverse probe order.  NOTE(review):
+ * wake_lock_destroy() is never called for xceiv->wlock -- confirm.
+ */
+static int __exit msm_otg_remove(struct platform_device *pdev)
+{
+	cancel_work_sync(&xceiv->work);
+	free_irq(xceiv->irq, pdev);
+	iounmap(xceiv->regs);
+	clk_put(xceiv->pclk);
+	clk_put(xceiv->clk);
+	kfree(xceiv);
+	return 0;
+}
+
+static struct platform_driver msm_otg_driver = {
+	.remove = __exit_p(msm_otg_remove),
+	.driver = {
+		.name = "msm_hsusb_otg",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_otg_init(void)
+{
+	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
+}
+
+static void __exit msm_otg_exit(void)
+{
+	platform_driver_unregister(&msm_otg_driver);
+}
+
+subsys_initcall(msm_otg_init);
+module_exit(msm_otg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB OTG driver");
+MODULE_VERSION("1.00");
diff --git a/drivers/usb/function/null.c b/drivers/usb/function/null.c
new file mode 100644
index 0000000..68f1e35
--- /dev/null
+++ b/drivers/usb/function/null.c
@@ -0,0 +1,118 @@
+/* driver/usb/function/null.c
+ *
+ * Null Function Device - A Data Sink
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+struct null_context
+{
+	struct usb_endpoint *out;
+	struct usb_request *req0;
+	struct usb_request *req1;
+};
+
+static struct null_context _context;
+
+/* Bind callback: grab the single bulk-OUT endpoint and preallocate two
+ * 4K receive requests.  NOTE(review): usb_ept_alloc_req() results are
+ * not checked; null_unbind() tolerates NULL but null_configure() does
+ * not -- confirm allocation cannot fail here.
+ */
+static void null_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+	ctxt->out = ept[0];
+	printk(KERN_INFO "null_bind() %p\n", ctxt->out);
+
+	ctxt->req0 = usb_ept_alloc_req(ctxt->out, 4096);
+	ctxt->req1 = usb_ept_alloc_req(ctxt->out, 4096);
+}
+
+/* Unbind callback: release both preallocated requests and forget the
+ * endpoint.
+ */
+static void null_unbind(void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+
+	printk(KERN_INFO "null_unbind()\n");
+	if (ctxt->req0) {
+		usb_ept_free_req(ctxt->out, ctxt->req0);
+		ctxt->req0 = NULL;
+	}
+	if (ctxt->req1) {
+		usb_ept_free_req(ctxt->out, ctxt->req1);
+		ctxt->req1 = NULL;
+	}
+	ctxt->out = NULL;
+}
+
+
+static void null_queue_out(struct null_context *ctxt, struct usb_request *req);
+
+/* Completion handler for the sink endpoint: discard the data and
+ * requeue the request unless the device is gone.
+ * Fix: removed the unused local `data` (the received bytes are
+ * intentionally ignored).
+ */
+static void null_out_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct null_context *ctxt = req->context;
+
+	if (req->status != -ENODEV)
+		null_queue_out(ctxt, req);
+}
+
+/* (Re)submit @req on the bulk-OUT endpoint for another 4K read. */
+static void null_queue_out(struct null_context *ctxt, struct usb_request *req)
+{
+	req->complete = null_out_complete;
+	req->context = ctxt;
+	req->length = 4096;
+
+	usb_ept_queue_xfer(ctxt->out, req);
+}
+
+/* Configure callback: on configuration, prime both receive requests
+ * so data starts draining immediately.
+ */
+static void null_configure(int configured, void *_ctxt)
+{
+	struct null_context *ctxt = _ctxt;
+
+	printk(KERN_INFO "null_configure() %d\n", configured);
+
+	if (!configured) {
+		/* all pending requests will be canceled */
+		return;
+	}
+
+	null_queue_out(ctxt, ctxt->req0);
+	null_queue_out(ctxt, ctxt->req1);
+}
+
+/* Function table for the "null" data sink: a single bulk-OUT endpoint
+ * with vendor-specific class codes.
+ */
+static struct usb_function usb_func_null = {
+	.bind = null_bind,
+	.unbind = null_unbind,
+	.configure = null_configure,
+
+	.name = "null",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xfe,
+	.ifc_protocol = 0x01,
+
+	.ifc_name = "null",
+
+	.ifc_ept_count = 1,
+	.ifc_ept_type = { EPT_BULK_OUT },
+};
+
+/* Module init: register the null function.
+ * Fix: propagate usb_function_register()'s result instead of silently
+ * discarding it and reporting success.
+ */
+static int __init null_init(void)
+{
+	printk(KERN_INFO "null_init()\n");
+	return usb_function_register(&usb_func_null);
+}
+
+module_init(null_init);
diff --git a/drivers/usb/function/rmnet.c b/drivers/usb/function/rmnet.c
new file mode 100644
index 0000000..e618ec0
--- /dev/null
+++ b/drivers/usb/function/rmnet.c
@@ -0,0 +1,1086 @@
+/*
+ * rmnet.c -- RmNet function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+
+#include <mach/msm_smd.h>
+#include <linux/usb/cdc.h>
+
+#include "usb_function.h"
+
+static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
+module_param(rmnet_ctl_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");
+
+static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
+module_param(rmnet_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
+
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+#define QMI_REQ_MAX		4
+#define QMI_REQ_SIZE		2048
+#define QMI_RESP_MAX		8
+#define QMI_RESP_SIZE		2048
+
+#define RX_REQ_MAX		8
+#define RX_REQ_SIZE		2048
+#define TX_REQ_MAX		8
+#define TX_REQ_SIZE		2048
+
+#define TXN_MAX 		2048
+
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+/* QMI requests & responses buffer*/
+struct qmi_buf {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+/* Control & data SMD channel private data */
+struct rmnet_smd_info {
+	struct smd_channel 	*ch;
+	struct tasklet_struct	tx_tlet;
+	struct tasklet_struct	rx_tlet;
+#define CH_OPENED	0
+	unsigned long		flags;
+	/* pending rx packet length */
+	atomic_t		rx_pkt;
+	/* wait for smd open event*/
+	wait_queue_head_t	wait;
+};
+
+struct rmnet_dev {
+	struct usb_endpoint	*epout;
+	struct usb_endpoint	*epin;
+	struct usb_endpoint	*epnotify;
+	struct usb_request 	*notify_req;
+
+	u8			ifc_id;
+	/* QMI lists */
+	struct list_head	qmi_req_pool;
+	struct list_head	qmi_resp_pool;
+	struct list_head	qmi_req_q;
+	struct list_head	qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head 	tx_idle;
+	struct list_head 	rx_idle;
+	struct list_head	rx_queue;
+
+	spinlock_t		lock;
+	atomic_t		online;
+	atomic_t		notify_count;
+
+	struct rmnet_smd_info	smd_ctl;
+	struct rmnet_smd_info	smd_data;
+
+	struct workqueue_struct *wq;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+};
+
+static struct usb_function rmnet_function;
+
+/* Allocate a qmi_buf and a @len-byte payload buffer.  Returns the
+ * buffer or ERR_PTR(-ENOMEM) on allocation failure.
+ */
+struct qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(*qmi), kmalloc_flags);
+	if (!qmi)
+		return ERR_PTR(-ENOMEM);
+
+	qmi->buf = kmalloc(len, kmalloc_flags);
+	if (!qmi->buf) {
+		kfree(qmi);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return qmi;
+}
+
+/* Free a qmi_buf allocated by rmnet_alloc_qmi(). */
+void rmnet_free_qmi(struct qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+/* Allocate a usb_request plus a @len-byte kmalloc'd buffer for @ep.
+ * Returns the request or ERR_PTR(-ENOMEM) on failure.
+ */
+struct usb_request *
+rmnet_alloc_req(struct usb_endpoint *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ept_alloc_req(ep, 0);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	req->length = len;
+	req->buf = kmalloc(len, kmalloc_flags);
+	if (!req->buf) {
+		usb_ept_free_req(ep, req);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return req;
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+void rmnet_free_req(struct usb_endpoint *ep, struct usb_request *req)
+{
+	/* buffer was kmalloc'd by rmnet_alloc_req() */
+	kfree(req->buf);
+	usb_ept_free_req(ep, req);
+}
+
+/* Completion handler for the interrupt (notify) endpoint.  On success
+ * it keeps resubmitting RESPONSE_AVAILABLE notifications until
+ * notify_count drains to zero; disconnect-style errors reset the
+ * pending count instead.
+ */
+static void rmnet_notify_complete(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		pr_err("%s: rmnet notify ep error %d\n", __func__, status);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != dev->epnotify)
+			break;
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
+		if (status) {
+			/* undo the count for the send that never happened */
+			atomic_dec(&dev->notify_count);
+			pr_err("%s: rmnet notify ep enqueue error %d\n",
+					__func__, status);
+		}
+		break;
+	}
+}
+
+/* Tell the host a QMI response is waiting by queuing a CDC
+ * RESPONSE_AVAILABLE notification.  Notifications coalesce: only the
+ * transition of notify_count from 0 to 1 queues a request; the
+ * completion handler resends for any further pending responses.
+ */
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		pr_err("%s: rmnet notify ep enqueue error %d\n",
+				__func__, status);
+	}
+}
+
+/* TODO
+ * handle modem restart events
+ */
+/*
+ * SMD event callback, shared by the control and data channels via the
+ * smd_info cookie.  Runs in SMD callback context, so real work is
+ * deferred to tasklets / the waiting worker thread.
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_smd_info *smd_info = priv;
+	int len = atomic_read(&smd_info->rx_pkt);
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
+
+		/* a USB rx packet was stalled waiting for SMD write space */
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* usb endpoints are not enabled until smd channels
+		 * are opened. wake up worker thread to continue
+		 * connection processing
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We will never come here.
+		 * reset flags after closing smd channel
+		 * */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
+/*
+ * Tasklet: move complete QMI responses from the control SMD channel
+ * into qmi_resp_q and notify the host.  Stops when SMD has no complete
+ * packet or the response buffer pool is exhausted (the pool is refilled
+ * and this tasklet rescheduled from rmnet_setup()).
+ */
+static void rmnet_control_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct qmi_buf *qmi_resp;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(dev->smd_ctl.ch);
+		if (sz == 0)
+			break;
+		/* wait until the whole packet has arrived */
+		if (smd_read_avail(dev->smd_ctl.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_resp_pool)) {
+			pr_err("%s: rmnet QMI Tx buffers full\n", __func__);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
+				struct qmi_buf, list);
+		list_del(&qmi_resp->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_response_available(dev);
+	}
+
+}
+
+/*
+ * Tasklet: drain QMI requests queued by rmnet_command_complete() into
+ * the SMD control channel.  dev->lock is dropped around smd_write()
+ * and re-taken afterwards.
+ */
+static void rmnet_control_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct qmi_buf *qmi_req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+
+		if (list_empty(&dev->qmi_req_q)) {
+			/* nothing pending: clear the stall hint */
+			atomic_set(&dev->smd_ctl.rx_pkt, 0);
+			break;
+		}
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+				struct qmi_buf, list);
+		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
+			/* record the size so SMD_EVENT_DATA reschedules us */
+			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
+			pr_debug("%s: rmnet control smd channel full\n",
+					__func__);
+			break;
+		}
+
+		list_del(&qmi_req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != qmi_req->len) {
+			pr_err("%s: rmnet control smd write failed\n",
+					__func__);
+			/* return the buffer to the pool so it is not
+			 * leaked (it was already removed from qmi_req_q)
+			 */
+			list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+			break;
+		}
+
+		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * EP0 OUT data-stage completion for SEND_ENCAPSULATED_COMMAND.  If no
+ * earlier command is pending and SMD has room, the QMI request is
+ * written straight to the control channel; otherwise it is parked on
+ * qmi_req_q (preserving ordering) for rmnet_control_rx_tlet to drain.
+ * Completes the control transfer by queueing a zero-length EP0 IN ACK.
+ */
+static void rmnet_command_complete(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_function *func = &rmnet_function;
+	struct usb_request *in_req;
+	struct qmi_buf *qmi_req;
+	int ret;
+
+	if (req->status < 0) {
+		pr_err("%s: rmnet command error %d\n", __func__, req->status);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	/* no pending control rx packet */
+	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
+		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
+			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			pr_err("%s: rmnet control smd write failed\n",
+					__func__);
+		goto ep0_ack;
+	}
+queue_req:
+	if (list_empty(&dev->qmi_req_pool)) {
+		spin_unlock(&dev->lock);
+		/* NOTE(review): the command is dropped here without the
+		 * EP0 ACK below - confirm the host side tolerates this
+		 */
+		pr_err("%s: rmnet QMI pool is empty\n", __func__);
+		return;
+	}
+
+	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
+	list_del(&qmi_req->list);
+	spin_unlock(&dev->lock);
+	/* copy outside the lock; the buffer is owned by us until it is
+	 * re-listed below
+	 */
+	memcpy(qmi_req->buf, req->buf, req->actual);
+	qmi_req->len = req->actual;
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+ep0_ack:
+	/* Send ACK on EP0 IN */
+	in_req = func->ep0_in_req;
+	in_req->length = 0;
+	in_req->complete = 0;
+	usb_ept_queue_xfer(func->ep0_in, in_req);
+}
+
+/*
+ * EP0 class-request handler for the rmnet interface.
+ *
+ * SEND_ENCAPSULATED_COMMAND: accept the OUT data stage; the payload is
+ * handled by rmnet_command_complete().
+ * GET_ENCAPSULATED_RESPONSE: copy the oldest queued QMI response into
+ * @buf for the IN data stage.
+ *
+ * Returns the number of bytes to transfer, or a negative errno.
+ */
+static int rmnet_setup(struct usb_ctrlrequest *ctrl, void *buf,
+				int len, void *context)
+{
+	struct rmnet_dev *dev = context;
+	struct usb_request *req = rmnet_function.ep0_out_req;
+	int			ret = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+	struct qmi_buf *resp;
+	int schedule = 0;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != dev->ifc_id)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != dev->ifc_id)
+			goto invalid;
+		else {
+			spin_lock(&dev->lock);
+			/* guard against a spurious host request:
+			 * list_first_entry() on an empty list would hand
+			 * back an invalid buffer
+			 */
+			if (list_empty(&dev->qmi_resp_q)) {
+				spin_unlock(&dev->lock);
+				pr_err("%s: no QMI response queued\n",
+						__func__);
+				break;
+			}
+			resp = list_first_entry(&dev->qmi_resp_q,
+					struct qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+			memcpy(buf, resp->buf, resp->len);
+			ret = resp->len;
+			spin_lock(&dev->lock);
+
+			/* returning the first free buffer may unblock the
+			 * control tx tasklet, which stalls when the
+			 * response pool runs dry
+			 */
+			if (list_empty(&dev->qmi_resp_pool))
+				schedule = 1;
+			list_add_tail(&resp->list, &dev->qmi_resp_pool);
+
+			if (schedule)
+				tasklet_schedule(&dev->smd_ctl.tx_tlet);
+			spin_unlock(&dev->lock);
+		}
+		break;
+	default:
+
+invalid:
+		pr_debug("%s: invalid control req%02x.%02x v%04x i%04x l%d\n",
+			__func__, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	return ret;
+}
+
+/*
+ * Arm every idle OUT request so the host can send data packets.
+ * Stops queueing on the first enqueue failure.
+ */
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	int status;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* NOTE(review): dev->lock is released around the queue call, so
+	 * list_for_each_safe's saved next pointer may be stale if the
+	 * idle list changes concurrently - confirm callers serialize this
+	 */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ept_queue_xfer(dev->epout, req);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			pr_err("%s: rmnet data rx enqueue err %d\n",
+					__func__, status);
+			list_add_tail(&req->list, &dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Tasklet: move complete packets from the data SMD channel into idle
+ * IN requests and queue them to the host.  Stops when SMD has no
+ * complete packet or tx_idle is exhausted (rmnet_complete_epin kicks
+ * this tasklet again when a request is returned to the pool).
+ */
+static void rmnet_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+
+		sz = smd_cur_packet_size(dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		/* wait until the whole packet has arrived */
+		if (smd_read_avail(dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			pr_debug("%s: rmnet data Tx buffers full\n", __func__);
+			break;
+		}
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
+		status = usb_ept_queue_xfer(dev->epin, req);
+		if (status) {
+			pr_err("%s: rmnet tx data enqueue err %d\n",
+					__func__, status);
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+	}
+
+}
+
+/*
+ * Tasklet: drain host data packets parked on rx_queue into the data
+ * SMD channel, then re-arm the freed OUT requests.  dev->lock is
+ * dropped around smd_write() and re-taken afterwards.
+ */
+static void rmnet_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (list_empty(&dev->rx_queue)) {
+			/* nothing pending: clear the stall hint */
+			atomic_set(&dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			/* record the size so SMD_EVENT_DATA reschedules us */
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			pr_debug("%s: rmnet SMD data channel full\n", __func__);
+			break;
+		}
+
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			pr_err("%s: rmnet SMD data write failed\n", __func__);
+			/* return the request to the idle pool so it is
+			 * not leaked (it was removed from rx_queue above)
+			 */
+			list_add_tail(&req->list, &dev->rx_idle);
+			break;
+		}
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* We have free rx data requests. */
+	rmnet_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue to rx_queue.
+ * We will not write into SMD directly until rx_queue is
+ * empty, to strictly preserve the ordering of requests.
+ */
+static void rmnet_complete_epout(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+	int ret;
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		pr_err("%s: response error %d, %d/%d\n",
+			__func__, status, req->actual,
+			req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	/* no earlier packet stalled: try the direct SMD path */
+	if (!atomic_read(&dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			pr_err("%s: rmnet data smd write failed\n", __func__);
+		/* Restart Rx */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_start_rx(dev);
+		return;
+	}
+queue_req:
+	/* park for rmnet_data_rx_tlet; ordering is preserved */
+	list_add_tail(&req->list, &dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+/*
+ * IN (device-to-host) data completion: return the request to tx_idle
+ * and, if the pool was empty (the tx tasklet may have stalled on it),
+ * kick the data tx tasklet.
+ */
+static void rmnet_complete_epin(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	int status = req->status;
+	int schedule = 0;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		pr_err("%s: rmnet data tx ep error %d\n", __func__, status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &dev->tx_idle);
+
+		if (schedule)
+			tasklet_schedule(&dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
+/*
+ * Deferred disconnect handling: quiesce all tasklets, return in-flight
+ * buffers to their pools and close both SMD channels.  Runs on dev->wq
+ * after rmnet_configure(0) has disabled the endpoints.
+ */
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+					disconnect_work);
+
+	atomic_set(&dev->notify_count, 0);
+
+	/* kill all four tasklets; the data tx tasklet must be included
+	 * (it was previously missed while the data rx tasklet was
+	 * killed twice)
+	 */
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+
+	list_for_each_safe(act, tmp, &dev->rx_queue) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	smd_close(dev->smd_ctl.ch);
+	dev->smd_ctl.flags = 0;
+
+	smd_close(dev->smd_data.ch);
+	dev->smd_data.flags = 0;
+}
+
+/*
+ * Deferred configure handling: open both SMD channels (waiting for
+ * their SMD_EVENT_OPEN callbacks), configure and enable the endpoints
+ * for the negotiated speed, then start receiving.
+ */
+static void rmnet_connect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
+	int ret;
+
+	/* Control channel for QMI messages */
+	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
+			&dev->smd_ctl, rmnet_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open control smd channel\n", __func__);
+		return;
+	}
+	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
+				&dev->smd_ctl.flags));
+
+	/* Data channel for network packets */
+	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
+			&dev->smd_data, rmnet_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open data smd channel\n", __func__);
+		smd_close(dev->smd_ctl.ch);
+		dev->smd_ctl.flags = 0;
+		/* bail out: waiting below on a channel that will never
+		 * open would block this worker forever
+		 */
+		return;
+	}
+	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
+				&dev->smd_data.flags));
+
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->epin, &rmnet_hs_in_desc);
+		usb_configure_endpoint(dev->epout, &rmnet_hs_out_desc);
+		usb_configure_endpoint(dev->epnotify, &rmnet_hs_notify_desc);
+	} else {
+		usb_configure_endpoint(dev->epin, &rmnet_fs_in_desc);
+		usb_configure_endpoint(dev->epout, &rmnet_fs_out_desc);
+		usb_configure_endpoint(dev->epnotify, &rmnet_fs_notify_desc);
+	}
+
+	usb_ept_enable(dev->epin,  1);
+	usb_ept_enable(dev->epout, 1);
+	usb_ept_enable(dev->epnotify, 1);
+
+	atomic_set(&dev->online, 1);
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+}
+
+/*
+ * Configuration callback: on configure, defer channel setup to the
+ * worker; on de-configure, mark offline, flush and disable all three
+ * endpoints, then defer cleanup to the worker.
+ */
+static void rmnet_configure(int configured, void *context)
+
+{
+	struct rmnet_dev *dev = context;
+
+	if (!configured) {
+		/* all pending requests will be canceled */
+		if (!atomic_read(&dev->online))
+			return;
+
+		atomic_set(&dev->online, 0);
+
+		usb_ept_fifo_flush(dev->epnotify);
+		usb_ept_enable(dev->epnotify, 0);
+
+		usb_ept_fifo_flush(dev->epout);
+		usb_ept_enable(dev->epout, 0);
+
+		usb_ept_fifo_flush(dev->epin);
+		usb_ept_enable(dev->epin, 0);
+
+		/* cleanup work */
+		queue_work(dev->wq, &dev->disconnect_work);
+		return;
+	}
+
+	queue_work(dev->wq, &dev->connect_work);
+}
+
+/*
+ * Release every pooled usb_request and QMI buffer plus the notify
+ * request.  Called from the bind error path and from rmnet_unbind(),
+ * i.e. only when the pools hold all buffers (nothing in flight).
+ */
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+
+	/* free all usb requests in tx pool; freed against the same
+	 * endpoint they were allocated from in rmnet_bind()
+	 */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all usb requests in rx pool; these were allocated against
+	 * the OUT endpoint in rmnet_bind(), so free them against epout
+	 * as well (not epin)
+	 */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi response pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+}
+
+/*
+ * Bind callback: allocate the three endpoints, the notify request, the
+ * QMI buffer pools and the bulk data requests.  On failure everything
+ * allocated so far is released (the error labels fall through on
+ * purpose).
+ */
+static void rmnet_bind(void *context)
+{
+	struct rmnet_dev *dev = context;
+	int i, ret;
+	struct usb_request *req;
+	struct qmi_buf *qmi;
+
+	dev->ifc_id = usb_msm_get_next_ifc_number(&rmnet_function);
+	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+	/*Configuring IN Endpoint*/
+	dev->epin = usb_alloc_endpoint(USB_DIR_IN);
+	if (!dev->epin)
+		return;
+
+	rmnet_hs_in_desc.bEndpointAddress = USB_DIR_IN |
+					dev->epin->num;
+	rmnet_fs_in_desc.bEndpointAddress = USB_DIR_IN |
+					dev->epin->num;
+
+	/*Configuring OUT Endpoint*/
+	dev->epout = usb_alloc_endpoint(USB_DIR_OUT);
+	if (!dev->epout)
+		goto free_epin;
+
+	rmnet_hs_out_desc.bEndpointAddress = USB_DIR_OUT |
+					dev->epout->num;
+	rmnet_fs_out_desc.bEndpointAddress = USB_DIR_OUT |
+					dev->epout->num;
+
+	/*Configuring NOTIFY Endpoint*/
+	dev->epnotify = usb_alloc_endpoint(USB_DIR_IN);
+	if (!dev->epnotify)
+		goto free_epout;
+
+	rmnet_hs_notify_desc.bEndpointAddress = USB_DIR_IN |
+				dev->epnotify->num;
+	rmnet_fs_notify_desc.bEndpointAddress = USB_DIR_IN |
+				dev->epnotify->num;
+
+	dev->notify_req = usb_ept_alloc_req(dev->epnotify, 0);
+	if (!dev->notify_req)
+		goto free_epnotify;
+
+	dev->notify_req->buf = kmalloc(RMNET_MAX_NOTIFY_SIZE, GFP_KERNEL);
+	if (!dev->notify_req->buf)
+		goto free_buf;
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
+
+	/* Allocate the qmi request and response buffers */
+	for (i = 0; i < QMI_REQ_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	for (i = 0; i < QMI_RESP_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	/* Allocate bulk in/out requests for data transfer */
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		/* NOTE(review): length is set to TXN_MAX although the
+		 * buffer is RX_REQ_SIZE bytes - confirm the constants
+		 * agree
+		 */
+		req->length = TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		/* NOTE(review): tx requests are queued on epin but are
+		 * allocated (and freed in rmnet_free_buf()) against
+		 * epout - confirm usb_ept_alloc_req()/usb_ept_free_req()
+		 * are endpoint-agnostic or switch both sites to epin
+		 */
+		req = rmnet_alloc_req(dev->epout, TX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->context = dev;
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+
+	pr_info("Rmnet function bind completed\n");
+
+	return;
+
+free_buf:
+	rmnet_free_buf(dev);
+free_epnotify:
+	usb_free_endpoint(dev->epnotify);
+free_epout:
+	usb_free_endpoint(dev->epout);
+free_epin:
+	usb_free_endpoint(dev->epin);
+
+}
+
+/*
+ * Unbind callback: quiesce all tasklets and pending work, release all
+ * buffers, endpoints, the workqueue and the device structure itself.
+ */
+static void rmnet_unbind(void *context)
+{
+	struct rmnet_dev *dev = context;
+
+	/* kill all four tasklets; the data tx tasklet must be included
+	 * (it was previously missed while the data rx tasklet was
+	 * killed twice)
+	 */
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+	flush_workqueue(dev->wq);
+
+	rmnet_free_buf(dev);
+	usb_free_endpoint(dev->epin);
+	usb_free_endpoint(dev->epout);
+	usb_free_endpoint(dev->epnotify);
+
+	/* the workqueue was created in rmnet_init() and would otherwise
+	 * be leaked
+	 */
+	destroy_workqueue(dev->wq);
+	kfree(dev);
+
+}
+/* msm usb_function hooks for the rmnet interface */
+static struct usb_function rmnet_function = {
+	.bind = rmnet_bind,
+	.configure = rmnet_configure,
+	.unbind = rmnet_unbind,
+	.setup  = rmnet_setup,
+	.name = "rmnet",
+};
+
+/* NULL-terminated descriptor lists, populated in rmnet_init() */
+struct usb_descriptor_header *rmnet_hs_descriptors[5];
+struct usb_descriptor_header *rmnet_fs_descriptors[5];
+/*
+ * Module init: allocate the rmnet_dev, its workqueue and tasklets,
+ * assemble the high/full speed descriptor tables and register the usb
+ * function.  Endpoint addresses in the descriptors are patched later
+ * in rmnet_bind().  Returns 0 on success or a negative errno.
+ */
+static int __init rmnet_init(void)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->smd_ctl.rx_pkt, 0);
+	atomic_set(&dev->smd_data.rx_pkt, 0);
+
+	INIT_WORK(&dev->connect_work, rmnet_connect_work);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+
+	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&dev->smd_ctl.wait);
+	init_waitqueue_head(&dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&dev->qmi_req_pool);
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_pool);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->rx_queue);
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	/* descriptor tables are NULL-terminated */
+	rmnet_hs_descriptors[0] =
+		(struct usb_descriptor_header *)&rmnet_interface_desc;
+	rmnet_hs_descriptors[1] =
+		(struct usb_descriptor_header *)&rmnet_hs_in_desc;
+	rmnet_hs_descriptors[2] =
+		(struct usb_descriptor_header *)&rmnet_hs_out_desc;
+	rmnet_hs_descriptors[3] =
+		(struct usb_descriptor_header *)&rmnet_hs_notify_desc;
+	rmnet_hs_descriptors[4] = NULL;
+
+	rmnet_fs_descriptors[0] =
+		(struct usb_descriptor_header *)&rmnet_interface_desc;
+	rmnet_fs_descriptors[1] =
+		(struct usb_descriptor_header *)&rmnet_fs_in_desc;
+	rmnet_fs_descriptors[2] =
+		(struct usb_descriptor_header *)&rmnet_fs_out_desc;
+	rmnet_fs_descriptors[3] =
+		(struct usb_descriptor_header *)&rmnet_fs_notify_desc;
+	rmnet_fs_descriptors[4] = NULL;
+
+	rmnet_function.hs_descriptors = rmnet_hs_descriptors;
+	rmnet_function.fs_descriptors = rmnet_fs_descriptors;
+	rmnet_function.context = dev;
+
+	ret = usb_function_register(&rmnet_function);
+	if (ret)
+		goto free_wq;
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+/* Module teardown: unregister the function; unbind() frees resources. */
+static void __exit rmnet_exit(void)
+{
+	usb_function_unregister(&rmnet_function);
+}
+
+module_init(rmnet_init);
+module_exit(rmnet_exit);
+MODULE_DESCRIPTION("RmNet usb function driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/function/serial.c b/drivers/usb/function/serial.c
new file mode 100644
index 0000000..0539351
--- /dev/null
+++ b/drivers/usb/function/serial.c
@@ -0,0 +1,2252 @@
+/*
+ * serial.c -- USB Serial Function driver
+ *
+ * Copyright 2003 (C) Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This code is based in part on the Gadget Zero driver, which
+ * is Copyright (C) 2003 by David Brownell, all rights reserved.
+ *
+ * This code also borrows from usbserial.c, which is
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/utsname.h>
+#include <linux/wait.h>
+#include <linux/serial.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/cdc.h>
+#include "usb_function.h"
+
+#include <linux/workqueue.h>
+/* Defines */
+
+#define GS_VERSION_STR			"v2.2"
+#define GS_VERSION_NUM			0x0202
+
+#define GS_LONG_NAME			"Serial Function"
+#define GS_SHORT_NAME			"serial"
+
+/* number of serial instances exposed; overridable module parameter,
+ * capped at MAX_INSTANCES
+ */
+static int instances = 2;
+#define MAX_INSTANCES 2
+
+#define GS_MAJOR			127
+#define GS_MINOR_START			0
+
+#define GS_NUM_PORTS			16
+
+#define GS_NO_CONFIG_ID			0
+#define GS_ACM_CONFIG_ID		2
+
+#define GS_MAX_DESC_LEN			256
+
+/* defines for maintaining serial states */
+#define	MSR_CTS		(1 << 4)
+#define	MSR_DSR		(1 << 5)
+#define	MSR_RI		(1 << 6)
+#define	MSR_CD		(1 << 7)
+#define	MCR_DTR		(1 << 0)
+#define	MCR_RTS		(1 << 1)
+#define	MCR_LOOP	(1 << 4)
+
+/* USB CDC control line state defines */
+#define USB_CDC_SET_CONTROL_LINE_STATE_DTR 0x1
+#define USB_CDC_SET_CONTROL_LINE_STATE_RTS 0x2
+
+#define GS_DEFAULT_READ_Q_SIZE		16
+#define GS_DEFAULT_WRITE_Q_SIZE		16
+#define GS_DEFAULT_INT_REQ		1
+
+#define GS_DEFAULT_WRITE_BUF_SIZE	8192
+#define GS_TMP_BUF_SIZE			8192
+
+/* seconds to wait for pending writes on close */
+#define GS_CLOSE_TIMEOUT		15
+
+#define GS_DEFAULT_USE_ACM		0
+
+/* default line coding: 9600 8N1 */
+#define GS_DEFAULT_DTE_RATE		9600
+#define GS_DEFAULT_DATA_BITS		8
+#define GS_DEFAULT_PARITY		USB_CDC_NO_PARITY
+#define GS_DEFAULT_CHAR_FORMAT		USB_CDC_1_STOP_BITS
+
+/* #define GS_DEBUG */
+
+/* debug settings */
+#ifdef GS_DEBUG
+static int debug = 1;
+
+#define gs_debug(format, arg...) \
+	do { if (debug) printk(KERN_DEBUG format, ## arg); } while (0)
+#define gs_debug_level(level, format, arg...) \
+	do { if (debug >= level) printk(KERN_DEBUG format, ## arg); } while (0)
+
+#else
+
+#define gs_debug(format, arg...) \
+	do { } while (0)
+#define gs_debug_level(level, format, arg...) \
+	do { } while (0)
+
+#endif /* GS_DEBUG */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		8
+#define SERIAL_CONFIGURED        1
+#define SERIAL_UNCONFIGURED      0
+/* Structures */
+
+struct gs_dev;
+
+/* circular buffer */
+struct gs_buf {
+	unsigned int buf_size;
+	char *buf_buf;		/* start of the allocated storage */
+	char *buf_get;		/* consumer (read) position */
+	char *buf_put;		/* producer (write) position */
+};
+
+/* list of requests */
+/* wrapper that lets a usb_request live on a list */
+struct gs_req_entry {
+	struct list_head re_entry;
+	struct usb_request *re_req;
+};
+
+/* the port structure holds info for each port, one for each minor number */
+struct gs_port {
+	struct gs_dev *port_dev;	/* pointer to device struct */
+	struct tty_struct *port_tty;	/* pointer to tty struct */
+	spinlock_t port_lock;
+	struct mutex	mutex_lock;	/* protect open/close */
+	int port_num;
+	int port_open_count;
+	int port_in_use;	/* open/close in progress */
+	wait_queue_head_t port_write_wait;	/* waiting to write */
+	struct gs_buf *port_write_buf;
+	struct usb_cdc_line_coding port_line_coding;
+	struct list_head        read_pool;
+	struct list_head        read_queue;
+	struct list_head	write_pool;
+	/* NOTE(review): presumably bytes already consumed from the
+	 * request at the head of read_queue - confirm against users
+	 */
+	unsigned                n_read;
+	unsigned int msr;	/* modem status bits (MSR_*) */
+	unsigned int prev_msr;	/* last reported modem status */
+	unsigned int mcr;	/* modem control bits (MCR_*) */
+	struct work_struct push_work;
+};
+
+/*-------------------------------------------------------------*/
+/*Allocate DMA buffer in non interrupt context(gs_bind)*/
+
+struct gs_reqbuf {
+	void *buf;
+};
+
+/*-------------------------------------------------------------*/
+
+/* the device structure holds info for the USB device */
+struct gs_dev {
+	/* lock for set/reset config */
+	spinlock_t dev_lock;
+	/* configuration number */
+	int dev_config;
+	/* address of notify endpoint */
+	struct usb_endpoint *dev_notify_ep;
+	/* address of in endpoint */
+	struct usb_endpoint *dev_in_ep;
+	struct usb_request *notify_req;
+	/* NOTE(review): presumably nonzero while notify_req is in
+	 * flight - confirm against the completion handler
+	 */
+	unsigned long notify_queued;
+	/* address of out endpoint */
+	struct usb_endpoint *dev_out_ep;
+	/* list of write requests */
+	struct list_head dev_req_list;
+	/* round robin port scheduled */
+	int dev_sched_port;
+	struct gs_port *dev_port[GS_NUM_PORTS];	/* the ports */
+	struct gs_reqbuf statusreqbuf;
+	u16 interface_num;
+
+	/*interface, endpoint descriptors*/
+	struct usb_interface_descriptor gs_ifc_desc;
+	struct usb_endpoint_descriptor gs_hs_bulkin_desc, gs_fs_bulkin_desc;
+	struct usb_endpoint_descriptor gs_hs_bulkout_desc, gs_fs_bulkout_desc;
+	struct usb_endpoint_descriptor gs_hs_notifyin_desc, gs_fs_notifyin_desc;
+	struct usb_descriptor_header **gs_fullspeed_header;
+	struct usb_descriptor_header **gs_highspeed_header;
+
+	struct usb_function *func;
+	int configured;		/* SERIAL_CONFIGURED / SERIAL_UNCONFIGURED */
+	int bound;		/* nonzero once gs_bind() has run */
+};
+
+/* Functions */
+
+/* module */
+static int __init gs_module_init(void);
+static void __exit gs_module_exit(void);
+
+static void send_notify_data(struct usb_endpoint *ep, struct usb_request *req);
+/* tty driver */
+static int gs_open(struct tty_struct *tty, struct file *file);
+static void gs_close(struct tty_struct *tty, struct file *file);
+static int gs_write(struct tty_struct *tty,
+		    const unsigned char *buf, int count);
+static int gs_put_char(struct tty_struct *tty, unsigned char ch);
+static void gs_flush_chars(struct tty_struct *tty);
+static int gs_write_room(struct tty_struct *tty);
+static int gs_chars_in_buffer(struct tty_struct *tty);
+static void gs_throttle(struct tty_struct *tty);
+static void gs_unthrottle(struct tty_struct *tty);
+static int gs_break(struct tty_struct *tty, int break_state);
+static int gs_ioctl(struct tty_struct *tty, struct file *file,
+		    unsigned int cmd, unsigned long arg);
+static void gs_set_termios(struct tty_struct *tty, struct ktermios *old);
+static unsigned gs_start_rx(struct gs_dev *dev);
+
+static int gs_send(struct gs_dev *dev);
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size);
+static void gs_read_complete(struct usb_endpoint *ep, struct usb_request *req);
+static void gs_write_complete(struct usb_endpoint *ep, struct usb_request *req);
+static int gs_tiocmget(struct tty_struct *tty, struct file *file);
+static int gs_tiocmset(struct tty_struct *tty, struct file *file,
+			unsigned int set, unsigned int clear);
+
+/* Function driver */
+static void gs_bind(void *);
+static void gs_unbind(void *);
+static int gs_setup(struct usb_ctrlrequest *req,
+		void *buf, int len, void *_ctxt);
+
+static void gs_configure(int config, void *_ctxt);
+static void gs_disconnect(void *_ctxt);
+static void gs_reset_config(struct gs_dev *dev);
+
+static struct usb_request *gs_alloc_req(struct usb_endpoint *ep,
+					unsigned int len);
+static void gs_free_req(struct usb_endpoint *ep, struct usb_request *req);
+
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
+static void gs_free_ports(struct gs_dev *dev);
+
+/* circular buffer */
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags);
+static void gs_buf_free(struct gs_buf *gb);
+static void gs_buf_clear(struct gs_buf *gb);
+static unsigned int gs_buf_data_avail(struct gs_buf *gb);
+static unsigned int gs_buf_space_avail(struct gs_buf *gb);
+static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf,
+			       unsigned int count);
+static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
+			       unsigned int count);
+
+/* Globals */
+/* one gs_dev per function instance */
+static struct gs_dev **gs_devices;
+
+/* serializes open/close per port */
+static struct semaphore gs_open_close_sem[GS_NUM_PORTS];
+
+static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
+static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
+
+static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
+
+static struct workqueue_struct *gs_tty_wq;
+
+
+/* tty driver struct */
+/* tty operations implemented by this function driver */
+static const struct tty_operations gs_tty_ops = {
+	.open = gs_open,
+	.close = gs_close,
+	.write = gs_write,
+	.put_char = gs_put_char,
+	.flush_chars = gs_flush_chars,
+	.write_room = gs_write_room,
+	.ioctl = gs_ioctl,
+	.set_termios = gs_set_termios,
+	.throttle = gs_throttle,
+	.unthrottle = gs_unthrottle,
+	.break_ctl = gs_break,
+	.chars_in_buffer = gs_chars_in_buffer,
+	.tiocmget = gs_tiocmget,
+	.tiocmset = gs_tiocmset,
+};
+static struct tty_driver *gs_tty_driver;
+
+/* Function  driver struct */
+static struct usb_function usb_function_serial[2];
+
+struct usb_function *global_func_serial;
+struct gs_dev **dum_device;
+
+/* Module */
+MODULE_DESCRIPTION(GS_LONG_NAME);
+MODULE_AUTHOR("Al Borchers");
+MODULE_LICENSE("GPL");
+
+#ifdef GS_DEBUG
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
+#endif
+
+/* parameter descriptions now match the actual defaults
+ * (GS_DEFAULT_READ_Q_SIZE / GS_DEFAULT_WRITE_Q_SIZE == 16)
+ */
+module_param(read_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(read_q_size, "Read request queue size, default=16");
+
+module_param(write_q_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_q_size, "Write request queue size, default=16");
+
+module_param(write_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
+
+module_param(instances, int, 0);
+MODULE_PARM_DESC(instances, "Number of serial instances");
+
+module_init(gs_module_init);
+module_exit(gs_module_exit);
+
+/******************************************************************************/
+
+/*
+ * CDC-ACM Class specific Descriptors
+ */
+
+/* class-specific header functional descriptor, CDC spec 1.10 */
+static const struct usb_cdc_header_desc gs_header_desc = {
+	.bLength = sizeof(gs_header_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_HEADER_TYPE,
+	.bcdCDC = __constant_cpu_to_le16(0x0110),
+};
+
+/* call management: no call handling over the data interface */
+static const struct usb_cdc_call_mgmt_descriptor gs_call_mgmt_descriptor = {
+	.bLength = sizeof(gs_call_mgmt_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities = 0,
+	.bDataInterface = 0,
+};
+
+static struct usb_cdc_acm_descriptor gs_acm_descriptor = {
+	.bLength = sizeof(gs_acm_descriptor),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_ACM_TYPE,
+	.bmCapabilities = 3,  /* bits should be 00000011 (refer to 5.2.3.3) */
+};
+
+/* union descriptor tying master (comm) and slave (data) interfaces */
+static const struct usb_cdc_union_desc gs_union_desc = {
+	.bLength = sizeof(gs_union_desc),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubType = USB_CDC_UNION_TYPE,
+	.bMasterInterface0 = 0,
+	.bSlaveInterface0 = 0,
+};
+
+/*
+ * gs_init_ifc_desc - fill in the vendor-specific data interface
+ * descriptor: three endpoints (bulk-in, bulk-out, interrupt-in),
+ * no string descriptor.  bInterfaceNumber is assigned later in gs_bind().
+ */
+static void gs_init_ifc_desc(struct usb_interface_descriptor *desc)
+{
+	desc->bLength			= USB_DT_INTERFACE_SIZE;
+	desc->bDescriptorType		= USB_DT_INTERFACE;
+	desc->bNumEndpoints		= 3;
+	desc->bInterfaceClass		= USB_CLASS_VENDOR_SPEC;
+	desc->bInterfaceSubClass	= USB_CLASS_VENDOR_SPEC;
+	desc->bInterfaceProtocol	= USB_CLASS_VENDOR_SPEC;
+	desc->iInterface		= 0;
+}
+
+#define HIGHSPEED	1
+#define	FULLSPEED	2
+
+#define BULK	1
+#define INTERRUPT	2
+/*
+ * gs_init_ep_desc - initialize an endpoint descriptor as bulk or
+ * interrupt for the given bus speed.  The direction bit of
+ * bEndpointAddress is filled in by the caller.
+ *
+ * Fix: wMaxPacketSize is a little-endian (__le16) field on the wire;
+ * store it with cpu_to_le16 as done elsewhere in this file (bcdCDC),
+ * instead of a raw host-order integer which breaks on big-endian.
+ */
+static void gs_init_ep_desc(struct usb_endpoint_descriptor *ep_desc,
+				unsigned type, unsigned speed)
+{
+	ep_desc->bLength =		USB_DT_ENDPOINT_SIZE;
+	ep_desc->bDescriptorType =	USB_DT_ENDPOINT;
+
+	if (type == BULK) {
+		ep_desc->bmAttributes = USB_ENDPOINT_XFER_BULK;
+		/* 512-byte packets at high speed, 64 at full speed */
+		if (speed == HIGHSPEED)
+			ep_desc->wMaxPacketSize = __constant_cpu_to_le16(512);
+		else
+			ep_desc->wMaxPacketSize = __constant_cpu_to_le16(64);
+	} else {
+		/* interrupt (notification) endpoint, both speeds */
+		ep_desc->bmAttributes = USB_ENDPOINT_XFER_INT;
+		ep_desc->wMaxPacketSize = __constant_cpu_to_le16(64);
+		ep_desc->bInterval = 4;
+	}
+}
+
+/*
+ * gs_init_header_desc - build the NULL-terminated descriptor-header
+ * arrays (interface + three endpoints) for both bus speeds.
+ */
+static void gs_init_header_desc(struct gs_dev *dev)
+{
+	struct usb_descriptor_header **hs = dev->gs_highspeed_header;
+	struct usb_descriptor_header **fs = dev->gs_fullspeed_header;
+
+	hs[0] = (struct usb_descriptor_header *)&dev->gs_ifc_desc;
+	hs[1] = (struct usb_descriptor_header *)&dev->gs_hs_bulkin_desc;
+	hs[2] = (struct usb_descriptor_header *)&dev->gs_hs_bulkout_desc;
+	hs[3] = (struct usb_descriptor_header *)&dev->gs_hs_notifyin_desc;
+	hs[4] = NULL;
+
+	fs[0] = (struct usb_descriptor_header *)&dev->gs_ifc_desc;
+	fs[1] = (struct usb_descriptor_header *)&dev->gs_fs_bulkin_desc;
+	fs[2] = (struct usb_descriptor_header *)&dev->gs_fs_bulkout_desc;
+	fs[3] = (struct usb_descriptor_header *)&dev->gs_fs_notifyin_desc;
+	fs[4] = NULL;
+}
+
+/*****************************************************************************/
+/*
+ *  gs_module_init
+ *
+ *  Register as a USB gadget driver and a tty driver.
+ */
+
+/* per-instance function names, indexed by instance (instances <= MAX_INSTANCES) */
+char *a[] = {"modem", "nmea"};
+
+/*
+ * Fix vs. original: every failure path now unwinds what was acquired
+ * (workqueue, tty driver/devices, per-instance memory, previously
+ * registered function instances) instead of leaking it, and the
+ * descriptor-header allocations are checked.
+ */
+static int __init gs_module_init(void)
+{
+	int i, retval;
+	struct usb_function *func;
+
+	if (instances > MAX_INSTANCES || instances == 0) {
+		printk(KERN_ERR "Incorrect instances entered \n");
+		return -ENODEV;
+	}
+
+	gs_tty_wq = create_singlethread_workqueue("gs_tty");
+	if (!gs_tty_wq)
+		return -ENOMEM;
+
+	gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
+	if (!gs_tty_driver) {
+		retval = -ENOMEM;
+		goto err_destroy_wq;
+	}
+	gs_tty_driver->owner = THIS_MODULE;
+	gs_tty_driver->driver_name = GS_SHORT_NAME;
+	gs_tty_driver->name = "ttyHSUSB";
+	gs_tty_driver->major = GS_MAJOR;
+	gs_tty_driver->minor_start = GS_MINOR_START;
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags =  TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+				| TTY_DRIVER_RESET_TERMIOS;
+	gs_tty_driver->init_termios = tty_std_termios;
+	gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL
+	    | CLOCAL;
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	for (i = 0; i < GS_NUM_PORTS; i++)
+		sema_init(&gs_open_close_sem[i], 1);
+
+	retval = tty_register_driver(gs_tty_driver);
+	if (retval) {
+		printk(KERN_ERR
+		       "gs_module_init: cannot register tty driver,ret = %d\n",
+		       retval);
+		goto err_put_tty;
+	}
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_register_device(gs_tty_driver, i, NULL);
+
+	gs_devices = kzalloc(sizeof(struct gs_dev *) * instances,
+				GFP_KERNEL);
+	if (!gs_devices) {
+		retval = -ENOMEM;
+		goto err_unreg_tty;
+	}
+
+	for (i = 0; i < instances; i++) {
+		func = &usb_function_serial[i];
+
+		gs_devices[i] = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
+		if (!gs_devices[i]) {
+			retval = -ENOMEM;
+			goto err_unwind_prev;
+		}
+		spin_lock_init(&gs_devices[i]->dev_lock);
+		INIT_LIST_HEAD(&gs_devices[i]->dev_req_list);
+		gs_devices[i]->func = func;
+		/* 1 interface + 3 endpoints -> 4 entries + 1 NULL sentinel */
+		gs_devices[i]->gs_fullspeed_header =
+		kmalloc(sizeof(struct usb_descriptor_header *) * 5, GFP_KERNEL);
+		gs_devices[i]->gs_highspeed_header =
+		kmalloc(sizeof(struct usb_descriptor_header *) * 5, GFP_KERNEL);
+		/* the original never checked these allocations */
+		if (!gs_devices[i]->gs_fullspeed_header ||
+				!gs_devices[i]->gs_highspeed_header) {
+			retval = -ENOMEM;
+			goto err_free_current;
+		}
+
+		gs_init_ifc_desc(&gs_devices[i]->gs_ifc_desc);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_bulkin_desc, BULK,
+				HIGHSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_bulkout_desc, BULK,
+				HIGHSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_hs_notifyin_desc, INTERRUPT,
+				HIGHSPEED);
+
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_bulkin_desc, BULK,
+				FULLSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_bulkout_desc, BULK,
+				FULLSPEED);
+		gs_init_ep_desc(&gs_devices[i]->gs_fs_notifyin_desc, INTERRUPT,
+				FULLSPEED);
+		gs_init_header_desc(gs_devices[i]);
+
+		/* direction bits; endpoint numbers are filled in at bind */
+		gs_devices[i]->gs_hs_bulkin_desc.bEndpointAddress = USB_DIR_IN;
+		gs_devices[i]->gs_hs_bulkout_desc.bEndpointAddress =
+								USB_DIR_OUT;
+		gs_devices[i]->gs_hs_notifyin_desc.bEndpointAddress =
+								USB_DIR_IN;
+		gs_devices[i]->gs_fs_bulkin_desc.bEndpointAddress = USB_DIR_IN;
+		gs_devices[i]->gs_fs_bulkout_desc.bEndpointAddress =
+								USB_DIR_OUT;
+		gs_devices[i]->gs_fs_notifyin_desc.bEndpointAddress =
+								USB_DIR_IN;
+
+		func->bind = gs_bind;
+		func->unbind = gs_unbind;
+		func->configure = gs_configure;
+		func->disconnect = gs_disconnect;
+		func->setup = gs_setup;
+		func->name = a[i];
+		func->context = gs_devices[i];
+		func->fs_descriptors = gs_devices[i]->gs_fullspeed_header;
+		func->hs_descriptors = gs_devices[i]->gs_highspeed_header;
+
+		retval = usb_function_register(func);
+		if (retval) {
+			printk(KERN_ERR
+	      "gs_module_init: cannot register Function driver, ret = %d\n",
+			       retval);
+			goto err_free_current;
+		}
+	}
+
+	return 0;
+
+err_free_current:
+	/* instance i was allocated but never registered */
+	kfree(gs_devices[i]->gs_fullspeed_header);
+	kfree(gs_devices[i]->gs_highspeed_header);
+	kfree(gs_devices[i]);
+err_unwind_prev:
+	/* instances [0, i) are fully registered; unwind them */
+	while (--i >= 0) {
+		usb_function_unregister(&usb_function_serial[i]);
+		kfree(gs_devices[i]->gs_fullspeed_header);
+		kfree(gs_devices[i]->gs_highspeed_header);
+		kfree(gs_devices[i]);
+	}
+	kfree(gs_devices);
+	gs_devices = NULL;
+err_unreg_tty:
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_unregister_device(gs_tty_driver, i);
+	tty_unregister_driver(gs_tty_driver);
+err_put_tty:
+	put_tty_driver(gs_tty_driver);
+err_destroy_wq:
+	destroy_workqueue(gs_tty_wq);
+	return retval;
+}
+
+/*
+* gs_module_exit
+*
+* Unregister as a tty driver and a USB gadget driver.
+*
+* Fix vs. original: also frees the gs_devices pointer array and
+* destroys the gs_tty workqueue, both of which were leaked.
+*/
+static void __exit gs_module_exit(void)
+{
+	int i;
+
+	/* stop the function instances first so no new I/O arrives */
+	for (i = 0; i < instances; i++)
+		usb_function_unregister(&usb_function_serial[i]);
+
+	for (i = 0; i < instances; ++i) {
+		kfree(gs_devices[i]->gs_fullspeed_header);
+		kfree(gs_devices[i]->gs_highspeed_header);
+		kfree(gs_devices[i]);
+	}
+	kfree(gs_devices);
+	gs_devices = NULL;
+
+	for (i = 0; i < MAX_INSTANCES; i++)
+		tty_unregister_device(gs_tty_driver, i);
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	destroy_workqueue(gs_tty_wq);
+
+	printk(KERN_INFO "gs_module_exit: %s %s unloaded\n", GS_LONG_NAME,
+	       GS_VERSION_STR);
+}
+
+/* TTY Driver */
+/*
+ * gs_open
+ *
+ * TTY open.  Looks up the gs_dev for this minor, allocates the write
+ * buffer on first open, and starts queuing RX requests.  Open/close
+ * are serialized per port by gs_open_close_sem; port_lock is dropped
+ * and re-taken around sleeping allocations, so the disconnect state
+ * (port_dev == NULL) is re-checked after every sleep.
+ */
+static int gs_open(struct tty_struct *tty, struct file *file)
+{
+	int port_num;
+	unsigned long flags;
+	struct gs_port *port;
+	struct gs_dev *dev;
+	struct gs_buf *buf;
+	struct semaphore *sem;
+	int ret;
+
+	port_num = tty->index;
+
+	gs_debug("gs_open: (%d,%p,%p)\n", port_num, tty, file);
+
+	if (port_num < 0 || port_num >= GS_NUM_PORTS) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) invalid port number\n",
+		       port_num, tty, file);
+		return -ENODEV;
+	}
+
+	dev = gs_devices[tty->index];
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL device pointer\n",
+		       port_num, tty, file);
+		return -ENODEV;
+	}
+
+	/* serialize against gs_close on the same port */
+	sem = &gs_open_close_sem[port_num];
+	if (down_interruptible(sem)) {
+		printk(KERN_ERR
+	       "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
+		       port_num, tty, file);
+		return -ERESTARTSYS;
+	}
+
+	spin_lock_irqsave(&dev->dev_lock, flags);
+	/* only port 0 of each device is used */
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) NULL port pointer\n",
+		       port_num, tty, file);
+		ret = -ENODEV;
+		goto exit_unlock_dev;
+	}
+
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (1)\n",
+		       port_num, tty, file);
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	/* additional opens just bump the count */
+	if (port->port_open_count > 0) {
+		++port->port_open_count;
+		gs_debug("gs_open: (%d,%p,%p) already open\n",
+			 port_num, tty, file);
+		ret = 0;
+		goto exit_unlock_port;
+	}
+
+	tty->driver_data = NULL;
+
+	/* mark port as in use, we can drop port lock and sleep if necessary */
+	port->port_in_use = 1;
+
+	/* allocate write buffer on first open */
+	if (port->port_write_buf == NULL) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		buf = gs_buf_alloc(write_buf_size, GFP_KERNEL);
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		/* might have been disconnected while asleep, check */
+		if (port->port_dev == NULL) {
+			printk(KERN_ERR
+			       "gs_open: (%d,%p,%p) port disconnected (2)\n",
+			       port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -EIO;
+			goto exit_unlock_port;
+		}
+
+		port->port_write_buf = buf;
+		if (port->port_write_buf == NULL) {
+			printk(KERN_ERR
+	       "gs_open: (%d,%p,%p) cannot allocate port write buffer\n",
+			       port_num, tty, file);
+			port->port_in_use = 0;
+			ret = -ENOMEM;
+			goto exit_unlock_port;
+		}
+
+	}
+
+	/* wait for carrier detect (not implemented) */
+
+	/* might have been disconnected while asleep, check */
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_open: (%d,%p,%p) port disconnected (3)\n",
+		       port_num, tty, file);
+		port->port_in_use = 0;
+		ret = -EIO;
+		goto exit_unlock_port;
+	}
+
+	/* publish the port to the TTY layer and mark it open */
+	tty->driver_data = port;
+	port->port_tty = tty;
+	port->port_tty->low_latency = 1;
+	port->port_open_count = 1;
+	port->port_in_use = 0;
+
+	gs_debug("gs_open: (%d,%p,%p) completed\n", port_num, tty, file);
+	/* Queue RX requests */
+	port->n_read = 0;
+	gs_start_rx(dev);
+
+	ret = 0;
+
+exit_unlock_port:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	up(sem);
+	return ret;
+
+exit_unlock_dev:
+	spin_unlock_irqrestore(&dev->dev_lock, flags);
+	up(sem);
+	return ret;
+
+}
+
+/*
+ * gs_close
+ */
+
+/*
+ * Evaluate (under port_lock) whether the port was disconnected or the
+ * write buffer has drained; used as the wait_event condition in
+ * gs_close().  Yields the condition value with the lock released.
+ */
+#define GS_WRITE_FINISHED_EVENT_SAFELY(p)			\
+({								\
+	int cond;						\
+								\
+	spin_lock_irq(&(p)->port_lock);				\
+	cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf); \
+	spin_unlock_irq(&(p)->port_lock);			\
+	cond;							\
+})
+
+/*
+ * gs_close - TTY close.  On the final close, waits (bounded by
+ * GS_CLOSE_TIMEOUT) for the write buffer to drain, flushes the bulk-out
+ * FIFO and detaches the tty.  If the device was disconnected, the port
+ * structure itself is freed after the lock and semaphore are released.
+ */
+static void gs_close(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port = tty->driver_data;
+	struct semaphore *sem;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_close: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
+
+	/* serialize against gs_open on the same port */
+	sem = &gs_open_close_sem[port->port_num];
+	down(sem);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR
+		       "gs_close: (%d,%p,%p) port is already closed\n",
+		       port->port_num, tty, file);
+		goto exit;
+	}
+
+	/* not the last close: just drop the count */
+	if (port->port_open_count > 1) {
+		--port->port_open_count;
+		goto exit;
+	}
+
+	/* free disconnected port on final close */
+	if (port->port_dev == NULL)
+		goto exit;
+
+
+	/* mark port as closed but in use, we can drop port lock */
+	/* and sleep if necessary */
+	port->port_in_use = 1;
+	port->port_open_count = 0;
+
+	/* wait for write buffer to drain, or */
+	/* at most GS_CLOSE_TIMEOUT seconds */
+	if (gs_buf_data_avail(port->port_write_buf) > 0) {
+		spin_unlock_irq(&port->port_lock);
+		wait_event_interruptible_timeout(port->port_write_wait,
+						 GS_WRITE_FINISHED_EVENT_SAFELY
+						 (port), GS_CLOSE_TIMEOUT * HZ);
+		spin_lock_irq(&port->port_lock);
+	}
+
+	/* free disconnected port on final close */
+	/* (might have happened during the above sleep) */
+	if (port->port_dev == NULL)
+		goto exit;
+
+
+	gs_buf_clear(port->port_write_buf);
+
+	/* Flush bulk-out pipe */
+	usb_ept_fifo_flush(port->port_dev->dev_out_ep);
+	tty->driver_data = NULL;
+	port->port_tty = NULL;
+	port->port_in_use = 0;
+
+	gs_debug("gs_close: (%d,%p,%p) completed\n", port->port_num, tty, file);
+
+exit:
+	spin_unlock_irq(&port->port_lock);
+	up(sem);
+	/* port was orphaned by disconnect; final close frees it.
+	 * NOTE(review): port_dev is read here without the lock — relies on
+	 * disconnect not racing with this point; confirm against gs_free_ports */
+	if (port->port_dev == NULL)
+		kfree(port);
+}
+
+/*
+ * gs_write - TTY write.  Copies up to 'count' bytes into the circular
+ * write buffer and, if the device is configured, kicks gs_send() to
+ * start queuing bulk-in requests.  Returns the number of bytes
+ * accepted (possibly fewer than 'count'), or a negative error.
+ */
+static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	unsigned long flags;
+	struct gs_port *port = tty->driver_data;
+	int ret;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_write: NULL port pointer\n");
+		return -EIO;
+	}
+
+	gs_debug("gs_write: (%d,%p) writing %d bytes\n", port->port_num, tty,
+		 count);
+
+	if (count == 0)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (port->port_dev == NULL) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+		ret = -EIO;
+		goto exit;
+	}
+
+	if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_write: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+		ret = -EBADF;
+		goto exit;
+	}
+
+	/* may accept fewer bytes than requested if the buffer is full */
+	count = gs_buf_put(port->port_write_buf, buf, count);
+
+
+	/* gs_send() internally drops/retakes port_lock around queueing */
+	if (port->port_dev->dev_config)
+		gs_send(gs_devices[tty->index]);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write: (%d,%p) wrote %d bytes\n", port->port_num, tty,
+		 count);
+
+	return count;
+
+exit:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/*
+ * gs_put_char - TTY single-character write.  Stores one byte into the
+ * circular write buffer; the data is actually transmitted later by
+ * gs_flush_chars()/gs_send().  Returns 1 if the byte was buffered,
+ * 0 otherwise.
+ */
+static int gs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct gs_port *port = tty->driver_data;
+	unsigned long flags;
+	int written = 0;
+
+	if (!port) {
+		printk(KERN_ERR "gs_put_char: NULL port pointer\n");
+		return 0;
+	}
+
+	gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p, %p, %p\n",
+		 port->port_num, tty, ch, __builtin_return_address(0),
+		 __builtin_return_address(1), __builtin_return_address(2));
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_dev)
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+	else if (port->port_open_count == 0)
+		printk(KERN_ERR "gs_put_char: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+	else
+		written = gs_buf_put(port->port_write_buf, &ch, 1);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return written;
+}
+
+/*
+ * gs_flush_chars - TTY flush.  Pushes any bytes buffered by
+ * gs_put_char() out via gs_send() when the device is configured.
+ */
+static void gs_flush_chars(struct tty_struct *tty)
+{
+	struct gs_port *port = tty->driver_data;
+	unsigned long flags;
+
+	if (!port) {
+		printk(KERN_ERR "gs_flush_chars: NULL port pointer\n");
+		return;
+	}
+
+	gs_debug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_dev) {
+		printk(KERN_ERR
+		       "gs_flush_chars: (%d,%p) port is not connected\n",
+		       port->port_num, tty);
+	} else if (port->port_open_count == 0) {
+		printk(KERN_ERR "gs_flush_chars: (%d,%p) port is closed\n",
+		       port->port_num, tty);
+	} else if (port->port_dev->dev_config) {
+		gs_send(gs_devices[tty->index]);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_write_room - report how many bytes the circular write buffer can
+ * still accept; 0 when the port is unknown, disconnected or closed.
+ */
+static int gs_write_room(struct tty_struct *tty)
+{
+	struct gs_port *port = tty->driver_data;
+	unsigned long flags;
+	int room = 0;
+
+	if (!port)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_dev && port->port_open_count > 0 &&
+	    port->port_write_buf)
+		room = gs_buf_space_avail(port->port_write_buf);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_write_room: (%d,%p) room=%d\n", port->port_num, tty, room);
+
+	return room;
+}
+
+/*
+ * gs_chars_in_buffer - report how many bytes are still pending in the
+ * circular write buffer; 0 when the port is unknown, disconnected or
+ * closed.
+ */
+static int gs_chars_in_buffer(struct tty_struct *tty)
+{
+	struct gs_port *port = tty->driver_data;
+	unsigned long flags;
+	int chars = 0;
+
+	if (!port)
+		return 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_dev && port->port_open_count > 0 &&
+	    port->port_write_buf)
+		chars = gs_buf_data_avail(port->port_write_buf);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gs_debug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+		 port->port_num, tty, chars);
+
+	return chars;
+}
+
+/*
+ * gs_throttle - intentionally a no-op: throttling is handled by
+ * gs_rx_push()/gs_read_complete() checking TTY_THROTTLED directly.
+ */
+static void gs_throttle(struct tty_struct *tty)
+{
+}
+
+/*
+ * gs_unthrottle - TTY layer can accept data again: reschedule the RX
+ * push worker so queued read requests get delivered.
+ *
+ * Fix vs. original: guard against a NULL driver_data like every other
+ * tty operation in this file (gs_close()/gs_disconnect() clear it).
+ */
+static void gs_unthrottle(struct tty_struct *tty)
+{
+	struct gs_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_unthrottle: NULL port pointer\n");
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	queue_work(gs_tty_wq, &port->push_work);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_break - break control is not supported; always succeeds.
+ */
+static int gs_break(struct tty_struct *tty, int break_state)
+{
+	return 0;
+}
+
+/*
+ * gs_ioctl - no driver-specific ioctls; returning -ENOIOCTLCMD lets
+ * the TTY core fall back to its generic handling.
+ */
+static int gs_ioctl(struct tty_struct *tty, struct file *file,
+		    unsigned int cmd, unsigned long arg)
+{
+	/* could not handle ioctl */
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * gs_set_termios - intentionally a no-op; line coding is controlled by
+ * the USB host via USB_CDC_REQ_SET_LINE_CODING (see gs_setup()).
+ */
+static void gs_set_termios(struct tty_struct *tty, struct ktermios *old)
+{
+}
+
+/*
+* gs_send
+*
+* This function finds available write requests, calls
+* gs_send_packet to fill these packets with data, and
+* continues until either there are no more write requests
+* available or no more data to send.  This function is
+* run whenever data arrives or write requests are available.
+*
+* Called with port->port_lock held; the lock is dropped around
+* usb_ept_queue_xfer() since completions may run re-entrantly.
+*/
+static int gs_send(struct gs_dev *dev)
+{
+	struct gs_port *port = dev->dev_port[0];
+	struct list_head *pool = &port->write_pool;
+	int status = 0;
+	/* length of the previously queued transfer; a max-packet-sized
+	 * transfer is terminated with a zero-length packet below.
+	 * NOTE(review): 'static' makes this shared across all instances,
+	 * not per-port — confirm whether that is intended */
+	static long prev_len;
+	bool do_tty_wake = false;
+	struct usb_endpoint *ep = dev->dev_in_ep;
+
+	while (!list_empty(pool)) {
+		struct usb_request *req;
+		int len;
+		req = list_entry(pool->next, struct usb_request, list);
+		len = gs_send_packet(dev, req->buf, usb_ept_get_max_packet(ep));
+		if (len == 0) {
+			/* Queue zero length packet */
+			if (prev_len == usb_ept_get_max_packet(ep)) {
+				req->length = 0;
+				list_del(&req->list);
+
+				spin_unlock(&port->port_lock);
+				status = usb_ept_queue_xfer(ep, req);
+				spin_lock(&port->port_lock);
+				if (status) {
+					printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+					list_add(&req->list, pool);
+				}
+				prev_len = 0;
+			}
+			/* buffer drained: wake gs_close()'s drain wait */
+			wake_up_interruptible(&port->port_write_wait);
+			break;
+		}
+		do_tty_wake = true;
+
+		req->length = len;
+		list_del(&req->list);
+
+		/* Drop lock while we call out of driver; completions
+		 * could be issued while we do so.  Disconnection may
+		 * happen too; maybe immediately before we queue this!
+		 * NOTE that we may keep sending data for a while after
+		 * the TTY closed (dev->ioport->port_tty is NULL).
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ept_queue_xfer(ep, req);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+			list_add(&req->list, pool);
+			break;
+		}
+		prev_len = req->length;
+
+	}
+
+	/* room was freed in the write buffer: let writers continue */
+	if (do_tty_wake && port->port_tty)
+		tty_wakeup(port->port_tty);
+	return status;
+
+}
+
+/*
+ * gs_send_packet
+ *
+ * If there is data to send, a packet is built in the given
+ * buffer and the size is returned.  If there is no data to
+ * send, 0 is returned.  If there is any error a negative
+ * error number is returned.
+ *
+ * Called during USB completion routine, on interrupt time.
+ *
+ * We assume that disconnect will not happen until all completion
+ * routines have completed, so we can assume that the dev_port
+ * array does not change during the lifetime of this function.
+ */
+static int gs_send_packet(struct gs_dev *dev, char *packet, unsigned int size)
+{
+	unsigned int len;
+	struct gs_port *port;
+
+	if (dev == NULL) {
+		/* fix: message previously misattributed to "gs_recv_packet" */
+		printk(KERN_ERR "gs_send_packet: NULL device pointer\n");
+		return -EIO;
+	}
+
+	/* TEMPORARY -- only port 0 is supported right now */
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR
+		       "gs_send_packet: port=%d, NULL port pointer\n", 0);
+		return -EIO;
+	}
+
+	/* copy at most 'size' bytes out of the circular write buffer */
+	len = gs_buf_data_avail(port->port_write_buf);
+	if (len < size)
+		size = len;
+	if (size != 0)
+		size = gs_buf_get(port->port_write_buf, packet, size);
+
+	/* space was freed in the write buffer: let writers continue */
+	if (port->port_tty)
+		tty_wakeup(port->port_tty);
+
+	return size;
+}
+
+/*
+ * gs_rx_push - workqueue handler that drains completed read requests
+ * (port->read_queue) into the TTY flip buffer, honoring partial pushes
+ * (port->n_read tracks how much of the head request was already
+ * delivered) and TTY throttling.  Recycled requests go back to
+ * read_pool and gs_start_rx() re-queues them to the endpoint.
+ */
+static void gs_rx_push(struct work_struct *work)
+{
+	struct gs_port *port = container_of(work,
+					struct gs_port,
+					push_work);
+	struct tty_struct *tty;
+	struct list_head *queue = &port->read_queue;
+	bool do_push = false;
+	struct gs_dev *dev = port->port_dev;
+
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port_tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
+
+		req = list_first_entry(queue, struct usb_request, list);
+
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
+
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+			/*printk(KERN_INFO "tty_push:%d\n",size);*/
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count == 0)
+				printk(KERN_INFO "%s: tty buffer is full: throttle\n",
+							__func__);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+	}
+	if (tty && do_push) {
+		/* tty_flip_buffer_push() may sleep/reenter; drop the lock */
+		spin_unlock_irq(&port->port_lock);
+		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+		/* tty may have been closed */
+		tty = port->port_tty;
+	}
+	/* data still pending and tty not throttled: reschedule ourselves */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				queue_work(gs_tty_wq, &port->push_work);
+		}
+	}
+	gs_start_rx(dev);
+	spin_unlock_irq(&port->port_lock);
+}
+
+/*
+* gs_read_complete
+*
+* Bulk-out completion: on success the request is queued for gs_rx_push()
+* to deliver; on shutdown it is freed; otherwise it is recycled to the
+* read pool.
+*
+* Fix vs. original: the -ENODEV and default branches manipulated
+* port->read_pool without holding port_lock, racing gs_rx_push() and
+* gs_start_rx(); they now take the lock like the success path does.
+*/
+static void gs_read_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	/* used global variable */
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	struct tty_struct *tty;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_read_complete: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+	tty = port->port_tty;
+	switch (req->status) {
+	case 0:
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_queue);
+		if (!test_bit(TTY_THROTTLED, &tty->flags))
+			queue_work(gs_tty_wq, &port->push_work);
+		spin_unlock(&port->port_lock);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_read_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	case -ENODEV:
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_pool);
+		spin_unlock(&port->port_lock);
+		/* Implemented handling in future if needed */
+		break;
+	default:
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &port->read_pool);
+		spin_unlock(&port->port_lock);
+		printk(KERN_ERR
+		"gs_read_complete: unexpected status error, status=%d\n",
+			req->status);
+		/* goto requeue; */
+		break;
+	}
+}
+
+/*
+* gs_write_complete
+*
+* Bulk-in completion: return the request to the write pool and, unless
+* the device was shut down, try to queue more buffered data.
+*
+* Fix vs. original: dev->dev_port[0] was dereferenced before the
+* dev == NULL check, defeating the check entirely.
+*/
+static void gs_write_complete(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port	*port;
+	unsigned long flags;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_write_complete: NULL device pointer\n");
+		return;
+	}
+	port = dev->dev_port[0];
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list, &port->write_pool);
+
+	switch (req->status) {
+	default:
+		/* presumably a transient fault */
+		printk(KERN_ERR "%s: unexpected status %d\n",
+				__func__, req->status);
+		/* FALL THROUGH */
+	case 0:
+		/* normal completion */
+
+		/* a ZLP just went out and nothing is buffered: done */
+		if ((req->length == 0) &&
+			(gs_buf_data_avail(port->port_write_buf) == 0)) {
+			break;
+		}
+		if (dev->dev_config)
+			gs_send(dev);
+
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		printk(KERN_DEBUG "%s: shutdown\n", __func__);
+		break;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Send Notification to host if Status changes */
+/*
+ * Builds a CDC SERIAL_STATE notification (interrupt-in) mapping the
+ * port's MSR bits (CD/DSR/RI) into the wire bitmap and queues it on
+ * the notify endpoint.  A previously queued notification, if any, is
+ * cancelled first (notify_queued bit 0 tracks in-flight state).
+ */
+static void send_notify_data(struct usb_endpoint *ep, struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct usb_cdc_notification *notify;
+	struct gs_port *port;
+	unsigned int msr, ret;
+	__le16 *data;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "send_notify_data: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR"send_notify_data:port is NULL\n");
+		return;
+	}
+
+	/* replace any notification still in flight with the fresh state */
+	if (test_bit(0, &dev->notify_queued))
+		usb_ept_cancel_xfer(dev->dev_notify_ep,
+		dev->notify_req);
+	notify = req->buf;
+	msr = port->msr;
+	/* 0xA1 = class-specific request, device-to-host, to interface */
+	notify->bmRequestType  = 0xA1;
+	notify->bNotificationType  = USB_CDC_NOTIFY_SERIAL_STATE;
+	notify->wValue  = __constant_cpu_to_le16(0);
+	notify->wIndex  = __constant_cpu_to_le16(dev->interface_num);
+	notify->wLength  = __constant_cpu_to_le16(2);
+	/* two-byte SERIAL_STATE bitmap follows the notification header */
+	data = req->buf + sizeof *notify;
+	data[0] = __constant_cpu_to_le16(((msr & MSR_CD) ? 1 : 0)
+			| ((msr & MSR_DSR) ? (1<<1) : (0<<1))
+			| ((msr & MSR_RI) ? (1<<3) : (0<<3)));
+
+	set_bit(0, &dev->notify_queued);
+	ret = usb_ept_queue_xfer(ep, req);
+	if (ret) {
+		clear_bit(0, &dev->notify_queued);
+		printk(KERN_ERR
+		"send_notify_data: cannot queue status request,ret = %d\n",
+			       ret);
+	}
+}
+
+/* Free request if -ESHUTDOWN */
+/*
+ * Notify-endpoint completion.  Clears the in-flight flag and, if the
+ * modem status changed while the previous notification was queued
+ * (e.g. via tiocmset), sends a fresh SERIAL_STATE notification.
+ */
+static void gs_status_complete(struct usb_endpoint *ep,
+				struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+
+	if (dev == NULL) {
+		printk(KERN_ERR"gs_status_complete : NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR "gs_status_complete: NULL port pointer\n");
+		return;
+	}
+
+	clear_bit(0, &dev->notify_queued);
+	switch (req->status) {
+	case 0:
+
+		gs_debug("%s:port->msr=%x,dev=%p,ep=%p,req=%p", __func__,
+			port->msr, dev, dev->dev_notify_ep, dev->notify_req);
+		/* executed only if data missed because of
+		** request already in queue and user modifies using tiocmset */
+		if (port->prev_msr != port->msr) {
+			send_notify_data(dev->dev_notify_ep, dev->notify_req);
+			port->prev_msr = port->msr;
+		}
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		gs_debug("gs_status_complete: shutdown\n");
+		gs_free_req(ep, req);
+		break;
+
+	default:
+		printk(KERN_ERR
+	       "gs_status_complete: unexpected status error, status=%d\n",
+		       req->status);
+		break;
+	}
+}
+
+/* Function Driver */
+/*
+ * gs_bind
+ *
+ * Called on module load.  Allocates and initializes the device
+ * structure and a control request.
+ *
+ * Fix vs. original: the status-request buffer was allocated in a loop
+ * (GS_DEFAULT_INT_REQ iterations) into the SAME gs_reqbuf, leaking all
+ * but the last allocation whenever the count is greater than one; it
+ * is now allocated exactly once.
+ * NOTE(review): the endpoint-allocation failure paths still leave
+ * earlier allocations behind, matching the original behavior — confirm
+ * whether gs_unbind() is guaranteed to run in those cases.
+ */
+static void gs_bind(void *_ctxt)
+{
+	struct usb_endpoint *ep;
+	struct gs_dev *dev = _ctxt;
+	struct usb_function *func = dev->func;
+	int ret;
+
+	if (func == NULL) {
+		pr_err("%s: NULL function pointer\n", __func__);
+		return;
+	}
+
+	ret = gs_alloc_ports(dev, GFP_KERNEL);
+	if (ret != 0) {
+		pr_err("%s: cannot allocate ports\n", __func__);
+		gs_unbind(_ctxt);
+		return;
+	}
+
+	/* interface number is assigned by the function framework */
+	ret = usb_msm_get_next_ifc_number(func);
+	dev->gs_ifc_desc.bInterfaceNumber = ret;
+	dev->gs_ifc_desc.iInterface = 0;
+
+	/*Configuring IN Endpoint*/
+	ep = dev->dev_in_ep = usb_alloc_endpoint(USB_DIR_IN);
+	if (!ep) {
+		pr_err("%s: in endpoint allocation failed\n", __func__);
+		return;
+	}
+	dev->gs_hs_bulkin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	dev->gs_fs_bulkin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: bulk_in_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/*Configuring OUT endpoint*/
+	ep = dev->dev_out_ep = usb_alloc_endpoint(USB_DIR_OUT);
+	if (!ep) {
+		pr_err("out endpoint allocation failed\n");
+		return;
+	}
+	dev->gs_hs_bulkout_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	dev->gs_fs_bulkout_desc.bEndpointAddress = USB_DIR_OUT | ep->num;
+	pr_debug("%s: bulk_out_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/*Configuring NOTIFY endpoint*/
+	ep = dev->dev_notify_ep = usb_alloc_endpoint(USB_DIR_IN);
+	if (!ep) {
+		pr_err("notify endpoint allocation failed\n");
+		return;
+	}
+	dev->gs_hs_notifyin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	dev->gs_fs_notifyin_desc.bEndpointAddress = USB_DIR_IN | ep->num;
+	pr_debug("%s: notify_in_endpoint Number = %d\n",
+						__func__, ep->num);
+
+	/* single 64-byte buffer for interrupt status notifications */
+	dev->statusreqbuf.buf = kmalloc(64, GFP_KERNEL);
+	if (!dev->statusreqbuf.buf)
+		return;
+
+	dev->bound = 1;
+	return;
+}
+
+/*
+ * gs_unbind
+ *
+ * Called on module unload.  Frees the control request and device
+ * structure.  Safe to call on a device that never finished binding
+ * (dev->bound guards the teardown).
+ */
+static void /* __init_or_exit */ gs_unbind(void *_ctxt)
+{
+	struct gs_dev *dev = _ctxt;
+
+	if (!dev) {
+		pr_err("%s: error: null device\n", __func__);
+		return;
+	}
+	if (!dev->bound)
+		return;
+
+	kfree(dev->statusreqbuf.buf);
+
+	/* flush, disable and release each endpoint acquired in gs_bind() */
+	if (dev->dev_in_ep) {
+		usb_ept_fifo_flush(dev->dev_in_ep);
+		usb_ept_enable(dev->dev_in_ep,  0);
+		usb_free_endpoint(dev->dev_in_ep);
+	}
+	if (dev->dev_out_ep) {
+		usb_ept_fifo_flush(dev->dev_out_ep);
+		usb_ept_enable(dev->dev_out_ep,  0);
+		usb_free_endpoint(dev->dev_out_ep);
+	}
+	if (dev->dev_notify_ep) {
+		usb_ept_fifo_flush(dev->dev_notify_ep);
+		usb_ept_enable(dev->dev_notify_ep,  0);
+		usb_free_endpoint(dev->dev_notify_ep);
+	}
+
+	gs_free_ports(dev);
+	dev->bound = 0;
+	pr_debug("%s: %s %s\n", __func__, GS_LONG_NAME, GS_VERSION_STR);
+}
+
+/*
+ * gser_complete_set_line_coding - EP0 OUT data-phase completion for
+ * USB_CDC_REQ_SET_LINE_CODING: validate the payload, store the host's
+ * line coding on the port, and ACK with a zero-length IN packet.
+ *
+ * Fix vs. original: 'dev' was dereferenced (dev->dev_port[0], and
+ * dev->func inside the error branch) BEFORE it was checked for NULL,
+ * making the check useless.  dev is now validated first.
+ */
+static void gser_complete_set_line_coding(struct usb_endpoint *ep,
+		struct usb_request *req)
+{
+	struct gs_dev *dev = (struct gs_dev *)req->device;
+	struct gs_port *port;
+	struct usb_cdc_line_coding *value;
+	struct usb_request *in_req;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "%s(): dev or dev_port is null\n", __func__);
+		return;
+	}
+	if (dev->dev_port[0] == NULL) {
+		printk(KERN_ERR "%s(): dev or dev_port is null\n", __func__);
+		usb_ept_set_halt(dev->func->ep0_in);
+		return;
+	}
+	port = dev->dev_port[0];
+	if (req->actual != sizeof(port->port_line_coding)) {
+		printk(KERN_ERR "%s(): received wrong data\n", __func__);
+		usb_ept_set_halt(dev->func->ep0_in);
+		return;
+	}
+
+	/* Use Host assigned port_line setting */
+	value = req->buf;
+	port->port_line_coding = *value;
+
+	/* Send ACK (zero-length packet) on EP0 IN */
+	in_req = dev->func->ep0_in_req;
+	in_req->length = 0;
+	in_req->complete = 0;
+	usb_ept_queue_xfer(dev->func->ep0_in, in_req);
+}
+
+/*
+ * gs_setup - class-specific EP0 control request handler.  Returns the
+ * number of bytes to transfer in the data phase, 0 for no-data
+ * requests, or a negative error to stall.  Handles the CDC-ACM
+ * SET/GET_LINE_CODING and SET_CONTROL_LINE_STATE requests.
+ */
+static int gs_setup(struct usb_ctrlrequest *ctrl,
+		void *buf, int len, void *_ctxt)
+{
+	int ret = -EOPNOTSUPP;
+	struct gs_dev *dev = _ctxt;
+	struct gs_port *port;/* ACM only has one port */
+	u16 wIndex = le16_to_cpu(ctrl->wIndex);
+	u16 wValue = le16_to_cpu(ctrl->wValue);
+	u16 wLength = le16_to_cpu(ctrl->wLength);
+
+	if (dev == NULL) {
+		printk(KERN_ERR"gs_setup:device pointer NULL\n");
+		return 0;
+	}
+	port = dev->dev_port[0];
+
+	if (port == NULL) {
+		printk(KERN_ERR"gs_setup: port pointer is NULL\n");
+		return 0;
+	}
+	switch (ctrl->bRequest) {
+
+	case USB_CDC_REQ_SET_LINE_CODING:
+		/* payload arrives in the OUT data phase; register the
+		 * completion that will actually apply the line coding */
+		if (port) {
+			struct usb_request *req = dev->func->ep0_out_req;
+			ret = min(wLength,
+				(u16) sizeof(struct usb_cdc_line_coding));
+			if (ret != sizeof(struct usb_cdc_line_coding))
+				ret = -EOPNOTSUPP;
+			else {
+				req->device = dev;
+				req->complete = gser_complete_set_line_coding;
+				}
+		} else
+			ret = -ENODEV;
+		break;
+
+	case USB_CDC_REQ_GET_LINE_CODING:
+		port = dev->dev_port[0];/* ACM only has one port */
+		ret = min(wLength, (u16) sizeof(struct usb_cdc_line_coding));
+		if (port) {
+			spin_lock(&port->port_lock);
+			memcpy(buf, &port->port_line_coding, ret);
+			spin_unlock(&port->port_lock);
+		}
+		break;
+	case USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* host drives DTR/RTS via wValue bits */
+		port = dev->dev_port[0];/* ACM only has one port */
+		if (wValue & USB_CDC_SET_CONTROL_LINE_STATE_DTR) {
+			port->mcr |= MCR_DTR;
+		} else	{
+			port->mcr &= ~MCR_DTR;
+		}
+		if (wValue & USB_CDC_SET_CONTROL_LINE_STATE_RTS)
+			port->mcr |= MCR_RTS;
+		else
+			port->mcr &= ~MCR_RTS;
+
+		/* remember which interface to address in notifications */
+		dev->interface_num = wIndex;
+		ret = 0;
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * gs_disconnect
+ *
+ * Called when the cable is pulled: marks the device unconfigured,
+ * wakes blocked writers, hangs up any attached tty and clears the
+ * modem-control/status shadows.
+ */
+static void gs_disconnect(void *_ctxt)
+{
+	struct gs_dev *dev = _ctxt;
+	struct gs_port *port = dev->dev_port[0];
+	unsigned long flags;
+
+	/* tell the TTY glue not to do I/O here any more */
+	spin_lock_irqsave(&port->port_lock, flags);
+	dev->dev_config = 0;
+	if (port->port_open_count > 0 || port->port_in_use) {
+		wake_up_interruptible(&port->port_write_wait);
+		if (port->port_tty) {
+			wake_up_interruptible(&port->port_tty->read_wait);
+			wake_up_interruptible(&port->port_tty->write_wait);
+			tty_hangup(port->port_tty);
+		}
+	}
+	port->mcr = 0;
+	port->msr = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+}
+/*
+ * gs_configure
+ *
+ * Configures the device by enabling device specific
+ * optimizations, setting up the endpoints, allocating
+ * read and write requests and queuing read requests.
+ *
+ * The device lock must be held when calling this function.
+ *
+ * Fix: the original read port->read_pool/write_pool before the
+ * port == NULL check, so a missing port crashed instead of logging.
+ */
+static void gs_configure(int config, void *_ctxt)
+{
+	int i, ret = 0;
+	unsigned MaxPacketSize;
+	struct gs_dev *dev = _ctxt;
+	struct usb_endpoint *ep;
+	struct usb_request *req;
+	struct gs_port *port;
+	struct list_head *rhead;
+	struct list_head *whead;
+	unsigned started = 0;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_configure: NULL device pointer\n");
+		return;
+	}
+	if (!dev->bound)
+		return;
+
+	/* validate the port before touching its lists */
+	port = dev->dev_port[0];
+	if (port == NULL) {
+		printk(KERN_ERR "gs_configure:port is NULL\n");
+		return;
+	}
+	rhead = &port->read_pool;
+	whead = &port->write_pool;
+
+	if (!config) {
+		gs_debug("gs_configure: Deconfigure\n");
+		dev->configured = SERIAL_UNCONFIGURED;
+		gs_reset_config(dev);
+		return;
+	}
+	dev->dev_config = config;
+
+	if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL ||
+	    (dev->dev_notify_ep == NULL)) {
+		printk(KERN_ERR "gs_configure : cannot find endpoints\n");
+		ret = -ENODEV;
+		goto reset_config;
+	}
+
+	/* pick the descriptor set matching the negotiated bus speed */
+	if (usb_msm_get_speed() == USB_SPEED_HIGH) {
+		usb_configure_endpoint(dev->dev_in_ep, &dev->gs_hs_bulkin_desc);
+		usb_configure_endpoint(dev->dev_out_ep,
+					&dev->gs_hs_bulkout_desc);
+		usb_configure_endpoint(dev->dev_notify_ep,
+					&dev->gs_hs_notifyin_desc);
+	} else {
+		usb_configure_endpoint(dev->dev_in_ep, &dev->gs_fs_bulkin_desc);
+		usb_configure_endpoint(dev->dev_out_ep,
+					&dev->gs_fs_bulkout_desc);
+		usb_configure_endpoint(dev->dev_notify_ep,
+					&dev->gs_fs_notifyin_desc);
+	}
+	usb_ept_enable(dev->dev_in_ep, 1);
+	usb_ept_enable(dev->dev_out_ep, 1);
+	usb_ept_enable(dev->dev_notify_ep, 1);
+
+	gs_debug("gs_configure: endpoint sizes and buffers\n");
+	/* allocate and queue read requests */
+	ep = dev->dev_out_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < read_q_size; i++) {
+		req = gs_alloc_req(ep, MaxPacketSize);
+		if (req) {
+			req->device = (void *)dev;
+			req->length = MaxPacketSize;
+			req->complete = gs_read_complete;
+			list_add_tail(&req->list, rhead);
+			gs_debug("gs_configure: queuing read request(%d)\n", i);
+		} else {
+			printk(KERN_ERR
+			"gs_configure: cannot allocate read request(%d)\n", i);
+			goto reset_config;
+		}
+	}
+
+	/* allocate write requests, and put on free list */
+	ep = dev->dev_in_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < write_q_size; i++) {
+		req = gs_alloc_req(ep, MaxPacketSize);
+		if (req) {
+			req->device = (void *)dev;
+			req->length = MaxPacketSize;
+			req->complete = gs_write_complete;
+			list_add_tail(&req->list, whead);
+		} else {
+			printk(KERN_ERR
+			"gs_configure: cannot allocate write request(%d)\n", i);
+			goto reset_config;
+		}
+	}
+
+	/* interrupt-IN notification request; the buffer itself lives in
+	 * dev->statusreqbuf (allocated at bind time), so request the
+	 * usb_request with no buffer of its own.
+	 * NOTE(review): an allocation failure here is silently ignored
+	 * (no goto reset_config) -- confirm this is intentional. */
+	ep = dev->dev_notify_ep;
+	MaxPacketSize = usb_ept_get_max_packet(ep);
+	for (i = 0; i < GS_DEFAULT_INT_REQ; ++i) {
+		struct gs_reqbuf *bh = &dev->statusreqbuf;
+		dev->notify_req = req = gs_alloc_req(ep, 0);
+		if (req) {
+			req->device = (void *)dev;
+			req->buf = bh->buf;
+			req->length = MaxPacketSize;
+			req->complete = gs_status_complete;
+		}
+	}
+	if (port->port_open_count) {
+		unsigned long flags;
+		spin_lock_irqsave(&port->port_lock, flags);
+		started = gs_start_rx(dev);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		if (started)
+			tty_wakeup(port->port_tty);
+	}
+
+	dev->configured = SERIAL_CONFIGURED;
+
+	return;
+
+reset_config:
+	printk(KERN_ERR "gs_configure(end): error, calling gs_reset_config\n");
+	gs_reset_config(dev);
+	return;
+}
+/*
+ * gs_start_rx
+ *
+ * Queues every request in the read pool on the bulk-out endpoint.
+ * Caller must hold port->port_lock; the lock is dropped around each
+ * usb_ept_queue_xfer() call.  Returns the number of requests queued.
+ */
+static unsigned gs_start_rx(struct gs_dev *dev)
+{
+	struct gs_port *port = dev->dev_port[0];
+	struct list_head *pool = &port->read_pool;
+	unsigned ret = 0;
+	struct usb_endpoint *ep = dev->dev_out_ep;
+	unsigned started = 0;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		struct tty_struct	*tty;
+		tty = port->port_tty;
+		if (!tty) {
+			printk(KERN_ERR "%s: tty is null\n", __func__);
+			break;
+		}
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		/* drop the lock while queuing; the completion handler may
+		 * need it */
+		spin_unlock(&port->port_lock);
+		ret = usb_ept_queue_xfer(ep, req);
+		spin_lock(&port->port_lock);
+		if (ret) {
+			/* queue failed: return the request to the pool */
+			list_add(&req->list, pool);
+			break;
+		}
+		started++;
+
+	}
+	return started;
+}
+/*
+ * gs_reset_config
+ *
+ * Mark the device as not configured, disable all endpoints,
+ * which forces completion of pending I/O and frees queued
+ * requests, and free the remaining write requests on the
+ * free list.
+ *
+ * The device lock must be held when calling this function.
+ */
+static void gs_reset_config(struct gs_dev *dev)
+{
+	struct gs_port *port;
+	struct usb_request *req;
+	unsigned long flags;
+
+	if (dev == NULL) {
+		printk(KERN_ERR "gs_reset_config: NULL device pointer\n");
+		return;
+	}
+
+	port = dev->dev_port[0];
+
+	/* retire anything still queued on the hardware first */
+	if (dev->dev_out_ep)
+		usb_free_endpoint_all_req(dev->dev_out_ep);
+	if (dev->dev_in_ep)
+		usb_free_endpoint_all_req(dev->dev_in_ep);
+	if (dev->dev_notify_ep)
+		usb_free_endpoint_all_req(dev->dev_notify_ep);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	dev->dev_config = GS_NO_CONFIG_ID;
+	/* free write requests on the free list */
+	while (!list_empty(&port->write_pool)) {
+		req = list_entry(port->write_pool.next,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_in_ep, req);
+	}
+
+	/* free read requests from read pool */
+	while (!list_empty(&port->read_pool)) {
+		req = list_entry(port->read_pool.next,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_out_ep, req);
+	}
+
+	/* free read requests from read queue */
+	while (!list_empty(&port->read_queue)) {
+		req = list_entry(port->read_queue.next,
+				       struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(dev->dev_out_ep, req);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/*
+ * gs_alloc_req
+ *
+ * Thin wrapper around usb_ept_alloc_req() that tolerates a NULL
+ * endpoint.  Returns the new request, or NULL on failure.
+ */
+static struct usb_request *gs_alloc_req(struct usb_endpoint *ep,
+					unsigned int len)
+{
+	if (!ep)
+		return NULL;
+
+	return usb_ept_alloc_req(ep, len);
+}
+
+/*
+ * gs_free_req
+ *
+ * Release a request (and its buffer) back to the endpoint.
+ * NULL endpoint or request is a no-op.
+ */
+static void gs_free_req(struct usb_endpoint *ep, struct usb_request *req)
+{
+	if (!ep || !req)
+		return;
+
+	usb_ept_free_req(ep, req);
+}
+
+/*
+ * gs_alloc_ports
+ *
+ * Allocate all ports and set the gs_dev struct to point to them.
+ * Return 0 if successful, or a negative error number.
+ *
+ * The device lock is normally held when calling this function.
+ *
+ * NOTE(review): on a mid-loop -ENOMEM, ports allocated so far stay in
+ * dev->dev_port[] -- presumably the caller cleans up via
+ * gs_free_ports(); confirm against the bind path.
+ */
+static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags)
+{
+	int i;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return -EIO;
+
+	for (i = 0; i < GS_NUM_PORTS; i++) {
+		port = kzalloc(sizeof(struct gs_port), kmalloc_flags);
+		if (port == NULL)
+			return -ENOMEM;
+
+		INIT_WORK(&port->push_work, gs_rx_push);
+		INIT_LIST_HEAD(&port->read_pool);
+		INIT_LIST_HEAD(&port->read_queue);
+		INIT_LIST_HEAD(&port->write_pool);
+		port->msr = 0;
+		port->prev_msr = 0;
+		port->mcr = 0;
+		port->port_dev = dev;
+		port->port_num = i;
+		/* default line coding until the host sets its own */
+		port->port_line_coding.dwDTERate =
+		    cpu_to_le32(GS_DEFAULT_DTE_RATE);
+		port->port_line_coding.bCharFormat = GS_DEFAULT_CHAR_FORMAT;
+		port->port_line_coding.bParityType = GS_DEFAULT_PARITY;
+		port->port_line_coding.bDataBits = GS_DEFAULT_DATA_BITS;
+		spin_lock_init(&port->port_lock);
+		mutex_init(&port->mutex_lock);
+		init_waitqueue_head(&port->port_write_wait);
+
+		dev->dev_port[i] = port;
+	}
+
+	return 0;
+}
+
+/*
+ * gs_free_ports
+ *
+ * Free all closed ports.  Open ports are disconnected by
+ * freeing their write buffers, setting their device pointers
+ * and the pointers to them in the device to NULL.  These
+ * ports will be freed when closed.
+ *
+ * The device lock is normally held when calling this function.
+ */
+static void gs_free_ports(struct gs_dev *dev)
+{
+	int i;
+	unsigned long flags;
+	struct gs_port *port;
+
+	if (dev == NULL)
+		return;
+
+	for (i = 0; i < GS_NUM_PORTS; i++) {
+		port = dev->dev_port[i];
+		if (port != NULL) {
+			dev->dev_port[i] = NULL;
+
+			spin_lock_irqsave(&port->port_lock, flags);
+
+			if (port->port_write_buf != NULL) {
+				gs_buf_free(port->port_write_buf);
+				port->port_write_buf = NULL;
+			}
+
+			if (port->port_open_count > 0 || port->port_in_use) {
+				/* still open: orphan it and wake sleepers;
+				 * the close path frees it later */
+				port->port_dev = NULL;
+				wake_up_interruptible(&port->port_write_wait);
+				if (port->port_tty) {
+					wake_up_interruptible
+					    (&port->port_tty->read_wait);
+					wake_up_interruptible
+					    (&port->port_tty->write_wait);
+				}
+				spin_unlock_irqrestore(&port->port_lock, flags);
+			} else {
+				spin_unlock_irqrestore(&port->port_lock, flags);
+				kfree(port);
+			}
+
+		}
+	}
+}
+
+/* Circular Buffer */
+
+/*
+ * gs_buf_alloc
+ *
+ * Allocate a circular buffer of @size bytes plus its bookkeeping
+ * structure.  Returns NULL on zero size or allocation failure.
+ */
+static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
+{
+	struct gs_buf *gb;
+
+	if (!size)
+		return NULL;
+
+	gb = kmalloc(sizeof(*gb), kmalloc_flags);
+	if (!gb)
+		return NULL;
+
+	gb->buf_buf = kmalloc(size, kmalloc_flags);
+	if (!gb->buf_buf) {
+		kfree(gb);
+		return NULL;
+	}
+
+	/* empty buffer: get and put both at the start */
+	gb->buf_size = size;
+	gb->buf_get = gb->buf_buf;
+	gb->buf_put = gb->buf_buf;
+
+	return gb;
+}
+
+/*
+ * gs_buf_free
+ *
+ * Free the circular buffer and its backing storage; NULL is a no-op.
+ */
+void gs_buf_free(struct gs_buf *gb)
+{
+	if (!gb)
+		return;
+
+	kfree(gb->buf_buf);
+	kfree(gb);
+}
+
+/*
+ * gs_buf_clear
+ *
+ * Discard all buffered data, as if everything available had been
+ * consumed by a get.
+ */
+void gs_buf_clear(struct gs_buf *gb)
+{
+	if (!gb)
+		return;
+
+	gb->buf_get = gb->buf_put;
+}
+
+/*
+ * gs_buf_data_avail
+ *
+ * Number of bytes currently stored in the circular buffer
+ * (0 for a NULL buffer).
+ */
+unsigned int gs_buf_data_avail(struct gs_buf *gb)
+{
+	if (!gb)
+		return 0;
+
+	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
+}
+
+/*
+ * gs_buf_space_avail
+ *
+ * Number of bytes that can still be stored; one slot is kept free to
+ * distinguish a full buffer from an empty one.
+ */
+unsigned int gs_buf_space_avail(struct gs_buf *gb)
+{
+	if (!gb)
+		return 0;
+
+	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
+}
+
+/*
+ * gs_buf_put
+ *
+ * Copy data from the given buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	len = gs_buf_space_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	/* bytes until the physical end of the storage */
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		/* wraps: fill to the end, continue at the start */
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf + len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else		/* count == len */
+			gb->buf_put = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+ * gs_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+unsigned int gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
+{
+	unsigned int len;
+
+	if (gb == NULL)
+		return 0;
+
+	len = gs_buf_data_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	/* bytes until the physical end of the storage */
+	len = gb->buf_buf + gb->buf_size - gb->buf_get;
+	if (count > len) {
+		/* wraps: drain to the end, continue at the start */
+		memcpy(buf, gb->buf_get, len);
+		memcpy(buf + len, gb->buf_buf, count - len);
+		gb->buf_get = gb->buf_buf + count - len;
+	} else {
+		memcpy(buf, gb->buf_get, count);
+		if (count < len)
+			gb->buf_get += count;
+		else		/* count == len */
+			gb->buf_get = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+ * gs_tiocmget
+ *
+ * TIOCMGET handler: translate the shadow MCR/MSR registers into the
+ * TIOCM_* bitmask expected by the tty layer.
+ */
+static int gs_tiocmget(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port;
+	unsigned int mcr, msr;
+	unsigned int result = 0;
+	struct gs_dev *dev = gs_devices[tty->index];
+
+	if (dev == NULL)
+		return -EIO;
+
+	port = dev->dev_port[0];
+	if (port == NULL)
+		return -EIO;
+
+	/* snapshot both registers atomically w.r.t. gs_tiocmset() */
+	mutex_lock(&port->mutex_lock);
+	mcr = port->mcr;
+	msr = port->msr;
+
+	result = ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
+		| ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
+		| ((mcr & MCR_LOOP) ? TIOCM_LOOP : 0)
+		| ((msr & MSR_CD) ? TIOCM_CD : 0)
+		| ((msr & MSR_RI) ? TIOCM_RI : 0)
+		| ((msr & MSR_DSR) ? TIOCM_DSR : 0)
+		| ((msr & MSR_CTS) ? TIOCM_CTS : 0);
+
+	mutex_unlock(&port->mutex_lock);
+	return result;
+}
+
+/*
+ * gs_tiocmset
+ *
+ * TIOCMSET handler: apply DSR/RI/CD/CTS set/clear masks to the shadow
+ * MSR and, if it changed, push a serial-state notification to the
+ * host over the interrupt endpoint.
+ */
+static int gs_tiocmset(struct tty_struct *tty, struct file *file,
+	unsigned int set, unsigned int clear)
+{
+	struct gs_port *port;
+	unsigned int mcr;
+	unsigned int msr;
+	struct gs_dev *dev = gs_devices[tty->index];
+
+	if (dev == NULL)
+		return -EIO;
+	port = dev->dev_port[0];
+
+	if (port == NULL)
+		return -EIO;
+
+	mcr = port->mcr;
+	msr = port->msr;
+	/* notifications only make sense while configured */
+	if (dev->configured != SERIAL_CONFIGURED)
+		return -EIO;
+
+	/* only the input-status bits can be driven from this side */
+	set &= TIOCM_DSR | TIOCM_RI | TIOCM_CD | TIOCM_CTS;
+
+	if (set & TIOCM_DSR)
+		msr |= MSR_DSR;
+	if (set & TIOCM_RI)
+		msr |= MSR_RI;
+	if (set & TIOCM_CD)
+		msr |= MSR_CD;
+	if (set & TIOCM_CTS)
+		msr |= MSR_CTS;
+
+	clear &= TIOCM_DSR | TIOCM_RI | TIOCM_CD | TIOCM_CTS;
+
+	if (clear & TIOCM_RI)
+		msr &= ~MSR_RI;
+	if (clear & TIOCM_DSR)
+		msr &= ~MSR_DSR;
+	if (clear & TIOCM_CD)
+		msr &= ~MSR_CD;
+	if (clear & TIOCM_CTS)
+		msr &= ~MSR_CTS;
+
+	mutex_lock(&port->mutex_lock);
+	port->mcr = mcr;
+	port->msr = msr;
+
+	/* tell the host only when something actually changed */
+	if (port->prev_msr != port->msr) {
+		send_notify_data(dev->dev_notify_ep, dev->notify_req);
+		port->prev_msr = port->msr;
+	}
+	mutex_unlock(&port->mutex_lock);
+
+	return 0;
+}
diff --git a/drivers/usb/function/ums.c b/drivers/usb/function/ums.c
new file mode 100644
index 0000000..509387f
--- /dev/null
+++ b/drivers/usb/function/ums.c
@@ -0,0 +1,469 @@
+/* drivers/usb/function/ums.c
+ *
+ * Function Device for USB Mass Storage
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+
+#include <linux/wait.h>
+#include <linux/list.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb_usual.h>
+
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "usb_function.h"
+
+#if 1
+#define DBG(x...) do {} while (0)
+#else
+#define DBG(x...) printk(x)
+#endif
+
+#define TXN_MAX 4096
+
+/* UMS setup class requests */
+#define USB_BULK_GET_MAX_LUN_REQUEST   0xFE
+#define USB_BULK_RESET_REQUEST         0xFF
+
+/* number of rx and tx requests to allocate */
+#define RX_REQ_MAX 4
+#define TX_REQ_MAX 4
+
+/* FIXME - add ioctl() support for LUN count */
+int lun_count = 1;
+
+/* Per-function state for the mass-storage userspace bridge
+ * (one global instance, _context). */
+struct ums_context
+{
+	int online;	/* set while the host has configured us */
+	int error;	/* latched on any transfer error; cleared on open */
+
+	/* single-opener / single-reader / single-writer guards */
+	atomic_t read_excl;
+	atomic_t write_excl;
+	atomic_t open_excl;
+	spinlock_t lock;	/* protects the request lists below */
+
+	struct usb_endpoint *out;
+	struct usb_endpoint *in;
+
+	struct list_head tx_idle;	/* IN requests ready for use */
+	struct list_head rx_idle;	/* OUT requests ready to queue */
+	struct list_head rx_done;	/* OUT requests with data pending */
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+
+	/* the request we're currently reading from */
+	struct usb_request *read_req;
+	unsigned char *read_buf;
+};
+
+static struct ums_context _context;
+
+/* Claim exclusive access: 0 on success, -1 if already held. */
+static inline int _lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1)
+		return 0;
+
+	/* someone else got there first; undo our increment */
+	atomic_dec(excl);
+	return -1;
+}
+
+/* Release access claimed by a successful _lock(). */
+static inline void _unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list (IRQ-safe; completion handlers
+ * call this from interrupt context) */
+static void req_put(struct ums_context *ctxt, struct list_head *head, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+/* remove a request from the head of a list; returns NULL (0) when
+ * the list is empty */
+static struct usb_request *req_get(struct ums_context *ctxt, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	if(list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return req;
+}
+
+/* Bulk-IN completion: recycle the request and wake any blocked
+ * writer; a non-zero status latches the error flag. */
+static void ums_complete_in(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct ums_context *ctxt = req->context;
+
+	DBG("ums_complete_in length: %d, actual: %d \n", req->length, req->actual);
+
+	if(req->status != 0)
+		ctxt->error = 1;
+
+	req_put(ctxt, &ctxt->tx_idle, req);
+
+	wake_up(&ctxt->write_wq);
+}
+
+/* Bulk-OUT completion: hand successful requests to the reader via
+ * rx_done; failed ones go back to rx_idle with the error latched. */
+static void ums_complete_out(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct ums_context *ctxt = req->context;
+
+	DBG("ums_complete_out length: %d, actual: %d \n", req->length, req->actual);
+
+	if(req->status != 0) {
+		ctxt->error = 1;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	} else {
+		req_put(ctxt, &ctxt->rx_done, req);
+	}
+
+	wake_up(&ctxt->read_wq);
+}
+
+/*
+ * ums_read
+ *
+ * Queues one idle rx request on the bulk-out endpoint, waits for it
+ * to complete and copies up to @count bytes to userspace.  Blocks
+ * until the function is online.  Returns bytes read or -errno.
+ *
+ * Fix: when no idle rx request was available, the original fell
+ * through to "fail" with r still equal to count, reporting a fully
+ * successful read without transferring any data; return -EIO instead.
+ */
+static ssize_t ums_read(struct file *fp, char __user *buf,
+                            size_t count, loff_t *pos)
+{
+	struct ums_context *ctxt = &_context;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+
+	DBG("ums_read(%d)\n", count);
+
+	if(_lock(&ctxt->read_excl))
+		return -EBUSY;
+
+	/* we will block until we're online */
+	while(!(ctxt->online || ctxt->error)) {
+		DBG("ums_read: waiting for online state\n");
+		ret = wait_event_interruptible(ctxt->read_wq, (ctxt->online || ctxt->error));
+		if(ret < 0) {
+			_unlock(&ctxt->read_excl);
+			return ret;
+		}
+	}
+
+	if(ctxt->error) {
+		r = -EIO;
+		goto fail;
+	}
+
+	/* if we have an idle read request, get it queued */
+	if((req = req_get(ctxt, &ctxt->rx_idle))) {
+		req->length = count;
+		ret = usb_ept_queue_xfer(ctxt->out, req);
+		if(ret < 0) {
+			DBG("ums_read: failed to queue req %p (%d)\n", req, ret);
+			r = -EIO;
+			ctxt->error = 1;
+			req_put(ctxt, &ctxt->rx_idle, req);
+			goto fail;
+		} else {
+			DBG("rx %p queue\n", req);
+		}
+	} else {
+		DBG("req_get failed!\n");
+		r = -EIO;
+		goto fail;
+	}
+
+	/* wait for a request to complete */
+	req = 0;
+	ret = wait_event_interruptible(ctxt->read_wq,
+				       ((req = req_get(ctxt, &ctxt->rx_done)) || ctxt->error));
+
+	if(req != 0) {
+		ctxt->read_req = req;
+		ctxt->read_buf = req->buf;
+		DBG("rx %p %d\n", req, req->actual);
+
+		/* never hand back more than the caller asked for */
+		xfer = req->actual;
+		if (xfer > count) {
+			xfer = count;
+		}
+		r = xfer;
+
+		if (xfer > 0) {
+			DBG("copy_to_user %d bytes\n", xfer);
+			if(copy_to_user(buf, ctxt->read_buf, xfer)) {
+				r = -EFAULT;
+			}
+
+		}
+		req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+		ctxt->read_req = 0;
+	} else {
+		r = ret;
+	}
+
+fail:
+	_unlock(&ctxt->read_excl);
+	DBG("ums_read returning %d\n", r);
+	return r;
+}
+
+/*
+ * ums_write
+ *
+ * Copies userspace data into idle tx requests (TXN_MAX bytes at a
+ * time) and queues them on the bulk-in endpoint.  Returns the byte
+ * count on success or -errno.
+ *
+ * NOTE(review): count is a size_t, so "count >= 0" is always true;
+ * the loop actually terminates via the internal breaks (count == 0,
+ * error, signal).  A zero-length write queues one zero-length packet.
+ */
+static ssize_t ums_write(struct file *fp, const char __user *buf,
+                             size_t count, loff_t *pos)
+{
+	struct ums_context *ctxt = &_context;
+	struct usb_request *req = 0;
+	int r = count, xfer;
+	int ret;
+
+	DBG("ums_write(%d)\n", count);
+
+	if(_lock(&ctxt->write_excl))
+		return -EBUSY;
+
+	while(count >= 0) {
+		if(ctxt->error) {
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(ctxt->write_wq,
+					       ((req = req_get(ctxt, &ctxt->tx_idle)) || ctxt->error));
+
+		if(ret < 0) {
+			r = ret;
+			break;
+		}
+
+		if(req != 0) {
+			xfer = count > TXN_MAX ? TXN_MAX : count;
+			if(copy_from_user(req->buf, buf, xfer)){
+				r = -EFAULT;
+				break;
+			}
+
+			req->length = xfer;
+			ret = usb_ept_queue_xfer(ctxt->in, req);
+			if(ret < 0) {
+				DBG("ums_write: xfer error %d\n", ret);
+				ctxt->error = 1;
+				r = -EIO;
+				break;
+			}
+
+			buf += xfer;
+			count -= xfer;
+
+			/* zero this so we don't try to free it on error exit */
+			req = 0;
+			if (count == 0) {
+			    break;
+			}
+		}
+	}
+
+
+	/* recycle an unqueued request we still hold after an error */
+	if(req)
+		req_put(ctxt, &ctxt->tx_idle, req);
+
+	_unlock(&ctxt->write_excl);
+	DBG("ums_write returning %d\n", r);
+	return r;
+}
+
+/* Single-opener open(): rejects concurrent opens with -EBUSY and
+ * clears the latched error so a fresh session can start. */
+static int ums_open(struct inode *ip, struct file *fp)
+{
+	struct ums_context *ctxt = &_context;
+
+	if(_lock(&ctxt->open_excl))
+		return -EBUSY;
+
+	/* clear the error latch */
+	ctxt->error = 0;
+
+	return 0;
+}
+
+/* release(): drop the single-opener claim taken in ums_open(). */
+static int ums_release(struct inode *ip, struct file *fp)
+{
+	struct ums_context *ctxt = &_context;
+
+	_unlock(&ctxt->open_excl);
+	return 0;
+}
+
+/* file_operations for the /dev/android_ums misc device */
+static struct file_operations ums_fops = {
+	.owner =   THIS_MODULE,
+	.read =    ums_read,
+	.write =   ums_write,
+	.open =    ums_open,
+	.release = ums_release,
+};
+	
+/* misc device node registered from ums_bind() */
+static struct miscdevice ums_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "android_ums",
+	.fops = &ums_fops,
+};
+
+/*
+ * ums_bind
+ *
+ * Called once the endpoints are allocated: pre-allocates the rx/tx
+ * request pools and registers the misc device.
+ *
+ * Fix: the failure path only logged ("XXX release any we did
+ * allocate"); it now frees every request that was allocated before
+ * the failure, so a partial bind no longer leaks.
+ */
+static void ums_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct ums_context *ctxt = _ctxt;
+	struct usb_request *req;
+	int n;
+
+	ctxt->out = ept[0];
+	ctxt->in = ept[1];
+
+	DBG("ums_bind() %p, %p\n", ctxt->out, ctxt->in);
+
+	for(n = 0; n < RX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->out, 4096);
+		if(req == 0) goto fail;
+		req->context = ctxt;
+		req->complete = ums_complete_out;
+		req_put(ctxt, &ctxt->rx_idle, req);
+	}
+
+	for(n = 0; n < TX_REQ_MAX; n++) {
+		req = usb_ept_alloc_req(ctxt->in, 4096);
+		if(req == 0) goto fail;
+		req->context = ctxt;
+		req->complete = ums_complete_in;
+		req_put(ctxt, &ctxt->tx_idle, req);
+	}
+
+	printk("ums_bind() allocated %d rx and %d tx requests\n",
+	       RX_REQ_MAX, TX_REQ_MAX);
+
+	misc_register(&ums_device);
+	return;
+
+fail:
+	printk("ums_bind() could not allocate requests\n");
+
+	/* release whatever we did manage to allocate */
+	while((req = req_get(ctxt, &ctxt->rx_idle)))
+		usb_ept_free_req(ctxt->out, req);
+	while((req = req_get(ctxt, &ctxt->tx_idle)))
+		usb_ept_free_req(ctxt->in, req);
+}
+
+/*
+ * ums_setup
+ *
+ * Handles the two Bulk-Only Transport class requests on EP0:
+ * Get Max LUN (device-to-host, returns one byte) and Bulk-Only Mass
+ * Storage Reset (host-to-device, no data).
+ *
+ * Fix: the reset check tested (bRequestType & USB_DIR_OUT) !=
+ * USB_DIR_IN, which is always true because USB_DIR_OUT is 0, so a
+ * valid reset was always rejected.  A reset must simply not have the
+ * direction (IN) bit set.
+ */
+static int ums_setup(struct usb_ctrlrequest* req, void* buf, int len, void *_ctxt)
+{
+	if ((req->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		if (req->bRequest == USB_BULK_GET_MAX_LUN_REQUEST) {
+			if ((req->bRequestType & USB_DIR_IN) != USB_DIR_IN
+					|| req->wValue != 0 || req->wIndex != 0)
+			 	return -1;
+
+			/* Max LUN is the highest LUN index, not the count */
+			((u8*)buf)[0] = lun_count - 1;
+			printk("USB_BULK_GET_MAX_LUN_REQUEST returning %d\n", lun_count - 1);
+			return 1;
+		} else if (req->bRequest == USB_BULK_RESET_REQUEST) {
+			if ((req->bRequestType & USB_DIR_IN)
+					|| req->wValue != 0 || req->wIndex != 0)
+			 	return -1;
+
+			/* FIXME - I'm not sure what to do here */
+			printk("USB_BULK_RESET_REQUEST\n");
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/*
+ * ums_configure
+ *
+ * Called (from interrupt context) when the host configures or
+ * unconfigures the device.  On configure, recycles any stale rx
+ * requests from a previous session; on deconfigure, latches the
+ * error flag so blocked readers/writers bail out.
+ */
+static void ums_configure(int configured, void *_ctxt)
+{
+	struct ums_context *ctxt = _ctxt;
+	struct usb_request *req;
+
+	DBG("ums_configure() %d\n", configured);
+
+	if(configured) {
+		ctxt->online = 1;
+
+		/* if we have a stale request being read, recycle it */
+		ctxt->read_buf = 0;
+		if(ctxt->read_req) {
+			req_put(ctxt, &ctxt->rx_idle, ctxt->read_req);
+			ctxt->read_req = 0;
+		}
+
+		/* retire any completed rx requests from previous session */
+		while((req = req_get(ctxt, &ctxt->rx_done))) {
+			req_put(ctxt, &ctxt->rx_idle, req);
+		}
+
+	} else {
+		ctxt->online = 0;
+		ctxt->error = 1;
+	}
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&ctxt->read_wq);
+}
+
+/* Function registration record: SCSI-over-bulk-only mass-storage
+ * interface with one bulk-out and one bulk-in endpoint. */
+static struct usb_function usb_func_ums = {
+	.bind = ums_bind,
+	.configure = ums_configure,
+	.setup = ums_setup,
+
+	.name = "ums",
+	.context = &_context,
+
+	.ifc_class = USB_CLASS_MASS_STORAGE,
+	.ifc_subclass = US_SC_SCSI,
+	.ifc_protocol = US_PR_BULK,
+
+	.ifc_name = "ums",
+
+	.ifc_ept_count = 2,
+	.ifc_ept_type = { EPT_BULK_OUT, EPT_BULK_IN },
+};
+
+/*
+ * ums_init
+ *
+ * Module entry point: initializes the shared context (lock, wait
+ * queues, exclusion counters, request lists) and registers the
+ * function driver.
+ */
+static int __init ums_init(void)
+{
+	struct ums_context *ctxt = &_context;
+	DBG("ums_init()\n");
+
+	spin_lock_init(&ctxt->lock);
+
+	init_waitqueue_head(&ctxt->read_wq);
+	init_waitqueue_head(&ctxt->write_wq);
+
+	atomic_set(&ctxt->open_excl, 0);
+	atomic_set(&ctxt->read_excl, 0);
+	atomic_set(&ctxt->write_excl, 0);
+
+	INIT_LIST_HEAD(&ctxt->rx_idle);
+	INIT_LIST_HEAD(&ctxt->rx_done);
+	INIT_LIST_HEAD(&ctxt->tx_idle);
+
+	return usb_function_register(&usb_func_ums);
+}
+
+module_init(ums_init);
diff --git a/drivers/usb/function/usb_function.h b/drivers/usb/function/usb_function.h
new file mode 100644
index 0000000..35eb257
--- /dev/null
+++ b/drivers/usb/function/usb_function.h
@@ -0,0 +1,187 @@
+/* drivers/usb/function/usb_function.h
+ *
+ * USB Function Device Interface
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DRIVERS_USB_FUNCTION_USB_FUNCTION_H_
+#define _DRIVERS_USB_FUNCTION_USB_FUNCTION_H_
+
+#include <linux/list.h>
+#include <linux/usb/ch9.h>
+
+#define EPT_BULK_IN   1
+#define EPT_BULK_OUT  2
+#define EPT_INT_IN  3
+
+#define USB_CONFIG_ATT_SELFPOWER_POS	(6)	/* self powered */
+#define USB_CONFIG_ATT_WAKEUP_POS	(5)	/* can wakeup */
+
+/* Controller-side representation of one hardware endpoint. */
+struct usb_endpoint {
+	struct usb_info *ui;	/* owning controller instance */
+	struct msm_request *req; /* head of pending requests */
+	struct msm_request *last;	/* presumably the list tail -- TODO confirm */
+	unsigned flags;
+
+	/* bit number (0-31) in various status registers
+	** as well as the index into the usb_info's array
+	** of all endpoints
+	*/
+	unsigned char bit;
+	unsigned char num;
+
+	unsigned short max_pkt;
+
+	unsigned ept_halted;
+
+	/* pointers to DMA transfer list area */
+	/* these are allocated from the usb_info dma space */
+	struct ept_queue_head *head;
+	struct usb_endpoint_descriptor *ep_descriptor;
+	unsigned int alloced;
+};
+
+/* One transfer request, queued on a usb_endpoint. */
+struct usb_request {
+	void *buf;          /* pointer to associated data buffer */
+	unsigned length;    /* requested transfer length */
+	int status;         /* status upon completion */
+	unsigned actual;    /* actual bytes transferred */
+
+	/* called when the transfer finishes (may run in IRQ context) */
+	void (*complete)(struct usb_endpoint *ep, struct usb_request *req);
+	void *context;      /* opaque cookie for the completion handler */
+
+	void *device;       /* opaque cookie used by some function drivers */
+
+	struct list_head list;	/* for function drivers' own queues */
+};
+
+/* Registration record a function driver passes to
+** usb_function_register(). */
+struct usb_function {
+	/* bind() is called once when the function has had its endpoints
+	** allocated, but before the bus is active.
+	**
+	** might be a good place to allocate some usb_request objects
+	*/
+	void (*bind)(void *);
+
+	/* unbind() is called when the function is being removed.
+	** it is illegal to call any usb_ept_* hooks at this point
+	** and all endpoints must be released.
+	*/
+	void (*unbind)(void *);
+
+	/* configure() is called when the usb client has been configured
+	** by the host and again when the device is unconfigured (or
+	** when the client is detached)
+	**
+	** currently called from interrupt context.
+	*/
+	void (*configure)(int configured, void *);
+	void (*disconnect)(void *);
+
+	/* setup() is called to allow functions to handle class and vendor
+	** setup requests.  If the request is unsupported or can not be handled,
+	** setup() should return -1.
+	** For OUT requests, buf will point to a buffer to data received in the
+	** request's data phase, and len will contain the length of the data.
+	** setup() should return 0 after handling an OUT request successfully.
+	** for IN requests, buf will contain a pointer to a buffer for setup()
+	** to write data to, and len will be the maximum size of the data to
+	** be written back to the host.
+	** After successfully handling an IN request, setup() should return
+	** the number of bytes written to buf that should be sent in the
+	** response to the host.
+	*/
+	int (*setup)(struct usb_ctrlrequest *req, void *buf,
+			int len, void *);
+
+	/* optional SET_INTERFACE / GET_INTERFACE handlers */
+	int (*set_interface)(int ifc_num, int alt_set, void *_ctxt);
+	int (*get_interface)(int ifc_num, void *ctxt);
+	/* driver name */
+	const char *name;
+	void *context;	/* passed back as the last argument of every hook */
+
+	/* interface class/subclass/protocol for descriptor */
+	unsigned char ifc_class;
+	unsigned char ifc_subclass;
+	unsigned char ifc_protocol;
+
+	/* name string for descriptor */
+	const char *ifc_name;
+
+	/* number of needed endpoints and their types */
+	unsigned char ifc_ept_count;
+	unsigned char ifc_ept_type[8];
+
+	/* if the endpoint is disabled, its interface will not be
+	** included in the configuration descriptor
+	*/
+	unsigned char   disabled;
+
+	/* full-speed / high-speed descriptor sets */
+	struct usb_descriptor_header **fs_descriptors;
+	struct usb_descriptor_header **hs_descriptors;
+
+	/* control-endpoint requests/endpoints, filled in by the core */
+	struct usb_request *ep0_out_req, *ep0_in_req;
+	struct usb_endpoint *ep0_out, *ep0_in;
+};
+
+int usb_function_register(struct usb_function *driver);
+int usb_function_unregister(struct usb_function *driver);
+
+int usb_msm_get_speed(void);
+void usb_configure_endpoint(struct usb_endpoint *ep,
+			struct usb_endpoint_descriptor *ep_desc);
+int usb_remote_wakeup(void);
+/* To allocate endpoint from function driver*/
+struct usb_endpoint *usb_alloc_endpoint(unsigned direction);
+int usb_free_endpoint(struct usb_endpoint *ept);
+/* To enable endpoint from function driver*/
+void usb_ept_enable(struct usb_endpoint *ept, int yes);
+int usb_msm_get_next_ifc_number(struct usb_function *);
+int usb_msm_get_next_strdesc_id(char *);
+void usb_msm_enable_iad(void);
+
+void usb_function_enable(const char *function, int enable);
+
+/* Allocate a USB request.
+** Must be called from a context that can sleep.
+** If bufsize is nonzero, req->buf will be allocated for
+** you and free'd when the request is free'd.  Otherwise
+** it is your responsibility to provide.
+*/
+struct usb_request *usb_ept_alloc_req(struct usb_endpoint *ept, unsigned bufsize);
+void usb_ept_free_req(struct usb_endpoint *ept, struct usb_request *req);
+
+/* safely callable from any context
+** returns 0 if successfully queued and sets req->status = -EBUSY
+** req->status will change to a different value upon completion
+** (0 for success, -EIO, -ENODEV, etc for error)
+*/
+int usb_ept_queue_xfer(struct usb_endpoint *ept, struct usb_request *req);
+int usb_ept_flush(struct usb_endpoint *ept);
+int usb_ept_get_max_packet(struct usb_endpoint *ept);
+int usb_ept_cancel_xfer(struct usb_endpoint *ept, struct usb_request *_req);
+void usb_ept_fifo_flush(struct usb_endpoint *ept);
+int usb_ept_set_halt(struct usb_endpoint *ept);
+int usb_ept_clear_halt(struct usb_endpoint *ept);
+struct device *usb_get_device(void);
+struct usb_endpoint *usb_ept_find(struct usb_endpoint **ept, int type);
+struct usb_function *usb_ept_get_function(struct usb_endpoint *ept);
+int usb_ept_is_stalled(struct usb_endpoint *ept);
+void usb_request_set_buffer(struct usb_request *req, void *buf, dma_addr_t dma);
+void usb_free_endpoint_all_req(struct usb_endpoint *ep);
+void usb_remove_function_driver(struct usb_function *func);
+/* note: a duplicate usb_remote_wakeup() declaration was removed here */
+#endif
diff --git a/drivers/usb/function/zero.c b/drivers/usb/function/zero.c
new file mode 100644
index 0000000..449bcbf
--- /dev/null
+++ b/drivers/usb/function/zero.c
@@ -0,0 +1,120 @@
+/* driver/usb/function/zero.c
+ *
+ * Zero Function Device - A Trivial Data Source
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "usb_function.h"
+
+struct zero_context
+{
+	struct usb_endpoint *in;
+	struct usb_request *req0;
+	struct usb_request *req1;
+};
+
+static struct zero_context _context;
+
+static void zero_bind(struct usb_endpoint **ept, void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	ctxt->in = ept[0];
+	printk(KERN_INFO "zero_bind() %p\n", ctxt->in);
+	ctxt->req0 = usb_ept_alloc_req(ctxt->in, 4096);
+	ctxt->req1 = usb_ept_alloc_req(ctxt->in, 4096);
+	/* allocation may fail; zero_unbind() already tolerates NULL reqs */
+	if (ctxt->req0)
+		memset(ctxt->req0->buf, 0, 4096);
+	if (ctxt->req1)
+		memset(ctxt->req1->buf, 0, 4096);
+}
+
+static void zero_unbind(void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	printk(KERN_INFO "zero_unbind()\n");
+	if (ctxt->req0) {
+		usb_ept_free_req(ctxt->in, ctxt->req0);
+		ctxt->req0 = NULL;
+	}
+	if (ctxt->req1) {
+		usb_ept_free_req(ctxt->in, ctxt->req1);
+		ctxt->req1 = NULL;
+	}
+	ctxt->in = NULL;
+}
+
+static void zero_queue_in(struct zero_context *ctxt, struct usb_request *req);
+
+static void zero_in_complete(struct usb_endpoint *ept, struct usb_request *req)
+{
+	struct zero_context *ctxt = req->context;
+
+	/* requeue unless the device has gone away */
+	if (req->status != -ENODEV)
+		zero_queue_in(ctxt, req);
+}
+
+static void zero_queue_in(struct zero_context *ctxt, struct usb_request *req)
+{
+	req->complete = zero_in_complete;
+	req->context = ctxt;
+	req->length = 4096;
+
+	usb_ept_queue_xfer(ctxt->in, req);
+}
+
+static void zero_configure(int configured, void *_ctxt)
+{
+	struct zero_context *ctxt = _ctxt;
+	printk(KERN_INFO "zero_configure() %d\n", configured);
+
+	if (configured) {
+		zero_queue_in(ctxt, ctxt->req0);
+		zero_queue_in(ctxt, ctxt->req1);
+	} else {
+		/* all pending requests will be canceled */
+	}
+}
+
+static struct usb_function usb_func_zero = {
+	.bind = zero_bind,
+	.unbind = zero_unbind,
+	.configure = zero_configure,
+
+	.name = "zero",
+	.context = &_context,
+
+	.ifc_class = 0xff,
+	.ifc_subclass = 0xfe,
+	.ifc_protocol = 0x02,
+
+	.ifc_name = "zero",
+
+	.ifc_ept_count = 1,
+	.ifc_ept_type = { EPT_BULK_IN },
+};
+
+static int __init zero_init(void)
+{
+	printk(KERN_INFO "zero_init()\n");
+	usb_function_register(&usb_func_zero);
+	return 0;
+}
+
+module_init(zero_init);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 144a8c8..117d3bf 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -598,6 +598,24 @@
 # LAST -- dummy/emulated controller
 #
 
+config USB_GADGET_MSM_72K
+	boolean "MSM 72K Device Controller"
+	depends on ARCH_MSM
+	select USB_GADGET_SELECTED
+	select USB_GADGET_DUALSPEED
+	help
+	   USB gadget driver for Qualcomm MSM 72K architecture.
+
+	   Say "y" to link the driver statically, or "m" to build a
+	   dynamically linked module called "msm72k" and force all
+	   gadget drivers to also be dynamically linked.
+
+config USB_MSM_72K
+	tristate
+	depends on USB_GADGET_MSM_72K
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 config USB_GADGET_DUMMY_HCD
 	boolean "Dummy HCD (DEVELOPMENT)"
 	depends on USB=y || (USB=m && USB_GADGET=m)
@@ -1064,4 +1082,84 @@
 
 endchoice
 
+config USB_CSW_HACK
+	boolean "USB Mass storage csw hack Feature"
+	default y
+	help
+	 This CSW (Command Status Wrapper) hack feature increases the
+	 throughput of the USB mass storage function.
+
+config MODEM_SUPPORT
+	boolean "modem support in generic serial function driver"
+	depends on USB_G_ANDROID
+	default y
+	help
+	  This feature enables the modem functionality in the
+	  generic serial.
+	  adds interrupt endpoint support to send modem notifications
+	  to host.
+	  adds CDC descriptors to enumerate the generic serial as MODEM.
+	  adds CDC class requests to configure MODEM line settings.
+	  Say "y" to enable MODEM support in the generic serial driver.
+
+config RMNET_SMD_CTL_CHANNEL
+	string "RMNET control SMD channel name"
+	depends on USB_G_ANDROID && MSM_SMD
+	default ""
+	help
+	  Control SMD channel for transferring QMI messages
+
+config RMNET_SMD_DATA_CHANNEL
+	string "RMNET Data SMD channel name"
+	depends on USB_G_ANDROID && MSM_SMD
+	default ""
+	help
+	  Data SMD channel for transferring network data
+
+config RMNET_SDIO_CTL_CHANNEL
+       int "RMNET control SDIO channel id"
+       default 8
+       depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+       help
+         Control SDIO channel for transferring RMNET QMI messages
+
+config RMNET_SDIO_DATA_CHANNEL
+       int "RMNET Data SDIO channel id"
+       default 8
+       depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+       help
+         Data SDIO channel for transferring network data
+
+config RMNET_SMD_SDIO_CTL_CHANNEL
+       int "RMNET(sdio_smd) Control SDIO channel id"
+       depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+       default 8
+       help
+         Control SDIO channel for transferring QMI messages
+
+config RMNET_SMD_SDIO_DATA_CHANNEL
+       int "RMNET(sdio_smd) Data SDIO channel id"
+       default 8
+       depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+       help
+         Data SDIO channel for transferring network data
+
+config RMNET_SDIO_SMD_DATA_CHANNEL
+       string "RMNET(sdio_smd) Data SMD channel name"
+       depends on MSM_SDIO_CMUX && MSM_SDIO_DMUX
+       default "DATA40"
+       help
+         Data SMD channel for transferring network data
+
+config USB_ANDROID_RMNET_CTRL_SMD
+       boolean "RmNet(BAM) control over SMD driver"
+       depends on MSM_SMD
+       help
+         Enabling this option adds rmnet control over SMD
+	 support to the android gadget. Rmnet is an
+	 alternative to CDC-ECM and Windows RNDIS.
+	 It uses QUALCOMM MSM Interface for control
+	 transfers. This option enables only control interface.
+	 Data interface used is BAM.
+
 endif # USB_GADGET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index ab17a4c..064960c 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -29,6 +29,7 @@
 mv_udc-y			:= mv_udc_core.o mv_udc_phy.o
 obj-$(CONFIG_USB_CI13XXX_MSM)	+= ci13xxx_msm.o
 obj-$(CONFIG_USB_FUSB300)	+= fusb300_udc.o
+obj-$(CONFIG_USB_MSM_72K)	+= msm72k_udc.o
 
 #
 # USB gadget drivers
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index b13633b..8146af7 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -30,6 +30,7 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/composite.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/android.h>
 
 #include "gadget_chips.h"
 
@@ -45,9 +46,10 @@
 #include "epautoconf.c"
 #include "composite.c"
 
+#include "f_diag.c"
 #include "f_mass_storage.c"
-#include "u_serial.c"
-#include "f_acm.c"
+//#include "u_serial.c"
+//#include "f_acm.c"
 #include "f_adb.c"
 #include "f_mtp.c"
 #include "f_accessory.c"
@@ -99,6 +101,7 @@
 	struct list_head enabled_functions;
 	struct usb_composite_dev *cdev;
 	struct device *dev;
+	struct android_usb_platform_data *pdata;
 
 	bool enabled;
 	bool connected;
@@ -187,6 +190,68 @@
 /*-------------------------------------------------------------------------*/
 /* Supported functions initialization */
 
+char diag_clients[32];	    /* enabled DIAG clients - "diag[,diag_mdm]" */
+static ssize_t clients_store(
+		struct device *device, struct device_attribute *attr,
+		const char *buff, size_t size)
+{
+	/* strlcpy guarantees NUL-termination, unlike strncpy */
+	strlcpy(diag_clients, buff, sizeof(diag_clients));
+
+	return size;
+}
+
+static DEVICE_ATTR(clients, S_IWUSR, NULL, clients_store);
+static struct device_attribute *diag_function_attributes[] =
+					 { &dev_attr_clients, NULL };
+
+static int diag_function_init(struct android_usb_function *f,
+				 struct usb_composite_dev *cdev)
+{
+	return diag_setup();
+}
+
+static void diag_function_cleanup(struct android_usb_function *f)
+{
+	diag_cleanup();
+}
+
+static int diag_function_bind_config(struct android_usb_function *f,
+					struct usb_configuration *c)
+{
+	char *name;
+	char buf[32], *b;
+	int once = 0, err = -1;
+	int (*notify)(uint32_t, const char *);
+
+	strncpy(buf, diag_clients, sizeof(buf));
+	b = strim(buf);
+
+	while (b) {
+		name = strsep(&b, ",");
+		/* Allow only first diag channel to update pid and serial no */
+		if (!once++)
+			notify = _android_dev->pdata->update_pid_and_serial_num;
+		else
+			notify = NULL;
+
+		if (name) {
+			err = diag_function_add(c, name, notify);
+			if (err)
+				pr_err("diag: Cannot open channel '%s'", name);
+		}
+	}
+
+	return err;
+}
+
+static struct android_usb_function diag_function = {
+	.name		= "diag",
+	.init		= diag_function_init,
+	.cleanup	= diag_function_cleanup,
+	.bind_config	= diag_function_bind_config,
+	.attributes	= diag_function_attributes,
+};
+
 static int adb_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
 {
 	return adb_setup();
@@ -209,7 +274,7 @@
 	.bind_config	= adb_function_bind_config,
 };
 
-
+#if 0
 #define MAX_ACM_INSTANCES 4
 struct acm_function_config {
 	int instances;
@@ -280,7 +345,7 @@
 	.bind_config	= acm_function_bind_config,
 	.attributes	= acm_function_attributes,
 };
-
+#endif
 
 static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev)
 {
@@ -644,8 +709,9 @@
 
 
 static struct android_usb_function *supported_functions[] = {
+	&diag_function,
 	&adb_function,
-	&acm_function,
+//	&acm_function,
 	&mtp_function,
 	&ptp_function,
 	&rndis_function,
@@ -1104,6 +1170,19 @@
 	return 0;
 }
 
+static int __devinit android_probe(struct platform_device *pdev)
+{
+	struct android_usb_platform_data *pdata = pdev->dev.platform_data;
+	struct android_dev *dev = _android_dev;
+
+	dev->pdata = pdata;
+
+	return 0;
+}
+
+static struct platform_driver android_platform_driver = {
+	.driver = { .name = "android_usb"},
+};
 
 static int __init init(void)
 {
@@ -1135,6 +1214,8 @@
 	composite_driver.setup = android_setup;
 	composite_driver.disconnect = android_disconnect;
 
+	platform_driver_probe(&android_platform_driver, android_probe);
+
 	return usb_composite_probe(&android_usb_driver, android_bind);
 }
 module_init(init);
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 139ac94..8a75420 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,11 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
  */
 
 #include <linux/module.h>
@@ -64,7 +59,8 @@
 	.flags			= CI13XXX_REGS_SHARED |
 				  CI13XXX_REQUIRE_TRANSCEIVER |
 				  CI13XXX_PULLUP_ON_VBUS |
-				  CI13XXX_DISABLE_STREAMING,
+				  CI13XXX_DISABLE_STREAMING |
+				  CI13XXX_ZERO_ITC,
 
 	.notify_event		= ci13xxx_msm_notify_event,
 };
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index baaf87e..9a03ca7 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -318,6 +318,17 @@
 	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
 	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
 
+	/*
+	 * ITC (Interrupt Threshold Control) field is to set the maximum
+	 * rate at which the device controller will issue interrupts.
+	 * The maximum interrupt interval measured in micro frames.
+	 * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is
+	 * 8 micro frames. If CPU can handle interrupts at faster rate, ITC
+	 * can be set to lesser value to gain performance.
+	 */
+	if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
+		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));
+
 	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
 		pr_err("cannot enter in device mode");
 		pr_err("lpm = %i", hw_bank.lpm);
@@ -417,6 +428,10 @@
 		data |= ENDPTCTRL_RXE;
 	}
 	hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
+
+	/* make sure endpoint is enabled before returning */
+	mb();
+
 	return 0;
 }
 
@@ -1219,7 +1234,7 @@
 {
 	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
 	unsigned long flags;
-	u32 dump[512];
+	u32 *dump;
 	unsigned i, k, n = 0;
 
 	dbg_trace("[%s] %p\n", __func__, buf);
@@ -1227,9 +1242,12 @@
 		dev_err(dev, "[%s] EINVAL\n", __func__);
 		return 0;
 	}
+	dump = kmalloc(2048, GFP_KERNEL);
+	if (dump == NULL)
+		return -ENOMEM;
 
 	spin_lock_irqsave(udc->lock, flags);
-	k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
+	k = hw_register_read(dump, 512);
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	for (i = 0; i < k; i++) {
@@ -1237,7 +1255,7 @@
 			       "reg[0x%04X] = 0x%08X\n",
 			       i * (unsigned)sizeof(u32), dump[i]);
 	}
-
+	kfree(dump);
 	return n;
 }
 
@@ -1317,6 +1335,42 @@
 }
 static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
 
+static int ci13xxx_wakeup(struct usb_gadget *_gadget)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	trace();
+
+	spin_lock_irqsave(udc->lock, flags);
+	if (!udc->remote_wakeup) {
+		ret = -EOPNOTSUPP;
+		dbg_trace("remote wakeup feature is not enabled\n");
+		goto out;
+	}
+	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
+		ret = -EINVAL;
+		dbg_trace("port is not suspended\n");
+		goto out;
+	}
+	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
+out:
+	spin_unlock_irqrestore(udc->lock, flags);
+	return ret;
+}
+
+static ssize_t usb_remote_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
+
+	ci13xxx_wakeup(&udc->gadget);
+
+	return count;
+}
+static DEVICE_ATTR(wakeup, S_IWUSR, NULL, usb_remote_wakeup);
+
 /**
  * dbg_create_files: initializes the attribute interface
  * @dev: device
@@ -1353,8 +1407,13 @@
 	retval = device_create_file(dev, &dev_attr_requests);
 	if (retval)
 		goto rm_registers;
+	retval = device_create_file(dev, &dev_attr_wakeup);
+	if (retval)
+		goto rm_requests;
 	return 0;
 
+rm_requests:
+	device_remove_file(dev, &dev_attr_requests);
  rm_registers:
 	device_remove_file(dev, &dev_attr_registers);
  rm_qheads:
@@ -1391,6 +1450,7 @@
 	device_remove_file(dev, &dev_attr_events);
 	device_remove_file(dev, &dev_attr_driver);
 	device_remove_file(dev, &dev_attr_device);
+	device_remove_file(dev, &dev_attr_wakeup);
 	return 0;
 }
 
@@ -1619,6 +1679,7 @@
 	udc->gadget.speed = USB_SPEED_UNKNOWN;
 	udc->remote_wakeup = 0;
 	udc->suspended = 0;
+	udc->configured = 0;
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	/* flush all endpoints */
@@ -1930,6 +1991,8 @@
 		do {
 			hw_test_and_set_setup_guard();
 			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
+			/* Ensure buffer is read before acknowledging to h/w */
+			mb();
 		} while (!hw_test_and_clear_setup_guard());
 
 		type = req.bRequestType;
@@ -1991,6 +2054,10 @@
 				break;
 			err = isr_setup_status_phase(udc);
 			break;
+		case USB_REQ_SET_CONFIGURATION:
+			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
+				udc->configured = !!req.wValue;
+			goto delegate;
 		case USB_REQ_SET_FEATURE:
 			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
 					le16_to_cpu(req.wValue) ==
@@ -2104,12 +2171,15 @@
 	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
 		mEp->qh.ptr->cap &= ~QH_MULT;
 	else
-		mEp->qh.ptr->cap &= ~QH_ZLT;
+		mEp->qh.ptr->cap |= QH_ZLT;
 
 	mEp->qh.ptr->cap |=
 		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
 	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
 
+	/* complete all the updates to ept->head before enabling endpoint*/
+	mb();
+
 	/*
 	 * Enable endpoints in the HW other than ep0 as ep0
 	 * is always enabled
@@ -2467,7 +2537,8 @@
 		if (is_active) {
 			pm_runtime_get_sync(&_gadget->dev);
 			hw_device_reset(udc);
-			hw_device_state(udc->ep0out.qh.dma);
+			if (udc->softconnect)
+				hw_device_state(udc->ep0out.qh.dma);
 		} else {
 			hw_device_state(0);
 			if (udc->udc_driver->notify_event)
@@ -2481,31 +2552,6 @@
 	return 0;
 }
 
-static int ci13xxx_wakeup(struct usb_gadget *_gadget)
-{
-	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
-	unsigned long flags;
-	int ret = 0;
-
-	trace();
-
-	spin_lock_irqsave(udc->lock, flags);
-	if (!udc->remote_wakeup) {
-		ret = -EOPNOTSUPP;
-		dbg_trace("remote wakeup feature is not enabled\n");
-		goto out;
-	}
-	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
-		ret = -EINVAL;
-		dbg_trace("port is not suspended\n");
-		goto out;
-	}
-	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
-out:
-	spin_unlock_irqrestore(udc->lock, flags);
-	return ret;
-}
-
 static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
 {
 	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
@@ -2515,6 +2561,32 @@
 	return -ENOTSUPP;
 }
 
+static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->softconnect = is_active;
+	if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
+			!udc->vbus_active) || !udc->driver) {
+		spin_unlock_irqrestore(udc->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (is_active) {
+		hw_device_state(udc->ep0out.qh.dma);
+	} else {
+		hw_device_state(0);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_STOPPED_EVENT);
+	}
+	return 0;
+}
+
+
 /**
  * Device operations part of the API to the USB controller hardware,
  * which don't involve endpoints (or i/o)
@@ -2524,6 +2596,7 @@
 	.vbus_session	= ci13xxx_vbus_session,
 	.wakeup		= ci13xxx_wakeup,
 	.vbus_draw	= ci13xxx_vbus_draw,
+	.pullup		= ci13xxx_pullup,
 };
 
 /**
@@ -2627,6 +2700,7 @@
 	/* bind gadget */
 	driver->driver.bus     = NULL;
 	udc->gadget.dev.driver = &driver->driver;
+	udc->softconnect = 1;
 
 	spin_unlock_irqrestore(udc->lock, flags);
 	retval = bind(&udc->gadget);                /* MAY SLEEP */
@@ -2649,6 +2723,9 @@
 		}
 	}
 
+	if (!udc->softconnect)
+		goto done;
+
 	retval = hw_device_state(udc->ep0out.qh.dma);
 	if (retval)
 		pm_runtime_put_sync(&udc->gadget.dev);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 2370777..27af8aa 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -108,6 +108,7 @@
 #define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
 #define CI13XXX_PULLUP_ON_VBUS		BIT(2)
 #define CI13XXX_DISABLE_STREAMING	BIT(3)
+#define CI13XXX_ZERO_ITC		BIT(4)
 
 #define CI13XXX_CONTROLLER_RESET_EVENT		0
 #define CI13XXX_CONTROLLER_STOPPED_EVENT	1
@@ -131,11 +132,13 @@
 	u8                         remote_wakeup; /* Is remote wakeup feature
 							enabled by the host? */
 	u8                         suspended;  /* suspended by the host */
+	u8                         configured;  /* is device configured */
 	u8                         test_mode;  /* the selected test mode */
 
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
 	int                        vbus_active; /* is VBUS active */
+	int                        softconnect; /* is pull-up enable allowed */
 	struct otg_transceiver    *transceiver; /* Transceiver struct */
 };
 
@@ -189,6 +192,8 @@
 #define    USBMODE_CM_HOST    (0x03UL <<  0)
 #define USBMODE_SLOM          BIT(3)
 #define USBMODE_SDIS          BIT(4)
+#define USBCMD_ITC(n)         (n << 16) /* n = 0, 1, 2, 4, 8, 16, 32, 64 */
+#define USBCMD_ITC_MASK       (0xFF << 16)
 
 /* ENDPTCTRL */
 #define ENDPTCTRL_RXS         BIT(0)
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 68b1a8e..3fd12b1 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2008 by David Brownell
  * Copyright (C) 2008 by Nokia Corporation
  * Copyright (C) 2009 by Samsung Electronics
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
  * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
  *
  * This software is distributed under the terms of the GNU General
@@ -17,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/usb/android_composite.h>
 
 #include "u_serial.h"
 #include "gadget_chips.h"
@@ -49,6 +51,7 @@
 	struct gserial			port;
 	u8				ctrl_id, data_id;
 	u8				port_num;
+	enum transport_type		transport;
 
 	u8				pending;
 
@@ -83,6 +86,17 @@
 #define ACM_CTRL_DCD		(1 << 0)
 };
 
+static unsigned int no_tty_ports;
+static unsigned int no_sdio_ports;
+static unsigned int no_smd_ports;
+static unsigned int nr_ports;
+
+static struct port_info {
+	enum transport_type	transport;
+	unsigned		port_num;
+	unsigned		client_port_num;
+} gacm_ports[GSERIAL_NO_PORTS];
+
 static inline struct f_acm *func_to_acm(struct usb_function *f)
 {
 	return container_of(f, struct f_acm, port.func);
@@ -93,6 +107,95 @@
 	return container_of(p, struct f_acm, port);
 }
 
+static char *transport_to_str(enum transport_type t)
+{
+	switch (t) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		return "TTY";
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		return "SDIO";
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		return "SMD";
+	}
+
+	return "NONE";
+}
+
+static int gport_setup(struct usb_configuration *c)
+{
+	int ret = 0;
+
+	pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports, nr_ports);
+
+	if (no_tty_ports)
+		ret = gserial_setup(c->cdev->gadget, no_tty_ports);
+	if (!ret && no_sdio_ports)
+		ret = gsdio_setup(c->cdev->gadget, no_sdio_ports);
+	if (!ret && no_smd_ports)
+		ret = gsmd_setup(c->cdev->gadget, no_smd_ports);
+
+	return ret;
+}
+
+static int gport_connect(struct f_acm *acm)
+{
+	/* resolve the client port mapped to this ACM instance */
+	unsigned port_num;
+
+	port_num = gacm_ports[acm->port_num].client_port_num;
+
+	pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_port_no:%d\n",
+			__func__, transport_to_str(acm->transport),
+			acm, &acm->port, acm->port_num, port_num);
+
+	switch (acm->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_connect(&acm->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_connect(&acm->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_connect(&acm->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport: %s\n", __func__,
+				transport_to_str(acm->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int gport_disconnect(struct f_acm *acm)
+{
+	unsigned port_num;
+
+	port_num = gacm_ports[acm->port_num].client_port_num;
+
+	pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_pno:%d\n",
+			__func__, transport_to_str(acm->transport),
+			acm, &acm->port, acm->port_num, port_num);
+
+	switch (acm->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_disconnect(&acm->port);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_disconnect(&acm->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_disconnect(&acm->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport:%s\n", __func__,
+				transport_to_str(acm->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
 /*-------------------------------------------------------------------------*/
 
 /* notification endpoint uses smallish and infrequent fixed-size messages */
@@ -333,8 +436,7 @@
 	/* SET_LINE_CODING ... just read and save what the host sends */
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_LINE_CODING:
-		if (w_length != sizeof(struct usb_cdc_line_coding)
-				|| w_index != acm->ctrl_id)
+		if (w_length != sizeof(struct usb_cdc_line_coding))
 			goto invalid;
 
 		value = w_length;
@@ -345,8 +447,6 @@
 	/* GET_LINE_CODING ... return what host sent, or initial value */
 	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_GET_LINE_CODING:
-		if (w_index != acm->ctrl_id)
-			goto invalid;
 
 		value = min_t(unsigned, w_length,
 				sizeof(struct usb_cdc_line_coding));
@@ -356,9 +456,6 @@
 	/* SET_CONTROL_LINE_STATE ... save what the host sent */
 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
 			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
-		if (w_index != acm->ctrl_id)
-			goto invalid;
-
 		value = 0;
 
 		/* FIXME we should not allow data to flow until the
@@ -366,6 +463,12 @@
 		 * that bit, we should return to that no-flow state.
 		 */
 		acm->port_handshake_bits = w_value;
+		if (acm->port.notify_modem) {
+			unsigned port_num =
+				gacm_ports[acm->port_num].client_port_num;
+
+			acm->port.notify_modem(&acm->port, port_num, w_value);
+		}
 		break;
 
 	default:
@@ -415,7 +518,7 @@
 	} else if (intf == acm->data_id) {
 		if (acm->port.in->driver_data) {
 			DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
-			gserial_disconnect(&acm->port);
+			gport_disconnect(acm);
 		} else {
 			DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
 		}
@@ -423,7 +526,7 @@
 				acm->hs.in, acm->fs.in);
 		acm->port.out_desc = ep_choose(cdev->gadget,
 				acm->hs.out, acm->fs.out);
-		gserial_connect(&acm->port, acm->port_num);
+		gport_connect(acm);
 
 	} else
 		return -EINVAL;
@@ -437,7 +540,7 @@
 	struct usb_composite_dev *cdev = f->config->cdev;
 
 	DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
-	gserial_disconnect(&acm->port);
+	gport_disconnect(acm);
 	usb_ep_disable(acm->notify);
 	acm->notify->driver_data = NULL;
 }
@@ -568,6 +671,15 @@
 	return acm_notify_serial_state(acm);
 }
 
+static int acm_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+	struct f_acm *acm = port_to_acm(port);
+
+	acm->serial_state = ctrl_bits;
+
+	return acm_notify_serial_state(acm);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* ACM function driver setup/binding */
@@ -764,12 +876,14 @@
 	spin_lock_init(&acm->lock);
 
 	acm->port_num = port_num;
+	acm->transport = gacm_ports[port_num].transport;
 
 	acm->port.connect = acm_connect;
 	acm->port.disconnect = acm_disconnect;
 	acm->port.send_break = acm_send_break;
+	acm->port.send_modem_ctrl_bits = acm_send_modem_ctrl_bits;
 
-	acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num);
+	acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num + 1);
 	if (!acm->port.func.name) {
 		kfree(acm);
 		return -ENOMEM;
@@ -787,3 +901,117 @@
 		kfree(acm);
 	return status;
 }
+
+#ifdef CONFIG_USB_ANDROID_ACM
+#include <linux/platform_device.h>
+
+static struct acm_platform_data *acm_pdata;
+
+static int acm_probe(struct platform_device *pdev)
+{
+	acm_pdata = pdev->dev.platform_data;
+	return 0;
+}
+
+static struct platform_driver acm_platform_driver = {
+	.driver = { .name = "acm", },
+	.probe = acm_probe,
+};
+
+int acm1_function_bind_config(struct usb_configuration *c)
+{
+	int ret = acm_bind_config(c, 0);
+	if (ret == 0)
+		gport_setup(c);
+	return ret;
+}
+
+int acm2_function_bind_config(struct usb_configuration *c)
+{
+	int ret = acm_bind_config(c, 1);
+
+	return ret;
+}
+
+static struct android_usb_function acm1_function = {
+	.name = "acm1",
+	.bind_config = acm1_function_bind_config,
+};
+
+static struct android_usb_function acm2_function = {
+	.name = "acm2",
+	.bind_config = acm2_function_bind_config,
+};
+
+static int facm_remove(struct platform_device *pdev)
+{
+	gserial_cleanup();
+
+	return 0;
+}
+
+static struct platform_driver usb_facm = {
+	.remove		= facm_remove,
+	.driver = {
+		.name = "usb_facm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init facm_probe(struct platform_device *pdev)
+{
+	struct usb_gadget_facm_pdata *pdata = pdev->dev.platform_data;
+	int i;
+
+	dev_dbg(&pdev->dev, "%s: probe\n", __func__);
+
+	if (!pdata)
+		goto probe_android_register;
+
+	for (i = 0; i < GSERIAL_NO_PORTS; i++) {
+		gacm_ports[i].transport = pdata->transport[i];
+		gacm_ports[i].port_num = i;
+
+		switch (gacm_ports[i].transport) {
+		case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+			gacm_ports[i].client_port_num = no_tty_ports;
+			no_tty_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+			gacm_ports[i].client_port_num = no_sdio_ports;
+			no_sdio_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+			gacm_ports[i].client_port_num = no_smd_ports;
+			no_smd_ports++;
+			break;
+		default:
+			pr_err("%s: Un-supported transport: %u\n",
+					__func__, gacm_ports[i].transport);
+			return -ENODEV;
+		}
+
+		nr_ports++;
+	}
+
+	pr_info("%s:gport:tty_ports:%u sdio_ports:%u "
+			"smd_ports:%u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports,
+			no_smd_ports, nr_ports);
+
+probe_android_register:
+	android_register_function(&acm1_function);
+	android_register_function(&acm2_function);
+
+	return 0;
+}
+
+static int __init init(void)
+{
+	printk(KERN_INFO "f_acm init\n");
+
+	return platform_driver_probe(&usb_facm, facm_probe);
+}
+module_init(init);
+
+#endif /* CONFIG_USB_ANDROID_ACM */
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
new file mode 100644
index 0000000..53660186
--- /dev/null
+++ b/drivers/usb/gadget/f_diag.c
@@ -0,0 +1,752 @@
+/* drivers/usb/gadget/f_diag.c
+ * Diag Function Device - Route ARM9 and ARM11 DIAG messages
+ * between HOST and DEVICE.
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/usbdiag.h>
+#include <mach/rpc_hsusb.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+static DEFINE_SPINLOCK(ch_lock);
+static LIST_HEAD(usb_diag_ch_list);
+
+/* Vendor-specific interface carrying the two bulk diag endpoints. */
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            = sizeof intf_desc,
+	.bDescriptorType    = USB_DT_INTERFACE,
+	.bNumEndpoints      = 2,
+	.bInterfaceClass    = 0xFF,
+	.bInterfaceSubClass = 0xFF,
+	.bInterfaceProtocol = 0xFF,
+};
+
+/* High-speed bulk IN endpoint (512-byte packets). */
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength          = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        = 0,
+};
+
+/* Full-speed bulk IN endpoint (64-byte packets). */
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        = 0,
+};
+
+/* High-speed bulk OUT endpoint (512-byte packets). */
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(512),
+	.bInterval        = 0,
+};
+
+/* Full-speed bulk OUT endpoint (64-byte packets). */
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes     = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+	.bInterval        = 0,
+};
+
+/* Full-speed descriptor set handed to the composite framework. */
+static struct usb_descriptor_header *fs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+};
+
+/* High-speed descriptor set handed to the composite framework. */
+static struct usb_descriptor_header *hs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+
+/**
+ * struct diag_context - USB diag function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @in_desc: USB IN endpoint descriptor struct
+ * @out_desc: USB OUT endpoint descriptor struct
+ * @read_pool: List of requests used for Rx (OUT ep)
+ * @write_pool: List of requests used for Tx (IN ep)
+ * @config_work: Work item scheduled after interface is configured to notify
+ *               CONNECT event to diag char driver and updating product id
+ *               and serial number to MODEM/IMEM.
+ * @lock: Spinlock to protect read_pool, write_pool lists
+ * @configured: Non-zero while the interface is in the configured state
+ * @cdev: USB composite device struct
+ * @update_pid_and_serial_num: Optional callback used to hand the product
+ *               id and serial number string to the modem
+ * @ch: USB diag channel
+ * @dpkts_tolaptop: Count of packets written towards the host
+ * @dpkts_tomodem: Count of packets received from the host
+ * @dpkts_tolaptop_pending: IN transfers queued but not yet completed
+ *
+ */
+struct diag_context {
+	struct usb_function function;
+	struct usb_ep *out;
+	struct usb_ep *in;
+	struct usb_endpoint_descriptor  *in_desc;
+	struct usb_endpoint_descriptor  *out_desc;
+	struct list_head read_pool;
+	struct list_head write_pool;
+	struct work_struct config_work;
+	spinlock_t lock;
+	unsigned configured;
+	struct usb_composite_dev *cdev;
+	int (*update_pid_and_serial_num)(uint32_t, const char *);
+	struct usb_diag_ch ch;
+
+	/* pkt counters */
+	unsigned long dpkts_tolaptop;
+	unsigned long dpkts_tomodem;
+	unsigned dpkts_tolaptop_pending;
+};
+
+/* Map a generic usb_function back to its enclosing diag_context. */
+static inline struct diag_context *func_to_diag(struct usb_function *f)
+{
+	return container_of(f, struct diag_context, function);
+}
+
+/*
+ * Deferred work run after the interface reaches the configured state.
+ * Notifies the diag char driver of the CONNECT event and, when a
+ * callback was registered, forwards the product id plus the serial
+ * number string (looked up in the composite driver's first string
+ * table) to it.
+ */
+static void usb_config_work_func(struct work_struct *work)
+{
+	struct diag_context *ctxt = container_of(work,
+			struct diag_context, config_work);
+	struct usb_composite_dev *cdev = ctxt->cdev;
+	struct usb_gadget_strings *table;
+	struct usb_string *s;
+
+	if (ctxt->ch.notify)
+		ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_CONNECT, NULL);
+
+	if (!ctxt->update_pid_and_serial_num)
+		return;
+
+	/* pass on product id and serial number to dload; a NULL serial
+	 * pointer means the device exposes no serial number string */
+	if (!cdev->desc.iSerialNumber) {
+		ctxt->update_pid_and_serial_num(
+					cdev->desc.idProduct, 0);
+		return;
+	}
+
+	/*
+	 * Serial number is filled by the composite driver. So
+	 * it is fair enough to assume that it will always be
+	 * found at first table of strings.
+	 */
+	table = *(cdev->driver->strings);
+	for (s = table->strings; s && s->s; s++)
+		if (s->id == cdev->desc.iSerialNumber) {
+			ctxt->update_pid_and_serial_num(
+					cdev->desc.idProduct, s->s);
+			break;
+		}
+}
+
+/*
+ * IN endpoint completion handler.  When a successful transfer is an
+ * exact multiple of the endpoint's max packet size, the same request is
+ * requeued with length 0 so the host sees a zero-length packet marking
+ * the end of the transfer; the request is only returned to the write
+ * pool (and WRITE_DONE delivered) after that ZLP completes, which is
+ * why the length != 0 check below skips re-recording the result.
+ */
+static void diag_write_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	ctxt->dpkts_tolaptop_pending--;
+
+	if (!req->status) {
+		if ((req->length >= ep->maxpacket) &&
+				((req->length % ep->maxpacket) == 0)) {
+			ctxt->dpkts_tolaptop_pending++;
+			req->length = 0;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			usb_ep_queue(ctxt->in, req, GFP_ATOMIC);
+			return;
+		}
+	}
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->write_pool);
+	if (req->length != 0) {
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (ctxt->ch.notify)
+		ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_WRITE_DONE, d_req);
+}
+
+/*
+ * OUT endpoint completion handler: records the transfer result in the
+ * diag request, returns the USB request to the read pool, bumps the
+ * from-host packet counter and notifies the char driver (READ_DONE).
+ */
+static void diag_read_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	d_req->actual = req->actual;
+	d_req->status = req->status;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->read_pool);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	ctxt->dpkts_tomodem++;
+
+	if (ctxt->ch.notify)
+		ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_READ_DONE, d_req);
+}
+
+/**
+ * usb_diag_open() - Open a diag channel over USB
+ * @name: Name of the channel
+ * @priv: Private structure pointer which will be passed in notify()
+ * @notify: Callback function to receive notifications
+ *
+ * This function iterates overs the available channels and returns
+ * the channel handler if the name matches. The notify callback is called
+ * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
+ * A channel that does not exist yet is allocated and linked into the
+ * global channel list; an existing channel is reused in place.
+ *
+ */
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+		void (*notify)(void *, unsigned, struct diag_request *))
+{
+	struct usb_diag_ch *ch;
+	struct diag_context *ctxt;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	/* Check if we already have a channel with this name */
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch_lock, flags);
+
+	if (!found) {
+		ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+		if (!ctxt)
+			return ERR_PTR(-ENOMEM);
+
+		ch = &ctxt->ch;
+	}
+
+	ch->name = name;
+	ch->priv = priv;
+	ch->notify = notify;
+
+	/*
+	 * Only link a freshly allocated channel: a channel found above is
+	 * already on usb_diag_ch_list, and adding it a second time would
+	 * corrupt the list.
+	 */
+	if (!found) {
+		spin_lock_irqsave(&ch_lock, flags);
+		list_add_tail(&ch->list, &usb_diag_ch_list);
+		spin_unlock_irqrestore(&ch_lock, flags);
+	}
+
+	return ch;
+}
+EXPORT_SYMBOL(usb_diag_open);
+
+/**
+ * usb_diag_close() - Close a diag channel over USB
+ * @ch: Channel handler
+ *
+ * This function closes the diag channel.
+ *
+ */
+void usb_diag_close(struct usb_diag_ch *ch)
+{
+	struct diag_context *dev = container_of(ch, struct diag_context, ch);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	ch->priv = NULL;
+	ch->notify = NULL;
+	/*
+	 * If the USB interface still claims the channel (priv_usb set),
+	 * keep it on the list so a later usb_diag_open() can reuse it;
+	 * otherwise unlink it and free the owning diag_context.
+	 */
+	if (!ch->priv_usb) {
+		list_del(&ch->list);
+		kfree(dev);
+	}
+
+	spin_unlock_irqrestore(&ch_lock, flags);
+}
+EXPORT_SYMBOL(usb_diag_close);
+
+/**
+ * usb_diag_free_req() - Free USB requests
+ * @ch: Channel handler
+ *
+ * Releases every read and write USB request still pooled on the
+ * interface bound to this channel.  A channel with no USB interface
+ * attached (priv_usb unset) is a no-op.
+ *
+ */
+void usb_diag_free_req(struct usb_diag_ch *ch)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	struct usb_request *req;
+	struct list_head *entry, *next;
+
+	if (!ctxt)
+		return;
+
+	/* Drain the Tx (IN endpoint) pool. */
+	list_for_each_safe(entry, next, &ctxt->write_pool) {
+		req = list_entry(entry, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->in, req);
+	}
+
+	/* Drain the Rx (OUT endpoint) pool. */
+	list_for_each_safe(entry, next, &ctxt->read_pool) {
+		req = list_entry(entry, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->out, req);
+	}
+}
+EXPORT_SYMBOL(usb_diag_free_req);
+
+/**
+ * usb_diag_alloc_req() - Allocate USB requests
+ * @ch: Channel handler
+ * @n_write: Number of requests for Tx
+ * @n_read: Number of requests for Rx
+ *
+ * Allocates @n_write requests for the IN endpoint and @n_read requests
+ * for the OUT endpoint of the interface bound to this channel.  Only
+ * the request structures are created here; the data buffers are
+ * supplied later by the diag char driver.  On any failure everything
+ * allocated so far is released again.
+ *
+ */
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	struct usb_request *req;
+	int idx;
+
+	if (!ctxt)
+		return -ENODEV;
+
+	for (idx = 0; idx < n_write; idx++) {
+		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
+		if (!req)
+			goto free_all;
+		req->complete = diag_write_complete;
+		list_add_tail(&req->list, &ctxt->write_pool);
+	}
+
+	for (idx = 0; idx < n_read; idx++) {
+		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
+		if (!req)
+			goto free_all;
+		req->complete = diag_read_complete;
+		list_add_tail(&req->list, &ctxt->read_pool);
+	}
+
+	return 0;
+
+free_all:
+	usb_diag_free_req(ch);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(usb_diag_alloc_req);
+
+/**
+ * usb_diag_read() - Read data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on OUT endpoint of the interface corresponding to this
+ * channel. This function returns proper error code when the channel has no
+ * USB interface, the interface is not in configured state, no Rx requests
+ * are available or the ep queue fails.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ *
+ */
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req;
+
+	/* The channel may not be claimed by a USB interface yet */
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&ctxt->read_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+	if (usb_ep_queue(ctxt->out, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->read_pool);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: cannot queue"
+				" read request\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_read);
+
+/**
+ * usb_diag_write() - Write data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on IN endpoint of the interface corresponding to this
+ * channel. This function returns proper error code when the channel has no
+ * USB interface, the interface is not in configured state, no Tx requests
+ * are available or the ep queue fails.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ *
+ */
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+
+	/* The channel may not be claimed by a USB interface yet */
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	if (list_empty(&ctxt->write_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
+	list_del(&req->list);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+	if (usb_ep_queue(ctxt->in, req, GFP_ATOMIC)) {
+		/* If error add the link to linked list again*/
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->write_pool);
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		/* was "read request" — copy-paste from usb_diag_read() */
+		ERROR(ctxt->cdev, "%s: cannot queue"
+				" write request\n", __func__);
+		return -EIO;
+	}
+
+	ctxt->dpkts_tolaptop++;
+	ctxt->dpkts_tolaptop_pending++;
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_write);
+
+/*
+ * Called by the composite framework on disconnect/reset or alt-setting
+ * change: marks the interface unconfigured, tells the diag char driver
+ * (DISCONNECT) and shuts down both bulk endpoints.
+ */
+static void diag_function_disable(struct usb_function *f)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	unsigned long flags;
+
+	DBG(dev->cdev, "diag_function_disable\n");
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->ch.notify)
+		dev->ch.notify(dev->ch.priv, USB_DIAG_DISCONNECT, NULL);
+
+	usb_ep_disable(dev->in);
+	dev->in->driver_data = NULL;
+
+	usb_ep_disable(dev->out);
+	dev->out->driver_data = NULL;
+
+}
+
+/*
+ * Enable the interface: pick the speed-appropriate endpoint
+ * descriptors, bring up both bulk endpoints, schedule the deferred
+ * CONNECT/dload work and reset the per-channel packet counters.
+ */
+static int diag_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+	struct usb_diag_ch *ch;
+	int rc = 0;
+
+	dev->in_desc = ep_choose(cdev->gadget,
+			&hs_bulk_in_desc, &fs_bulk_in_desc);
+	dev->out_desc = ep_choose(cdev->gadget,
+			&hs_bulk_out_desc, &fs_bulk_out_desc);
+	dev->in->driver_data = dev;
+	rc = usb_ep_enable(dev->in, dev->in_desc);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->in->name, rc);
+		return rc;
+	}
+	dev->out->driver_data = dev;
+	rc = usb_ep_enable(dev->out, dev->out_desc);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->out->name, rc);
+		usb_ep_disable(dev->in);
+		return rc;
+	}
+	/* CONNECT notification and dload update run in process context */
+	schedule_work(&dev->config_work);
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt;
+
+		ctxt = ch->priv_usb;
+		/* Channels not claimed by a USB interface have no context */
+		if (!ctxt)
+			continue;
+		ctxt->dpkts_tolaptop = 0;
+		ctxt->dpkts_tomodem = 0;
+		ctxt->dpkts_tolaptop_pending = 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 1;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return rc;
+}
+
+/*
+ * Free the descriptor copies made at bind time and drop the claim on
+ * the diag channel (ch.priv_usb) so usb_diag_close() can free it.
+ */
+static void diag_function_unbind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct diag_context *ctxt = func_to_diag(f);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+
+	usb_free_descriptors(f->descriptors);
+	ctxt->ch.priv_usb = NULL;
+}
+
+/*
+ * Allocate the interface number and the two bulk endpoints, then copy
+ * the descriptor sets (mirroring the autoconfigured FS endpoint
+ * addresses into the HS descriptors on dual-speed gadgets).
+ *
+ * NOTE(review): usb_interface_id() can return a negative errno; that
+ * is not checked here — confirm whether the callers guarantee success.
+ */
+static int diag_function_bind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct diag_context *ctxt = func_to_diag(f);
+	struct usb_ep *ep;
+	int status = -ENODEV;
+
+	intf_desc.bInterfaceNumber =  usb_interface_id(c, f);
+
+	/* usb_ep_autoconfig() returns NULL when no endpoint is free */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+	if (!ep)
+		goto fail;
+	ctxt->in = ep;
+	ep->driver_data = ctxt;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+	if (!ep)
+		goto fail;
+	ctxt->out = ep;
+	ep->driver_data = ctxt;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(fs_diag_desc);
+	if (!f->descriptors)
+		goto fail;
+
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		hs_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+	}
+	return 0;
+fail:
+	if (ctxt->out)
+		ctxt->out->driver_data = NULL;
+	if (ctxt->in)
+		ctxt->in->driver_data = NULL;
+	return status;
+
+}
+
+/*
+ * diag_function_add() - register the diag function on configuration @c
+ * @c: USB configuration to attach to
+ * @name: name of the diag channel to claim (must already be opened)
+ * @update_pid: optional callback used to report the product id and
+ *              serial number string to the modem
+ *
+ * Looks up the channel by name, claims it for this USB interface and
+ * adds the usb_function to the configuration.  Returns -ENODEV when
+ * the channel does not exist, otherwise the usb_add_function() result.
+ */
+int diag_function_add(struct usb_configuration *c, const char *name,
+			int (*update_pid)(uint32_t, const char *))
+{
+	struct diag_context *dev;
+	struct usb_diag_ch *_ch;
+	int found = 0, ret;
+
+	DBG(c->cdev, "diag_function_add\n");
+
+	list_for_each_entry(_ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, _ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		ERROR(c->cdev, "unable to get diag usb channel\n");
+		return -ENODEV;
+	}
+
+	dev = container_of(_ch, struct diag_context, ch);
+	/* claim the channel for this USB interface */
+	_ch->priv_usb = dev;
+
+	dev->update_pid_and_serial_num = update_pid;
+	dev->cdev = c->cdev;
+	dev->function.name = _ch->name;
+	dev->function.descriptors = fs_diag_desc;
+	dev->function.hs_descriptors = hs_diag_desc;
+	dev->function.bind = diag_function_bind;
+	dev->function.unbind = diag_function_unbind;
+	dev->function.set_alt = diag_function_set_alt;
+	dev->function.disable = diag_function_disable;
+	spin_lock_init(&dev->lock);
+	INIT_LIST_HEAD(&dev->read_pool);
+	INIT_LIST_HEAD(&dev->write_pool);
+	INIT_WORK(&dev->config_work, usb_config_work_func);
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret) {
+		INFO(c->cdev, "usb_add_function failed\n");
+		/* release the claim so the channel can be freed/reused */
+		_ch->priv_usb = NULL;
+	}
+
+	return ret;
+}
+
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+/*
+ * Dump per-channel packet counters into the shared debug_buffer and
+ * copy the result to userspace.  Channels that are not claimed by a
+ * USB interface carry no context and are skipped instead of
+ * dereferencing a NULL priv_usb.
+ */
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char *buf = debug_buffer;
+	int temp = 0;
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt;
+
+		ctxt = ch->priv_usb;
+		if (!ctxt)
+			continue;
+
+		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+				"---Name: %s---\n"
+				"dpkts_tolaptop: %lu\n"
+				"dpkts_tomodem:  %lu\n"
+				"pkts_tolaptop_pending: %u\n",
+				ch->name, ctxt->dpkts_tolaptop,
+				ctxt->dpkts_tomodem,
+				ctxt->dpkts_tolaptop_pending);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+/*
+ * Any write to the debugfs file zeroes the packet counters of every
+ * claimed channel; unclaimed channels (NULL priv_usb) are skipped.
+ */
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt;
+
+		ctxt = ch->priv_usb;
+		if (!ctxt)
+			continue;
+
+		ctxt->dpkts_tolaptop = 0;
+		ctxt->dpkts_tomodem = 0;
+		ctxt->dpkts_tolaptop_pending = 0;
+	}
+
+	return count;
+}
+
+/* Nothing to set up per open; stats are read from global state. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* debugfs "status" file: read dumps the stats, any write resets them. */
+static const struct file_operations debug_fdiag_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+/* Create the usb_diag debugfs directory and its "status" file. */
+static void fdiag_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_diag", 0);
+	/*
+	 * debugfs_create_dir() returns NULL on allocation failure and an
+	 * ERR_PTR when debugfs is unavailable; bail out on both.
+	 */
+	if (!dent || IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, 0, &debug_fdiag_ops);
+}
+#else
+static void fdiag_debugfs_init(void)
+{
+	return;
+}
+#endif
+
+/*
+ * Module teardown helper: free every channel that the diag char driver
+ * is no longer using (ch->priv unset); channels still referenced by
+ * diagchar stay on the list.
+ *
+ * NOTE(review): the list traversal itself runs without ch_lock — only
+ * the unlink/free is protected.  Confirm no concurrent usb_diag_open()
+ * can race with this during teardown.
+ */
+static void diag_cleanup(void)
+{
+	struct diag_context *dev;
+	struct list_head *act, *tmp;
+	struct usb_diag_ch *_ch;
+	unsigned long flags;
+
+	list_for_each_safe(act, tmp, &usb_diag_ch_list) {
+		_ch = list_entry(act, struct usb_diag_ch, list);
+		dev = container_of(_ch, struct diag_context, ch);
+
+		spin_lock_irqsave(&ch_lock, flags);
+		/* Free if diagchar is not using the channel anymore */
+		if (!_ch->priv) {
+			list_del(&_ch->list);
+			kfree(dev);
+		}
+		spin_unlock_irqrestore(&ch_lock, flags);
+
+	}
+}
+
+/* One-time setup for the diag function: create the debugfs entries. */
+static int diag_setup(void)
+{
+	fdiag_debugfs_init();
+
+	return 0;
+}
diff --git a/drivers/usb/gadget/f_diag.h b/drivers/usb/gadget/f_diag.h
new file mode 100644
index 0000000..82d9a25
--- /dev/null
+++ b/drivers/usb/gadget/f_diag.h
@@ -0,0 +1,24 @@
+/* drivers/usb/gadget/f_diag.h
+ *
+ * Diag Function Device - Route DIAG frames between SMD and USB
+ *
+ * Copyright (C) 2008-2009 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __F_DIAG_H
+#define __F_DIAG_H
+
+/*
+ * Register the diag function on configuration @c.  @name selects the
+ * diag channel and @update_pid optionally reports the product id and
+ * serial number string to the modem.  The prototype must match the
+ * definition in f_diag.c, which takes the callback as a third argument.
+ */
+int diag_function_add(struct usb_configuration *c, const char *name,
+			int (*update_pid)(uint32_t, const char *));
+
+#endif /* __F_DIAG_H */
+
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5440c6d..ccd9c2d 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -312,7 +312,10 @@
 
 #include "storage_common.c"
 
-
+#ifdef CONFIG_USB_CSW_HACK
+static int write_error_after_csw_sent;
+static int csw_hack_sent;
+#endif
 /*-------------------------------------------------------------------------*/
 
 struct fsg_dev;
@@ -469,6 +472,7 @@
 }
 
 typedef void (*fsg_routine_t)(struct fsg_dev *);
+static int send_status(struct fsg_common *common);
 
 static int exception_in_progress(struct fsg_common *common)
 {
@@ -625,7 +629,7 @@
 		if (ctrl->bRequestType !=
 		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 			break;
-		if (w_index != fsg->interface_number || w_value != 0)
+		if (w_value != 0)
 			return -EDOM;
 
 		/*
@@ -640,7 +644,7 @@
 		if (ctrl->bRequestType !=
 		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 			break;
-		if (w_index != fsg->interface_number || w_value != 0)
+		if (w_value != 0)
 			return -EDOM;
 		VDBG(fsg, "get max LUN\n");
 		*(u8 *)req->buf = fsg->common->nluns - 1;
@@ -881,6 +885,9 @@
 	ssize_t			nwritten;
 	int			rc;
 
+#ifdef CONFIG_USB_CSW_HACK
+	int			i;
+#endif
 	if (curlun->ro) {
 		curlun->sense_data = SS_WRITE_PROTECTED;
 		return -EINVAL;
@@ -994,7 +1001,17 @@
 		bh = common->next_buffhd_to_drain;
 		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
 			break;			/* We stopped early */
+#ifdef CONFIG_USB_CSW_HACK
+		/*
+		 * If the csw packet is already submmitted to the hardware,
+		 * by marking the state of buffer as full, then by checking
+		 * the residue, we make sure that this csw packet is not
+		 * written on to the storage media.
+		 */
+		if (bh->state == BUF_STATE_FULL && common->residue) {
+#else
 		if (bh->state == BUF_STATE_FULL) {
+#endif
 			smp_rmb();
 			common->next_buffhd_to_drain = bh->next;
 			bh->state = BUF_STATE_EMPTY;
@@ -1045,9 +1062,36 @@
 				curlun->sense_data = SS_WRITE_ERROR;
 				curlun->sense_data_info = file_offset >> 9;
 				curlun->info_valid = 1;
+#ifdef CONFIG_USB_CSW_HACK
+				write_error_after_csw_sent = 1;
+				goto write_error;
+#endif
 				break;
 			}
 
+#ifdef CONFIG_USB_CSW_HACK
+write_error:
+			if ((nwritten == amount) && !csw_hack_sent) {
+				if (write_error_after_csw_sent)
+					break;
+				/*
+				 * Check whether any buffer is still busy: a
+				 * busy buffer means the host has not yet sent
+				 * all of the data, so there is no point in
+				 * sending the CSW early before the complete
+				 * data has arrived.
+				 */
+				for (i = 0; i < FSG_NUM_BUFFERS; i++) {
+					if (common->buffhds[i].state ==
+							BUF_STATE_BUSY)
+						break;
+				}
+				if (!amount_left_to_req && i == FSG_NUM_BUFFERS) {
+					csw_hack_sent = 1;
+					send_status(common);
+				}
+			}
+#endif
 			/* Did the host decide to stop early? */
 			if (bh->outreq->actual != bh->outreq->length) {
 				common->short_packet_received = 1;
@@ -1508,8 +1552,7 @@
 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
 		return -EINVAL;
 	}
-
-	if (curlun->prevent_medium_removal && !prevent)
+	if (!curlun->nofua && curlun->prevent_medium_removal && !prevent)
 		fsg_lun_fsync_sub(curlun);
 	curlun->prevent_medium_removal = prevent;
 	return 0;
@@ -1790,6 +1833,19 @@
 	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
 	csw->Tag = common->tag;
 	csw->Residue = cpu_to_le32(common->residue);
+#ifdef CONFIG_USB_CSW_HACK
+	/*
+	 * The CSW is sent early, before the data is actually written to
+	 * the storage medium, so report a residue of zero on the
+	 * assumption that the write will succeed.  If a write error has
+	 * already occurred, report the real residue instead.
+	 */
+	if (write_error_after_csw_sent) {
+		write_error_after_csw_sent = 0;
+		csw->Residue = cpu_to_le32(common->residue);
+	} else
+		csw->Residue = 0;
+#else
+	csw->Residue = cpu_to_le32(common->residue);
+#endif
 	csw->Status = status;
 
 	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
@@ -2349,7 +2405,6 @@
 /* Reset interface setting and re-init endpoint state (toggle etc). */
 static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
 {
-	const struct usb_endpoint_descriptor *d;
 	struct fsg_dev *fsg;
 	int i, rc = 0;
 
@@ -2374,15 +2429,6 @@
 			}
 		}
 
-		/* Disable the endpoints */
-		if (fsg->bulk_in_enabled) {
-			usb_ep_disable(fsg->bulk_in);
-			fsg->bulk_in_enabled = 0;
-		}
-		if (fsg->bulk_out_enabled) {
-			usb_ep_disable(fsg->bulk_out);
-			fsg->bulk_out_enabled = 0;
-		}
 
 		common->fsg = NULL;
 		wake_up(&common->fsg_wait);
@@ -2395,22 +2441,6 @@
 	common->fsg = new_fsg;
 	fsg = common->fsg;
 
-	/* Enable the endpoints */
-	d = fsg_ep_desc(common->gadget,
-			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
-	rc = enable_endpoint(common, fsg->bulk_in, d);
-	if (rc)
-		goto reset;
-	fsg->bulk_in_enabled = 1;
-
-	d = fsg_ep_desc(common->gadget,
-			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
-	rc = enable_endpoint(common, fsg->bulk_out, d);
-	if (rc)
-		goto reset;
-	fsg->bulk_out_enabled = 1;
-	common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
-	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
 
 	/* Allocate the requests */
 	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
@@ -2440,6 +2470,29 @@
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
+	struct fsg_common *common = fsg->common;
+	const struct usb_endpoint_descriptor *d;
+	int rc;
+
+	/* Enable the endpoints */
+	d = fsg_ep_desc(common->gadget,
+			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+	rc = enable_endpoint(common, fsg->bulk_in, d);
+	if (rc)
+		return rc;
+	fsg->bulk_in_enabled = 1;
+
+	d = fsg_ep_desc(common->gadget,
+			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
+	rc = enable_endpoint(common, fsg->bulk_out, d);
+	if (rc) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+		return rc;
+	}
+	fsg->bulk_out_enabled = 1;
+	common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
 	fsg->common->new_fsg = fsg;
 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 	return USB_GADGET_DELAYED_STATUS;
@@ -2448,6 +2501,18 @@
 static void fsg_disable(struct usb_function *f)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
+
+	/* Disable the endpoints */
+	if (fsg->bulk_in_enabled) {
+		usb_ep_disable(fsg->bulk_in);
+		fsg->bulk_in_enabled = 0;
+		fsg->bulk_in->driver_data = NULL;
+	}
+	if (fsg->bulk_out_enabled) {
+		usb_ep_disable(fsg->bulk_out);
+		fsg->bulk_out_enabled = 0;
+		fsg->bulk_out->driver_data = NULL;
+	}
 	fsg->common->new_fsg = NULL;
 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 }
@@ -2559,6 +2624,7 @@
 		 */
 		if (!fsg_is_set(common))
 			break;
+		common->ep0req->length = 0;
 		if (test_and_clear_bit(IGNORE_BULK_OUT,
 				       &common->fsg->atomic_bitflags))
 			usb_ep_clear_halt(common->fsg->bulk_in);
@@ -2654,6 +2720,16 @@
 			common->state = FSG_STATE_STATUS_PHASE;
 		spin_unlock_irq(&common->lock);
 
+#ifdef CONFIG_USB_CSW_HACK
+		/* Since status is already sent for write scsi command,
+		 * need to skip sending status once again if it is a
+		 * write scsi command.
+		 */
+		if (csw_hack_sent) {
+			csw_hack_sent = 0;
+			continue;
+		}
+#endif
 		if (send_status(common))
 			continue;
 
@@ -2779,6 +2855,7 @@
 		curlun->ro = lcfg->cdrom || lcfg->ro;
 		curlun->initially_ro = curlun->ro;
 		curlun->removable = lcfg->removable;
+		curlun->nofua = lcfg->nofua;
 		curlun->dev.release = fsg_lun_release;
 		curlun->dev.parent = &gadget->dev;
 		/* curlun->dev.driver = &fsg_driver.driver; XXX */
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
new file mode 100644
index 0000000..770a225
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/usb/android_composite.h>
+#include <linux/spinlock.h>
+
+#include <linux/platform_data/usb_rmnet.h>
+#include "u_rmnet.h"
+#include "gadget_chips.h"
+
+
+/* bInterval exponent for the interrupt notify endpoint */
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+/* Per-speed set of endpoint descriptor pointers (filled in at bind time) */
+struct rmnet_descs {
+	struct usb_endpoint_descriptor	*in;
+	struct usb_endpoint_descriptor	*out;
+	struct usb_endpoint_descriptor	*notify;
+};
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+/* TODO: use separate structures for data and
+ * control paths
+ */
+struct f_rmnet {
+	struct grmnet			port;
+	int				ifc_id;
+	u8				port_num;
+	atomic_t			online;
+	struct usb_composite_dev	*cdev;
+
+	/* protects cpkt_resp_q (see frmnet_setup/frmnet_send_cpkt_response) */
+	spinlock_t			lock;
+
+	/* usb descriptors */
+	struct rmnet_descs		fs;
+	struct rmnet_descs		hs;
+
+	/* usb eps*/
+	struct usb_ep			*notify;
+	struct usb_endpoint_descriptor	*notify_desc;
+	struct usb_request		*notify_req;
+
+	/* control info */
+	struct list_head		cpkt_resp_q;
+	/* number of RESPONSE_AVAILABLE notifications still owed to the host */
+	atomic_t			notify_count;
+	unsigned long			cpkts_len;
+};
+
+#define NR_PORTS	1
+static unsigned int nr_ports;
+static struct rmnet_ports {
+	unsigned			port_num;
+	struct f_rmnet			*port;
+#ifdef CONFIG_USB_ANDROID
+	struct android_usb_function	android_f;
+#endif
+} ports[NR_PORTS];
+
+/* Vendor-specific interface: one interrupt (notify) + two bulk endpoints */
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+/* ------- misc functions --------------------*/
+
+/* Recover the enclosing f_rmnet instance from its embedded usb_function. */
+static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
+{
+	struct f_rmnet *dev = container_of(f, struct f_rmnet, port.func);
+
+	return dev;
+}
+
+/* Recover the enclosing f_rmnet instance from its embedded grmnet port. */
+static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
+{
+	struct f_rmnet *dev = container_of(r, struct f_rmnet, port);
+
+	return dev;
+}
+
+/*
+ * Allocate a usb_request together with a kmalloc'd transfer buffer of
+ * @len bytes.  Returns ERR_PTR(-ENOMEM) if either allocation fails;
+ * callers release the pair with frmnet_free_req().
+ */
+static struct usb_request *
+frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (req) {
+		req->buf = kmalloc(len, flags);
+		if (req->buf) {
+			req->length = len;
+			return req;
+		}
+		usb_ep_free_request(ep, req);
+	}
+
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Release a request (and its buffer) obtained from frmnet_alloc_req(). */
+void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/*
+ * Allocate a control packet carrying a @len byte payload buffer.
+ * Returns ERR_PTR(-ENOMEM) on failure; free with rmnet_free_ctrl_pkt().
+ */
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt = kzalloc(sizeof(*pkt), flags);
+
+	if (pkt) {
+		pkt->buf = kmalloc(len, flags);
+		if (pkt->buf) {
+			pkt->len = len;
+			return pkt;
+		}
+		kfree(pkt);
+	}
+
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Free a control packet allocated by rmnet_alloc_ctrl_pkt(). */
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/* -------------------------------------------*/
+
+/*
+ * One-time initialisation of the two transports backing each port:
+ * BAM for the data path and SMD for the control path.
+ * Returns 0 on success or the first transport's error code.
+ */
+static int gport_setup(int no_ports)
+{
+	int ret;
+
+	pr_debug("%s: no_ports:%d\n", __func__, no_ports);
+
+	ret = gbam_setup(no_ports);
+	if (!ret)
+		ret = gsmd_ctrl_setup(no_ports);
+
+	return ret;
+}
+
+/*
+ * Bring up the control (SMD) and data (BAM) paths for @dev.
+ * On failure everything brought up so far is torn down again, so the
+ * caller never sees a half-connected port.
+ */
+static int gport_connect(struct f_rmnet *dev)
+{
+	int ret;
+
+	pr_debug("%s:dev:%p portno:%d\n",
+			__func__, dev, dev->port_num);
+
+	ret = gsmd_ctrl_connect(&dev->port, dev->port_num);
+	if (ret) {
+		pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = gbam_connect(&dev->port, dev->port_num);
+	if (ret) {
+		pr_err("%s: gbam_connect failed: err:%d\n",
+				__func__, ret);
+		/* undo the control connection so we don't leak it */
+		gsmd_ctrl_disconnect(&dev->port, dev->port_num);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down the data (BAM) and control (SMD) paths for @dev, in the
+ * reverse order of gport_connect().  Always returns 0.
+ */
+static int gport_disconnect(struct f_rmnet *dev)
+{
+	pr_debug("%s:dev:%p portno:%d\n",
+			__func__, dev, dev->port_num);
+
+	gbam_disconnect(&dev->port, dev->port_num);
+
+	gsmd_ctrl_disconnect(&dev->port, dev->port_num);
+
+	return 0;
+}
+
+/* Platform-device remove hook; currently a stub (see TBD list below). */
+static int frmnet_remove(struct platform_device *dev)
+{
+	/* TBD:
+	 *  1. Unregister android function
+	 *  2. Free name from ports
+	 *  3. Free rmnet device
+	 *  4. Free Copy Descriptors
+	 */
+	return 0;
+}
+
+/*
+ * Undo frmnet_bind(): release the copied descriptor arrays, the notify
+ * request and the device itself.
+ * NOTE(review): kfree(dev) leaves ports[].port pointing at freed memory
+ * (frmnet_probe stored dev there) — verify no later path dereferences it.
+ */
+static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+
+	pr_debug("%s: portno:%d\n", __func__, dev->port_num);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	frmnet_free_req(dev->notify, dev->notify_req);
+
+	kfree(dev);
+}
+
+/*
+ * Called when the host deconfigures us or the cable is pulled.
+ * Disables the notify endpoint, marks the port offline, drops any
+ * control responses still queued for the (now gone) host, and tears
+ * down the data/control transports.
+ */
+static void frmnet_disable(struct usb_function *f)
+{
+	struct f_rmnet *dev = func_to_rmnet(f);
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+
+	pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+	usb_ep_disable(dev->notify);
+
+	atomic_set(&dev->online, 0);
+
+	/* Free responses queued while the host was attached; they would
+	 * otherwise leak (nothing dequeues them once we are offline).
+	 */
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(&dev->cpkt_resp_q)) {
+		cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		rmnet_free_ctrl_pkt(cpkt);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* No notifications are owed to a disconnected host */
+	atomic_set(&dev->notify_count, 0);
+
+	gport_disconnect(dev);
+}
+
+/*
+ * SET_INTERFACE/SET_CONFIGURATION handler: (re)enable the notify
+ * endpoint for the current speed and (re)connect the data/control
+ * transports.  The port is marked online only after everything
+ * succeeded; on failure the notify endpoint is disabled again so we
+ * are left in a consistent "not configured" state.
+ */
+static int
+frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+	int				ret;
+
+	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	/* driver_data set => endpoint still enabled from a previous alt */
+	if (dev->notify->driver_data) {
+		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
+		usb_ep_disable(dev->notify);
+	}
+	dev->notify_desc = ep_choose(cdev->gadget,
+				dev->hs.notify,
+				dev->fs.notify);
+	ret = usb_ep_enable(dev->notify, dev->notify_desc);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, dev->notify->name, ret);
+		return ret;
+	}
+	dev->notify->driver_data = dev;
+
+	if (dev->port.in->driver_data) {
+		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
+		gport_disconnect(dev);
+	}
+
+	dev->port.in_desc = ep_choose(cdev->gadget,
+			dev->hs.in, dev->fs.in);
+	dev->port.out_desc = ep_choose(cdev->gadget,
+			dev->hs.out, dev->fs.out);
+
+	ret = gport_connect(dev);
+	if (ret) {
+		/* don't advertise an online port whose transports are down */
+		usb_ep_disable(dev->notify);
+		dev->notify->driver_data = NULL;
+		return ret;
+	}
+
+	atomic_set(&dev->online, 1);
+
+	return 0;
+}
+
+/*
+ * Tell the host a control response is waiting by sending a CDC
+ * RESPONSE_AVAILABLE notification on the interrupt endpoint.
+ * notify_count tracks outstanding notifications: if one is already in
+ * flight (count went above 1) we just bump the counter and let
+ * frmnet_notify_complete() re-queue until the counter drains.
+ */
+static void frmnet_ctrl_response_available(struct f_rmnet *dev)
+{
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event;
+	unsigned long			flags;
+	int				ret;
+
+	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!atomic_read(&dev->online) || !req || !req->buf) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	/* != 1 means a notification is already queued on the ep */
+	if (atomic_inc_return(&dev->notify_count) != 1) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return;
+	}
+
+	event = req->buf;
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
+	if (ret) {
+		atomic_dec(&dev->notify_count);
+		pr_debug("ep enqueue error %d\n", ret);
+	}
+}
+
+/*
+ * Queue a control response from the modem for the host and raise a
+ * RESPONSE_AVAILABLE notification.  Takes ownership of @cpkt (frees it
+ * if the port is offline).
+ */
+static int
+frmnet_send_cpkt_response(struct grmnet *gr, struct rmnet_ctrl_pkt *cpkt)
+{
+	struct f_rmnet		*dev;
+	unsigned long		flags;
+
+	if (!gr || !cpkt) {
+		pr_err("%s: Invalid grmnet/cpkt, grmnet:%p cpkt:%p\n",
+				__func__, gr, cpkt);
+		return -ENODEV;
+	}
+
+	dev = port_to_rmnet(gr);
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	if (!atomic_read(&dev->online)) {
+		rmnet_free_ctrl_pkt(cpkt);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* Queue at the tail: frmnet_setup() dequeues from the head, so
+	 * responses must be tail-queued to reach the host in the order
+	 * the modem produced them (list_add() would reverse them).
+	 */
+	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	frmnet_ctrl_response_available(dev);
+
+	return 0;
+}
+
+/*
+ * ep0 completion for SEND_ENCAPSULATED_COMMAND: copy the command the
+ * host wrote into a control packet and hand it to the control channel.
+ */
+static void
+frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet			*dev = req->context;
+	struct rmnet_ctrl_pkt		*cpkt;
+
+	if (!dev) {
+		pr_err("%s: rmnet dev is null\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	/* aborted/shutdown transfers carry no valid data in req->actual */
+	if (req->status < 0)
+		return;
+
+	cpkt = rmnet_alloc_ctrl_pkt(req->actual, GFP_ATOMIC);
+	if (IS_ERR(cpkt)) {
+		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
+		return;
+	}
+
+	memcpy(cpkt->buf, req->buf, req->actual);
+
+	if (dev->port.send_cpkt_request)
+		dev->port.send_cpkt_request(&dev->port, dev->port_num, cpkt);
+	else
+		rmnet_free_ctrl_pkt(cpkt);	/* no consumer: avoid leak */
+}
+
+/*
+ * Completion handler for the interrupt notify request.  While
+ * notify_count is still non-zero after the decrement, more
+ * RESPONSE_AVAILABLE notifications are owed, so the same request is
+ * re-queued until the counter drains.
+ */
+static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rmnet *dev = req->context;
+	int status = req->status;
+
+	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		pr_err("rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		/* still pending notifications: send the next one */
+		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			pr_debug("ep enqueue error %d\n", status);
+		}
+		break;
+	}
+}
+
+/*
+ * ep0 class-request handler for the RmNet interface.  Supports the CDC
+ * encapsulated-command/response pair and SET_CONTROL_LINE_STATE.
+ * Returns the data-stage length (>= 0, queued on ep0 at the bottom) or
+ * a negative error for unsupported/invalid requests.
+ */
+static int
+frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+	struct usb_request		*req = cdev->req;
+	u16				w_index = le16_to_cpu(ctrl->wIndex);
+	u16				w_value = le16_to_cpu(ctrl->wValue);
+	u16				w_length = le16_to_cpu(ctrl->wLength);
+	int				ret = -EOPNOTSUPP;
+
+	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: usb cable is not connected\n", __func__);
+		return -ENOTCONN;
+	}
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* Host -> device QMI command; data arrives in the OUT data stage
+	 * and is forwarded from frmnet_cmd_complete().
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > req->length)
+			goto invalid;
+		ret = w_length;
+		req->complete = frmnet_cmd_complete;
+		req->context = dev;
+		break;
+
+
+	/* Device -> host QMI response: pop the oldest queued packet and
+	 * return it in the IN data stage (truncated to w_length).
+	 */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			unsigned len;
+			struct rmnet_ctrl_pkt *cpkt;
+
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->cpkt_resp_q)) {
+				pr_err("ctrl resp queue empty "
+					" req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			cpkt = list_first_entry(&dev->cpkt_resp_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			spin_unlock(&dev->lock);
+
+			len = min_t(unsigned, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+
+			rmnet_free_ctrl_pkt(cpkt);
+		}
+		break;
+	/* DTR changes are relayed to the modem as control bits */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		if (dev->port.send_cbits_tomodem)
+			dev->port.send_cbits_tomodem(&dev->port,
+							dev->port_num,
+							w_value);
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Bind the function to a configuration: claim an interface number,
+ * autoconfigure the three endpoints, allocate the notify request and
+ * build per-speed descriptor copies.  Error paths release the
+ * endpoints claimed so far in reverse order.
+ * NOTE(review): the usb_copy_descriptors()/usb_find_endpoint() returns
+ * are not checked — an allocation failure here would be dereferenced
+ * later; confirm against other functions in this tree.
+ */
+static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rmnet			*dev = func_to_rmnet(f);
+	struct usb_ep			*ep;
+	struct usb_composite_dev	*cdev = c->cdev;
+	int				ret = -ENODEV;
+
+	dev->ifc_id = usb_interface_id(c, f);
+	if (dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d",
+				__func__, dev->ifc_id);
+		return dev->ifc_id;
+	}
+	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep) {
+		pr_err("%s: usb epin autoconfig failed\n", __func__);
+		return -ENODEV;
+	}
+	dev->port.in = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep) {
+		pr_err("%s: usb epout autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_out_fail;
+	}
+	dev->port.out = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep) {
+		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_notify_fail;
+	}
+	dev->notify = ep;
+	ep->driver_data = cdev;
+
+	/* +2 bytes of slack beyond the CDC notification header */
+	dev->notify_req = frmnet_alloc_req(ep,
+				sizeof(struct usb_cdc_notification) + 2,
+				GFP_KERNEL);
+	if (IS_ERR(dev->notify_req)) {
+		pr_err("%s: unable to allocate memory for notify req\n",
+				__func__);
+		ret = -ENOMEM;
+		goto ep_notify_alloc_fail;
+	}
+
+	dev->notify_req->complete = frmnet_notify_complete;
+	dev->notify_req->context = dev;
+
+	f->descriptors = usb_copy_descriptors(rmnet_fs_function);
+
+	/* remember where each endpoint descriptor landed in the copy */
+	dev->fs.in = usb_find_endpoint(rmnet_fs_function,
+					f->descriptors,
+					&rmnet_fs_in_desc);
+	dev->fs.out = usb_find_endpoint(rmnet_fs_function,
+					f->descriptors,
+					&rmnet_fs_out_desc);
+	dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
+					f->descriptors,
+					&rmnet_fs_notify_desc);
+
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		/* HS descriptors reuse the addresses autoconfig assigned */
+		rmnet_hs_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
+
+		dev->hs.in = usb_find_endpoint(rmnet_hs_function,
+				f->hs_descriptors, &rmnet_hs_in_desc);
+		dev->hs.out = usb_find_endpoint(rmnet_hs_function,
+				f->hs_descriptors, &rmnet_hs_out_desc);
+		dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
+				f->hs_descriptors, &rmnet_hs_notify_desc);
+	}
+
+	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
+			__func__, dev->port_num,
+			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+			dev->port.in->name, dev->port.out->name);
+
+	return 0;
+
+ep_notify_alloc_fail:
+	dev->notify->driver_data = NULL;
+	dev->notify = NULL;
+ep_auto_notify_fail:
+	dev->port.out->driver_data = NULL;
+	dev->port.out = NULL;
+ep_auto_out_fail:
+	dev->port.in->driver_data = NULL;
+	dev->port.in = NULL;
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID
+/*
+ * Android-composite bind hook: attach the next unbound rmnet port to
+ * configuration @c.  The static `portno` cursor advances once per
+ * successful lookup; `id` snapshots the port this call works on so the
+ * failure path frees the right entry (using the incremented `portno`
+ * there would free the NEXT port's name).
+ */
+static int frmnet_bind_config(struct usb_configuration *c)
+{
+	static unsigned		portno;
+	unsigned		id;
+	int			status;
+	struct f_rmnet		*dev;
+	struct usb_function	*f;
+	unsigned long		flags;
+
+	pr_debug("%s: usb config:%p\n", __func__, c);
+
+	if (portno >= nr_ports) {
+		pr_err("%s: supporting ports#%u port_id:%u", __func__,
+				nr_ports, portno);
+		return -ENODEV;
+	}
+
+	if (rmnet_string_defs[0].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0) {
+			pr_err("%s: failed to get string id, err:%d\n",
+					__func__, status);
+			return status;
+		}
+		rmnet_string_defs[0].id = status;
+	}
+
+	id = portno;
+	dev = ports[id].port;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->cdev = c->cdev;
+	f = &dev->port.func;
+	f->name = ports[id].android_f.name;
+	portno++;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	f->strings = rmnet_strings;
+	f->bind = frmnet_bind;
+	f->unbind = frmnet_unbind;
+	f->disable = frmnet_disable;
+	f->set_alt = frmnet_set_alt;
+	f->setup = frmnet_setup;
+	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
+
+	status = usb_add_function(c, f);
+	if (status) {
+		pr_err("%s: usb add function failed: %d\n",
+				__func__, status);
+		/* free THIS port's resources and clear the stale pointers */
+		kfree(ports[id].android_f.name);
+		ports[id].android_f.name = NULL;
+		kfree(dev);
+		ports[id].port = NULL;
+		return status;
+	}
+
+	pr_debug("%s: complete\n", __func__);
+
+	return status;
+}
+
+/* Probe is supplied separately via platform_driver_probe() in
+ * frmnet_init(), so only .remove appears here.
+ */
+static struct platform_driver usb_rmnet = {
+	.remove = frmnet_remove,
+	.driver = {
+		.name = "usb_rmnet",
+		.owner = THIS_MODULE,
+	},
+};
+
+/*
+ * Platform probe: allocate one f_rmnet per requested instance,
+ * register each as an android composite function and initialise the
+ * shared transports.  Defaults to a single instance when no platform
+ * data is provided.
+ */
+static int __devinit frmnet_probe(struct platform_device *pdev)
+{
+	struct usb_rmnet_pdata *pdata = pdev->dev.platform_data;
+	int i;
+	struct f_rmnet *dev;
+	int ret;
+	int instances;
+
+	instances = 1;
+	if (pdata)
+		instances = pdata->num_instances;
+
+	pr_debug("%s: instances :%d\n", __func__, instances);
+
+	for (i = 0; i < instances; i++) {
+		dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
+		if (!dev) {
+			pr_err("%s: Unable to allocate rmnet device\n",
+					__func__);
+			ret = -ENOMEM;
+			goto fail_probe;
+		}
+
+		dev->port_num = i;
+		spin_lock_init(&dev->lock);
+		INIT_LIST_HEAD(&dev->cpkt_resp_q);
+
+		ports[i].port = dev;
+		ports[i].port_num = i;
+		ports[i].android_f.name = kasprintf(GFP_KERNEL, "rmnet%d", i);
+		if (!ports[i].android_f.name) {
+			pr_err("%s: Unable to allocate function name\n",
+					__func__);
+			kfree(dev);
+			ports[i].port = NULL;
+			ret = -ENOMEM;
+			goto fail_probe;
+		}
+		ports[i].android_f.bind_config = frmnet_bind_config;
+
+		pr_debug("%s: android f_name:%s\n", __func__,
+				ports[i].android_f.name);
+
+		nr_ports++;
+
+		android_register_function(&ports[i].android_f);
+	}
+
+	/* functions stay registered even if transport setup fails;
+	 * report the error instead of silently ignoring it
+	 */
+	ret = gport_setup(nr_ports);
+	if (ret)
+		pr_err("%s: gport_setup failed, err:%d\n", __func__, ret);
+
+	return 0;
+
+fail_probe:
+	/* free the fully initialised ports (nr_ports of them) */
+	for (i = 0; i < nr_ports; i++) {
+		/* android_unregister_function(&ports[i].android_f); */
+		kfree(ports[i].android_f.name);
+		kfree(ports[i].port);
+	}
+
+	return ret;
+}
+
+/* Module entry: bind frmnet_probe to the "usb_rmnet" platform device. */
+static int __init frmnet_init(void)
+{
+	return platform_driver_probe(&usb_rmnet, frmnet_probe);
+}
+module_init(frmnet_init);
+
+/* Module exit: unregister the platform driver (see frmnet_remove TBDs
+ * for cleanup that is not yet performed).
+ */
+static void __exit frmnet_exit(void)
+{
+	platform_driver_unregister(&usb_rmnet);
+}
+module_exit(frmnet_exit);
+
+MODULE_DESCRIPTION("rmnet function driver");
+MODULE_LICENSE("GPL v2");
+#endif
diff --git a/drivers/usb/gadget/f_rmnet.h b/drivers/usb/gadget/f_rmnet.h
new file mode 100644
index 0000000..2d816c6
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __F_RMNET_H
+#define __F_RMNET_H
+
+/* Forward declaration keeps this header self-contained regardless of
+ * include order in its users.
+ */
+struct usb_configuration;
+
+/* Add the RmNet function to the given USB configuration. */
+int rmnet_function_add(struct usb_configuration *c);
+
+#endif /* __F_RMNET_H */
diff --git a/drivers/usb/gadget/f_rmnet_sdio.c b/drivers/usb/gadget/f_rmnet_sdio.c
new file mode 100644
index 0000000..aa8fd3a
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_sdio.c
@@ -0,0 +1,1314 @@
+/*
+ * f_rmnet_sdio.c -- RmNet SDIO function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/sdio_cmux.h>
+#include <mach/sdio_dmux.h>
+
+static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL;
+module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
+
+static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL;
+module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+#define SDIO_MUX_HDR           8
+#define RMNET_SDIO_NOTIFY_INTERVAL  5
+#define RMNET_SDIO_MAX_NFY_SZE  sizeof(struct usb_cdc_notification)
+
+#define RMNET_SDIO_RX_REQ_MAX             16
+#define RMNET_SDIO_RX_REQ_SIZE            2048
+#define RMNET_SDIO_TX_REQ_MAX             200
+
+/* flow-control watermarks (packet counts); runtime-tunable below */
+#define TX_PKT_DROP_THRESHOLD			1000
+#define RX_PKT_FLOW_CTRL_EN_THRESHOLD		1000
+#define RX_PKT_FLOW_CTRL_DISABLE		500
+
+unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
+module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
+module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+/* QMI requests & responses buffer*/
+struct rmnet_sdio_qmi_buf {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+/* Per-function state for the RmNet-over-SDIO gadget. */
+struct rmnet_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+
+	struct usb_ep           *epout;
+	struct usb_ep           *epin;
+	struct usb_ep           *epnotify;
+	struct usb_request      *notify_req;
+
+	u8                      ifc_id;
+	/* QMI lists */
+	struct list_head        qmi_req_q;
+	struct list_head        qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head        tx_idle;
+	struct sk_buff_head	tx_skb_queue;
+	struct list_head        rx_idle;
+	struct sk_buff_head	rx_skb_queue;
+
+	/* protects the QMI and tx/rx lists above */
+	spinlock_t              lock;
+	atomic_t                online;
+	/* outstanding RESPONSE_AVAILABLE notifications */
+	atomic_t                notify_count;
+
+	struct workqueue_struct *wq;
+	struct work_struct disconnect_work;
+
+	struct work_struct ctl_rx_work;
+	struct work_struct data_rx_work;
+
+	struct delayed_work sdio_open_work;
+	atomic_t sdio_open;
+
+	unsigned int dpkts_pending_atdmux;
+	int cbits_to_modem;
+	struct work_struct set_modem_ctl_bits_work;
+
+	/* pkt logging dpkt - data pkt; cpkt - control pkt*/
+	unsigned long dpkt_tolaptop;
+	unsigned long dpkt_tomodem;
+	unsigned long tx_drp_cnt;
+	unsigned long cpkt_tolaptop;
+	unsigned long cpkt_tomodem;
+};
+
+/* Vendor-specific interface: one interrupt (notify) + two bulk endpoints */
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =        3,
+	.bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            1 << RMNET_SDIO_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            RMNET_SDIO_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "QMI RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =             0x0409, /* en-us */
+	.strings =              rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+/*
+ * Allocate a QMI buffer wrapper plus a @len byte payload.  Returns
+ * ERR_PTR(-ENOMEM) if either allocation fails; the caller fills in
+ * ->len and frees the pair with rmnet_free_qmi().
+ */
+static struct rmnet_sdio_qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct rmnet_sdio_qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(*qmi), kmalloc_flags);
+	if (!qmi)
+		return ERR_PTR(-ENOMEM);
+
+	qmi->buf = kmalloc(len, kmalloc_flags);
+	if (!qmi->buf) {
+		kfree(qmi);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return qmi;
+}
+
+/* Free a QMI buffer allocated by rmnet_alloc_qmi(). */
+static void rmnet_free_qmi(struct rmnet_sdio_qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or ERR_PTR(-ENOMEM) on failure.  A @len of 0 yields a
+ * request with no buffer attached.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	if (len) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (!req->buf) {
+			usb_ep_free_request(ep, req);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	return req;
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/*
+ * Completion handler for the interrupt notify request: keeps
+ * re-queueing the request while notify_count says more
+ * RESPONSE_AVAILABLE notifications are owed to the host.
+ */
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		ERROR(cdev, "rmnet notifyep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			ERROR(cdev, "rmnet notify ep enq error %d\n", status);
+		}
+		break;
+	}
+}
+
+/*
+ * Notify the host that a QMI response is queued by sending a CDC
+ * RESPONSE_AVAILABLE on the interrupt endpoint.  If a notification is
+ * already in flight (count > 1) only the counter is bumped and
+ * rmnet_notify_complete() resends until it drains.
+ * NOTE(review): unlike the non-SDIO variant there is no dev->online
+ * check here — presumably the caller guarantees it; verify.
+ */
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request              *req = dev->notify_req;
+	struct usb_cdc_notification     *event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+	}
+}
+
+#define MAX_CTRL_PKT_SIZE	4096
+/*
+ * SDIO control-mux receive callback: copy the incoming QMI response
+ * into a freshly allocated buffer, queue it for the host and raise a
+ * RESPONSE_AVAILABLE notification.  Oversized or cable-off packets are
+ * dropped.  The GFP_KERNEL allocation implies this callback runs in
+ * process context — NOTE(review): confirm against the sdio_cmux API.
+ */
+static void rmnet_ctl_receive_cb(void *data, int size, void *priv)
+{
+	struct rmnet_dev *dev = priv;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_sdio_qmi_buf *qmi_resp;
+	unsigned long flags;
+
+	if (!data || !size)
+		return;
+
+	if (size > MAX_CTRL_PKT_SIZE) {
+		ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n",
+				size, MAX_CTRL_PKT_SIZE);
+		return;
+	}
+
+	if (!atomic_read(&dev->online)) {
+		DBG(cdev, "USB disconnected\n");
+		return;
+	}
+
+	qmi_resp = rmnet_alloc_qmi(size, GFP_KERNEL);
+	if (IS_ERR(qmi_resp)) {
+		DBG(cdev, "unable to allocate memory for QMI resp\n");
+		return;
+	}
+	memcpy(qmi_resp->buf, data, size);
+	qmi_resp->len = size;
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	qmi_response_available(dev);
+}
+
+/*
+ * SDIO control-channel TX completion.  The cmux layer owns the buffer,
+ * so there is nothing to reclaim here; just trace the byte count.
+ */
+static void rmnet_ctl_write_done(void *data, int size, void *priv)
+{
+	struct rmnet_dev *dev = priv;
+
+	VDBG(dev->cdev, "rmnet control write done = %d bytes\n", size);
+}
+
+/* Status notification from the SDIO cmux layer; log only. */
+static void rmnet_sts_callback(int id, void *priv)
+{
+	struct rmnet_dev *dev = priv;
+
+	DBG(dev->cdev, "rmnet_sts_callback: id: %d\n", id);
+}
+
+/*
+ * Worker that drains qmi_req_q, writing each host QMI request to the
+ * SDIO control channel.  Runs in process context because
+ * sdio_cmux_write() may block.
+ */
+static void rmnet_control_rx_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, ctl_rx_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_sdio_qmi_buf *qmi_req;
+	unsigned long flags;
+	int ret;
+
+	while (1) {
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_req_q))
+			goto unlock;
+
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+					struct rmnet_sdio_qmi_buf, list);
+		list_del(&qmi_req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf,
+					qmi_req->len);
+		if (ret != qmi_req->len) {
+			ERROR(cdev, "rmnet control SDIO write failed\n");
+			/* don't leak the entry we already dequeued */
+			rmnet_free_qmi(qmi_req);
+			return;
+		}
+
+		dev->cpkt_tomodem++;
+
+		/*
+		 * cmux_write API copies the buffer and gives it to sdio_al.
+		 * Hence freeing the memory before write is completed.
+		 */
+		rmnet_free_qmi(qmi_req);
+	}
+unlock:
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * ep0 IN data-stage completion for GET_ENCAPSULATED_RESPONSE.  Success
+ * and disconnect-time cancellations are expected; anything else on the
+ * control response path is worth logging.
+ */
+static void rmnet_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	if (status == 0 || status == -ECONNRESET || status == -ESHUTDOWN)
+		return;
+
+	INFO(cdev, "rmnet %s response error %d, %d/%d\n",
+		ep->name, status, req->actual, req->length);
+}
+
+/*
+ * ep0 OUT data-stage completion for SEND_ENCAPSULATED_COMMAND: copy the
+ * host's QMI request into a queue entry and schedule the control RX
+ * worker, which performs the (possibly blocking) SDIO write.
+ */
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_sdio_qmi_buf *qmi_req;
+	int len = req->actual;
+
+	if (req->status < 0) {
+		ERROR(cdev, "rmnet command error %d\n", req->status);
+		return;
+	}
+
+	/* discard the packet if sdio is not available */
+	if (!atomic_read(&dev->sdio_open))
+		return;
+
+	qmi_req = rmnet_alloc_qmi(len, GFP_ATOMIC);
+	if (IS_ERR(qmi_req)) {
+		ERROR(cdev, "unable to allocate memory for QMI req\n");
+		return;
+	}
+	memcpy(qmi_req->buf, req->buf, len);
+	qmi_req->len = len;
+	/* completion context: plain spin_lock (irqs presumably already
+	 * disabled by the UDC -- NOTE(review): confirm)
+	 */
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+	queue_work(dev->wq, &dev->ctl_rx_work);
+}
+
+/*
+ * ep0 class-request handler.  Supports the CDC encapsulated
+ * command/response pair used to tunnel QMI messages, plus the ACM
+ * SET_CONTROL_LINE_STATE request as a DTR workaround for Windows hosts.
+ * Returns the data-stage length (>= 0) on success; the data stage is
+ * queued at the bottom of the function.
+ */
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request      *req = cdev->req;
+	int                     ret = -EOPNOTSUPP;
+	u16                     w_index = le16_to_cpu(ctrl->wIndex);
+	u16                     w_value = le16_to_cpu(ctrl->wValue);
+	u16                     w_length = le16_to_cpu(ctrl->wLength);
+	struct rmnet_sdio_qmi_buf *resp;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		/* host -> modem QMI message; the OUT data stage lands in
+		 * rmnet_command_complete()
+		 */
+		if (w_length > req->length)
+			goto invalid;
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			unsigned len;
+
+			spin_lock(&dev->lock);
+
+			if (list_empty(&dev->qmi_resp_q)) {
+				INFO(cdev, "qmi resp empty "
+					" req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+
+			resp = list_first_entry(&dev->qmi_resp_q,
+				struct rmnet_sdio_qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+
+			/* truncate to what the host asked for */
+			len = min_t(unsigned, w_length, resp->len);
+			memcpy(req->buf, resp->buf, len);
+			ret = len;
+			req->context = dev;
+			req->complete = rmnet_response_complete;
+			rmnet_free_qmi(resp);
+
+			/* check if its the right place to add */
+			dev->cpkt_tolaptop++;
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* This is a workaround for RmNet and is borrowed from the
+		 * CDC/ACM standard. The host driver will issue the above ACM
+		 * standard request to the RmNet interface in the following
+		 * scenario: Once the network adapter is disabled from device
+		 * manager, the above request will be sent from the qcusbnet
+		 * host driver, with DTR being '0'. Once network adapter is
+		 * enabled from device manager (or during enumeration), the
+		 * request will be sent with DTR being '1'.
+		 */
+		if (w_value & ACM_CTRL_DTR)
+			dev->cbits_to_modem |= TIOCM_DTR;
+		else
+			dev->cbits_to_modem &= ~TIOCM_DTR;
+		queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+
+		ret = 0;
+
+		break;
+	default:
+		/* the default case deliberately falls into invalid: */
+
+invalid:
+	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+		ctrl->bRequestType, ctrl->bRequest,
+		w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Arm one OUT request with a fresh skb and queue it to the UDC.
+ * Headroom for the SDIO mux header is reserved up front so the mux can
+ * prepend it later without reallocating.  On failure the skb is freed
+ * and the error from usb_ep_queue() is returned.
+ */
+static int
+rmnet_rx_submit(struct rmnet_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff *skb;
+	int rc;
+
+	skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, SDIO_MUX_HDR);
+
+	req->context = skb;
+	req->buf = skb->data;
+	req->length = RMNET_SDIO_RX_REQ_SIZE;
+
+	rc = usb_ep_queue(dev->epout, req, gfp_flags);
+	if (rc)
+		dev_kfree_skb_any(skb);
+
+	return rc;
+}
+
+/*
+ * (Re)submit every idle OUT request so the host can send data packets.
+ * The lock is dropped around rmnet_rx_submit() because it allocates an
+ * skb and queues to the UDC.
+ */
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_err("%s: USB not connected\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = rmnet_rx_submit(dev, req, GFP_ATOMIC);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			/* park the request again and stop resubmitting */
+			list_add_tail(&req->list, &dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Drain tx_skb_queue by pairing each pending skb with an idle IN request
+ * and queueing it to the UDC.
+ *
+ * NOTE(review): the lock is taken with spin_lock_irqsave() but dropped
+ * with plain spin_unlock() around usb_ep_queue(), so interrupts stay
+ * disabled across the enqueue and flags are only restored on exit.
+ * Looks intentional, but worth confirming.
+ */
+static void usb_rmnet_sdio_start_tx(struct rmnet_dev *dev)
+{
+	unsigned long			flags;
+	int				status;
+	struct sk_buff			*skb;
+	struct usb_request		*req;
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(&dev->tx_idle)) {
+		skb = __skb_dequeue(&dev->tx_skb_queue);
+		if (!skb) {
+			/* no more data to send */
+			spin_unlock_irqrestore(&dev->lock, flags);
+			return;
+		}
+
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
+		list_del(&req->list);
+		spin_unlock(&dev->lock);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		spin_lock(&dev->lock);
+		if (status) {
+			/* USB still online, queue requests back */
+			if (atomic_read(&dev->online)) {
+				ERROR(cdev, "rmnet tx data enqueue err %d\n",
+						status);
+				list_add_tail(&req->list, &dev->tx_idle);
+				__skb_queue_head(&dev->tx_skb_queue, skb);
+			} else {
+				/* cable gone: drop the request and the skb */
+				req->buf = 0;
+				rmnet_free_req(dev->epin, req);
+				dev_kfree_skb_any(skb);
+			}
+			break;
+		}
+		dev->dpkt_tolaptop++;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * SDIO mux RX callback for data packets (modem -> host).  Drops packets
+ * once the USB TX backlog exceeds tx_pkt_drop_thld to bound memory use,
+ * otherwise queues the skb and kicks the USB TX path.
+ */
+static void rmnet_data_receive_cb(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	unsigned long flags;
+
+	/* SDIO mux sends NULL SKB when link state changes */
+	if (!skb)
+		return;
+
+	if (!atomic_read(&dev->online)) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	if (dev->tx_skb_queue.qlen > tx_pkt_drop_thld) {
+		/* host is not draining fast enough: drop and count */
+		if (printk_ratelimit())
+			pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
+					__func__, dev->tx_drp_cnt);
+		dev->tx_drp_cnt++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	__skb_queue_tail(&dev->tx_skb_queue, skb);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	usb_rmnet_sdio_start_tx(dev);
+}
+
+/*
+ * SDIO mux TX-done callback (host -> modem data consumed by dmux).
+ * Frees the skb, drops the pending counter and, once the backlog falls
+ * below the flow-control disable threshold, re-arms the USB OUT path.
+ */
+static void rmnet_data_write_done(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	int resume_rx;
+
+	/* SDIO mux sends NULL SKB when link state changes */
+	if (!skb)
+		return;
+
+	dev_kfree_skb_any(skb);
+
+	/* this function is called from sdio mux from spin_lock_irqsave,
+	 * so a plain spin_lock is sufficient here
+	 */
+	spin_lock(&dev->lock);
+	dev->dpkts_pending_atdmux--;
+	resume_rx = dev->dpkts_pending_atdmux < rx_fctrl_dis_thld;
+	spin_unlock(&dev->lock);
+
+	if (resume_rx)
+		rmnet_start_rx(dev);
+}
+
+/*
+ * Worker that pushes host->modem data skbs into the SDIO data mux.
+ * Runs in process context because msm_sdio_dmux_write() may block.
+ * NOTE(review): on success the skb is presumed owned by dmux until
+ * rmnet_data_write_done() fires; on error we free it here -- confirm
+ * against msm_sdio_dmux_write()'s ownership contract.
+ */
+static void rmnet_data_rx_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, data_rx_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct sk_buff *skb;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while ((skb = __skb_dequeue(&dev->rx_skb_queue))) {
+		/* drop the lock around the potentially blocking write */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret < 0) {
+			ERROR(cdev, "rmnet SDIO data write failed\n");
+			dev_kfree_skb_any(skb);
+		} else {
+			dev->dpkt_tomodem++;
+			dev->dpkts_pending_atdmux++;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * OUT (host->device) data completion: hand the filled skb to the data
+ * RX worker and resubmit the request, unless the dmux backlog is above
+ * the flow-control enable threshold, in which case the request is
+ * parked on rx_idle until rmnet_data_write_done() drains the backlog.
+ */
+static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct sk_buff *skb = req->context;
+	int status = req->status;
+	int queue = 0;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone: tear down the request and its skb here */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	if (queue) {
+		__skb_queue_tail(&dev->rx_skb_queue, skb);
+		queue_work(dev->wq, &dev->data_rx_work);
+	}
+
+	if (dev->dpkts_pending_atdmux >= rx_fctrl_en_thld) {
+		/* flow control: hold the request until the backlog drains */
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	status = rmnet_rx_submit(dev, req, GFP_ATOMIC);
+	if (status) {
+		ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+		/* NOTE(review): rx_idle is modified here without dev->lock,
+		 * unlike every other manipulation of this list -- confirm
+		 */
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+}
+
+/*
+ * IN (device->host) data completion: recycle the request onto tx_idle,
+ * free the skb and try to push the next queued packet.
+ */
+static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct sk_buff  *skb = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	list_add_tail(&req->list, &dev->tx_idle);
+	spin_unlock(&dev->lock);
+	dev_kfree_skb_any(skb);
+
+	usb_rmnet_sdio_start_tx(dev);
+}
+
+/*
+ * Release every buffer and usb_request owned by the function and reset
+ * the statistics counters.  Called from both rmnet_disable() and
+ * rmnet_unbind(), so it must tolerate being run more than once.
+ */
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	dev->dpkt_tolaptop = 0;
+	dev->dpkt_tomodem = 0;
+	dev->cpkt_tolaptop = 0;
+	dev->cpkt_tomodem = 0;
+	dev->dpkts_pending_atdmux = 0;
+	dev->tx_drp_cnt = 0;
+
+	/* free all usb requests in tx pool; these were allocated on the
+	 * IN endpoint in rmnet_set_alt(), so free them against epin (the
+	 * previous code passed epout here by mistake)
+	 */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL; /* buffer belongs to an skb, not to us */
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all usb requests in rx pool (allocated on the OUT endpoint) */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL;
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi response pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	/* guard against a failed/absent allocation (ERR_PTR or NULL) and
+	 * against a double free when disable and unbind both reach here
+	 */
+	if (!IS_ERR_OR_NULL(dev->notify_req)) {
+		rmnet_free_req(dev->epnotify, dev->notify_req);
+		dev->notify_req = NULL;
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Worker that pushes the cached control-line state (DTR etc.) to the
+ * modem over the SDIO control channel.
+ */
+static void rmnet_set_modem_ctl_bits_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+					set_modem_ctl_bits_work);
+
+	/* nothing to report until the SDIO control channel is up */
+	if (!atomic_read(&dev->sdio_open))
+		return;
+
+	pr_debug("%s: cbits_to_modem:%d\n", __func__, dev->cbits_to_modem);
+
+	/* set the requested bits and clear everything else */
+	sdio_cmux_tiocmset(rmnet_sdio_ctl_ch, dev->cbits_to_modem,
+			~dev->cbits_to_modem);
+}
+
+/* Deferred disconnect handling; queued on dev->wq by rmnet_disable(). */
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	/* REVISIT: Push all the data to sdio if anything is pending */
+}
+/* Bus suspend: force DTR low towards the modem. */
+static void rmnet_suspend(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	if (!atomic_read(&dev->online))
+		return;
+	/* This is a workaround for a Windows host bug during suspend.
+	 * Windows 7/XP hosts are supposed to drop DTR when the host is
+	 * suspended.  Since that is not being done, drop DTR explicitly
+	 * from the function driver's suspend callback.
+	 */
+	dev->cbits_to_modem &= ~TIOCM_DTR;
+	queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+}
+/*
+ * Host deselected the interface / cable gone: quiesce the endpoints,
+ * drop all queued buffers and tell the modem DTR is deasserted.
+ */
+static void rmnet_disable(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	usb_ep_disable(dev->epnotify);
+	usb_ep_disable(dev->epout);
+	usb_ep_disable(dev->epin);
+
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->notify_count, 0);
+	rmnet_free_buf(dev);
+
+	/* cleanup work */
+	queue_work(dev->wq, &dev->disconnect_work);
+	/* inform the modem that the host dropped the control lines */
+	dev->cbits_to_modem = 0;
+	queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+}
+
+#define SDIO_OPEN_RETRY_DELAY	msecs_to_jiffies(2000)
+#define SDIO_OPEN_MAX_RETRY	90
+/*
+ * Delayed worker that brings up the SDIO control and data channels,
+ * retrying every 2 seconds for up to SDIO_OPEN_MAX_RETRY attempts.
+ *
+ * NOTE(review): retry_cnt/ctl_ch_opened/data_ch_opened are function
+ * statics, so they persist across bind/unbind cycles and would be stale
+ * if the function were ever instantiated twice -- confirm single
+ * instance usage.
+ */
+static void rmnet_open_sdio_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev =
+			container_of(w, struct rmnet_dev, sdio_open_work.work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret;
+	static int retry_cnt;
+	static bool ctl_ch_opened, data_ch_opened;
+
+	if (!ctl_ch_opened) {
+		/* Control channel for QMI messages */
+		ret = sdio_cmux_open(rmnet_sdio_ctl_ch, rmnet_ctl_receive_cb,
+				rmnet_ctl_write_done, rmnet_sts_callback, dev);
+		if (!ret)
+			ctl_ch_opened = true;
+	}
+	if (!data_ch_opened) {
+		/* Data channel for network packets */
+		ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
+				rmnet_data_receive_cb,
+				rmnet_data_write_done);
+		if (!ret)
+			data_ch_opened = true;
+	}
+
+	if (ctl_ch_opened && data_ch_opened) {
+		atomic_set(&dev->sdio_open, 1);
+
+		/* if usb cable is connected, update DTR status to modem */
+		if (atomic_read(&dev->online))
+			queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
+
+		pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
+				__func__, retry_cnt);
+		return;
+	}
+
+	retry_cnt++;
+	pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n",
+			__func__, retry_cnt);
+
+	if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
+		/* give up; close whichever channel did come up */
+		if (!ctl_ch_opened)
+			ERROR(cdev, "Unable to open control SDIO channel\n");
+		else
+			sdio_cmux_close(rmnet_sdio_ctl_ch);
+
+		if (!data_ch_opened)
+			ERROR(cdev, "Unable to open DATA SDIO channel\n");
+		else
+			msm_sdio_dmux_close(rmnet_sdio_data_ch);
+
+	} else {
+		queue_delayed_work(dev->wq, &dev->sdio_open_work,
+				SDIO_OPEN_RETRY_DELAY);
+	}
+}
+
+/*
+ * Host selected the interface: allocate the notify request and the
+ * RX/TX request pools, enable the three endpoints and prime the OUT
+ * path.  Returns 0 on success or a negative errno.
+ */
+static int rmnet_set_alt(struct usb_function *f,
+			unsigned intf, unsigned alt)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int ret, i;
+
+	/* allocate notification */
+	dev->notify_req = rmnet_alloc_req(dev->epnotify,
+				RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
+
+	if (IS_ERR(dev->notify_req)) {
+		/* nothing else is allocated yet: return directly instead of
+		 * running rmnet_free_buf() against an ERR_PTR value
+		 */
+		ret = PTR_ERR(dev->notify_req);
+		dev->notify_req = NULL;
+		return ret;
+	}
+	for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, 0, GFP_ATOMIC);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, 0, GFP_ATOMIC);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
+
+	/* NOTE(review): usb_ep_enable() return values are not checked here;
+	 * failures would leave the endpoint disabled -- confirm acceptable
+	 */
+	dev->epin->driver_data = dev;
+	usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+				&rmnet_hs_in_desc,
+				&rmnet_fs_in_desc));
+	dev->epout->driver_data = dev;
+	usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+				&rmnet_hs_out_desc,
+				&rmnet_fs_out_desc));
+	usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+				&rmnet_hs_notify_desc,
+				&rmnet_fs_notify_desc));
+
+	atomic_set(&dev->online, 1);
+
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+
+	return 0;
+
+free_buf:
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+	return ret;
+}
+
+/*
+ * Gadget bind: claim an interface number and the three endpoints (bulk
+ * IN/OUT for data, interrupt IN for notifications), mirror the
+ * full-speed endpoint addresses into the high-speed descriptors, and
+ * kick off the SDIO channel-open worker.
+ */
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	int id;
+	struct usb_ep *ep;
+
+	dev->cdev = cdev;
+
+	/* allocate interface ID */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ifc_id = id;
+	rmnet_interface_desc.bInterfaceNumber = id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epin = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epout = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epnotify = ep;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+			rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+			rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+			rmnet_fs_notify_desc.bEndpointAddress;
+	}
+
+	/* start trying to open the SDIO channels immediately */
+	queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);
+
+	return 0;
+
+out:
+	/* release any endpoints we did manage to claim */
+	if (dev->epnotify)
+		dev->epnotify->driver_data = NULL;
+	if (dev->epout)
+		dev->epout->driver_data = NULL;
+	if (dev->epin)
+		dev->epin->driver_data = NULL;
+
+	return -ENODEV;
+}
+
+/*
+ * Gadget unbind: stop deferred work, release every buffer and request,
+ * close both SDIO channels and free the device.
+ */
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	destroy_workqueue(dev->wq);
+
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+
+	msm_sdio_dmux_close(rmnet_sdio_data_ch);
+	sdio_cmux_close(rmnet_sdio_ctl_ch);
+
+	atomic_set(&dev->sdio_open, 0);
+
+	kfree(dev);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * debugfs read handler: format the packet/flow-control statistics into
+ * a temporary buffer and copy them to userspace.
+ */
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	char *buf;
+	unsigned long flags;
+	int ret;
+
+	/* allocation must match the bound passed to scnprintf(); the old
+	 * code allocated only 1024 bytes while bounding the print by
+	 * PAGE_SIZE
+	 */
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* snapshot the counters under the lock that protects them */
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = scnprintf(buf, PAGE_SIZE,
+			"dpkts_to_modem:  %lu\n"
+			"dpkts_to_laptop: %lu\n"
+			"cpkts_to_modem:  %lu\n"
+			"cpkts_to_laptop: %lu\n"
+			"cbits_to_modem:  %d\n"
+			"tx skb size:     %u\n"
+			"rx_skb_size:     %u\n"
+			"dpkts_pending_at_dmux: %u\n"
+			"tx drp cnt: %lu\n"
+			"cbits_tomodem: %d",
+			dev->dpkt_tomodem, dev->dpkt_tolaptop,
+			dev->cpkt_tomodem, dev->cpkt_tolaptop,
+			dev->cbits_to_modem,
+			dev->tx_skb_queue.qlen, dev->rx_skb_queue.qlen,
+			dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
+			dev->cbits_to_modem);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/*
+ * debugfs write handler: any write resets the statistics counters.
+ * Takes dev->lock for consistency with every other path that updates
+ * these counters from completion/work context.
+ */
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->dpkt_tolaptop = 0;
+	dev->dpkt_tomodem = 0;
+	dev->cpkt_tolaptop = 0;
+	dev->cpkt_tomodem = 0;
+	dev->dpkts_pending_atdmux = 0;
+	dev->tx_drp_cnt = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* TBD: How do we reset skb qlen
+	 * it might have side effects
+	 */
+
+	return count;
+}
+
+/*
+ * debugfs open: hand the rmnet_dev stashed at debugfs_create_file()
+ * time to the read/write handlers via file->private_data.
+ */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* file operations for the "status" debugfs node; file-local, so static */
+static const struct file_operations debug_stats_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+/*
+ * Create the usb_rmnet debugfs directory and its "status" node.
+ * Best-effort: failures are silently ignored.
+ */
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	struct dentry *dent;
+
+	/* with debugfs enabled, debugfs_create_dir() returns NULL on
+	 * failure (and ERR_PTR only when compiled out), so reject both
+	 */
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR_OR_NULL(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, dev, &debug_stats_ops);
+}
+#else
+/* no-op stub when debugfs support is compiled out */
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	return;
+}
+#endif
+
+/*
+ * Allocate the rmnet function device, initialise its lists, queues and
+ * work items, and register the USB function with the composite layer.
+ * Returns 0 on success or a negative errno.
+ */
+int rmnet_sdio_function_add(struct usb_configuration *c)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+
+	/* deferred work */
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+	INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_set_modem_ctl_bits_work);
+	INIT_WORK(&dev->ctl_rx_work, rmnet_control_rx_work);
+	INIT_WORK(&dev->data_rx_work, rmnet_data_rx_work);
+	INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work);
+
+	/* control and data queues */
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	skb_queue_head_init(&dev->tx_skb_queue);
+	skb_queue_head_init(&dev->rx_skb_queue);
+
+	dev->function.name = "rmnet_sdio";
+	dev->function.strings = rmnet_strings;
+	dev->function.descriptors = rmnet_fs_function;
+	dev->function.hs_descriptors = rmnet_hs_function;
+	dev->function.bind = rmnet_bind;
+	dev->function.unbind = rmnet_unbind;
+	dev->function.setup = rmnet_setup;
+	dev->function.set_alt = rmnet_set_alt;
+	dev->function.disable = rmnet_disable;
+	dev->function.suspend = rmnet_suspend;
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret)
+		goto free_wq;
+
+	usb_debugfs_init(dev);
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET_SDIO
+/* Android composite-gadget glue: registers rmnet_sdio as a selectable
+ * USB function.
+ */
+static struct android_usb_function rmnet_function = {
+       .name = "rmnet_sdio",
+       .bind_config = rmnet_sdio_function_add,
+};
+
+static int __init rmnet_init(void)
+{
+       android_register_function(&rmnet_function);
+       return 0;
+}
+module_init(rmnet_init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET_SDIO */
diff --git a/drivers/usb/gadget/f_rmnet_smd.c b/drivers/usb/gadget/f_rmnet_smd.c
new file mode 100644
index 0000000..00925f9
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_smd.c
@@ -0,0 +1,1333 @@
+/*
+ * f_rmnet.c -- RmNet function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/msm_smd.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+
+#include "gadget_chips.h"
+
+/* Names of the SMD channels used to reach the modem; defaults come from
+ * Kconfig and may be overridden on the module command line (read-only
+ * thereafter, S_IRUGO).
+ */
+static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
+module_param(rmnet_ctl_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");
+
+static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
+module_param(rmnet_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
+
+/* DTR bit of the CDC/ACM SET_CONTROL_LINE_STATE wValue */
+#define ACM_CTRL_DTR	(1 << 0)
+
+/* notify (interrupt IN) endpoint interval and payload size */
+#define RMNET_NOTIFY_INTERVAL	5
+#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)
+
+/* pool counts/sizes for QMI control buffers */
+#define QMI_REQ_MAX		4
+#define QMI_REQ_SIZE		2048
+#define QMI_RESP_MAX		8
+#define QMI_RESP_SIZE		2048
+
+/* pool counts/sizes for bulk data usb_requests */
+#define RX_REQ_MAX		8
+#define RX_REQ_SIZE		2048
+#define TX_REQ_MAX		8
+#define TX_REQ_SIZE		2048
+
+/* per-request bulk OUT transfer length queued to the host */
+#define TXN_MAX 		2048
+
+/* QMI requests & responses buffer*/
+struct qmi_buf {
+	void *buf;	/* kmalloc'd payload, QMI_*_SIZE bytes */
+	int len;	/* valid bytes in buf */
+	struct list_head list;	/* links into pool or pending queue */
+};
+
+/* Control & data SMD channel private data */
+struct rmnet_smd_info {
+	struct smd_channel 	*ch;
+	/* tx tasklet drains SMD toward USB; rx tasklet pushes host
+	 * data into SMD.  Both carry the rmnet_dev pointer in .data.
+	 */
+	struct tasklet_struct	tx_tlet;
+	struct tasklet_struct	rx_tlet;
+#define CH_OPENED	0
+	unsigned long		flags;
+	/* pending rx packet length */
+	atomic_t		rx_pkt;
+	/* wait for smd open event*/
+	wait_queue_head_t	wait;
+};
+
+/* Per-function state: USB endpoints, QMI/control and data buffer pools,
+ * the two SMD channel contexts, and packet counters for debugfs stats.
+ * All lists and counters are protected by @lock unless atomic.
+ */
+struct rmnet_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+
+	struct usb_ep		*epout;
+	struct usb_ep		*epin;
+	struct usb_ep		*epnotify;
+	struct usb_request 	*notify_req;
+
+	u8			ifc_id;
+	/* QMI lists */
+	struct list_head	qmi_req_pool;
+	struct list_head	qmi_resp_pool;
+	struct list_head	qmi_req_q;
+	struct list_head	qmi_resp_q;
+	/* Tx/Rx lists */
+	struct list_head 	tx_idle;
+	struct list_head 	rx_idle;
+	struct list_head	rx_queue;
+
+	spinlock_t		lock;
+	atomic_t		online;
+	/* outstanding RESPONSE_AVAILABLE notifications to the host */
+	atomic_t		notify_count;
+
+	struct rmnet_smd_info	smd_ctl;
+	struct rmnet_smd_info	smd_data;
+
+	/* SMD open/close may sleep, so they run from this workqueue */
+	struct workqueue_struct *wq;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+
+	/* data-path packet counters (modem <-> host) */
+	unsigned long	dpkts_to_host;
+	unsigned long	dpkts_from_modem;
+	unsigned long	dpkts_from_host;
+	unsigned long	dpkts_to_modem;
+
+	/* control-path (QMI) packet counters */
+	unsigned long	cpkts_to_host;
+	unsigned long	cpkts_from_modem;
+	unsigned long	cpkts_from_host;
+	unsigned long	cpkts_to_modem;
+};
+
+/* Single vendor-specific interface with one interrupt IN (notify) and a
+ * bulk IN/OUT pair for data.
+ */
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	3,
+	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
+	/* .iInterface = DYNAMIC */
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	/* FS interval is in frames, hence the shift */
+	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	/* HS interval is exponential (2^(n-1) microframes) */
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "QMI RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+/* Allocate a qmi_buf plus its @len-byte payload.  Returns the buffer or
+ * ERR_PTR(-ENOMEM); never returns NULL, so callers test with IS_ERR().
+ */
+static struct qmi_buf *
+rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
+{
+	struct qmi_buf *qmi;
+
+	qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
+	if (qmi != NULL) {
+		qmi->buf = kmalloc(len, kmalloc_flags);
+		if (qmi->buf == NULL) {
+			/* payload failed: release the header too */
+			kfree(qmi);
+			qmi = NULL;
+		}
+	}
+
+	return qmi ? qmi : ERR_PTR(-ENOMEM);
+}
+
+/* Free a qmi_buf and its payload (counterpart of rmnet_alloc_qmi). */
+static void rmnet_free_qmi(struct qmi_buf *qmi)
+{
+	kfree(qmi->buf);
+	kfree(qmi);
+}
+/*
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or a error code if there is an error.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			/* buffer failed: give the request back to the ep */
+			usb_ep_free_request(ep, req);
+			req = NULL;
+		}
+	}
+
+	/* never NULL: callers test with IS_ERR() */
+	return req ? req : ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free a usb_request and its buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/* Completion handler for the interrupt IN notify request: if more
+ * RESPONSE_AVAILABLE notifications were coalesced while this one was in
+ * flight (notify_count > 1 on entry), requeue the same request.
+ */
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		ERROR(cdev, "rmnet notify ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != dev->epnotify)
+			break;
+
+		/* handle multiple pending QMI_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+		if (status) {
+			/* drop the count we failed to deliver */
+			atomic_dec(&dev->notify_count);
+			ERROR(cdev, "rmnet notify ep enqueue error %d\n",
+					status);
+		}
+		break;
+	}
+}
+
+/* Tell the host a QMI response is waiting by queuing a CDC
+ * RESPONSE_AVAILABLE notification on the interrupt IN endpoint.  Only the
+ * first caller queues; later calls just bump notify_count and rely on
+ * rmnet_notify_complete() to resend.
+ */
+static void qmi_response_available(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request		*req = dev->notify_req;
+	struct usb_cdc_notification	*event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+	}
+}
+
+/* TODO
+ * handle modem restart events
+ */
+/* SMD event callback (may run in interrupt context): on DATA events kick
+ * the appropriate tasklet; on OPEN wake the connect worker waiting in
+ * rmnet_connect_work().
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_smd_info *smd_info = priv;
+	int len = atomic_read(&smd_info->rx_pkt);
+	/* the tasklets were initialized with dev as their data argument */
+	struct rmnet_dev *dev = (struct rmnet_dev *) smd_info->tx_tlet.data;
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
+		if (!atomic_read(&dev->online))
+			break;
+		/* a host packet of 'len' bytes is stalled waiting for
+		 * SMD write room; resume it once there is space
+		 */
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* usb endpoints are not enabled untill smd channels
+		 * are opened. wake up worker thread to continue
+		 * connection processing
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We will never come here.
+		 * reset flags after closing smd channel
+		 * */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
+/* Tasklet: drain complete QMI packets from the control SMD channel into
+ * response buffers and notify the host for each one.  Stops when SMD has
+ * no complete packet or the response pool is exhausted.
+ */
+static void rmnet_control_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct qmi_buf *qmi_resp;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(dev->smd_ctl.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_ctl.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->qmi_resp_pool)) {
+			ERROR(cdev, "rmnet QMI Tx buffers full\n");
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
+				struct qmi_buf, list);
+		list_del(&qmi_resp->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/* read outside the lock; smd_read copies sz bytes */
+		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->cpkts_from_modem++;
+		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		qmi_response_available(dev);
+	}
+
+}
+
+/* Tasklet: push queued host QMI requests into the control SMD channel.
+ * If SMD lacks write room, record the stalled length in rx_pkt so
+ * rmnet_smd_notify() can reschedule us when space frees up.
+ */
+static void rmnet_control_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct qmi_buf *qmi_req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+
+		if (list_empty(&dev->qmi_req_q)) {
+			atomic_set(&dev->smd_ctl.rx_pkt, 0);
+			break;
+		}
+		qmi_req = list_first_entry(&dev->qmi_req_q,
+				struct qmi_buf, list);
+		if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
+			atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
+			DBG(cdev, "rmnet control smd channel full\n");
+			break;
+		}
+
+		list_del(&qmi_req->list);
+		dev->cpkts_from_host++;
+		/* smd_write may not be called under the spinlock */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != qmi_req->len) {
+			ERROR(cdev, "rmnet control smd write failed\n");
+			break;
+		}
+		dev->cpkts_to_modem++;
+		/* return the consumed buffer to the request pool */
+		list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* ep0 completion for SEND_ENCAPSULATED_COMMAND data: write the host's
+ * QMI request straight into the control SMD channel when it has room and
+ * nothing is already queued; otherwise copy into a pool buffer and queue
+ * it for rmnet_control_rx_tlet() to preserve ordering.
+ */
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct qmi_buf *qmi_req;
+	int ret;
+
+	if (req->status < 0) {
+		ERROR(cdev, "rmnet command error %d\n", req->status);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	dev->cpkts_from_host++;
+	/* no pending control rx packet */
+	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
+		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
+			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			ERROR(cdev, "rmnet control smd write failed\n");
+		spin_lock(&dev->lock);
+		dev->cpkts_to_modem++;
+		spin_unlock(&dev->lock);
+		return;
+	}
+queue_req:
+	if (list_empty(&dev->qmi_req_pool)) {
+		spin_unlock(&dev->lock);
+		ERROR(cdev, "rmnet QMI pool is empty\n");
+		return;
+	}
+
+	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
+	list_del(&qmi_req->list);
+	/* copy outside the lock; ep0 buffer is reused after we return */
+	spin_unlock(&dev->lock);
+	memcpy(qmi_req->buf, req->buf, req->actual);
+	qmi_req->len = req->actual;
+	spin_lock(&dev->lock);
+	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
+	spin_unlock(&dev->lock);
+}
+/* ep0 completion for GET_ENCAPSULATED_RESPONSE: only bumps the
+ * responses-delivered counter.
+ */
+static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+
+	spin_lock(&dev->lock);
+	dev->cpkts_to_host++;
+	spin_unlock(&dev->lock);
+}
+
+/* Class-specific ep0 request handler.  Supports the CDC encapsulated
+ * command/response pair used to carry QMI, plus SET_CONTROL_LINE_STATE
+ * which is mapped to DTR on the control SMD channel.  Returns the number
+ * of bytes to transfer on ep0, or a negative errno.
+ */
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			ret = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+	struct qmi_buf *resp;
+	int schedule = 0;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > req->length)
+			goto invalid;
+		/* data stage handled in rmnet_command_complete() */
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			spin_lock(&dev->lock);
+			if (list_empty(&dev->qmi_resp_q)) {
+				INFO(cdev, "qmi resp empty "
+					" req%02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+			}
+			resp = list_first_entry(&dev->qmi_resp_q,
+					struct qmi_buf, list);
+			list_del(&resp->list);
+			spin_unlock(&dev->lock);
+			memcpy(req->buf, resp->buf, resp->len);
+			ret = resp->len;
+			spin_lock(&dev->lock);
+
+			/* pool was dry: the tx tasklet may have stalled
+			 * waiting for a free response buffer
+			 */
+			if (list_empty(&dev->qmi_resp_pool))
+				schedule = 1;
+			list_add_tail(&resp->list, &dev->qmi_resp_pool);
+
+			if (schedule)
+				tasklet_schedule(&dev->smd_ctl.tx_tlet);
+			spin_unlock(&dev->lock);
+			req->complete = rmnet_txcommand_complete;
+			req->context = dev;
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* This is a workaround for RmNet and is borrowed from the
+		 * CDC/ACM standard. The host driver will issue the above ACM
+		 * standard request to the RmNet interface in the following
+		 * scenario: Once the network adapter is disabled from device
+		 * manager, the above request will be sent from the qcusbnet
+		 * host driver, with DTR being '0'. Once network adapter is
+		 * enabled from device manager (or during enumeration), the
+		 * request will be sent with DTR being '1'.
+		 */
+		if (w_value & ACM_CTRL_DTR)
+			ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0);
+		else
+			ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR);
+
+		break;
+	default:
+
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+/* Queue every idle rx request on the bulk OUT endpoint so the host can
+ * send data.  On a queue failure the request is returned to the pool and
+ * the loop stops.
+ */
+static void rmnet_start_rx(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status;
+	struct usb_request *req;
+	struct list_head *pool = &dev->rx_idle;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(pool)) {
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+
+		/* drop the lock across usb_ep_queue */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			list_add_tail(&req->list, pool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Tasklet: move complete packets from the data SMD channel to the host
+ * over the bulk IN endpoint, one idle tx request per packet.
+ */
+static void rmnet_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+
+		sz = smd_cur_packet_size(dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			DBG(cdev, "rmnet data Tx buffers full\n");
+			break;
+		}
+		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		if (status) {
+			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
+			/* put the unqueued request back on the idle list */
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->dpkts_from_modem++;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+}
+
+/* Tasklet: flush host data packets queued in rx_queue into the data SMD
+ * channel in order.  A packet that doesn't fit leaves its length in
+ * rx_pkt so rmnet_smd_notify() reschedules us when SMD has room.
+ */
+static void rmnet_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (list_empty(&dev->rx_queue)) {
+			atomic_set(&dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			DBG(cdev, "rmnet SMD data channel full\n");
+			break;
+		}
+
+		list_del(&req->list);
+		/* smd_write may not be called under the spinlock */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			ERROR(cdev, "rmnet SMD data write failed\n");
+			break;
+		}
+		dev->dpkts_to_modem++;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* We have free rx data requests. */
+	rmnet_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue to rx_queue.
+ * We will not write into SMD directly untill rx_queue is
+ * empty to strictly follow the ordering requests.
+ */
+static void rmnet_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int ret;
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	dev->dpkts_from_host++;
+	/* rx_pkt == 0 means rx_queue is empty: fast path allowed */
+	if (!atomic_read(&dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
+			atomic_set(&dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			ERROR(cdev, "rmnet data smd write failed\n");
+		/* Restart Rx */
+		spin_lock(&dev->lock);
+		dev->dpkts_to_modem++;
+		list_add_tail(&req->list, &dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_start_rx(dev);
+		return;
+	}
+queue_req:
+	/* defer to rmnet_data_rx_tlet to preserve packet ordering */
+	list_add_tail(&req->list, &dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+/* Bulk IN completion: recycle the request onto tx_idle and, if the pool
+ * was empty (the tx tasklet may have stalled for lack of a request),
+ * kick the data tx tasklet again.
+ */
+static void rmnet_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int schedule = 0;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &dev->tx_idle);
+		dev->dpkts_to_host++;
+		if (schedule)
+			tasklet_schedule(&dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
+/* Workqueue teardown after cable disconnect: stop the tasklets, close
+ * both SMD channels (smd_close may sleep, hence a work item), and move
+ * every pending buffer/request back to its free pool.
+ * NOTE(review): list walks here run without dev->lock — presumably safe
+ * because the endpoints are already disabled; confirm against callers.
+ */
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+					disconnect_work);
+
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+
+	smd_close(dev->smd_ctl.ch);
+	dev->smd_ctl.flags = 0;
+
+	smd_close(dev->smd_data.ch);
+	dev->smd_data.flags = 0;
+
+	atomic_set(&dev->notify_count, 0);
+
+	list_for_each_safe(act, tmp, &dev->rx_queue) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_req_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+}
+
+/* SMD close may sleep
+ * schedule a work to close smd channels
+ */
+/* usb_function.disable: mark offline, flush and disable all three
+ * endpoints, then defer the sleeping SMD teardown to the workqueue.
+ */
+static void rmnet_disable(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	atomic_set(&dev->online, 0);
+
+	usb_ep_fifo_flush(dev->epnotify);
+	usb_ep_disable(dev->epnotify);
+	usb_ep_fifo_flush(dev->epout);
+	usb_ep_disable(dev->epout);
+
+	usb_ep_fifo_flush(dev->epin);
+	usb_ep_disable(dev->epin);
+
+	/* cleanup work */
+	queue_work(dev->wq, &dev->disconnect_work);
+}
+
+/* Workqueue half of set_alt: open both SMD channels (smd_open may
+ * sleep), wait for their OPEN events (signalled by rmnet_smd_notify),
+ * then mark the function online and prime the bulk OUT endpoint.
+ */
+static void rmnet_connect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret = 0;
+
+	/* Control channel for QMI messages */
+	ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
+			&dev->smd_ctl, rmnet_smd_notify);
+	if (ret) {
+		ERROR(cdev, "Unable to open control smd channel\n");
+		return;
+	}
+	wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
+				&dev->smd_ctl.flags));
+
+	/* Data channel for network packets */
+	ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
+			&dev->smd_data, rmnet_smd_notify);
+	if (ret) {
+		ERROR(cdev, "Unable to open data smd channel\n");
+		smd_close(dev->smd_ctl.ch);
+		return;
+	}
+	wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
+				&dev->smd_data.flags));
+
+	atomic_set(&dev->online, 1);
+	/* Queue Rx data requests */
+	rmnet_start_rx(dev);
+}
+
+/* SMD open may sleep.
+ * Schedule a work to open smd channels and enable
+ * endpoints if smd channels are opened successfully.
+ */
+/* usb_function.set_alt: enable all three endpoints at the speed chosen
+ * by ep_choose(), unwinding already-enabled endpoints on failure, then
+ * hand SMD channel opening to connect_work.  Returns 0 or a negative
+ * errno from usb_ep_enable.
+ */
+static int rmnet_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret = 0;
+
+	ret = usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+				&rmnet_hs_in_desc,
+				&rmnet_fs_in_desc));
+	if (ret) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					dev->epin->name, ret);
+		return ret;
+	}
+	ret = usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+				&rmnet_hs_out_desc,
+				&rmnet_fs_out_desc));
+	if (ret) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					dev->epout->name, ret);
+		usb_ep_disable(dev->epin);
+		return ret;
+	}
+
+	ret = usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+				&rmnet_hs_notify_desc,
+				&rmnet_fs_notify_desc));
+	if (ret) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					dev->epnotify->name, ret);
+		usb_ep_disable(dev->epin);
+		usb_ep_disable(dev->epout);
+		return ret;
+	}
+
+	queue_work(dev->wq, &dev->connect_work);
+	return 0;
+}
+
+/* Release every pooled QMI buffer and usb_request and reset all packet
+ * counters.  Called from the rmnet_bind error path and from unbind.
+ *
+ * Fix: requests must be freed on the endpoint they were allocated from.
+ * rmnet_bind() allocates rx_idle requests on epout (bulk OUT) and
+ * tx_idle requests on epin (bulk IN); the original code passed the
+ * endpoints swapped, handing each request back to the wrong endpoint's
+ * usb_ep_free_request().
+ */
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct list_head *act, *tmp;
+
+	dev->dpkts_to_host = 0;
+	dev->dpkts_from_modem = 0;
+	dev->dpkts_from_host = 0;
+	dev->dpkts_to_modem = 0;
+
+	dev->cpkts_to_host = 0;
+	dev->cpkts_from_modem = 0;
+	dev->cpkts_from_host = 0;
+	dev->cpkts_to_modem = 0;
+	/* free all usb requests in tx pool (allocated on the bulk IN ep) */
+	list_for_each_safe(act, tmp, &dev->tx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all usb requests in rx pool (allocated on the bulk OUT ep) */
+	list_for_each_safe(act, tmp, &dev->rx_idle) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	/* free all buffers in qmi request pool */
+	list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	/* free all buffers in qmi response pool */
+	list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
+		qmi = list_entry(act, struct qmi_buf, list);
+		list_del(&qmi->list);
+		rmnet_free_qmi(qmi);
+	}
+
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+}
+/* usb_function.bind: claim an interface number and three endpoints,
+ * mirror the assigned FS endpoint addresses into the HS descriptors,
+ * and preallocate the notify request, QMI buffer pools, and bulk
+ * request pools.  On any allocation failure everything already
+ * allocated is released via rmnet_free_buf().  Returns 0 or -errno.
+ */
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	int i, id, ret;
+	struct qmi_buf *qmi;
+	struct usb_request *req;
+	struct usb_ep *ep;
+
+	dev->cdev = cdev;
+
+	/* allocate interface ID */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ifc_id = id;
+	rmnet_interface_desc.bInterfaceNumber = id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep)
+		return -ENODEV;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epin = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep)
+		return -ENODEV;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epout = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep)
+		return -ENODEV;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epnotify = ep;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+	}
+
+	/* allocate notification */
+	dev->notify_req = rmnet_alloc_req(dev->epnotify, RMNET_MAX_NOTIFY_SIZE,
+							GFP_KERNEL);
+	if (IS_ERR(dev->notify_req))
+		return PTR_ERR(dev->notify_req);
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
+
+	/* Allocate the qmi request and response buffers */
+	for (i = 0; i < QMI_REQ_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_req_pool);
+	}
+
+	for (i = 0; i < QMI_RESP_MAX; i++) {
+		qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
+		if (IS_ERR(qmi)) {
+			ret = PTR_ERR(qmi);
+			goto free_buf;
+		}
+		list_add_tail(&qmi->list, &dev->qmi_resp_pool);
+	}
+
+	/* Allocate bulk in/out requests for data transfer */
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->length = TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_complete_epout;
+		list_add_tail(&req->list, &dev->rx_idle);
+	}
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, TX_REQ_SIZE, GFP_KERNEL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			goto free_buf;
+		}
+		req->context = dev;
+		req->complete = rmnet_complete_epin;
+		list_add_tail(&req->list, &dev->tx_idle);
+	}
+
+	return 0;
+
+free_buf:
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+	return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/* debugfs read handler: report channel state and packet counters.
+ * The counters and SMD flags are sampled under the device lock so the
+ * snapshot handed to user space is self-consistent (the old code copied
+ * the smd_ctl/smd_data structs before taking the lock, racing with the
+ * tasklets that update them).
+ */
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	char *buf;
+	unsigned long flags;
+	int ret;
+
+	buf = kzalloc(512, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = scnprintf(buf, 512,
+			"smd_control_ch_opened: %lu\n"
+			"smd_data_ch_opened: %lu\n"
+			"usb online : %d\n"
+			"dpkts_from_modem: %lu\n"
+			"dpkts_to_host: %lu\n"
+			"pending_dpkts_to_host: %lu\n"
+			"dpkts_from_host: %lu\n"
+			"dpkts_to_modem: %lu\n"
+			"pending_dpkts_to_modem: %lu\n"
+			"cpkts_from_modem: %lu\n"
+			"cpkts_to_host: %lu\n"
+			"pending_cpkts_to_host: %lu\n"
+			"cpkts_from_host: %lu\n"
+			"cpkts_to_modem: %lu\n"
+			"pending_cpkts_to_modem: %lu\n"
+			"smd_read_avail_ctrl: %d\n"
+			"smd_write_avail_ctrl: %d\n"
+			"smd_read_avail_data: %d\n"
+			"smd_write_avail_data: %d\n",
+			dev->smd_ctl.flags, dev->smd_data.flags,
+			atomic_read(&dev->online),
+			dev->dpkts_from_modem, dev->dpkts_to_host,
+			(dev->dpkts_from_modem - dev->dpkts_to_host),
+			dev->dpkts_from_host, dev->dpkts_to_modem,
+			(dev->dpkts_from_host - dev->dpkts_to_modem),
+			dev->cpkts_from_modem, dev->cpkts_to_host,
+			(dev->cpkts_from_modem - dev->cpkts_to_host),
+			dev->cpkts_from_host, dev->cpkts_to_modem,
+			(dev->cpkts_from_host - dev->cpkts_to_modem),
+			smd_read_avail(dev->smd_ctl.ch),
+			smd_write_avail(dev->smd_ctl.ch),
+			smd_read_avail(dev->smd_data.ch),
+			smd_write_avail(dev->smd_data.ch));
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/* debugfs write handler: any write zeroes every packet counter. */
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev->lock, irq_flags);
+	/* data path counters */
+	dev->dpkts_from_modem = 0;
+	dev->dpkts_to_host = 0;
+	dev->dpkts_from_host = 0;
+	dev->dpkts_to_modem = 0;
+	/* control path counters */
+	dev->cpkts_from_modem = 0;
+	dev->cpkts_to_host = 0;
+	dev->cpkts_from_host = 0;
+	dev->cpkts_to_modem = 0;
+	spin_unlock_irqrestore(&dev->lock, irq_flags);
+
+	return count;
+}
+
+/* Stash the rmnet_dev passed at debugfs_create_file() time so the
+ * read/write handlers can reach it via file->private_data.
+ */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* These are referenced only within this file (usb_debugfs_init() and
+ * rmnet_unbind()), so give them internal linkage per kernel convention.
+ */
+static const struct file_operations rmnet_debug_stats_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+static struct dentry *dent;
+static struct dentry *dent_status;
+
+/* Create the usb_rmnet debugfs directory and its "status" file.
+ * debugfs_create_dir()/debugfs_create_file() return NULL on allocation
+ * failure and an ERR_PTR when debugfs is unavailable, so both forms of
+ * failure must be checked.  Failures are non-fatal: the function simply
+ * runs without debugfs entries.
+ */
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR_OR_NULL(dent)) {
+		dent = NULL;
+		return;
+	}
+
+	dent_status = debugfs_create_file("status", 0444, dent, dev,
+			&rmnet_debug_stats_ops);
+	if (IS_ERR_OR_NULL(dent_status)) {
+		debugfs_remove(dent);
+		dent = NULL;
+	}
+}
+#else
+/* No-op stub when debugfs support is compiled out. */
+static void usb_debugfs_init(struct rmnet_dev *dev) {}
+#endif
+
+/* Undo everything set up by rmnet_function_add()/rmnet_bind(): stop all
+ * deferred work first, then release buffers, endpoints and the device.
+ */
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+
+	/* No tasklet may touch the device after this point. */
+	tasklet_kill(&dev->smd_ctl.rx_tlet);
+	tasklet_kill(&dev->smd_ctl.tx_tlet);
+	tasklet_kill(&dev->smd_data.rx_tlet);
+	tasklet_kill(&dev->smd_data.tx_tlet);
+	flush_workqueue(dev->wq);
+
+	rmnet_free_buf(dev);
+	dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
+
+	debugfs_remove_recursive(dent);
+	destroy_workqueue(dev->wq);
+	kfree(dev);
+}
+
+/* Allocate and initialise the rmnet function device (work queue,
+ * tasklets, wait queues, request/packet lists), then register it with
+ * the composite framework.  Debugfs entries are created only once
+ * registration has succeeded.
+ */
+int rmnet_function_add(struct usb_configuration *c)
+{
+	int status;
+	struct rmnet_dev *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		status = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	atomic_set(&dev->smd_ctl.rx_pkt, 0);
+	atomic_set(&dev->smd_data.rx_pkt, 0);
+
+	INIT_WORK(&dev->connect_work, rmnet_connect_work);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+
+	/* control channel tasklets */
+	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
+					(unsigned long) dev);
+	/* data channel tasklets */
+	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&dev->smd_ctl.wait);
+	init_waitqueue_head(&dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&dev->qmi_req_pool);
+	INIT_LIST_HEAD(&dev->qmi_req_q);
+	INIT_LIST_HEAD(&dev->qmi_resp_pool);
+	INIT_LIST_HEAD(&dev->qmi_resp_q);
+	INIT_LIST_HEAD(&dev->rx_idle);
+	INIT_LIST_HEAD(&dev->rx_queue);
+	INIT_LIST_HEAD(&dev->tx_idle);
+
+	dev->function.name = "rmnet";
+	dev->function.strings = rmnet_strings;
+	dev->function.descriptors = rmnet_fs_function;
+	dev->function.hs_descriptors = rmnet_hs_function;
+	dev->function.bind = rmnet_bind;
+	dev->function.unbind = rmnet_unbind;
+	dev->function.setup = rmnet_setup;
+	dev->function.set_alt = rmnet_set_alt;
+	dev->function.disable = rmnet_disable;
+
+	status = usb_add_function(c, &dev->function);
+	if (status)
+		goto free_wq;
+
+	usb_debugfs_init(dev);
+	return 0;
+
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	kfree(dev);
+	return status;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET
+/* Glue for the Android composite gadget: expose rmnet as a selectable
+ * USB function.
+ */
+static struct android_usb_function rmnet_function = {
+	.name = "rmnet",
+	.bind_config = rmnet_function_add,
+};
+
+/* Register with the Android composite framework at module init. */
+static int __init init(void)
+{
+	android_register_function(&rmnet_function);
+	return 0;
+}
+module_init(init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET */
diff --git a/drivers/usb/gadget/f_rmnet_smd_sdio.c b/drivers/usb/gadget/f_rmnet_smd_sdio.c
new file mode 100644
index 0000000..e99716b
--- /dev/null
+++ b/drivers/usb/gadget/f_rmnet_smd_sdio.c
@@ -0,0 +1,1995 @@
+/*
+ * f_rmnet_smd_sdio.c -- RmNet SMD & SDIO function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <asm/ioctls.h>
+
+#include <linux/usb/cdc.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/android_composite.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/msm_smd.h>
+#include <mach/sdio_cmux.h>
+#include <mach/sdio_dmux.h>
+
+/* SDIO channel ids and the SMD data channel name come from Kconfig but
+ * may be overridden at module load time.
+ */
+static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL;
+module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
+
+static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL;
+module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
+MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
+
+static char *rmnet_smd_data_ch = CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL;
+module_param(rmnet_smd_data_ch, charp, S_IRUGO);
+MODULE_PARM_DESC(rmnet_smd_data_ch, "RmNet data SMD channel");
+
+#define ACM_CTRL_DTR	(1 << 0)
+
+/* Headroom reserved on Rx skbs for the SDIO mux header. */
+#define SDIO_MUX_HDR           8
+#define RMNET_SDIO_NOTIFY_INTERVAL  5
+#define RMNET_SDIO_MAX_NFY_SZE  sizeof(struct usb_cdc_notification)
+
+/* SDIO transport request pool sizing. */
+#define RMNET_SDIO_RX_REQ_MAX             16
+#define RMNET_SDIO_RX_REQ_SIZE            2048
+#define RMNET_SDIO_TX_REQ_MAX             100
+
+/* Flow-control water marks; runtime-tunable via the module params below. */
+#define RMNET_SDIO_TX_PKT_DROP_THRESHOLD		1000
+#define RMNET_SDIO_RX_PKT_FLOW_CTRL_EN_THRESHOLD	1000
+#define RMNET_SDIO_RX_PKT_FLOW_CTRL_DISABLE		500
+
+static uint32_t sdio_tx_pkt_drop_thld = RMNET_SDIO_TX_PKT_DROP_THRESHOLD;
+module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+static uint32_t sdio_rx_fctrl_en_thld =
+		RMNET_SDIO_RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+static uint32_t sdio_rx_fctrl_dis_thld = RMNET_SDIO_RX_PKT_FLOW_CTRL_DISABLE;
+module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+
+/* SMD transport request pool sizing. */
+#define RMNET_SMD_RX_REQ_MAX		8
+#define RMNET_SMD_RX_REQ_SIZE		2048
+#define RMNET_SMD_TX_REQ_MAX		8
+#define RMNET_SMD_TX_REQ_SIZE		2048
+#define RMNET_SMD_TXN_MAX		2048
+
+/* One queued control (QMI) message with its payload buffer. */
+struct rmnet_ctrl_pkt {
+	void *buf;
+	int len;
+	struct list_head list;
+};
+
+/* Which transport currently backs the data path. */
+enum usb_rmnet_xport_type {
+	USB_RMNET_XPORT_UNDEFINED,
+	USB_RMNET_XPORT_SDIO,
+	USB_RMNET_XPORT_SMD,
+};
+
+struct rmnet_ctrl_dev {
+	/* control packets received from the host, pending consumption
+	 * by the reader (see rmnet_command_complete / tx_wait_q wakeup)
+	 */
+	struct list_head tx_q;
+	wait_queue_head_t tx_wait_q;
+	unsigned long tx_len;
+
+	/* NOTE(review): rx_q usage is not visible in this chunk —
+	 * presumably responses headed back to the host; verify.
+	 */
+	struct list_head rx_q;
+	unsigned long rx_len;
+
+	unsigned long cbits_to_modem;
+
+	/* non-zero while the control device is held open */
+	unsigned	opened;
+};
+
+struct rmnet_sdio_dev {
+	/* Tx/Rx lists */
+	struct list_head tx_idle;
+	struct sk_buff_head    tx_skb_queue;
+	struct list_head rx_idle;
+	struct sk_buff_head    rx_skb_queue;
+
+
+
+	struct work_struct data_rx_work;
+
+	struct delayed_work open_work;
+	atomic_t sdio_open;
+
+	/* packets handed to the SDIO dmux but not yet written out;
+	 * drives the Rx flow-control water marks
+	 */
+	unsigned int dpkts_pending_atdmux;
+};
+
+/* Data SMD channel */
+struct rmnet_smd_info {
+	struct smd_channel *ch;
+	struct tasklet_struct tx_tlet;
+	struct tasklet_struct rx_tlet;
+#define CH_OPENED 0
+	unsigned long flags;
+	/* pending rx packet length */
+	atomic_t rx_pkt;
+	/* wait for smd open event*/
+	wait_queue_head_t wait;
+};
+
+struct rmnet_smd_dev {
+	/* Tx/Rx lists */
+	struct list_head tx_idle;
+	struct list_head rx_idle;
+	struct list_head rx_queue;
+
+	struct rmnet_smd_info smd_data;
+};
+
+/* Per-function device state; one instance per gadget configuration. */
+struct rmnet_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+
+	struct usb_ep *epout;
+	struct usb_ep *epin;
+	struct usb_ep *epnotify;
+	struct usb_request *notify_req;
+
+	struct rmnet_smd_dev smd_dev;
+	struct rmnet_sdio_dev sdio_dev;
+	struct rmnet_ctrl_dev ctrl_dev;
+
+	u8 ifc_id;
+	enum usb_rmnet_xport_type xport;
+	spinlock_t lock;
+	atomic_t online;
+	atomic_t notify_count;
+	struct workqueue_struct *wq;
+	struct work_struct disconnect_work;
+
+	/* pkt counters */
+	unsigned long dpkts_tomsm;
+	unsigned long dpkts_tomdm;
+	unsigned long dpkts_tolaptop;
+	unsigned long tx_drp_cnt;
+	unsigned long cpkts_tolaptop;
+	unsigned long cpkts_tomdm;
+	unsigned long cpkts_drp_cnt;
+};
+
+/* Singleton instance of this function. */
+static struct rmnet_dev *_dev;
+
+/* Vendor-specific interface with one interrupt-in (notify) endpoint and
+ * a bulk in/out pair for data.
+ */
+static struct usb_interface_descriptor rmnet_interface_desc = {
+	.bLength =              USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bNumEndpoints =        3,
+	.bInterfaceClass =      USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass =   USB_CLASS_VENDOR_SPEC,
+	.bInterfaceProtocol =   USB_CLASS_VENDOR_SPEC,
+};
+
+/* Full speed support */
+static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	.bInterval =            1 << RMNET_SDIO_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_in_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize = __constant_cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *rmnet_fs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
+	NULL,
+};
+
+/* High speed support */
+static struct usb_endpoint_descriptor rmnet_hs_notify_desc  = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
+	/* HS interrupt bInterval is log2-encoded (2^(n-1) microframes) */
+	.bInterval =            RMNET_SDIO_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_IN,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
+	.bLength =              USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =      USB_DT_ENDPOINT,
+	.bEndpointAddress =     USB_DIR_OUT,
+	.bmAttributes =         USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *rmnet_hs_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
+	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
+	NULL,
+};
+
+/* String descriptors */
+
+static struct usb_string rmnet_string_defs[] = {
+	[0].s = "RmNet",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rmnet_string_table = {
+	.language =             0x0409, /* en-us */
+	.strings =              rmnet_string_defs,
+};
+
+static struct usb_gadget_strings *rmnet_strings[] = {
+	&rmnet_string_table,
+	NULL,
+};
+
+/* Human-readable name of a transport type, for logs. */
+static char *xport_to_str(enum usb_rmnet_xport_type t)
+{
+	if (t == USB_RMNET_XPORT_SDIO)
+		return "SDIO";
+	if (t == USB_RMNET_XPORT_SMD)
+		return "SMD";
+	return "UNDEFINED";
+}
+
+/* Allocate a control packet with a @len byte zeroed payload buffer.
+ * Returns NULL on allocation failure (callers must check).
+ */
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *cpkt;
+
+	cpkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!cpkt)
+		return NULL;
+
+	cpkt->buf = kzalloc(len, flags);
+	if (!cpkt->buf) {
+		kfree(cpkt);
+		return NULL;
+	}
+
+	cpkt->len = len;
+
+	return cpkt;
+}
+
+/* Free a control packet and its payload buffer; NULL is a no-op
+ * (guards against a NULL deref through cpkt->buf).
+ */
+static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *cpkt)
+{
+	if (!cpkt)
+		return;
+	kfree(cpkt->buf);
+	kfree(cpkt);
+}
+
+/*
+ * Allocate a usb_request plus a kmalloc'd transfer buffer of @len bytes
+ * (no buffer when @len is zero).  Returns the request on success or
+ * ERR_PTR(-ENOMEM) on any failure.
+ */
+static struct usb_request *
+rmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	if (len) {
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (!req->buf) {
+			usb_ep_free_request(ep, req);
+			return ERR_PTR(-ENOMEM);
+		}
+		req->length = len;
+	}
+
+	return req;
+}
+
+/*
+ * Release a usb_request obtained from rmnet_alloc_req(), including its
+ * transfer buffer.
+ */
+static void rmnet_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/* Attach a freshly allocated skb to @req and queue it on the bulk-out
+ * endpoint.  Headroom is reserved so the SDIO mux can later prepend its
+ * header without reallocating.  The skb is freed if the queue fails.
+ */
+static int rmnet_sdio_rx_submit(struct rmnet_dev *dev, struct usb_request *req,
+				gfp_t gfp_flags)
+{
+	int ret;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
+	if (!skb)
+		return -ENOMEM;
+	skb_reserve(skb, SDIO_MUX_HDR);
+
+	req->buf = skb->data;
+	req->length = RMNET_SDIO_RX_REQ_SIZE;
+	req->context = skb;
+
+	ret = usb_ep_queue(dev->epout, req, gfp_flags);
+	if (ret)
+		dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
+/* Submit every idle Rx request to the bulk-out endpoint.  The device
+ * lock is dropped around the submit because rmnet_sdio_rx_submit() may
+ * sleep (GFP_KERNEL allocation).
+ */
+static void rmnet_sdio_start_rx(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status;
+	struct usb_request *req;
+	struct list_head *pool;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: USB not connected\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	pool = &sdio_dev->rx_idle;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+
+		/* drop the lock: the submit path may sleep */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = rmnet_sdio_rx_submit(dev, req, GFP_KERNEL);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			/* park the request; a later event retries */
+			list_add_tail(&req->list, &sdio_dev->rx_idle);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Pair queued downlink skbs with idle Tx requests and push them to the
+ * bulk-in endpoint until either list runs dry.
+ */
+static void rmnet_sdio_start_tx(struct rmnet_dev *dev)
+{
+	unsigned long			flags;
+	int				status;
+	struct sk_buff			*skb;
+	struct usb_request		*req;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(&sdio_dev->tx_idle)) {
+		skb = __skb_dequeue(&sdio_dev->tx_skb_queue);
+		if (!skb) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			return;
+		}
+
+		req = list_first_entry(&sdio_dev->tx_idle,
+				struct usb_request, list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
+		list_del(&req->list);
+		/* plain unlock: interrupts stay disabled (flags still
+		 * held) across the atomic usb_ep_queue() call
+		 */
+		spin_unlock(&dev->lock);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		spin_lock(&dev->lock);
+		if (status) {
+			/* USB still online, queue requests back */
+			if (atomic_read(&dev->online)) {
+				ERROR(cdev, "rmnet tx data enqueue err %d\n",
+						status);
+				list_add_tail(&req->list, &sdio_dev->tx_idle);
+				__skb_queue_head(&sdio_dev->tx_skb_queue, skb);
+			} else {
+				/* buf points into the skb; detach before
+				 * freeing the request
+				 */
+				req->buf = 0;
+				rmnet_free_req(dev->epin, req);
+				dev_kfree_skb_any(skb);
+			}
+			break;
+		}
+		dev->dpkts_tolaptop++;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Callback from the SDIO dmux with a downlink skb destined for the
+ * host.  Drop it when the USB Tx backlog exceeds the drop threshold,
+ * otherwise queue it and kick the Tx path.
+ */
+static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	unsigned long flags;
+	int drop = 0;
+
+	if (!skb)
+		return;
+
+	if (!atomic_read(&dev->online)) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (sdio_dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) {
+		pr_err_ratelimited("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
+			__func__, dev->tx_drp_cnt);
+		dev->tx_drp_cnt++;
+		drop = 1;
+	} else {
+		__skb_queue_tail(&sdio_dev->tx_skb_queue, skb);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (drop) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	rmnet_sdio_start_tx(dev);
+}
+
+/* SDIO dmux has consumed an uplink skb: free it, drop the pending
+ * count and, once below the low water mark, re-arm USB Rx.
+ */
+static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb)
+{
+	struct rmnet_dev *dev = priv;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+
+	if (!skb)
+		return;
+
+	dev_kfree_skb_any(skb);
+	/* this function is called from
+	 * sdio mux from spin_lock_irqsave, so a plain spin_lock is
+	 * sufficient here (interrupts already disabled by the caller)
+	 */
+	spin_lock(&dev->lock);
+	sdio_dev->dpkts_pending_atdmux--;
+
+	/* still above the flow-control low water mark: keep Rx throttled */
+	if (sdio_dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) {
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	rmnet_sdio_start_rx(dev);
+}
+
+/* Work item: drain the Rx skb queue into the SDIO dmux.  The lock is
+ * dropped around msm_sdio_dmux_write() since it may block.
+ */
+static void rmnet_sdio_data_rx_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+			sdio_dev.data_rx_work);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	struct sk_buff *skb;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue))) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret < 0) {
+			ERROR(cdev, "rmnet SDIO data write failed\n");
+			dev_kfree_skb_any(skb);
+		} else {
+			dev->dpkts_tomdm++;
+			sdio_dev->dpkts_pending_atdmux++;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Bulk-out completion (host -> device data, SDIO transport): hand the
+ * filled skb to the dmux worker and resubmit the request unless Rx
+ * flow control is engaged.  Runs in interrupt context.
+ */
+static void
+rmnet_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct sk_buff *skb = req->context;
+	int status = req->status;
+	int queue = 0;
+
+	/* transport was switched away: release everything */
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	if (queue) {
+		__skb_queue_tail(&sdio_dev->rx_skb_queue, skb);
+		queue_work(dev->wq, &sdio_dev->data_rx_work);
+	}
+
+	/* too many packets pending at the dmux: throttle Rx by parking
+	 * the request on the idle list instead of resubmitting it
+	 */
+	if (sdio_dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) {
+		list_add_tail(&req->list, &sdio_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+	spin_unlock(&dev->lock);
+
+	status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
+	if (status) {
+		ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+		list_add_tail(&req->list, &sdio_dev->rx_idle);
+	}
+}
+
+/* Bulk-in completion (device -> host data, SDIO transport): recycle the
+ * request and try to send the next queued skb.
+ */
+static void
+rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = ep->driver_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct sk_buff  *skb = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		break;
+	}
+
+	spin_lock(&dev->lock);
+	list_add_tail(&req->list, &sdio_dev->tx_idle);
+	spin_unlock(&dev->lock);
+	dev_kfree_skb_any(skb);
+
+	rmnet_sdio_start_tx(dev);
+}
+
+/* Pre-allocate bulk requests for the SDIO transport and kick off Rx.
+ * Requests carry no buffer of their own (skbs are attached at submit
+ * time).  If an allocation fails midway, whatever is already on the
+ * idle lists is reclaimed on cable disconnect.
+ */
+static int rmnet_sdio_enable(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_request *req;
+	int n;
+
+	for (n = 0; n < RMNET_SDIO_RX_REQ_MAX; n++) {
+		req = rmnet_alloc_req(dev->epout, 0, GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->complete = rmnet_sdio_complete_epout;
+		list_add_tail(&req->list, &sdio_dev->rx_idle);
+	}
+
+	for (n = 0; n < RMNET_SDIO_TX_REQ_MAX; n++) {
+		req = rmnet_alloc_req(dev->epin, 0, GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->complete = rmnet_sdio_complete_epin;
+		list_add_tail(&req->list, &sdio_dev->tx_idle);
+	}
+
+	rmnet_sdio_start_rx(dev);
+	return 0;
+}
+
+/* Queue every idle Rx request on the bulk-out endpoint (SMD transport). */
+static void rmnet_smd_start_rx(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	int status;
+	struct usb_request *req;
+	struct list_head *pool = &smd_dev->rx_idle;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (!list_empty(pool)) {
+		/* list_first_entry() for consistency with the rest of
+		 * the driver (same as list_entry(pool->next, ...))
+		 */
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		status = usb_ep_queue(dev->epout, req, GFP_ATOMIC);
+		spin_lock_irqsave(&dev->lock, flags);
+
+		if (status) {
+			ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
+			list_add_tail(&req->list, pool);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* Tasklet: drain complete packets from the data SMD channel into idle
+ * Tx requests and queue them on the bulk-in endpoint.
+ */
+static void rmnet_smd_data_tx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int status;
+	int sz;
+	unsigned long flags;
+
+	while (1) {
+		if (!atomic_read(&dev->online))
+			break;
+		/* only forward a packet once it is fully readable */
+		sz = smd_cur_packet_size(smd_dev->smd_data.ch);
+		if (sz == 0)
+			break;
+		if (smd_read_avail(smd_dev->smd_data.ch) < sz)
+			break;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (list_empty(&smd_dev->tx_idle)) {
+			spin_unlock_irqrestore(&dev->lock, flags);
+			DBG(cdev, "rmnet data Tx buffers full\n");
+			break;
+		}
+		req = list_first_entry(&smd_dev->tx_idle,
+				struct usb_request, list);
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		req->length = smd_read(smd_dev->smd_data.ch, req->buf, sz);
+		status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
+		if (status) {
+			ERROR(cdev, "rmnet tx data enqueue err %d\n", status);
+			spin_lock_irqsave(&dev->lock, flags);
+			list_add_tail(&req->list, &smd_dev->tx_idle);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			break;
+		}
+		dev->dpkts_tolaptop++;
+	}
+
+}
+
+/* Tasklet: flush queued Rx requests into the data SMD channel, in
+ * order, stopping when the channel lacks room (rx_pkt records the
+ * stalled length so the SMD notify callback can reschedule us).
+ */
+static void rmnet_smd_data_rx_tlet(unsigned long arg)
+{
+	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	while (1) {
+		if (!atomic_read(&dev->online))
+			break;
+		if (list_empty(&smd_dev->rx_queue)) {
+			atomic_set(&smd_dev->smd_data.rx_pkt, 0);
+			break;
+		}
+		req = list_first_entry(&smd_dev->rx_queue,
+			struct usb_request, list);
+		if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
+			atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
+			DBG(cdev, "rmnet SMD data channel full\n");
+			break;
+		}
+
+		list_del(&req->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
+		spin_lock_irqsave(&dev->lock, flags);
+		if (ret != req->actual) {
+			ERROR(cdev, "rmnet SMD data write failed\n");
+			/* return the request to the idle pool so it is
+			 * not leaked (it was unlinked above and would
+			 * otherwise never be freed or reused)
+			 */
+			list_add_tail(&req->list, &smd_dev->rx_idle);
+			break;
+		}
+		dev->dpkts_tomsm++;
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* We have free rx data requests. */
+	rmnet_smd_start_rx(dev);
+}
+
+/* If SMD has enough room to accommodate a data rx packet,
+ * write into SMD directly. Otherwise enqueue to rx_queue.
+ * We will not write into SMD directly until rx_queue is
+ * empty, to strictly preserve packet ordering.
+ */
+static void
+rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int ret;
+
+	/* transport switched away: release the request */
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case 0:
+		/* normal completion */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	default:
+		/* unexpected failure */
+		ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	/* rx_pkt == 0 means nothing is stalled waiting for SMD room,
+	 * so ordering allows a direct write
+	 */
+	if (!atomic_read(&smd_dev->smd_data.rx_pkt)) {
+		if (smd_write_avail(smd_dev->smd_data.ch) < req->actual) {
+			atomic_set(&smd_dev->smd_data.rx_pkt, req->actual);
+			goto queue_req;
+		}
+		spin_unlock(&dev->lock);
+		ret = smd_write(smd_dev->smd_data.ch, req->buf, req->actual);
+		/* This should never happen */
+		if (ret != req->actual)
+			ERROR(cdev, "rmnet data smd write failed\n");
+		/* Restart Rx */
+		dev->dpkts_tomsm++;
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+		spin_unlock(&dev->lock);
+		rmnet_smd_start_rx(dev);
+		return;
+	}
+queue_req:
+	/* reached with dev->lock held from both paths above */
+	list_add_tail(&req->list, &smd_dev->rx_queue);
+	spin_unlock(&dev->lock);
+}
+
+/* Bulk-in completion (SMD transport): recycle the request and, if the
+ * idle list was previously empty, reschedule the Tx tasklet which may
+ * have stalled waiting for a free request.
+ */
+static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+	int schedule = 0;
+
+	if (dev->xport == USB_RMNET_XPORT_UNDEFINED) {
+		rmnet_free_req(ep, req);
+		return;
+	}
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		spin_lock(&dev->lock);
+		list_add_tail(&req->list, &smd_dev->tx_idle);
+		spin_unlock(&dev->lock);
+		break;
+	default:
+		ERROR(cdev, "rmnet data tx ep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+		spin_lock(&dev->lock);
+		if (list_empty(&smd_dev->tx_idle))
+			schedule = 1;
+		list_add_tail(&req->list, &smd_dev->tx_idle);
+
+		if (schedule)
+			tasklet_schedule(&smd_dev->smd_data.tx_tlet);
+		spin_unlock(&dev->lock);
+		break;
+	}
+
+}
+
+
+/* SMD event callback for the data channel; runs in the SMD driver's
+ * context, so real work is deferred to the tasklets.
+ */
+static void rmnet_smd_notify(void *priv, unsigned event)
+{
+	struct rmnet_dev *dev = priv;
+	struct rmnet_smd_info *smd_info = &dev->smd_dev.smd_data;
+	int len = atomic_read(&smd_info->rx_pkt);
+
+	switch (event) {
+	case SMD_EVENT_DATA: {
+		if (!atomic_read(&dev->online))
+			break;
+		/* a stalled host->modem packet now fits: resume rx tasklet */
+		if (len && (smd_write_avail(smd_info->ch) >= len))
+			tasklet_schedule(&smd_info->rx_tlet);
+
+		/* modem data pending: push it towards the host */
+		if (smd_read_avail(smd_info->ch))
+			tasklet_schedule(&smd_info->tx_tlet);
+
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		/* usb endpoints are not enabled until smd channels
+		 * are opened. wake up worker thread to continue
+		 * connection processing
+		 */
+		set_bit(CH_OPENED, &smd_info->flags);
+		wake_up(&smd_info->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		/* We will never come here.
+		 * reset flags after closing smd channel
+		 * */
+		clear_bit(CH_OPENED, &smd_info->flags);
+		break;
+	}
+}
+
+/* Open the data SMD channel (blocking until SMD_EVENT_OPEN) and
+ * pre-allocate the bulk request pools for the SMD transport.
+ */
+static int rmnet_smd_enable(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	int i, ret;
+	struct usb_request *req;
+
+	/* channel already open from a previous session: just (re)build
+	 * the request pools
+	 */
+	if (test_bit(CH_OPENED, &smd_dev->smd_data.flags))
+		goto smd_alloc_req;
+
+	ret = smd_open(rmnet_smd_data_ch, &smd_dev->smd_data.ch,
+			dev, rmnet_smd_notify);
+	if (ret) {
+		ERROR(cdev, "Unable to open data smd channel\n");
+		return ret;
+	}
+
+	wait_event(smd_dev->smd_data.wait, test_bit(CH_OPENED,
+				&smd_dev->smd_data.flags));
+
+	/* Allocate bulk in/out requests for data transfer.
+	 * If the memory allocation fails, all the allocated
+	 * requests will be freed upon cable disconnect.
+	 */
+smd_alloc_req:
+	for (i = 0; i < RMNET_SMD_RX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epout, RMNET_SMD_RX_REQ_SIZE,
+				GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->length = RMNET_SMD_TXN_MAX;
+		req->context = dev;
+		req->complete = rmnet_smd_complete_epout;
+		list_add_tail(&req->list, &smd_dev->rx_idle);
+	}
+
+	for (i = 0; i < RMNET_SMD_TX_REQ_MAX; i++) {
+		req = rmnet_alloc_req(dev->epin, RMNET_SMD_TX_REQ_SIZE,
+				GFP_KERNEL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+		req->context = dev;
+		req->complete = rmnet_smd_complete_epin;
+		list_add_tail(&req->list, &smd_dev->tx_idle);
+	}
+
+	rmnet_smd_start_rx(dev);
+	return 0;
+}
+
+/* Interrupt-in completion: notify_count tracks outstanding
+ * RESPONSE_AVAILABLE notifications; keep resubmitting until it drains.
+ */
+static void rmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&dev->notify_count, 0);
+		break;
+	default:
+		ERROR(cdev, "rmnet notifyep error %d\n", status);
+		/* FALLTHROUGH */
+	case 0:
+
+		if (atomic_dec_and_test(&dev->notify_count))
+			break;
+
+		/* more responses queued up meanwhile: notify again */
+		status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&dev->notify_count);
+			ERROR(cdev, "rmnet notify ep enq error %d\n", status);
+		}
+		break;
+	}
+}
+
+/* Tell the host a control response is ready via a CDC
+ * RESPONSE_AVAILABLE notification on the interrupt endpoint.  Only the
+ * first of several pending responses queues the request; the completion
+ * handler above re-queues for the rest.
+ */
+static void ctrl_response_available(struct rmnet_dev *dev)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request              *req = dev->notify_req;
+	struct usb_cdc_notification     *event = req->buf;
+	int status;
+
+	/* Response will be sent later */
+	if (atomic_inc_return(&dev->notify_count) != 1)
+		return;
+
+	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
+	event->wValue = cpu_to_le16(0);
+	event->wIndex = cpu_to_le16(dev->ifc_id);
+	event->wLength = cpu_to_le16(0);
+
+	status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
+	if (status < 0) {
+		atomic_dec(&dev->notify_count);
+		ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
+	}
+}
+
+/* Upper bound on a single encapsulated control message. */
+#define MAX_CTRL_PKT_SIZE	4096
+
+/* Completion for encapsulated responses sent on ep0; only unexpected
+ * errors are worth logging.
+ */
+static void rmnet_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev *dev = req->context;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int status = req->status;
+
+	if (status == 0 || status == -ECONNRESET || status == -ESHUTDOWN)
+		return;
+
+	INFO(cdev, "rmnet %s response error %d, %d/%d\n",
+		ep->name, status,
+		req->actual, req->length);
+}
+
+/*
+ * ep0 OUT data-stage completion for SEND_ENCAPSULATED_COMMAND: copy
+ * the host's control packet into a fresh rmnet_ctrl_pkt and queue it
+ * for the /dev/rmnet_ctrl reader.  Dropped (and counted) when the
+ * control device is not open.
+ * NOTE(review): plain spin_lock assumes this completion runs with
+ * interrupts disabled -- confirm against the UDC's completion path.
+ */
+static void rmnet_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct rmnet_dev		*dev = req->context;
+	struct usb_composite_dev	*cdev = dev->cdev;
+	struct rmnet_ctrl_dev		*ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_ctrl_pkt		*cpkt;
+	int				len = req->actual;
+
+	if (req->status < 0) {
+		ERROR(cdev, "rmnet command error %d\n", req->status);
+		return;
+	}
+
+	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
+	if (!cpkt) {
+		ERROR(cdev, "unable to allocate memory for ctrl req\n");
+		return;
+	}
+
+	spin_lock(&dev->lock);
+	if (!ctrl_dev->opened) {
+		spin_unlock(&dev->lock);
+		kfree(cpkt);
+		dev->cpkts_drp_cnt++;
+		pr_err_ratelimited(
+			"%s: ctrl pkts dropped: cpkts_drp_cnt: %lu\n",
+			__func__, dev->cpkts_drp_cnt);
+		return;
+	}
+
+	memcpy(cpkt->buf, req->buf, len);
+
+	list_add_tail(&cpkt->list, &ctrl_dev->tx_q);
+	ctrl_dev->tx_len++;
+	spin_unlock(&dev->lock);
+
+	/* wakeup read thread */
+	wake_up(&ctrl_dev->tx_wait_q);
+}
+
+/*
+ * ep0 class-request handler.  Implements the CDC encapsulated
+ * command/response pair used to tunnel control messages between host
+ * and modem, plus SET_CONTROL_LINE_STATE as a DTR workaround (see the
+ * comment in that case).  Returns the ep0 data-stage length queued, or
+ * a negative errno.
+ */
+static int
+rmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request      *req = cdev->req;
+	int                     ret = -EOPNOTSUPP;
+	u16                     w_index = le16_to_cpu(ctrl->wIndex);
+	u16                     w_value = le16_to_cpu(ctrl->wValue);
+	u16                     w_length = le16_to_cpu(ctrl->wLength);
+	struct rmnet_ctrl_pkt	*cpkt;
+
+	if (!atomic_read(&dev->online))
+		return -ENOTCONN;
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_length > req->length)
+			goto invalid;
+		/* data arrives in the OUT stage; handled on completion */
+		ret = w_length;
+		req->complete = rmnet_command_complete;
+		req->context = dev;
+		break;
+
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value)
+			goto invalid;
+		else {
+			unsigned len;
+
+			spin_lock(&dev->lock);
+			if (list_empty(&ctrl_dev->rx_q)) {
+				DBG(cdev, "ctrl resp queue empty"
+					" %02x.%02x v%04x i%04x l%d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					w_value, w_index, w_length);
+				spin_unlock(&dev->lock);
+				goto invalid;
+
+			}
+			cpkt = list_first_entry(&ctrl_dev->rx_q,
+					struct rmnet_ctrl_pkt, list);
+			list_del(&cpkt->list);
+			ctrl_dev->rx_len--;
+			spin_unlock(&dev->lock);
+
+			/* host buffer may be smaller than the packet */
+			len = min_t(unsigned, w_length, cpkt->len);
+			memcpy(req->buf, cpkt->buf, len);
+			ret = len;
+			req->complete = rmnet_response_complete;
+			req->context = dev;
+			rmnet_free_ctrl_pkt(cpkt);
+
+			dev->cpkts_tolaptop++;
+		}
+		break;
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		/* This is a workaround for RmNet and is borrowed from the
+		 * CDC/ACM standard. The host driver will issue the above ACM
+		 * standard request to the RmNet interface in the following
+		 * scenario: Once the network adapter is disabled from device
+		 * manager, the above request will be sent from the qcusbnet
+		 * host driver, with DTR being '0'. Once network adapter is
+		 * enabled from device manager (or during enumeration), the
+		 * request will be sent with DTR being '1'.
+		 */
+		if (w_value & ACM_CTRL_DTR)
+			ctrl_dev->cbits_to_modem |= TIOCM_DTR;
+		else
+			ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
+
+		ret = 0;
+
+		break;
+	default:
+
+invalid:
+	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+		ctrl->bRequestType, ctrl->bRequest,
+		w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (ret >= 0) {
+		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = (ret < w_length);
+		req->length = ret;
+		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (ret < 0)
+			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
+	}
+
+	return ret;
+}
+
+/*
+ * Release every USB request, skb and control packet queued on the
+ * SDIO, SMD and control paths, under dev->lock.  Called once the
+ * endpoints have been quiesced (disconnect or transport switch).
+ */
+static void rmnet_free_buf(struct rmnet_dev *dev)
+{
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	struct usb_request *req;
+	struct list_head *pool;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* free all usb requests in SDIO tx pool */
+	pool = &sdio_dev->tx_idle;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		/* SDIO requests borrow skb data as buf; don't free it here */
+		req->buf = NULL;
+		rmnet_free_req(dev->epout, req);
+	}
+
+	pool = &sdio_dev->rx_idle;
+	/* free all usb requests in SDIO rx pool */
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		req->buf = NULL;
+		rmnet_free_req(dev->epin, req);
+	}
+
+	while ((skb = __skb_dequeue(&sdio_dev->tx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	while ((skb = __skb_dequeue(&sdio_dev->rx_skb_queue)))
+		dev_kfree_skb_any(skb);
+
+	/* free all usb requests in SMD tx pool */
+	pool = &smd_dev->tx_idle;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epout, req);
+	}
+
+	pool = &smd_dev->rx_idle;
+	/* free all usb requests in SMD rx pool */
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	/* free all usb requests in SMD rx queue */
+	pool = &smd_dev->rx_queue;
+	while (!list_empty(pool)) {
+		req = list_first_entry(pool, struct usb_request, list);
+		list_del(&req->list);
+		rmnet_free_req(dev->epin, req);
+	}
+
+	pool = &ctrl_dev->tx_q;
+	while (!list_empty(pool)) {
+		cpkt = list_first_entry(pool, struct rmnet_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		rmnet_free_ctrl_pkt(cpkt);
+		ctrl_dev->tx_len--;
+	}
+
+	pool = &ctrl_dev->rx_q;
+	while (!list_empty(pool)) {
+		cpkt = list_first_entry(pool, struct rmnet_ctrl_pkt, list);
+		list_del(&cpkt->list);
+		rmnet_free_ctrl_pkt(cpkt);
+		ctrl_dev->rx_len--;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * Deferred cleanup after rmnet_disable(): stop the SMD tasklets if SMD
+ * was active, drop all queued buffers and reset the transport, then
+ * wake the /dev/rmnet_ctrl reader so it can notice the disconnect.
+ */
+static void rmnet_disconnect_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
+			disconnect_work);
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	if (dev->xport == USB_RMNET_XPORT_SMD) {
+		tasklet_kill(&smd_dev->smd_data.rx_tlet);
+		tasklet_kill(&smd_dev->smd_data.tx_tlet);
+	}
+
+	rmnet_free_buf(dev);
+	dev->xport = 0;
+
+	/* wakeup read thread */
+	wake_up(&ctrl_dev->tx_wait_q);
+}
+
+/* Bus-suspend hook: drop DTR towards the modem (see comment below). */
+static void rmnet_suspend(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	if (!atomic_read(&dev->online))
+		return;
+	/* This is a workaround for Windows Host bug during suspend.
+	 * Windows 7/xp Hosts are suppose to drop DTR, when Host suspended.
+	 * Since it is not being done, Hence exclusively dropping the DTR
+	 * from function driver suspend.
+	 */
+	ctrl_dev->cbits_to_modem &= ~TIOCM_DTR;
+}
+
+/*
+ * Function-disable hook: mark the link offline, flush and disable all
+ * three endpoints, free the notify request and defer the rest of the
+ * teardown (tasklets, queued buffers) to rmnet_disconnect_work.
+ */
+static void rmnet_disable(struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	if (!atomic_read(&dev->online))
+		return;
+
+	atomic_set(&dev->online, 0);
+
+	usb_ep_fifo_flush(dev->epnotify);
+	usb_ep_disable(dev->epnotify);
+	rmnet_free_req(dev->epnotify, dev->notify_req);
+
+	usb_ep_fifo_flush(dev->epout);
+	usb_ep_disable(dev->epout);
+
+	usb_ep_fifo_flush(dev->epin);
+	usb_ep_disable(dev->epin);
+
+	/* cleanup work */
+	ctrl_dev->cbits_to_modem = 0;
+	queue_work(dev->wq, &dev->disconnect_work);
+}
+
+#define SDIO_OPEN_RETRY_DELAY	msecs_to_jiffies(2000)
+#define SDIO_OPEN_MAX_RETRY	90
+/*
+ * Delayed work that opens the SDIO data mux channel, retrying every
+ * 2s (up to SDIO_OPEN_MAX_RETRY times) until the channel comes up.
+ */
+static void rmnet_open_sdio_work(struct work_struct *w)
+{
+	struct rmnet_dev *dev =
+		container_of(w, struct rmnet_dev, sdio_dev.open_work.work);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+	int ret;
+	static int retry_cnt;
+
+	/* Data channel for network packets */
+	ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
+				rmnet_sdio_data_receive_cb,
+				rmnet_sdio_data_write_done);
+	if (ret) {
+		if (retry_cnt > SDIO_OPEN_MAX_RETRY) {
+			ERROR(cdev, "Unable to open SDIO DATA channel\n");
+			return;
+		}
+		retry_cnt++;
+		queue_delayed_work(dev->wq, &sdio_dev->open_work,
+					SDIO_OPEN_RETRY_DELAY);
+		return;
+	}
+
+
+	atomic_set(&sdio_dev->sdio_open, 1);
+	pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
+				__func__, retry_cnt);
+	retry_cnt = 0;
+	return;
+}
+
+/*
+ * set_alt: allocate the notification request, enable the notify/in/out
+ * endpoints at the appropriate speed, zero all statistics and mark the
+ * link online.
+ * NOTE(review): usb_ep_enable() return values are ignored here, and
+ * rmnet_alloc_req() is assumed to return an ERR_PTR on failure --
+ * confirm both against the helper's definition.
+ */
+static int rmnet_set_alt(struct usb_function *f,
+			unsigned intf, unsigned alt)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct usb_composite_dev *cdev = dev->cdev;
+
+	/* allocate notification */
+	dev->notify_req = rmnet_alloc_req(dev->epnotify,
+				RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
+
+	if (IS_ERR(dev->notify_req))
+		return PTR_ERR(dev->notify_req);
+
+	dev->notify_req->complete = rmnet_notify_complete;
+	dev->notify_req->context = dev;
+	dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
+	usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
+				&rmnet_hs_notify_desc,
+				&rmnet_fs_notify_desc));
+
+	dev->epin->driver_data = dev;
+	usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
+				&rmnet_hs_in_desc,
+				&rmnet_fs_in_desc));
+	dev->epout->driver_data = dev;
+	usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
+				&rmnet_hs_out_desc,
+				&rmnet_fs_out_desc));
+
+	dev->dpkts_tolaptop = 0;
+	dev->cpkts_tolaptop = 0;
+	dev->cpkts_tomdm = 0;
+	dev->dpkts_tomdm = 0;
+	dev->dpkts_tomsm = 0;
+	dev->tx_drp_cnt = 0;
+	dev->cpkts_drp_cnt = 0;
+	sdio_dev->dpkts_pending_atdmux = 0;
+	atomic_set(&dev->online, 1);
+
+	return 0;
+}
+
+/*
+ * sysfs "transport" store: switch the data path between SMD (0) and
+ * SDIO (non-zero).  Quiesces the current transport, frees its queued
+ * requests/skbs under dev->lock, then enables the new one.
+ */
+static ssize_t transport_store(
+		struct device *device, struct device_attribute *attr,
+		const char *buf, size_t size)
+{
+	struct usb_function *f = dev_get_drvdata(device);
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	int value;
+	enum usb_rmnet_xport_type given_xport;
+	enum usb_rmnet_xport_type t;
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct list_head *pool;
+	struct sk_buff_head *skb_pool;
+	struct sk_buff *skb;
+	struct usb_request *req;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_err("%s: usb cable is not connected\n", __func__);
+		return -EINVAL;
+	}
+
+	/* reject unparseable input; 'value' would otherwise be used
+	 * uninitialized */
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if (value)
+		given_xport = USB_RMNET_XPORT_SDIO;
+	else
+		given_xport = USB_RMNET_XPORT_SMD;
+
+	if (given_xport == dev->xport) {
+		pr_err("%s: given_xport:%s cur_xport:%s doing nothing\n",
+				__func__, xport_to_str(given_xport),
+				xport_to_str(dev->xport));
+		return 0;
+	}
+
+	pr_debug("usb_rmnet: TransportRequested: %s\n",
+			xport_to_str(given_xport));
+
+	/* prevent any other pkts to/from usb  */
+	t = dev->xport;
+	dev->xport = USB_RMNET_XPORT_UNDEFINED;
+	if (t != USB_RMNET_XPORT_UNDEFINED) {
+		usb_ep_fifo_flush(dev->epin);
+		usb_ep_fifo_flush(dev->epout);
+	}
+
+	switch (t) {
+	case USB_RMNET_XPORT_SDIO:
+		spin_lock_irqsave(&dev->lock, flags);
+		/* tx_idle */
+
+		sdio_dev->dpkts_pending_atdmux = 0;
+
+		pool = &sdio_dev->tx_idle;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			req->buf = NULL;
+			rmnet_free_req(dev->epout, req);
+		}
+
+		/* rx_idle */
+		pool = &sdio_dev->rx_idle;
+		/* free all usb requests in SDIO rx pool */
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			req->buf = NULL;
+			rmnet_free_req(dev->epin, req);
+		}
+
+		/* tx_skb_queue */
+		skb_pool = &sdio_dev->tx_skb_queue;
+		while ((skb = __skb_dequeue(skb_pool)))
+			dev_kfree_skb_any(skb);
+		/* rx_skb_queue */
+		skb_pool = &sdio_dev->rx_skb_queue;
+		while ((skb = __skb_dequeue(skb_pool)))
+			dev_kfree_skb_any(skb);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	case USB_RMNET_XPORT_SMD:
+		/* close smd xport */
+		tasklet_kill(&smd_dev->smd_data.rx_tlet);
+		tasklet_kill(&smd_dev->smd_data.tx_tlet);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		/* free all usb requests in SMD tx pool */
+		pool = &smd_dev->tx_idle;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			rmnet_free_req(dev->epout, req);
+		}
+
+		pool = &smd_dev->rx_idle;
+		/* free all usb requests in SMD rx pool */
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			rmnet_free_req(dev->epin, req);
+		}
+
+		/* free all usb requests in SMD rx queue */
+		pool = &smd_dev->rx_queue;
+		while (!list_empty(pool)) {
+			req = list_first_entry(pool, struct usb_request, list);
+			list_del(&req->list);
+			rmnet_free_req(dev->epin, req);
+		}
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+		break;
+	default:
+		pr_debug("%s: undefined xport, do nothing\n", __func__);
+	}
+
+	dev->xport = given_xport;
+
+	switch (dev->xport) {
+	case USB_RMNET_XPORT_SDIO:
+		rmnet_sdio_enable(dev);
+		break;
+	case USB_RMNET_XPORT_SMD:
+		rmnet_smd_enable(dev);
+		break;
+	default:
+		/* we should never come here */
+		pr_err("%s: undefined transport\n", __func__);
+	}
+
+	return size;
+}
+static DEVICE_ATTR(transport, S_IRUGO | S_IWUSR, NULL, transport_store);
+
+/*
+ * bind: claim an interface number, autoconfigure the bulk in/out and
+ * interrupt endpoints, mirror their addresses into the high-speed
+ * descriptors, create the sysfs "transport" attribute and kick off the
+ * SDIO channel-open work.  Returns 0 or a negative errno; all endpoint
+ * failures collapse to -ENODEV via the 'out' path.
+ */
+static int rmnet_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	int id, ret;
+	struct usb_ep *ep;
+
+	dev->cdev = cdev;
+
+	/* allocate interface ID */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	dev->ifc_id = id;
+	rmnet_interface_desc.bInterfaceNumber = id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epin = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epout = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
+	if (!ep)
+		goto out;
+	ep->driver_data = cdev; /* claim endpoint */
+	dev->epnotify = ep;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		rmnet_hs_in_desc.bEndpointAddress =
+			rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_hs_out_desc.bEndpointAddress =
+			rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_hs_notify_desc.bEndpointAddress =
+			rmnet_fs_notify_desc.bEndpointAddress;
+	}
+
+	ret = device_create_file(f->dev, &dev_attr_transport);
+	if (ret)
+		goto out;
+
+	queue_delayed_work(dev->wq, &sdio_dev->open_work, 0);
+
+	return 0;
+
+out:
+	/* release any endpoints we managed to claim */
+	if (dev->epnotify)
+		dev->epnotify->driver_data = NULL;
+	if (dev->epout)
+		dev->epout->driver_data = NULL;
+	if (dev->epin)
+		dev->epin->driver_data = NULL;
+
+	return -ENODEV;
+}
+
+/* One-time init of the SMD side: tasklets, wait queue, request lists. */
+static void rmnet_smd_init(struct rmnet_smd_dev *smd_dev)
+{
+	struct rmnet_dev *dev = container_of(smd_dev,
+			struct rmnet_dev, smd_dev);
+
+	atomic_set(&smd_dev->smd_data.rx_pkt, 0);
+	tasklet_init(&smd_dev->smd_data.rx_tlet, rmnet_smd_data_rx_tlet,
+					(unsigned long) dev);
+	tasklet_init(&smd_dev->smd_data.tx_tlet, rmnet_smd_data_tx_tlet,
+					(unsigned long) dev);
+
+	init_waitqueue_head(&smd_dev->smd_data.wait);
+
+	INIT_LIST_HEAD(&smd_dev->rx_idle);
+	INIT_LIST_HEAD(&smd_dev->rx_queue);
+	INIT_LIST_HEAD(&smd_dev->tx_idle);
+}
+
+/* One-time init of the SDIO side: work items, request and skb queues. */
+static void rmnet_sdio_init(struct rmnet_sdio_dev *sdio_dev)
+{
+	INIT_WORK(&sdio_dev->data_rx_work, rmnet_sdio_data_rx_work);
+
+	INIT_DELAYED_WORK(&sdio_dev->open_work, rmnet_open_sdio_work);
+
+	INIT_LIST_HEAD(&sdio_dev->rx_idle);
+	INIT_LIST_HEAD(&sdio_dev->tx_idle);
+	skb_queue_head_init(&sdio_dev->tx_skb_queue);
+	skb_queue_head_init(&sdio_dev->rx_skb_queue);
+}
+
+/*
+ * unbind: close the SMD data channel.
+ * NOTE(review): smd_close() is called unconditionally -- assumes the
+ * channel was opened (ch non-NULL) even when only SDIO was ever used;
+ * confirm against rmnet_smd_enable()/smd_open behavior.
+ */
+static void
+rmnet_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct rmnet_dev *dev = container_of(f, struct rmnet_dev, function);
+	struct rmnet_smd_dev *smd_dev = &dev->smd_dev;
+
+	smd_close(smd_dev->smd_data.ch);
+	smd_dev->smd_data.flags = 0;
+
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+/*
+ * debugfs read: render all packet counters and transport state into a
+ * temporary buffer (under dev->lock) and copy it to userspace.
+ */
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	char *debug_buf;
+	unsigned long flags;
+	int ret;
+
+	debug_buf = kmalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!debug_buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ret = scnprintf(debug_buf, DEBUG_BUF_SIZE,
+			"dpkts_tomsm:  %lu\n"
+			"dpkts_tomdm: %lu\n"
+			"cpkts_tomdm: %lu\n"
+			"dpkts_tolaptop: %lu\n"
+			"cpkts_tolaptop:  %lu\n"
+			"cbits_to_modem: %lu\n"
+			"tx skb size:     %u\n"
+			"rx_skb_size:     %u\n"
+			"dpkts_pending_at_dmux: %u\n"
+			"tx drp cnt: %lu\n"
+			"cpkts_drp_cnt: %lu\n"
+			"cpkt_tx_qlen: %lu\n"
+			"cpkt_rx_qlen_to_modem: %lu\n"
+			"xport: %s\n"
+			"ctr_ch_opened:	%d\n",
+			dev->dpkts_tomsm, dev->dpkts_tomdm,
+			dev->cpkts_tomdm, dev->dpkts_tolaptop,
+			dev->cpkts_tolaptop, ctrl_dev->cbits_to_modem,
+			sdio_dev->tx_skb_queue.qlen,
+			sdio_dev->rx_skb_queue.qlen,
+			sdio_dev->dpkts_pending_atdmux, dev->tx_drp_cnt,
+			dev->cpkts_drp_cnt,
+			ctrl_dev->tx_len, ctrl_dev->rx_len,
+			xport_to_str(dev->xport), ctrl_dev->opened);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, debug_buf, ret);
+
+	kfree(debug_buf);
+
+	return ret;
+}
+
+/*
+ * debugfs write: zero the packet counters shown by debug_read_stats().
+ * Take dev->lock so the reset doesn't race with the stats reader and
+ * the completion handlers that bump these counters under the same lock.
+ */
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct rmnet_dev *dev = file->private_data;
+	struct rmnet_sdio_dev *sdio_dev = &dev->sdio_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->dpkts_tolaptop = 0;
+	dev->cpkts_tolaptop = 0;
+	dev->cpkts_tomdm = 0;
+	dev->dpkts_tomdm = 0;
+	dev->dpkts_tomsm = 0;
+	sdio_dev->dpkts_pending_atdmux = 0;
+	dev->tx_drp_cnt = 0;
+	dev->cpkts_drp_cnt = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return count;
+}
+
+/* debugfs open: stash the rmnet_dev pointer for the read/write ops. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+
+	return 0;
+}
+
+/* static: only referenced by usb_debugfs_init() in this file */
+static const struct file_operations rmnet_svlte_debug_stats_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+/*
+ * Create /sys/kernel/debug/usb_rmnet/status exposing the stats above.
+ * NOTE(review): only IS_ERR() is checked; debugfs_create_dir() can
+ * also return NULL on failure in this kernel -- confirm.
+ */
+static void usb_debugfs_init(struct rmnet_dev *dev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, dev,
+			&rmnet_svlte_debug_stats_ops);
+}
+#else
+/* no-op stub when debugfs is not configured */
+static void usb_debugfs_init(struct rmnet_dev *dev) {}
+#endif
+
+/*
+ * open() for /dev/rmnet_ctrl: single-open device.  Marks the control
+ * channel opened and ties the file to the global rmnet_dev.
+ */
+int usb_rmnet_ctrl_open(struct inode *inode, struct file *fp)
+{
+	struct rmnet_dev *dev =  _dev;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (ctrl_dev->opened) {
+		ret = -EBUSY;
+	} else {
+		ctrl_dev->opened = 1;
+		fp->private_data = dev;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (ret)
+		pr_err("%s: device is already opened\n", __func__);
+
+	return ret;
+}
+
+
+/* release() for /dev/rmnet_ctrl: mark the control channel closed. */
+int usb_rmnet_ctrl_release(struct inode *inode, struct file *fp)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ctrl_dev->opened = 0;
+	/* NULL, not 0: private_data is a pointer */
+	fp->private_data = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*
+ * read() for /dev/rmnet_ctrl: hand the oldest host->modem control
+ * packet (queued by rmnet_command_complete) to userspace.  Blocks
+ * until a packet arrives or the cable is disconnected.
+ */
+ssize_t usb_rmnet_ctrl_read(struct file *fp,
+		      char __user *buf,
+		      size_t count,
+		      loff_t *ppos)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+	int ret = 0;
+
+ctrl_read:
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(&ctrl_dev->tx_q)) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		/* Implement sleep and wakeup here */
+		ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
+					!list_empty(&ctrl_dev->tx_q) ||
+					!atomic_read(&dev->online));
+		if (ret < 0)
+			return ret;
+
+		goto ctrl_read;
+	}
+
+	cpkt = list_first_entry(&ctrl_dev->tx_q, struct rmnet_ctrl_pkt, list);
+	if (cpkt->len > count) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		/* %zu: count is size_t; %d here was a printk format bug */
+		pr_err("%s: cpkt size:%zu > buf size:%zu\n",
+				__func__, (size_t)cpkt->len, count);
+		return -ENOMEM;
+	}
+	list_del(&cpkt->list);
+	ctrl_dev->tx_len--;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	count = cpkt->len;
+
+	ret = copy_to_user(buf, cpkt->buf, count);
+	dev->cpkts_tomdm++;
+
+	rmnet_free_ctrl_pkt(cpkt);
+
+	/* copy_to_user() returns the number of bytes NOT copied; a
+	 * positive return would be misread as a byte count by the
+	 * caller, so report -EFAULT instead. */
+	if (ret)
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * write() for /dev/rmnet_ctrl: queue a modem->host control packet and
+ * notify the host via a RESPONSE_AVAILABLE interrupt transfer.
+ */
+ssize_t usb_rmnet_ctrl_write(struct file *fp,
+		       const char __user *buf,
+		       size_t count,
+		       loff_t *ppos)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+
+	if (!atomic_read(&dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!count) {
+		pr_err("%s: zero length ctrl pkt\n", __func__);
+		return -ENODEV;
+	}
+
+	if (count > MAX_CTRL_PKT_SIZE) {
+		/* %zu: count is size_t; %d here was a printk format bug */
+		pr_err("%s: max_pkt_size:%d given_pkt_size:%zu\n",
+				__func__, MAX_CTRL_PKT_SIZE, count);
+		return -ENOMEM;
+	}
+
+	cpkt = rmnet_alloc_ctrl_pkt(count, GFP_KERNEL);
+	if (!cpkt) {
+		pr_err("%s: cannot allocate rmnet ctrl pkt\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* copy_from_user() returns bytes NOT copied; returning that
+	 * positive residue would look like a short write, so map any
+	 * failure to -EFAULT. */
+	if (copy_from_user(cpkt->buf, buf, count)) {
+		pr_err("%s: copy_from_user failed\n", __func__);
+		rmnet_free_ctrl_pkt(cpkt);
+		return -EFAULT;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ctrl_dev->rx_len++;
+	list_add(&cpkt->list, &ctrl_dev->rx_q);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	ctrl_response_available(dev);
+
+	return count;
+}
+
+
+#define RMNET_CTRL_GET_DTR	_IOR(0xFE, 0, int)
+/*
+ * ioctl(): report the modem-control bits last written by the host
+ * (see USB_CDC_REQ_SET_CONTROL_LINE_STATE handling in rmnet_setup).
+ */
+static long
+usb_rmnet_ctrl_ioctl(struct file *fp, unsigned c, unsigned long value)
+{
+	struct rmnet_dev *dev = fp->private_data;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+	/* __user annotation: 'value' carries a userspace pointer */
+	unsigned long __user *temp = (unsigned long __user *)value;
+
+	if (c != RMNET_CTRL_GET_DTR)
+		return -ENODEV;
+
+	/* copy_to_user() returns bytes not copied, not an errno */
+	if (copy_to_user(temp,
+			&ctrl_dev->cbits_to_modem,
+			sizeof(*temp)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* file operations for the /dev/rmnet_ctrl character device */
+static const struct file_operations rmnet_ctrl_fops = {
+	.owner		= THIS_MODULE,
+	.open		= usb_rmnet_ctrl_open,
+	.release	= usb_rmnet_ctrl_release,
+	.read		= usb_rmnet_ctrl_read,
+	.write		= usb_rmnet_ctrl_write,
+	.unlocked_ioctl	= usb_rmnet_ctrl_ioctl,
+};
+
+/* misc device node userspace uses to exchange QMI control packets */
+static struct miscdevice rmnet_ctrl_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "rmnet_ctrl",
+	.fops = &rmnet_ctrl_fops,
+};
+
+/*
+ * Initialize the control-channel queues/waitqueue and register the
+ * /dev/rmnet_ctrl misc device.  Returns 0 or misc_register()'s errno.
+ */
+static int rmnet_ctrl_device_init(struct rmnet_dev *dev)
+{
+	int ret;
+	struct rmnet_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	INIT_LIST_HEAD(&ctrl_dev->tx_q);
+	INIT_LIST_HEAD(&ctrl_dev->rx_q);
+	init_waitqueue_head(&ctrl_dev->tx_wait_q);
+
+	ret = misc_register(&rmnet_ctrl_dev);
+	if (ret) {
+		pr_err("%s: failed to register misc device\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and register the rmnet function on configuration @c:
+ * workqueue, SMD/SDIO sub-devices, /dev/rmnet_ctrl misc device, then
+ * usb_add_function().  Unwinds each step on failure (previously the
+ * misc device leaked when usb_add_function() failed, and _dev was left
+ * dangling).
+ */
+static int rmnet_function_add(struct usb_configuration *c)
+{
+	struct rmnet_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	_dev = dev;
+
+	dev->wq = create_singlethread_workqueue("k_rmnet_work");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_dev;
+	}
+
+	spin_lock_init(&dev->lock);
+	atomic_set(&dev->notify_count, 0);
+	atomic_set(&dev->online, 0);
+	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);
+	rmnet_smd_init(&dev->smd_dev);
+	rmnet_sdio_init(&dev->sdio_dev);
+
+	ret = rmnet_ctrl_device_init(dev);
+	if (ret) {
+		pr_debug("%s: rmnet_ctrl_device_init failed, err:%d\n",
+				__func__, ret);
+		goto free_wq;
+	}
+
+	dev->function.name = "rmnet_smd_sdio";
+	dev->function.strings = rmnet_strings;
+	dev->function.descriptors = rmnet_fs_function;
+	dev->function.hs_descriptors = rmnet_hs_function;
+	dev->function.bind = rmnet_bind;
+	dev->function.unbind = rmnet_unbind;
+	dev->function.setup = rmnet_setup;
+	dev->function.set_alt = rmnet_set_alt;
+	dev->function.disable = rmnet_disable;
+	dev->function.suspend = rmnet_suspend;
+
+	ret = usb_add_function(c, &dev->function);
+	if (ret)
+		goto free_misc;
+
+	usb_debugfs_init(dev);
+
+	return 0;
+
+free_misc:
+	misc_deregister(&rmnet_ctrl_dev);
+free_wq:
+	destroy_workqueue(dev->wq);
+free_dev:
+	_dev = NULL;
+	kfree(dev);
+
+	return ret;
+}
+
+#ifdef CONFIG_USB_ANDROID_RMNET_SMD_SDIO
+/* Registration glue for the Android composite gadget framework. */
+static struct android_usb_function rmnet_function = {
+	.name = "rmnet_smd_sdio",
+	.bind_config = rmnet_function_add,
+};
+
+static int __init rmnet_init(void)
+{
+	android_register_function(&rmnet_function);
+	return 0;
+}
+module_init(rmnet_init);
+
+#endif /* CONFIG_USB_ANDROID_RMNET_SMD_SDIO */
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 490b00b..0c31544 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/usb/android_composite.h>
+#include <mach/usb_gadget_fserial.h>
 
 #include "u_serial.h"
 #include "gadget_chips.h"
@@ -30,6 +32,9 @@
 struct gser_descs {
 	struct usb_endpoint_descriptor	*in;
 	struct usb_endpoint_descriptor	*out;
+#ifdef CONFIG_MODEM_SUPPORT
+	struct usb_endpoint_descriptor	*notify;
+#endif
 };
 
 struct f_gser {
@@ -39,29 +44,129 @@
 
 	struct gser_descs		fs;
 	struct gser_descs		hs;
+	u8				online;
+	enum transport_type		transport;
+
+#ifdef CONFIG_MODEM_SUPPORT
+	u8				pending;
+	spinlock_t			lock;
+	struct usb_ep			*notify;
+	struct usb_endpoint_descriptor	*notify_desc;
+	struct usb_request		*notify_req;
+
+	struct usb_cdc_line_coding	port_line_coding;
+
+	/* SetControlLineState request */
+	u16				port_handshake_bits;
+#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+
+	/* SerialState notification */
+	u16				serial_state;
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
+#endif
 };
 
+#ifdef CONFIG_USB_F_SERIAL
+static unsigned int no_tty_ports;
+static unsigned int no_sdio_ports;
+static unsigned int no_smd_ports;
+static unsigned int nr_ports;
+#endif
+
+static struct port_info {
+	enum transport_type	transport;
+	unsigned		port_num;
+	unsigned		client_port_num;
+} gserial_ports[GSERIAL_NO_PORTS];
+
+/* True only for the SDIO serial transport. */
+static inline bool is_transport_sdio(enum transport_type t)
+{
+	/* bool function: return the comparison directly */
+	return t == USB_GADGET_FSERIAL_TRANSPORT_SDIO;
+}
+
 static inline struct f_gser *func_to_gser(struct usb_function *f)
 {
 	return container_of(f, struct f_gser, port.func);
 }
 
+#ifdef CONFIG_MODEM_SUPPORT
+static inline struct f_gser *port_to_gser(struct gserial *p)
+{
+	return container_of(p, struct f_gser, port);
+}
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */
+#endif
 /*-------------------------------------------------------------------------*/
 
 /* interface descriptor: */
 
-static struct usb_interface_descriptor gser_interface_desc __initdata = {
+static struct usb_interface_descriptor gser_interface_desc = {
 	.bLength =		USB_DT_INTERFACE_SIZE,
 	.bDescriptorType =	USB_DT_INTERFACE,
 	/* .bInterfaceNumber = DYNAMIC */
+#ifdef CONFIG_MODEM_SUPPORT
+	.bNumEndpoints =	3,
+#else
 	.bNumEndpoints =	2,
+#endif
 	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
 	.bInterfaceSubClass =	0,
 	.bInterfaceProtocol =	0,
 	/* .iInterface = DYNAMIC */
 };
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_cdc_header_desc gser_header_desc  = {
+	.bLength =		sizeof(gser_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		__constant_cpu_to_le16(0x0110),
+};
 
+static struct usb_cdc_call_mgmt_descriptor
+gser_call_mgmt_descriptor  = {
+	.bLength =		sizeof(gser_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities =	0,
+	/* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor gser_descriptor  = {
+	.bLength =		sizeof(gser_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+	.bmCapabilities =	USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc gser_union_desc  = {
+	.bLength =		sizeof(gser_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+#endif
 /* full speed support: */
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_endpoint_descriptor gser_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+#endif
 
 static struct usb_endpoint_descriptor gser_fs_in_desc __initdata = {
 	.bLength =		USB_DT_ENDPOINT_SIZE,
@@ -79,29 +184,53 @@
 
 static struct usb_descriptor_header *gser_fs_function[] __initdata = {
 	(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+	(struct usb_descriptor_header *) &gser_header_desc,
+	(struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gser_descriptor,
+	(struct usb_descriptor_header *) &gser_union_desc,
+	(struct usb_descriptor_header *) &gser_fs_notify_desc,
+#endif
 	(struct usb_descriptor_header *) &gser_fs_in_desc,
 	(struct usb_descriptor_header *) &gser_fs_out_desc,
 	NULL,
 };
 
 /* high speed support: */
+#ifdef CONFIG_MODEM_SUPPORT
+static struct usb_endpoint_descriptor gser_hs_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+#endif
 
 static struct usb_endpoint_descriptor gser_hs_in_desc __initdata = {
 	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize =	cpu_to_le16(512),
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
 };
 
-static struct usb_endpoint_descriptor gser_hs_out_desc __initdata = {
+static struct usb_endpoint_descriptor gser_hs_out_desc = {
 	.bLength =		USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType =	USB_DT_ENDPOINT,
 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
-	.wMaxPacketSize =	cpu_to_le16(512),
+	.wMaxPacketSize =	__constant_cpu_to_le16(512),
 };
 
 static struct usb_descriptor_header *gser_hs_function[] __initdata = {
 	(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+	(struct usb_descriptor_header *) &gser_header_desc,
+	(struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gser_descriptor,
+	(struct usb_descriptor_header *) &gser_union_desc,
+	(struct usb_descriptor_header *) &gser_hs_notify_desc,
+#endif
 	(struct usb_descriptor_header *) &gser_hs_in_desc,
 	(struct usb_descriptor_header *) &gser_hs_out_desc,
 	NULL,
@@ -124,27 +253,232 @@
 	NULL,
 };
 
+static const char *transport_to_str(enum transport_type t)
+{
+	switch (t) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		return "TTY";
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		return "SDIO";
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		return "SMD";
+	}
+
+	return "NONE";	/* unknown/unset transport */
+}
+
+#ifdef CONFIG_USB_F_SERIAL
+static int gport_setup(struct usb_configuration *c)
+{
+	int ret = 0;
+
+	pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports, nr_ports);
+
+	if (no_tty_ports)
+		ret = gserial_setup(c->cdev->gadget, no_tty_ports);
+	if (no_sdio_ports)
+		ret = gsdio_setup(c->cdev->gadget, no_sdio_ports);
+	if (no_smd_ports)
+		ret = gsmd_setup(c->cdev->gadget, no_smd_ports);
+
+	return ret;
+}
+#endif
+static int gport_connect(struct f_gser *gser)
+{
+	unsigned port_num;
+
+	pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n",
+			__func__, transport_to_str(gser->transport),
+			gser, &gser->port, gser->port_num);
+
+	port_num = gserial_ports[gser->port_num].client_port_num;
+
+	switch (gser->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_connect(&gser->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_connect(&gser->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_connect(&gser->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport: %s\n", __func__,
+				transport_to_str(gser->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int gport_disconnect(struct f_gser *gser)
+{
+	unsigned port_num;
+
+	pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n",
+			__func__, transport_to_str(gser->transport),
+			gser, &gser->port, gser->port_num);
+
+	port_num = gserial_ports[gser->port_num].client_port_num;
+
+	switch (gser->transport) {
+	case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+		gserial_disconnect(&gser->port);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+		gsdio_disconnect(&gser->port, port_num);
+		break;
+	case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+		gsmd_disconnect(&gser->port, port_num);
+		break;
+	default:
+		pr_err("%s: Un-supported transport:%s\n", __func__,
+				transport_to_str(gser->transport));
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MODEM_SUPPORT
+static void gser_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_gser            *gser = ep->driver_data;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	if (req->status != 0) {
+		DBG(cdev, "gser ttyGS%d completion, err %d\n",
+				gser->port_num, req->status);
+		return;
+	}
+
+	/* normal completion */
+	if (req->actual != sizeof(gser->port_line_coding)) {
+		DBG(cdev, "gser ttyGS%d short resp, len %d\n",
+				gser->port_num, req->actual);
+		usb_ep_set_halt(ep);
+	} else {
+		struct usb_cdc_line_coding	*value = req->buf;
+		gser->port_line_coding = *value;
+	}
+}
 /*-------------------------------------------------------------------------*/
 
+static int
+gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_gser            *gser = func_to_gser(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	 *req = cdev->req;
+	int			 value = -EOPNOTSUPP;
+	u16			 w_index = le16_to_cpu(ctrl->wIndex);
+	u16			 w_value = le16_to_cpu(ctrl->wValue);
+	u16			 w_length = le16_to_cpu(ctrl->wLength);
+
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* SET_LINE_CODING ... just read and save what the host sends */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_LINE_CODING:
+		if (w_length != sizeof(struct usb_cdc_line_coding))
+			goto invalid;
+
+		value = w_length;
+		cdev->gadget->ep0->driver_data = gser;
+		req->complete = gser_complete_set_line_coding;
+		break;
+
+	/* GET_LINE_CODING ... return what host sent, or initial value */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_GET_LINE_CODING:
+		value = min_t(unsigned, w_length,
+				sizeof(struct usb_cdc_line_coding));
+		memcpy(req->buf, &gser->port_line_coding, value);
+		break;
+
+	/* SET_CONTROL_LINE_STATE ... save what the host sent */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+
+		value = 0;
+		gser->port_handshake_bits = w_value;
+		if (gser->port.notify_modem) {
+			unsigned port_num =
+				gserial_ports[gser->port_num].client_port_num;
+
+			gser->port.notify_modem(&gser->port,
+					port_num, w_value);
+		}
+		break;
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+			gser->port_num, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "gser response on ttyGS%d, err %d\n",
+					gser->port_num, value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+#endif
 static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct f_gser		*gser = func_to_gser(f);
 	struct usb_composite_dev *cdev = f->config->cdev;
+	int rc = 0;
 
 	/* we know alt == 0, so this is an activation or a reset */
 
-	if (gser->port.in->driver_data) {
-		DBG(cdev, "reset generic ttyGS%d\n", gser->port_num);
-		gserial_disconnect(&gser->port);
-	} else {
-		DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
-		gser->port.in_desc = ep_choose(cdev->gadget,
-				gser->hs.in, gser->fs.in);
-		gser->port.out_desc = ep_choose(cdev->gadget,
-				gser->hs.out, gser->fs.out);
+#ifdef CONFIG_MODEM_SUPPORT
+	if (gser->notify->driver_data) {
+		DBG(cdev, "reset generic ctl ttyGS%d\n", gser->port_num);
+		usb_ep_disable(gser->notify);
 	}
-	gserial_connect(&gser->port, gser->port_num);
-	return 0;
+	gser->notify_desc = ep_choose(cdev->gadget,
+			gser->hs.notify,
+			gser->fs.notify);
+	rc = usb_ep_enable(gser->notify, gser->notify_desc);
+	if (rc) {
+		ERROR(cdev, "can't enable %s, result %d\n",
+					gser->notify->name, rc);
+		return rc;
+	}
+	gser->notify->driver_data = gser;
+#endif
+
+	if (gser->port.in->driver_data) {
+		DBG(cdev, "reset generic data ttyGS%d\n", gser->port_num);
+		gport_disconnect(gser);
+	} else {
+		DBG(cdev, "activate generic data ttyGS%d\n", gser->port_num);
+	}
+	gser->port.in_desc = ep_choose(cdev->gadget,
+			gser->hs.in, gser->fs.in);
+	gser->port.out_desc = ep_choose(cdev->gadget,
+			gser->hs.out, gser->fs.out);
+
+	gport_connect(gser);
+
+	gser->online = 1;
+	return rc;
 }
 
 static void gser_disable(struct usb_function *f)
@@ -153,9 +487,180 @@
 	struct usb_composite_dev *cdev = f->config->cdev;
 
 	DBG(cdev, "generic ttyGS%d deactivated\n", gser->port_num);
-	gserial_disconnect(&gser->port);
+
+	gport_disconnect(gser);
+
+#ifdef CONFIG_MODEM_SUPPORT
+	usb_ep_fifo_flush(gser->notify);
+	usb_ep_disable(gser->notify);
+#endif
+	gser->online = 0;
+}
+#ifdef CONFIG_MODEM_SUPPORT
+static int gser_notify(struct f_gser *gser, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep			*ep = gser->notify;
+	struct usb_request		*req;
+	struct usb_cdc_notification	*notify;
+	const unsigned			len = sizeof(*notify) + length;
+	void				*buf;
+	int				status;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	req = gser->notify_req;
+	gser->notify_req = NULL;
+	gser->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(gser->data_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status < 0) {
+		ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
+				gser->port_num, status);
+		gser->notify_req = req;
+	}
+
+	return status;
 }
 
+static int gser_notify_serial_state(struct f_gser *gser)
+{
+	int			 status;
+	unsigned long flags;
+	struct usb_composite_dev *cdev = gser->port.func.config->cdev;
+
+	spin_lock_irqsave(&gser->lock, flags);
+	if (gser->notify_req) {
+		DBG(cdev, "gser ttyGS%d serial state %04x\n",
+				gser->port_num, gser->serial_state);
+		status = gser_notify(gser, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &gser->serial_state,
+					sizeof(gser->serial_state));
+	} else {
+		gser->pending = true;
+		status = 0;
+	}
+	spin_unlock_irqrestore(&gser->lock, flags);
+	return status;
+}
+
+static void gser_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_gser *gser = req->context;
+	u8	      doit = false;
+	unsigned long flags;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+	spin_lock_irqsave(&gser->lock, flags);
+	if (req->status != -ESHUTDOWN)
+		doit = gser->pending;
+	gser->notify_req = req;
+	spin_unlock_irqrestore(&gser->lock, flags);
+
+	if (doit && gser->online)
+		gser_notify_serial_state(gser);
+}
+static void gser_connect(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	gser_notify_serial_state(gser);
+}
+
+unsigned int gser_get_dtr(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	if (gser->port_handshake_bits & ACM_CTRL_DTR)
+		return 1;
+	else
+		return 0;
+}
+
+unsigned int gser_get_rts(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	if (gser->port_handshake_bits & ACM_CTRL_RTS)
+		return 1;
+	else
+		return 0;
+}
+
+unsigned int gser_send_carrier_detect(struct gserial *port, unsigned int yes)
+{
+	struct f_gser *gser = port_to_gser(port);
+	u16			state;
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_DCD;
+	if (yes)
+		state |= ACM_CTRL_DCD;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+
+}
+
+unsigned int gser_send_ring_indicator(struct gserial *port, unsigned int yes)
+{
+	struct f_gser *gser = port_to_gser(port);
+	u16			state;
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_RI;
+	if (yes)
+		state |= ACM_CTRL_RI;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+
+}
+static void gser_disconnect(struct gserial *port)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	gser_notify_serial_state(gser);
+}
+
+static int gser_send_break(struct gserial *port, int duration)
+{
+	struct f_gser *gser = port_to_gser(port);
+	u16			state;
+
+	state = gser->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	gser->serial_state = state;
+	return gser_notify_serial_state(gser);
+}
+
+static int gser_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits)
+{
+	struct f_gser *gser = port_to_gser(port);
+
+	gser->serial_state = ctrl_bits;
+
+	return gser_notify_serial_state(gser);
+}
+#endif
 /*-------------------------------------------------------------------------*/
 
 /* serial function driver setup/binding */
@@ -190,6 +695,23 @@
 	gser->port.out = ep;
 	ep->driver_data = cdev;	/* claim */
 
+#ifdef CONFIG_MODEM_SUPPORT
+	ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	gser->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+	/* allocate notification */
+	gser->notify_req = gs_alloc_req(ep,
+			sizeof(struct usb_cdc_notification) + 2,
+			GFP_KERNEL);
+	if (!gser->notify_req)
+		goto fail;
+
+	gser->notify_req->complete = gser_notify_complete;
+	gser->notify_req->context = gser;
+#endif
+
 	/* copy descriptors, and track endpoint copies */
 	f->descriptors = usb_copy_descriptors(gser_fs_function);
 
@@ -197,6 +719,10 @@
 			f->descriptors, &gser_fs_in_desc);
 	gser->fs.out = usb_find_endpoint(gser_fs_function,
 			f->descriptors, &gser_fs_out_desc);
+#ifdef CONFIG_MODEM_SUPPORT
+	gser->fs.notify = usb_find_endpoint(gser_fs_function,
+			f->descriptors, &gser_fs_notify_desc);
+#endif
 
 
 	/* support all relevant hardware speeds... we expect that when
@@ -208,6 +734,10 @@
 				gser_fs_in_desc.bEndpointAddress;
 		gser_hs_out_desc.bEndpointAddress =
 				gser_fs_out_desc.bEndpointAddress;
+#ifdef CONFIG_MODEM_SUPPORT
+		gser_hs_notify_desc.bEndpointAddress =
+				gser_fs_notify_desc.bEndpointAddress;
+#endif
 
 		/* copy descriptors, and track endpoint copies */
 		f->hs_descriptors = usb_copy_descriptors(gser_hs_function);
@@ -216,6 +746,10 @@
 				f->hs_descriptors, &gser_hs_in_desc);
 		gser->hs.out = usb_find_endpoint(gser_hs_function,
 				f->hs_descriptors, &gser_hs_out_desc);
+#ifdef CONFIG_MODEM_SUPPORT
+		gser->hs.notify = usb_find_endpoint(gser_hs_function,
+				f->hs_descriptors, &gser_hs_notify_desc);
+#endif
 	}
 
 	DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
@@ -225,6 +759,14 @@
 	return 0;
 
 fail:
+#ifdef CONFIG_MODEM_SUPPORT
+	if (gser->notify_req)
+		gs_free_req(gser->notify, gser->notify_req);
+
+	/* we might as well release our claims on endpoints */
+	if (gser->notify)
+		gser->notify->driver_data = NULL;
+#endif
 	/* we might as well release our claims on endpoints */
 	if (gser->port.out)
 		gser->port.out->driver_data = NULL;
@@ -239,9 +781,15 @@
 static void
 gser_unbind(struct usb_configuration *c, struct usb_function *f)
 {
+#ifdef CONFIG_MODEM_SUPPORT
+	struct f_gser *gser = func_to_gser(f);
+#endif
 	if (gadget_is_dualspeed(c->cdev->gadget))
 		usb_free_descriptors(f->hs_descriptors);
 	usb_free_descriptors(f->descriptors);
+#ifdef CONFIG_MODEM_SUPPORT
+	gs_free_req(gser->notify, gser->notify_req);
+#endif
 	kfree(func_to_gser(f));
 }
 
@@ -279,6 +827,9 @@
 	if (!gser)
 		return -ENOMEM;
 
+#ifdef CONFIG_MODEM_SUPPORT
+	spin_lock_init(&gser->lock);
+#endif
 	gser->port_num = port_num;
 
 	gser->port.func.name = "gser";
@@ -287,9 +838,130 @@
 	gser->port.func.unbind = gser_unbind;
 	gser->port.func.set_alt = gser_set_alt;
 	gser->port.func.disable = gser_disable;
+	gser->transport		= gserial_ports[port_num].transport;
+#ifdef CONFIG_MODEM_SUPPORT
+	/* We support only two ports for now */
+	if (port_num == 0)
+		gser->port.func.name = "modem";
+	else
+		gser->port.func.name = "nmea";
+	gser->port.func.setup = gser_setup;
+	gser->port.connect = gser_connect;
+	gser->port.get_dtr = gser_get_dtr;
+	gser->port.get_rts = gser_get_rts;
+	gser->port.send_carrier_detect = gser_send_carrier_detect;
+	gser->port.send_ring_indicator = gser_send_ring_indicator;
+	gser->port.send_modem_ctrl_bits = gser_send_modem_ctrl_bits;
+	gser->port.disconnect = gser_disconnect;
+	gser->port.send_break = gser_send_break;
+#endif
 
 	status = usb_add_function(c, &gser->port.func);
 	if (status)
 		kfree(gser);
 	return status;
 }
+
+#ifdef CONFIG_USB_F_SERIAL
+
+int fserial_nmea_bind_config(struct usb_configuration *c)
+{
+	return gser_bind_config(c, 1);
+}
+
+static struct android_usb_function nmea_function = {
+	.name = "nmea",
+	.bind_config = fserial_nmea_bind_config,
+};
+
+int fserial_modem_bind_config(struct usb_configuration *c)
+{
+	int ret;
+
+	/* See if composite driver can allocate
+	 * serial ports. But for now allocate
+	 * two ports for modem and nmea.
+	 */
+	ret = gport_setup(c);
+
+	if (ret)
+		return ret;
+	return gser_bind_config(c, 0);
+}
+
+static struct android_usb_function modem_function = {
+	.name = "modem",
+	.bind_config = fserial_modem_bind_config,
+};
+
+static int fserial_remove(struct platform_device *dev)
+{
+	gserial_cleanup();
+
+	return 0;
+}
+
+static struct platform_driver usb_fserial = {
+	.remove		= fserial_remove,
+	.driver = {
+		.name = "usb_fserial",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init fserial_probe(struct platform_device *pdev)
+{
+	struct usb_gadget_fserial_platform_data	*pdata =
+					pdev->dev.platform_data;
+	int i;
+
+	dev_dbg(&pdev->dev, "%s: probe\n", __func__);
+
+	if (!pdata)
+		goto probe_android_register;
+
+	for (i = 0; i < GSERIAL_NO_PORTS; i++) {
+		gserial_ports[i].transport = pdata->transport[i];
+		gserial_ports[i].port_num = i;
+
+		switch (gserial_ports[i].transport) {
+		case USB_GADGET_FSERIAL_TRANSPORT_TTY:
+			gserial_ports[i].client_port_num = no_tty_ports;
+			no_tty_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SDIO:
+			gserial_ports[i].client_port_num = no_sdio_ports;
+			no_sdio_ports++;
+			break;
+		case USB_GADGET_FSERIAL_TRANSPORT_SMD:
+			gserial_ports[i].client_port_num = no_smd_ports;
+			no_smd_ports++;
+			break;
+		default:
+			pr_err("%s: Un-supported transport: %u\n",
+					__func__, gserial_ports[i].transport);
+			return -ENODEV;
+		}
+
+		nr_ports++;
+	}
+
+	pr_info("%s:gport:tty_ports:%u sdio_ports:%u "
+			"smd_ports:%u nr_ports:%u\n",
+			__func__, no_tty_ports, no_sdio_ports,
+			no_smd_ports, nr_ports);
+
+probe_android_register:
+	android_register_function(&modem_function);
+	android_register_function(&nmea_function);
+
+	return 0;
+}
+
+static int __init fserial_init(void)
+{
+	return platform_driver_probe(&usb_fserial, fserial_probe);
+}
+module_init(fserial_init);
+
+#endif /* CONFIG_USB_F_SERIAL */
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcdac7c..05692bb 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -120,6 +120,12 @@
 #define gadget_is_ci13xxx_pci(g)	0
 #endif
 
+#ifdef CONFIG_USB_GADGET_MSM_72K
+#define	gadget_is_msm72k(g)	!strcmp("msm72k_udc", (g)->name)
+#else
+#define	gadget_is_msm72k(g)	0
+#endif
+
 // CONFIG_USB_GADGET_SX2
 // CONFIG_USB_GADGET_AU1X00
 // ...
@@ -223,6 +229,8 @@
 		return 0x29;
 	else if (gadget_is_s3c_hsudc(gadget))
 		return 0x30;
+	else if (gadget_is_msm72k(gadget))
+		return 0x31;
 
 	return -ENOENT;
 }
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
new file mode 100644
index 0000000..24ba619
--- /dev/null
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -0,0 +1,2653 @@
+/*
+ * Driver for HighSpeed USB Client Controller in MSM7K
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *         Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/switch.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/msm72k_otg.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/board.h>
+#include <mach/msm_hsusb.h>
+#include <linux/device.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/clk.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+
+static const char driver_name[] = "msm72k_udc";
+
+/* #define DEBUG */
+/* #define VERBOSE */
+
+#define MSM_USB_BASE ((unsigned) ui->addr)
+
+#define	DRIVER_DESC		"MSM 72K USB Peripheral Controller"
+#define	DRIVER_NAME		"MSM72K_UDC"
+
+#define EPT_FLAG_IN        0x0001
+
+#define SETUP_BUF_SIZE     8
+
+
+static const char *const ep_name[] = {
+	"ep0out", "ep1out", "ep2out", "ep3out",
+	"ep4out", "ep5out", "ep6out", "ep7out",
+	"ep8out", "ep9out", "ep10out", "ep11out",
+	"ep12out", "ep13out", "ep14out", "ep15out",
+	"ep0in", "ep1in", "ep2in", "ep3in",
+	"ep4in", "ep5in", "ep6in", "ep7in",
+	"ep8in", "ep9in", "ep10in", "ep11in",
+	"ep12in", "ep13in", "ep14in", "ep15in"
+};
+
+/*To release the wakelock from debugfs*/
+static int release_wlocks;
+
+struct msm_request {
+	struct usb_request req;
+
+	/* saved copy of req.complete */
+	void	(*gadget_complete)(struct usb_ep *ep,
+					struct usb_request *req);
+
+
+	struct usb_info *ui;
+	struct msm_request *next;
+	struct msm_request *prev;
+
+	unsigned busy:1;
+	unsigned live:1;
+	unsigned alloced:1;
+
+	dma_addr_t dma;
+	dma_addr_t item_dma;
+
+	struct ept_queue_item *item;
+};
+
+#define to_msm_request(r) container_of(r, struct msm_request, req)
+#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep)
+#define to_msm_otg(xceiv)  container_of(xceiv, struct msm_otg, otg)
+#define is_b_sess_vld()	((OTGSC_BSV & readl(USB_OTGSC)) ? 1 : 0)
+#define is_usb_online(ui) (ui->usb_state != USB_STATE_NOTATTACHED)
+
+struct msm_endpoint {
+	struct usb_ep ep;
+	struct usb_info *ui;
+	struct msm_request *req; /* head of pending requests */
+	struct msm_request *last;
+	unsigned flags;
+
+	/* bit number (0-31) in various status registers
+	** as well as the index into the usb_info's array
+	** of all endpoints
+	*/
+	unsigned char bit;
+	unsigned char num;
+
+	unsigned wedged:1;
+	/* pointers to DMA transfer list area */
+	/* these are allocated from the usb_info dma space */
+	struct ept_queue_head *head;
+};
+
+/* PHY status check timer to monitor phy stuck up on reset */
+static struct timer_list phy_status_timer;
+
+static void usb_do_work(struct work_struct *w);
+static void usb_do_remote_wakeup(struct work_struct *w);
+
+
+#define USB_STATE_IDLE    0
+#define USB_STATE_ONLINE  1
+#define USB_STATE_OFFLINE 2
+
+#define USB_FLAG_START          0x0001
+#define USB_FLAG_VBUS_ONLINE    0x0002
+#define USB_FLAG_VBUS_OFFLINE   0x0004
+#define USB_FLAG_RESET          0x0008
+#define USB_FLAG_SUSPEND        0x0010
+#define USB_FLAG_CONFIGURED     0x0020
+
+#define USB_CHG_DET_DELAY	msecs_to_jiffies(1000)
+#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(1000)
+#define PHY_STATUS_CHECK_DELAY	(jiffies + msecs_to_jiffies(1000))
+
+struct usb_info {
+	/* lock for register/queue/device state changes */
+	spinlock_t lock;
+
+	/* single request used for handling setup transactions */
+	struct usb_request *setup_req;
+
+	struct platform_device *pdev;
+	int irq;
+	void *addr;
+
+	unsigned state;
+	unsigned flags;
+
+	atomic_t configured;
+	atomic_t running;
+
+	struct dma_pool *pool;
+
+	/* dma page to back the queue heads and items */
+	unsigned char *buf;
+	dma_addr_t dma;
+
+	struct ept_queue_head *head;
+
+	/* used for allocation */
+	unsigned next_item;
+	unsigned next_ifc_num;
+
+	/* endpoints are ordered based on their status bits,
+	** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15
+	*/
+	struct msm_endpoint ept[32];
+
+
+	/* max power requested by selected configuration */
+	unsigned b_max_pow;
+	unsigned chg_current;
+	struct delayed_work chg_det;
+	struct delayed_work chg_stop;
+	struct msm_hsusb_gadget_platform_data *pdata;
+	struct work_struct phy_status_check;
+
+	struct work_struct work;
+	unsigned phy_status;
+	unsigned phy_fail_count;
+
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct switch_dev sdev;
+
+#define ep0out ept[0]
+#define ep0in  ept[16]
+
+	atomic_t ep0_dir;
+	atomic_t test_mode;
+	atomic_t offline_pending;
+	atomic_t softconnect;
+#ifdef CONFIG_USB_OTG
+	u8 hnp_avail;
+#endif
+
+	atomic_t remote_wakeup;
+	atomic_t self_powered;
+	struct delayed_work rw_work;
+
+	struct otg_transceiver *xceiv;
+	enum usb_device_state usb_state;
+	struct wake_lock	wlock;
+};
+
+static const struct usb_ep_ops msm72k_ep_ops;
+static struct usb_info *the_usb_info;
+
+static int msm72k_wakeup(struct usb_gadget *_gadget);
+static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active);
+static int msm72k_set_halt(struct usb_ep *_ep, int value);
+static void flush_endpoint(struct msm_endpoint *ept);
+static void usb_reset(struct usb_info *ui);
+static int usb_ept_set_halt(struct usb_ep *_ep, int value);
+
+static void msm_hsusb_set_speed(struct usb_info *ui)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) {
+	case PORTSC_PSPD_FS:
+		dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_FULL\n");
+		ui->gadget.speed = USB_SPEED_FULL;
+		break;
+	case PORTSC_PSPD_LS:
+		dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_LOW\n");
+		ui->gadget.speed = USB_SPEED_LOW;
+		break;
+	case PORTSC_PSPD_HS:
+		dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_HIGH\n");
+		ui->gadget.speed = USB_SPEED_HIGH;
+		break;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+static void msm_hsusb_set_state(enum usb_device_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&the_usb_info->lock, flags);
+	the_usb_info->usb_state = state;
+	spin_unlock_irqrestore(&the_usb_info->lock, flags);
+}
+
+static enum usb_device_state msm_hsusb_get_state(void)
+{
+	unsigned long flags;
+	enum usb_device_state state;
+
+	spin_lock_irqsave(&the_usb_info->lock, flags);
+	state = the_usb_info->usb_state;
+	spin_unlock_irqrestore(&the_usb_info->lock, flags);
+
+	return state;
+}
+
+static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", DRIVER_NAME);
+}
+
+static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
+{
+	return sprintf(buf, "%s\n", sdev->state ? "online" : "offline");
+}
+
+static inline enum chg_type usb_get_chg_type(struct usb_info *ui)
+{
+	if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS)
+		return USB_CHG_TYPE__WALLCHARGER;
+	else
+		return USB_CHG_TYPE__SDP;
+}
+
+#define USB_WALLCHARGER_CHG_CURRENT 1800
+static int usb_get_max_power(struct usb_info *ui)
+{
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	unsigned long flags;
+	enum chg_type temp;
+	int suspended;
+	int configured;
+	unsigned bmaxpow;
+
+	if (ui->gadget.is_a_peripheral)
+		return -EINVAL;
+
+	temp = atomic_read(&otg->chg_type);
+	spin_lock_irqsave(&ui->lock, flags);
+	suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0;
+	configured = atomic_read(&ui->configured);
+	bmaxpow = ui->b_max_pow;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	if (temp == USB_CHG_TYPE__INVALID)
+		return -ENODEV;
+
+	if (temp == USB_CHG_TYPE__WALLCHARGER)
+		return USB_WALLCHARGER_CHG_CURRENT;
+
+	if (suspended || !configured)
+		return 0;
+
+	return bmaxpow;
+}
+
+static int usb_phy_stuck_check(struct usb_info *ui)
+{
+	/*
+	 * write some value (0xAA) into scratch reg (0x16) and read it back,
+	 * If the read value is same as written value, means PHY is normal
+	 * otherwise, PHY seems to have stuck.
+	 */
+
+	if (otg_io_write(ui->xceiv, 0xAA, 0x16) == -1) {
+		dev_dbg(&ui->pdev->dev,
+				"%s(): ulpi write timeout\n", __func__);
+		return -EIO;
+	}
+
+	if (otg_io_read(ui->xceiv, 0x16) != 0xAA) {
+		dev_dbg(&ui->pdev->dev,
+				"%s(): read value is incorrect\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Check the PHY status by writing and reading back the PHY scratch
+ * register.  If the PHY appears stuck, reset the hardware link to
+ * recover it. */
+static void usb_phy_stuck_recover(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->gadget.speed != USB_SPEED_UNKNOWN ||
+			ui->usb_state == USB_STATE_NOTATTACHED ||
+			ui->driver == NULL) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	disable_irq(otg->irq);
+	if (usb_phy_stuck_check(ui)) {
+#ifdef CONFIG_USB_MSM_ACA
+		del_timer_sync(&otg->id_timer);
+#endif
+		ui->phy_fail_count++;
+		dev_err(&ui->pdev->dev,
+				"%s():PHY stuck, resetting HW\n", __func__);
+		/*
+		 * PHY seems to have stuck,
+		 * reset the PHY and HW link to recover the PHY
+		 */
+		usb_reset(ui);
+#ifdef CONFIG_USB_MSM_ACA
+		mod_timer(&otg->id_timer, jiffies +
+				 msecs_to_jiffies(OTG_ID_POLL_MS));
+#endif
+		msm72k_pullup_internal(&ui->gadget, 1);
+	}
+	enable_irq(otg->irq);
+}
+
+static void usb_phy_status_check_timer(unsigned long data)
+{
+	struct usb_info *ui = the_usb_info;
+
+	schedule_work(&ui->phy_status_check);
+}
+
+static void usb_chg_stop(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, chg_stop.work);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	enum chg_type temp;
+
+	temp = atomic_read(&otg->chg_type);
+
+	if (temp == USB_CHG_TYPE__SDP)
+		otg_set_power(ui->xceiv, 0);
+}
+
+static void usb_chg_detect(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, chg_det.work);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	enum chg_type temp = USB_CHG_TYPE__INVALID;
+	unsigned long flags;
+	int maxpower;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->usb_state == USB_STATE_NOTATTACHED) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return;
+	}
+
+	temp = usb_get_chg_type(ui);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	atomic_set(&otg->chg_type, temp);
+	maxpower = usb_get_max_power(ui);
+	if (maxpower > 0)
+		otg_set_power(ui->xceiv, maxpower);
+
+	/* The USB driver prevents idle and suspend power collapse (PC)
+	 * while a USB cable is connected, but when a dedicated charger is
+	 * connected it can vote for idle and suspend PC.  The OTG driver
+	 * handles idle PC as part of the otg_set_power call above when a
+	 * wall charger is attached.  To allow suspend PC, release the
+	 * wakelock; it is re-acquired on any subsequent USB interrupt.
+	 */
+	if (temp == USB_CHG_TYPE__WALLCHARGER) {
+		pm_runtime_put_sync(&ui->pdev->dev);
+		wake_unlock(&ui->wlock);
+	}
+}
+
+static int usb_ep_get_stall(struct msm_endpoint *ept)
+{
+	unsigned int n;
+	struct usb_info *ui = ept->ui;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+	if (ept->flags & EPT_FLAG_IN)
+		return (CTRL_TXS & n) ? 1 : 0;
+	else
+		return (CTRL_RXS & n) ? 1 : 0;
+}
+
+static void init_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++) {
+		struct msm_endpoint *ept = ui->ept + n;
+
+		ept->ui = ui;
+		ept->bit = n;
+		ept->num = n & 15;
+		ept->ep.name = ep_name[n];
+		ept->ep.ops = &msm72k_ep_ops;
+
+		if (ept->bit > 15) {
+			/* IN endpoint */
+			ept->head = ui->head + (ept->num << 1) + 1;
+			ept->flags = EPT_FLAG_IN;
+		} else {
+			/* OUT endpoint */
+			ept->head = ui->head + (ept->num << 1);
+			ept->flags = 0;
+		}
+
+	}
+}
+
+static void config_ept(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT;
+
+	/* ep0 out needs interrupt-on-setup */
+	if (ept->bit == 0)
+		cfg |= CONFIG_IOS;
+
+	ept->head->config = cfg;
+	ept->head->next = TERMINATE;
+
+	if (ept->ep.maxpacket)
+		dev_dbg(&ui->pdev->dev,
+			"ept #%d %s max:%d head:%p bit:%d\n",
+		       ept->num,
+		       (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+		       ept->ep.maxpacket, ept->head, ept->bit);
+}
+
+static void configure_endpoints(struct usb_info *ui)
+{
+	unsigned n;
+
+	for (n = 0; n < 32; n++)
+		config_ept(ui->ept + n);
+}
+
+struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept,
+			unsigned bufsize, gfp_t gfp_flags)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req;
+
+	req = kzalloc(sizeof(*req), gfp_flags);
+	if (!req)
+		goto fail1;
+
+	req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma);
+	if (!req->item)
+		goto fail2;
+
+	if (bufsize) {
+		req->req.buf = kmalloc(bufsize, gfp_flags);
+		if (!req->req.buf)
+			goto fail3;
+		req->alloced = 1;	/* buffer owned by this request */
+	}
+
+	return &req->req;
+
+fail3:
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+fail2:
+	kfree(req);
+fail1:
+	return NULL;	/* pointer return: NULL, not literal 0 */
+}
+
+/* Enable or disable one direction of a physical endpoint.  When enabling,
+ * the transfer type is programmed into ENDPTCTRL and the R bit is set
+ * alongside E (presumably to reset the data toggle — per controller
+ * databook; confirm against the h/w manual).  Disabling only clears the
+ * enable bit and leaves the type field untouched.
+ */
+static void usb_ept_enable(struct msm_endpoint *ept, int yes,
+		unsigned char ep_type)
+{
+	struct usb_info *ui = ept->ui;
+	int in = ept->flags & EPT_FLAG_IN;
+	unsigned n;
+
+	n = readl(USB_ENDPTCTRL(ept->num));
+
+	if (in) {
+		if (yes) {
+			n = (n & (~CTRL_TXT_MASK)) |
+				(ep_type << CTRL_TXT_EP_TYPE_SHIFT);
+			n |= CTRL_TXE | CTRL_TXR;
+		} else
+			n &= (~CTRL_TXE);
+	} else {
+		if (yes) {
+			n = (n & (~CTRL_RXT_MASK)) |
+				(ep_type << CTRL_RXT_EP_TYPE_SHIFT);
+			n |= CTRL_RXE | CTRL_RXR;
+		} else
+			n &= ~(CTRL_RXE);
+	}
+	/* complete all the updates to ept->head before enabling endpoint*/
+	mb();
+	writel(n, USB_ENDPTCTRL(ept->num));
+
+	/* Ensure endpoint is enabled before returning */
+	mb();
+
+	dev_dbg(&ui->pdev->dev, "ept %d %s %s\n",
+	       ept->num, in ? "in" : "out", yes ? "enabled" : "disabled");
+}
+
+/* Build the dTD chain for every queued-but-idle request on @ept, link it
+ * into the hardware queue head, and prime the endpoint.  Must be called
+ * with no live (already-primed) request at the head of the queue.
+ * Includes a re-prime workaround (up to 3 retries) for a controller quirk
+ * where ENDPTSTAT fails to latch even though the prime completed; the
+ * first dTD's ACTIVE bit is used as the ground truth.
+ */
+static void usb_ept_start(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req = ept->req;
+	struct msm_request *f_req = ept->req;
+	unsigned n = 1 << ept->bit;
+	unsigned info;
+	int reprime_cnt = 0;
+
+	BUG_ON(req->live);
+
+	while (req) {
+		req->live = 1;
+		/* prepare the transaction descriptor item for the hardware */
+		req->item->info =
+			INFO_BYTES(req->req.length) | INFO_IOC | INFO_ACTIVE;
+		/* each dTD addresses up to four 4K pages of the buffer */
+		req->item->page0 = req->dma;
+		req->item->page1 = (req->dma + 0x1000) & 0xfffff000;
+		req->item->page2 = (req->dma + 0x2000) & 0xfffff000;
+		req->item->page3 = (req->dma + 0x3000) & 0xfffff000;
+
+		if (req->next == NULL) {
+			req->item->next = TERMINATE;
+			break;
+		}
+		req->item->next = req->next->item_dma;
+		req = req->next;
+	}
+
+	rmb();
+	/* link the hw queue head to the request's transaction item */
+	ept->head->next = ept->req->item_dma;
+	ept->head->info = 0;
+
+reprime_ept:
+	/* flush buffers before priming ept */
+	mb();
+	/* during high throughput testing it is observed that
+	 * ept stat bit is not set even though all the data
+	 * structures are updated properly and ept prime bit
+	 * is set. To workaround the issue, use dTD INFO bit
+	 * to make decision on re-prime or not.
+	 */
+	writel_relaxed(n, USB_ENDPTPRIME);
+	/* busy wait till endptprime gets clear */
+	while ((readl_relaxed(USB_ENDPTPRIME) & n))
+		;
+	if (readl_relaxed(USB_ENDPTSTAT) & n)
+		return;
+
+	rmb();
+	info = f_req->item->info;
+	if (info & INFO_ACTIVE) {
+		if (reprime_cnt++ < 3)
+			goto reprime_ept;
+		else
+			pr_err("%s(): ept%d%s prime failed. ept: config: %x"
+				"active: %x next: %x info: %x\n"
+				" req@ %x next: %x info: %x\n",
+				__func__, ept->num,
+				ept->flags & EPT_FLAG_IN ? "in" : "out",
+				ept->head->config, ept->head->active,
+				ept->head->next, ept->head->info,
+				f_req->item_dma, f_req->item->next, info);
+	}
+}
+
+/* Queue a gadget request on @ept.  Rejects requests that are already
+ * queued (-EBUSY), non-ep0 traffic while unconfigured (-ESHUTDOWN) and,
+ * while the bus is suspended, anything unless remote wakeup was armed
+ * (in which case wakeup is initiated via the rw_work).  Maps the buffer
+ * for DMA and either appends to the pending chain — letting the
+ * completion interrupt restart the hardware — or, when the queue was
+ * empty, primes the hardware immediately.
+ * Returns 0 on success or a negative errno.
+ */
+int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req)
+{
+	unsigned long flags;
+	struct msm_request *req = to_msm_request(_req);
+	struct msm_request *last;
+	struct usb_info *ui = ept->ui;
+	unsigned length = req->req.length;
+
+	/* 0x4000 is the per-request ceiling here — presumably tied to the
+	 * single-dTD page layout set up in usb_ept_start(); confirm against
+	 * the controller databook.
+	 */
+	if (length > 0x4000)
+		return -EMSGSIZE;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (req->busy) {
+		req->req.status = -EBUSY;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		dev_err(&ui->pdev->dev,
+			"usb_ept_queue_xfer() tried to queue busy request\n");
+		return -EBUSY;
+	}
+
+	if (!atomic_read(&ui->configured) && (ept->num != 0)) {
+		req->req.status = -ESHUTDOWN;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		if (printk_ratelimit())
+			dev_err(&ui->pdev->dev,
+				"%s: called while offline\n", __func__);
+		return -ESHUTDOWN;
+	}
+
+	if (ui->usb_state == USB_STATE_SUSPENDED) {
+		if (!atomic_read(&ui->remote_wakeup)) {
+			req->req.status = -EAGAIN;
+			spin_unlock_irqrestore(&ui->lock, flags);
+			if (printk_ratelimit())
+				dev_err(&ui->pdev->dev,
+				"%s: cannot queue as bus is suspended "
+				"ept #%d %s max:%d head:%p bit:%d\n",
+				__func__, ept->num,
+				(ept->flags & EPT_FLAG_IN) ? "in" : "out",
+				ept->ep.maxpacket, ept->head, ept->bit);
+
+			return -EAGAIN;
+		}
+
+		/* host allowed remote wakeup: kick the resume sequence */
+		wake_lock(&ui->wlock);
+		otg_set_suspend(ui->xceiv, 0);
+		schedule_delayed_work(&ui->rw_work, REMOTE_WAKEUP_DELAY);
+	}
+
+	req->busy = 1;
+	req->live = 0;
+	req->next = 0;
+	req->req.status = -EBUSY;
+
+	req->dma = dma_map_single(NULL, req->req.buf, length,
+				  (ept->flags & EPT_FLAG_IN) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+
+	/* Add the new request to the end of the queue */
+	last = ept->last;
+	if (last) {
+		/* Already requests in the queue. add us to the
+		 * end, but let the completion interrupt actually
+		 * start things going, to avoid hw issues
+		 */
+		last->next = req;
+		req->prev = last;
+
+	} else {
+		/* queue was empty -- kick the hardware */
+		ept->req = req;
+		req->prev = NULL;
+		usb_ept_start(ept);
+	}
+	ept->last = req;
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+}
+
+/* --- endpoint 0 handling --- */
+
+/* Restore the gadget driver's original completion callback (saved when
+ * the request was borrowed for the ep0 protocol) and invoke it against
+ * the ep0-in endpoint.
+ */
+static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct msm_request *mreq = to_msm_request(req);
+	struct usb_info *ui = ept->ui;
+
+	req->complete = mreq->gadget_complete;
+	mreq->gadget_complete = 0;
+	if (req->complete)
+		req->complete(&ui->ep0in.ep, req);
+}
+
+/* Completion of the ep0 status phase.  @_req is the zero-length status
+ * request; its ->context points at the original data-stage request whose
+ * saved gadget completion callback is restored and invoked here (on the
+ * ep0-in endpoint).  A NULL context means there is nothing to hand back.
+ */
+static void ep0_status_complete(struct usb_ep *ep, struct usb_request *_req)
+{
+	struct usb_request *req = _req->context;
+	struct msm_request *r;
+	struct msm_endpoint *ept;
+	struct usb_info *ui;
+
+	pr_debug("%s:\n", __func__);
+	if (!req)
+		return;
+
+	r = to_msm_request(req);
+	ept = to_msm_endpoint(ep);
+	ui = ept->ui;
+	_req->context = 0;
+
+	req->complete = r->gadget_complete;
+	req->zero = 0;
+	r->gadget_complete = 0;
+	if (req->complete)
+		req->complete(&ui->ep0in.ep, req);
+
+}
+
+/* Run the ep0 status phase: queue a zero-length packet on the endpoint
+ * opposite to the data-stage direction, chaining into
+ * ep0_status_complete() to hand the original request back.
+ */
+static void ep0_status_phase(struct usb_ep *ep, struct usb_request *req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+	struct msm_endpoint *status_ept;
+
+	pr_debug("%s:\n", __func__);
+
+	req->length = 0;
+	req->complete = ep0_status_complete;
+
+	/* status stage travels opposite to the data stage direction */
+	status_ept = (atomic_read(&ui->ep0_dir) == USB_DIR_IN) ?
+			&ui->ep0out : &ui->ep0in;
+	usb_ept_queue_xfer(status_ept, req);
+}
+
+/* Queue a zero-length IN packet on ep0 to terminate a data stage whose
+ * length is an exact multiple of maxpacket; completion chains into the
+ * status phase.
+ */
+static void ep0in_send_zero_leng_pkt(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct usb_request *zlp_req = ui->setup_req;
+
+	pr_debug("%s:\n", __func__);
+
+	zlp_req->length = 0;
+	zlp_req->complete = ep0_status_phase;
+	usb_ept_queue_xfer(&ui->ep0in, zlp_req);
+}
+
+/* Completion of the ep0 data stage.  On success the shared setup_req is
+ * handed to the status phase (with the completed data-stage request
+ * stashed in ->context); if the gadget asked for ->zero and the IN
+ * transfer was a non-zero exact multiple of maxpacket, a zero-length
+ * packet is inserted first.  On error or short transfer the gadget's
+ * completion is called directly.
+ */
+static void ep0_queue_ack_complete(struct usb_ep *ep,
+	struct usb_request *_req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+	struct usb_request *req = ui->setup_req;
+
+	pr_debug("%s: _req:%p actual:%d length:%d zero:%d\n",
+			__func__, _req, _req->actual,
+			_req->length, _req->zero);
+
+	/* queue up the receive of the ACK response from the host */
+	if (_req->status == 0 && _req->actual == _req->length) {
+		req->context = _req;
+		if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) {
+			if (_req->zero && _req->length &&
+					!(_req->length % ep->maxpacket)) {
+				ep0in_send_zero_leng_pkt(&ui->ep0in);
+				return;
+			}
+		}
+		ep0_status_phase(ep, req);
+	} else
+		ep0_complete(ep, _req);
+}
+
+/* Completion of the ACK for a no-data SETUP.  If a USB-IF electrical test
+ * mode was latched by SET_FEATURE(TEST_MODE), program the matching PTC
+ * (port test control) value into PORTSC now — test modes must only be
+ * entered after the request's status stage has completed.
+ */
+static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(ep);
+	struct usb_info *ui = ept->ui;
+	unsigned int temp;
+	int test_mode = atomic_read(&ui->test_mode);
+
+	if (!test_mode)
+		return;
+
+	switch (test_mode) {
+	case J_TEST:
+		dev_info(&ui->pdev->dev, "usb electrical test mode: (J)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC);
+		break;
+
+	case K_TEST:
+		dev_info(&ui->pdev->dev, "usb electrical test mode: (K)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC);
+		break;
+
+	case SE0_NAK_TEST:
+		dev_info(&ui->pdev->dev,
+			"usb electrical test mode: (SE0-NAK)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC);
+		break;
+
+	case TST_PKT_TEST:
+		dev_info(&ui->pdev->dev,
+			"usb electrical test mode: (TEST_PKT)\n");
+		temp = readl(USB_PORTSC) & (~PORTSC_PTC);
+		writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC);
+		break;
+	}
+}
+
+/* ACK a no-data SETUP by queueing a zero-length IN status packet;
+ * completion applies any pending electrical test mode.
+ */
+static void ep0_setup_ack(struct usb_info *ui)
+{
+	struct usb_request *ack = ui->setup_req;
+
+	ack->length = 0;
+	ack->complete = ep0_setup_ack_complete;
+	usb_ept_queue_xfer(&ui->ep0in, ack);
+}
+
+/* Protocol-stall both directions of ep0 by setting the TX and RX stall
+ * bits in ENDPTCTRL0 (named constants instead of the raw
+ * (1<<16)|(1<<0); same register value as before).  The controller clears
+ * an ep0 stall automatically on the next SETUP packet.
+ */
+static void ep0_setup_stall(struct usb_info *ui)
+{
+	writel(CTRL_TXS | CTRL_RXS, USB_ENDPTCTRL(0));
+}
+
+/* Start the IN data stage of a SETUP transfer: send @length bytes from
+ * the shared setup_req buffer; completion chains into the ACK/status
+ * handling.  gadget_complete is cleared because this request was not
+ * queued by the gadget driver.
+ */
+static void ep0_setup_send(struct usb_info *ui, unsigned length)
+{
+	struct usb_request *req = ui->setup_req;
+	struct msm_request *mreq = to_msm_request(req);
+
+	req->length = length;
+	req->complete = ep0_queue_ack_complete;
+	mreq->gadget_complete = 0;
+	usb_ept_queue_xfer(&ui->ep0in, req);
+}
+
+/* Decode and dispatch a SETUP packet read from the ep0-out queue head.
+ * Requests the UDC must answer itself — GET_STATUS (including the OTG 2.0
+ * HNP-polling variant), endpoint {SET,CLEAR}_FEATURE(HALT),
+ * SET_CONFIGURATION, SET_ADDRESS, test modes and the OTG HNP features —
+ * are handled inline; everything else is delegated to the gadget
+ * driver's setup() callback and ep0 is stalled on failure.
+ */
+static void handle_setup(struct usb_info *ui)
+{
+	struct usb_ctrlrequest ctl;
+	struct usb_request *req = ui->setup_req;
+	int ret;
+#ifdef CONFIG_USB_OTG
+	u8 hnp;
+	unsigned long flags;
+#endif
+
+	memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl));
+	/* Ensure buffer is read before acknowledging to h/w */
+	mb();
+
+	writel(EPT_RX(0), USB_ENDPTSETUPSTAT);
+
+	if (ctl.bRequestType & USB_DIR_IN)
+		atomic_set(&ui->ep0_dir, USB_DIR_IN);
+	else
+		atomic_set(&ui->ep0_dir, USB_DIR_OUT);
+
+	/* any pending ep0 transactions must be canceled */
+	flush_endpoint(&ui->ep0out);
+	flush_endpoint(&ui->ep0in);
+
+	dev_dbg(&ui->pdev->dev,
+		"setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n",
+	       ctl.bRequestType, ctl.bRequest, ctl.wValue,
+	       ctl.wIndex, ctl.wLength);
+
+	if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) ==
+					(USB_DIR_IN | USB_TYPE_STANDARD)) {
+		if (ctl.bRequest == USB_REQ_GET_STATUS) {
+			/* OTG supplement Rev 2.0 introduces another device
+			 * GET_STATUS request for HNP polling with length = 1.
+			 */
+			u8 len = 2;
+			switch (ctl.bRequestType & USB_RECIP_MASK) {
+			case USB_RECIP_ENDPOINT:
+			{
+				struct msm_endpoint *ept;
+				unsigned num =
+					ctl.wIndex & USB_ENDPOINT_NUMBER_MASK;
+				u16 temp = 0;
+
+				if (num == 0) {
+					memset(req->buf, 0, 2);
+					break;
+				}
+				/* IN endpoints live in slots 16..31 */
+				if (ctl.wIndex & USB_ENDPOINT_DIR_MASK)
+					num += 16;
+				ept = &ui->ep0out + num;
+				temp = usb_ep_get_stall(ept);
+				temp = temp << USB_ENDPOINT_HALT;
+				memcpy(req->buf, &temp, 2);
+				break;
+			}
+			case USB_RECIP_DEVICE:
+			{
+				u16 temp = 0;
+
+				if (ctl.wIndex == OTG_STATUS_SELECTOR) {
+#ifdef CONFIG_USB_OTG
+					/* one-byte HNP-polling reply */
+					spin_lock_irqsave(&ui->lock, flags);
+					hnp = (ui->gadget.host_request <<
+							HOST_REQUEST_FLAG);
+					ui->hnp_avail = 1;
+					spin_unlock_irqrestore(&ui->lock,
+							flags);
+					memcpy(req->buf, &hnp, 1);
+					len = 1;
+#else
+					goto stall;
+#endif
+				} else {
+					temp = (atomic_read(&ui->self_powered)
+						<< USB_DEVICE_SELF_POWERED);
+					temp |= (atomic_read(&ui->remote_wakeup)
+						<< USB_DEVICE_REMOTE_WAKEUP);
+					memcpy(req->buf, &temp, 2);
+				}
+				break;
+			}
+			case USB_RECIP_INTERFACE:
+				memset(req->buf, 0, 2);
+				break;
+			default:
+				goto stall;
+			}
+			ep0_setup_send(ui, len);
+			return;
+		}
+	}
+	if (ctl.bRequestType ==
+		    (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
+		if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) ||
+				(ctl.bRequest == USB_REQ_SET_FEATURE)) {
+			/* wValue == 0 is ENDPOINT_HALT */
+			if ((ctl.wValue == 0) && (ctl.wLength == 0)) {
+				unsigned num = ctl.wIndex & 0x0f;
+
+				if (num != 0) {
+					struct msm_endpoint *ept;
+
+					if (ctl.wIndex & 0x80)
+						num += 16;
+					ept = &ui->ep0out + num;
+
+					/* a wedged endpoint stays halted but
+					 * the request is still ACKed */
+					if (ept->wedged)
+						goto ack;
+					if (ctl.bRequest == USB_REQ_SET_FEATURE)
+						usb_ept_set_halt(&ept->ep, 1);
+					else
+						usb_ept_set_halt(&ept->ep, 0);
+				}
+				goto ack;
+			}
+		}
+	}
+	if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) {
+		if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) {
+			atomic_set(&ui->configured, !!ctl.wValue);
+			msm_hsusb_set_state(USB_STATE_CONFIGURED);
+		} else if (ctl.bRequest == USB_REQ_SET_ADDRESS) {
+			/*
+			 * Gadget speed should be set when PCI interrupt
+			 * occurs. But sometimes, PCI interrupt is not
+			 * occurring after reset. Hence update the gadget
+			 * speed here.
+			 */
+			if (ui->gadget.speed == USB_SPEED_UNKNOWN) {
+				dev_info(&ui->pdev->dev,
+					"PCI intr missed"
+					"set speed explictly\n");
+				msm_hsusb_set_speed(ui);
+			}
+			msm_hsusb_set_state(USB_STATE_ADDRESS);
+
+			/* write address delayed (will take effect
+			** after the next IN txn)
+			*/
+			writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR);
+			goto ack;
+		} else if (ctl.bRequest == USB_REQ_SET_FEATURE) {
+			switch (ctl.wValue) {
+			case USB_DEVICE_TEST_MODE:
+				switch (ctl.wIndex) {
+				case J_TEST:
+				case K_TEST:
+				case SE0_NAK_TEST:
+				case TST_PKT_TEST:
+					/* applied after the status stage in
+					 * ep0_setup_ack_complete() */
+					atomic_set(&ui->test_mode, ctl.wIndex);
+					goto ack;
+				}
+				goto stall;
+			case USB_DEVICE_REMOTE_WAKEUP:
+				atomic_set(&ui->remote_wakeup, 1);
+				goto ack;
+#ifdef CONFIG_USB_OTG
+			case USB_DEVICE_B_HNP_ENABLE:
+				ui->gadget.b_hnp_enable = 1;
+				goto ack;
+			case USB_DEVICE_A_HNP_SUPPORT:
+			case USB_DEVICE_A_ALT_HNP_SUPPORT:
+				/* B-devices compliant to OTG spec
+				 * Rev 2.0 are not required to
+				 * support these features.
+				 */
+				goto stall;
+#endif
+			}
+		} else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) &&
+				(ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) {
+			atomic_set(&ui->remote_wakeup, 0);
+			goto ack;
+		}
+	}
+
+	/* delegate if we get here */
+	if (ui->driver) {
+		ret = ui->driver->setup(&ui->gadget, &ctl);
+		if (ret >= 0)
+			return;
+	}
+
+stall:
+	/* stall ep0 on error */
+	ep0_setup_stall(ui);
+	return;
+
+ack:
+	ep0_setup_ack(ui);
+}
+
+/* Completion handler for one endpoint (called for each bit set in
+ * ENDPTCOMPLETE).  Retires finished dTDs in queue order: unmaps the
+ * buffer, fills in status/actual, and calls the gadget completion
+ * callback with ui->lock dropped.  Stops at the first still-ACTIVE dTD,
+ * and restarts the hardware when it reaches a request that was queued
+ * but never primed (!req->live).
+ */
+static void handle_endpoint(struct usb_info *ui, unsigned bit)
+{
+	struct msm_endpoint *ept = ui->ept + bit;
+	struct msm_request *req;
+	unsigned long flags;
+	unsigned info;
+
+	/*
+	INFO("handle_endpoint() %d %s req=%p(%08x)\n",
+		ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+		ept->req, ept->req ? ept->req->item_dma : 0);
+	*/
+
+	/* expire all requests that are no longer active */
+	spin_lock_irqsave(&ui->lock, flags);
+	while ((req = ept->req)) {
+		/* if we've processed all live requests, time to
+		 * restart the hardware on the next non-live request
+		 */
+		if (!req->live) {
+			usb_ept_start(ept);
+			break;
+		}
+
+		/* clean speculative fetches on req->item->info */
+		dma_coherent_post_ops();
+		info = req->item->info;
+		/* if the transaction is still in-flight, stop here */
+		if (info & INFO_ACTIVE)
+			break;
+
+		/* advance ept queue to the next request */
+		ept->req = req->next;
+		if (ept->req == 0)
+			ept->last = 0;
+
+		dma_unmap_single(NULL, req->dma, req->req.length,
+				 (ept->flags & EPT_FLAG_IN) ?
+				 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
+			/* XXX pass on more specific error code */
+			req->req.status = -EIO;
+			req->req.actual = 0;
+			dev_err(&ui->pdev->dev,
+				"ept %d %s error. info=%08x\n",
+			       ept->num,
+			       (ept->flags & EPT_FLAG_IN) ? "in" : "out",
+			       info);
+		} else {
+			req->req.status = 0;
+			/* bits 30:16 of info hold the residual byte count */
+			req->req.actual =
+				req->req.length - ((info >> 16) & 0x7FFF);
+		}
+		req->busy = 0;
+		req->live = 0;
+
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(&ept->ep, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+/* Hardware-side flush: cancel in-flight transactions for the endpoints
+ * in @bits, repeating until ENDPTSTAT confirms the endpoints are idle.
+ */
+static void flush_endpoint_hw(struct usb_info *ui, unsigned bits)
+{
+	/* flush endpoint, canceling transactions
+	** - this can take a "large amount of time" (per databook)
+	** - the flush can fail in some cases, thus we check STAT
+	**   and repeat if we're still operating
+	**   (does the fact that this doesn't use the tripwire matter?!)
+	*/
+	do {
+		writel(bits, USB_ENDPTFLUSH);
+		while (readl(USB_ENDPTFLUSH) & bits)
+			udelay(100);
+	} while (readl(USB_ENDPTSTAT) & bits);
+}
+
+/* Software-side flush: detach the whole request chain from @ept and
+ * complete every request with -ESHUTDOWN.  ui->lock is dropped around
+ * each completion callback, and req->next is sampled beforehand because
+ * the gadget driver may free the request inside its handler.
+ */
+static void flush_endpoint_sw(struct msm_endpoint *ept)
+{
+	struct usb_info *ui = ept->ui;
+	struct msm_request *req, *next_req = NULL;
+	unsigned long flags;
+
+	/* inactive endpoints have nothing to do here */
+	if (ept->ep.maxpacket == 0)
+		return;
+
+	/* put the queue head in a sane state */
+	ept->head->info = 0;
+	ept->head->next = TERMINATE;
+
+	/* cancel any pending requests */
+	spin_lock_irqsave(&ui->lock, flags);
+	req = ept->req;
+	ept->req = 0;
+	ept->last = 0;
+	while (req != 0) {
+		req->busy = 0;
+		req->live = 0;
+		req->req.status = -ESHUTDOWN;
+		req->req.actual = 0;
+
+		/* Gadget driver may free the request in completion
+		 * handler. So keep a copy of next req pointer
+		 * before calling completion handler.
+		 */
+		next_req = req->next;
+		if (req->req.complete) {
+			spin_unlock_irqrestore(&ui->lock, flags);
+			req->req.complete(&ept->ep, &req->req);
+			spin_lock_irqsave(&ui->lock, flags);
+		}
+		req = next_req;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+/* Cancel everything on one endpoint: abort the hardware transactions,
+ * then retire all queued software requests with -ESHUTDOWN.
+ */
+static void flush_endpoint(struct msm_endpoint *ept)
+{
+	unsigned mask = 1 << ept->bit;
+
+	flush_endpoint_hw(ept->ui, mask);
+	flush_endpoint_sw(ept);
+}
+
+/* Top-level device-mode ISR.  Acks USBSTS, then handles port change
+ * (STS_PCI), bus reset (STS_URI), suspend (STS_SLI) and transfer
+ * completion (STS_UI); UI fans SETUP packets out to handle_setup() and
+ * ENDPTCOMPLETE bits out to handle_endpoint().
+ */
+static irqreturn_t usb_interrupt(int irq, void *data)
+{
+	struct usb_info *ui = data;
+	unsigned n;
+	unsigned long flags;
+
+	n = readl(USB_USBSTS);
+	writel(n, USB_USBSTS);
+
+	/* somehow we got an IRQ while in the reset sequence: ignore it */
+	if (!atomic_read(&ui->running))
+		return IRQ_HANDLED;
+
+	if (n & STS_PCI) {
+		msm_hsusb_set_speed(ui);
+		if (atomic_read(&ui->configured)) {
+			wake_lock(&ui->wlock);
+
+			spin_lock_irqsave(&ui->lock, flags);
+			ui->usb_state = USB_STATE_CONFIGURED;
+			ui->flags = USB_FLAG_CONFIGURED;
+			spin_unlock_irqrestore(&ui->lock, flags);
+
+			/* NOTE(review): ui->driver is dereferenced without a
+			 * NULL check here (other call sites guard it) —
+			 * confirm PCI cannot fire before a driver is bound.
+			 */
+			ui->driver->resume(&ui->gadget);
+			schedule_work(&ui->work);
+		} else {
+			msm_hsusb_set_state(USB_STATE_DEFAULT);
+		}
+
+#ifdef CONFIG_USB_OTG
+		/* notify otg to clear A_BIDL_ADIS timer */
+		if (ui->gadget.is_a_peripheral)
+			otg_set_suspend(ui->xceiv, 0);
+#endif
+	}
+
+	if (n & STS_URI) {
+		dev_dbg(&ui->pdev->dev, "reset\n");
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->gadget.speed = USB_SPEED_UNKNOWN;
+		spin_unlock_irqrestore(&ui->lock, flags);
+#ifdef CONFIG_USB_OTG
+		/* notify otg to clear A_BIDL_ADIS timer */
+		if (ui->gadget.is_a_peripheral)
+			otg_set_suspend(ui->xceiv, 0);
+		spin_lock_irqsave(&ui->lock, flags);
+		/* Host request is persistent across reset */
+		ui->gadget.b_hnp_enable = 0;
+		ui->hnp_avail = 0;
+		spin_unlock_irqrestore(&ui->lock, flags);
+#endif
+		msm_hsusb_set_state(USB_STATE_DEFAULT);
+		atomic_set(&ui->remote_wakeup, 0);
+		if (!ui->gadget.is_a_peripheral)
+			schedule_delayed_work(&ui->chg_stop, 0);
+
+		writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT);
+		writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE);
+		writel(0xffffffff, USB_ENDPTFLUSH);
+		/* NOTE(review): only ENDPTCTRL(1) is cleared here — confirm
+		 * the remaining endpoints are re-enabled via SET_CONFIGURATION
+		 * and do not need clearing on bus reset.
+		 */
+		writel(0, USB_ENDPTCTRL(1));
+
+		wake_lock(&ui->wlock);
+		if (atomic_read(&ui->configured)) {
+			/* marking us offline will cause ept queue attempts
+			** to fail
+			*/
+			atomic_set(&ui->configured, 0);
+			/* Defer sending offline uevent to userspace */
+			atomic_set(&ui->offline_pending, 1);
+
+			/* XXX: we can't seem to detect going offline,
+			 * XXX:  so deconfigure on reset for the time being
+			 */
+			if (ui->driver) {
+				dev_dbg(&ui->pdev->dev,
+					"usb: notify offline\n");
+				ui->driver->disconnect(&ui->gadget);
+			}
+			/* cancel pending ep0 transactions */
+			flush_endpoint(&ui->ep0out);
+			flush_endpoint(&ui->ep0in);
+
+		}
+		/* Start phy stuck timer */
+		if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+			mod_timer(&phy_status_timer, PHY_STATUS_CHECK_DELAY);
+	}
+
+	if (n & STS_SLI) {
+		dev_dbg(&ui->pdev->dev, "suspend\n");
+
+		spin_lock_irqsave(&ui->lock, flags);
+		ui->usb_state = USB_STATE_SUSPENDED;
+		ui->flags = USB_FLAG_SUSPEND;
+		spin_unlock_irqrestore(&ui->lock, flags);
+
+		ui->driver->suspend(&ui->gadget);
+		schedule_work(&ui->work);
+#ifdef CONFIG_USB_OTG
+		/* notify otg for
+		 * 1. kicking A_BIDL_ADIS timer in case of A-peripheral
+		 * 2. disabling pull-up and kicking B_ASE0_RST timer
+		 */
+		if (ui->gadget.b_hnp_enable || ui->gadget.is_a_peripheral)
+			otg_set_suspend(ui->xceiv, 1);
+#endif
+	}
+
+	if (n & STS_UI) {
+		n = readl(USB_ENDPTSETUPSTAT);
+		if (n & EPT_RX(0))
+			handle_setup(ui);
+
+		n = readl(USB_ENDPTCOMPLETE);
+		writel(n, USB_ENDPTCOMPLETE);
+		while (n) {
+			unsigned bit = __ffs(n);
+			handle_endpoint(ui, bit);
+			n = n & (~(1 << bit));
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+/* One-time software initialization: zero the queue-head page, set up the
+ * 32 endpoint descriptors, allocate the shared ep0 setup request and
+ * register the deferred-work handlers.
+ */
+static void usb_prepare(struct usb_info *ui)
+{
+	spin_lock_init(&ui->lock);
+
+	memset(ui->buf, 0, 4096);
+	ui->head = (void *) (ui->buf + 0);
+
+	/* only important for reset/reinit */
+	memset(ui->ept, 0, sizeof(ui->ept));
+	ui->next_item = 0;
+	ui->next_ifc_num = 0;
+
+	init_endpoints(ui);
+
+	/* ep0 always runs at the control-endpoint max of 64 bytes */
+	ui->ep0in.ep.maxpacket = 64;
+	ui->ep0out.ep.maxpacket = 64;
+
+	/* NOTE(review): allocation failure is not checked here; a NULL
+	 * setup_req would be dereferenced on the first ep0 transfer —
+	 * confirm this GFP_KERNEL allocation is assumed infallible.
+	 */
+	ui->setup_req =
+		usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL);
+
+	INIT_WORK(&ui->work, usb_do_work);
+	INIT_DELAYED_WORK(&ui->chg_det, usb_chg_detect);
+	INIT_DELAYED_WORK(&ui->chg_stop, usb_chg_stop);
+	INIT_DELAYED_WORK(&ui->rw_work, usb_do_remote_wakeup);
+	if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+		INIT_WORK(&ui->phy_status_check, usb_phy_stuck_recover);
+}
+
+/* Reset and reprogram the controller for device mode.  The PHY is only
+ * reset when not acting as an A-peripheral (PHY reset takes >= 100 ms,
+ * too long during HNP).  Installs the endpoint list, reconfigures all
+ * queue heads, notifies the gadget driver it is offline, flushes ep0 and
+ * re-enables interrupts before marking the controller running again.
+ */
+static void usb_reset(struct usb_info *ui)
+{
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+	dev_dbg(&ui->pdev->dev, "reset controller\n");
+
+	atomic_set(&ui->running, 0);
+
+	/*
+	 * PHY reset takes minimum 100 msec. Hence reset only link
+	 * during HNP. Reset PHY and link in B-peripheral mode.
+	 */
+	if (ui->gadget.is_a_peripheral)
+		otg->reset(ui->xceiv, 0);
+	else
+		otg->reset(ui->xceiv, 1);
+
+	/* set usb controller interrupt threshold to zero*/
+	writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0),
+							USB_USBCMD);
+
+	writel(ui->dma, USB_ENDPOINTLISTADDR);
+
+	configure_endpoints(ui);
+
+	/* marking us offline will cause ept queue attempts to fail */
+	atomic_set(&ui->configured, 0);
+
+	if (ui->driver) {
+		dev_dbg(&ui->pdev->dev, "usb: notify offline\n");
+		ui->driver->disconnect(&ui->gadget);
+	}
+
+	/* cancel pending ep0 transactions */
+	flush_endpoint(&ui->ep0out);
+	flush_endpoint(&ui->ep0in);
+
+	/* enable interrupts */
+	writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR);
+
+	/* Ensure that h/w RESET is completed before returning */
+	mb();
+
+	atomic_set(&ui->running, 1);
+}
+
+/* Kick the state machine: latch USB_FLAG_START under the lock and
+ * schedule the worker that performs the actual bring-up.
+ */
+static void usb_start(struct usb_info *ui)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&ui->lock, irq_flags);
+	ui->flags |= USB_FLAG_START;
+	schedule_work(&ui->work);
+	spin_unlock_irqrestore(&ui->lock, irq_flags);
+}
+
+/* Tear down everything probe set up and pass @ret back to the caller.
+ * Safe with partially-initialized state: each resource is checked before
+ * release.  Note: free_irq() must receive the same dev_id cookie that
+ * request_irq() was given (this driver registers with 'ui', see the
+ * request_irq()/free_irq() calls in usb_do_work); passing 0 would fail
+ * to remove a shared-IRQ handler.
+ */
+static int usb_free(struct usb_info *ui, int ret)
+{
+	dev_dbg(&ui->pdev->dev, "usb_free(%d)\n", ret);
+
+	if (ui->xceiv)
+		otg_put_transceiver(ui->xceiv);
+
+	if (ui->irq)
+		free_irq(ui->irq, ui);
+	if (ui->pool)
+		dma_pool_destroy(ui->pool);
+	if (ui->dma)
+		dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma);
+	kfree(ui);
+	return ret;
+}
+
+/* Sample VBUS under the lock and latch the matching ONLINE/OFFLINE flag
+ * for the state-machine worker to consume.
+ */
+static void usb_do_work_check_vbus(struct usb_info *ui)
+{
+	unsigned long irq_flags;
+	unsigned vbus_flag;
+
+	spin_lock_irqsave(&ui->lock, irq_flags);
+	vbus_flag = is_usb_online(ui) ?
+			USB_FLAG_VBUS_ONLINE : USB_FLAG_VBUS_OFFLINE;
+	ui->flags |= vbus_flag;
+	spin_unlock_irqrestore(&ui->lock, irq_flags);
+}
+
+/* State-machine worker (IDLE / ONLINE / OFFLINE).  Loops until ui->flags
+ * is empty; each iteration atomically consumes the accumulated flag set
+ * together with the current VBUS reading.  Transitions cover IRQ
+ * setup/teardown, D+ pullup control, charger detection scheduling and
+ * power accounting via the OTG transceiver.
+ */
+static void usb_do_work(struct work_struct *w)
+{
+	struct usb_info *ui = container_of(w, struct usb_info, work);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	unsigned long iflags;
+	unsigned flags, _vbus;
+
+	for (;;) {
+		spin_lock_irqsave(&ui->lock, iflags);
+		flags = ui->flags;
+		ui->flags = 0;
+		_vbus = is_usb_online(ui);
+		spin_unlock_irqrestore(&ui->lock, iflags);
+
+		/* give up if we have nothing to do */
+		if (flags == 0)
+			break;
+
+		switch (ui->state) {
+		case USB_STATE_IDLE:
+			if (flags & USB_FLAG_START) {
+				int ret;
+
+				if (!_vbus) {
+					ui->state = USB_STATE_OFFLINE;
+					break;
+				}
+
+				pm_runtime_get_noresume(&ui->pdev->dev);
+				pm_runtime_resume(&ui->pdev->dev);
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: IDLE -> ONLINE\n");
+				usb_reset(ui);
+				ret = request_irq(otg->irq, usb_interrupt,
+							IRQF_SHARED,
+							ui->pdev->name, ui);
+				/* FIXME: should we call BUG_ON when
+				 * request irq fails
+				 */
+				if (ret) {
+					dev_err(&ui->pdev->dev,
+						"hsusb: peripheral: request irq"
+						" failed:(%d)", ret);
+					break;
+				}
+				ui->irq = otg->irq;
+				ui->state = USB_STATE_ONLINE;
+				usb_do_work_check_vbus(ui);
+
+				if (!atomic_read(&ui->softconnect))
+					break;
+
+				msm72k_pullup_internal(&ui->gadget, 1);
+
+				if (!ui->gadget.is_a_peripheral)
+					schedule_delayed_work(
+							&ui->chg_det,
+							USB_CHG_DET_DELAY);
+
+			}
+			break;
+		case USB_STATE_ONLINE:
+			if (atomic_read(&ui->offline_pending)) {
+				switch_set_state(&ui->sdev, 0);
+				atomic_set(&ui->offline_pending, 0);
+			}
+
+			/* If at any point when we were online, we received
+			 * the signal to go offline, we must honor it
+			 */
+			if (flags & USB_FLAG_VBUS_OFFLINE) {
+
+				ui->chg_current = 0;
+				/* wait in case chg_detect is running */
+				if (!ui->gadget.is_a_peripheral)
+					cancel_delayed_work_sync(&ui->chg_det);
+
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: ONLINE -> OFFLINE\n");
+
+				atomic_set(&ui->running, 0);
+				atomic_set(&ui->remote_wakeup, 0);
+				atomic_set(&ui->configured, 0);
+
+				if (ui->driver) {
+					dev_dbg(&ui->pdev->dev,
+						"usb: notify offline\n");
+					ui->driver->disconnect(&ui->gadget);
+				}
+				/* cancel pending ep0 transactions */
+				flush_endpoint(&ui->ep0out);
+				flush_endpoint(&ui->ep0in);
+
+				/* synchronize with irq context */
+				spin_lock_irqsave(&ui->lock, iflags);
+#ifdef CONFIG_USB_OTG
+				ui->gadget.host_request = 0;
+				ui->gadget.b_hnp_enable = 0;
+				ui->hnp_avail = 0;
+#endif
+				msm72k_pullup_internal(&ui->gadget, 0);
+				spin_unlock_irqrestore(&ui->lock, iflags);
+
+
+				/* if charger is initialized to known type
+				 * we must let modem know about charger
+				 * disconnection
+				 */
+				otg_set_power(ui->xceiv, 0);
+
+				if (ui->irq) {
+					free_irq(ui->irq, ui);
+					ui->irq = 0;
+				}
+
+
+				switch_set_state(&ui->sdev, 0);
+
+				ui->state = USB_STATE_OFFLINE;
+				usb_do_work_check_vbus(ui);
+				pm_runtime_put_noidle(&ui->pdev->dev);
+				pm_runtime_suspend(&ui->pdev->dev);
+				wake_unlock(&ui->wlock);
+				break;
+			}
+			if (flags & USB_FLAG_SUSPEND) {
+				int maxpower = usb_get_max_power(ui);
+
+				if (maxpower < 0)
+					break;
+
+				otg_set_power(ui->xceiv, 0);
+				/* To support TCXO during bus suspend
+				 * This might be dummy check since bus suspend
+				 * is not implemented as of now
+				 * */
+				if (release_wlocks)
+					wake_unlock(&ui->wlock);
+
+				/* TBD: Initiate LPM at usb bus suspend */
+				break;
+			}
+			if (flags & USB_FLAG_CONFIGURED) {
+				int maxpower = usb_get_max_power(ui);
+
+				/* We may come here even when no configuration
+				 * is selected. Send online/offline event
+				 * accordingly.
+				 */
+				switch_set_state(&ui->sdev,
+						atomic_read(&ui->configured));
+
+				if (maxpower < 0)
+					break;
+
+				ui->chg_current = maxpower;
+				otg_set_power(ui->xceiv, maxpower);
+				break;
+			}
+			if (flags & USB_FLAG_RESET) {
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: ONLINE -> RESET\n");
+				msm72k_pullup_internal(&ui->gadget, 0);
+				usb_reset(ui);
+				msm72k_pullup_internal(&ui->gadget, 1);
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: RESET -> ONLINE\n");
+				break;
+			}
+			break;
+		case USB_STATE_OFFLINE:
+			/* If we were signaled to go online and vbus is still
+			 * present when we received the signal, go online.
+			 */
+			if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) {
+				int ret;
+
+				pm_runtime_get_noresume(&ui->pdev->dev);
+				pm_runtime_resume(&ui->pdev->dev);
+				dev_dbg(&ui->pdev->dev,
+					"msm72k_udc: OFFLINE -> ONLINE\n");
+
+				usb_reset(ui);
+				ui->state = USB_STATE_ONLINE;
+				usb_do_work_check_vbus(ui);
+				ret = request_irq(otg->irq, usb_interrupt,
+							IRQF_SHARED,
+							ui->pdev->name, ui);
+				/* FIXME: should we call BUG_ON when
+				 * request irq fails
+				 */
+				if (ret) {
+					dev_err(&ui->pdev->dev,
+						"hsusb: peripheral: request irq"
+						" failed:(%d)", ret);
+					break;
+				}
+				ui->irq = otg->irq;
+				enable_irq_wake(otg->irq);
+
+				if (!atomic_read(&ui->softconnect))
+					break;
+				msm72k_pullup_internal(&ui->gadget, 1);
+
+				if (!ui->gadget.is_a_peripheral)
+					schedule_delayed_work(
+							&ui->chg_det,
+							USB_CHG_DET_DELAY);
+			}
+			break;
+		}
+	}
+}
+
+/* FIXME - the callers of this function should use a gadget API instead.
+ * This is called from htc_battery.c and board-halibut.c
+ * WARNING - this can get called before this driver is initialized.
+ */
+/* Latch a VBUS connect/disconnect into the state machine.  No-op if the
+ * reported state matches what we already believe.  From interrupt
+ * context the worker is merely scheduled; from process context
+ * usb_do_work() is invoked directly — note the asymmetric unlock paths
+ * below (the direct call drops the lock first and returns early).
+ */
+void msm_hsusb_set_vbus_state(int online)
+{
+	unsigned long flags;
+	struct usb_info *ui = the_usb_info;
+
+	if (!ui) {
+		pr_err("%s called before driver initialized\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	if (is_usb_online(ui) ==  online)
+		goto out;
+
+	if (online) {
+		ui->usb_state = USB_STATE_POWERED;
+		ui->flags |= USB_FLAG_VBUS_ONLINE;
+	} else {
+		ui->gadget.speed = USB_SPEED_UNKNOWN;
+		ui->usb_state = USB_STATE_NOTATTACHED;
+		ui->flags |= USB_FLAG_VBUS_OFFLINE;
+	}
+	if (in_interrupt()) {
+		schedule_work(&ui->work);
+	} else {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		usb_do_work(&ui->work);
+		return;
+	}
+out:
+	spin_unlock_irqrestore(&ui->lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* Force the host to re-enumerate us: drop the Run/Stop bit (which
+ * removes the D+ pullup) for 10 ms, then set it again.
+ */
+void usb_function_reenumerate(void)
+{
+	struct usb_info *ui = the_usb_info;
+
+	/* disable and re-enable the D+ pullup */
+	dev_dbg(&ui->pdev->dev, "disable pullup\n");
+	writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+
+	msleep(10);
+
+	dev_dbg(&ui->pdev->dev, "enable pullup\n");
+	writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+}
+
+/* Scratch page for debugfs reads.
+ * NOTE(review): shared by all readers without its own lock (it is only
+ * filled under ui->lock, but copied out after unlock) — concurrent reads
+ * may interleave; presumably acceptable for a debug-only interface.
+ */
+static char debug_buffer[PAGE_SIZE];
+
+/* debugfs "status": snapshot the controller registers, every active
+ * endpoint's queue head and its pending request chain into debug_buffer
+ * (under ui->lock), then copy out via simple_read_from_buffer.
+ */
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	char *buf = debug_buffer;
+	unsigned long flags;
+	struct msm_endpoint *ept;
+	struct msm_request *req;
+	int n;
+	int i = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		   "regs: setup=%08x prime=%08x stat=%08x done=%08x\n",
+		   readl(USB_ENDPTSETUPSTAT),
+		   readl(USB_ENDPTPRIME),
+		   readl(USB_ENDPTSTAT),
+		   readl(USB_ENDPTCOMPLETE));
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+		   "regs:   cmd=%08x   sts=%08x intr=%08x port=%08x\n\n",
+		   readl(USB_USBCMD),
+		   readl(USB_USBSTS),
+		   readl(USB_USBINTR),
+		   readl(USB_PORTSC));
+
+
+	for (n = 0; n < 32; n++) {
+		ept = ui->ept + n;
+		/* maxpacket == 0 means the endpoint slot is unused */
+		if (ept->ep.maxpacket == 0)
+			continue;
+
+		i += scnprintf(buf + i, PAGE_SIZE - i,
+			"ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n",
+			ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out",
+			ept->head->config, ept->head->active,
+			ept->head->next, ept->head->info);
+
+		for (req = ept->req; req; req = req->next)
+			i += scnprintf(buf + i, PAGE_SIZE - i,
+			"  req @%08x next=%08x info=%08x page0=%08x %c %c\n",
+				req->item_dma, req->item->next,
+				req->item->info, req->item->page0,
+				req->busy ? 'B' : ' ',
+				req->live ? 'L' : ' ');
+	}
+
+	i += scnprintf(buf + i, PAGE_SIZE - i,
+			   "phy failure count: %d\n", ui->phy_fail_count);
+
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
+}
+
+/* debugfs "reset": any write schedules a full controller reset via the
+ * state-machine worker; the written bytes are ignored.
+ */
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_info *ui = file->private_data;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&ui->lock, irq_flags);
+	ui->flags |= USB_FLAG_RESET;
+	schedule_work(&ui->work);
+	spin_unlock_irqrestore(&ui->lock, irq_flags);
+
+	return count;
+}
+
+/* debugfs "cycle": any write toggles the D+ pullup to force the host to
+ * re-enumerate; the written bytes are ignored.
+ */
+static ssize_t debug_write_cycle(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	usb_function_reenumerate();
+	return count;
+}
+
+/* Stash the usb_info pointer (debugfs i_private) for the read/write ops. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* File operations for the debugfs entries created in usb_debugfs_init(). */
+const struct file_operations debug_stat_ops = {
+	.open = debug_open,
+	.read = debug_read_status,
+};
+
+const struct file_operations debug_reset_ops = {
+	.open = debug_open,
+	.write = debug_write_reset,
+};
+
+const struct file_operations debug_cycle_ops = {
+	.open = debug_open,
+	.write = debug_write_cycle,
+};
+
+/* debugfs read: report the current value of release_wlocks ("0"/"1").
+ * simple_read_from_buffer() honours *ppos, so a reader gets EOF on the
+ * second read; the old open-coded copy_to_user() ignored *ppos and made
+ * "cat" on this node loop forever.
+ */
+static ssize_t debug_read_release_wlocks(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	char kbuf[10];
+	size_t c;
+
+	memset(kbuf, 0, sizeof(kbuf));
+
+	c = scnprintf(kbuf, sizeof(kbuf), "%d", release_wlocks);
+
+	return simple_read_from_buffer(ubuf, count, ppos, kbuf, c);
+}
+/* debugfs write: parse a decimal value and set release_wlocks to 0/1.
+ * The copy is capped at sizeof(kbuf) - 1 so the buffer is always
+ * NUL-terminated; the old code copied up to 10 bytes into the 10-byte
+ * buffer, letting strict_strtol() read past the end for long writes.
+ */
+static ssize_t debug_write_release_wlocks(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	char kbuf[10];
+	long temp;
+
+	memset(kbuf, 0, sizeof(kbuf));
+
+	if (copy_from_user(kbuf, buf,
+			count > sizeof(kbuf) - 1 ? sizeof(kbuf) - 1 : count))
+		return -EFAULT;
+
+	if (strict_strtol(kbuf, 10, &temp))
+		return -EINVAL;
+
+	release_wlocks = temp ? 1 : 0;
+
+	return count;
+}
+/* open() for the release_wlocks entry: stash i_private like debug_open(). */
+static int debug_wake_lock_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+const struct file_operations debug_wlocks_ops = {
+	.open = debug_wake_lock_open,
+	.read = debug_read_release_wlocks,
+	.write = debug_write_release_wlocks,
+};
+/* Create a debugfs directory (named after the platform device) holding
+ * the status/reset/cycle/release_wlocks entries.  debugfs_create_dir()
+ * returns NULL on allocation failure and an ERR_PTR when debugfs is
+ * compiled out, so both cases must be rejected; the old IS_ERR()-only
+ * check let a NULL parent reach debugfs_create_file().
+ */
+static void usb_debugfs_init(struct usb_info *ui)
+{
+	struct dentry *dent;
+	dent = debugfs_create_dir(dev_name(&ui->pdev->dev), NULL);
+	if (IS_ERR_OR_NULL(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops);
+	debugfs_create_file("reset", 0222, dent, ui, &debug_reset_ops);
+	debugfs_create_file("cycle", 0222, dent, ui, &debug_cycle_ops);
+	debugfs_create_file("release_wlocks", 0666, dent, ui,
+						&debug_wlocks_ops);
+}
+#else
+static void usb_debugfs_init(struct usb_info *ui) {}
+#endif
+
+/* usb_ep_ops.enable: program the endpoint from its descriptor and arm
+ * it in hardware.  NOTE(review): desc is not validated here; callers
+ * are trusted to pass a sane endpoint descriptor.
+ */
+static int
+msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	unsigned char ep_type =
+			desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	_ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+	config_ept(ept);
+	ept->wedged = 0;
+	usb_ept_enable(ept, 1, ep_type);
+	return 0;
+}
+
+/* usb_ep_ops.disable: stop the endpoint and flush any queued requests. */
+static int msm72k_disable(struct usb_ep *_ep)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+
+	usb_ept_enable(ept, 0, 0);
+	flush_endpoint(ept);
+	return 0;
+}
+
+/* usb_ep_ops.alloc_request: allocate a request (no buffer pre-allocated). */
+static struct usb_request *
+msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags);
+}
+
+/* usb_ep_ops.free_request: release a request from msm72k_alloc_request().
+ * The driver-owned buffer (req->alloced) and the DMA transfer descriptor
+ * are freed along with the request itself.
+ */
+static void
+msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct msm_request *req = to_msm_request(_req);
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	struct usb_info *ui = ept->ui;
+
+	/* request should not be busy */
+	BUG_ON(req->busy);
+	if (req->alloced)
+		kfree(req->req.buf);
+	dma_pool_free(ui->pool, req->item, req->item_dma);
+	kfree(req);
+}
+
+/* usb_ep_ops.queue: hand a request to the controller.  ep0-IN gets
+ * special handling: completion is intercepted so an ACK can be queued
+ * before the gadget's own complete() runs, and the request is rerouted
+ * to ep0out when the current control transfer is OUT-directed.
+ */
+static int
+msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct msm_endpoint *ep = to_msm_endpoint(_ep);
+	struct usb_info *ui = ep->ui;
+
+	if (ep == &ui->ep0in) {
+		struct msm_request *r = to_msm_request(req);
+		/* zero-length ep0 requests need no completion rewrite */
+		if (!req->length)
+			goto ep_queue_done;
+		r->gadget_complete = req->complete;
+		/* ep0_queue_ack_complete queue a receive for ACK before
+		** calling req->complete
+		*/
+		req->complete = ep0_queue_ack_complete;
+		if (atomic_read(&ui->ep0_dir) == USB_DIR_OUT)
+			ep = &ui->ep0out;
+		goto ep_queue_done;
+	}
+
+ep_queue_done:
+	return usb_ept_queue_xfer(ep, req);
+}
+
+/* usb_ep_ops.dequeue: abort one queued request.  Flushes the endpoint,
+ * unlinks the request from both the software list and the hardware TD
+ * chain, completes it with -ECONNRESET, and reprimes any remaining
+ * requests.  ui->lock is dropped around the completion callback, as
+ * gadget completion handlers may requeue.
+ */
+static int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct msm_endpoint *ep = to_msm_endpoint(_ep);
+	struct msm_request *req = to_msm_request(_req);
+	struct usb_info *ui = ep->ui;
+
+	struct msm_request *temp_req;
+	unsigned long flags;
+
+	if (!(ui && req && ep->req))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (!req->busy) {
+		dev_dbg(&ui->pdev->dev, "%s: !req->busy\n", __func__);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return -EINVAL;
+	}
+	/* Stop the transfer */
+	do {
+		writel((1 << ep->bit), USB_ENDPTFLUSH);
+		while (readl(USB_ENDPTFLUSH) & (1 << ep->bit))
+			udelay(100);
+	} while (readl(USB_ENDPTSTAT) & (1 << ep->bit));
+
+	req->req.status = 0;
+	req->busy = 0;
+
+	/* unlink from the software queue and the hardware TD chain */
+	if (ep->req == req) {
+		ep->req = req->next;
+		ep->head->next = req->item->next;
+	} else {
+		req->prev->next = req->next;
+		if (req->next)
+			req->next->prev = req->prev;
+		req->prev->item->next = req->item->next;
+	}
+
+	if (!req->next)
+		ep->last = req->prev;
+
+	/* initialize request to default */
+	req->item->next = TERMINATE;
+	req->item->info = 0;
+	req->live = 0;
+	dma_unmap_single(NULL, req->dma, req->req.length,
+		(ep->flags & EPT_FLAG_IN) ?
+		DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	if (req->req.complete) {
+		req->req.status = -ECONNRESET;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		req->req.complete(&ep->ep, &req->req);
+		spin_lock_irqsave(&ui->lock, flags);
+	}
+
+	if (!req->live) {
+		/* Reprime the endpoint for the remaining transfers */
+		for (temp_req = ep->req ; temp_req ; temp_req = temp_req->next)
+			temp_req->live = 0;
+		if (ep->req)
+			usb_ept_start(ep);
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+	return 0;
+}
+
+/* Stall (value != 0) or clear the stall on an endpoint by updating the
+ * TXS/RXS bits in ENDPTCTRL; clearing a stall also sets the data-toggle
+ * reset bit (TXR/RXR) and drops any wedge on the endpoint.
+ */
+static int
+usb_ept_set_halt(struct usb_ep *_ep, int value)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	struct usb_info *ui = ept->ui;
+	unsigned long irqflags;
+	unsigned int ctrl;
+
+	spin_lock_irqsave(&ui->lock, irqflags);
+
+	ctrl = readl(USB_ENDPTCTRL(ept->num));
+
+	if (ept->flags & EPT_FLAG_IN) {
+		if (value)
+			ctrl |= CTRL_TXS;
+		else
+			ctrl = (ctrl & ~CTRL_TXS) | CTRL_TXR;
+	} else {
+		if (value)
+			ctrl |= CTRL_RXS;
+		else
+			ctrl = (ctrl & ~CTRL_RXS) | CTRL_RXR;
+	}
+	writel(ctrl, USB_ENDPTCTRL(ept->num));
+	if (!value)
+		ept->wedged = 0;
+	spin_unlock_irqrestore(&ui->lock, irqflags);
+
+	return 0;
+}
+
+/* usb_ep_ops.set_halt: refuse to halt an IN endpoint while transfers
+ * are still queued (callers retry on -EAGAIN); otherwise defer to
+ * usb_ept_set_halt().
+ */
+static int
+msm72k_set_halt(struct usb_ep *_ep, int value)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+	unsigned int in = ept->flags & EPT_FLAG_IN;
+
+	if (value && in && ept->req)
+		return -EAGAIN;
+
+	usb_ept_set_halt(_ep, value);
+
+	return 0;
+}
+
+/* usb_ep_ops.fifo_status: not supported by this controller driver. */
+static int
+msm72k_fifo_status(struct usb_ep *_ep)
+{
+	return -EOPNOTSUPP;
+}
+
+/* usb_ep_ops.fifo_flush: drop all queued requests on the endpoint. */
+static void
+msm72k_fifo_flush(struct usb_ep *_ep)
+{
+	flush_endpoint(to_msm_endpoint(_ep));
+}
+/* usb_ep_ops.set_wedge: halt the endpoint and mark it wedged so the
+ * stall persists until explicitly cleared.  ep0 cannot be wedged.
+ */
+static int msm72k_set_wedge(struct usb_ep *_ep)
+{
+	struct msm_endpoint *ept = to_msm_endpoint(_ep);
+
+	if (ept->num == 0)
+		return -EINVAL;
+
+	ept->wedged = 1;
+
+	return msm72k_set_halt(_ep, 1);
+}
+
+/* Endpoint operations exposed to the gadget framework. */
+static const struct usb_ep_ops msm72k_ep_ops = {
+	.enable		= msm72k_enable,
+	.disable	= msm72k_disable,
+
+	.alloc_request	= msm72k_alloc_request,
+	.free_request	= msm72k_free_request,
+
+	.queue		= msm72k_queue,
+	.dequeue	= msm72k_dequeue,
+
+	.set_halt	= msm72k_set_halt,
+	.set_wedge	= msm72k_set_wedge,
+	.fifo_status	= msm72k_fifo_status,
+	.fifo_flush	= msm72k_fifo_flush,
+};
+
+/* usb_gadget_ops.get_frame: report the current USB frame number. */
+static int msm72k_get_frame(struct usb_gadget *_gadget)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned int frindex = readl(USB_FRINDEX);
+
+	/* FRINDEX bits 13:3 hold the frame number */
+	return (frindex >> 3) & 0x7FF;
+}
+
+/* VBUS reporting logically comes from a transceiver */
+static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+	if (is_active || atomic_read(&otg->chg_type)
+					 == USB_CHG_TYPE__WALLCHARGER)
+		wake_lock(&ui->wlock);
+
+	msm_hsusb_set_vbus_state(is_active);
+	return 0;
+}
+
+/* SW workarounds
+Issue #1	- USB Spoof Disconnect Failure
+Symptom	- Writing 0 to run/stop bit of USBCMD doesn't cause disconnect
+SW workaround	- Making opmode non-driving and SuspendM set in function
+		register of SMSC phy
+*/
+/* drivers may have software control over D+ pullup */
+/* Set (is_active) or clear the D+ pull-up via the run/stop bit.  The
+ * pull-up is only enabled when VBUS is present and a gadget driver is
+ * bound; disabling also applies the Issue #1 workaround (see comment
+ * above) by writing the SMSC phy function register over ULPI.
+ */
+static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+
+	if (is_active) {
+		spin_lock_irqsave(&ui->lock, flags);
+		if (is_usb_online(ui) && ui->driver)
+			writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD);
+		spin_unlock_irqrestore(&ui->lock, flags);
+	} else {
+		writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD);
+		/* S/W workaround, Issue#1 */
+		otg_io_write(ui->xceiv, 0x48, 0x04);
+	}
+
+	/* Ensure pull-up operation is completed before returning */
+	mb();
+
+	return 0;
+}
+
+/* usb_gadget_ops.pullup: record the soft-connect state and apply it,
+ * unless the cable is detached or no gadget driver is bound.  A
+ * connect also kicks off delayed charger-type detection when we are
+ * not acting as an OTG A-peripheral.
+ */
+static int msm72k_pullup(struct usb_gadget *_gadget, int is_active)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+
+
+	atomic_set(&ui->softconnect, is_active);
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL) {
+		spin_unlock_irqrestore(&ui->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	msm72k_pullup_internal(_gadget, is_active);
+
+	if (is_active && !ui->gadget.is_a_peripheral)
+		schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY);
+
+	return 0;
+}
+
+/* usb_gadget_ops.wakeup: drive remote wakeup toward the host.  Only
+ * legal when the host enabled remote wakeup and the device is
+ * configured.  The OTG irq is masked around the forced port resume
+ * (PORTSC_FPR) -- NOTE(review): presumably so the resume is not
+ * handled as an interrupt mid-sequence; confirm against the OTG isr.
+ */
+static int msm72k_wakeup(struct usb_gadget *_gadget)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+
+	if (!atomic_read(&ui->remote_wakeup)) {
+		dev_err(&ui->pdev->dev,
+			"%s: remote wakeup not supported\n", __func__);
+		return -ENOTSUPP;
+	}
+
+	if (!atomic_read(&ui->configured)) {
+		dev_err(&ui->pdev->dev,
+			"%s: device is not configured\n", __func__);
+		return -ENODEV;
+	}
+	otg_set_suspend(ui->xceiv, 0);
+
+	disable_irq(otg->irq);
+
+	if (!is_usb_active())
+		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+
+	/* Ensure that USB port is resumed before enabling the IRQ */
+	mb();
+
+	enable_irq(otg->irq);
+
+	return 0;
+}
+
+/* when Gadget is configured, it will indicate how much power
+ * can be pulled from vbus, as specified in configuiration descriptor
+ */
+/* usb_gadget_ops.vbus_draw: record the configured bMaxPower budget and
+ * let the worker apply it (USB_FLAG_CONFIGURED).
+ */
+static int msm72k_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+
+
+	spin_lock_irqsave(&ui->lock, flags);
+	ui->b_max_pow = mA;
+	ui->flags = USB_FLAG_CONFIGURED;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	schedule_work(&ui->work);
+
+	return 0;
+}
+
+/* usb_gadget_ops.set_selfpowered: only boards whose platform data
+ * declares self_powered may claim self-powered status; reverting to
+ * bus-powered is always allowed.
+ */
+static int msm72k_set_selfpowered(struct usb_gadget *_gadget, int set)
+{
+	struct usb_info *ui = container_of(_gadget, struct usb_info, gadget);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (set) {
+		if (ui->pdata && ui->pdata->self_powered)
+			atomic_set(&ui->self_powered, 1);
+		else
+			ret = -EOPNOTSUPP;
+	} else {
+		/* We can always work as a bus powered device */
+		atomic_set(&ui->self_powered, 0);
+	}
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return ret;
+
+}
+
+/* Gadget-level operations exposed to the gadget framework. */
+static const struct usb_gadget_ops msm72k_ops = {
+	.get_frame	= msm72k_get_frame,
+	.vbus_session	= msm72k_udc_vbus_session,
+	.vbus_draw	= msm72k_udc_vbus_draw,
+	.pullup		= msm72k_pullup,
+	.wakeup		= msm72k_wakeup,
+	.set_selfpowered = msm72k_set_selfpowered,
+};
+
+/* Workqueue wrapper: issue remote wakeup from process context. */
+static void usb_do_remote_wakeup(struct work_struct *w)
+{
+	struct usb_info *ui = the_usb_info;
+
+	msm72k_wakeup(&ui->gadget);
+}
+
+/* sysfs "wakeup" store: any write triggers remote wakeup; the return
+ * value of msm72k_wakeup() is deliberately not propagated.
+ */
+static ssize_t usb_remote_wakeup(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_info *ui = the_usb_info;
+
+	msm72k_wakeup(&ui->gadget);
+
+	return count;
+}
+
+/* sysfs "usb_state" show: print the current device state by name.
+ * The table mirrors enum usb_device_state (ch9.h), where
+ * USB_STATE_RECONNECTING (3) precedes USB_STATE_UNAUTHENTICATED (4);
+ * the old table had the two swapped and printed the wrong name for
+ * those states.  Out-of-range values print "UNKNOWN" instead of
+ * indexing past the table.
+ */
+static ssize_t show_usb_state(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	size_t i;
+	unsigned s = msm_hsusb_get_state();
+	char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED",
+			"USB_STATE_POWERED", "USB_STATE_RECONNECTING",
+			"USB_STATE_UNAUTHENTICATED", "USB_STATE_DEFAULT",
+			"USB_STATE_ADDRESS", "USB_STATE_CONFIGURED",
+			"USB_STATE_SUSPENDED"
+	};
+
+	if (s >= ARRAY_SIZE(state))
+		return scnprintf(buf, PAGE_SIZE, "UNKNOWN\n");
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", state[s]);
+	return i;
+}
+
+/* sysfs "usb_speed" show: print the negotiated bus speed by name.
+ * NOTE(review): assumes ui->gadget.speed never exceeds USB_SPEED_HIGH;
+ * a larger enum value would index past speed[].
+ */
+static ssize_t show_usb_speed(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	size_t i;
+	char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW",
+			"USB_SPEED_FULL", "USB_SPEED_HIGH"};
+
+	i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->gadget.speed]);
+	return i;
+}
+
+/* sysfs "chg_current" store: set the charging current (mA) and push it
+ * to the transceiver.  Rejected while acting as an OTG A-peripheral.
+ */
+static ssize_t store_usb_chg_current(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long mA;
+
+	if (ui->gadget.is_a_peripheral)
+		return -EINVAL;
+
+	if (strict_strtoul(buf, 10, &mA))
+		return -EINVAL;
+
+	ui->chg_current = mA;
+	otg_set_power(ui->xceiv, mA);
+
+	return count;
+}
+
+/* sysfs "chg_current" show: report the last-set charging current (mA). */
+static ssize_t show_usb_chg_current(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	size_t count;
+
+	count = sprintf(buf, "%d", ui->chg_current);
+
+	return count;
+}
+
+/* sysfs "chg_type" show: print the detected charger type by name.
+ * NOTE(review): assumes otg->chg_type stays within the four listed
+ * values; confirm against the charger-detection code.
+ */
+static ssize_t show_usb_chg_type(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	struct msm_otg *otg = to_msm_otg(ui->xceiv);
+	size_t count;
+	char *chg_type[] = {"STD DOWNSTREAM PORT",
+			"CARKIT",
+			"DEDICATED CHARGER",
+			"INVALID"};
+
+	count = sprintf(buf, "%s",
+			chg_type[atomic_read(&otg->chg_type)]);
+
+	return count;
+}
+/* sysfs attributes registered on the gadget device at bind time. */
+static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup);
+static DEVICE_ATTR(usb_state, S_IRUSR, show_usb_state, 0);
+static DEVICE_ATTR(usb_speed, S_IRUSR, show_usb_speed, 0);
+static DEVICE_ATTR(chg_type, S_IRUSR, show_usb_chg_type, 0);
+static DEVICE_ATTR(chg_current, S_IWUSR | S_IRUSR,
+		show_usb_chg_current, store_usb_chg_current);
+
+#ifdef CONFIG_USB_OTG
+/* sysfs otg/host_request store: request (1) or drop (0) the host role.
+ * Honoured only after the host has advertised HNP availability.
+ */
+static ssize_t store_host_req(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct usb_info *ui = the_usb_info;
+	unsigned long val, flags;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+
+	dev_dbg(&ui->pdev->dev, "%s host request\n",
+			val ? "set" : "clear");
+
+	spin_lock_irqsave(&ui->lock, flags);
+	if (ui->hnp_avail)
+		ui->gadget.host_request = !!val;
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+static DEVICE_ATTR(host_request, S_IWUSR, NULL, store_host_req);
+
+/* How do we notify user space about HNP availability?
+ * As we are compliant to Rev 2.0, Host will not set a_hnp_support.
+ * Introduce hnp_avail flag and set when HNP polling request arrives.
+ * The expectation is that user space checks hnp availability before
+ * requesting host role via above sysfs node.
+ */
+static ssize_t show_host_avail(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_info *ui = the_usb_info;
+	size_t count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ui->lock, flags);
+	count = sprintf(buf, "%d\n", ui->hnp_avail);
+	spin_unlock_irqrestore(&ui->lock, flags);
+
+	return count;
+}
+static DEVICE_ATTR(host_avail, S_IRUSR, show_host_avail, NULL);
+
+/* OTG attributes published under an "otg" sysfs subdirectory. */
+static struct attribute *otg_attrs[] = {
+	&dev_attr_host_request.attr,
+	&dev_attr_host_avail.attr,
+	NULL,
+};
+
+static struct attribute_group otg_attr_grp = {
+	.name  = "otg",
+	.attrs = otg_attrs,
+};
+#endif
+
+/* Platform probe: allocate the driver state, attach to the OTG
+ * transceiver, register the switch device, set up debugfs/sysfs and
+ * bind the gadget to the transceiver.  usb_free() releases whatever
+ * has been allocated so far on each early-exit path.
+ */
+static int msm72k_probe(struct platform_device *pdev)
+{
+	struct usb_info *ui;
+	struct msm_otg *otg;
+	int retval;
+
+	dev_dbg(&pdev->dev, "msm72k_probe\n");
+	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
+	if (!ui)
+		return -ENOMEM;
+
+	ui->pdev = pdev;
+	ui->pdata = pdev->dev.platform_data;
+
+	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
+	if (!ui->buf)
+		return usb_free(ui, -ENOMEM);
+
+	ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0);
+	if (!ui->pool)
+		return usb_free(ui, -ENOMEM);
+
+	ui->xceiv = otg_get_transceiver();
+	if (!ui->xceiv)
+		return usb_free(ui, -ENODEV);
+
+	otg = to_msm_otg(ui->xceiv);
+	ui->addr = otg->regs;
+
+	ui->gadget.ops = &msm72k_ops;
+	ui->gadget.is_dualspeed = 1;
+	device_initialize(&ui->gadget.dev);
+	dev_set_name(&ui->gadget.dev, "gadget");
+	ui->gadget.dev.parent = &pdev->dev;
+	ui->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+#ifdef CONFIG_USB_OTG
+	ui->gadget.is_otg = 1;
+#endif
+
+	ui->sdev.name = DRIVER_NAME;
+	ui->sdev.print_name = print_switch_name;
+	ui->sdev.print_state = print_switch_state;
+
+	retval = switch_dev_register(&ui->sdev);
+	if (retval)
+		return usb_free(ui, retval);
+
+	the_usb_info = ui;
+
+	wake_lock_init(&ui->wlock,
+			WAKE_LOCK_SUSPEND, "usb_bus_active");
+
+	usb_debugfs_init(ui);
+
+	usb_prepare(ui);
+
+#ifdef CONFIG_USB_OTG
+	/* sysfs group failure is logged but deliberately non-fatal */
+	retval = sysfs_create_group(&pdev->dev.kobj, &otg_attr_grp);
+	if (retval) {
+		dev_err(&ui->pdev->dev,
+			"failed to create otg sysfs directory:"
+			"err:(%d)\n", retval);
+	}
+#endif
+
+	retval = otg_set_peripheral(ui->xceiv, &ui->gadget);
+	if (retval) {
+		dev_err(&ui->pdev->dev,
+			"%s: Cannot bind the transceiver, retval:(%d)\n",
+			__func__, retval);
+		switch_dev_unregister(&ui->sdev);
+		wake_lock_destroy(&ui->wlock);
+		return usb_free(ui, retval);
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	/* Setup phy stuck timer */
+	if (ui->pdata && ui->pdata->is_phy_status_timer_on)
+		setup_timer(&phy_status_timer, usb_phy_status_check_timer, 0);
+	return 0;
+}
+
+/* Bind a gadget driver to this UDC: validate the driver, publish the
+ * endpoint list, add the gadget device, call the driver's bind(),
+ * create the sysfs attributes (creation failures are logged but
+ * non-fatal) and start the controller.
+ */
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+			    int (*bind)(struct usb_gadget *))
+{
+	struct usb_info *ui = the_usb_info;
+	int			retval, n;
+
+	if (!driver
+			|| driver->speed < USB_SPEED_FULL
+			|| !bind
+			|| !driver->disconnect
+			|| !driver->setup)
+		return -EINVAL;
+	if (!ui)
+		return -ENODEV;
+	if (ui->driver)
+		return -EBUSY;
+
+	/* first hook up the driver ... */
+	ui->driver = driver;
+	ui->gadget.dev.driver = &driver->driver;
+	ui->gadget.name = driver_name;
+	INIT_LIST_HEAD(&ui->gadget.ep_list);
+	ui->gadget.ep0 = &ui->ep0in.ep;
+	INIT_LIST_HEAD(&ui->gadget.ep0->ep_list);
+	ui->gadget.speed = USB_SPEED_UNKNOWN;
+	atomic_set(&ui->softconnect, 1);
+
+	/* Publish the hardware endpoints.  Indices 0 and 16 are skipped
+	 * -- presumably the ep0 OUT/IN pair; confirm against the
+	 * ui->ept layout set up in usb_prepare().
+	 */
+	for (n = 1; n < 16; n++) {
+		struct msm_endpoint *ept = ui->ept + n;
+		list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list);
+		ept->ep.maxpacket = 512;
+	}
+	for (n = 17; n < 32; n++) {
+		struct msm_endpoint *ept = ui->ept + n;
+		list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list);
+		ept->ep.maxpacket = 512;
+	}
+
+	retval = device_add(&ui->gadget.dev);
+	if (retval)
+		goto fail;
+
+	retval = bind(&ui->gadget);
+	if (retval) {
+		dev_err(&ui->pdev->dev, "bind to driver %s --> error %d\n",
+				driver->driver.name, retval);
+		device_del(&ui->gadget.dev);
+		goto fail;
+	}
+
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+			"(wakeup) error: (%d)\n", retval);
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_state);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+			" (usb_state) error: (%d)\n", retval);
+
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_speed);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev, "failed to create sysfs entry:"
+			" (usb_speed) error: (%d)\n", retval);
+
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_type);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev,
+			"failed to create sysfs entry(chg_type): err:(%d)\n",
+					retval);
+	retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_current);
+	if (retval != 0)
+		dev_err(&ui->pdev->dev,
+			"failed to create sysfs entry(chg_current):"
+			"err:(%d)\n", retval);
+
+	dev_dbg(&ui->pdev->dev, "registered gadget driver '%s'\n",
+			driver->driver.name);
+	usb_start(ui);
+
+	return 0;
+
+fail:
+	ui->driver = NULL;
+	ui->gadget.dev.driver = NULL;
+	return retval;
+}
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+
+/* Unbind the gadget driver: drop the pull-up, release the irq, flush
+ * ep0, remove the sysfs attributes, notify the driver (disconnect then
+ * unbind) and delete the gadget device.
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct usb_info *dev = the_usb_info;
+
+	if (!dev)
+		return -ENODEV;
+	if (!driver || driver != dev->driver || !driver->unbind)
+		return -EINVAL;
+
+	msm72k_pullup_internal(&dev->gadget, 0);
+	if (dev->irq) {
+		free_irq(dev->irq, dev);
+		dev->irq = 0;
+	}
+	dev->state = USB_STATE_IDLE;
+	atomic_set(&dev->configured, 0);
+	switch_set_state(&dev->sdev, 0);
+	/* cancel pending ep0 transactions */
+	flush_endpoint(&dev->ep0out);
+	flush_endpoint(&dev->ep0in);
+
+	device_remove_file(&dev->gadget.dev, &dev_attr_wakeup);
+	device_remove_file(&dev->gadget.dev, &dev_attr_usb_state);
+	device_remove_file(&dev->gadget.dev, &dev_attr_usb_speed);
+	device_remove_file(&dev->gadget.dev, &dev_attr_chg_type);
+	device_remove_file(&dev->gadget.dev, &dev_attr_chg_current);
+	driver->disconnect(&dev->gadget);
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+
+	device_del(&dev->gadget.dev);
+
+	dev_dbg(&dev->pdev->dev,
+		"unregistered gadget driver '%s'\n", driver->driver.name);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/* Runtime-PM callbacks: no device work is needed here (power is
+ * managed by the OTG driver); these only trace the transitions.
+ */
+static int msm72k_udc_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int msm72k_udc_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static int msm72k_udc_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+static struct dev_pm_ops msm72k_udc_dev_pm_ops = {
+	.runtime_suspend = msm72k_udc_runtime_suspend,
+	.runtime_resume = msm72k_udc_runtime_resume,
+	.runtime_idle = msm72k_udc_runtime_idle
+};
+
+/* Platform driver matched against the "msm_hsusb" platform device. */
+static struct platform_driver usb_driver = {
+	.probe = msm72k_probe,
+	.driver = { .name = "msm_hsusb",
+		    .pm = &msm72k_udc_dev_pm_ops, },
+};
+
+/* Module init/exit: register/unregister the msm_hsusb platform driver. */
+static int __init init(void)
+{
+	return platform_driver_register(&usb_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	platform_driver_unregister(&usb_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Mike Lockwood, Brian Swetland");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/qcom_maemo.c b/drivers/usb/gadget/qcom_maemo.c
new file mode 100644
index 0000000..39686c4
--- /dev/null
+++ b/drivers/usb/gadget/qcom_maemo.c
@@ -0,0 +1,304 @@
+/*
+ * Qualcomm Maemo Composite driver
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program from the Code Aurora Forum is free software; you can
+ * redistribute it and/or modify it under the GNU General Public License
+ * version 2 and only version 2 as published by the Free Software Foundation.
+ * The original work available from [git.kernel.org ] is subject to the
+ * notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+
+
+#define DRIVER_DESC		"Qcom Maemo Composite Gadget"
+#define VENDOR_ID		0x05c6
+#define PRODUCT_ID		0x902E
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#define USB_ETH
+
+#define USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
+#  include "f_rndis.c"
+#  include "rndis.c"
+#endif
+
+
+#include "u_serial.c"
+#include "f_serial.c"
+
+#include "u_ether.c"
+
+#undef DBG     /* u_ether.c has broken idea about macros */
+#undef VDBG    /* so clean up after it */
+#undef ERROR
+#undef INFO
+
+#include "f_mass_storage.c"
+#include "f_diag.c"
+#include "f_rmnet.c"
+
+/*-------------------------------------------------------------------------*/
+/* string IDs are assigned dynamically */
+
+/* Indices into strings_dev[] below. */
+#define STRING_MANUFACTURER_IDX         0
+#define STRING_PRODUCT_IDX              1
+#define STRING_SERIAL_IDX               2
+
+/* String Table */
+static struct usb_string strings_dev[] = {
+	/* These dummy values should be overridden by platform data */
+	[STRING_MANUFACTURER_IDX].s = "Qualcomm Incorporated",
+	[STRING_PRODUCT_IDX].s = "Usb composition",
+	[STRING_SERIAL_IDX].s = "0123456789ABCDEF",
+	{  }                    /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language       = 0x0409,       /* en-us */
+	.strings        = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
+/* Composite device descriptor; bcdDevice is patched in maemo_bind(). */
+static struct usb_device_descriptor device_desc = {
+	.bLength              = sizeof(device_desc),
+	.bDescriptorType      = USB_DT_DEVICE,
+	.bcdUSB               = __constant_cpu_to_le16(0x0200),
+	.bDeviceClass         = USB_CLASS_PER_INTERFACE,
+	.bDeviceSubClass      =      0,
+	.bDeviceProtocol      =      0,
+	.idVendor             = __constant_cpu_to_le16(VENDOR_ID),
+	.idProduct            = __constant_cpu_to_le16(PRODUCT_ID),
+	.bcdDevice            = __constant_cpu_to_le16(0xffff),
+	.bNumConfigurations   = 1,
+};
+
+/* Host-side MAC for the ethernet link and the legacy diag channel. */
+static u8 hostaddr[ETH_ALEN];
+static struct usb_diag_ch *diag_ch;
+static struct usb_diag_platform_data usb_diag_pdata = {
+	.ch_name = DIAG_LEGACY,
+};
+
+/****************************** Configurations ******************************/
+static struct fsg_module_parameters mod_data = {
+	.stall = 0
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static struct fsg_common *fsg_common;
+static int maemo_setup_config(struct usb_configuration *c,
+			const struct usb_ctrlrequest *ctrl);
+
+/* Bind every function of the composition to the configuration, in
+ * interface order: RNDIS, diag, two serial ports, rmnet, mass storage.
+ * Stops and propagates the first failure.
+ */
+static int maemo_do_config(struct usb_configuration *c)
+{
+	int ret;
+
+	ret = rndis_bind_config(c, hostaddr);
+	if (ret < 0)
+		return ret;
+
+	ret = diag_function_add(c);
+	if (ret < 0)
+		return ret;
+
+	ret = gser_bind_config(c, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = gser_bind_config(c, 1);
+	if (ret < 0)
+		return ret;
+
+	ret = rmnet_function_add(c);
+	if (ret < 0)
+		return ret;
+
+	ret = fsg_add(c->cdev, c, fsg_common);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Single configuration: bMaxPower 0xFA = 500 mA. */
+static struct usb_configuration maemo_config_driver = {
+	.label			= "Qcom Maemo Gadget",
+	.bind			= maemo_do_config,
+	.setup			= maemo_setup_config,
+	.bConfigurationValue	= 1,
+	.bMaxPower		= 0xFA,
+};
+/* Fan a non-standard control request out to each bound interface until
+ * one claims it (returns >= 0); otherwise report -EOPNOTSUPP.
+ */
+static int maemo_setup_config(struct usb_configuration *c,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int i;
+	int ret = -EOPNOTSUPP;
+
+	for (i = 0; i < maemo_config_driver.next_interface_id; i++) {
+		if (maemo_config_driver.interface[i]->setup) {
+			ret = maemo_config_driver.interface[i]->setup(
+				maemo_config_driver.interface[i], ctrl);
+			if (ret >= 0)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+/* Composite bind: set up the diag, ethernet, serial and mass-storage
+ * layers (torn down in reverse via the goto chain on failure), assign
+ * string IDs, then register the single configuration.
+ */
+static int maemo_bind(struct usb_composite_dev *cdev)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	int status, gcnum;
+
+	/* set up diag channel */
+	diag_ch = diag_setup(&usb_diag_pdata);
+	if (IS_ERR(diag_ch))
+		return PTR_ERR(diag_ch);
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		goto diag_clean;
+
+	/* set up serial link layer */
+	status = gserial_setup(cdev->gadget, 2);
+	if (status < 0)
+		goto fail0;
+
+	/* set up mass storage function */
+	fsg_common = fsg_common_from_params(0, cdev, &mod_data);
+	if (IS_ERR(fsg_common)) {
+		status = PTR_ERR(fsg_common);
+		goto fail1;
+	}
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* gadget zero is so simple (for now, no altsettings) that
+		 * it SHOULD NOT have problems with bulk-capable hardware.
+		 * so just warn about unrcognized controllers -- don't panic.
+		 *
+		 * things like configuration and altsetting numbering
+		 * can need hardware-specific attention though.
+		 */
+		WARNING(cdev, "controller '%s' not recognized\n",
+			gadget->name);
+		device_desc.bcdDevice = __constant_cpu_to_le16(0x9999);
+	}
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	*/
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail2;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail2;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	if (!usb_gadget_set_selfpowered(gadget))
+		maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_SELFPOWER;
+
+	if (gadget->ops->wakeup)
+		maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+
+	/* register our first configuration */
+	status = usb_add_config(cdev, &maemo_config_driver);
+	if (status < 0)
+		goto fail2;
+
+	usb_gadget_set_selfpowered(gadget);
+	dev_info(&gadget->dev, DRIVER_DESC "\n");
+	/* drop the bind-time reference; the configuration holds its own */
+	fsg_common_put(fsg_common);
+	return 0;
+
+fail2:
+	fsg_common_put(fsg_common);
+fail1:
+	gserial_cleanup();
+fail0:
+	gether_cleanup();
+diag_clean:
+	diag_cleanup(diag_ch);
+
+	return status;
+}
+
+/* Composite unbind: tear down the link layers set up in maemo_bind(). */
+static int __exit maemo_unbind(struct usb_composite_dev *cdev)
+{
+	gserial_cleanup();
+	gether_cleanup();
+	diag_cleanup(diag_ch);
+	return 0;
+}
+
+/* Composite driver glue and module entry points. */
+static struct usb_composite_driver qcom_maemo_driver = {
+	.name		= "Qcom Maemo Gadget",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.bind		= maemo_bind,
+	.unbind		= __exit_p(maemo_unbind),
+};
+
+static int __init qcom_maemo_usb_init(void)
+{
+	return usb_composite_register(&qcom_maemo_driver);
+}
+module_init(qcom_maemo_usb_init);
+
+static void __exit qcom_maemo_usb_cleanup(void)
+{
+	usb_composite_unregister(&qcom_maemo_driver);
+}
+module_exit(qcom_maemo_usb_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index a872248..c3ccb72 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -262,8 +262,13 @@
 #define EP0_BUFSIZE	256
 #define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
 
-/* Number of buffers we will use.  2 is enough for double-buffering */
-#define FSG_NUM_BUFFERS	2
+/* Number of buffers for CBW, DATA and CSW */
+#ifdef CONFIG_USB_CSW_HACK
+#define FSG_NUM_BUFFERS    4
+#else
+#define FSG_NUM_BUFFERS    2
+#endif
+
 
 /* Default size of buffer length. */
 #define FSG_BUFLEN	((u32)16384)
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
new file mode 100644
index 0000000..a047cfc
--- /dev/null
+++ b/drivers/usb/gadget/u_bam.c
@@ -0,0 +1,812 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/netdevice.h>
+#include <mach/bam_dmux.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+
+#include "u_rmnet.h"
+
+#define BAM_N_PORTS	1
+
+static struct workqueue_struct *gbam_wq;
+static int n_bam_ports;
+static unsigned bam_ch_ids[] = { 8 };
+
+#define TX_PKT_DROP_THRESHOLD			1000
+#define RX_PKT_FLOW_CTRL_EN_THRESHOLD		1000
+#define RX_PKT_FLOW_CTRL_DISABLE		500
+#define RX_PKT_FLOW_CTRL_SUPPORT		1
+
+#define BAM_MUX_HDR				8
+
+#define RX_Q_SIZE				16
+#define TX_Q_SIZE				200
+#define RX_REQ_SIZE				(2048 - BAM_MUX_HDR)
+
+unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
+module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
+module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_support = RX_PKT_FLOW_CTRL_SUPPORT;
+module_param(rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
+module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
+
+unsigned int tx_q_size = TX_Q_SIZE;
+module_param(tx_q_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_q_size = RX_Q_SIZE;
+module_param(rx_q_size, uint, S_IRUGO | S_IWUSR);
+
+unsigned int rx_req_size = RX_REQ_SIZE;
+module_param(rx_req_size, uint, S_IRUGO | S_IWUSR);
+
+struct bam_ch_info {
+	atomic_t		opened;
+	unsigned		id;
+
+	struct list_head        tx_idle;
+	struct sk_buff_head	tx_skb_q;
+
+	struct list_head        rx_idle;
+	struct sk_buff_head	rx_skb_q;
+
+	struct gbam_port	*port;
+	struct work_struct	write_tobam_w;
+
+	/* stats */
+	unsigned int		pending_with_bam;
+	unsigned int		tohost_drp_cnt;
+	unsigned int		tomodem_drp_cnt;
+	unsigned int		tx_len;
+	unsigned int		rx_len;
+	unsigned long		to_modem;
+	unsigned long		to_host;
+};
+
+struct gbam_port {
+	unsigned		port_num;
+	spinlock_t		port_lock;
+
+	struct grmnet		*port_usb;
+
+	struct bam_ch_info	data_ch;
+
+	struct work_struct	connect_w;
+};
+
+static struct bam_portmaster {
+	struct gbam_port *port;
+} bam_ports[N_PORTS];
+
+static void gbam_start_rx(struct gbam_port *port);
+
+/*---------------misc functions---------------- */
+/* Release every usb_request parked on @head back to @ep.
+ * Callers in this file (start_io error path, free_buffers) invoke this
+ * with port_lock held, which protects @head.
+ */
+static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* Allocate @num usb_requests on @ep, set their completion to @cb and
+ * chain them onto @head.
+ *
+ * Returns 0 if at least one request was allocated (a short allocation
+ * is tolerated -- the port runs with fewer buffers), -ENOMEM only if
+ * none could be allocated.
+ */
+static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num,
+		void (*cb)(struct usb_ep *ep, struct usb_request *),
+		gfp_t flags)
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
+			ep, head, num, cb);
+
+	for (i = 0; i < num; i++) {
+		req = usb_ep_alloc_request(ep, flags);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			/* partial success is still success */
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+/*--------------------------------------------- */
+
+/*------------data_path----------------------------*/
+/* Drain data_ch.tx_skb_q to the USB host: pair each queued skb with an
+ * idle IN usb_request and submit it.  Stops when either queue is empty
+ * or a submission fails (the skb is then dropped).
+ */
+static void gbam_write_data_tohost(struct gbam_port *port)
+{
+	unsigned long			flags;
+	struct bam_ch_info		*d = &port->data_ch;
+	struct sk_buff			*skb;
+	int				ret;
+	struct usb_request		*req;
+	struct usb_ep			*ep;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	ep = port->port_usb->in;
+
+	while (!list_empty(&d->tx_idle)) {
+		skb = __skb_dequeue(&d->tx_skb_q);
+		if (!skb) {
+			spin_unlock_irqrestore(&port->port_lock, flags);
+			return;
+		}
+		req = list_first_entry(&d->tx_idle,
+				struct usb_request,
+				list);
+		req->context = skb;
+		req->buf = skb->data;
+		req->length = skb->len;
+
+		list_del(&req->list);
+
+		/* drop only the lock (not irqrestore -- 'flags' stays
+		 * live for the final unlock) around the UDC call */
+		spin_unlock(&port->port_lock);
+		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb epIn failed\n", __func__);
+			list_add(&req->list, &d->tx_idle);
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		d->to_host++;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Downlink receive callback handed to the BAM DMUX driver: takes
+ * ownership of @skb, queues it toward the USB host, or drops it if
+ * the port is disconnected or tx_skb_q exceeds tx_pkt_drop_thld.
+ */
+void gbam_data_recv_cb(void *p, struct sk_buff *skb)
+{
+	struct gbam_port	*port = p;
+	struct bam_ch_info	*d = &port->data_ch;
+	unsigned long		flags;
+
+	if (!skb)
+		return;
+
+	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
+			port, port->port_num, d, skb->len);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* tail-drop when the host is not keeping up */
+	if (d->tx_skb_q.qlen > tx_pkt_drop_thld) {
+		d->tohost_drp_cnt++;
+		if (printk_ratelimit())
+			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
+					__func__, d->tohost_drp_cnt);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	__skb_queue_tail(&d->tx_skb_q, skb);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gbam_write_data_tohost(port);
+}
+
+/* Uplink write-done callback from the BAM DMUX driver: frees the skb,
+ * drops the pending-with-BAM count and, unless flow control says the
+ * modem is still backed up, re-arms the OUT endpoint.
+ */
+void gbam_data_write_done(void *p, struct sk_buff *skb)
+{
+	struct gbam_port	*port = p;
+	struct bam_ch_info	*d = &port->data_ch;
+	unsigned long		flags;
+
+	if (!skb)
+		return;
+
+	dev_kfree_skb_any(skb);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	d->pending_with_bam--;
+
+	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
+			port, d, d->to_modem,
+			d->pending_with_bam, port->port_num);
+
+	/* still above the disable threshold: keep rx stalled */
+	if (rx_fctrl_support &&
+			d->pending_with_bam >= rx_fctrl_dis_thld) {
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	gbam_start_rx(port);
+}
+
+/* Workqueue handler: push every skb queued on rx_skb_q (data received
+ * from the USB host) into the BAM DMUX channel.  On a write error the
+ * skb is dropped and draining stops.
+ */
+static void gbam_data_write_tobam(struct work_struct *w)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	struct sk_buff		*skb;
+	unsigned long		flags;
+	int			ret;
+
+	d = container_of(w, struct bam_ch_info, write_tobam_w);
+	port = d->port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	while ((skb = __skb_dequeue(&d->rx_skb_q))) {
+		/* counted optimistically; rolled back on failure below */
+		d->pending_with_bam++;
+		d->to_modem++;
+
+		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
+				port, d, d->to_modem, d->pending_with_bam,
+				port->port_num);
+
+		/* lock dropped across the dmux call */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = msm_bam_dmux_write(d->id, skb);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (ret) {
+			pr_debug("%s: write error:%d\n", __func__, ret);
+			d->pending_with_bam--;
+			d->to_modem--;
+			d->tomodem_drp_cnt++;
+			dev_kfree_skb_any(skb);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+/*-------------------------------------------------------------*/
+
+static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gbam_port	*port = ep->driver_data;
+	struct bam_ch_info	*d;
+	struct sk_buff		*skb = req->context;
+	int			status = req->status;
+
+	switch (status) {
+	case 0:
+		/* successful completion */
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		break;
+	default:
+		pr_err("%s: data tx ep error %d\n",
+				__func__, status);
+		break;
+	}
+
+	dev_kfree_skb_any(skb);
+
+	if (!port)
+		return;
+
+	spin_lock(&port->port_lock);
+	d = &port->data_ch;
+	list_add_tail(&req->list, &d->tx_idle);
+	spin_unlock(&port->port_lock);
+
+	gbam_write_data_tohost(port);
+}
+
+/* OUT (host->device) endpoint completion handler.
+ *
+ * On success the skb is queued for the write-to-BAM worker and the
+ * request is re-armed with a fresh skb; on cable disconnect the
+ * request is freed; on other errors the skb is dropped but the
+ * request is still re-armed.  If BAM flow control is engaged the
+ * request is parked on rx_idle instead of being resubmitted.
+ */
+static void
+gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gbam_port	*port = ep->driver_data;
+	struct bam_ch_info	*d = &port->data_ch;
+	struct sk_buff		*skb = req->context;
+	int			status = req->status;
+	int			queue = 0;
+
+	switch (status) {
+	case 0:
+		skb_put(skb, req->actual);
+		queue = 1;
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* cable disconnection */
+		dev_kfree_skb_any(skb);
+		req->buf = 0;
+		usb_ep_free_request(ep, req);
+		return;
+	default:
+		if (printk_ratelimit())
+			pr_err("%s: %s response error %d, %d/%d\n",
+				__func__, ep->name, status,
+				req->actual, req->length);
+		dev_kfree_skb_any(skb);
+		break;
+	}
+
+	spin_lock(&port->port_lock);
+	if (queue) {
+		__skb_queue_tail(&d->rx_skb_q, skb);
+		queue_work(gbam_wq, &d->write_tobam_w);
+	}
+
+	/* TODO: Handle flow control gracefully by having
+	 * having call back mechanism from bam driver
+	 */
+	if (rx_fctrl_support &&
+		d->pending_with_bam >= rx_fctrl_en_thld) {
+
+		/* modem backed up: park the request; gbam_start_rx()
+		 * re-arms it once pending_with_bam drains */
+		list_add_tail(&req->list, &d->rx_idle);
+		spin_unlock(&port->port_lock);
+		return;
+	}
+	spin_unlock(&port->port_lock);
+
+	skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+	if (!skb) {
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &d->rx_idle);
+		spin_unlock(&port->port_lock);
+		return;
+	}
+	/* headroom for the BAM mux header prepended downstream */
+	skb_reserve(skb, BAM_MUX_HDR);
+
+	req->buf = skb->data;
+	req->length = rx_req_size;
+	req->context = skb;
+
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	if (status) {
+		dev_kfree_skb_any(skb);
+
+		if (printk_ratelimit())
+			pr_err("%s: data rx enqueue err %d\n",
+					__func__, status);
+
+		spin_lock(&port->port_lock);
+		list_add_tail(&req->list, &d->rx_idle);
+		spin_unlock(&port->port_lock);
+	}
+}
+
+/* Arm the OUT endpoint: pair every idle rx usb_request with a fresh
+ * skb and submit it.  Stops on skb allocation failure or submit error.
+ */
+static void gbam_start_rx(struct gbam_port *port)
+{
+	struct usb_request		*req;
+	struct bam_ch_info		*d;
+	struct usb_ep			*ep;
+	unsigned long			flags;
+	int				ret;
+	struct sk_buff			*skb;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	d = &port->data_ch;
+	ep = port->port_usb->out;
+
+	/* port_usb re-checked each pass: the lock is dropped around
+	 * usb_ep_queue(), so a disconnect can race in */
+	while (port->port_usb && !list_empty(&d->rx_idle)) {
+		req = list_first_entry(&d->rx_idle, struct usb_request, list);
+
+		skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
+		if (!skb)
+			break;
+		skb_reserve(skb, BAM_MUX_HDR);
+
+		list_del(&req->list);
+		req->buf = skb->data;
+		req->length = rx_req_size;
+		req->context = skb;
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (ret) {
+			dev_kfree_skb_any(skb);
+
+			if (printk_ratelimit())
+				pr_err("%s: rx queue failed\n", __func__);
+
+			if (port->port_usb)
+				list_add(&req->list, &d->rx_idle);
+			else
+				usb_ep_free_request(ep, req);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Allocate the rx (OUT) and tx (IN) request pools and kick off the
+ * first round of OUT transfers.
+ *
+ * Fixes vs. original:
+ *  - both error paths returned while still holding port_lock with
+ *    IRQs saved, deadlocking the next lock acquisition;
+ *  - the tx-failure path freed the rx requests against the IN
+ *    endpoint ('ep' had already been repointed) instead of OUT.
+ */
+static void gbam_start_io(struct gbam_port *port)
+{
+	unsigned long		flags;
+	struct usb_ep		*ep;
+	int			ret;
+	struct bam_ch_info	*d;
+
+	pr_debug("%s: port:%p\n", __func__, port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	d = &port->data_ch;
+	ep = port->port_usb->out;
+	ret = gbam_alloc_requests(ep, &d->rx_idle, rx_q_size,
+			gbam_epout_complete, GFP_ATOMIC);
+	if (ret) {
+		pr_err("%s: rx req allocation failed\n", __func__);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	ep = port->port_usb->in;
+	ret = gbam_alloc_requests(ep, &d->tx_idle, tx_q_size,
+			gbam_epin_complete, GFP_ATOMIC);
+	if (ret) {
+		pr_err("%s: tx req allocation failed\n", __func__);
+		/* rx requests were allocated on the OUT endpoint */
+		gbam_free_requests(port->port_usb->out, &d->rx_idle);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* queue out requests */
+	gbam_start_rx(port);
+}
+
+/* Workqueue handler for gbam_connect(): opens the BAM DMUX channel
+ * (may block, hence the workqueue) and starts I/O once it is open.
+ */
+static void gbam_connect_work(struct work_struct *w)
+{
+	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
+	struct bam_ch_info *d = &port->data_ch;
+	int ret;
+
+	ret = msm_bam_dmux_open(d->id, port,
+				gbam_data_recv_cb,
+				gbam_data_write_done);
+	if (ret) {
+		pr_err("%s: unable open bam ch:%d err:%d\n",
+				__func__, d->id, ret);
+		return;
+	}
+	/* opened flag gates msm_bam_dmux_close() in gbam_disconnect() */
+	atomic_set(&d->opened, 1);
+
+	gbam_start_io(port);
+
+	pr_debug("%s: done\n", __func__);
+}
+
+/* Free the port allocated by gbam_port_alloc().
+ *
+ * Fix: the original test was inverted ("if (!port) kfree(port)"),
+ * which freed nothing, ever.  kfree(NULL) is a no-op, so call it
+ * unconditionally.
+ */
+static void gbam_port_free(int portno)
+{
+	kfree(bam_ports[portno].port);
+}
+
+static int gbam_port_alloc(int portno)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+
+	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+
+	/* port initialization */
+	spin_lock_init(&port->port_lock);
+	INIT_WORK(&port->connect_w, gbam_connect_work);
+
+	/* data ch */
+	d = &port->data_ch;
+	d->port = port;
+	INIT_LIST_HEAD(&d->tx_idle);
+	INIT_LIST_HEAD(&d->rx_idle);
+	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
+	skb_queue_head_init(&d->tx_skb_q);
+	skb_queue_head_init(&d->rx_skb_q);
+	d->id = bam_ch_ids[portno];
+
+	bam_ports[portno].port = port;
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	char			*buf;
+	unsigned long		flags;
+	int			ret;
+	int			i;
+	int			temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		d = &port->data_ch;
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port:%p data_ch:%p#\n"
+				"dpkts_to_usbhost: %lu\n"
+				"dpkts_to_modem:  %lu\n"
+				"dpkts_pwith_bam: %u\n"
+				"to_usbhost_dcnt:  %u\n"
+				"tomodem__dcnt:  %u\n"
+				"tx_buf_len:	 %u\n"
+				"data_ch_opened: %d\n",
+				i, port, &port->data_ch,
+				d->to_host, d->to_modem,
+				d->pending_with_bam,
+				d->tohost_drp_cnt, d->tomodem_drp_cnt,
+				d->tx_skb_q.qlen, atomic_read(&d->opened));
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	int			i;
+	unsigned long		flags;
+
+	for (i = 0; i < n_bam_ports; i++) {
+		port = bam_ports[i].port;
+		if (!port)
+			continue;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		d = &port->data_ch;
+
+		d->to_host = 0;
+		d->to_modem = 0;
+		d->pending_with_bam = 0;
+		d->tohost_drp_cnt = 0;
+		d->tomodem_drp_cnt = 0;
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+	return count;
+}
+
+const struct file_operations gbam_stats_ops = {
+	.read = gbam_read_stats,
+	.write = gbam_reset_stats,
+};
+
+static void gbam_debugfs_init(void)
+{
+	struct dentry *dent;
+	struct dentry *dfile;
+
+	dent = debugfs_create_dir("usb_rmnet", 0);
+	if (IS_ERR(dent))
+		return;
+
+	/* TODO: Implement cleanup function to remove created file */
+	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
+	if (!dfile || IS_ERR(dfile))
+		debugfs_remove(dent);
+}
+#else
+/* Fix: stub was misspelled "gam_debugfs_init", so builds without
+ * CONFIG_DEBUG_FS failed to link against the gbam_debugfs_init()
+ * call in gbam_setup(). */
+static void gbam_debugfs_init(void) { }
+#endif
+
+/* Release all queued requests and skbs for @port.
+ *
+ * Fix: the original took &port->port_lock before checking port for
+ * NULL, so the NULL guard ran after the pointer had already been
+ * dereferenced.  Check first, then lock.
+ */
+static void gbam_free_buffers(struct gbam_port *port)
+{
+	struct sk_buff		*skb;
+	unsigned long		flags;
+	struct bam_ch_info	*d;
+
+	if (!port)
+		return;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb)
+		goto free_buf_out;
+
+	d = &port->data_ch;
+
+	gbam_free_requests(port->port_usb->in, &d->tx_idle);
+	gbam_free_requests(port->port_usb->out, &d->rx_idle);
+
+	while ((skb = __skb_dequeue(&d->tx_skb_q)))
+		dev_kfree_skb_any(skb);
+
+	while ((skb = __skb_dequeue(&d->rx_skb_q)))
+		dev_kfree_skb_any(skb);
+
+free_buf_out:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Tear down the data path for @port_num: free buffers, detach the
+ * grmnet instance, disable both endpoints and close the BAM channel
+ * if it was opened.  Called by the rmnet function driver.
+ */
+void gbam_disconnect(struct grmnet *gr, u8 port_num)
+{
+	struct gbam_port	*port;
+	unsigned long		flags;
+	struct bam_ch_info	*d;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_bam_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+
+	/* port was allocated in gbam_setup(); assumed non-NULL here */
+	port = bam_ports[port_num].port;
+	d = &port->data_ch;
+
+	gbam_free_buffers(port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints */
+	usb_ep_disable(gr->out);
+	usb_ep_disable(gr->in);
+
+	if (atomic_read(&d->opened))
+		msm_bam_dmux_close(d->id);
+
+	atomic_set(&d->opened, 0);
+}
+
+/* Bring up the data path for @port_num: enable both endpoints, attach
+ * the grmnet instance, reset the stats and schedule the (blocking)
+ * BAM channel open on the workqueue.
+ *
+ * Returns 0 on success or a negative errno (bad port, NULL @gr, or
+ * endpoint enable failure).
+ */
+int gbam_connect(struct grmnet *gr, u8 port_num)
+{
+	struct gbam_port	*port;
+	struct bam_ch_info	*d;
+	int			ret;
+	unsigned long		flags;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_bam_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	port = bam_ports[port_num].port;
+	d = &port->data_ch;
+
+	ret = usb_ep_enable(gr->in, gr->in_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+				__func__, gr->in);
+		return ret;
+	}
+	gr->in->driver_data = port;
+
+	ret = usb_ep_enable(gr->out, gr->out_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+				__func__, gr->out);
+		/* roll back the IN endpoint association */
+		gr->in->driver_data = 0;
+		return ret;
+	}
+	gr->out->driver_data = port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gr;
+
+	/* fresh stats for this session */
+	d->to_host = 0;
+	d->to_modem = 0;
+	d->pending_with_bam = 0;
+	d->tohost_drp_cnt = 0;
+	d->tomodem_drp_cnt = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+
+	queue_work(gbam_wq, &port->connect_w);
+
+	return 0;
+}
+
+/* One-time module setup: create the workqueue, allocate @count ports
+ * (at most BAM_N_PORTS) and register the debugfs stats file.
+ *
+ * Returns 0 on success or a negative errno; on failure any ports
+ * already allocated and the workqueue are torn down.
+ */
+int gbam_setup(unsigned int count)
+{
+	int	i;
+	int	ret;
+
+	pr_debug("%s: requested ports:%d\n", __func__, count);
+
+	if (!count || count > BAM_N_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, count);
+		return -EINVAL;
+	}
+
+	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!gbam_wq) {
+		pr_err("%s: Unable to create workqueue gbam_wq\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = gbam_port_alloc(i);
+		if (ret) {
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_bam_ports;
+		}
+		n_bam_ports++;
+	}
+
+	gbam_debugfs_init();
+
+	return 0;
+free_bam_ports:
+	for (i = 0; i < n_bam_ports; i++)
+		gbam_port_free(i);
+
+	destroy_workqueue(gbam_wq);
+
+	return ret;
+}
diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h
new file mode 100644
index 0000000..aeaddee
--- /dev/null
+++ b/drivers/usb/gadget/u_rmnet.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __U_RMNET_H
+#define __U_RMNET_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct rmnet_ctrl_pkt {
+	void			*buf;
+	int			len;
+	struct list_head	list;
+};
+
+struct grmnet {
+	struct usb_function		func;
+
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+	struct usb_endpoint_descriptor	*in_desc;
+	struct usb_endpoint_descriptor	*out_desc;
+
+	/* to usb host, aka laptop, windows pc etc. Will
+	 * be filled by usb driver of rmnet functionality
+	 */
+	int (*send_cpkt_response)(struct grmnet *g,
+				struct rmnet_ctrl_pkt *pkt);
+
+	/* to modem, and to be filled by driver implementing
+	 * control function
+	 */
+	int (*send_cpkt_request)(struct grmnet *g,
+				u8 port_num,
+				struct rmnet_ctrl_pkt *pkt);
+
+	void (*send_cbits_tomodem)(struct grmnet *g,
+				u8 port_num,
+				int cbits);
+};
+
+int gbam_setup(unsigned int count);
+int gbam_connect(struct grmnet *, u8 port_num);
+void gbam_disconnect(struct grmnet *, u8 port_num);
+
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num);
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num);
+int gsmd_ctrl_setup(unsigned int count);
+
+#endif /* __U_RMNET_H*/
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
new file mode 100644
index 0000000..4449d9e
--- /dev/null
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/termios.h>
+
+#include "u_rmnet.h"
+
+#define NR_PORTS	1
+static int n_ports;
+static char *rmnet_ctrl_names[] = { "DATA40_CNTL" };
+static struct workqueue_struct *grmnet_ctrl_wq;
+
+#define SMD_CH_MAX_LEN	20
+#define CH_OPENED	0
+#define CH_READY	1
+struct smd_ch_info {
+	struct smd_channel	*ch;
+	char			*name;
+	unsigned long		flags;
+	wait_queue_head_t	wait;
+	unsigned		dtr;
+
+	struct list_head	tx_q;
+	unsigned long		tx_len;
+
+	struct work_struct	read_w;
+	struct work_struct	write_w;
+
+	struct rmnet_ctrl_port	*port;
+
+	int			cbits_tomodem;
+	/* stats */
+	unsigned long		to_modem;
+	unsigned long		to_host;
+};
+
+struct rmnet_ctrl_port {
+	struct smd_ch_info	ctrl_ch;
+	unsigned int		port_num;
+	struct grmnet		*port_usb;
+
+	spinlock_t		port_lock;
+	struct work_struct	connect_w;
+};
+
+static struct rmnet_ctrl_ports {
+	struct rmnet_ctrl_port *port;
+	struct platform_driver pdrv;
+} ports[NR_PORTS];
+
+
+/*---------------misc functions---------------- */
+
+static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
+{
+	struct rmnet_ctrl_pkt *pkt;
+
+	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
+	if (!pkt)
+		return ERR_PTR(-ENOMEM);
+
+	pkt->buf = kmalloc(len, flags);
+	if (!pkt->buf) {
+		kfree(pkt);
+		return ERR_PTR(-ENOMEM);
+	}
+	pkt->len = len;
+
+	return pkt;
+}
+
+static void rmnet_ctrl_pkt_free(struct rmnet_ctrl_pkt *pkt)
+{
+	kfree(pkt->buf);
+	kfree(pkt);
+}
+
+/*--------------------------------------------- */
+
+/*---------------control/smd channel functions---------------- */
+
+/* Workqueue handler: drain complete control packets from the SMD
+ * channel and forward each to the USB side via send_cpkt_response.
+ *
+ * Fix: the original leaked the allocated rmnet_ctrl_pkt whenever the
+ * USB port was gone or had no send_cpkt_response callback; free it in
+ * that case.
+ */
+static void grmnet_ctrl_smd_read_w(struct work_struct *w)
+{
+	struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w);
+	struct rmnet_ctrl_port *port = c->port;
+	int sz;
+	struct rmnet_ctrl_pkt *cpkt;
+	unsigned long flags;
+
+	while (1) {
+		sz = smd_cur_packet_size(c->ch);
+		if (sz == 0)
+			break;
+
+		/* wait until the whole packet has arrived */
+		if (smd_read_avail(c->ch) < sz)
+			break;
+
+		cpkt = rmnet_alloc_ctrl_pkt(sz, GFP_KERNEL);
+		if (IS_ERR(cpkt)) {
+			pr_err("%s: unable to allocate rmnet control pkt\n",
+					__func__);
+			return;
+		}
+		cpkt->len = smd_read(c->ch, cpkt->buf, sz);
+
+		/* send it to USB here */
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (port->port_usb && port->port_usb->send_cpkt_response) {
+			port->port_usb->send_cpkt_response(
+							port->port_usb,
+							cpkt);
+			c->to_host++;
+		} else {
+			/* nobody consumed the packet -- don't leak it */
+			rmnet_ctrl_pkt_free(cpkt);
+		}
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+}
+
+/* Workqueue handler: flush queued control packets (tx_q) into the SMD
+ * channel.  Stops when the channel lacks room for the next whole
+ * packet; packets are freed after a write attempt either way.
+ */
+static void grmnet_ctrl_smd_write_w(struct work_struct *w)
+{
+	struct smd_ch_info *c = container_of(w, struct smd_ch_info, write_w);
+	struct rmnet_ctrl_port *port = c->port;
+	unsigned long flags;
+	struct rmnet_ctrl_pkt *cpkt;
+	int ret;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	while (1) {
+		if (list_empty(&c->tx_q))
+			break;
+
+		cpkt = list_first_entry(&c->tx_q, struct rmnet_ctrl_pkt, list);
+
+		/* leave the packet queued until it fits in one write */
+		if (smd_write_avail(c->ch) < cpkt->len)
+			break;
+
+		list_del(&cpkt->list);
+		/* lock dropped across the (possibly slow) smd write */
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		ret = smd_write(c->ch, cpkt->buf, cpkt->len);
+		spin_lock_irqsave(&port->port_lock, flags);
+		if (ret != cpkt->len) {
+			pr_err("%s: smd_write failed err:%d\n",
+					__func__, ret);
+			rmnet_ctrl_pkt_free(cpkt);
+			break;
+		}
+		rmnet_ctrl_pkt_free(cpkt);
+		c->to_modem++;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int
+grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno,
+			struct rmnet_ctrl_pkt *cpkt)
+{
+	unsigned long		flags;
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+
+	if (portno >= n_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return -ENODEV;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet is null\n", __func__);
+		return -ENODEV;
+	}
+
+	port = ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	c = &port->ctrl_ch;
+
+	/* drop cpkt if ch is not open */
+	if (!test_bit(CH_OPENED, &c->flags)) {
+		rmnet_ctrl_pkt_free(cpkt);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return 0;
+	}
+
+	list_add_tail(&cpkt->list, &c->tx_q);
+	queue_work(grmnet_ctrl_wq, &c->write_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return 0;
+}
+
+#define ACM_CTRL_DTR		0x01
+static void
+gsmd_ctrl_send_cbits_tomodem(struct grmnet *gr, u8 portno, int cbits)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			set_bits = 0;
+	int			clear_bits = 0;
+	int			temp = 0;
+
+	if (portno >= n_ports) {
+		pr_err("%s: Invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet is null\n", __func__);
+		return;
+	}
+
+	port = ports[portno].port;
+	cbits = cbits & ACM_CTRL_DTR;
+	c = &port->ctrl_ch;
+
+	/* host driver will only send DTR, but to have generic
+	 * set and clear bit implementation using two separate
+	 * checks
+	 */
+	if (cbits & ACM_CTRL_DTR)
+		set_bits |= TIOCM_DTR;
+	else
+		clear_bits |= TIOCM_DTR;
+
+	temp |= set_bits;
+	temp &= ~clear_bits;
+
+	if (temp == c->cbits_tomodem)
+		return;
+
+	c->cbits_tomodem = temp;
+
+	if (!test_bit(CH_OPENED, &c->flags))
+		return;
+
+	pr_debug("%s: ctrl_tomodem:%d ctrl_bits:%d setbits:%d clearbits:%d\n",
+			__func__, temp, cbits, set_bits, clear_bits);
+
+	smd_tiocmset(c->ch, set_bits, clear_bits);
+}
+
+static char *get_smd_event(unsigned event)
+{
+	switch (event) {
+	case SMD_EVENT_DATA:
+		return "DATA";
+	case SMD_EVENT_OPEN:
+		return "OPEN";
+	case SMD_EVENT_CLOSE:
+		return "CLOSE";
+	}
+
+	return "UNDEFINED";
+}
+
+static void grmnet_ctrl_smd_notify(void *p, unsigned event)
+{
+	struct rmnet_ctrl_port	*port = p;
+	struct smd_ch_info	*c = &port->ctrl_ch;
+
+	pr_debug("%s: EVENT_(%s)\n", __func__, get_smd_event(event));
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		if (smd_read_avail(c->ch))
+			queue_work(grmnet_ctrl_wq, &c->read_w);
+		if (smd_write_avail(c->ch))
+			queue_work(grmnet_ctrl_wq, &c->write_w);
+		break;
+	case SMD_EVENT_OPEN:
+		set_bit(CH_OPENED, &c->flags);
+		wake_up(&c->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		clear_bit(CH_OPENED, &c->flags);
+		break;
+	}
+}
+/*------------------------------------------------------------ */
+
+static void grmnet_ctrl_smd_connect_w(struct work_struct *w)
+{
+	struct rmnet_ctrl_port *port =
+			container_of(w, struct rmnet_ctrl_port, connect_w);
+	struct smd_ch_info *c = &port->ctrl_ch;
+	unsigned long flags;
+	int ret;
+
+	pr_debug("%s:\n", __func__);
+
+	if (!test_bit(CH_READY, &c->flags))
+		return;
+
+	ret = smd_open(c->name, &c->ch, port, grmnet_ctrl_smd_notify);
+	if (ret) {
+		pr_err("%s: Unable to open smd ch:%s err:%d\n",
+				__func__, c->name, ret);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+/* Attach @gr to control port @port_num: install the tomodem callbacks
+ * and schedule the (blocking) SMD channel open on the workqueue.
+ *
+ * Returns 0 on success, -ENODEV for a bad port number or NULL @gr.
+ */
+int gsmd_ctrl_connect(struct grmnet *gr, int port_num)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	unsigned long		flags;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	port = ports[port_num].port;
+	c = &port->ctrl_ch;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gr;
+	gr->send_cpkt_request = grmnet_ctrl_smd_send_cpkt_tomodem;
+	gr->send_cbits_tomodem = gsmd_ctrl_send_cbits_tomodem;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	queue_work(grmnet_ctrl_wq, &port->connect_w);
+
+	return 0;
+}
+
+/* Detach @gr from control port @port_num: clear the callbacks and the
+ * cached modem control bits, then close the SMD channel if it is open
+ * (closing signals DTR low to the modem).
+ */
+void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num)
+{
+	struct rmnet_ctrl_port	*port;
+	unsigned long		flags;
+	struct smd_ch_info	*c;
+
+	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
+
+	if (port_num >= n_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	if (!gr) {
+		pr_err("%s: grmnet port is null\n", __func__);
+		return;
+	}
+
+	port = ports[port_num].port;
+	c = &port->ctrl_ch;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	gr->send_cpkt_request = 0;
+	gr->send_cbits_tomodem = 0;
+	c->cbits_tomodem = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (test_bit(CH_OPENED, &c->flags)) {
+		/* this should send the dtr zero */
+		smd_close(c->ch);
+		clear_bit(CH_OPENED, &c->flags);
+	}
+}
+
+/* Platform-driver probe: the named SMD channel is now present.  Mark
+ * the matching port CH_READY and, if USB is already connected, kick
+ * the connect worker to open the channel.
+ *
+ * Fix: removed a duplicate "#define SMD_CH_MAX_LEN 20" that shadowed
+ * the identical definition near the top of this file.
+ */
+static int grmnet_ctrl_smd_ch_probe(struct platform_device *pdev)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			i;
+	unsigned long		flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		c = &port->ctrl_ch;
+
+		if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) {
+			set_bit(CH_READY, &c->flags);
+
+			/* if usb is online, try opening smd_ch */
+			spin_lock_irqsave(&port->port_lock, flags);
+			if (port->port_usb)
+				queue_work(grmnet_ctrl_wq, &port->connect_w);
+			spin_unlock_irqrestore(&port->port_lock, flags);
+
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/* Platform-driver remove: the SMD channel went away.  Clear the ready
+ * flag and close the channel.
+ *
+ * Fix: the original called smd_close(c->ch) unconditionally; if the
+ * channel was never opened (CH_OPENED clear), c->ch is not a valid
+ * open handle.  Close only when CH_OPENED was actually set.
+ */
+static int grmnet_ctrl_smd_ch_remove(struct platform_device *pdev)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	int			i;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		c = &port->ctrl_ch;
+
+		if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) {
+			clear_bit(CH_READY, &c->flags);
+			if (test_and_clear_bit(CH_OPENED, &c->flags))
+				smd_close(c->ch);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+
+/* Free the port allocated by grmnet_ctrl_smd_port_alloc().
+ *
+ * Fix: the original test was inverted ("if (!port) kfree(port)"),
+ * freeing only a NULL pointer -- i.e. never freeing anything.
+ * kfree(NULL) is a no-op, so call it unconditionally.
+ */
+static void grmnet_ctrl_smd_port_free(int portno)
+{
+	kfree(ports[portno].port);
+}
+
+/*
+ * Allocate and initialise one rmnet control port and register a
+ * platform driver whose probe fires when the matching SMD channel
+ * comes up.  Returns 0 on success or a negative errno.
+ *
+ * Fix: the platform_driver_register() return value was ignored, which
+ * left a half-initialised port behind on registration failure; it is
+ * now checked and the port torn down on error.
+ */
+static int grmnet_ctrl_smd_port_alloc(int portno)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	struct platform_driver	*pdrv;
+	int			ret;
+
+	port = kzalloc(sizeof(struct rmnet_ctrl_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+
+	spin_lock_init(&port->port_lock);
+	INIT_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w);
+
+	c = &port->ctrl_ch;
+	c->name = rmnet_ctrl_names[portno];
+	c->port = port;
+	init_waitqueue_head(&c->wait);
+	INIT_LIST_HEAD(&c->tx_q);
+	INIT_WORK(&c->read_w, grmnet_ctrl_smd_read_w);
+	INIT_WORK(&c->write_w, grmnet_ctrl_smd_write_w);
+
+	ports[portno].port = port;
+
+	pdrv = &ports[portno].pdrv;
+	pdrv->probe = grmnet_ctrl_smd_ch_probe;
+	pdrv->remove = grmnet_ctrl_smd_ch_remove;
+	pdrv->driver.name = c->name;
+	pdrv->driver.owner = THIS_MODULE;
+
+	ret = platform_driver_register(pdrv);
+	if (ret) {
+		pr_err("%s: platform_driver_register failed:%d\n",
+				__func__, ret);
+		ports[portno].port = NULL;
+		kfree(port);
+		return ret;
+	}
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+/*
+ * gsmd_ctrl_setup - allocate @count rmnet control ports (1..NR_PORTS)
+ * and the shared workqueue used by all of them.
+ *
+ * Returns 0 on success; -EINVAL for a bad count, -ENOMEM if the
+ * workqueue cannot be created, or the error from port allocation (in
+ * which case already-allocated ports and the workqueue are released).
+ */
+int gsmd_ctrl_setup(unsigned int count)
+{
+	int	i;
+	int	ret;
+
+	pr_debug("%s: requested ports:%d\n", __func__, count);
+
+	if (!count || count > NR_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, count);
+		return -EINVAL;
+	}
+
+	/* single-threaded: port works must not run concurrently */
+	grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
+				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!grmnet_ctrl_wq) {
+		pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = grmnet_ctrl_smd_port_alloc(i);
+		if (ret) {
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_ports;
+		}
+		n_ports++;
+	}
+
+	return 0;
+
+free_ports:
+	/* n_ports counts only the ports that were fully allocated */
+	for (i = 0; i < n_ports; i++)
+		grmnet_ctrl_smd_port_free(i);
+
+	destroy_workqueue(grmnet_ctrl_wq);
+
+	return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define DEBUG_BUF_SIZE	1024
+/*
+ * debugfs read handler: dumps per-port counters and channel state.
+ *
+ * Fix: smd_read_avail()/smd_write_avail() were called even when the
+ * SMD channel had never been opened (c->ch may be NULL until the
+ * connect worker opens it); report 0 for an unopened channel instead
+ * of dereferencing it.
+ */
+static ssize_t gsmd_ctrl_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*c;
+	char			*buf;
+	unsigned long		flags;
+	int			ret;
+	int			i;
+	int			temp = 0;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_ports; i++) {
+		port = ports[i].port;
+		if (!port)
+			continue;
+		spin_lock_irqsave(&port->port_lock, flags);
+
+		c = &port->ctrl_ch;
+
+		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
+				"#PORT:%d port:%p ctrl_ch:%p#\n"
+				"to_usbhost: %lu\n"
+				"to_modem:   %lu\n"
+				"DTR:        %s\n"
+				"ch_open:    %d\n"
+				"ch_ready:   %d\n"
+				"read_avail: %d\n"
+				"write_avail:%d\n",
+				i, port, &port->ctrl_ch,
+				c->to_host, c->to_modem,
+				c->cbits_tomodem ? "HIGH" : "LOW",
+				test_bit(CH_OPENED, &c->flags),
+				test_bit(CH_READY, &c->flags),
+				test_bit(CH_OPENED, &c->flags) ?
+					smd_read_avail(c->ch) : 0,
+				test_bit(CH_OPENED, &c->flags) ?
+					smd_write_avail(c->ch) : 0);
+
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/*
+ * debugfs write handler: any write zeroes the per-port byte counters.
+ * The written data itself is ignored; the whole write is consumed.
+ */
+static ssize_t gsmd_ctrl_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct rmnet_ctrl_port	*port;
+	struct smd_ch_info	*ch;
+	unsigned long		flags;
+	int			n = 0;
+
+	while (n < n_ports) {
+		port = ports[n].port;
+		n++;
+		if (!port)
+			continue;
+
+		/* counters are protected by the port lock */
+		spin_lock_irqsave(&port->port_lock, flags);
+		ch = &port->ctrl_ch;
+		ch->to_host = 0;
+		ch->to_modem = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	return count;
+}
+
+/* debugfs "status" file: read dumps stats, write resets counters */
+const struct file_operations gsmd_ctrl_stats_ops = {
+	.read = gsmd_ctrl_read_stats,
+	.write = gsmd_ctrl_reset_stats,
+};
+
+/* debugfs directory and file handles, kept for removal on exit */
+struct dentry *smd_ctrl_dent;
+struct dentry *smd_ctrl_dfile;
+/* Create /sys/kernel/debug/usb_rmnet_ctrl_smd/status (best-effort). */
+static void gsmd_ctrl_debugfs_init(void)
+{
+	smd_ctrl_dent = debugfs_create_dir("usb_rmnet_ctrl_smd", 0);
+	if (IS_ERR(smd_ctrl_dent))
+		return;
+
+	smd_ctrl_dfile = debugfs_create_file("status", 0444, smd_ctrl_dent, 0,
+			&gsmd_ctrl_stats_ops);
+	if (!smd_ctrl_dfile || IS_ERR(smd_ctrl_dfile))
+		debugfs_remove(smd_ctrl_dent);
+}
+
+/* Tear down the debugfs entries created above. */
+static void gsmd_ctrl_debugfs_exit(void)
+{
+	debugfs_remove(smd_ctrl_dfile);
+	debugfs_remove(smd_ctrl_dent);
+}
+
+#else
+/* no-op stubs when debugfs is compiled out */
+static void gsmd_ctrl_debugfs_init(void) { }
+static void gsmd_ctrl_debugfs_exit(void) { }
+#endif
+
+/* Module init: only sets up debugfs; ports are created on demand via
+ * gsmd_ctrl_setup() by the gadget function that uses this driver.
+ */
+static int __init gsmd_ctrl_init(void)
+{
+	gsmd_ctrl_debugfs_init();
+
+	return 0;
+}
+module_init(gsmd_ctrl_init);
+
+/* Module exit: remove the debugfs entries. */
+static void __exit gsmd_ctrl_exit(void)
+{
+	gsmd_ctrl_debugfs_exit();
+}
+module_exit(gsmd_ctrl_exit);
+MODULE_DESCRIPTION("smd control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
new file mode 100644
index 0000000..09d898f
--- /dev/null
+++ b/drivers/usb/gadget/u_sdio.c
@@ -0,0 +1,1097 @@
+/*
+ * u_sdio.c - utilities for USB gadget serial over sdio
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program from the Code Aurora Forum is free software; you can
+ * redistribute it and/or modify it under the GNU General Public License
+ * version 2 and only version 2 as published by the Free Software Foundation.
+ * The original work available from [kernel.org] is subject to the notice below.
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/debugfs.h>
+
+#include <mach/sdio_al.h>
+#include <mach/sdio_cmux.h>
+#include "u_serial.h"
+
+/* queue depth and buffer size for each direction of a port */
+#define SDIO_RX_QUEUE_SIZE		8
+#define SDIO_RX_BUF_SIZE		2048
+
+#define SDIO_TX_QUEUE_SIZE		8
+#define SDIO_TX_BUF_SIZE		2048
+
+/* 1 - DUN, 2-NMEA/GPS */
+#define SDIO_N_PORTS	2
+/* One entry per logical port: the port state plus the platform driver
+ * registered to catch the corresponding SDIO channel's probe.
+ */
+static struct sdio_portmaster {
+	struct mutex lock;
+	struct gsdio_port *port;
+	struct platform_driver gsdio_ch;
+} sdio_ports[SDIO_N_PORTS];
+static unsigned n_sdio_ports;
+
+/* Static description of the SDIO data/control channel pair backing a port. */
+struct sdio_port_info {
+	/* data channel info */
+	char *data_ch_name;
+	struct sdio_channel *ch;
+
+	/* control channel info */
+	int ctrl_ch_id;
+};
+
+struct sdio_port_info sport_info[SDIO_N_PORTS] = {
+	{
+		.data_ch_name = "SDIO_DUN",
+		.ctrl_ch_id = 9,
+	},
+	{
+		.data_ch_name = "SDIO_NMEA",
+		.ctrl_ch_id = 10,
+	},
+};
+
+/* single workqueue shared by all ports for push/pull/notify work */
+static struct workqueue_struct *gsdio_wq;
+
+/*
+ * Per-port runtime state.  "read"/rx means host->modem (USB OUT into
+ * SDIO write); "write"/tx means modem->host (SDIO read into USB IN).
+ * port_lock protects the lists, counters and port_usb pointer.
+ */
+struct gsdio_port {
+	unsigned			port_num;
+	spinlock_t			port_lock;
+
+	/* bytes of the current rx request already written to SDIO */
+	unsigned			n_read;
+	struct list_head		read_pool;
+	struct list_head		read_queue;
+	struct work_struct		push;
+	unsigned long			rp_len;
+	unsigned long			rq_len;
+
+	struct list_head		write_pool;
+	struct work_struct		pull;
+	unsigned long			wp_len;
+
+	/* pushes cbits_to_modem over the cmux control channel */
+	struct work_struct		notify_modem;
+
+	struct gserial			*port_usb;
+	struct usb_cdc_line_coding	line_coding;
+
+	int				sdio_open;
+	int				ctrl_ch_err;
+	struct sdio_port_info		*sport_info;
+	struct delayed_work		sdio_open_work;
+
+#define SDIO_ACM_CTRL_RI		(1 << 3)
+#define SDIO_ACM_CTRL_DSR		(1 << 1)
+#define SDIO_ACM_CTRL_DCD		(1 << 0)
+	int				cbits_to_laptop;
+
+#define SDIO_ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define SDIO_ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+	int				cbits_to_modem;
+
+	/* pkt logging */
+	unsigned long			nbytes_tolaptop;
+	unsigned long			nbytes_tomodem;
+};
+
+/* Free one request and the buffer allocated for it by gsdio_alloc_req(). */
+void gsdio_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/*
+ * Allocate a usb_request plus a @len-byte transfer buffer on @ep.
+ * Returns NULL on failure (nothing is leaked).  Pair with
+ * gsdio_free_req().
+ */
+struct usb_request *
+gsdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req) {
+		pr_err("%s: usb alloc request failed\n", __func__);
+		return NULL;
+	}
+
+	req->length = len;
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		pr_err("%s: request buf allocation failed\n", __func__);
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+/* Dequeue and free every request remaining on @head. */
+void gsdio_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		gsdio_free_req(ep, req);
+	}
+}
+
+/*
+ * Allocate @num requests of @size bytes with completion @cb and add
+ * them to @head.  On allocation failure returns 0 if at least one
+ * request was allocated (partial pool is usable), -ENOMEM otherwise.
+ */
+int gsdio_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num, int size,
+		void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
+			ep, head, num, size, cb);
+
+	for (i = 0; i < num; i++) {
+		req = gsdio_alloc_req(ep, size, GFP_ATOMIC);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+/*
+ * Queue every request from the read pool on the OUT endpoint so the
+ * host can send data.  The port lock is dropped around usb_ep_queue()
+ * (it may call back into the driver), so port_usb is re-checked after
+ * each queue in case the cable was pulled meanwhile.
+ */
+void gsdio_start_rx(struct gsdio_port *port)
+{
+	struct list_head	*pool;
+	struct usb_ep		*out;
+	int ret;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (!port->port_usb) {
+		pr_debug("%s: usb is disconnected\n", __func__);
+		goto start_rx_end;
+	}
+
+	pool = &port->read_pool;
+	out = port->port_usb->out;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = SDIO_RX_BUF_SIZE;
+		port->rp_len--;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(out, req, GFP_ATOMIC);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep out queue failed"
+					"port:%p, port#%d\n",
+					__func__, port, port->port_num);
+			list_add_tail(&req->list, pool);
+			port->rp_len++;
+			break;
+		}
+
+		/* usb could have disconnected while we released spin lock */
+		if (!port->port_usb) {
+			pr_debug("%s: usb is disconnected\n", __func__);
+			goto start_rx_end;
+		}
+	}
+
+start_rx_end:
+	spin_unlock_irq(&port->port_lock);
+}
+
+/*
+ * Push (part of) a completed OUT request's data into the SDIO data
+ * channel.  Called from gsdio_rx_push() with port_lock held; the lock
+ * is dropped around the blocking sdio_write().  port->n_read tracks
+ * how many bytes of this request have already been written, so a
+ * partially-written request is resumed on the next call.
+ *
+ * Returns 0 when the write succeeded, -EBUSY when the channel has no
+ * space, -ENODEV for a dead port/channel, or the sdio_write() error.
+ */
+int gsdio_write(struct gsdio_port *port, struct usb_request *req)
+{
+	unsigned	avail;
+	char		*packet = req->buf;
+	unsigned	size = req->actual;
+	unsigned	n;
+	int		ret = 0;
+
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!req) {
+		pr_err("%s: usb request is null port#%d\n",
+				__func__, port->port_num);
+		return -ENODEV;
+	}
+
+	pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n",
+			__func__, port, port->port_num, req,
+			req->actual, port->n_read);
+
+	if (!port->sdio_open) {
+		pr_debug("%s: SDIO IO is not supported\n", __func__);
+		return -ENODEV;
+	}
+
+	avail = sdio_write_avail(port->sport_info->ch);
+
+	pr_debug("%s: sdio_write_avail:%d", __func__, avail);
+
+	if (!avail)
+		return -EBUSY;
+
+	if (!req->actual) {
+		pr_debug("%s: req->actual is already zero,update bytes read\n",
+				__func__);
+		port->n_read = 0;
+		return -ENODEV;
+	}
+
+	/* resume mid-request: skip the bytes already written */
+	packet = req->buf;
+	n = port->n_read;
+	if (n) {
+		packet += n;
+		size -= n;
+	}
+
+	if (size > avail)
+		size = avail;
+
+	spin_unlock_irq(&port->port_lock);
+	ret = sdio_write(port->sport_info->ch, packet, size);
+	spin_lock_irq(&port->port_lock);
+	if (ret) {
+		pr_err("%s: port#%d sdio write failed err:%d",
+				__func__, port->port_num, ret);
+		/* try again later */
+		return ret;
+	}
+
+	port->nbytes_tomodem += size;
+
+	/* fully consumed: clear the resume offset; else advance it */
+	if (size + n == req->actual)
+		port->n_read = 0;
+	else
+		port->n_read += size;
+
+	return ret;
+}
+
+/*
+ * Work item: drain completed OUT requests from read_queue into the
+ * SDIO channel via gsdio_write(), returning finished requests to
+ * read_pool, then re-arm the OUT endpoint with gsdio_start_rx().
+ * Requests that could not be fully written stay at the head of the
+ * queue and the work exits until SDIO write space becomes available.
+ */
+void gsdio_rx_push(struct work_struct *w)
+{
+	struct gsdio_port *port = container_of(w, struct gsdio_port, push);
+	struct list_head *q = &port->read_queue;
+	struct usb_ep		*out;
+	int ret;
+
+	pr_debug("%s: port:%p port#%d read_queue:%p", __func__,
+			port, port->port_num, q);
+
+	spin_lock_irq(&port->port_lock);
+
+	if (!port->port_usb) {
+		pr_debug("%s: usb cable is disconencted\n", __func__);
+		spin_unlock_irq(&port->port_lock);
+		return;
+	}
+
+	out = port->port_usb->out;
+
+	while (!list_empty(q)) {
+		struct usb_request *req;
+
+		req = list_first_entry(q, struct usb_request, list);
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			pr_debug("%s: req status shutdown portno#%d port:%p",
+					__func__, port->port_num, port);
+			goto rx_push_end;
+		default:
+			pr_warning("%s: port:%p port#%d"
+					" Unexpected Rx Status:%d\n", __func__,
+					port, port->port_num, req->status);
+			/* FALL THROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		if (!port->sdio_open) {
+			pr_err("%s: sio channel is not open\n", __func__);
+			list_move(&req->list, &port->read_pool);
+			port->rp_len++;
+			port->rq_len--;
+			goto rx_push_end;
+		}
+
+
+		list_del(&req->list);
+		port->rq_len--;
+
+		ret = gsdio_write(port, req);
+		/* as gsdio_write drops spin_lock while writing data
+		 * to sdio usb cable may have been disconnected
+		 */
+		if (!port->port_usb) {
+			port->n_read = 0;
+			gsdio_free_req(out, req);
+			spin_unlock_irq(&port->port_lock);
+			return;
+		}
+
+		/* partial/failed write: keep the request queued for retry */
+		if (ret || port->n_read) {
+			list_add(&req->list, &port->read_queue);
+			port->rq_len++;
+			goto rx_push_end;
+		}
+
+		list_add(&req->list, &port->read_pool);
+		port->rp_len++;
+	}
+
+	/* NOTE(review): this re-queue check appears unreachable -- the
+	 * loop above only ends when q is empty, and every early exit
+	 * jumps past it via goto.  Confirm intent before relying on it.
+	 */
+	if (port->sdio_open && !list_empty(q)) {
+		if (sdio_write_avail(port->sport_info->ch))
+			queue_work(gsdio_wq, &port->push);
+	}
+rx_push_end:
+	spin_unlock_irq(&port->port_lock);
+
+	/* start queuing out requests again to host */
+	gsdio_start_rx(port);
+}
+
+/*
+ * OUT-endpoint completion: host data arrived.  Queue the request on
+ * read_queue and kick the push worker to move it into SDIO.  Runs in
+ * interrupt context, hence irqsave locking.
+ */
+void gsdio_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsdio_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add_tail(&req->list, &port->read_queue);
+	port->rq_len++;
+	queue_work(gsdio_wq, &port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+/*
+ * IN-endpoint completion: a modem->host transfer finished.  Return
+ * the request to write_pool and, unless the endpoint is shutting
+ * down, kick the pull worker to fetch more SDIO data.
+ */
+void gsdio_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsdio_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add(&req->list, &port->write_pool);
+	port->wp_len++;
+
+	switch (req->status) {
+	default:
+		pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
+				__func__, port, port->port_num,
+				ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		queue_work(gsdio_wq, &port->pull);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		pr_debug("%s: %s shutdown\n", __func__, ep->name);
+		break;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+/*
+ * Drain and discard everything pending in the SDIO channel.  Used when
+ * USB is disconnected so stale modem data does not pile up.  Data is
+ * read into a scratch buffer and thrown away; the sdio_read() return
+ * value is intentionally ignored (best-effort flush).
+ */
+void gsdio_read_pending(struct gsdio_port *port)
+{
+	struct sdio_channel *ch;
+	char buf[1024];
+	int avail;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	ch = port->sport_info->ch;
+
+	if (!ch)
+		return;
+
+	while ((avail = sdio_read_avail(ch))) {
+		if (avail > 1024)
+			avail = 1024;
+		sdio_read(ch, buf, avail);
+
+		pr_debug("%s: flushed out %d bytes\n", __func__, avail);
+	}
+}
+
+/*
+ * Work item: pull data out of the SDIO channel into requests from
+ * write_pool and queue them on the IN endpoint toward the host.  The
+ * port lock is dropped around sdio_read()/usb_ep_queue(), so port_usb
+ * is re-checked after each; on disconnect the in-flight request is
+ * freed here because gsdio_disconnect() has already drained the pools.
+ *
+ * NOTE(review): the initial port_usb check below runs without
+ * port_lock held -- racy against disconnect; confirm acceptable.
+ */
+void gsdio_tx_pull(struct work_struct *w)
+{
+	struct gsdio_port *port = container_of(w, struct gsdio_port, pull);
+	struct list_head *pool = &port->write_pool;
+
+	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
+			port, port->port_num, pool);
+
+	if (!port->port_usb) {
+		pr_err("%s: usb disconnected\n", __func__);
+
+		/* take out all the pending data from sdio */
+		gsdio_read_pending(port);
+
+		return;
+	}
+
+	spin_lock_irq(&port->port_lock);
+
+	while (!list_empty(pool)) {
+		int avail;
+		struct usb_ep *in = port->port_usb->in;
+		struct sdio_channel *ch = port->sport_info->ch;
+		struct usb_request *req;
+		unsigned len = SDIO_TX_BUF_SIZE;
+		int ret;
+
+
+		req = list_entry(pool->next, struct usb_request, list);
+
+		if (!port->sdio_open) {
+			pr_debug("%s: SDIO channel is not open\n", __func__);
+			goto tx_pull_end;
+		}
+
+		avail = sdio_read_avail(ch);
+		if (!avail) {
+			/* REVISIT: for ZLP */
+			pr_debug("%s: read_avail:%d port:%p port#%d\n",
+					__func__, avail, port, port->port_num);
+			goto tx_pull_end;
+		}
+
+		if (avail > len)
+			avail = len;
+
+		list_del(&req->list);
+		port->wp_len--;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = sdio_read(ch, req->buf, avail);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: port:%p port#%d sdio read failed err:%d",
+					__func__, port, port->port_num, ret);
+
+			/* check if usb is still active */
+			if (!port->port_usb) {
+				gsdio_free_req(in, req);
+			} else {
+				list_add(&req->list, pool);
+				port->wp_len++;
+			}
+			goto tx_pull_end;
+		}
+
+		req->length = avail;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(in, req, GFP_KERNEL);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep out queue failed"
+					"port:%p, port#%d err:%d\n",
+					__func__, port, port->port_num, ret);
+
+			/* could be usb disconnected */
+			if (!port->port_usb) {
+				gsdio_free_req(in, req);
+			} else {
+				list_add(&req->list, pool);
+				port->wp_len++;
+			}
+			goto tx_pull_end;
+		}
+
+		port->nbytes_tolaptop += avail;
+	}
+tx_pull_end:
+	spin_unlock_irq(&port->port_lock);
+}
+
+/*
+ * Allocate the OUT (rx) and IN (tx) request pools and start traffic in
+ * both directions.  Called once both USB and SDIO sides are up.
+ * Returns 0, -ENODEV if USB vanished, or -ENOMEM-ish errors from pool
+ * allocation (the rx pool is rolled back if the tx pool fails).
+ */
+int gsdio_start_io(struct gsdio_port *port)
+{
+	int			ret;
+	unsigned long		flags;
+
+	pr_debug("%s:\n", __func__);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		return -ENODEV;
+	}
+
+	/* start usb out queue */
+	ret = gsdio_alloc_requests(port->port_usb->out,
+				&port->read_pool,
+				SDIO_RX_QUEUE_SIZE, SDIO_RX_BUF_SIZE,
+				gsdio_read_complete);
+	if (ret) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s: unable to allocate out reqs\n", __func__);
+		return ret;
+	}
+	port->rp_len = SDIO_RX_QUEUE_SIZE;
+
+	ret = gsdio_alloc_requests(port->port_usb->in,
+				&port->write_pool,
+				SDIO_TX_QUEUE_SIZE, SDIO_TX_BUF_SIZE,
+				gsdio_write_complete);
+	if (ret) {
+		gsdio_free_requests(port->port_usb->out, &port->read_pool);
+		port->rp_len = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		pr_err("%s: unable to allocate in reqs\n", __func__);
+		return ret;
+	}
+	port->wp_len = SDIO_TX_QUEUE_SIZE;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* arm host->modem path, then schedule modem->host pull */
+	gsdio_start_rx(port);
+	queue_work(gsdio_wq, &port->pull);
+
+	return 0;
+}
+
+/*
+ * Undo gsdio_port_alloc(): unregister the per-channel platform driver
+ * and free the port state.  Safe to call for a never-allocated slot.
+ */
+void gsdio_port_free(unsigned portno)
+{
+	struct gsdio_port *port = sdio_ports[portno].port;
+	struct platform_driver *pdriver = &sdio_ports[portno].gsdio_ch;
+
+	if (!port) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	platform_driver_unregister(pdriver);
+
+	kfree(port);
+}
+
+/*
+ * Work item: push the cached DTR/RTS bits to the modem over the cmux
+ * control channel.  Skipped while SDIO is down or the control channel
+ * failed to open.
+ */
+void gsdio_ctrl_wq(struct work_struct *w)
+{
+	struct gsdio_port *port;
+
+	port = container_of(w, struct gsdio_port, notify_modem);
+
+	/* NOTE(review): container_of of a queued work is never NULL;
+	 * this check looks like dead defensive code.
+	 */
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	if (!port->sdio_open || port->ctrl_ch_err)
+		return;
+
+	sdio_cmux_tiocmset(port->sport_info->ctrl_ch_id,
+			port->cbits_to_modem, ~(port->cbits_to_modem));
+}
+
+/*
+ * gserial callback: the USB host changed its control bits.  Maps the
+ * host's DTR to TIOCM_DTR, caches it, echoes the current modem status
+ * back to the host when DTR goes high, and schedules the worker that
+ * forwards the bits to the modem.  No-op if nothing changed.
+ */
+void gsdio_ctrl_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits)
+{
+	struct gsdio_port *port;
+	int temp;
+
+	if (portno >= n_sdio_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = sdio_ports[portno].port;
+
+	temp = ctrl_bits & SDIO_ACM_CTRL_DTR ? TIOCM_DTR : 0;
+
+	if (port->cbits_to_modem == temp)
+		return;
+
+	 port->cbits_to_modem = temp;
+
+	/* TIOCM_DTR - 0x002 - bit(1) */
+	pr_debug("%s: port:%p port#%d ctrl_bits:%08x\n", __func__,
+		port, port->port_num, ctrl_bits);
+
+	if (!port->sdio_open) {
+		pr_err("%s: port:%p port#%d sdio not connected\n",
+				__func__, port, port->port_num);
+		return;
+	}
+
+	/* whenever DTR is high let laptop know that modem status */
+	if (port->cbits_to_modem && gser->send_modem_ctrl_bits)
+		gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+
+	queue_work(gsdio_wq, &port->notify_modem);
+}
+
+/*
+ * cmux status callback: the modem's control lines changed.  Translate
+ * TIOCM_{RI,CD,DSR} into the CDC ACM serial-state bits and, if USB is
+ * connected, forward them to the host.
+ */
+void gsdio_ctrl_modem_status(int ctrl_bits, void *_dev)
+{
+	struct gsdio_port *port = _dev;
+
+	/* TIOCM_CD - 0x040 - bit(6)
+	 * TIOCM_RI - 0x080 - bit(7)
+	 * TIOCM_DSR- 0x100 - bit(8)
+	 */
+	pr_debug("%s: port:%p port#%d event:%08x\n", __func__,
+		port, port->port_num, ctrl_bits);
+
+	port->cbits_to_laptop = 0;
+	ctrl_bits &= TIOCM_RI | TIOCM_CD | TIOCM_DSR;
+	if (ctrl_bits & TIOCM_RI)
+		port->cbits_to_laptop |= SDIO_ACM_CTRL_RI;
+	if (ctrl_bits & TIOCM_CD)
+		port->cbits_to_laptop |= SDIO_ACM_CTRL_DCD;
+	if (ctrl_bits & TIOCM_DSR)
+		port->cbits_to_laptop |= SDIO_ACM_CTRL_DSR;
+
+	if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
+		port->port_usb->send_modem_ctrl_bits(port->port_usb,
+					port->cbits_to_laptop);
+}
+
+/*
+ * SDIO data-channel event callback: write space available -> run the
+ * push worker (host->modem), read data available -> run the pull
+ * worker (modem->host).
+ */
+void gsdio_ch_notify(void *_dev, unsigned event)
+{
+	struct gsdio_port *port = _dev;
+
+	pr_debug("%s: port:%p port#%d event:%s\n", __func__,
+		port, port->port_num,
+		event == 1 ? "READ AVAIL" : "WRITE_AVAIL");
+
+	if (event == SDIO_EVENT_DATA_WRITE_AVAIL)
+		queue_work(gsdio_wq, &port->push);
+	if (event == SDIO_EVENT_DATA_READ_AVAIL)
+		queue_work(gsdio_wq, &port->pull);
+}
+
+/*
+ * Delayed work: open the SDIO data channel and the cmux control
+ * channel for a port.  A control-channel failure is tolerated
+ * (ctrl_ch_err is set and modem-status handling is skipped); a data
+ * channel failure aborts.  If USB connected first, I/O is started
+ * here and the cached modem bits are pushed to the host.
+ */
+static void gsdio_open_work(struct work_struct *w)
+{
+	struct gsdio_port *port =
+			container_of(w, struct gsdio_port, sdio_open_work.work);
+	struct sdio_port_info *pi = port->sport_info;
+	struct gserial *gser;
+	int ret;
+	int ctrl_bits;
+	int startio;
+
+	ret = sdio_open(pi->data_ch_name, &pi->ch, port, gsdio_ch_notify);
+	if (ret) {
+		pr_err("%s: port:%p port#%d unable to open sdio ch:%s\n",
+				__func__, port, port->port_num,
+				pi->data_ch_name);
+		return;
+	}
+
+	ret = sdio_cmux_open(pi->ctrl_ch_id, 0, 0,
+			gsdio_ctrl_modem_status, port);
+	if (ret) {
+		pr_err("%s: port:%p port#%d unable to open ctrl ch:%d\n",
+				__func__, port, port->port_num, pi->ctrl_ch_id);
+		port->ctrl_ch_err = 1;
+	}
+
+	/* check for latest status update from modem */
+	if (!port->ctrl_ch_err) {
+		ctrl_bits = sdio_cmux_tiocmget(pi->ctrl_ch_id);
+		gsdio_ctrl_modem_status(ctrl_bits, port);
+	}
+
+	pr_debug("%s: SDIO data:%s ctrl:%d are open\n", __func__,
+					pi->data_ch_name,
+					pi->ctrl_ch_id);
+
+	port->sdio_open = 1;
+
+	/* start tx if usb is open already */
+	spin_lock_irq(&port->port_lock);
+	startio = port->port_usb ? 1 : 0;
+	gser = port->port_usb;
+	spin_unlock_irq(&port->port_lock);
+
+	if (startio) {
+		pr_debug("%s: USB is already open, start io\n", __func__);
+		gsdio_start_io(port);
+		 if (gser->send_modem_ctrl_bits)
+			gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+	}
+}
+
+#define SDIO_CH_NAME_MAX_LEN	9
+#define SDIO_OPEN_DELAY		msecs_to_jiffies(10000)
+/*
+ * Platform-driver probe: an SDIO data channel appeared.  Find the
+ * port registered for that channel name and schedule the (delayed)
+ * open work.  Returns 0 on match, -ENODEV otherwise.
+ */
+static int gsdio_ch_probe(struct platform_device *dev)
+{
+	struct gsdio_port	*port;
+	struct sdio_port_info	*pi;
+	int i;
+
+	pr_debug("%s: name:%s\n", __func__, dev->name);
+
+	for (i = 0; i < n_sdio_ports; i++) {
+		port = sdio_ports[i].port;
+		pi = port->sport_info;
+
+		pr_debug("%s: sdio_ch_name:%s dev_name:%s\n", __func__,
+				pi->data_ch_name, dev->name);
+
+		/* unfortunately cmux channle might not be ready even if
+		 * sdio channel is ready. as we dont have good notification
+		 * mechanism schedule a delayed work
+		 */
+		if (!strncmp(pi->data_ch_name, dev->name,
+					SDIO_CH_NAME_MAX_LEN)) {
+			queue_delayed_work(gsdio_wq,
+				&port->sdio_open_work, SDIO_OPEN_DELAY);
+			return 0;
+		}
+	}
+
+	pr_info("%s: name:%s is not found\n", __func__, dev->name);
+
+	return -ENODEV;
+}
+
+/*
+ * gsdio_port_alloc - allocate one gsdio port and register a platform
+ * driver so gsdio_ch_probe() fires when the SDIO data channel appears.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the platform_driver_register() return value was ignored; it is
+ * now checked and the port freed on failure.
+ */
+int gsdio_port_alloc(unsigned portno,
+		struct usb_cdc_line_coding *coding,
+		struct sdio_port_info *pi)
+{
+	struct gsdio_port *port;
+	struct platform_driver *pdriver;
+	int ret;
+
+	port = kzalloc(sizeof(struct gsdio_port), GFP_KERNEL);
+	if (!port) {
+		pr_err("%s: port allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	port->port_num = portno;
+	spin_lock_init(&port->port_lock);
+	port->line_coding = *coding;
+
+	/* READ: read from usb and write into sdio */
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_WORK(&port->push, gsdio_rx_push);
+
+	INIT_LIST_HEAD(&port->write_pool);
+	INIT_WORK(&port->pull, gsdio_tx_pull);
+
+	INIT_WORK(&port->notify_modem, gsdio_ctrl_wq);
+
+	INIT_DELAYED_WORK(&port->sdio_open_work, gsdio_open_work);
+
+	sdio_ports[portno].port = port;
+
+	port->sport_info = pi;
+	pdriver = &sdio_ports[portno].gsdio_ch;
+
+	pdriver->probe = gsdio_ch_probe;
+	pdriver->driver.name = pi->data_ch_name;
+	pdriver->driver.owner = THIS_MODULE;
+
+	pr_debug("%s: port:%p port#%d sdio_name: %s\n", __func__,
+			port, port->port_num, pi->data_ch_name);
+
+	ret = platform_driver_register(pdriver);
+	if (ret) {
+		pr_err("%s: platform_driver_register failed:%d\n",
+				__func__, ret);
+		sdio_ports[portno].port = NULL;
+		kfree(port);
+		return ret;
+	}
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	return 0;
+}
+
+/*
+ * gsdio_connect - bind a gadget serial instance to an SDIO port
+ * @gser:   gadget-side serial state with endpoints already chosen
+ * @portno: logical SDIO port index (< n_sdio_ports)
+ *
+ * Enables the IN and OUT endpoints, installs the modem-control
+ * callback and, if the SDIO side is already open, starts I/O and
+ * pushes the cached modem status bits to the host.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the OUT-endpoint failure path printed the copy-pasted
+ * "failed to enable in ep" message; it now names the out ep.
+ */
+int gsdio_connect(struct gserial *gser, u8 portno)
+{
+	struct gsdio_port *port;
+	int ret = 0;
+	unsigned long flags;
+
+	if (portno >= n_sdio_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return -EINVAL;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return -EINVAL;
+	}
+
+	port = sdio_ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gser;
+	gser->notify_modem = gsdio_ctrl_notify_modem;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	ret = usb_ep_enable(gser->in, gser->in_desc);
+	if (ret) {
+		pr_err("%s: failed to enable in ep w/ err:%d\n",
+					__func__, ret);
+		port->port_usb = 0;
+		return ret;
+	}
+	gser->in->driver_data = port;
+
+	ret = usb_ep_enable(gser->out, gser->out_desc);
+	if (ret) {
+		pr_err("%s: failed to enable out ep w/ err:%d\n",
+					__func__, ret);
+		usb_ep_disable(gser->in);
+		port->port_usb = 0;
+		gser->in->driver_data = 0;
+		return ret;
+	}
+	gser->out->driver_data = port;
+
+	if (port->sdio_open) {
+		pr_debug("%s: sdio is already open, start io\n", __func__);
+		gsdio_start_io(port);
+		if (gser->send_modem_ctrl_bits)
+			gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
+	}
+
+	return 0;
+}
+
+/*
+ * gsdio_disconnect - unbind the gadget serial instance from a port.
+ * Drops DTR toward the modem, detaches port_usb, disables both
+ * endpoints (aborting in-flight transfers) and frees all request
+ * pools.  Counters and the partial-write offset are reset.
+ */
+void gsdio_disconnect(struct gserial *gser, u8 portno)
+{
+	unsigned long flags;
+	struct gsdio_port *port;
+
+	if (portno >= n_sdio_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = sdio_ports[portno].port;
+
+	/* send dtr zero to modem to notify disconnect */
+	port->cbits_to_modem = 0;
+	queue_work(gsdio_wq, &port->notify_modem);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	port->nbytes_tomodem = 0;
+	port->nbytes_tolaptop = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(gser->out);
+
+	usb_ep_disable(gser->in);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	gsdio_free_requests(gser->out, &port->read_pool);
+	gsdio_free_requests(gser->out, &port->read_queue);
+	gsdio_free_requests(gser->in, &port->write_pool);
+
+	port->rp_len = 0;
+	port->rq_len = 0;
+	port->wp_len = 0;
+	port->n_read = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/* shared scratch buffer for debugfs reads (single reader assumed) */
+static char debug_buffer[PAGE_SIZE];
+
+/* debugfs read handler: dump per-port byte counters, control bits and
+ * queue lengths under the port lock.
+ */
+static ssize_t debug_sdio_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gsdio_port *port;
+	char *buf = debug_buffer;
+	unsigned long flags;
+	int i = 0;
+	int temp = 0;
+
+	while (i < n_sdio_ports) {
+		port = sdio_ports[i].port;
+		spin_lock_irqsave(&port->port_lock, flags);
+		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+				"###PORT:%d port:%p###\n"
+				"nbytes_tolaptop: %lu\n"
+				"nbytes_tomodem:  %lu\n"
+				"cbits_to_modem:  %u\n"
+				"cbits_to_laptop: %u\n"
+				"read_pool_len:   %lu\n"
+				"read_queue_len:  %lu\n"
+				"write_pool_len:  %lu\n"
+				"n_read:          %u\n",
+				i, port,
+				port->nbytes_tolaptop, port->nbytes_tomodem,
+				port->cbits_to_modem, port->cbits_to_laptop,
+				port->rp_len, port->rq_len, port->wp_len,
+				port->n_read);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		i++;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+/*
+ * debugfs write handler: any write zeroes the per-port byte counters;
+ * the written bytes themselves are ignored and fully consumed.
+ */
+static ssize_t debug_sdio_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gsdio_port *port;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < n_sdio_ports; i++) {
+		port = sdio_ports[i].port;
+
+		/* counters are protected by the port lock */
+		spin_lock_irqsave(&port->port_lock, flags);
+		port->nbytes_tolaptop = 0;
+		port->nbytes_tomodem = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	return count;
+}
+
+/* trivial open: no per-file state is needed */
+static int debug_sdio_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_gsdio_ops = {
+	.open = debug_sdio_open,
+	.read = debug_sdio_read_stats,
+	.write = debug_sdio_reset_stats,
+};
+
+/* Create /sys/kernel/debug/usb_gsdio/status (best-effort; the dentry
+ * is intentionally not kept -- these entries live for the module's
+ * lifetime).
+ */
+static void gsdio_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_gsdio", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, 0, &debug_gsdio_ops);
+}
+#else
+/* no-op stub when debugfs is compiled out */
+static void gsdio_debugfs_init(void)
+{
+	return;
+}
+#endif
+
+/* connect, disconnect, alloc_requests, free_requests */
+/*
+ * gsdio_setup - allocate @count logical SDIO serial ports (1..SDIO_N_PORTS)
+ * @g:     the gadget these ports will serve (used only for logging/sysfs)
+ * @count: number of ports to create
+ *
+ * Creates the shared workqueue and one port per entry of sport_info.
+ * Returns 0 on success; on failure all allocated ports and the
+ * workqueue are released and a negative errno is returned.
+ *
+ * Fix: removed the local "port_info" pointer which was incremented
+ * while uninitialized (undefined behavior) and never read -- ports are
+ * addressed via "sport_info + i" instead.
+ */
+int gsdio_setup(struct usb_gadget *g, unsigned count)
+{
+	struct usb_cdc_line_coding	coding;
+	int i;
+	int ret = 0;
+
+	pr_debug("%s: gadget:(%p) count:%d\n", __func__, g, count);
+
+	if (count == 0 || count > SDIO_N_PORTS) {
+		pr_err("%s: invalid number of ports count:%d max_ports:%d\n",
+				__func__, count, SDIO_N_PORTS);
+		return -EINVAL;
+	}
+
+	/* NOTE(review): bCharFormat/bDataBits look swapped relative to
+	 * the CDC spec (bCharFormat is stop bits, bDataBits the word
+	 * length); kept as-is since peers may depend on it -- confirm.
+	 */
+	coding.dwDTERate = cpu_to_le32(9600);
+	coding.bCharFormat = 8;
+	coding.bParityType = USB_CDC_NO_PARITY;
+	coding.bDataBits = USB_CDC_1_STOP_BITS;
+
+	gsdio_wq = create_singlethread_workqueue("k_gserial");
+	if (!gsdio_wq) {
+		pr_err("%s: unable to create workqueue gsdio_wq\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		mutex_init(&sdio_ports[i].lock);
+		ret = gsdio_port_alloc(i, &coding, sport_info + i);
+		if (ret) {
+			pr_err("%s: sdio logical port allocation failed\n",
+					__func__);
+			goto free_sdio_ports;
+		}
+		n_sdio_ports++;
+
+#ifdef DEBUG
+		/* REVISIT: create one file per port
+		 * or do not create any file
+		 */
+		if (i == 0) {
+			ret = device_create_file(&g->dev, &dev_attr_input);
+			if (ret)
+				pr_err("%s: unable to create device file\n",
+						__func__);
+		}
+#endif
+
+	}
+
+	gsdio_debugfs_init();
+
+	return 0;
+
+free_sdio_ports:
+	/* n_sdio_ports counts only fully-allocated ports */
+	for (i = 0; i < n_sdio_ports; i++)
+		gsdio_port_free(i);
+	destroy_workqueue(gsdio_wq);
+
+	return ret;
+}
+
+/* TODO: Add gserial_cleanup */
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 3fdcc9a..7bd9f33 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -25,6 +25,7 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 
 #include "u_serial.h"
 
@@ -77,9 +78,14 @@
  * next layer of buffering.  For TX that's a circular buffer; for RX
  * consider it a NOP.  A third layer is provided by the TTY code.
  */
-#define QUEUE_SIZE		16
+#define TX_QUEUE_SIZE		8
+#define TX_BUF_SIZE		4096
 #define WRITE_BUF_SIZE		8192		/* TX only */
 
+#define RX_QUEUE_SIZE		8
+#define RX_BUF_SIZE		4096
+
+
 /* circular buffer */
 struct gs_buf {
 	unsigned		buf_size;
@@ -109,7 +115,7 @@
 	int read_allocated;
 	struct list_head	read_queue;
 	unsigned		n_read;
-	struct tasklet_struct	push;
+	struct work_struct	push;
 
 	struct list_head	write_pool;
 	int write_started;
@@ -119,6 +125,10 @@
 
 	/* REVISIT this state ... */
 	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
+	unsigned long           nbytes_from_host;
+	unsigned long           nbytes_to_tty;
+	unsigned long           nbytes_from_tty;
+	unsigned long           nbytes_to_host;
 };
 
 /* increase N_PORTS if you need more */
@@ -129,6 +139,8 @@
 } ports[N_PORTS];
 static unsigned	n_ports;
 
+static struct workqueue_struct *gserial_wq;
+
 #define GS_CLOSE_TIMEOUT		15		/* seconds */
 
 
@@ -361,18 +373,37 @@
 	struct list_head	*pool = &port->write_pool;
 	struct usb_ep		*in = port->port_usb->in;
 	int			status = 0;
+	static long 		prev_len;
 	bool			do_tty_wake = false;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
 		int			len;
 
-		if (port->write_started >= QUEUE_SIZE)
+		if (port->write_started >= TX_QUEUE_SIZE)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
-		len = gs_send_packet(port, req->buf, in->maxpacket);
+		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
 		if (len == 0) {
+			/* Queue zero length packet */
+			if (prev_len && (prev_len % in->maxpacket == 0)) {
+				req->length = 0;
+				list_del(&req->list);
+				spin_unlock(&port->port_lock);
+				status = usb_ep_queue(in, req, GFP_ATOMIC);
+				spin_lock(&port->port_lock);
+				if (!port->port_usb) {
+					gs_free_req(in, req);
+					break;
+				}
+				if (status) {
+					printk(KERN_ERR "%s: %s err %d\n",
+					__func__, "queue", status);
+					list_add(&req->list, pool);
+				}
+				prev_len = 0;
+			}
 			wake_up_interruptible(&port->drain_wait);
 			break;
 		}
@@ -396,19 +427,25 @@
 		spin_unlock(&port->port_lock);
 		status = usb_ep_queue(in, req, GFP_ATOMIC);
 		spin_lock(&port->port_lock);
-
+		/*
+		 * If port_usb is NULL, gserial disconnect is called
+		 * while the spinlock is dropped and all requests are
+		 * freed. Free the current request here.
+		 */
+		if (!port->port_usb) {
+			do_tty_wake = false;
+			gs_free_req(in, req);
+			break;
+		}
 		if (status) {
 			pr_debug("%s: %s %s err %d\n",
 					__func__, "queue", in->name, status);
 			list_add(&req->list, pool);
 			break;
 		}
+		prev_len = req->length;
+		port->nbytes_from_tty += req->length;
 
-		port->write_started++;
-
-		/* abort immediately after disconnect */
-		if (!port->port_usb)
-			break;
 	}
 
 	if (do_tty_wake && port->port_tty)
@@ -427,6 +464,7 @@
 {
 	struct list_head	*pool = &port->read_pool;
 	struct usb_ep		*out = port->port_usb->out;
+	unsigned		started = 0;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
@@ -438,12 +476,12 @@
 		if (!tty)
 			break;
 
-		if (port->read_started >= QUEUE_SIZE)
+		if (port->read_started >= RX_QUEUE_SIZE)
 			break;
 
 		req = list_entry(pool->next, struct usb_request, list);
 		list_del(&req->list);
-		req->length = out->maxpacket;
+		req->length = RX_BUF_SIZE;
 
 		/* drop lock while we call out; the controller driver
 		 * may need to call us back (e.g. for disconnect)
@@ -451,7 +489,16 @@
 		spin_unlock(&port->port_lock);
 		status = usb_ep_queue(out, req, GFP_ATOMIC);
 		spin_lock(&port->port_lock);
-
+		/*
+		 * If port_usb is NULL, gserial disconnect is called
+		 * while the spinlock is dropped and all requests are
+		 * freed. Free the current request here.
+		 */
+		if (!port->port_usb) {
+			started = 0;
+			gs_free_req(out, req);
+			break;
+		}
 		if (status) {
 			pr_debug("%s: %s %s err %d\n",
 					__func__, "queue", out->name, status);
@@ -460,9 +507,6 @@
 		}
 		port->read_started++;
 
-		/* abort immediately after disconnect */
-		if (!port->port_usb)
-			break;
 	}
 	return port->read_started;
 }
@@ -477,9 +521,9 @@
  * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
  * can be buffered before the TTY layer's buffers (currently 64 KB).
  */
-static void gs_rx_push(unsigned long _port)
+static void gs_rx_push(struct work_struct *w)
 {
-	struct gs_port		*port = (void *)_port;
+	struct gs_port		*port = container_of(w, struct gs_port, push);
 	struct tty_struct	*tty;
 	struct list_head	*queue = &port->read_queue;
 	bool			disconnect = false;
@@ -532,6 +576,7 @@
 			}
 
 			count = tty_insert_flip_string(tty, packet, size);
+			port->nbytes_to_tty += count;
 			if (count)
 				do_push = true;
 			if (count != size) {
@@ -549,11 +594,17 @@
 		port->read_started--;
 	}
 
-	/* Push from tty to ldisc; without low_latency set this is handled by
-	 * a workqueue, so we won't get callbacks and can hold port_lock
+	/* Push from tty to ldisc; this is immediate with low_latency, and
+	 * may trigger callbacks to this driver ... so drop the spinlock.
 	 */
 	if (tty && do_push) {
+		spin_unlock_irq(&port->port_lock);
 		tty_flip_buffer_push(tty);
+		wake_up_interruptible(&tty->read_wait);
+		spin_lock_irq(&port->port_lock);
+
+		/* tty may have been closed */
+		tty = port->port_tty;
 	}
 
 
@@ -562,13 +613,13 @@
 	 * this time around, there may be trouble unless there's an
 	 * implicit tty_unthrottle() call on its way...
 	 *
-	 * REVISIT we should probably add a timer to keep the tasklet
+	 * REVISIT we should probably add a timer to keep the work queue
 	 * from starving ... but it's not clear that case ever happens.
 	 */
 	if (!list_empty(queue) && tty) {
 		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
 			if (do_push)
-				tasklet_schedule(&port->push);
+				queue_work(gserial_wq, &port->push);
 			else
 				pr_warning(PREFIX "%d: RX not scheduled?\n",
 					port->port_num);
@@ -585,19 +636,23 @@
 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct gs_port	*port = ep->driver_data;
+	unsigned long flags;
 
 	/* Queue all received data until the tty layer is ready for it. */
-	spin_lock(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->nbytes_from_host += req->actual;
 	list_add_tail(&req->list, &port->read_queue);
-	tasklet_schedule(&port->push);
-	spin_unlock(&port->port_lock);
+	queue_work(gserial_wq, &port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct gs_port	*port = ep->driver_data;
+	unsigned long flags;
 
-	spin_lock(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->nbytes_to_host += req->actual;
 	list_add(&req->list, &port->write_pool);
 	port->write_started--;
 
@@ -609,7 +664,8 @@
 		/* FALL THROUGH */
 	case 0:
 		/* normal completion */
-		gs_start_tx(port);
+		if (port->port_usb)
+			gs_start_tx(port);
 		break;
 
 	case -ESHUTDOWN:
@@ -618,7 +674,7 @@
 		break;
 	}
 
-	spin_unlock(&port->port_lock);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
@@ -636,19 +692,18 @@
 }
 
 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
-		void (*fn)(struct usb_ep *, struct usb_request *),
+		int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
 		int *allocated)
 {
 	int			i;
 	struct usb_request	*req;
-	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
 
 	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
 	 * do quite that many this time, don't fail ... we just won't
 	 * be as speedy as we might otherwise be.
 	 */
-	for (i = 0; i < n; i++) {
-		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+	for (i = 0; i < num; i++) {
+		req = gs_alloc_req(ep, size, GFP_ATOMIC);
 		if (!req)
 			return list_empty(head) ? -ENOMEM : 0;
 		req->complete = fn;
@@ -681,13 +736,13 @@
 	 * configurations may use different endpoints with a given port;
 	 * and high speed vs full speed changes packet sizes too.
 	 */
-	status = gs_alloc_requests(ep, head, gs_read_complete,
-		&port->read_allocated);
+	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
+			 gs_read_complete, &port->read_allocated);
 	if (status)
 		return status;
 
 	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
-			gs_write_complete, &port->write_allocated);
+			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete, &port->write_allocated);
 	if (status) {
 		gs_free_requests(ep, head, &port->read_allocated);
 		return status;
@@ -697,6 +752,8 @@
 	port->n_read = 0;
 	started = gs_start_rx(port);
 
+	if (!port->port_usb)
+		return -EIO;
 	/* unblock any pending writes into our circular buffer */
 	if (started) {
 		tty_wakeup(port->port_tty);
@@ -801,6 +858,13 @@
 	port->open_count = 1;
 	port->openclose = false;
 
+	/* low_latency means ldiscs work is carried in the same context
+	 * of tty_flip_buffer_push. The same can be called from IRQ with
+	 * low_latency = 0. But better to use a dedicated worker thread
+	 * to push the data.
+	 */
+	tty->low_latency = 1;
+
 	/* if connected, start the I/O stream */
 	if (port->port_usb) {
 		struct gserial	*gser = port->port_usb;
@@ -874,7 +938,7 @@
 
 	/* Iff we're disconnected, there can be no I/O in flight so it's
 	 * ok to free the circular buffer; else just scrub it.  And don't
-	 * let the push tasklet fire again until we're re-opened.
+	 * let the push work queue fire again until we're re-opened.
 	 */
 	if (gser == NULL)
 		gs_buf_free(&port->port_write_buf);
@@ -890,6 +954,22 @@
 			port->port_num, tty, file);
 
 	wake_up_interruptible(&port->close_wait);
+
+	/*
+	 * Freeing the previously queued requests as they are
+	 * allocated again as a part of gs_open()
+	 */
+	if (port->port_usb) {
+		spin_unlock_irq(&port->port_lock);
+		usb_ep_fifo_flush(gser->out);
+		usb_ep_fifo_flush(gser->in);
+		spin_lock_irq(&port->port_lock);
+		gs_free_requests(gser->out, &port->read_queue, NULL);
+		gs_free_requests(gser->out, &port->read_pool, NULL);
+		gs_free_requests(gser->in, &port->write_pool, NULL);
+	}
+	port->read_allocated = port->read_started =
+		port->write_allocated = port->write_started = 0;
 exit:
 	spin_unlock_irq(&port->port_lock);
 }
@@ -988,7 +1068,7 @@
 		 * rts/cts, or other handshaking with the host, but if the
 		 * read queue backs up enough we'll be NAKing OUT packets.
 		 */
-		tasklet_schedule(&port->push);
+		queue_work(gserial_wq, &port->push);
 		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
 	}
 	spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1012,6 +1092,77 @@
 	return status;
 }
 
+/*
+ * gs_tiocmget - TIOCMGET handler: report modem control line state.
+ *
+ * Builds a TIOCM_* bitmask from the function driver's optional
+ * get_dtr/get_rts callbacks plus the cached serial_state bits.
+ * Returns -ENODEV when no USB function is connected.  Note: result
+ * must be signed so the error return is not mangled (the original
+ * declared it unsigned and returned it through int).
+ */
+static int gs_tiocmget(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	struct gserial	*gser;
+	int		result = 0;
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (!gser) {
+		/* USB side not connected; nothing to report */
+		result = -ENODEV;
+		goto fail;
+	}
+
+	if (gser->get_dtr)
+		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+
+	if (gser->get_rts)
+		result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
+
+	if (gser->serial_state & TIOCM_CD)
+		result |= TIOCM_CD;
+
+	if (gser->serial_state & TIOCM_RI)
+		result |= TIOCM_RI;
+fail:
+	spin_unlock_irq(&port->port_lock);
+	return result;
+}
+
+/*
+ * gs_tiocmset - TIOCMSET handler: forward RI/CD line changes to the host.
+ *
+ * Only TIOCM_RI and TIOCM_CD are acted upon; each change is mirrored
+ * into the cached serial_state and, when the function driver provides
+ * the callback, pushed to the host (send_ring_indicator /
+ * send_carrier_detect).  Returns -ENODEV when USB is not connected,
+ * otherwise the status of the last callback invoked (0 if none).
+ */
+static int gs_tiocmset(struct tty_struct *tty,
+	unsigned int set, unsigned int clear)
+{
+	struct gs_port	*port = tty->driver_data;
+	struct gserial *gser;
+	int	status = 0;
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (!gser) {
+		status = -ENODEV;
+		goto fail;
+	}
+
+	if (set & TIOCM_RI) {
+		if (gser->send_ring_indicator) {
+			gser->serial_state |= TIOCM_RI;
+			status = gser->send_ring_indicator(gser, 1);
+		}
+	}
+	if (clear & TIOCM_RI) {
+		if (gser->send_ring_indicator) {
+			gser->serial_state &= ~TIOCM_RI;
+			status = gser->send_ring_indicator(gser, 0);
+		}
+	}
+	if (set & TIOCM_CD) {
+		if (gser->send_carrier_detect) {
+			gser->serial_state |= TIOCM_CD;
+			status = gser->send_carrier_detect(gser, 1);
+		}
+	}
+	if (clear & TIOCM_CD) {
+		if (gser->send_carrier_detect) {
+			gser->serial_state &= ~TIOCM_CD;
+			status = gser->send_carrier_detect(gser, 0);
+		}
+	}
+fail:
+	spin_unlock_irq(&port->port_lock);
+	return status;
+}
 static const struct tty_operations gs_tty_ops = {
 	.open =			gs_open,
 	.close =		gs_close,
@@ -1022,6 +1173,8 @@
 	.chars_in_buffer =	gs_chars_in_buffer,
 	.unthrottle =		gs_unthrottle,
 	.break_ctl =		gs_break_ctl,
+	.tiocmget  =		gs_tiocmget,
+	.tiocmset  =		gs_tiocmset,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -1041,7 +1194,7 @@
 	init_waitqueue_head(&port->close_wait);
 	init_waitqueue_head(&port->drain_wait);
 
-	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+	INIT_WORK(&port->push, gs_rx_push);
 
 	INIT_LIST_HEAD(&port->read_pool);
 	INIT_LIST_HEAD(&port->read_queue);
@@ -1055,6 +1208,116 @@
 	return 0;
 }
 
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define BUF_SIZE	512
+
+/*
+ * debugfs "readstatus" read handler: dump the per-port byte counters
+ * and (when available) the tty flags and DTR state.
+ *
+ * Fix vs. original: port_usb (gser) is NULL whenever the USB function
+ * is disconnected; the original dereferenced gser->get_dtr without a
+ * NULL check and could oops.  tty/gser are now also sampled under
+ * port_lock rather than before it.
+ */
+static ssize_t debug_read_status(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	struct gs_port *ui_dev = file->private_data;
+	struct tty_struct       *tty;
+	struct gserial		*gser;
+	char *buf;
+	unsigned long flags;
+	int i = 0;
+	int ret;
+	int result = 0;
+
+	buf = kzalloc(BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&ui_dev->port_lock, flags);
+
+	/* sample under the lock; either may be NULL while closed */
+	tty = ui_dev->port_tty;
+	gser = ui_dev->port_usb;
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
+
+	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
+			(ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
+
+	i += scnprintf(buf + i, BUF_SIZE - i,
+		"nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
+
+	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
+			(ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
+
+	if (tty)
+		i += scnprintf(buf + i, BUF_SIZE - i,
+			"tty_flags: %lu\n", tty->flags);
+
+	/* gser is NULL while USB is disconnected; don't dereference it */
+	if (gser && gser->get_dtr) {
+		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
+		i += scnprintf(buf + i, BUF_SIZE - i,
+			"DTR_status: %d\n", result);
+	}
+
+	spin_unlock_irqrestore(&ui_dev->port_lock, flags);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
+
+	kfree(buf);
+
+	return ret;
+}
+
+/* debugfs "reset" write handler: zero all four per-port byte counters. */
+static ssize_t debug_write_reset(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct gs_port *port = file->private_data;
+	unsigned long flags;
+
+	/* clear the counters atomically w.r.t. the I/O paths */
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->nbytes_from_host = 0;
+	port->nbytes_to_tty = 0;
+	port->nbytes_from_tty = 0;
+	port->nbytes_to_host = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return count;
+}
+
+/* debugfs open: hand the gs_port (i_private) to the read/write handlers */
+static int serial_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* debugfs file operations; file-local, hence static const */
+static const struct file_operations debug_rst_ops = {
+	.open = serial_debug_open,
+	.write = debug_write_reset,
+};
+
+static const struct file_operations debug_adb_ops = {
+	.open = serial_debug_open,
+	.read = debug_read_status,
+};
+
+/*
+ * Create debugfs entries usb_serial<N>/{readstatus,reset} for one port.
+ * Failure is non-fatal: the port simply has no debugfs view.
+ */
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
+{
+	struct dentry *dent;
+	char buf[48];
+
+	snprintf(buf, sizeof(buf), "usb_serial%d", port_num);
+	dent = debugfs_create_dir(buf, NULL);
+	/* debugfs_create_dir() returns NULL or an ERR_PTR on failure */
+	if (IS_ERR_OR_NULL(dent))
+		return;
+
+	debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
+	debugfs_create_file("reset", 0222, dent, ui_dev, &debug_rst_ops);
+}
+#else
+/* stub must match the real signature: callers pass (port, port_num) */
+static void usb_debugfs_init(struct gs_port *ui_dev, int port_num) {}
+#endif
+
 /**
  * gserial_setup - initialize TTY driver for one or more ports
  * @g: gadget to associate with these ports
@@ -1094,7 +1357,8 @@
 
 	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
 	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
-	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
+				| TTY_DRIVER_RESET_TERMIOS;
 	gs_tty_driver->init_termios = tty_std_termios;
 
 	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
@@ -1113,6 +1377,12 @@
 
 	tty_set_operations(gs_tty_driver, &gs_tty_ops);
 
+	gserial_wq = create_singlethread_workqueue("k_gserial");
+	if (!gserial_wq) {
+		status = -ENOMEM;
+		goto fail;
+	}
+
 	/* make devices be openable */
 	for (i = 0; i < count; i++) {
 		mutex_init(&ports[i].lock);
@@ -1127,6 +1397,7 @@
 	/* export the driver ... */
 	status = tty_register_driver(gs_tty_driver);
 	if (status) {
+		put_tty_driver(gs_tty_driver);
 		pr_err("%s: cannot register, err %d\n",
 				__func__, status);
 		goto fail;
@@ -1142,6 +1413,9 @@
 				__func__, i, PTR_ERR(tty_dev));
 	}
 
+	for (i = 0; i < count; i++)
+		usb_debugfs_init(ports[i].port, i);
+
 	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
 			count, (count == 1) ? "" : "s");
 
@@ -1149,6 +1423,7 @@
 fail:
 	while (count--)
 		kfree(ports[count].port);
+	destroy_workqueue(gserial_wq);
 	put_tty_driver(gs_tty_driver);
 	gs_tty_driver = NULL;
 	return status;
@@ -1195,7 +1470,7 @@
 		ports[i].port = NULL;
 		mutex_unlock(&ports[i].lock);
 
-		tasklet_kill(&port->push);
+		cancel_work_sync(&port->push);
 
 		/* wait for old opens to finish */
 		wait_event(port->close_wait, gs_closed(port));
@@ -1206,6 +1481,7 @@
 	}
 	n_ports = 0;
 
+	destroy_workqueue(gserial_wq);
 	tty_unregister_driver(gs_tty_driver);
 	put_tty_driver(gs_tty_driver);
 	gs_tty_driver = NULL;
@@ -1344,5 +1620,8 @@
 	port->read_allocated = port->read_started =
 		port->write_allocated = port->write_started = 0;
 
+	port->nbytes_from_host = port->nbytes_to_tty =
+		port->nbytes_from_tty = port->nbytes_to_host = 0;
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 300f0ed..fea53d8 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -40,11 +40,22 @@
 
 	/* REVISIT avoid this CDC-ACM support harder ... */
 	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */
+	u16				serial_state;
+
+	/* control signal callbacks*/
+	unsigned int (*get_dtr)(struct gserial *p);
+	unsigned int (*get_rts)(struct gserial *p);
 
 	/* notification callbacks */
 	void (*connect)(struct gserial *p);
 	void (*disconnect)(struct gserial *p);
 	int (*send_break)(struct gserial *p, int duration);
+	unsigned int (*send_carrier_detect)(struct gserial *p, unsigned int);
+	unsigned int (*send_ring_indicator)(struct gserial *p, unsigned int);
+	int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits);
+
+	/* notification changes to modem */
+	void (*notify_modem)(struct gserial *gser, u8 portno, int ctrl_bits);
 };
 
 /* utilities to allocate/free request and buffer */
@@ -59,6 +70,15 @@
 int gserial_connect(struct gserial *, u8 port_num);
 void gserial_disconnect(struct gserial *);
 
+/* sdio related functions */
+int gsdio_setup(struct usb_gadget *g, unsigned n_ports);
+int gsdio_connect(struct gserial *, u8 port_num);
+void gsdio_disconnect(struct gserial *, u8 portno);
+
+int gsmd_setup(struct usb_gadget *g, unsigned n_ports);
+int gsmd_connect(struct gserial *, u8 port_num);
+void gsmd_disconnect(struct gserial *, u8 portno);
+
 /* functions are bound to configurations by a config or gadget driver */
 int acm_bind_config(struct usb_configuration *c, u8 port_num);
 int gser_bind_config(struct usb_configuration *c, u8 port_num);
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
new file mode 100644
index 0000000..0e8f247
--- /dev/null
+++ b/drivers/usb/gadget/u_smd.c
@@ -0,0 +1,887 @@
+/*
+ * u_smd.c - utilities for USB gadget serial over smd
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This code also borrows from drivers/usb/gadget/u_serial.c, which is
+ * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <mach/msm_smd.h>
+#include <linux/debugfs.h>
+
+#include "u_serial.h"
+
+#define SMD_RX_QUEUE_SIZE		8
+#define SMD_RX_BUF_SIZE			2048
+
+#define SMD_TX_QUEUE_SIZE		8
+#define SMD_TX_BUF_SIZE			2048
+
+static struct workqueue_struct *gsmd_wq;
+
+#define SMD_N_PORTS	2
+#define CH_OPENED	0
+struct smd_port_info {
+	struct smd_channel	*ch;
+	char			*name;
+	unsigned long		flags;
+	wait_queue_head_t	wait;
+};
+
+struct smd_port_info smd_pi[SMD_N_PORTS] = {
+	{
+		.name = "DS",
+	},
+	{
+		.name = "UNUSED",
+	},
+};
+
+struct gsmd_port {
+	unsigned		port_num;
+	spinlock_t		port_lock;
+
+	unsigned		n_read;
+	struct list_head	read_pool;
+	struct list_head	read_queue;
+	struct work_struct	push;
+
+	struct list_head	write_pool;
+	struct work_struct	pull;
+
+	struct gserial		*port_usb;
+
+	struct smd_port_info	*pi;
+	struct work_struct	connect_work;
+
+	/* At present, smd does not notify
+	 * control bit change info from modem
+	 */
+	struct work_struct	update_modem_ctrl_sig;
+
+#define SMD_ACM_CTRL_DTR		0x01
+#define SMD_ACM_CTRL_RTS		0x02
+	unsigned		cbits_to_modem;
+
+#define SMD_ACM_CTRL_DCD		0x01
+#define SMD_ACM_CTRL_DSR		0x02
+#define SMD_ACM_CTRL_BRK		0x04
+#define SMD_ACM_CTRL_RI		0x08
+	unsigned		cbits_to_laptop;
+
+	/* pkt counters */
+	unsigned long		nbytes_tomodem;
+	unsigned long		nbytes_tolaptop;
+};
+
+static struct smd_portmaster {
+	struct mutex lock;
+	struct gsmd_port *port;
+} smd_ports[SMD_N_PORTS];
+static unsigned n_smd_ports;
+
+/* Release one request and the kmalloc'd buffer from gsmd_alloc_req(). */
+static void gsmd_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
+
+/* Unlink and free every request parked on @head. */
+static void gsmd_free_requests(struct usb_ep *ep, struct list_head *head)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+		gsmd_free_req(ep, req);
+	}
+}
+
+/*
+ * Allocate one usb_request plus a kmalloc'd data buffer of @len bytes.
+ * Returns NULL on any failure (idiomatic pointer return, not 0).
+ */
+static struct usb_request *
+gsmd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, flags);
+	if (!req) {
+		pr_err("%s: usb alloc request failed\n", __func__);
+		return NULL;
+	}
+
+	req->length = len;
+	req->buf = kmalloc(len, flags);
+	if (!req->buf) {
+		pr_err("%s: request buf allocation failed\n", __func__);
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+/*
+ * Allocate up to @num requests of @size bytes each onto @head, with
+ * completion handler @cb.  Partial allocation is tolerated: returns 0
+ * if at least one request was obtained, -ENOMEM only when the list is
+ * still empty.
+ */
+static int gsmd_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		int num, int size,
+		void (*cb)(struct usb_ep *ep, struct usb_request *))
+{
+	int i;
+	struct usb_request *req;
+
+	pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
+			ep, head, num, size, cb);
+
+	for (i = 0; i < num; i++) {
+		req = gsmd_alloc_req(ep, size, GFP_ATOMIC);
+		if (!req) {
+			pr_debug("%s: req allocated:%d\n", __func__, i);
+			return list_empty(head) ? -ENOMEM : 0;
+		}
+		req->complete = cb;
+		list_add(&req->list, head);
+	}
+
+	return 0;
+}
+
+/*
+ * Queue all idle OUT requests so the host can send us data.  port_lock
+ * is dropped around usb_ep_queue() since the UDC may call back into
+ * this driver; port_usb is therefore rechecked after re-locking.
+ */
+static void gsmd_start_rx(struct gsmd_port *port)
+{
+	struct list_head	*pool;
+	struct usb_ep		*out;
+	int ret;
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irq(&port->port_lock);
+
+	if (!port->port_usb) {
+		pr_debug("%s: USB disconnected\n", __func__);
+		goto start_rx_end;
+	}
+
+	pool = &port->read_pool;
+	out = port->port_usb->out;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = SMD_RX_BUF_SIZE;
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(out, req, GFP_KERNEL);
+		spin_lock_irq(&port->port_lock);
+		/*
+		 * gserial disconnect may run while the lock was dropped
+		 * and free all pooled requests; free the one we still
+		 * hold (same pattern as u_serial.c's gs_start_rx).
+		 */
+		if (!port->port_usb) {
+			gsmd_free_req(out, req);
+			break;
+		}
+		if (ret) {
+			pr_err("%s: usb ep out queue failed"
+					"port:%p, port#%d\n",
+					 __func__, port, port->port_num);
+			list_add_tail(&req->list, pool);
+			break;
+		}
+	}
+start_rx_end:
+	spin_unlock_irq(&port->port_lock);
+}
+
+/*
+ * Push worker: move data received from the USB host (read_queue) into
+ * the SMD channel toward the modem.  A partially-written request is
+ * retried from offset n_read once the channel has room again; fully
+ * consumed requests are recycled to read_pool and re-queued on USB.
+ */
+static void gsmd_rx_push(struct work_struct *w)
+{
+	struct gsmd_port *port = container_of(w, struct gsmd_port, push);
+	struct list_head *q;
+
+	pr_debug("%s: port:%p port#%d", __func__, port, port->port_num);
+
+	spin_lock_irq(&port->port_lock);
+
+	q = &port->read_queue;
+	while (!list_empty(q)) {
+		struct usb_request *req;
+		int avail;
+		struct smd_port_info *pi = port->pi;
+
+		req = list_first_entry(q, struct usb_request, list);
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			pr_debug("%s: req status shutdown portno#%d port:%p\n",
+					__func__, port->port_num, port);
+			goto rx_push_end;
+		default:
+			pr_warning("%s: port:%p port#%d"
+					" Unexpected Rx Status:%d\n", __func__,
+					port, port->port_num, req->status);
+			/* FALL THROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		avail = smd_write_avail(pi->ch);
+		if (!avail)
+			goto rx_push_end;
+
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			/* smd_write() returns a negative errno on failure;
+			 * count must be signed or the check below is dead
+			 * code (it was declared unsigned originally).
+			 */
+			count = smd_write(pi->ch, packet, size);
+			if (count < 0) {
+				pr_err("%s: smd write failed err:%d\n",
+						__func__, count);
+				goto rx_push_end;
+			}
+
+			if (count != size) {
+				/* short write: remember progress, retry later */
+				port->n_read += count;
+				goto rx_push_end;
+			}
+
+			port->nbytes_tomodem += count;
+		}
+
+		port->n_read = 0;
+		list_move(&req->list, &port->read_pool);
+	}
+
+rx_push_end:
+	spin_unlock_irq(&port->port_lock);
+
+	gsmd_start_rx(port);
+}
+
+/* Drain and discard everything pending in the SMD channel. */
+static void gsmd_read_pending(struct gsmd_port *port)
+{
+	int avail;
+
+	if (!port || !port->pi->ch)
+		return;
+
+	/* a NULL destination buffer tells smd_read() to discard the data */
+	for (;;) {
+		avail = smd_read_avail(port->pi->ch);
+		if (!avail)
+			break;
+		smd_read(port->pi->ch, 0, avail);
+	}
+}
+
+/*
+ * Pull worker: drain data from the SMD channel (modem side) and queue
+ * it on the USB IN endpoint toward the host.  If USB is disconnected,
+ * pending SMD data is discarded instead.  port_lock is dropped around
+ * usb_ep_queue(), so port_usb must be rechecked after a queue failure.
+ */
+static void gsmd_tx_pull(struct work_struct *w)
+{
+	struct gsmd_port *port = container_of(w, struct gsmd_port, pull);
+	struct list_head *pool = &port->write_pool;
+
+	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
+			port, port->port_num, pool);
+
+	/* NOTE(review): port_usb is read here without port_lock held --
+	 * racy against disconnect; confirm whether this is acceptable.
+	 */
+	if (!port->port_usb) {
+		pr_debug("%s: usb is disconnected\n", __func__);
+		gsmd_read_pending(port);
+		return;
+	}
+
+	spin_lock_irq(&port->port_lock);
+	while (!list_empty(pool)) {
+		struct usb_request *req;
+		struct usb_ep *in = port->port_usb->in;
+		struct smd_port_info *pi = port->pi;
+		int avail;
+		int ret;
+
+		avail = smd_read_avail(pi->ch);
+		if (!avail)
+			break;
+
+		/* clamp one read to the size of a single TX buffer */
+		avail = avail > SMD_TX_BUF_SIZE ? SMD_TX_BUF_SIZE : avail;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = smd_read(pi->ch, req->buf, avail);
+
+		spin_unlock_irq(&port->port_lock);
+		ret = usb_ep_queue(in, req, GFP_KERNEL);
+		spin_lock_irq(&port->port_lock);
+		if (ret) {
+			pr_err("%s: usb ep out queue failed"
+					"port:%p, port#%d err:%d\n",
+					__func__, port, port->port_num, ret);
+			/* could be usb disconnected */
+			if (!port->port_usb)
+				gsmd_free_req(in, req);
+			else
+				list_add(&req->list, pool);
+			goto tx_pull_end;
+		}
+
+		port->nbytes_tolaptop += req->length;
+	}
+
+tx_pull_end:
+	/* TBD: Check how code behaves on USB bus suspend */
+	/* reschedule ourselves if data remains but we ran out of requests */
+	if (port->port_usb && smd_read_avail(port->pi->ch) && !list_empty(pool))
+		queue_work(gsmd_wq, &port->pull);
+
+	spin_unlock_irq(&port->port_lock);
+
+	return;
+}
+
+/*
+ * OUT-endpoint completion (may run in IRQ context): park the finished
+ * request on read_queue and kick the push worker to feed it to SMD.
+ */
+static void gsmd_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsmd_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	list_add_tail(&req->list, &port->read_queue);
+	queue_work(gsmd_wq, &port->push);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+/*
+ * IN-endpoint completion (may run in IRQ context): recycle the request
+ * into write_pool and schedule the pull worker, or free it on shutdown.
+ *
+ * Fix vs. original: the request was list_add'ed to write_pool *before*
+ * the status switch and then freed in the -ESHUTDOWN branch while
+ * still linked, leaving a dangling node on the list.  Recycle it only
+ * on the keep path.
+ */
+static void gsmd_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gsmd_port *port = ep->driver_data;
+	unsigned long flags;
+
+	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);
+
+	if (!port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	switch (req->status) {
+	default:
+		pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
+				__func__, port, port->port_num,
+				ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		list_add(&req->list, &port->write_pool);
+		queue_work(gsmd_wq, &port->pull);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect: drop the request, do not recycle it */
+		pr_debug("%s: %s shutdown\n", __func__, ep->name);
+		gsmd_free_req(ep, req);
+		break;
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return;
+}
+
+/*
+ * Allocate both request pools (OUT toward us, IN toward the host) and,
+ * on success, start receiving from the host.  If the IN allocation
+ * fails the OUT pool is rolled back; nothing is started on failure.
+ */
+static void gsmd_start_io(struct gsmd_port *port)
+{
+	int		ret = -ENODEV;
+	unsigned long	flags;
+
+	pr_debug("%s: port: %p\n", __func__, port);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	if (!port->port_usb)
+		goto start_io_out;
+
+	ret = gsmd_alloc_requests(port->port_usb->out,
+				&port->read_pool,
+				SMD_RX_QUEUE_SIZE, SMD_RX_BUF_SIZE,
+				gsmd_read_complete);
+	if (ret) {
+		pr_err("%s: unable to allocate out requests\n",
+				__func__);
+		goto start_io_out;
+	}
+
+	ret = gsmd_alloc_requests(port->port_usb->in,
+				&port->write_pool,
+				SMD_TX_QUEUE_SIZE, SMD_TX_BUF_SIZE,
+				gsmd_write_complete);
+	if (ret) {
+		/* roll back the OUT pool so nothing leaks */
+		gsmd_free_requests(port->port_usb->out, &port->read_pool);
+		pr_err("%s: unable to allocate IN requests\n",
+				__func__);
+		goto start_io_out;
+	}
+
+start_io_out:
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (ret)
+		return;
+
+	gsmd_start_rx(port);
+}
+
+/* Map TIOCM_* modem-status bits (from SMD) to CDC-ACM serial-state bits. */
+static unsigned int convert_uart_sigs_to_acm(unsigned uart_sig)
+{
+	unsigned int acm_sig = 0;
+
+	/* only RI, DCD and DSR are reported toward the host */
+	if (uart_sig & TIOCM_RI)
+		acm_sig |= SMD_ACM_CTRL_RI;
+	if (uart_sig & TIOCM_CD)
+		acm_sig |= SMD_ACM_CTRL_DCD;
+	if (uart_sig & TIOCM_DSR)
+		acm_sig |= SMD_ACM_CTRL_DSR;
+
+	return acm_sig;
+}
+
+/* Map CDC-ACM control-line bits (from the host) to TIOCM_* bits for SMD. */
+static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig)
+{
+	unsigned int uart_sig = 0;
+
+	/* only DTR and RTS are forwarded toward the modem */
+	if (acm_sig & SMD_ACM_CTRL_DTR)
+		uart_sig |= TIOCM_DTR;
+	if (acm_sig & SMD_ACM_CTRL_RTS)
+		uart_sig |= TIOCM_RTS;
+
+	return uart_sig;
+}
+
+/*
+ * SMD channel event callback (runs in SMD's context, not ours).
+ * DATA:   kick pull when the modem has bytes for us, push when the
+ *         channel has room for bytes from the host.
+ * OPEN:   mark the channel up and wake gsmd_connect_work's wait.
+ * CLOSE:  mark the channel down.
+ * STATUS: translate modem control lines and notify the host.
+ */
+static void gsmd_notify(void *priv, unsigned event)
+{
+	struct gsmd_port *port = priv;
+	struct smd_port_info *pi = port->pi;
+	int i;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		pr_debug("%s: Event data\n", __func__);
+		if (smd_read_avail(pi->ch))
+			queue_work(gsmd_wq, &port->pull);
+		if (smd_write_avail(pi->ch))
+			queue_work(gsmd_wq, &port->push);
+		break;
+	case SMD_EVENT_OPEN:
+		pr_debug("%s: Event Open\n", __func__);
+		set_bit(CH_OPENED, &pi->flags);
+		wake_up(&pi->wait);
+		break;
+	case SMD_EVENT_CLOSE:
+		pr_debug("%s: Event Close\n", __func__);
+		clear_bit(CH_OPENED, &pi->flags);
+		break;
+	case SMD_EVENT_STATUS:
+		/* NOTE(review): port_usb read without port_lock -- confirm
+		 * this cannot race with gserial disconnect.
+		 */
+		i = smd_tiocmget(port->pi->ch);
+		port->cbits_to_laptop = convert_uart_sigs_to_acm(i);
+		if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
+			port->port_usb->send_modem_ctrl_bits(port->port_usb,
+						port->cbits_to_laptop);
+		break;
+	}
+}
+
+#define MAX_SMD_RETRY_CNT	20
+/*
+ * Worker that opens the SMD channel for a port after USB connect.
+ * Retries the open for up to MAX_SMD_RETRY_CNT seconds because the SMD
+ * driver / modem may not be up yet at boot, then blocks until the
+ * modem side opens the channel, restores the host's control bits, and
+ * starts I/O.  Bails out silently whenever USB disconnects meanwhile.
+ */
+static void gsmd_connect_work(struct work_struct *w)
+{
+	struct gsmd_port *port;
+	struct smd_port_info *pi;
+	int ret;
+	int retry_cnt = 0;
+
+	port = container_of(w, struct gsmd_port, connect_work);
+	pi = port->pi;
+
+	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
+
+	/* SMD driver comes online gets initialized and loads modem
+	 * 10 seconds after boot up. If USB cable is connected at boot-up,
+	 * this might result smd open failure. To work-around, retry
+	 * opening multiple times.
+	 */
+	do {
+		if (!port->port_usb)
+			return;
+
+		ret = smd_named_open_on_edge(pi->name, SMD_APPS_MODEM,
+					&pi->ch, port, gsmd_notify);
+		if (!ret)
+			break;
+
+		retry_cnt++;
+		msleep(1000);
+	} while (retry_cnt < MAX_SMD_RETRY_CNT);
+
+	if (ret) {
+		pr_err("%s: unable to open smd port:%s err:%d\n",
+				__func__, pi->name, ret);
+		return;
+	}
+
+	pr_debug("%s: SMD port open successful retrycnt:%d\n",
+			__func__, retry_cnt);
+
+	/* NOTE(review): uninterruptible wait; worker blocks until the
+	 * modem opens the channel (CH_OPENED set in gsmd_notify).
+	 */
+	wait_event(pi->wait, test_bit(CH_OPENED, &pi->flags));
+
+	if (!port->port_usb)
+		return;
+
+	/* update usb control signals to modem */
+	if (port->cbits_to_modem)
+		smd_tiocmset(port->pi->ch,
+			port->cbits_to_modem,
+			~port->cbits_to_modem);
+
+	gsmd_start_io(port);
+}
+
+/* Forward host-side (ACM) control-line changes to the modem over SMD.
+ * Installed as gser->notify_modem by gsmd_connect().
+ * @ctrl_bits: ACM control bits from the host; converted to TIOCM_* form.
+ */
+static void gsmd_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits)
+{
+	struct gsmd_port *port;
+	int temp;
+
+	if (portno >= n_smd_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = smd_ports[portno].port;
+
+	temp = convert_acm_sigs_to_uart(ctrl_bits);
+
+	/* nothing changed; avoid a redundant SMD round-trip */
+	if (temp == port->cbits_to_modem)
+		return;
+
+	port->cbits_to_modem = temp;
+
+	/* usb could send control signal before smd is ready */
+	if (!test_bit(CH_OPENED, &port->pi->flags))
+		return;
+
+	/* if DTR is high, update latest modem info to laptop */
+	if (port->cbits_to_modem & TIOCM_DTR) {
+		unsigned i;
+
+		i = smd_tiocmget(port->pi->ch);
+		port->cbits_to_laptop = convert_uart_sigs_to_acm(i);
+
+		if (gser->send_modem_ctrl_bits)
+			gser->send_modem_ctrl_bits(
+					port->port_usb,
+					port->cbits_to_laptop);
+	}
+
+	smd_tiocmset(port->pi->ch,
+			port->cbits_to_modem,
+			~port->cbits_to_modem);
+}
+
+/* gsmd_connect - bind a USB serial function to an SMD port.
+ * @gser:   USB function side of the port
+ * @portno: port index, must be < n_smd_ports
+ *
+ * Enables the IN/OUT endpoints and schedules the SMD open worker.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fixes vs. original: port_usb is cleared under port_lock on the error
+ * paths (consistent with gsmd_disconnect(), which takes the lock for the
+ * same field), and the IN endpoint is disabled again if enabling the OUT
+ * endpoint fails, instead of being left enabled.
+ */
+int gsmd_connect(struct gserial *gser, u8 portno)
+{
+	unsigned long flags;
+	int ret;
+	struct gsmd_port *port;
+
+	pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno);
+
+	if (portno >= n_smd_ports) {
+		pr_err("%s: Invalid port no#%d", __func__, portno);
+		return -EINVAL;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return -EINVAL;
+	}
+
+	port = smd_ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = gser;
+	gser->notify_modem = gsmd_notify_modem;
+	port->nbytes_tomodem = 0;
+	port->nbytes_tolaptop = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	ret = usb_ep_enable(gser->in, gser->in_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
+				__func__, gser->in);
+		goto fail_unbind;
+	}
+	gser->in->driver_data = port;
+
+	ret = usb_ep_enable(gser->out, gser->out_desc);
+	if (ret) {
+		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
+				__func__, gser->out);
+		/* undo the IN endpoint enable done above */
+		usb_ep_disable(gser->in);
+		gser->in->driver_data = 0;
+		goto fail_unbind;
+	}
+	gser->out->driver_data = port;
+
+	queue_work(gsmd_wq, &port->connect_work);
+
+	return 0;
+
+fail_unbind:
+	/* clear port_usb under the lock, as gsmd_disconnect() does */
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+	return ret;
+}
+
+/* gsmd_disconnect - unbind the USB serial function from an SMD port.
+ * Disables both endpoints, drops all queued requests, lowers DTR towards
+ * the modem and closes the SMD channel (if it ever opened).
+ */
+void gsmd_disconnect(struct gserial *gser, u8 portno)
+{
+	unsigned long flags;
+	struct gsmd_port *port;
+
+	pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno);
+
+	if (portno >= n_smd_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, portno);
+		return;
+	}
+
+	if (!gser) {
+		pr_err("%s: gser is null\n", __func__);
+		return;
+	}
+
+	port = smd_ports[portno].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(gser->out);
+	usb_ep_disable(gser->in);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	gsmd_free_requests(gser->out, &port->read_pool);
+	gsmd_free_requests(gser->out, &port->read_queue);
+	gsmd_free_requests(gser->in, &port->write_pool);
+	port->n_read = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* SMD channel never opened (or already closed); nothing more to do */
+	if (!test_bit(CH_OPENED, &port->pi->flags))
+		return;
+
+	/* lower the dtr */
+	port->cbits_to_modem = 0;
+	smd_tiocmset(port->pi->ch,
+			port->cbits_to_modem,
+			~port->cbits_to_modem);
+
+	smd_close(port->pi->ch);
+	port->pi->flags = 0;
+}
+
+/* Release the port allocated by gsmd_port_alloc().
+ *
+ * Fix: the original test was inverted ("if (!port) kfree(port)"), so it
+ * only ever called kfree() on a NULL pointer and leaked every real
+ * allocation.  kfree(NULL) is a no-op, so free unconditionally, and clear
+ * the table slot to avoid leaving a dangling pointer behind.
+ */
+static void gsmd_port_free(int portno)
+{
+	kfree(smd_ports[portno].port);
+	smd_ports[portno].port = NULL;
+}
+
+/* Allocate and initialize one gsmd_port and publish it in smd_ports[].
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ * NOTE(review): @coding is currently unused here; the line coding built
+ * by the caller is never applied.
+ */
+static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding)
+{
+	struct gsmd_port *port;
+
+	port = kzalloc(sizeof(struct gsmd_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->port_num = portno;
+	port->pi = &smd_pi[portno];
+
+	spin_lock_init(&port->port_lock);
+
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_WORK(&port->push, gsmd_rx_push);
+
+	INIT_LIST_HEAD(&port->write_pool);
+	INIT_WORK(&port->pull, gsmd_tx_pull);
+
+	INIT_WORK(&port->connect_work, gsmd_connect_work);
+	init_waitqueue_head(&port->pi->wait);
+
+	smd_ports[portno].port = port;
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/* debugfs read handler: dump per-port byte counters and control-line
+ * state for all active ports into a 512-byte scratch buffer.
+ */
+static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct gsmd_port *port;
+	char *buf;
+	unsigned long flags;
+	int temp = 0;
+	int i;
+	int ret;
+
+	buf = kzalloc(sizeof(char) * 512, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < n_smd_ports; i++) {
+		port = smd_ports[i].port;
+		/* snapshot the counters under the port lock */
+		spin_lock_irqsave(&port->port_lock, flags);
+		temp += scnprintf(buf + temp, 512 - temp,
+				"###PORT:%d###\n"
+				"nbytes_tolaptop: %lu\n"
+				"nbytes_tomodem:  %lu\n"
+				"cbits_to_modem:  %u\n"
+				"cbits_to_laptop: %u\n"
+				"n_read: %u\n",
+				i, port->nbytes_tolaptop, port->nbytes_tomodem,
+				port->cbits_to_modem, port->cbits_to_laptop,
+				port->n_read);
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+
+}
+
+/* debugfs write handler: any write resets the byte counters of every
+ * port.  The written data itself is ignored.
+ */
+static ssize_t debug_smd_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct gsmd_port *port;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < n_smd_ports; i++) {
+		port = smd_ports[i].port;
+
+		spin_lock_irqsave(&port->port_lock, flags);
+		port->nbytes_tolaptop = 0;
+		port->nbytes_tomodem = 0;
+		spin_unlock_irqrestore(&port->port_lock, flags);
+	}
+
+	return count;
+}
+
+/* debugfs open: no per-open state needed, always succeeds */
+static int debug_smd_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* file_operations for the usb_gsmd/status debugfs node */
+static const struct file_operations debug_gsmd_ops = {
+	.open = debug_smd_open,
+	.read = debug_smd_read_stats,
+	.write = debug_smd_reset_stats,
+};
+
+/* Create the usb_gsmd/status debugfs node.  Best-effort: failures are
+ * silently ignored since debugfs is diagnostic only.
+ *
+ * Fix: debugfs_create_dir() returns NULL on allocation failure and an
+ * ERR_PTR when debugfs is unavailable; the original IS_ERR() test let a
+ * NULL dentry through to debugfs_create_file().
+ */
+static void gsmd_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("usb_gsmd", 0);
+	if (IS_ERR_OR_NULL(dent))
+		return;
+
+	debugfs_create_file("status", 0444, dent, 0, &debug_gsmd_ops);
+}
+#else
+static void gsmd_debugfs_init(void) {}
+#endif
+
+/* gsmd_setup - allocate @count SMD serial ports and the shared workqueue.
+ * @g:     gadget (used for logging only)
+ * @count: number of ports, 1..SMD_N_PORTS
+ *
+ * Returns 0 on success or a negative errno; on failure every partially
+ * allocated port and the workqueue are released.
+ *
+ * Fixes vs. original: the CDC line-coding fields were swapped
+ * (bCharFormat carried the data-bit count and bDataBits the stop-bit
+ * code; per the CDC PSTN spec bCharFormat is stop bits and bDataBits is
+ * data bits), and n_smd_ports is reset on the failure path so a later
+ * retry of gsmd_setup() starts from a clean state.
+ */
+int gsmd_setup(struct usb_gadget *g, unsigned count)
+{
+	struct usb_cdc_line_coding	coding;
+	int ret;
+	int i;
+
+	pr_debug("%s: g:%p count: %d\n", __func__, g, count);
+
+	if (!count || count > SMD_N_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d gadget:%p\n",
+				__func__, count, g);
+		return -EINVAL;
+	}
+
+	/* default line coding: 9600 8N1 */
+	coding.dwDTERate = cpu_to_le32(9600);
+	coding.bCharFormat = USB_CDC_1_STOP_BITS;
+	coding.bParityType = USB_CDC_NO_PARITY;
+	coding.bDataBits = 8;
+
+	gsmd_wq = create_singlethread_workqueue("k_gsmd");
+	if (!gsmd_wq) {
+		pr_err("%s: Unable to create workqueue gsmd_wq\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		mutex_init(&smd_ports[i].lock);
+		ret = gsmd_port_alloc(i, &coding);
+		if (ret) {
+			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
+			goto free_smd_ports;
+		}
+		n_smd_ports++;
+	}
+
+	gsmd_debugfs_init();
+
+	return 0;
+free_smd_ports:
+	for (i = 0; i < n_smd_ports; i++)
+		gsmd_port_free(i);
+
+	/* allow a clean retry after a failure */
+	n_smd_ports = 0;
+
+	destroy_workqueue(gsmd_wq);
+
+	return ret;
+}
+
+/* gsmd_cleanup - counterpart of gsmd_setup(); currently unimplemented.
+ * NOTE(review): ports and the workqueue allocated by gsmd_setup() are
+ * never released here (intentional "TBD" in the original).
+ */
+void gsmd_cleanup(struct usb_gadget *g, unsigned count)
+{
+	/* TBD */
+}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ab085f1..4447b0f 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -59,6 +59,19 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ehci-hcd.
 
+config USB_EHCI_EHSET
+	bool "Embedded High-speed Host Electrical Test Support"
+	depends on USB_EHCI_HCD
+	---help---
+	  This option is required for EHSET Host Compliance Tests support on an
+	  embedded Hi-speed USB Host or OTG port.
+
+	  This enables the software support for the "Single Step Set Feature" test.
+	  Apart from this test, other EHSET tests TEST_SE0/J/K/PACKET are part
+	  of EHCI specification and their support already exists in the EHCI driver.
+
+	  If unsure, say N.
+
 config USB_EHCI_ROOT_HUB_TT
 	bool "Root Hub Transaction Translators"
 	depends on USB_EHCI_HCD
@@ -230,6 +243,22 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called oxu210hp-hcd.
 
+config USB_EHCI_MSM_72K
+	bool "Support for Legacy Qualcomm on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && USB_MSM_OTG_72K && ARCH_MSM
+	---help---
+	  This driver enables support for USB host controller
+	  in pre 8660 qualcomm chipsets(8660, 7X30, 8X50 and 7X27).
+
+config USB_FS_HOST
+	bool "Support for Full Speed Host Mode"
+	depends on USB_EHCI_MSM_72K && ARCH_QSD8X50
+	default n
+	---help---
+	  Enables support for the full speed USB controller core present
+	  on the Qualcomm chipsets
+
+
 config USB_ISP116X_HCD
 	tristate "ISP116X HCD support"
 	depends on USB
@@ -528,6 +557,15 @@
 	  To compile this driver a module, choose M here: the module
 	  will be called "whci-hcd".
 
+config USB_PEHCI_HCD
+	tristate "ST-E ISP1763A Host Controller"
+	depends on USB
+	help
+	  Driver for ST-E isp1763A USB Host 2.0 Controllers.
+
+	  To compile this driver a module, choose M here: the module
+	  will be called "pehci".
+
 config USB_HWA_HCD
 	tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 624a362..6f5b0e1 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -15,6 +15,7 @@
 xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
 
 obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
+obj-$(CONFIG_USB_PEHCI_HCD)	+= pehci/
 
 obj-$(CONFIG_PCI)		+= pci-quirks.o
 
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 40a844c..dd67cad 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -39,7 +39,7 @@
  * (host controller _Structural_ parameters)
  * see EHCI spec, Table 2-4 for each value
  */
-static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
+static void __maybe_unused dbg_hcs_params (struct ehci_hcd *ehci, char *label)
 {
 	u32	params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
@@ -83,7 +83,7 @@
  * (host controller _Capability_ parameters)
  * see EHCI Spec, Table 2-5 for each value
  * */
-static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
+static void __maybe_unused dbg_hcc_params (struct ehci_hcd *ehci, char *label)
 {
 	u32	params = ehci_readl(ehci, &ehci->caps->hcc_params);
 
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 9ff9abc..8e6ef28 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -451,7 +451,7 @@
 	spin_unlock_irq(&ehci->lock);
 }
 
-static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
+static void __maybe_unused ehci_port_power (struct ehci_hcd *ehci, int is_on)
 {
 	unsigned port;
 
@@ -667,7 +667,7 @@
 }
 
 /* start HC running; it's halted, ehci_init() has been run (once) */
-static int ehci_run (struct usb_hcd *hcd)
+static int __maybe_unused ehci_run (struct usb_hcd *hcd)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
 	int			retval;
@@ -1115,7 +1115,7 @@
 	spin_unlock_irqrestore (&ehci->lock, flags);
 }
 
-static void
+static void __maybe_unused
 ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
@@ -1254,6 +1254,11 @@
 #define PLATFORM_DRIVER		spear_ehci_hcd_driver
 #endif
 
+#ifdef CONFIG_USB_EHCI_MSM_72K
+#include "ehci-msm72k.c"
+#define PLATFORM_DRIVER		ehci_msm_driver
+#endif
+
 #ifdef CONFIG_USB_EHCI_MSM
 #include "ehci-msm.c"
 #define PLATFORM_DRIVER		ehci_msm_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 88cfb8f..15cac20 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -735,6 +735,151 @@
 }
 
 /*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_EHCI_EHSET
+
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+
+/* URB completion callback for the EHSET test URB: signals the completion
+ * stored in urb->context so the submitter can stop waiting.
+ */
+static void usb_ehset_completion(struct urb *urb)
+{
+	struct completion  *done = urb->context;
+
+	complete(done);
+}
+static int submit_single_step_set_feature(
+	struct usb_hcd  *hcd,
+	struct urb      *urb,
+	int 		is_setup
+);
+
+/* Allocate a URB and initialize the various fields of it.
+ * This API is used by the single_step_set_feature test of
+ * EHSET where IN packet of the GetDescriptor request is
+ * sent after 15secs of the SETUP packet.
+ * Return NULL if failed.
+ *
+ * @udev: device under test (root hub child)
+ * @dr:   pre-filled usb_ctrlrequest (SETUP packet)
+ * @buf:  USB_DT_DEVICE_SIZE-byte buffer for the descriptor data
+ * @done: completion signalled by usb_ehset_completion()
+ *
+ * The URB bypasses usb_submit_urb(), so bookkeeping normally done there
+ * (use_count/urbnum increments, DMA mapping) is replicated by hand.
+ */
+static struct urb *
+request_single_step_set_feature_urb(
+	struct usb_device 	*udev,
+	void 			*dr,
+	void 			*buf,
+	struct completion 	*done
+) {
+	struct urb *urb;
+	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+	struct usb_host_endpoint	*ep;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return NULL;
+
+	urb->pipe = usb_rcvctrlpipe(udev, 0);
+	ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+			[usb_pipeendpoint(urb->pipe)];
+	if (!ep) {
+		usb_free_urb(urb);
+		return NULL;
+	}
+
+	/* Initialize the various URB fields as these are used
+	 * by the HCD driver to queue it and as well as
+	 * when completion happens.
+	 */
+	urb->ep = ep;
+	urb->dev = udev;
+	urb->setup_packet = (void *)dr;
+	urb->transfer_buffer = buf;
+	urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+	urb->complete = usb_ehset_completion;
+	urb->status = -EINPROGRESS;
+	urb->actual_length = 0;
+	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK)
+				| URB_DIR_IN ;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	/* NOTE(review): dma_map_single() results are not checked with
+	 * dma_mapping_error() — confirm whether that is acceptable here.
+	 */
+	urb->setup_dma = dma_map_single(
+			hcd->self.controller,
+			urb->setup_packet,
+			sizeof(struct usb_ctrlrequest),
+			DMA_TO_DEVICE);
+	urb->transfer_dma = dma_map_single(
+			hcd->self.controller,
+			urb->transfer_buffer,
+			urb->transfer_buffer_length,
+			DMA_FROM_DEVICE);
+	urb->context = done;
+	return urb;
+}
+
+/* Run the EHSET "Single Step Set Feature" compliance test on @port:
+ * issue the SETUP stage of a GetDescriptor(Device) request, wait the
+ * spec-mandated 15 seconds, then complete the IN data and status stages
+ * by resubmitting the same URB.  Returns 0 on success or a negative
+ * errno.
+ */
+static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+	int retval = -ENOMEM;
+	struct usb_ctrlrequest *dr;
+	struct urb *urb;
+	struct usb_device *udev ;
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+	struct usb_device_descriptor *buf;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	/*Obtain udev of the rhub's child port */
+	udev = hcd->self.root_hub->children[port];
+	if (!udev) {
+		ehci_err(ehci, "No device attached to the RootHub\n");
+		return -ENODEV;
+	}
+	buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+	if (!dr) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+
+	/* Fill Setup packet for GetDescriptor */
+	dr->bRequestType = USB_DIR_IN;
+	dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+	dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+	dr->wIndex = 0;
+	dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+	urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+	if (!urb)
+		goto cleanup;
+
+	/* Now complete just the SETUP stage */
+	retval = submit_single_step_set_feature(hcd, urb, 1);
+	if (retval)
+		goto out1;
+	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
+		goto out1;
+	}
+	/* EHSET mandates a 15 s pause between SETUP and IN stages */
+	msleep(15 * 1000);
+	/* Complete remaining DATA and status stages */
+	/* No need to free the URB, we can reuse the same */
+	urb->status = -EINPROGRESS;
+	usb_get_urb(urb);
+	atomic_inc(&urb->use_count);
+	atomic_inc(&urb->dev->urbnum);
+	retval = submit_single_step_set_feature(hcd, urb, 0);
+	if (!retval && !wait_for_completion_timeout(&done,
+						msecs_to_jiffies(2000))) {
+		usb_kill_urb(urb);
+		retval = -ETIMEDOUT;
+		ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
+	}
+out1:
+	usb_free_urb(urb);
+cleanup:
+	kfree(dr);
+	kfree(buf);
+	return retval;
+}
+#endif
+/*-------------------------------------------------------------------------*/
 
 static int ehci_hub_control (
 	struct usb_hcd	*hcd,
@@ -1056,6 +1201,16 @@
 					|| (temp & PORT_RESET) != 0)
 				goto error;
 
+			ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+#ifdef	CONFIG_USB_OTG
+			if (hcd->self.otg_port == (wIndex + 1) &&
+					hcd->self.b_hnp_enable &&
+					ehci->start_hnp) {
+				set_bit(wIndex, &ehci->suspended_ports);
+				ehci->start_hnp(ehci);
+				break;
+			}
+#endif
 			/* After above check the port must be connected.
 			 * Set appropriate bit thus could put phy into low power
 			 * mode if we have hostpc feature
@@ -1118,12 +1273,23 @@
 		 * about the EHCI-specific stuff.
 		 */
 		case USB_PORT_FEAT_TEST:
-			if (!selector || selector > 5)
+			if (selector && selector <= 5) {
+				ehci_quiesce(ehci);
+				ehci_halt(ehci);
+				temp |= selector << 16;
+				ehci_writel(ehci, temp, status_reg);
+			}
+#ifdef CONFIG_USB_EHCI_EHSET
+			else if (selector
+				  == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				retval = ehset_single_step_set_feature(hcd,
+								   wIndex);
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
+#endif
+			else
 				goto error;
-			ehci_quiesce(ehci);
-			ehci_halt(ehci);
-			temp |= selector << 16;
-			ehci_writel(ehci, temp, status_reg);
 			break;
 
 		default:
@@ -1151,7 +1317,7 @@
 	set_owner(ehci, --portnum, PORT_OWNER);
 }
 
-static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
+static int __maybe_unused ehci_port_handed_over(struct usb_hcd *hcd, int portnum)
 {
 	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
 	u32 __iomem		*reg;
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index b5a0bf6..411fa97 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -220,6 +220,9 @@
 
 	dev_dbg(dev, "ehci-msm PM suspend\n");
 
+	if (!hcd->rh_registered)
+		return 0;
+
 	/*
 	 * EHCI helper function has also the same check before manipulating
 	 * port wakeup flags.  We do check here the same condition before
@@ -241,6 +244,10 @@
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
 
 	dev_dbg(dev, "ehci-msm PM resume\n");
+
+	if (!hcd->rh_registered)
+		return 0;
+
 	ehci_prepare_ports_for_controller_resume(hcd_to_ehci(hcd));
 
 	return 0;
diff --git a/drivers/usb/host/ehci-msm72k.c b/drivers/usb/host/ehci-msm72k.c
new file mode 100644
index 0000000..e550e2b
--- /dev/null
+++ b/drivers/usb/host/ehci-msm72k.c
@@ -0,0 +1,823 @@
+/* ehci-msm.c - HSUSB Host Controller Driver Implementation
+ *
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * Partly derived from ehci-fsl.c and ehci-hcd.c
+ * Copyright (c) 2000-2004 by David Brownell
+ * Copyright (c) 2005 MontaVista Software
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+
+#include <mach/board.h>
+#include <mach/rpc_hsusb.h>
+#include <mach/msm_hsusb.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm_otg.h>
+#include <mach/clk.h>
+#include <linux/wakelock.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/msm72k_otg.h>
+
+#define MSM_USB_BASE (hcd->regs)
+
+/* Per-controller state for the MSM EHCI glue; lives in hcd->hcd_priv
+ * (see hcd_to_mhcd()/mhcd_to_hcd()).
+ */
+struct msmusb_hcd {
+	struct ehci_hcd ehci;		/* must stay first: overlays hcd_priv */
+	struct clk *clk;		/* controller clocks, used for the */
+	struct clk *pclk;		/* serial-PMIC PHY (see *_clks()) */
+	unsigned in_lpm;		/* nonzero while PHY is in low-power mode */
+	struct work_struct lpm_exit_work; /* runs usb_lpm_exit_w() */
+	spinlock_t lock;		/* protects in_lpm */
+	struct wake_lock wlock;		/* held while the host is active */
+	unsigned int clk_enabled;	/* guards double enable/disable */
+	struct msm_usb_host_platform_data *pdata;
+	unsigned running;		/* host started via REQUEST_START */
+	struct otg_transceiver *xceiv;	/* set when an OTG driver is present */
+	struct work_struct otg_work;	/* defers host start/stop from IRQ ctx */
+	unsigned flags;			/* last REQUEST_* passed to start_host */
+	struct msm_otg_ops otg_ops;
+};
+
+/* glue state is stored in the hcd's private area */
+static inline struct msmusb_hcd *hcd_to_mhcd(struct usb_hcd *hcd)
+{
+	return (struct msmusb_hcd *) (hcd->hcd_priv);
+}
+
+/* inverse of hcd_to_mhcd(): recover the usb_hcd embedding our state */
+static inline struct usb_hcd *mhcd_to_hcd(struct msmusb_hcd *mhcd)
+{
+	return container_of((void *) mhcd, struct usb_hcd, hcd_priv);
+}
+
+/* Vote (vote != 0) or unvote the EBI1 bus clock for the host.  A no-op
+ * when an OTG transceiver is present, since OTG owns that vote.
+ */
+static void msm_xusb_pm_qos_update(struct msmusb_hcd *mhcd, int vote)
+{
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	/* if otg driver is available, it would take
+	 * care of voting for appropriate pclk source
+	 */
+	if (mhcd->xceiv)
+		return;
+
+	if (vote)
+		clk_enable(pdata->ebi1_clk);
+	else
+		clk_disable(pdata->ebi1_clk);
+}
+
+/* Enable the controller clocks for the serial-PMIC PHY; idempotent via
+ * clk_enabled.  Integrated PHY clocks are managed by the OTG driver.
+ */
+static void msm_xusb_enable_clks(struct msmusb_hcd *mhcd)
+{
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	if (mhcd->clk_enabled)
+		return;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		/* OTG driver takes care of clock management */
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		clk_enable(mhcd->clk);
+		clk_enable(mhcd->pclk);
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+		return;
+	}
+	mhcd->clk_enabled = 1;
+}
+
+/* Counterpart of msm_xusb_enable_clks(); idempotent via clk_enabled. */
+static void msm_xusb_disable_clks(struct msmusb_hcd *mhcd)
+{
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	if (!mhcd->clk_enabled)
+		return;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		/* OTG driver takes care of clock management */
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		clk_disable(mhcd->clk);
+		clk_disable(mhcd->pclk);
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+		return;
+	}
+	mhcd->clk_enabled = 0;
+
+}
+
+/* Bring the PHY out of low-power mode.  Only the serial-PMIC PHY is
+ * handled here; the integrated PHY case falls through with -ENODEV
+ * (resume is the OTG driver's job there).
+ */
+static int usb_wakeup_phy(struct usb_hcd *hcd)
+{
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+	int ret = -ENODEV;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		ret = msm_fsusb_resume_phy();
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM
+/* Put the PHY into low-power mode.  Returns 0 for the integrated PHY
+ * (handled by OTG), the suspend result for the serial-PMIC PHY, and
+ * -ENODEV for unknown PHY types.
+ */
+static int usb_suspend_phy(struct usb_hcd *hcd)
+{
+	int ret = 0;
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		/* NOTE(review): the result of set_remote_wakeup() is
+		 * immediately overwritten — confirm whether that failure
+		 * should be propagated.
+		 */
+		ret = msm_fsusb_set_remote_wakeup();
+		ret = msm_fsusb_suspend_phy();
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+		ret = -ENODEV;
+		break;
+	}
+
+	return ret;
+}
+
+/* Enter low-power mode: suspend the PHY, gate the clocks and arm the
+ * wakeup IRQ.  Returns 0 on success (or if already in LPM), -1 when the
+ * controller is still running or the PHY refuses to suspend.
+ *
+ * Fix vs. original: log-message typo "runnning" corrected.
+ * NOTE(review): deriving dev via container_of on the platform_data
+ * offset is fragile — confirm it matches how the hcd is stored in the
+ * platform device.
+ */
+static int usb_lpm_enter(struct usb_hcd *hcd)
+{
+	struct device *dev = container_of((void *)hcd, struct device,
+							platform_data);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	/* keep the wakeup IRQ quiet while we transition */
+	disable_irq(hcd->irq);
+	if (mhcd->in_lpm) {
+		pr_info("%s: already in lpm. nothing to do\n", __func__);
+		enable_irq(hcd->irq);
+		return 0;
+	}
+
+	if (HC_IS_RUNNING(hcd->state)) {
+		pr_info("%s: can't enter into lpm. controller is running\n",
+			__func__);
+		enable_irq(hcd->irq);
+		return -1;
+	}
+
+	pr_info("%s: lpm enter procedure started\n", __func__);
+
+	mhcd->in_lpm = 1;
+
+	if (usb_suspend_phy(hcd)) {
+		mhcd->in_lpm = 0;
+		enable_irq(hcd->irq);
+		pr_info("phy suspend failed\n");
+		pr_info("%s: lpm enter procedure end\n", __func__);
+		return -1;
+	}
+
+	msm_xusb_disable_clks(mhcd);
+
+	if (mhcd->xceiv && mhcd->xceiv->set_suspend)
+		mhcd->xceiv->set_suspend(mhcd->xceiv, 1);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(hcd->irq);
+	enable_irq(hcd->irq);
+	pr_info("%s: lpm enter procedure end\n", __func__);
+	return 0;
+}
+#endif
+
+/* Worker that completes LPM exit: re-enable clocks, wake the PHY, drive
+ * resume signalling and re-enable the (disabled-by-usb_lpm_exit) IRQ.
+ */
+void usb_lpm_exit_w(struct work_struct *work)
+{
+	struct msmusb_hcd *mhcd = container_of((void *) work,
+			struct msmusb_hcd, lpm_exit_work);
+
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+
+	struct device *dev = container_of((void *)hcd, struct device,
+							platform_data);
+	msm_xusb_enable_clks(mhcd);
+
+
+	if (usb_wakeup_phy(hcd)) {
+		pr_err("fatal error: cannot bring phy out of lpm\n");
+		return;
+	}
+
+	/* If resume signalling finishes before lpm exit, PCD is not set in
+	 * USBSTS register. Drive resume signal to the downstream device now
+	 * so that EHCI can process the upcoming port change interrupt.*/
+
+	writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+
+	if (mhcd->xceiv && mhcd->xceiv->set_suspend)
+		mhcd->xceiv->set_suspend(mhcd->xceiv, 0);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(hcd->irq);
+	enable_irq(hcd->irq);
+}
+
+/* Kick off LPM exit: clears in_lpm under the lock and defers the actual
+ * wakeup (clocks/PHY) to usb_lpm_exit_w().  The IRQ stays disabled until
+ * the worker re-enables it.
+ */
+static void usb_lpm_exit(struct usb_hcd *hcd)
+{
+	unsigned long flags;
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	spin_lock_irqsave(&mhcd->lock, flags);
+	if (!mhcd->in_lpm) {
+		spin_unlock_irqrestore(&mhcd->lock, flags);
+		return;
+	}
+	mhcd->in_lpm = 0;
+	disable_irq_nosync(hcd->irq);
+	schedule_work(&mhcd->lpm_exit_work);
+	spin_unlock_irqrestore(&mhcd->lock, flags);
+}
+
+/* IRQ wrapper: swallow interrupts while the integrated PHY is in LPM
+ * (the OTG driver is already scheduled to bring it out), otherwise hand
+ * off to the generic EHCI handler.
+ */
+static irqreturn_t ehci_msm_irq(struct usb_hcd *hcd)
+{
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg);
+
+	/*
+	 * OTG scheduled a work to get Integrated PHY out of LPM,
+	 * WAIT till then */
+	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
+		if (atomic_read(&otg->in_lpm))
+			return IRQ_HANDLED;
+
+	return ehci_irq(hcd);
+}
+
+#ifdef CONFIG_PM
+
+/* Root-hub bus suspend: run the generic EHCI suspend, then put the PHY
+ * into low-power mode (via OTG for the integrated PHY, directly for the
+ * serial-PMIC PHY), drop the runtime-PM vote and release the wakelock.
+ *
+ * Fix vs. original: error-message typo "faield" corrected.
+ */
+static int ehci_msm_bus_suspend(struct usb_hcd *hcd)
+{
+	int ret;
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct device *dev = hcd->self.controller;
+
+	ret = ehci_bus_suspend(hcd);
+	if (ret) {
+		pr_err("ehci_bus suspend failed\n");
+		return ret;
+	}
+	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED)
+		ret = otg_set_suspend(mhcd->xceiv, 1);
+	else
+		ret = usb_lpm_enter(hcd);
+
+	pm_runtime_put_noidle(dev);
+	pm_runtime_suspend(dev);
+	wake_unlock(&mhcd->wlock);
+	return ret;
+}
+
+/* Root-hub bus resume: reverse of ehci_msm_bus_suspend().  For the
+ * serial-PMIC PHY, a pending lpm_exit worker is flushed synchronously so
+ * clocks/PHY are up before the generic EHCI resume runs.
+ */
+static int ehci_msm_bus_resume(struct usb_hcd *hcd)
+{
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct device *dev = hcd->self.controller;
+
+	wake_lock(&mhcd->wlock);
+	pm_runtime_get_noresume(dev);
+	pm_runtime_resume(dev);
+
+	if (PHY_TYPE(mhcd->pdata->phy_info) == USB_PHY_INTEGRATED) {
+		otg_set_suspend(mhcd->xceiv, 0);
+	} else { /* PMIC serial phy */
+		usb_lpm_exit(hcd);
+		/* run the exit worker ourselves if it had not started yet */
+		if (cancel_work_sync(&(mhcd->lpm_exit_work)))
+			usb_lpm_exit_w(&mhcd->lpm_exit_work);
+	}
+
+	return ehci_bus_resume(hcd);
+
+}
+
+#else
+
+#define ehci_msm_bus_suspend NULL
+#define ehci_msm_bus_resume NULL
+
+#endif	/* CONFIG_PM */
+
+/* hc_driver .reset: locate the capability/operational register blocks,
+ * run the generic EHCI init/reset, and apply MSM AHB workarounds.
+ */
+static int ehci_msm_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	ehci->caps = USB_CAPLENGTH;
+	ehci->regs = USB_CAPLENGTH +
+		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* cache the data to minimize the chip reads*/
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	/* the MSM core has an embedded transaction translator */
+	hcd->has_tt = 1;
+	ehci->sbrn = HCD_USB2;
+
+	retval = ehci_reset(ehci);
+
+	/* SW workaround for USB stability issues*/
+	writel(0x0, USB_AHB_MODE);
+	writel(0x0, USB_AHB_BURST);
+
+	return retval;
+}
+
+/* PORTSC PTS (parallel transceiver select) value for the given phy_info.
+ * Fix: the expansion must be fully parenthesized — without the outer
+ * parens, "PTS_VAL(x) | PORT_POWER | PORT_PE" parsed as
+ * "cond ? PORTSC_PTS_SERIAL : (PORTSC_PTS_ULPI | PORT_POWER | PORT_PE)"
+ * because '|' binds tighter than '?:', silently dropping PORT_POWER and
+ * PORT_PE for the serial-PMIC PHY.
+ */
+#define PTS_VAL(x) ((PHY_TYPE(x) == USB_PHY_SERIAL_PMIC) ? \
+				PORTSC_PTS_SERIAL : PORTSC_PTS_ULPI)
+
+/* hc_driver .start: program host mode, PHY selection, port power and the
+ * schedule base addresses, then set CMD_RUN and unmask interrupts.
+ * MSM-specific replacement for the generic ehci_run().
+ */
+static int ehci_msm_run(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci  = hcd_to_ehci(hcd);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	int             retval = 0;
+	int     	port   = HCS_N_PORTS(ehci->hcs_params);
+	u32 __iomem     *reg_ptr;
+	u32             hcc_params;
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	hcd->uses_new_polling = 1;
+	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+	/* set hostmode */
+	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
+	ehci_writel(ehci, (USBMODE_VBUS | USBMODE_SDIS), reg_ptr);
+
+	/* port configuration - phy, port speed, port power, port enable */
+	while (port--)
+		ehci_writel(ehci, (PTS_VAL(pdata->phy_info) | PORT_POWER |
+				PORT_PE), &ehci->regs->port_status[port]);
+
+	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+	if (HCC_64BIT_ADDR(hcc_params))
+		ehci_writel(ehci, 0, &ehci->regs->segment);
+
+	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+	ehci->command |= CMD_RUN;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+
+	hcd->state = HC_STATE_RUNNING;
+
+	/*Enable appropriate Interrupts*/
+	ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+
+	return retval;
+}
+
+/* hc_driver for the MSM on-chip EHCI core: MSM-specific irq/reset/start
+ * and bus PM hooks, generic EHCI for everything else.
+ */
+static struct hc_driver msm_hc_driver = {
+	.description		= hcd_name,
+	.product_desc 		= "Qualcomm On-Chip EHCI Host Controller",
+	.hcd_priv_size 		= sizeof(struct msmusb_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq 			= ehci_msm_irq,
+	.flags 			= HCD_USB2,
+
+	.reset 			= ehci_msm_reset,
+	.start 			= ehci_msm_run,
+
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+	.bus_suspend		= ehci_msm_bus_suspend,
+	.bus_resume		= ehci_msm_bus_resume,
+	.relinquish_port	= ehci_relinquish_port,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+/* State machine driven by the OTG driver (via msm_hsusb_start_host):
+ * handles host start/stop, resume, and HNP suspend/resume transitions.
+ * @handle:  the msmusb_hcd
+ * @request: REQUEST_* code
+ */
+static void msm_hsusb_request_host(void *handle, int request)
+{
+	struct msmusb_hcd *mhcd = handle;
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+	struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg);
+#ifdef CONFIG_USB_OTG
+	struct usb_device *udev = hcd->self.root_hub;
+#endif
+	struct device *dev = hcd->self.controller;
+
+	switch (request) {
+#ifdef CONFIG_USB_OTG
+	case REQUEST_HNP_SUSPEND:
+		/* disable Root hub auto suspend. As hardware is configured
+		 * for peripheral mode, mark hardware is not available.
+		 */
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
+			pm_runtime_disable(&udev->dev);
+			/* Mark root hub as disconnected. This would
+			 * protect suspend/resume via sysfs.
+			 */
+			udev->state = USB_STATE_NOTATTACHED;
+			clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+			hcd->state = HC_STATE_HALT;
+			pm_runtime_put_noidle(dev);
+			pm_runtime_suspend(dev);
+		}
+		break;
+	case REQUEST_HNP_RESUME:
+		/* host regains the bus after HNP: full controller re-init */
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
+			pm_runtime_get_noresume(dev);
+			pm_runtime_resume(dev);
+			disable_irq(hcd->irq);
+			ehci_msm_reset(hcd);
+			ehci_msm_run(hcd);
+			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+			pm_runtime_enable(&udev->dev);
+			udev->state = USB_STATE_CONFIGURED;
+			enable_irq(hcd->irq);
+		}
+		break;
+#endif
+	case REQUEST_RESUME:
+		usb_hcd_resume_root_hub(hcd);
+		break;
+	case REQUEST_START:
+		if (mhcd->running)
+			break;
+		pm_runtime_get_noresume(dev);
+		pm_runtime_resume(dev);
+		wake_lock(&mhcd->wlock);
+		msm_xusb_pm_qos_update(mhcd, 1);
+		msm_xusb_enable_clks(mhcd);
+		/* keep the OTG clock on across usb_add_hcd() */
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
+			if (otg->set_clk)
+				otg->set_clk(mhcd->xceiv, 1);
+		if (pdata->vbus_power)
+			pdata->vbus_power(pdata->phy_info, 1);
+		if (pdata->config_gpio)
+			pdata->config_gpio(1);
+		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+		mhcd->running = 1;
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
+			if (otg->set_clk)
+				otg->set_clk(mhcd->xceiv, 0);
+		break;
+	case REQUEST_STOP:
+		if (!mhcd->running)
+			break;
+		mhcd->running = 0;
+		/* come out of lpm before deregistration */
+		if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) {
+			usb_lpm_exit(hcd);
+			if (cancel_work_sync(&(mhcd->lpm_exit_work)))
+				usb_lpm_exit_w(&mhcd->lpm_exit_work);
+		}
+		usb_remove_hcd(hcd);
+		if (pdata->config_gpio)
+			pdata->config_gpio(0);
+		if (pdata->vbus_power)
+			pdata->vbus_power(pdata->phy_info, 0);
+		msm_xusb_disable_clks(mhcd);
+		/* hold the wakelock briefly so the stop can complete */
+		wake_lock_timeout(&mhcd->wlock, HZ/2);
+		msm_xusb_pm_qos_update(mhcd, 0);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_suspend(dev);
+		break;
+	}
+}
+
+/*
+ * Deferred-work handler for host start/stop requests.  Replays the request
+ * value cached in mhcd->flags (set by msm_hsusb_start_host()) via
+ * msm_hsusb_request_host() in process context.
+ */
+static void msm_hsusb_otg_work(struct work_struct *work)
+{
+	struct msmusb_hcd *mhcd;
+
+	mhcd = container_of(work, struct msmusb_hcd, otg_work);
+	msm_hsusb_request_host((void *)mhcd, mhcd->flags);
+}
+/*
+ * OTG transceiver callback: start (start != 0) or stop the host controller.
+ * The request is cached in mhcd->flags; from interrupt context it is
+ * deferred to otg_work (the request path takes wakelocks and makes runtime
+ * PM calls, so it is presumably not IRQ-safe), otherwise it is handled
+ * synchronously.
+ */
+static void msm_hsusb_start_host(struct usb_bus *bus, int start)
+{
+	struct usb_hcd *hcd = bus_to_hcd(bus);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	mhcd->flags = start;
+	if (in_interrupt())
+		schedule_work(&mhcd->otg_work);
+	else
+		msm_hsusb_request_host((void *)mhcd, mhcd->flags);
+
+}
+
+/*
+ * One-time PHY initialisation, dispatched on the PHY type from platform
+ * data.  The integrated high-speed PHY needs no work here (the OTG driver
+ * programs it); the serial PMIC PHY is initialised over RPC with the
+ * controller clocks briefly enabled.  Returns 0 on success or a negative
+ * errno.
+ */
+static int msm_xusb_init_phy(struct msmusb_hcd *mhcd)
+{
+	int ret = -ENODEV;
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		/* Nothing to program for the integrated PHY.  The break is
+		 * required: falling through would run the serial-PMIC RPC
+		 * setup and clobber ret.
+		 */
+		ret = 0;
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		msm_xusb_enable_clks(mhcd);
+		/* mask controller interrupts while the PHY is set up */
+		writel(0, USB_USBINTR);
+		ret = msm_fsusb_rpc_init(&mhcd->otg_ops);
+		if (!ret)
+			msm_fsusb_init_phy();
+		msm_xusb_disable_clks(mhcd);
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+	}
+
+	return ret;
+}
+
+/*
+ * Close the RPC channel used to control the PHY.
+ * Integrated PHY: close the HS USB RPC link only when no transceiver was
+ * obtained (when mhcd->xceiv is set, the link is presumably owned by the
+ * OTG driver -- see msm_xusb_init_host()).
+ * Serial PMIC PHY: reset the PHY and tear down the FS USB RPC layer.
+ * Returns a negative errno when nothing was closed or the close failed.
+ */
+static int msm_xusb_rpc_close(struct msmusb_hcd *mhcd)
+{
+	int retval = -ENODEV;
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		if (!mhcd->xceiv)
+			retval = msm_hsusb_rpc_close();
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		retval = msm_fsusb_reset_phy();
+		msm_fsusb_rpc_deinit();
+		break;
+	default:
+		pr_err("%s: undefined phy type ( %X ) \n", __func__,
+						pdata->phy_info);
+	}
+	return retval;
+}
+
+#ifdef	CONFIG_USB_OTG
+/*
+ * EHCI hook invoked when the host should hand the host role to the peer:
+ * Host Negotiation Protocol is delegated entirely to the OTG transceiver
+ * driver.  Compiled out (NULL hook) without CONFIG_USB_OTG.
+ */
+static void ehci_msm_start_hnp(struct ehci_hcd *ehci)
+{
+	struct usb_hcd *hcd = ehci_to_hcd(ehci);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+
+	/* OTG driver handles HNP */
+	otg_start_hnp(mhcd->xceiv);
+}
+#else
+#define ehci_msm_start_hnp	NULL
+#endif
+
+/*
+ * Per-PHY host-side initialisation, called once at probe time.
+ * Integrated PHY: register with the OTG transceiver, reusing its register
+ * mapping and wiring up the start_host/start_hnp hooks.
+ * Serial PMIC PHY: map the controller registers, acquire the usb_hs2
+ * clocks and initialise the PHY over RPC, undoing everything on failure.
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_xusb_init_host(struct msmusb_hcd *mhcd)
+{
+	int ret = 0;
+	struct msm_otg *otg;
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		/* NOTE(review): return value of msm_hsusb_rpc_connect() is
+		 * ignored -- confirm failure here is benign.
+		 */
+		msm_hsusb_rpc_connect();
+
+		if (pdata->vbus_init)
+			pdata->vbus_init(1);
+
+		/* VBUS might be present. Turn off vbus */
+		if (pdata->vbus_power)
+			pdata->vbus_power(pdata->phy_info, 0);
+
+		INIT_WORK(&mhcd->otg_work, msm_hsusb_otg_work);
+		mhcd->xceiv = otg_get_transceiver();
+		if (!mhcd->xceiv)
+			return -ENODEV;
+		/* the OTG driver owns the controller register mapping */
+		otg = container_of(mhcd->xceiv, struct msm_otg, otg);
+		hcd->regs = otg->regs;
+		otg->start_host = msm_hsusb_start_host;
+		ehci->start_hnp = ehci_msm_start_hnp;
+
+		ret = otg_set_host(mhcd->xceiv, &hcd->self);
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+
+		if (!hcd->regs)
+			return -EFAULT;
+		/* get usb clocks */
+		mhcd->clk = clk_get(NULL, "usb_hs2_clk");
+		if (IS_ERR(mhcd->clk)) {
+			iounmap(hcd->regs);
+			return PTR_ERR(mhcd->clk);
+		}
+
+		mhcd->pclk = clk_get(NULL, "usb_hs2_pclk");
+		if (IS_ERR(mhcd->pclk)) {
+			iounmap(hcd->regs);
+			clk_put(mhcd->clk);
+			return PTR_ERR(mhcd->pclk);
+		}
+		/* host start/stop requests arrive via the otg_ops vector */
+		mhcd->otg_ops.request = msm_hsusb_request_host;
+		mhcd->otg_ops.handle = (void *) mhcd;
+		ret = msm_xusb_init_phy(mhcd);
+		if (ret < 0) {
+			iounmap(hcd->regs);
+			clk_put(mhcd->clk);
+			clk_put(mhcd->pclk);
+		}
+		break;
+	default:
+		pr_err("phy type is bad\n");
+	}
+	return ret;
+}
+
+/*
+ * Platform probe: create the HCD, pick up IRQ/memory resources, validate
+ * the PHY type from platform data, then hand off to msm_xusb_init_host().
+ * The HCD itself is only added later, when the OTG layer issues a
+ * REQUEST_START through msm_hsusb_request_host().
+ */
+static int __devinit ehci_msm_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res;
+	struct msm_usb_host_platform_data *pdata;
+	int retval;
+	struct msmusb_hcd *mhcd;
+
+	hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd)
+		return  -ENOMEM;
+
+	hcd->irq = platform_get_irq(pdev, 0);
+	if (hcd->irq < 0) {
+		usb_put_hcd(hcd);
+		return hcd->irq;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		usb_put_hcd(hcd);
+		return -ENODEV;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	mhcd = hcd_to_mhcd(hcd);
+	spin_lock_init(&mhcd->lock);
+	mhcd->in_lpm = 0;
+	mhcd->running = 0;
+	device_init_wakeup(&pdev->dev, 1);
+
+	/* NOTE(review): platform_data is dereferenced without a NULL check;
+	 * presumably every board registering this device supplies it --
+	 * confirm.
+	 */
+	pdata = pdev->dev.platform_data;
+	if (PHY_TYPE(pdata->phy_info) == USB_PHY_UNDEFINED) {
+		usb_put_hcd(hcd);
+		return -ENODEV;
+	}
+	hcd->power_budget = pdata->power_budget;
+	mhcd->pdata = pdata;
+	INIT_WORK(&mhcd->lpm_exit_work, usb_lpm_exit_w);
+
+	wake_lock_init(&mhcd->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
+	/* ebi1 bus clock is optional; NULL means "not available" below */
+	pdata->ebi1_clk = clk_get(NULL, "ebi1_usb_clk");
+	if (IS_ERR(pdata->ebi1_clk))
+		pdata->ebi1_clk = NULL;
+	else
+		clk_set_rate(pdata->ebi1_clk, INT_MAX);
+
+	retval = msm_xusb_init_host(mhcd);
+
+	if (retval < 0) {
+		wake_lock_destroy(&mhcd->wlock);
+		usb_put_hcd(hcd);
+		clk_put(pdata->ebi1_clk);
+	}
+
+	/* NOTE(review): runtime PM is enabled even when init_host failed and
+	 * the negative retval is returned -- confirm this is intentional.
+	 */
+	pm_runtime_enable(&pdev->dev);
+
+	return retval;
+}
+
+/*
+ * Undo msm_xusb_init_host(): detach from the OTG transceiver (integrated
+ * PHY) or release the register mapping, clocks and RPC/PHY state (serial
+ * PMIC PHY).
+ */
+static void msm_xusb_uninit_host(struct msmusb_hcd *mhcd)
+{
+	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
+	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
+
+	switch (PHY_TYPE(pdata->phy_info)) {
+	case USB_PHY_INTEGRATED:
+		if (pdata->vbus_init)
+			pdata->vbus_init(0);
+		otg_set_host(mhcd->xceiv, NULL);
+		otg_put_transceiver(mhcd->xceiv);
+		/* flush any pending deferred start/stop request */
+		cancel_work_sync(&mhcd->otg_work);
+		break;
+	case USB_PHY_SERIAL_PMIC:
+		iounmap(hcd->regs);
+		clk_put(mhcd->clk);
+		clk_put(mhcd->pclk);
+		msm_fsusb_reset_phy();
+		msm_fsusb_rpc_deinit();
+		break;
+	default:
+		pr_err("phy type is bad\n");
+	}
+}
+/*
+ * Platform remove: force a host stop, tear down the PHY/OTG attachment,
+ * close the RPC channel and release the remaining resources acquired in
+ * probe.  Returns the rpc-close status.
+ */
+static int __exit ehci_msm_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct msmusb_hcd *mhcd = hcd_to_mhcd(hcd);
+	struct msm_usb_host_platform_data *pdata;
+	int retval = 0;
+
+	pdata = pdev->dev.platform_data;
+	device_init_wakeup(&pdev->dev, 0);
+
+	/* synchronous stop: removes the HCD and drops clocks/vbus */
+	msm_hsusb_request_host((void *)mhcd, REQUEST_STOP);
+	msm_xusb_uninit_host(mhcd);
+	retval = msm_xusb_rpc_close(mhcd);
+
+	wake_lock_destroy(&mhcd->wlock);
+	usb_put_hcd(hcd);
+	clk_put(pdata->ebi1_clk);
+
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	return retval;
+}
+
+/*
+ * Runtime-PM callbacks.  The actual low-power transitions are driven from
+ * the host request paths (see msm_hsusb_request_host()), so these handlers
+ * only log the transition and report success.
+ */
+static int ehci_msm_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int ehci_msm_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static int ehci_msm_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return 0;
+}
+
+/* Only runtime-PM hooks are provided; system sleep is not handled here. */
+static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
+	.runtime_suspend = ehci_msm_runtime_suspend,
+	.runtime_resume = ehci_msm_runtime_resume,
+	.runtime_idle = ehci_msm_runtime_idle
+};
+
+/* Platform glue for the MSM high-speed USB host controller. */
+static struct platform_driver ehci_msm_driver = {
+	.probe	= ehci_msm_probe,
+	.remove	= __exit_p(ehci_msm_remove),
+	.driver	= {.name = "msm_hsusb_host",
+		    .pm = &ehci_msm_dev_pm_ops, },
+};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 0917e3a..a5bb387 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1144,7 +1144,110 @@
 		qtd_list_free (ehci, urb, qtd_list);
 	return rc;
 }
+/*-------------------------------------------------------------------------*/
+/* This function creates the qtds and submits them for the
+ * SINGLE_STEP_SET_FEATURE Test.
+ * This is done in two parts: first SETUP req for GetDesc is sent then
+ * 15 seconds later, the IN stage for GetDesc starts to req data from dev
+ *
+ * is_setup : i/p argument decides which of the two stages needs to be
+ * performed; TRUE - SETUP and FALSE - IN+STATUS
+ * Returns 0 if success
+ */
+#ifdef CONFIG_USB_EHCI_EHSET
+/*
+ * Build and submit the qtd chain for one half of the EHSET
+ * SINGLE_STEP_SET_FEATURE test: either the lone SETUP stage (is_setup)
+ * or, 15 seconds later, the IN data stage followed by a zero-length OUT
+ * status stage for the pending GetDescriptor request.
+ *
+ * NOTE(review): failures return -1 rather than -ENOMEM, and the return
+ * values of submit_async() are ignored -- callers must treat any nonzero
+ * result as failure; confirm.
+ * NOTE(review): allocations/submissions mix GFP_KERNEL and GFP_ATOMIC;
+ * confirm the calling context allows sleeping.
+ */
+static int
+submit_single_step_set_feature(
+	struct usb_hcd  *hcd,
+	struct urb      *urb,
+	int 		is_setup
+) {
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	struct list_head	qtd_list;
+	struct list_head	*head ;
 
+	struct ehci_qtd		*qtd, *qtd_prev;
+	dma_addr_t		buf;
+	int			len, maxpacket;
+	u32			token;
+
+	INIT_LIST_HEAD(&qtd_list);
+	head = &qtd_list;
+
+	/*
+	 * URBs map to sequences of QTDs:  one logical transaction
+	 */
+	qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
+	if (unlikely(!qtd))
+		return -1;
+	list_add_tail(&qtd->qtd_list, head);
+	qtd->urb = urb;
+
+	token = QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+
+	len = urb->transfer_buffer_length;
+	/* Check if the request is to perform just the SETUP stage (getDesc)
+	 * as in SINGLE_STEP_SET_FEATURE test, DATA stage (IN) happens
+	 * 15 secs after the setup
+	 */
+	if (is_setup) {
+		/* SETUP pid */
+		qtd_fill(ehci, qtd, urb->setup_dma,
+				sizeof(struct usb_ctrlrequest),
+				token | (2 /* "setup" */ << 8), 8);
+
+		submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
+		return 0; /*Return now; we shall come back after 15 seconds*/
+	}
+
+	/*---------------------------------------------------------------------
+	 * IN: data transfer stage:  buffer setup : start the IN txn phase for
+	 * the get_Desc SETUP which was sent 15seconds back
+	 */
+	token ^= QTD_TOGGLE;   /*We need to start IN with DATA-1 Pid-sequence*/
+	buf = urb->transfer_dma;
+
+	token |= (1 /* "in" */ << 8);  /*This is IN stage*/
+
+	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+
+	qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+
+	/* Our IN phase shall always be a short read; so keep the queue running
+	* and let it advance to the next qtd which zero length OUT status */
+
+	qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+	/*----------------------------------------------------------------------
+	 * STATUS stage for GetDesc control request
+	 */
+	token ^= 0x0100;	/* "in" <--> "out"  */
+	token |= QTD_TOGGLE;	/* force DATA1 */
+
+	qtd_prev = qtd;
+	qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
+	if (unlikely(!qtd))
+		goto cleanup;
+	qtd->urb = urb;
+	/* link the status qtd behind the data qtd in the hardware chain */
+	qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+	list_add_tail(&qtd->qtd_list, head);
+
+	/* dont fill any data in such packets */
+	qtd_fill(ehci, qtd, 0, 0, token, 0);
+
+	/* by default, enable interrupt on urb completion */
+	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+
+	submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
+
+	return 0;
+
+cleanup:
+	qtd_list_free(ehci, urb, head);
+	return -1;
+}
+#endif
 /*-------------------------------------------------------------------------*/
 
 /* the async qh for the qtds being reclaimed are now unlinked from the HC */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 989e0a8..95802d9 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -124,6 +124,8 @@
 	ktime_t			last_periodic_enable;
 	u32			command;
 
+	void (*start_hnp)(struct ehci_hcd *ehci);
+
 	/* SILICON QUIRKS */
 	unsigned		no_selective_suspend:1;
 	unsigned		has_fsl_port_bug:1; /* FreeScale */
diff --git a/drivers/usb/host/pehci/Makefile b/drivers/usb/host/pehci/Makefile
new file mode 100644
index 0000000..8c0d17f
--- /dev/null
+++ b/drivers/usb/host/pehci/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the pehci driver (if driver is inside kernel tree).
+#
+
+obj-$(CONFIG_USB_PEHCI_HCD) += hal/ host/
+
diff --git a/drivers/usb/host/pehci/hal/Makefile b/drivers/usb/host/pehci/hal/Makefile
new file mode 100644
index 0000000..91408e5
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the pehci driver (if driver is inside kernel tree).
+#
+
+obj-$(CONFIG_USB_PEHCI_HCD) += hal_msm.o
+
diff --git a/drivers/usb/host/pehci/hal/hal_intf.h b/drivers/usb/host/pehci/hal/hal_intf.h
new file mode 100644
index 0000000..2d66e57
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/hal_intf.h
@@ -0,0 +1,313 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a hardware abstraction layer header file.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef HAL_INTF_H
+#define HAL_INTF_H
+
+
+/* Specify package here instead of including package.h */
+/* #include "package.h" */
+#define HCD_PACKAGE
+
+#define NON_PCI
+//#define PXA300
+
+//#define MSEC_INT_BASED
+#ifdef MSEC_INT_BASED
+#define THREAD_BASED 
+#endif
+
+#ifndef DATABUS_WIDTH_16
+#define DATABUS_WIDTH_16
+#endif
+
+#ifdef	DATABUS_WIDTH_16
+/*DMA SUPPORT */
+/* #define	ENABLE_PLX_DMA */
+//#undef	ENABLE_PLX_DMA//PXA300
+#endif
+
+//#define	EDGE_INTERRUPT
+//#define 	POL_HIGH_INTERRUPT
+
+#define	DMA_BUF_SIZE	(4096 * 2)
+
+#define ISP1763_CHIPID  0x176320
+
+/* Values for id_flags filed of isp1763_driver_t */
+#define ISP1763_HC				0	/* Host Controller Driver */
+#define ISP1763_DC				1	/* Device Controller Driver */
+#define ISP1763_OTG				2	/* Otg Controller Driver */
+#define ISP1763_LAST_DEV			(ISP1763_OTG + 1)
+#define ISP1763_1ST_DEV				(ISP1763_HC)
+
+#ifdef PXA300
+#define HC_SPARAMS_REG					(0x04<<1)	/* Structural Parameters Register */
+#define HC_CPARAMS_REG					(0x08<<1)	/* Capability Parameters Register */
+
+#define HC_USBCMD_REG						(0x8C<<1)	/* USB Command Register */
+#define HC_USBSTS_REG						(0x90<<1)	/* USB Status Register */
+#define HC_INTERRUPT_REG_EHCI				(0x94<<1)	/* INterrupt Enable Register */
+#define HC_FRINDEX_REG						(0x98<<1)	/* Frame Index Register */
+
+#define HC_CONFIGFLAG_REG					(0x9C<<1)	/* Conigured Flag  Register */
+#define HC_PORTSC1_REG					(0xA0<<1)	/* Port Status Control for Port1 */
+
+/*ISO Transfer Registers */
+#define HC_ISO_PTD_DONEMAP_REG			(0xA4<<1)	/* ISO PTD Done Map Register */
+#define HC_ISO_PTD_SKIPMAP_REG			(0xA6<<1)	/* ISO PTD Skip Map Register */
+#define HC_ISO_PTD_LASTPTD_REG				(0xA8<<1)	/* ISO PTD Last PTD Register */
+
+/*INT Transfer Registers */
+#define HC_INT_PTD_DONEMAP_REG			(0xAA<<1)	/* INT PTD Done Map Register */
+#define HC_INT_PTD_SKIPMAP_REG				(0xAC<<1)	/* INT PTD Skip Map Register */
+#define HC_INT_PTD_LASTPTD_REG				(0xAE<<1)	/* INT PTD Last PTD Register  */
+
+/*ATL Transfer Registers */
+#define HC_ATL_PTD_DONEMAP_REG			(0xB0<<1)	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_SKIPMAP_REG				(0xB2<<1)	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_LASTPTD_REG				(0xB4<<1)	/* ATL PTD Last PTD Register  */
+
+/*General Purpose Registers */
+#define HC_HW_MODE_REG					(0x0C<<1)	/* H/W Mode Register  */
+#define HC_CHIP_ID_REG						(0x70<<1)	/* Chip ID Register */
+#define HC_SCRATCH_REG					(0x78<<1)	/* Scratch Register */
+#define HC_RESET_REG						(0xB8<<1)	/* HC Reset Register */
+#define HC_HWMODECTRL_REG				(0xB6<<1)
+#define HC_UNLOCK_DEVICE					(0x7C<<1)
+
+/* Interrupt Registers */
+#define HC_INTERRUPT_REG					(0xD4<<1)	/* Interrupt Register */
+#define HC_INTENABLE_REG					(0xD6<<1)	/* Interrupt enable Register */
+#define HC_ISO_IRQ_MASK_OR_REG			(0xD8<<1)	/* ISO Mask OR Register */
+#define HC_INT_IRQ_MASK_OR_REG			(0xDA<<1)	/* INT Mask OR Register */
+#define HC_ATL_IRQ_MASK_OR_REG			(0xDC<<1)	/* ATL Mask OR Register */
+#define HC_ISO_IRQ_MASK_AND_REG			(0xDE<<1)	/* ISO Mask AND Register */
+#define HC_INT_IRQ_MASK_AND_REG			(0xE0<<1)	/* INT Mask AND Register */
+#define HC_ATL_IRQ_MASK_AND_REG			(0xE2<<1)	/* ATL Mask AND Register */
+
+/*power control reg */
+#define HC_POWER_DOWN_CONTROL_REG		(0xD0<<1)
+
+/*RAM Registers */
+#define HC_DMACONFIG_REG					(0xBC<<1)	/* DMA Config Register */
+#define HC_MEM_READ_REG					(0xC4<<1)	/* Memory Register */
+#define HC_DATA_REG						(0xC6<<1)	/* Data Register */
+
+#define OTG_CTRL_SET_REG					(0xE4<<1)
+#define OTG_CTRL_CLEAR_REG					(0xE6<<1)
+#define OTG_SOURCE_REG					(0xE8<<1)
+
+#define OTG_INTR_EN_F_SET_REG				(0xF0<<1)
+#define OTG_INTR_EN_R_SET_REG				(0xF4<<1)	/* OTG Interrupt Enable Rise register */
+
+#else
+#define HC_SPARAMS_REG					0x04	/* Structural Parameters Register */
+#define HC_CPARAMS_REG					0x08	/* Capability Parameters Register */
+
+#define HC_USBCMD_REG					0x8C	/* USB Command Register */
+#define HC_USBSTS_REG					0x90	/* USB Status Register */
+#define HC_INTERRUPT_REG_EHCI			0x94	/* INterrupt Enable Register */
+#define HC_FRINDEX_REG					0x98	/* Frame Index Register */
+
+#define HC_CONFIGFLAG_REG				0x9C	/* Conigured Flag  Register */
+#define HC_PORTSC1_REG					0xA0	/* Port Status Control for Port1 */
+
+/*ISO Transfer Registers */
+#define HC_ISO_PTD_DONEMAP_REG			0xA4	/* ISO PTD Done Map Register */
+#define HC_ISO_PTD_SKIPMAP_REG			0xA6	/* ISO PTD Skip Map Register */
+#define HC_ISO_PTD_LASTPTD_REG			0xA8	/* ISO PTD Last PTD Register */
+
+/*INT Transfer Registers */
+#define HC_INT_PTD_DONEMAP_REG			0xAA	/* INT PTD Done Map Register */
+#define HC_INT_PTD_SKIPMAP_REG			0xAC	/* INT PTD Skip Map Register */
+#define HC_INT_PTD_LASTPTD_REG			0xAE	/* INT PTD Last PTD Register  */
+
+/*ATL Transfer Registers */
+#define HC_ATL_PTD_DONEMAP_REG			0xB0	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_SKIPMAP_REG			0xB2	/* ATL PTD Last PTD Register  */
+#define HC_ATL_PTD_LASTPTD_REG			0xB4	/* ATL PTD Last PTD Register  */
+
+/*General Purpose Registers */
+#define HC_HW_MODE_REG					0x0C //0xB6	/* H/W Mode Register  */
+#define HC_CHIP_ID_REG					0x70	/* Chip ID Register */
+#define HC_SCRATCH_REG					0x78	/* Scratch Register */
+#define HC_RESET_REG					0xB8	/* HC Reset Register */
+#define HC_HWMODECTRL_REG				0xB6 //0x0C /* H/W Mode control Register  */
+#define HC_UNLOCK_DEVICE				0x7C
+
+/* Interrupt Registers */
+#define HC_INTERRUPT_REG				0xD4	/* Interrupt Register */
+#define HC_INTENABLE_REG				0xD6	/* Interrupt enable Register */
+#define HC_ISO_IRQ_MASK_OR_REG			0xD8	/* ISO Mask OR Register */
+#define HC_INT_IRQ_MASK_OR_REG			0xDA	/* INT Mask OR Register */
+#define HC_ATL_IRQ_MASK_OR_REG			0xDC	/* ATL Mask OR Register */
+#define HC_ISO_IRQ_MASK_AND_REG			0xDE	/* ISO Mask AND Register */
+#define HC_INT_IRQ_MASK_AND_REG			0xE0	/* INT Mask AND Register */
+#define HC_ATL_IRQ_MASK_AND_REG			0xE2	/* ATL Mask AND Register */
+
+/*power control reg */
+#define HC_POWER_DOWN_CONTROL_REG		0xD0
+
+/*RAM Registers */
+#define HC_DMACONFIG_REG				0xBC	/* DMA Config Register */
+#define HC_MEM_READ_REG					0xC4	/* Memory Register */
+#define HC_DATA_REG						0xC6	/* Data Register */
+
+#define OTG_CTRL_SET_REG				0xE4
+#define OTG_CTRL_CLEAR_REG				0xE6
+#define OTG_SOURCE_REG					0xE8
+
+#define OTG_INTR_EN_F_SET_REG			0xF0	/* OTG Interrupt Enable Fall register */
+#define OTG_INTR_EN_R_SET_REG			0xF4	/* OTG Interrupt Enable Rise register */
+
+#endif
+
+#define	OTG_CTRL_DPPULLUP				0x0001
+#define	OTG_CTRL_DPPULLDOWN				0x0002
+#define	OTG_CTRL_DMPULLDOWN				0x0004
+#define	OTG_CTRL_VBUS_DRV				0x0010
+#define	OTG_CTRL_VBUS_DISCHRG			0x0020
+#define	OTG_CTRL_VBUS_CHRG				0x0040
+#define	OTG_CTRL_SW_SEL_HC_DC			0x0080
+#define	OTG_CTRL_BDIS_ACON_EN			0x0100
+#define	OTG_CTRL_OTG_SE0_EN				0x0200
+#define	OTG_CTRL_OTG_DISABLE			0x0400
+#define	OTG_CTRL_VBUS_DRV_PORT2			0x1000
+#define	OTG_CTRL_SW_SEL_HC_2			0x8000
+
+/*interrupt count and buffer status register*/
+
+
+#ifdef PXA300
+#define HC_BUFFER_STATUS_REG			(0xBA<<1)
+#define HC_INT_THRESHOLD_REG			(0xC8<<1)
+#else
+#define HC_BUFFER_STATUS_REG			0xBA
+#define HC_INT_THRESHOLD_REG			0xC8
+#endif
+
+#define HC_OTG_INTERRUPT				0x400
+
+#ifdef PXA300
+#define DC_CHIPID						(0x70<<1)
+#else
+#define DC_CHIPID						0x70
+#endif
+
+
+#ifdef PXA300
+#define FPGA_CONFIG_REG				(0x100<<1)
+#else
+#define FPGA_CONFIG_REG					0x100
+#endif
+
+#define HC_HW_MODE_GOBAL_INTR_ENABLE	0x01
+#define HC_HW_MODE_INTR_EDGE			0x02
+#define HC_HW_MODE_INTR_POLARITY_HIGH	0x04
+#define HC_HW_MODE_LOCK				0x08
+#define HC_HW_MODE_DATABUSWIDTH_8	0x10
+#define HC_HW_MODE_DREQ_POL_HIGH		0x20
+#define HC_HW_MODE_DACK_POL_HIGH		0x40
+#define HC_HW_MODE_COMN_INT			0x80
+
+struct isp1763_driver;
+/* Device-match entry: vendor/product ids plus driver-private match data. */
+typedef struct _isp1763_id {
+	u16 idVendor;
+	u16 idProduct;
+	u32 driver_info;
+} isp1763_id;
+
+/*
+ * Per-controller device instance shared between the HAL and the HC/DC/OTG
+ * controller drivers; one slot exists per logical controller (see the
+ * ISP1763_HC/DC/OTG indices above).
+ */
+typedef struct isp1763_dev {
+	/*added for pci device */
+#ifdef  NON_PCI 
+		struct platform_device *dev;
+#else /*PCI*/
+	struct pci_dev *pcidev;
+#endif
+	struct isp1763_driver *driver;	/* which driver has allocated this device */
+	void *driver_data;	/* data private to the host controller driver */
+	void *otg_driver_data;	/*data private for otg controler */
+	unsigned char index;	/* local controller (HC/DC/OTG) */
+	unsigned int irq;	/*Interrupt Channel allocated for this device */
+	void (*handler) (struct isp1763_dev * dev, void *isr_data);	/* Interrupt Serrvice Routine */
+	void *isr_data;		/* isr data of the driver */
+	unsigned long int_reg;	/* Interrupt register */
+	unsigned long alt_int_reg;	/* Interrupt register 2 */
+	unsigned long start;
+	unsigned long length;
+	struct resource *mem_res;
+	unsigned long io_base;	/* Start Io address space for this device */
+	unsigned long io_len;	/* IO address space length for this device */
+
+	unsigned long chip_id;	/* Chip Id */
+
+	char name[80];		/* device name */
+	int active;		/* device status */
+
+	/* DMA resources should come here */
+	unsigned long dma;
+	u8 *baseaddress;	/*base address for i/o ops */
+	u8 *dmabase;
+	isp1763_id *id;
+} isp1763_dev_t;
+
+
+/*
+ * Controller-driver registration record: identity, match ids and the
+ * lifecycle/power callbacks the HAL invokes on the driver.
+ */
+typedef struct isp1763_driver {
+	char *name;
+	unsigned long index;	/* HC or DC or OTG */
+	isp1763_id *id;		/*device ids */
+	int (*probe) (struct isp1763_dev * dev, isp1763_id * id);	/* New device inserted */
+	void (*remove) (struct isp1763_dev * dev);	/* Device removed (NULL if not a hot-plug capable driver) */
+	
+	void (*suspend) (struct isp1763_dev * dev);	/* Device suspended */
+	void (*resume) (struct isp1763_dev * dev);	/* Device woken up */
+	void (*remotewakeup) (struct isp1763_dev *dev);  /* Remote Wakeup */
+	void (*powerup) (struct isp1763_dev *dev);  /* Device poweup mode */
+	void (*powerdown)	(struct isp1763_dev *dev); /* Device power down mode */
+} isp_1763_driver_t;
+
+struct usb_device *phci_register_otg_device(struct isp1763_dev *dev);
+
+/*otg exported function from host*/
+int phci_suspend_otg_port(struct isp1763_dev *dev, u32 command);
+int phci_enumerate_otg_port(struct isp1763_dev *dev, u32 command);
+
+extern int isp1763_register_driver(struct isp1763_driver *drv);
+extern void isp1763_unregister_driver(struct isp1763_driver *drv);
+extern int isp1763_request_irq(void (*handler)(struct isp1763_dev * dev, void *isr_data),
+		      struct isp1763_dev *dev, void *isr_data);
+extern void isp1763_free_irq(struct isp1763_dev *dev, void *isr_data);
+
+extern u32 isp1763_reg_read32(isp1763_dev_t * dev, u16 reg, u32 data);
+extern u16 isp1763_reg_read16(isp1763_dev_t * dev, u16 reg, u16 data);
+extern u8 isp1763_reg_read8(struct isp1763_dev *dev, u16 reg, u8 data);
+extern void isp1763_reg_write32(isp1763_dev_t * dev, u16 reg, u32 data);
+extern void isp1763_reg_write16(isp1763_dev_t * dev, u16 reg, u16 data);
+extern void isp1763_reg_write8(struct isp1763_dev *dev, u16 reg, u8 data);
+extern int isp1763_mem_read(isp1763_dev_t * dev, u32 start_add,
+		     u32 end_add, u32 * buffer, u32 length, u16 dir);
+extern int isp1763_mem_write(isp1763_dev_t * dev, u32 start_add,
+		      u32 end_add, u32 * buffer, u32 length, u16 dir);
+#endif /* HAL_INTF_H */
diff --git a/drivers/usb/host/pehci/hal/hal_msm.c b/drivers/usb/host/pehci/hal/hal_msm.c
new file mode 100644
index 0000000..35c0203
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/hal_msm.c
@@ -0,0 +1,748 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux HCD Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is the main hardware abstraction layer file. Hardware initialization, interrupt
+* processing and read/write routines are handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/gpio.h>
+#include <mach/board.h>
+#include <linux/poll.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+
+/*--------------------------------------------------------------*
+ *               linux system include files
+ *--------------------------------------------------------------*/
+#include "hal_msm.h"
+#include "../hal/hal_intf.h"
+#include "../hal/isp1763.h"
+
+
+/*--------------------------------------------------------------*
+ *               Local variable Definitions
+ *--------------------------------------------------------------*/
+struct isp1763_dev isp1763_loc_dev[ISP1763_LAST_DEV];
+
+
+/*--------------------------------------------------------------*
+ *               Local # Definitions
+ *--------------------------------------------------------------*/
+#define         PCI_ACCESS_RETRY_COUNT  20
+#define         ISP1763_DRIVER_NAME     "isp1763_usb"
+
+/*--------------------------------------------------------------*
+ *               Local Function
+ *--------------------------------------------------------------*/
+
+static int __devexit isp1763_remove(struct platform_device *pdev);
+static int __devinit isp1763_probe(struct platform_device *pdev);
+
+
+/*--------------------------------------------------------------*
+ *               Platform Driver Interface Functions
+ *--------------------------------------------------------------*/
+
+/* Platform driver glue.  NOTE(review): no .probe is set here -- presumably
+ * the driver is registered with platform_driver_probe(..., isp1763_probe)
+ * later in this file; confirm.
+ */
+static struct platform_driver isp1763_usb_driver = {
+	.remove = __exit_p(isp1763_remove),
+	.driver = {
+		.name = ISP1763_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+
+/*--------------------------------------------------------------*
+ *               ISP1763 Read write routine
+ *--------------------------------------------------------------*/
+/*
+ * EBI2 on 8660 ignores the first bit and shifts the address by
+ * one bit to the right.
+ * Hence, shift left all the register addresses before accessing
+ * them over EBI2.
+ * This logic applies only for the register read/writes, for
+ * read/write from ISP memory this conversion is not needed
+ * as the ISP obtains the memory address from 'memory' register
+ */
+
+/* Write a 32 bit Register of isp1763 */
+void
+isp1763_reg_write32(struct isp1763_dev *dev, u16 reg, u32 data)
+{
+	/* Write the 32bit to the register address given to us */
+
+	/* double the offset: EBI2 shifts addresses right by one bit (see
+	 * the note at the top of this section)
+	 */
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	/* 16-bit bus: the two halves of a 32-bit register sit 4 bytes
+	 * apart once the offset has been doubled
+	 */
+	writew((u16) data, dev->baseaddress + ((reg)));
+	writew((u16) (data >> 16), dev->baseaddress + (((reg + 4))));
+#else
+	/* NOTE(review): the 8-bit path uses byte offsets +1..+3, i.e. no
+	 * doubled register spacing -- confirm against the 8-bit bus map
+	 */
+	writeb((u8) data, dev->baseaddress + (reg));
+	writeb((u8) (data >> 8), dev->baseaddress + ((reg + 1)));
+	writeb((u8) (data >> 16), dev->baseaddress + ((reg + 2)));
+	writeb((u8) (data >> 24), dev->baseaddress + ((reg + 3)));
+#endif
+
+}
+EXPORT_SYMBOL(isp1763_reg_write32);
+
+
+/* Read a 32 bit Register of isp1763 */
+u32
+isp1763_reg_read32(struct isp1763_dev *dev, u16 reg, u32 data)
+{
+
+#ifdef DATABUS_WIDTH_16
+	u16 wvalue1, wvalue2;
+#else
+	u8 bval1, bval2, bval3, bval4;
+#endif
+	/* 'data' is only scratch; the read value is assembled below */
+	data = 0;
+	/* double the offset: EBI2 shifts addresses right by one bit */
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	/* low half first, high half 4 bytes later (doubled spacing) */
+	wvalue1 = readw(dev->baseaddress + ((reg)));
+	wvalue2 = readw(dev->baseaddress + (((reg + 4))));
+	data |= wvalue2;
+	data <<= 16;
+	data |= wvalue1;
+#else
+
+	bval1 = readb(dev->baseaddress + (reg));
+	bval2 = readb(dev->baseaddress + (reg + 1));
+	bval3 = readb(dev->baseaddress + (reg + 2));
+	bval4 = readb(dev->baseaddress + (reg + 3));
+	/* assemble little-endian: bval1 is the least significant byte */
+	data = 0;
+	data |= bval4;
+	data <<= 8;
+	data |= bval3;
+	data <<= 8;
+	data |= bval2;
+	data <<= 8;
+	data |= bval1;
+
+#endif
+
+	return data;
+}
+EXPORT_SYMBOL(isp1763_reg_read32);
+
+
+/* Read a 16 bit Register of isp1763 */
+u16
+isp1763_reg_read16(struct isp1763_dev * dev, u16 reg, u16 data)
+{
+	/* double the offset: EBI2 shifts addresses right by one bit */
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	data = readw(dev->baseaddress + ((reg)));
+#else
+	/* NOTE(review): declaration after the 'reg <<= 1' statement -- not
+	 * valid in this kernel's C dialect if this branch is ever built;
+	 * currently DATABUS_WIDTH_16 is forced on in hal_intf.h.
+	 */
+	u8 bval1, bval2;
+	bval1 = readb(dev->baseaddress + (reg));
+	/* HC_DATA_REG auto-advances: read the same address twice for the
+	 * high byte instead of reg+1 -- presumably a FIFO window; confirm
+	 */
+	if (reg == HC_DATA_REG){
+		bval2 = readb(dev->baseaddress + (reg));
+	} else {
+		bval2 = readb(dev->baseaddress + ((reg + 1)));
+	}
+	data = 0;
+	data |= bval2;
+	data <<= 8;
+	data |= bval1;
+
+#endif
+	return data;
+}
+EXPORT_SYMBOL(isp1763_reg_read16);
+
+/* Write a 16 bit Register of isp1763 */
+void
+isp1763_reg_write16(struct isp1763_dev *dev, u16 reg, u16 data)
+{
+	/* double the offset: EBI2 shifts addresses right by one bit */
+	reg <<= 1;
+#ifdef DATABUS_WIDTH_16
+	writew(data, dev->baseaddress + ((reg)));
+#else
+	writeb((u8) data, dev->baseaddress + (reg));
+	/* HC_DATA_REG is written twice at the same address -- presumably a
+	 * FIFO window, matching the read side; confirm
+	 */
+	if (reg == HC_DATA_REG){
+		writeb((u8) (data >> 8), dev->baseaddress + (reg));
+	}else{
+		writeb((u8) (data >> 8), dev->baseaddress + ((reg + 1)));
+	}
+
+#endif
+}
+EXPORT_SYMBOL(isp1763_reg_write16);
+
+/* Read a 8 bit Register of isp1763 */
+u8
+isp1763_reg_read8(struct isp1763_dev *dev, u16 reg, u8 data)
+{
+	/* double the offset: EBI2 shifts addresses right by one bit */
+	reg <<= 1;
+	data = readb((dev->baseaddress + (reg)));
+	return data;
+}
+EXPORT_SYMBOL(isp1763_reg_read8);
+
+/* Write a 8 bit Register of isp1763 */
+void
+isp1763_reg_write8(struct isp1763_dev *dev, u16 reg, u8 data)
+{
+	/* double the offset: EBI2 shifts addresses right by one bit */
+	reg <<= 1;
+	writeb(data, (dev->baseaddress + (reg)));
+}
+EXPORT_SYMBOL(isp1763_reg_write8);
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_mem_read
+ *
+ * Memory read using PIO method.
+ *
+ *  Input: struct isp1763_driver *drv  -->  Driver structure.
+ *                      u32 start_add     --> Starting address of memory
+ *              u32 end_add     ---> End address
+ *
+ *              u32 * buffer      --> Buffer pointer.
+ *              u32 length       ---> Length
+ *              u16 dir          ---> Direction ( Inc or Dec)
+ *
+ *  Output     int Length  ----> Number of bytes read
+ *
+ *  Called by: system function
+ *
+ *
+ *--------------------------------------------------------------*/
+/* Memory read function PIO */
+
+int
+isp1763_mem_read(struct isp1763_dev *dev, u32 start_add,
+	u32 end_add, u32 * buffer, u32 length, u16 dir)
+{
+	/* byte/halfword views of the output buffer for short tails */
+	u8 *one = (u8 *) buffer;
+	u16 *two = (u16 *) buffer;
+	u32 a = (u32) length;
+	u32 w;
+	u32 w2;
+
+	if (buffer == 0) {
+		printk("Buffer address zero\n");
+		return 0;
+	}
+
+
+	/* program the start address; the data register then streams the
+	 * memory contents (end_add and dir are unused here)
+	 */
+	isp1763_reg_write16(dev, HC_MEM_READ_REG, start_add);
+	/* This delay requirement comes from the ISP1763A programming guide */
+	ndelay(100);
+last:
+	/* two 16-bit reads of HC_DATA_REG yield one 32-bit word; the third
+	 * argument of isp1763_reg_read16 is scratch only
+	 */
+	w = isp1763_reg_read16(dev, HC_DATA_REG, w);
+	w2 = isp1763_reg_read16(dev, HC_DATA_REG, w);
+	w2 <<= 16;
+	w = w | w2;
+	if (a == 1) {
+		*one = (u8) w;
+		return 0;
+	}
+	if (a == 2) {
+		*two = (u16) w;
+		return 0;
+	}
+
+	if (a == 3) {
+		*two = (u16) w;
+		two += 1;
+		w >>= 16;
+		/* NOTE(review): this stores 16 bits through 'two' for the
+		 * final single byte (the cast truncates the value, not the
+		 * store width) -- possible one-byte overrun; confirm
+		 */
+		*two = (u8) (w);
+		return 0;
+
+	}
+	/* full 32-bit words; 1-3 byte tails are diverted back to 'last' */
+	while (a > 0) {
+		*buffer = w;
+		a -= 4;
+		if (a <= 0) {
+			break;
+		}
+		if (a < 4) {
+			buffer += 1;
+			one = (u8 *) buffer;
+			two = (u16 *) buffer;
+			goto last;
+		}
+		buffer += 1;
+		w = isp1763_reg_read16(dev, HC_DATA_REG, w);
+		w2 = isp1763_reg_read16(dev, HC_DATA_REG, w);
+		w2 <<= 16;
+		w = w | w2;
+	}
+	/* 'a' is unsigned, so (a < 0) can never be true; effectively
+	 * returns 0 when the count reached zero
+	 */
+	return ((a < 0) || (a == 0)) ? 0 : (-1);
+
+}
+EXPORT_SYMBOL(isp1763_mem_read);
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_mem_write
+ *
+ * Memory write using PIO method.
+ *
+ *  Input: struct isp1763_driver *drv  -->  Driver structure.
+ *                      u32 start_add     --> Starting address of memory
+ *              u32 end_add     ---> End address
+ *
+ *              u32 * buffer      --> Buffer pointer.
+ *              u32 length       ---> Length
+ *              u16 dir          ---> Direction ( Inc or Dec)
+ *
+ *  Output     int Length  ----> Number of bytes read
+ *
+ *  Called by: system function
+ *
+ *
+ *--------------------------------------------------------------*/
+
+/* Memory read function IO */
+
+int
+isp1763_mem_write(struct isp1763_dev *dev,
+	u32 start_add, u32 end_add, u32 * buffer, u32 length, u16 dir)
+{
+	int a = length;		/* bytes remaining (signed on purpose) */
+	u8 one = (u8) (*buffer);	/* single-byte tail value */
+	u16 two = (u16) (*buffer);	/* two-byte tail value */
+
+
+	/* Latch the destination address.  NOTE(review): the same
+	 * HC_MEM_READ_REG is used for writes — presumably it is a shared
+	 * memory-address register; confirm against the ISP1763A guide. */
+	isp1763_reg_write16(dev, HC_MEM_READ_REG, start_add);
+	/* This delay requirement comes from the ISP1763A programming guide */
+	ndelay(100);
+
+	if (a == 1) {
+		/* high byte of the 16-bit write is zero-extended */
+		isp1763_reg_write16(dev, HC_DATA_REG, one);
+		return 0;
+	}
+	if (a == 2) {
+		isp1763_reg_write16(dev, HC_DATA_REG, two);
+		return 0;
+	}
+
+	/* Stream 32-bit words as two 16-bit data-port writes.  For a
+	 * 3-byte tail this still writes 4 bytes to the chip (the extra
+	 * byte comes from the same source word). */
+	while (a > 0) {
+		isp1763_reg_write16(dev, HC_DATA_REG, (u16) (*buffer));
+		if (a >= 3)
+			isp1763_reg_write16(dev, HC_DATA_REG,
+					    (u16) ((*buffer) >> 16));
+		start_add += 4;
+		a -= 4;
+		if (a <= 0)
+			break;
+		buffer += 1;
+
+	}
+
+	return ((a < 0) || (a == 0)) ? 0 : (-1);
+
+}
+EXPORT_SYMBOL(isp1763_mem_write);
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_register_driver
+ *
+ * This function is used by top driver (OTG, HCD, DCD) to register
+ * their communication functions (probe, remove, suspend, resume) using
+ * the drv data structure.
+ * This function will call the probe function of the driver if the ISP1763
+ * corresponding to the driver is enabled
+ *
+ *  Input: struct isp1763_driver *drv  --> Driver structure.
+ *  Output result
+ *         0= complete
+ *         1= error.
+ *
+ *  Called by: system function module_init
+ *
+ *
+ *--------------------------------------------------------------*/
+
+/* Register a client driver (OTG/HCD/DCD) with the HAL and invoke its
+ * probe() when the corresponding controller has been mapped.
+ *
+ * Returns the probe() result (>= 0 on success), or -EINVAL when no
+ * driver is supplied, the controller is absent, or probe is missing.
+ */
+int
+isp1763_register_driver(struct isp1763_driver *drv)
+{
+	struct isp1763_dev *dev;
+	int result = -EINVAL;
+
+	hal_entry("%s: Entered\n", __func__);
+	info("isp1763_register_driver(drv=%p)\n", drv);
+
+	if (!drv) {
+		return -EINVAL;
+	}
+
+	dev = &isp1763_loc_dev[drv->index];
+	if (!dev->baseaddress)
+		return -EINVAL;	/* controller was never mapped */
+
+	dev->active = 1;	/* set the driver as active*/
+
+	if (drv->probe) {
+		result = drv->probe(dev, drv->id);
+	} else {
+		pr_err("%s: no probe function for index %d\n", __func__,
+			(int)drv->index);
+	}
+
+	if (result >= 0) {
+		/* pr_debug() supplies its own log level; passing KERN_INFO
+		 * here would emit the level prefix as literal bytes. */
+		pr_debug(__FILE__ ": Registered Driver %s\n",
+			drv->name);
+		dev->driver = drv;
+	}
+	hal_entry("%s: Exit\n", __func__);
+	return result;
+}				/* End of isp1763_register_driver */
+EXPORT_SYMBOL(isp1763_register_driver);
+
+
+/*--------------------------------------------------------------*
+ *
+ * Module details: isp1763_unregister_driver
+ *
+ * This function is used by top driver (OTG, HCD, DCD) to de-register
+ * their communication functions (probe, remove, suspend, resume) using
+ * the drv data structure.
+ * This function will check whether the driver is registered or not and
+ * call the remove function of the driver if registered
+ *
+ *  Input: struct isp1763_driver *drv  --> Driver structure.
+ *  Output result
+ *         0= complete
+ *         1= error.
+ *
+ *  Called by: system function module_init
+ *
+ *
+ *--------------------------------------------------------------*/
+
+/* Deregister a client driver previously registered with
+ * isp1763_register_driver().  Its remove() callback is invoked only
+ * when it is the driver currently bound to the controller. */
+void
+isp1763_unregister_driver(struct isp1763_driver *drv)
+{
+	struct isp1763_dev *dev;
+	hal_entry("%s: Entered\n", __func__);
+
+	if (!drv)
+		return;
+
+	info("isp1763_unregister_driver(drv=%p)\n", drv);
+	dev = &isp1763_loc_dev[drv->index];
+	if (dev->driver == drv) {
+		/* registered driver is the same as the requesting driver */
+		if (drv->remove)
+			drv->remove(dev);
+		dev->driver = NULL;
+		info(": De-registered Driver %s\n", drv->name);
+	}
+	hal_entry("%s: Exit\n", __func__);
+}				/* End of isp1763_unregister_driver */
+EXPORT_SYMBOL(isp1763_unregister_driver);
+
+
+/*--------------------------------------------------------------*
+ *               ISP1763 Platform driver interface routine.
+ *--------------------------------------------------------------*/
+
+
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_module_init
+ *
+ *  This  is the module initialization function. It registers to
+ *  driver for a isp1763 platform device. And also resets the
+ *  internal data structures.
+ *
+ *  Input: void
+ *  Output result
+ *         0= complete
+ *         1= error.
+ *
+ *  Called by: system function module_init
+ *
+ *
+ *
+ -------------------------------------------------------------------*/
+/* Module entry point: reset the per-controller bookkeeping and bind
+ * the isp1763 platform driver (probe runs immediately if the device
+ * is present).  Returns 0 on success or a negative errno. */
+static int __init
+isp1763_module_init(void)
+{
+	int result = 0;
+	hal_entry("%s: Entered\n", __func__);
+	/* pr_debug() carries its own level; KERN_* prefixes would be
+	 * printed as literal bytes. */
+	pr_debug("+isp1763_module_init\n");
+	memset(isp1763_loc_dev, 0, sizeof(isp1763_loc_dev));
+
+	result = platform_driver_probe(&isp1763_usb_driver, isp1763_probe);
+
+	pr_debug("-isp1763_module_init\n");
+	hal_entry("%s: Exit\n", __func__);
+	return result;
+}
+
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_module_cleanup
+ *
+ * This  is the module cleanup function. It de-registers the
+ * Platform driver and resets the internal data structures.
+ *
+ *  Input: void
+ *  Output void
+ *
+ *  Called by: system function module_cleanup
+ *
+ *
+ *
+ --------------------------------------------------------------*/
+
+/* Module exit point: unbind the platform driver, then clear the
+ * per-controller bookkeeping. */
+static void __exit
+isp1763_module_cleanup(void)
+{
+	pr_debug("Hal Module Cleanup\n");
+	/* Unregister first: the driver core may run remove callbacks that
+	 * still reference isp1763_loc_dev. */
+	platform_driver_unregister(&isp1763_usb_driver);
+
+	memset(isp1763_loc_dev, 0, sizeof(isp1763_loc_dev));
+}
+
+/* Issue one throwaway payload-memory read at offset 0x0400.
+ * Presumably required to settle the chip's memory interface after
+ * reset — NOTE(review): confirm against the ISP1763A programming
+ * guide. */
+void dummy_mem_read(struct isp1763_dev *dev)
+{
+	u32 w = 0;
+	isp1763_reg_write16(dev, HC_MEM_READ_REG, 0x0400);
+	w = isp1763_reg_read16(dev, HC_DATA_REG, w);
+
+	pr_debug("dummy_read DONE: %x\n", w);
+	msleep(10);
+}
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_probe
+ *
+ * probe function of ISP1763
+ * This function is called from module_init if the corresponding platform
+ * device is present. This function initializes the information
+ * for the Host Controller with the assigned resources and tests the register
+ * access to the controller and do a software reset and makes it ready
+ * for the driver to play with. It also calls setup_gpio passed from pdata
+ * to setup GPIOs (e.g. used for IRQ and RST lines).
+ *
+ *  Input:
+ *              struct platform_device *dev   ----> Platform Device structure
+ *  Output void
+ *
+ *  Called by: platform driver core at probe time
+ *
+ *
+ *
+ --------------------------------------------------------------**/
+
+static int __devinit
+isp1763_probe(struct platform_device *pdev)
+{
+	u32 reg_data = 0;
+	struct isp1763_dev *loc_dev;
+	int status = 1;
+	u32 hwmodectrl = 0;
+	u16 us_reset_hc = 0;
+	u32 chipid = 0;
+	struct isp1763_platform_data *pdata = pdev->dev.platform_data;
+
+	hal_entry("%s: Entered\n", __func__);
+
+	/* Platform data carries the GPIO callbacks dereferenced below;
+	 * fail cleanly instead of oopsing when it is missing. */
+	if (!pdata) {
+		pr_err("%s: missing platform data\n", __func__);
+		return -ENODEV;
+	}
+
+	hal_init(("isp1763_probe(dev=%p)\n", pdev));
+
+	loc_dev = &(isp1763_loc_dev[ISP1763_HC]);
+	loc_dev->dev = pdev;
+
+	/* Get the Host Controller IO and INT resources */
+	loc_dev->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!loc_dev->mem_res) {
+		pr_err("%s: failed to get platform resource mem\n", __func__);
+		return -ENODEV;
+	}
+
+	loc_dev->baseaddress = ioremap_nocache(loc_dev->mem_res->start,
+					resource_size(loc_dev->mem_res));
+	if (!loc_dev->baseaddress) {
+		pr_err("%s: ioremap failed\n", __func__);
+		status = -ENOMEM;
+		goto put_mem_res;
+	}
+	/* %p instead of (int) cast: the cast truncates on 64-bit. */
+	pr_info("%s: ioremap done at: %p\n", __func__,
+					loc_dev->baseaddress);
+	/* platform_get_irq() returns a negative errno on failure, so
+	 * test the raw return value before storing it. */
+	status = platform_get_irq(pdev, 0);
+	if (status <= 0) {
+		pr_err("%s: platform_get_irq failed\n", __func__);
+		status = -ENODEV;
+		goto free_regs;
+	}
+	loc_dev->irq = status;
+
+	loc_dev->index = ISP1763_HC;	/*zero */
+	loc_dev->length = resource_size(loc_dev->mem_res);
+
+	hal_init(("isp1763 HC MEM Base= %p irq = %d\n",
+		loc_dev->baseaddress, loc_dev->irq));
+
+	/* Setup GPIOs and issue RESET_N to Controller */
+	if (pdata->setup_gpio)
+		if (pdata->setup_gpio(1))
+			pr_err("%s: Failed to setup GPIOs for isp1763\n",
+								 __func__);
+	if (pdata->reset_gpio) {
+		gpio_set_value(pdata->reset_gpio, 0);
+		msleep(10);
+		gpio_set_value(pdata->reset_gpio, 1);
+	} else {
+		pr_err("%s: Failed to issue RESET_N to isp1763\n", __func__);
+	}
+
+	/* Settle the chip's memory interface before touching registers. */
+	dummy_mem_read(loc_dev);
+
+	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
+	pr_info("START: chip id:%x\n", chipid);
+
+	/*reset the host controller  */
+	pr_debug("RESETTING\n");
+	us_reset_hc |= 0x1;
+	isp1763_reg_write16(loc_dev, 0xB8, us_reset_hc);
+	msleep(20);
+	us_reset_hc = 0;
+	us_reset_hc |= 0x2;
+	isp1763_reg_write16(loc_dev, 0xB8, us_reset_hc);
+
+	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
+	pr_info("after HC reset, chipid:%x\n", chipid);
+
+	msleep(20);
+	hwmodectrl = isp1763_reg_read16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
+	pr_debug("Mode Ctrl Value b4 setting buswidth: %x\n", hwmodectrl);
+#ifdef DATABUS_WIDTH_16
+	hwmodectrl &= 0xFFEF;	/*enable the 16 bit bus */
+#else
+	pr_debug("Setting 8-BIT mode\n");
+	hwmodectrl |= 0x0010;	/*enable the 8 bit bus */
+#endif
+	isp1763_reg_write16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
+	pr_debug("writing 0x%x to hw mode reg\n", hwmodectrl);
+
+	hwmodectrl = isp1763_reg_read16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
+	msleep(100);
+
+	pr_debug("Mode Ctrl Value after setting buswidth: %x\n", hwmodectrl);
+
+
+	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
+	pr_debug("after setting HW MODE to 8bit, chipid:%x\n", chipid);
+
+
+
+	hal_init(("isp1763 DC MEM Base= %lx irq = %d\n",
+		loc_dev->io_base, loc_dev->irq));
+	/* Scratch-register round trip proves basic register access. */
+	reg_data = isp1763_reg_read16(loc_dev, HC_SCRATCH_REG, reg_data);
+	pr_debug("Scratch register is 0x%x\n", reg_data);
+	reg_data = 0xABCD;
+	isp1763_reg_write16(loc_dev, HC_SCRATCH_REG, reg_data);
+	reg_data = isp1763_reg_read16(loc_dev, HC_SCRATCH_REG, reg_data);
+	pr_debug("After write, Scratch register is 0x%x\n", reg_data);
+
+	if (reg_data != 0xABCD) {
+		pr_err("%s: Scratch register write mismatch!!\n", __func__);
+		status = -ENODEV;
+		goto free_gpios;
+	}
+
+	/* memcpy already copies the terminating NUL; re-terminate at the
+	 * last valid index (index sizeof() was one past the string). */
+	memcpy(loc_dev->name, ISP1763_DRIVER_NAME, sizeof(ISP1763_DRIVER_NAME));
+	loc_dev->name[sizeof(ISP1763_DRIVER_NAME) - 1] = 0;
+
+	pr_debug("-isp1763_probe\n");
+	hal_entry("%s: Exit\n", __func__);
+	return 0;
+
+free_gpios:
+	if (pdata->setup_gpio)
+		pdata->setup_gpio(0);
+free_regs:
+	iounmap(loc_dev->baseaddress);
+put_mem_res:
+	loc_dev->baseaddress = NULL;
+	hal_entry("%s: Exit\n", __func__);
+	return status;
+}				/* End of isp1763_probe */
+
+
+/*--------------------------------------------------------------*
+ *
+ *  Module details: isp1763_remove
+ *
+ * cleanup function of ISP1763
+ * This functions de-initializes the local variables, frees GPIOs
+ * and releases memory resource.
+ *
+ *  Input:
+ *              struct platform_device *dev    ----> Platform Device structure
+ *
+ *  Output void
+ *
+ *  Called by: system function module_cleanup
+ *
+ *
+ *
+ --------------------------------------------------------------*/
+/* Platform-driver remove: unmap controller registers and tear down
+ * the GPIOs configured by probe.  Returns 0 or the setup_gpio(0)
+ * result. */
+static int __devexit
+isp1763_remove(struct platform_device *pdev)
+{
+	struct isp1763_dev *loc_dev;
+	struct isp1763_platform_data *pdata = pdev->dev.platform_data;
+
+	hal_init(("isp1763_remove(dev=%p)\n", pdev));
+
+	loc_dev = &isp1763_loc_dev[ISP1763_HC];
+	iounmap(loc_dev->baseaddress);
+	loc_dev->baseaddress = NULL;
+	/* pdata may be absent (probe would then have failed early, but
+	 * guard anyway before dereferencing). */
+	if (pdata && pdata->setup_gpio)
+		return pdata->setup_gpio(0);
+
+	return 0;
+}				/* End of isp1763_remove */
+
+
+/* Module metadata and entry/exit points. */
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_init(isp1763_module_init);
+module_exit(isp1763_module_cleanup);
diff --git a/drivers/usb/host/pehci/hal/hal_msm.h b/drivers/usb/host/pehci/hal/hal_msm.h
new file mode 100644
index 0000000..a7a65b7
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/hal_msm.h
@@ -0,0 +1,85 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a hardware abstraction layer header file.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef	HAL_X86_H
+#define	HAL_X86_H
+
+#define	DRIVER_AUTHOR	"ST-ERICSSON	  "
+#define	DRIVER_DESC	"ISP1763 bus driver"
+
+/* Driver tuning, per ST-ERICSSON requirements:	*/
+
+#define	MEM_TO_CHECK		4096	/*bytes, must be multiple of 2 */
+
+/* BIT defines */
+#define	BIT0	(1 << 0)
+#define	BIT1	(1 << 1)
+#define	BIT2	(1 << 2)
+#define	BIT3	(1 << 3)
+#define	BIT4	(1 << 4)
+#define	BIT5	(1 << 5)
+#define	BIT6	(1 << 6)
+#define	BIT7	(1 << 7)
+#define	BIT8	(1 << 8)
+#define	BIT9	(1 << 9)
+#define	BIT10	(1 << 10)
+#define	BIT11	(1 << 11)
+#define	BIT12	(1 << 12)
+#define	BIT13	(1 << 13)
+#define	BIT14	(1 << 14)
+#define	BIT15	(1 << 15)
+#define	BIT16	(1 << 16)
+#define	BIT17	(1 << 17)
+#define	BIT18	(1 << 18)
+#define	BIT19	(1 << 19)
+#define	BIT20	(1 << 20)
+#define	BIT21	(1 << 21)
+#define	BIT22	(1 << 22)
+#define	BIT23	(1 << 23)
+#define	BIT24	(1 << 24)
+#define	BIT25	(1 << 25)	/* was mistakenly (1 << 26) */
+#define	BIT26	(1 << 26)	/* was missing entirely */
+#define	BIT27	(1 << 27)
+#define	BIT28	(1 << 28)
+#define	BIT29	(1 << 29)
+#define	BIT30	(1 << 30)
+#define	BIT31	(1U << 31)	/* unsigned: (1 << 31) overflows signed int */
+
+/* Definitions Related to Chip Address and CPU Physical	Address
+ * cpu_phy_add:	CPU Physical Address , it uses 32 bit data per address
+ * chip_add   :	Chip Address, it uses double word(64) bit data per address
+ */
+#define	chip_add(cpu_phy_add)		(((cpu_phy_add)	- 0x400) / 8)
+#define	cpu_phy_add(chip_add)		((8 * (chip_add)) + 0x400)
+
+/* for getting end add,	and start add, provided	we have	one address with us */
+/* IMPORTANT length  hex(base16) and dec(base10) works fine*/
+/* Arguments fully parenthesized so calls like end_add(a, b + c)
+ * expand with the intended precedence. */
+#define	end_add(start_add, length)	((start_add) + ((length) - 4))
+#define	start_add(end_add, length)	((end_add) - ((length) - 4))
+
+/* Device Registers*/
+#define	DEV_UNLOCK_REGISTER		0x7C
+#define	DEV_INTERRUPT_REGISTER		0x18
+#define	INT_ENABLE_REGISTER		0x14
+
+#endif /* HAL_X86_H */
diff --git a/drivers/usb/host/pehci/hal/isp1763.h b/drivers/usb/host/pehci/hal/isp1763.h
new file mode 100644
index 0000000..7355185
--- /dev/null
+++ b/drivers/usb/host/pehci/hal/isp1763.h
@@ -0,0 +1,227 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : hal
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a hardware abstraction layer header file.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef	ISP1763_H
+#define	ISP1763_H
+
+
+
+/* For debugging option: ------------------- */
+#define PTD_DUMP_SCHEDULE
+#undef  PTD_DUMP_SCHEDULE
+
+#define PTD_DUMP_COMPLETE
+#undef  PTD_DUMP_COMPLETE
+/* ------------------------------------*/
+#define CONFIG_ISO_SUPPORT
+
+#ifdef CONFIG_ISO_SUPPORT
+
+/* Isochronous debug message categories (all enabled). */
+#define	ISO_DBG_ENTRY 1
+#define	ISO_DBG_EXIT  1
+#define	ISO_DBG_ADDR 1
+#define	ISO_DBG_DATA 1
+#define	ISO_DBG_ERR  1
+#define	ISO_DBG_INFO 1
+
+#if 0				/* Set to 1 to enable isochronous debugging */
+#define	iso_dbg(category, format, arg...) \
+do \
+{ \
+	if(category) \
+	{ \
+		printk(format, ## arg);	\
+	} \
+} while(0)
+#else
+/* do { } while(0) keeps the disabled form a single complete statement,
+ * safe inside unbraced if/else bodies (bare while(0) was fragile). */
+#define	iso_dbg(category, format, arg...) do { } while(0)
+#endif
+
+#endif /* CONFIG_ISO_SUPPORT */
+
+/*Debug	For Entry/Exit of the functions	*/
+//#define HCD_DEBUG_LEVEL1 
+#ifdef HCD_DEBUG_LEVEL1
+#define	pehci_entry(format, args... ) printk(format, ##args)
+#else
+#define	pehci_entry(format, args...) do	{ } while(0)
+#endif
+
+/*Debug	for Port Info and Errors */
+//#define HCD_DEBUG_LEVEL2 
+#ifdef HCD_DEBUG_LEVEL2
+#define	pehci_print(format, args... ) printk(format, ##args)
+#else
+#define	pehci_print(format, args...) do	{ } while(0)
+#endif
+
+/*Debug	For the	Port changes and Enumeration */
+//#define HCD_DEBUG_LEVEL3 
+#ifdef HCD_DEBUG_LEVEL3
+#define	pehci_info(format,arg...) printk(format, ##arg)
+#else
+#define	pehci_info(format,arg...) do {}	while (0)
+#endif
+
+/*Debug	For Transfer flow  */
+// #define HCD_DEBUG_LEVEL4 
+#ifdef HCD_DEBUG_LEVEL4
+#define	pehci_check(format,args...) printk(format, ##args)
+#else
+/* Empty expansion was a statement hazard; match the sibling macros. */
+#define	pehci_check(format,args...) do { } while(0)
+#endif
+/*******************END	HOST CONTROLLER**********************************/
+/*******************END	HOST CONTROLLER**********************************/
+
+
+
+/*******************START DEVICE CONTROLLER******************************/
+
+/* For MTP support */
+#undef MTP_ENABLE		/* Enable to add MTP support; But requires MTP class driver to be present to work */
+/*For CHAPTER8 TEST */
+#undef	CHAPTER8_TEST		/* Enable to Pass Chapter 8 Test */
+
+/* Debug Entry/Exit of Function as well as some other Info */
+//#define DEV_DEBUG_LEVEL2
+#ifdef DEV_DEBUG_LEVEL2
+#define	dev_print(format,arg...) printk(format,	##arg)
+#else
+#define	dev_print(format,arg...) do {} while (0)
+#endif
+
+/*Debug	for Interrupt ,	Registers , device Enable/Disable and some other info */
+//#define DEV_DEBUG_LEVEL3
+/* NOTE(review): this clobbers the kernel's dev_info() from
+ * <linux/device.h> for every file that includes this header —
+ * confirm no translation unit relies on the standard helper. */
+#undef dev_info
+#ifdef DEV_DEBUG_LEVEL3
+#define	dev_info(format,arg...)	printk(format, ##arg)
+#else
+#define	dev_info(format,arg...)	do {} while (0)
+#endif
+
+/*Debug	for Transfer flow , Enumeration	and Packet info	*/
+//#define DEV_DEBUG_LEVEL4
+#ifdef DEV_DEBUG_LEVEL4
+#define	dev_check(format,args...) printk(format, ##args)
+#else
+#define	dev_check(format,args...) do{}while(0)
+#endif
+/*******************END	DEVICE CONTROLLER********************************/
+
+
+/*******************START MSCD*******************************************/
+/*Debug	Entry/Exit of Function as well as some other Information*/
+//#define MSCD_DEBUG_LEVEL2
+#ifdef MSCD_DEBUG_LEVEL2
+#define	mscd_print(format,arg...) printk(format, ##arg)
+#else
+#define	mscd_print(format,arg...) do {}	while (0)
+#endif
+
+/*Debug	for Info */
+//#define MSCD_DEBUG_LEVEL3
+#ifdef MSCD_DEBUG_LEVEL3
+#define	mscd_info(format,arg...) printk(format,	##arg)
+#else
+#define	mscd_info(format,arg...) do {} while (0)
+#endif
+/*******************END	MSCD*********************************************/
+
+
+/*******************START OTG CONTROLLER*********************************/
+/*#define	OTG */			/*undef	for Device only	and Host only */
+#define	ALL_FSM_FLAGS
+/*Debug	for Entry/Exit and Info	*/
+/* #define OTG_DEBUG_LEVEL1 */
+#ifdef OTG_DEBUG_LEVEL1
+#define	otg_entry(format, args... ) printk(format, ##args)
+#else
+#define	otg_entry(format, args...) do {	} while(0)
+#endif
+
+/*Debug	for State Machine Flow */
+/* #define OTG_DEBUG_LEVEL2 */
+#ifdef OTG_DEBUG_LEVEL2
+#define	otg_print(format,arg...) printk(format,	##arg)
+#else
+#define	otg_print(format,arg...) do {} while (0)
+#endif
+/*Debug	for Info */
+/* #define OTG_DEBUG_LEVEL3 */
+#ifdef OTG_DEBUG_LEVEL3
+#define	otg_info(format,arg...)	printk(format, ##arg)
+#else
+#define	otg_info(format,arg...)	do {} while (0)
+#endif
+
+/* #define OTG_DEBUG_LEVEL4 */
+#ifdef OTG_DEBUG_LEVEL4
+#define	otg_printB(format,arg...) printk(format, ##arg)
+#else
+#define	otg_printB(format,arg...) do {}	while (0)
+#endif
+/*******************END	OTG CONTROLLER***********************************/
+
+
+
+/*******************START FOR HAL ***************************************/
+/* NOTE(review): 'info' and 'warn' are very generic names; these
+ * defines silently rewrite any later use of those identifiers in
+ * including files — confirm no collision with other headers. */
+#define info pr_debug
+#define warn pr_warn
+/*Debug For Entry and Exit of the functions */
+#undef HAL_DEBUG_LEVEL1
+#ifdef HAL_DEBUG_LEVEL1
+#define	hal_entry(format, args... ) printk(format, ##args)
+#else
+#define	hal_entry(format, args...) do {	} while(0)
+#endif
+
+/*Debug	For Interrupt information */
+#undef HAL_DEBUG_LEVEL2
+#ifdef HAL_DEBUG_LEVEL2
+#define	hal_int(format,	args...	) printk(format, ##args)
+#else
+#define	hal_int(format,	args...) do { }	while(0)
+#endif
+
+/*Debug	For HAL	Initialisation and Mem Initialisation */
+#undef HAL_DEBUG_LEVEL3
+#ifdef HAL_DEBUG_LEVEL3
+/* hal_init takes a double-parenthesised argument: hal_init(("fmt", x)) */
+#define	hal_init(format, args... ) printk(format, ##args)
+#else
+#define	hal_init(format, args...) do { } while(0)
+#endif
+/*******************END	FOR HAL*******************************************/
+
+
+
+/*******************START FOR ALL CONTROLLERS*****************************/
+/*#define	CONFIG_USB_OTG */	/*undef	for Device only	and Host only */
+/*#define	ISP1763_DEVICE */
+
+#ifdef CONFIG_USB_DEBUG
+#define	DEBUG
+#else
+#undef DEBUG
+#endif
+/*******************END	FOR ALL	CONTROLLERS*******************************/
+#endif
diff --git a/drivers/usb/host/pehci/host/Makefile b/drivers/usb/host/pehci/host/Makefile
new file mode 100644
index 0000000..0c8552e
--- /dev/null
+++ b/drivers/usb/host/pehci/host/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the pehci driver (if driver is inside kernel tree).
+#
+
+obj-$(CONFIG_USB_PEHCI_HCD) += pehci.o
+
diff --git a/drivers/usb/host/pehci/host/itdptd.c b/drivers/usb/host/pehci/host/itdptd.c
new file mode 100644
index 0000000..6699c3a
--- /dev/null
+++ b/drivers/usb/host/pehci/host/itdptd.c
@@ -0,0 +1,2156 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file. Isochronous event processing is handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+#ifdef CONFIG_ISO_SUPPORT
+void phcd_clean_periodic_ep(void);
+#endif
+
+#ifdef CONFIG_ISO_SUPPORT
+
+#define MAX_URBS		8
+#define MAX_EPS			2/*maximum 2 endpoints supported in ISO transfers.*/
+/* Number of microframes per frame that get scheduled.  For a
+ * high-speed device NUMMICROFRAME should be 8, but microframe #7 was
+ * observed to fail, so only microframes #0-#4 are actually used.
+ * Writer : LyNguyen - 25Nov09
+ */
+#define NUMMICROFRAME		8
+/* Ring of ISO URBs waiting to be scheduled. */
+struct urb *gstUrb_pending[MAX_URBS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+struct usb_host_endpoint *periodic_ep[MAX_EPS];
+
+int giUrbCount = 0;		/* count of the pending urbs */
+int giUrbIndex = 0;		/* index of the urb to be scheduled next */
+/*
+ * phcd_iso_sitd_to_ptd - convert an SITD into a PTD
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_sitd *sitd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * void  * ptd
+ *  - Points to the ISO ptd structure that needs to be initialized
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  -Initializing the PTD that will be used for the ISO transfer
+ */
+/* Convert an sITD (split-transaction ISO descriptor) into the chip's
+ * PTD layout.  Returns @ptd, now fully initialized.  Note: some
+ * iso_dbg DATA traces below still say "itd_to_ptd"; they are shared
+ * wording with the ITD variant. */
+void *
+phcd_iso_sitd_to_ptd(phci_hcd * hcd,
+	struct ehci_sitd *sitd, struct urb *urb, void *ptd)
+{
+	struct _isp1763_isoptd *iso_ptd;
+	struct isp1763_mem_addr *mem_addr;
+
+	unsigned long max_packet, mult, length, td_info1, td_info3;
+	unsigned long token, port_num, hub_num, data_addr;
+	unsigned long frame_number;
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_sitd_to_ptd entry\n");
+
+	/* Variable initialization */
+	iso_ptd = (struct _isp1763_isoptd *) ptd;
+	mem_addr = &sitd->mem_addr;
+
+	/*
+	 * For both ISO and INT endpoints descriptors, new bit fields we added to
+	 * specify whether or not the endpoint supports high bandwidth, and if so
+	 * the number of additional packets that the endpoint can support during a
+	 * single microframe.
+	 * Bits 12:11 specify whether the endpoint supports high-bandwidth transfers
+	 * Valid values:
+	 *             00 None (1 transaction/uFrame)
+	 *             01 1 additional transaction
+	 *             10 2 additional transactions
+	 *             11 reserved
+	 */
+	max_packet = usb_maxpacket(urb->dev, urb->pipe,usb_pipeout(urb->pipe));
+
+	/*
+	 * We need to add 1 since our Multi starts with 1 instead of the USB specs defined
+	 * zero (0).
+	 */
+	mult = 1 + ((max_packet >> 11) & 0x3);
+	max_packet &= 0x7ff;
+
+	/* This is the size of the request (bytes to write or bytes to read) */
+	length = sitd->length;
+
+	/*
+	 * Set V bit to indicate that there is payload to be sent or received. And
+	 * indicate that the current PTD is active.
+	 */
+	td_info1 = QHA_VALID;
+
+	/*
+	 * Set the number of bytes that can be transferred by this PTD. This indicates
+	 * the depth of the data field.
+	 */
+	td_info1 |= (length << 3);
+
+	/*
+	 * Set the maximum packet length which indicates the maximum number of bytes that
+	 * can be sent to or received from the endpoint in a single data packet.
+	 */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * According to the ISP1763 specs for sITDs, OUT token max packet should
+		 * not be more  than 188 bytes, while IN token max packet not more than
+		 * 192 bytes (ISP1763 Rev 3.01, Table 72, page 79
+		 */
+		if (usb_pipein(urb->pipe) && (max_packet > 192)) {
+			iso_dbg(ISO_DBG_INFO,
+				"IN Max packet over maximum\n");
+			max_packet = 192;
+		}
+
+		if ((!usb_pipein(urb->pipe)) && (max_packet > 188)) {
+			iso_dbg(ISO_DBG_INFO,
+				"OUT Max packet over maximum\n");
+			max_packet = 188;
+		}
+	}
+	td_info1 |= (max_packet << 18);
+
+	/*
+	 * Place the FIRST BIT of the endpoint number here.
+	 */
+	td_info1 |= (usb_pipeendpoint(urb->pipe) << 31);
+
+	/*
+	 * Set the number of successive packets the HC can submit to the endpoint.
+	 */
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		td_info1 |= MULTI(mult);
+	}
+
+	/* Set the first DWORD */
+	iso_ptd->td_info1 = td_info1;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD0 = 0x%08x\n",
+		iso_ptd->td_info1);
+
+	/*
+	 * Since the first bit have already been added on the first DWORD of the PTD
+	 * we only need to add the last 3-bits of the endpoint number.
+	 */
+	token = (usb_pipeendpoint(urb->pipe) & 0xE) >> 1;
+
+	/*
+	 * Get the device address and set it accordingly to its assigned bits of the 2nd
+	 * DWORD.
+	 */
+	token |= usb_pipedevice(urb->pipe) << 3;
+
+	/* See a split transaction is needed */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * If we are performing a SPLIT transaction indicate that it is so by setting
+		 * the S bit of the second DWORD.
+		 */
+		token |= 1 << 14;
+
+		port_num = urb->dev->ttport;
+		hub_num = urb->dev->tt->hub->devnum;
+
+		/* Set the the port number of the hub or embedded TT */
+		token |= port_num << 18;
+
+		/*
+		 * Set the hub address, this should be zero for the internal or
+		 * embedded hub
+		 */
+		token |= hub_num << 25;
+	}
+
+	/* if(urb->dev->speed != USB_SPEED_HIGH) */
+	/*
+	 * Determine if the direction of this pipe is IN, if so set the Token bit of
+	 * the second DWORD to indicate it as IN. Since it is initialized to zero and
+	 * zero indicates an OUT token, then we do not need anything to the Token bit
+	 * if it is an OUT token.
+	 */
+	if (usb_pipein(urb->pipe)) {
+		token |= (IN_PID << 10);
+	}
+
+	/* Set endpoint type to Isochronous */
+	token |= EPTYPE_ISO;
+
+	/* Set the second DWORD */
+	iso_ptd->td_info2 = token;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD1 = 0x%08x\n",
+		iso_ptd->td_info2);
+
+	/*
+	 * Get the physical address of the memory location that was allocated for this PTD
+	 * in the PAYLOAD region, using the formula indicated in sectin 7.2.2 of the ISP1763 specs
+	 * rev 3.01 page 17 to 18.
+	 */
+	data_addr = ((unsigned long) (mem_addr->phy_addr) & 0xffff) - 0x400;
+	data_addr >>= 3;
+
+	/*  Set it to its location in the third DWORD */
+	td_info3 =( 0xffff&data_addr) << 8;
+
+	/*
+	 * Set the frame number when this PTD will be sent for ISO OUT or IN
+	 * Bits 0 to 2 are don't care, only bits 3 to 7.
+	 * (A dead store of sitd->framenumber was removed here: it was
+	 * immediately overwritten by start_frame.)
+	 */
+	frame_number = sitd->start_frame;
+	td_info3 |= (0xff& ((frame_number) << 3));
+
+	/* Set the third DWORD */
+	iso_ptd->td_info3 = td_info3;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD2 = 0x%08x\n",
+		iso_ptd->td_info3);
+
+	/*
+	 * Set the A bit of the fourth DWORD to 1 to indicate that this PTD is active.
+	 * This have the same functionality with the V bit of DWORD0
+	 */
+	iso_ptd->td_info4 = QHA_ACTIVE;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD3 = 0x%08x\n",
+		iso_ptd->td_info4);
+
+	/* Set the fourth DWORD to specify which uSOFs the start split needs to be placed */
+	if (usb_pipein(urb->pipe)){
+		iso_ptd->td_info5 = (sitd->ssplit);
+	}else{
+		iso_ptd->td_info5 = (sitd->ssplit << 2);
+	}
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD4 = 0x%08x\n",
+		iso_ptd->td_info5);
+
+	/*
+	 * Set the fifth DWORD to specify which uSOFs the complete split needs to be sent.
+	 * This is VALID only for IN (since ISO transfers don't have handshake stages)
+	 */
+	iso_ptd->td_info6 = sitd->csplit;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD5 = 0x%08x\n",
+		iso_ptd->td_info6);
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_sitd_to_ptd exit\n");
+	return iso_ptd;
+}
+
+
+/*
+ * phcd_iso_itd_to_ptd - convert an ITD into a PTD
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_itd *itd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more ST-ERICSSON specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * void  * ptd
+ *  - Points to the ISO ptd structure that needs to be initialized
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  -Initializing the PTD that will be used for the ISO transfer
+ */
+void *
+phcd_iso_itd_to_ptd(phci_hcd * hcd,
+	struct ehci_itd *itd, struct urb *urb, void *ptd)
+{
+	struct _isp1763_isoptd *iso_ptd;
+	struct isp1763_mem_addr *mem_addr;
+
+	unsigned long max_packet, mult, length, td_info1, td_info3;
+	unsigned long token, port_num, hub_num, data_addr;
+	unsigned long frame_number;
+	int maxpacket;
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_itd_to_ptd entry\n");
+
+	/* Variable initialization */
+	iso_ptd = (struct _isp1763_isoptd *) ptd;
+	mem_addr = &itd->mem_addr;
+
+	/*
+	 * For both ISO and INT endpoints descriptors, new bit fields we added to
+	 * specify whether or not the endpoint supports high bandwidth, and if so
+	 * the number of additional packets that the endpoint can support during a
+	 * single microframe.
+	 * Bits 12:11 specify whether the endpoint supports high-bandwidth transfers
+	 * Valid values:
+	 *             00 None (1 transaction/uFrame)
+	 *             01 1 additional transaction
+	 *             10 2 additional transactions
+	 *             11 reserved
+	 */
+	max_packet = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));	
+
+	/*
+	 * We need to add 1 since our Multi starts with 1 instead of the USB specs defined
+	 * zero (0).
+	 */
+	maxpacket &= 0x7ff;
+	mult = 1 + ((max_packet >> 11) & 0x3);
+
+
+	max_packet &= 0x7ff;
+
+	/* This is the size of the request (bytes to write or bytes to read) */
+	length = itd->length;
+
+	/*
+	 * Set V bit to indicate that there is payload to be sent or received. And
+	 * indicate that the current PTD is active.
+	 */
+	td_info1 = QHA_VALID;
+
+	/*
+	 * Set the number of bytes that can be transferred by this PTD. This indicates
+	 * the depth of the data field.
+	 */
+	td_info1 |= (length << 3);
+
+	/*
+	 * Set the maximum packet length which indicates the maximum number of bytes that
+	 * can be sent to or received from the endpoint in a single data packet.
+	 */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * According to the ISP1763 specs for sITDs, OUT token max packet should
+		 * not be more  than 188 bytes, while IN token max packet not more than
+		 * 192 bytes (ISP1763 Rev 3.01, Table 72, page 79
+		 */
+		if (usb_pipein(urb->pipe) && (max_packet > 192)) {
+			iso_dbg(ISO_DBG_INFO,
+				"[phcd_iso_itd_to_ptd]: IN Max packet over maximum\n");
+			max_packet = 192;
+		}
+
+		if ((!usb_pipein(urb->pipe)) && (max_packet > 188)) {
+			iso_dbg(ISO_DBG_INFO,
+				"[phcd_iso_itd_to_ptd]: OUT Max packet over maximum\n");
+			max_packet = 188;
+		}
+	} else {		/*HIGH SPEED */
+
+		if (max_packet > 1024){
+			max_packet = 1024;
+		}
+	}
+	td_info1 |= (max_packet << 18);
+
+	/*
+	 * Place the FIRST BIT of the endpoint number here.
+	 */
+	td_info1 |= (usb_pipeendpoint(urb->pipe) << 31);
+
+	/*
+	 * Set the number of successive packets the HC can submit to the endpoint.
+	 */
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		td_info1 |= MULTI(mult);
+	}
+
+	/* Set the first DWORD */
+	iso_ptd->td_info1 = td_info1;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD0 = 0x%08x\n",
+		iso_ptd->td_info1);
+
+	/*
+	 * Since the first bit have already been added on the first DWORD of the PTD
+	 * we only need to add the last 3-bits of the endpoint number.
+	 */
+	token = (usb_pipeendpoint(urb->pipe) & 0xE) >> 1;
+
+	/*
+	 * Get the device address and set it accordingly to its assigned bits of the 2nd
+	 * DWORD.
+	 */
+	token |= usb_pipedevice(urb->pipe) << 3;
+
+	/* See a split transaction is needed */
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		/*
+		 * If we are performing a SPLIT transaction indicate that it is so by setting
+		 * the S bit of the second DWORD.
+		 */
+		token |= 1 << 14;
+
+		port_num = urb->dev->ttport;
+		hub_num = urb->dev->tt->hub->devnum;
+
+		/* Set the the port number of the hub or embedded TT */
+		token |= port_num << 18;
+
+		/*
+		 * Set the hub address, this should be zero for the internal or
+		 * embedded hub
+		 */
+		token |= hub_num << 25;
+	}
+
+	/* if(urb->dev->speed != USB_SPEED_HIGH) */
+	/*
+	 * Determine if the direction of this pipe is IN, if so set the Token bit of
+	 * the second DWORD to indicate it as IN. Since it is initialized to zero and
+	 * zero indicates an OUT token, then we do not need anything to the Token bit
+	 * if it is an OUT token.
+	 */
+	if (usb_pipein(urb->pipe)){
+		token |= (IN_PID << 10);
+	}
+
+	/* Set endpoint type to Isochronous */
+	token |= EPTYPE_ISO;
+
+	/* Set the second DWORD */
+	iso_ptd->td_info2 = token;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD1 = 0x%08x\n",
+		iso_ptd->td_info2);
+
+	/*
+	 * Get the physical address of the memory location that was allocated for this PTD
+	 * in the PAYLOAD region, using the formula indicated in sectin 7.2.2 of the ISP1763 specs
+	 * rev 3.01 page 17 to 18.
+	 */
+	data_addr = ((unsigned long) (mem_addr->phy_addr) & 0xffff) - 0x400;
+	data_addr >>= 3;
+
+	/*  Set it to its location in the third DWORD */
+	td_info3 = (data_addr&0xffff) << 8;
+
+	/*
+	 * Set the frame number when this PTD will be sent for ISO OUT or IN
+	 * Bits 0 to 2 are don't care, only bits 3 to 7.
+	 */
+	frame_number = itd->framenumber;
+	td_info3 |= (0xff&(frame_number << 3));
+
+	/* Set the third DWORD */
+	iso_ptd->td_info3 = td_info3;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD2 = 0x%08x\n",
+		iso_ptd->td_info3);
+
+	/*
+	 * Set the A bit of the fourth DWORD to 1 to indicate that this PTD is active.
+	 * This have the same functionality with the V bit of DWORD0
+	 */
+	iso_ptd->td_info4 = QHA_ACTIVE;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD3 = 0x%08x\n",
+		iso_ptd->td_info4);
+
+	/* Set the fourth DWORD to specify which uSOFs the start split needs to be placed */
+	iso_ptd->td_info5 = itd->ssplit;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD4 = 0x%08x\n",
+		iso_ptd->td_info5);
+
+	/*
+	 * Set the fifth DWORD to specify which uSOFs the complete split needs to be sent.
+	 * This is VALID only for IN (since ISO transfers don't have handshake stages)
+	 */
+	iso_ptd->td_info6 = itd->csplit;
+	iso_dbg(ISO_DBG_DATA, "[phcd_iso_itd_to_ptd]: DWORD5 = 0x%08x\n",
+		iso_ptd->td_info6);
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_itd_to_ptd exit\n");
+	return iso_ptd;
+}				/* phcd_iso_itd_to_ptd */
+
+/*
+ * phcd_iso_scheduling_info - Initializing the start split and complete split.
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_qh *qhead
+ *  - Contains information about the endpoint.
+ * unsigned long max_pkt
+ *  - Maximum packet size that the endpoint is capable of handling
+ * unsigned long high_speed
+ *  - Indicates if the bus is a high speed bus
+ * unsigned long ep_in
+ *  - Indicates if the endpoint is an IN endpoint
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Determining the number of start split needed during an OUT transaction or
+ *    the number of complete splits needed during an IN transaction.
+ */
+unsigned long
+phcd_iso_scheduling_info(phci_hcd * hcd,
+	struct ehci_qh *qhead,
+	unsigned long max_pkt,
+	unsigned long high_speed, unsigned long ep_in)
+{
+	unsigned long count, usof, temp;
+
+	/* Local variable initialization */
+	usof = 0x1;
+
+	if (high_speed) {
+		qhead->csplit = 0;
+
+		/* Always send high speed transfers in first uframes */
+		qhead->ssplit = 0x1;
+		return 0;
+	}
+
+	/* Determine how many 188 byte-transfers are needed to send all data */
+	count = max_pkt / 188;
+
+	/*
+	 * Check is the data is not a factor of 188, if it is not then we need
+	 * one more 188 transfer to move the last set of data less than 188.
+	 */
+	if (max_pkt % 188){
+		count += 1;
+	}
+
+	/*
+	 * Remember that usof was initialized to 0x1 so that means
+	 * that usof is always guranteed a value of 0x1 and then
+	 * depending on the maxp, other bits of usof will also be set.
+	 */
+	for (temp = 0; temp < count; temp++){
+		usof |= (0x1 << temp);
+	}
+
+	if (ep_in) {
+		/*
+		 * Send start split into first frame.
+		 */
+		qhead->ssplit = 0x1;
+
+		/*
+		 * Inidicate that we can send a complete split starting from
+		 * the third uFrame to how much complete split is needed to
+		 * retrieve all data.
+		 *
+		 * Of course, the first uFrame is reserved for the start split, the
+		 * second is reserved for the TT to send the request and get some
+		 * data.
+		 */
+		qhead->csplit = (usof << 2);
+	} else {
+		/*
+		 * For ISO OUT we don't need to send out a complete split
+		 * since we do not require and data coming in to us (since ISO
+		 * do not have integrity checking/handshake).
+		 *
+		 * For start split we indicate that we send a start split from the
+		 * first uFrame up to the the last uFrame needed to retrieve all
+		 * data
+		 */
+		qhead->ssplit = usof;
+		qhead->csplit = 0;
+	}	/* else for if(ep_in) */
+	return 0;
+}				/* phcd_iso_scheduling_info */
+
+/*
+ * phcd_iso_sitd_fill - Allocate memory from the PAYLOAD memory region
+ *
+ * phci_hcd *pHcd_st
+ *  - Main host controller driver structure
+ * struct ehci_sitd *sitd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more  specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long packets
+ *  - Total number of packets to completely transfer this ISO transfer request.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Initialize the following elements of the ITS structure
+ *       > sitd->length = length;        -- the size of the request
+ *       > sitd->multi = multi;          -- the number of transactions for
+ *                                         this EP per micro frame
+ *       > sitd->hw_bufp[0] = buf_dma;   -- The base address of the buffer where
+ *                                         to put the data (this base address was
+ *                                         the buffer provided plus the offset)
+ * - Allocating memory from the PAYLOAD memory area, where the data coming from
+ *   the requesting party will be placed or data requested by the requesting party will
+ *   be retrieved when it is available.
+ */
+unsigned long
+phcd_iso_sitd_fill(phci_hcd * hcd,
+	struct ehci_sitd *sitd,
+	struct urb *urb, unsigned long packets)
+{
+	unsigned long length, offset, pipe;
+	unsigned long max_pkt;
+	dma_addr_t buff_dma;
+	struct isp1763_mem_addr *mem_addr;
+
+#ifdef COMMON_MEMORY
+	struct ehci_qh *qhead = NULL;
+#endif
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_itd_fill entry\n");
+	/*
+	 * The value for both these variables are supplied by the one
+	 * who submitted the URB.
+	 */
+	length = urb->iso_frame_desc[packets].length;
+	offset = urb->iso_frame_desc[packets].offset;
+
+	/* Initialize the status and actual length of this packet */
+	urb->iso_frame_desc[packets].actual_length = 0;
+	urb->iso_frame_desc[packets].status = -EXDEV;
+
+	/* Buffer for this packet */
+	buff_dma = (u32) ((unsigned char *) urb->transfer_buffer + offset);
+
+	/* Memory for this packet */
+	mem_addr = &sitd->mem_addr;
+
+	pipe = urb->pipe;
+	max_pkt = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+
+	max_pkt = max_pkt & 0x7FF;
+
+	if ((length < 0) || (max_pkt < length)) {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No available memory.\n");
+		return -ENOSPC;
+	}
+	sitd->buf_dma = buff_dma;
+
+
+#ifndef COMMON_MEMORY
+	/*
+	 * Allocate memory in the PAYLOAD memory region for the
+	 * data buffer for this SITD
+	 */
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		mem_addr = 0;
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead=urb->hcpriv;
+#else
+	qhead = urb->ep->hcpriv;
+#endif
+	if (qhead) {
+
+		mem_addr->phy_addr = qhead->memory_addr.phy_addr + offset;
+
+		mem_addr->virt_addr = qhead->memory_addr.phy_addr + offset;
+	} else {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+
+
+#endif
+	/* Length of this packet */
+	sitd->length = length;
+
+	/* Buffer address, one ptd per packet */
+	sitd->hw_bufp[0] = buff_dma;
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_sitd_fill exit\n");
+	return 0;
+}
+
+/*
+ * phcd_iso_itd_fill - Allocate memory from the PAYLOAD memory region
+ *
+ * phci_hcd *pHcd_st
+ *  - Main host controller driver structure
+ * struct ehci_itd *itd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more IC specific elements.
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long packets
+ *  - Total number of packets to completely transfer this ISO transfer request.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Initialize the following elements of the ITS structure
+ *       > itd->length = length;        -- the size of the request
+ *       > itd->multi = multi;          -- the number of transactions for
+ *                                         this EP per micro frame
+ *       > itd->hw_bufp[0] = buf_dma;   -- The base address of the buffer where
+ *                                         to put the data (this base address was
+ *                                         the buffer provided plus the offset)
+ * - Allocating memory from the PAYLOAD memory area, where the data coming from
+ *   the requesting party will be placed or data requested by the requesting party will
+ *   be retrieved when it is available.
+ */
+unsigned long
+phcd_iso_itd_fill(phci_hcd * hcd,
+	struct ehci_itd *itd,
+	struct urb *urb,
+	unsigned long packets, unsigned char numofPkts)
+{
+	unsigned long length, offset, pipe;
+	unsigned long max_pkt, mult;
+	dma_addr_t buff_dma;
+	struct isp1763_mem_addr *mem_addr;
+#ifdef COMMON_MEMORY
+	struct ehci_qh *qhead = NULL;
+#endif
+	int i = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_iso_itd_fill entry\n");
+	for (i = 0; i < 8; i++){
+		itd->hw_transaction[i] = 0;
+	}
+	/*
+	 * The value for both these variables are supplied by the one
+	 * who submitted the URB.
+	 */
+	length = urb->iso_frame_desc[packets].length;
+	offset = urb->iso_frame_desc[packets].offset;
+
+	/* Initialize the status and actual length of this packet */
+	urb->iso_frame_desc[packets].actual_length = 0;
+	urb->iso_frame_desc[packets].status = -EXDEV;
+
+	/* Buffer for this packet */
+	buff_dma = cpu_to_le32((unsigned char *) urb->transfer_buffer + offset);
+
+	/* Memory for this packet */
+	mem_addr = &itd->mem_addr;
+
+	pipe = urb->pipe;
+	max_pkt = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+
+	mult = 1 + ((max_pkt >> 11) & 0x3);
+	max_pkt = max_pkt & 0x7FF;
+	max_pkt *= mult;
+
+	if ((length < 0) || (max_pkt < length)) {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No available memory.\n");
+		return -ENOSPC;
+	}
+	itd->buf_dma = buff_dma;
+	for (i = packets + 1; i < numofPkts + packets; i++)
+		length += urb->iso_frame_desc[i].length;
+
+	/*
+	 * Allocate memory in the PAYLOAD memory region for the
+	 * data buffer for this ITD
+	 */
+#ifndef COMMON_MEMORY
+
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		mem_addr = 0;
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+#else
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	qhead = urb->ep->hcpriv;
+#else
+	qhead=urb->hcpriv;
+#endif
+	if (qhead) {
+
+		mem_addr->phy_addr = qhead->memory_addr.phy_addr + offset;
+
+		mem_addr->virt_addr = qhead->memory_addr.phy_addr + offset;
+	} else {
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_iso_itd_fill Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+
+
+#endif
+	/* Length of this packet */
+	itd->length = length;
+
+	/* Number of transaction per uframe */
+	itd->multi = mult;
+
+	/* Buffer address, one ptd per packet */
+	itd->hw_bufp[0] = buff_dma;
+
+	iso_dbg(ISO_DBG_EXIT, "phcd_iso_itd_fill exit\n");
+	return 0;
+}				/* phcd_iso_itd_fill */
+
+/*
+ * phcd_iso_get_sitd_ptd_index - Allocate an ISO PTD from the ISO PTD map list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_sitd *sitd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more  specific elements.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Allocating an ISO PTD from the ISO PTD map list
+ * - Set the equivalent bit of the allocated PTD to active
+ *   in the bitmap so that this PTD will be included into
+ *   the periodic schedule
+ */
+void
+phcd_iso_get_sitd_ptd_index(phci_hcd * hcd, struct ehci_sitd *sitd)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	unsigned long buff_type, max_ptds;
+	unsigned char sitd_index, bitmap;
+
+	/* Local variable initialization */
+	bitmap = 0x1;
+	buff_type = td_ptd_pipe_x_buff_type[TD_PTD_BUFF_TYPE_ISTL];
+	ptd_map_buff = (td_ptd_map_buff_t *) & (td_ptd_map_buff[buff_type]);
+	max_ptds = ptd_map_buff->max_ptds;
+	sitd->sitd_index = TD_PTD_INV_PTD_INDEX;
+
+	for (sitd_index = 0; sitd_index < max_ptds; sitd_index++) {
+		/*
+		 * ISO have 32 PTDs, the first thing to do is look for a free PTD.
+		 */
+		if (ptd_map_buff->map_list[sitd_index].state == TD_PTD_NEW) {
+			iso_dbg(ISO_DBG_INFO,
+				"[phcd_iso_get_itd_ptd_index] There's a free PTD No. %d\n",
+				sitd_index);
+			/*
+			 * Determine if this is a newly allocated SITD by checking the
+			 * itd_index, since it was set to TD_PTD_INV_PTD_INDEX during
+			 * initialization
+			 */
+			if (sitd->sitd_index == TD_PTD_INV_PTD_INDEX) {
+				sitd->sitd_index = sitd_index;
+			}
+
+			/* Once there is a free slot, indicate that it is already taken */
+			ptd_map_buff->map_list[sitd_index].datatoggle = 0;
+			ptd_map_buff->map_list[sitd_index].state =
+				TD_PTD_ACTIVE;
+			ptd_map_buff->map_list[sitd_index].qtd = NULL;
+
+			/* Put a connection to the SITD with the PTD maplist */
+			ptd_map_buff->map_list[sitd_index].sitd = sitd;
+			ptd_map_buff->map_list[sitd_index].itd = NULL;
+			ptd_map_buff->map_list[sitd_index].qh = NULL;
+
+			/* ptd_bitmap just holds the bit assigned to this PTD. */
+			ptd_map_buff->map_list[sitd_index].ptd_bitmap =
+				bitmap << sitd_index;
+
+			phci_hcd_fill_ptd_addresses(&ptd_map_buff->
+				map_list[sitd_index], sitd->sitd_index,
+				buff_type);
+
+			/*
+			 * Indicate that this SITD is the last in the list and update
+			 * the number of active PTDs
+			 */
+			ptd_map_buff->map_list[sitd_index].lasttd = 0;
+			ptd_map_buff->total_ptds++;
+
+
+			ptd_map_buff->active_ptd_bitmap |=
+				(bitmap << sitd_index);
+			ptd_map_buff->pending_ptd_bitmap |= (bitmap << sitd_index);	
+			break;
+		}		/* if(ptd_map_buff->map_list[sitd_index].state == TD_PTD_NEW) */
+	}			/* for(itd_index = 0; itd_index < max_ptds; itd_index++) */
+	return;
+}
+
+/*
+ * phcd_iso_get_itd_ptd_index - Allocate an ISO PTD from the ISO PTD map list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct ehci_itd *itd
+ *  - Isochronous Transfer Descriptor, contains elements as defined by the
+ *        EHCI standard plus a few more IC specific elements.
+ *
+ * API Description
+ * This is mainly responsible for:
+ * - Allocating an ISO PTD from the ISO PTD map list
+ * - Set the equivalent bit of the allocated PTD to active
+ *   in the bitmap so that this PTD will be included into
+ *   the periodic schedule
+ */
+void
+phcd_iso_get_itd_ptd_index(phci_hcd * hcd, struct ehci_itd *itd)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	unsigned long buff_type, max_ptds;
+	unsigned char itd_index, bitmap;
+
+	/* Local variable initialization */
+	bitmap = 0x1;
+	buff_type = td_ptd_pipe_x_buff_type[TD_PTD_BUFF_TYPE_ISTL];
+	ptd_map_buff = (td_ptd_map_buff_t *) & (td_ptd_map_buff[buff_type]);
+	max_ptds = ptd_map_buff->max_ptds;
+
+	itd->itd_index = TD_PTD_INV_PTD_INDEX;
+
+	for (itd_index = 0; itd_index < max_ptds; itd_index++) {
+		/*
+		 * ISO have 32 PTDs, the first thing to do is look for a free PTD.
+		 */
+		if (ptd_map_buff->map_list[itd_index].state == TD_PTD_NEW) {
+			/*
+			 * Determine if this is a newly allocated ITD by checking the
+			 * itd_index, since it was set to TD_PTD_INV_PTD_INDEX during
+			 * initialization
+			 */
+			if (itd->itd_index == TD_PTD_INV_PTD_INDEX) {
+				itd->itd_index = itd_index;
+			}
+
+			/* Once there is a free slot, indicate that it is already taken */
+			ptd_map_buff->map_list[itd_index].datatoggle = 0;
+			ptd_map_buff->map_list[itd_index].state = TD_PTD_ACTIVE;
+			ptd_map_buff->map_list[itd_index].qtd = NULL;
+
+			/* Put a connection to the ITD with the PTD maplist */
+			ptd_map_buff->map_list[itd_index].itd = itd;
+			ptd_map_buff->map_list[itd_index].qh = NULL;
+
+			/* ptd_bitmap just holds the bit assigned to this PTD. */
+			ptd_map_buff->map_list[itd_index].ptd_bitmap =
+				bitmap << itd_index;
+
+			phci_hcd_fill_ptd_addresses(&ptd_map_buff->
+				map_list[itd_index],
+				itd->itd_index, buff_type);
+
+			/*
+			 * Indicate that this ITD is the last in the list and update
+			 * the number of active PTDs
+			 */
+			ptd_map_buff->map_list[itd_index].lasttd = 0;
+			ptd_map_buff->total_ptds++;
+
+			ptd_map_buff->active_ptd_bitmap |=
+				(bitmap << itd_index);
+			ptd_map_buff->pending_ptd_bitmap |= (bitmap << itd_index);	
+			break;
+		}		/* if(ptd_map_buff->map_list[itd_index].state == TD_PTD_NEW) */
+	}			/* for(itd_index = 0; itd_index < max_ptds; itd_index++) */
+	return;
+}				/* phcd_iso_get_itd_ptd_index */
+
+/*
+ * phcd_iso_sitd_free_list - Free memory used by SITDs in SITD list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long status
+ *  - Variable provided by the calling routine that contain the status of the
+ *        SITD list.
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Cleaning up memory used by each SITD in the SITD list
+ */
void
phcd_iso_sitd_free_list(phci_hcd * hcd, struct urb *urb, unsigned long status)
{
	td_ptd_map_buff_t *ptd_map_buff;
	struct ehci_sitd *first_sitd, *next_sitd, *sitd;
	td_ptd_map_t *td_ptd_map;

	/* Local variable initialization */
	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
	first_sitd = (struct ehci_sitd *) urb->hcpriv;
	sitd = first_sitd;

	/*
	 * Check if there is only one SITD, if so immediately
	 * go and clean it up.
	 */
	if (sitd->hw_next == EHCI_LIST_END) {
		if (sitd->sitd_index != TD_PTD_INV_PTD_INDEX) {
			td_ptd_map = &ptd_map_buff->map_list[sitd->sitd_index];
			td_ptd_map->state = TD_PTD_NEW;
		}

		/* -ENOMEM means the payload allocation itself failed, so
		 * there is no payload memory to release. */
		if (status != -ENOMEM) {
			phci_hcd_mem_free(&sitd->mem_addr);
		}

		list_del(&sitd->sitd_list);
		qha_free(qha_cache, sitd);

		urb->hcpriv = 0;
		return;
	}
	/* if(sitd->hw_next == EHCI_LIST_END) */
	/*
	 * Repeatedly unlink and free the SECOND node of the list; the head
	 * (sitd == first_sitd) is never advanced, its hw_next is rewritten
	 * each pass to skip the node just freed. The head itself is freed
	 * after the loop.
	 *
	 * NOTE(review): hw_next is cast straight to a CPU pointer here,
	 * while the itd variant wraps it in le32_to_cpu() — confirm which
	 * byte order hw_next is stored in.
	 */
	while (1) {
		/* Get the SITD following the head SITD */
		next_sitd = (struct ehci_sitd *) (sitd->hw_next);
		if (next_sitd->hw_next == EHCI_LIST_END) {
			/*
			 * If the next SITD is the end of the list, check if space have
			 * already been allocated in the PTD array.
			 */
			if (next_sitd->sitd_index != TD_PTD_INV_PTD_INDEX) {
				/* Free up its allocation */
				td_ptd_map =
					&ptd_map_buff->map_list[next_sitd->
					sitd_index];
				td_ptd_map->state = TD_PTD_NEW;
			}

			/*
			 * If the error is not about memory allocation problems, then
			 * free up the memory used.
			 * NOTE(review): this error text prints on the
			 * non-ENOMEM path, which reads backwards — verify.
			 */
			if (status != -ENOMEM) {
				iso_dbg(ISO_DBG_ERR,
					"[phcd_iso_itd_free_list Error]: Memory not available\n");
				phci_hcd_mem_free(&next_sitd->mem_addr);
			}

			/* Remove from the SITD list and free up space allocated for SITD structure */
			list_del(&next_sitd->sitd_list);
			qha_free(qha_cache, next_sitd);
			break;
		}

		/* if(next_itd->hw_next == EHCI_LIST_END) */
		/*
		 * If SITD is not the end of the list, it only means that it already have everything allocated
		 * and there is no need to check which procedure failed. So just free all resourcs immediately
		 */
		sitd->hw_next = next_sitd->hw_next;

		td_ptd_map = &ptd_map_buff->map_list[next_sitd->sitd_index];
		td_ptd_map->state = TD_PTD_NEW;
		phci_hcd_mem_free(&next_sitd->mem_addr);
		list_del(&next_sitd->sitd_list);
		qha_free(qha_cache, next_sitd);
	}			/*  while(1) */

	/* Now work on the head SITD, it is the last one processed. */
	if (first_sitd->sitd_index != TD_PTD_INV_PTD_INDEX) {
		td_ptd_map = &ptd_map_buff->map_list[first_sitd->sitd_index];
		td_ptd_map->state = TD_PTD_NEW;
	}

	if (status != -ENOMEM) {
		iso_dbg(ISO_DBG_ERR,
			"[phcd_iso_itd_free_list Error]: No memory\n");
		phci_hcd_mem_free(&first_sitd->mem_addr);
	}

	list_del(&first_sitd->sitd_list);
	qha_free(qha_cache, first_sitd);
	urb->hcpriv = 0;
	return;
}
+
+/*
+ * phcd_iso_itd_free_list - Free memory used by ITDs in ITD list
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long status
+ *  - Variable provided by the calling routine that contain the status of the
+ *        ITD list.
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Cleaning up memory used by each ITD in the ITD list
+ */
void
phcd_iso_itd_free_list(phci_hcd * hcd, struct urb *urb, unsigned long status)
{
	td_ptd_map_buff_t *ptd_map_buff;
	struct ehci_itd *first_itd, *next_itd, *itd;
	td_ptd_map_t *td_ptd_map;

	/* Local variable initialization */
	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
	first_itd = (struct ehci_itd *) urb->hcpriv;
	itd = first_itd;

	/*
	 * Check if there is only one ITD, if so immediately
	 * go and clean it up.
	 */
	if (itd->hw_next == EHCI_LIST_END) {
		if (itd->itd_index != TD_PTD_INV_PTD_INDEX) {
			td_ptd_map = &ptd_map_buff->map_list[itd->itd_index];
			td_ptd_map->state = TD_PTD_NEW;
		}

		/* -ENOMEM means the payload allocation itself failed, so
		 * there is no payload memory to release. */
		if (status != -ENOMEM) {
			phci_hcd_mem_free(&itd->mem_addr);
		}

		list_del(&itd->itd_list);
		qha_free(qha_cache, itd);

		urb->hcpriv = 0;
		return;
	}
	/* if(itd->hw_next == EHCI_LIST_END) */
	/*
	 * Repeatedly unlink and free the SECOND node of the list; the head
	 * (itd == first_itd) is never advanced, its hw_next is rewritten
	 * each pass to skip the node just freed. The head itself is freed
	 * after the loop.
	 *
	 * NOTE(review): hw_next is converted with le32_to_cpu() here, while
	 * the sitd variant casts it directly — confirm which byte order
	 * hw_next is stored in.
	 */
	while (1) {
		/* Get the ITD following the head ITD */
		next_itd = (struct ehci_itd *) le32_to_cpu(itd->hw_next);
		if (next_itd->hw_next == EHCI_LIST_END) {
			/*
			 * If the next ITD is the end of the list, check if space have
			 * already been allocated in the PTD array.
			 */
			if (next_itd->itd_index != TD_PTD_INV_PTD_INDEX) {
				/* Free up its allocation */
				td_ptd_map =
					&ptd_map_buff->map_list[next_itd->
					itd_index];
				td_ptd_map->state = TD_PTD_NEW;
			}

			/*
			 * If the error is not about memory allocation problems, then
			 * free up the memory used.
			 * NOTE(review): this error text prints on the
			 * non-ENOMEM path, which reads backwards — verify.
			 */
			if (status != -ENOMEM) {
				iso_dbg(ISO_DBG_ERR,
					"[phcd_iso_itd_free_list Error]: Memory not available\n");
				phci_hcd_mem_free(&next_itd->mem_addr);
			}

			/* Remove from the ITD list and free up space allocated for ITD structure */
			list_del(&next_itd->itd_list);
			qha_free(qha_cache, next_itd);
			break;
		}

		/* if(next_itd->hw_next == EHCI_LIST_END) */
		/*
		 * If ITD is not the end of the list, it only means that it already have everything allocated
		 * and there is no need to check which procedure failed. So just free all resourcs immediately
		 */
		itd->hw_next = next_itd->hw_next;

		td_ptd_map = &ptd_map_buff->map_list[next_itd->itd_index];
		td_ptd_map->state = TD_PTD_NEW;
		phci_hcd_mem_free(&next_itd->mem_addr);
		list_del(&next_itd->itd_list);
		qha_free(qha_cache, next_itd);
	}			/*  while(1) */

	/* Now work on the head ITD, it is the last one processed. */
	if (first_itd->itd_index != TD_PTD_INV_PTD_INDEX) {
		td_ptd_map = &ptd_map_buff->map_list[first_itd->itd_index];
		td_ptd_map->state = TD_PTD_NEW;
	}

	if (status != -ENOMEM) {
		iso_dbg(ISO_DBG_ERR,
			"[phcd_iso_itd_free_list Error]: No memory\n");
		phci_hcd_mem_free(&first_itd->mem_addr);
	}

	list_del(&first_itd->itd_list);
	qha_free(qha_cache, first_itd);
	urb->hcpriv = 0;
	return;
}				/* phcd_iso_itd_free_list */
+
/*
 * phcd_clean_iso_qh - tear down all ISO PTD slots still owned by a qh.
 *
 * Skips the qh's PTD locations in the controller's ISO skip map so the
 * hardware stops processing them, releases the qh's common payload memory
 * (when COMMON_MEMORY), then walks the 16 possible slot bits and frees any
 * SITD (full/low speed) or ITD (high speed) still mapped there.
 */
void
phcd_clean_iso_qh(phci_hcd * hcd, struct ehci_qh *qh)
{
	unsigned int i = 0;
	u16 skipmap=0;
	struct ehci_sitd *sitd;
	struct ehci_itd *itd;

	iso_dbg(ISO_DBG_ERR, "phcd_clean_iso_qh \n");
	if (!qh){
		return;
	}
	/* Tell the controller to skip every PTD location this qh owns. */
	skipmap = isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
	skipmap |= qh->periodic_list.ptdlocation;
	isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
#ifdef COMMON_MEMORY
	phci_hcd_mem_free(&qh->memory_addr);
#endif
	/* Visit each of the 16 slot bits; stop early once all location
	 * bits have been cleared. */
	for (i = 0; i < 16 && qh->periodic_list.ptdlocation; i++) {
		if (qh->periodic_list.ptdlocation & (0x1 << i)) {
			printk("[phcd_clean_iso_qh] : %x \n",
				qh->periodic_list.high_speed);

			qh->periodic_list.ptdlocation &= ~(0x1 << i);

			if (qh->periodic_list.high_speed == 0) {
				/* Full/low speed: slot holds a SITD. */
				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
					map_list[i].sitd) {

					printk("SITD found \n");
					sitd = td_ptd_map_buff
						[TD_PTD_BUFF_TYPE_ISTL].
						map_list[i].sitd;
					/* NOTE(review): SITD payload is freed
					 * only when !COMMON_MEMORY, but the
					 * ITD branch below frees only when
					 * COMMON_MEMORY — confirm this
					 * asymmetry is intentional. */
#ifndef COMMON_MEMORY
					phci_hcd_mem_free(&sitd->mem_addr);
#endif
					/*
					if(sitd->urb)
						urb=sitd->urb;
					*/
					sitd->urb = NULL;
					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
						map_list[i].state = TD_PTD_NEW;
					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
						map_list[i].sitd = NULL;
					qha_free(qha_cache, sitd);
				}
			} else {
				/* High speed: slot holds an ITD. */
				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
					map_list[i].itd) {

					printk("ITD found \n");
					itd = td_ptd_map_buff
						[TD_PTD_BUFF_TYPE_ISTL].
						map_list[i].itd;
#ifdef COMMON_MEMORY
					phci_hcd_mem_free(&itd->mem_addr);
#endif

					/*
					if(itd->urb)
					urb=itd->urb;
					*/
					itd->urb = NULL;
					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
						map_list[i].state = TD_PTD_NEW;
					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
						map_list[i].itd = NULL;
					qha_free(qha_cache, itd);
				}
			}

		}
	}


}
+
+
+/*
+ * phcd_store_urb_pending - store requested URB into a queue
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long *status
+ *  - Variable provided by the calling routine that will contain the status of the
+ *        phcd_submit_iso actions
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Store URB into a queue
+ *  - If there are enough free PTD slots, repair the PTDs
+ */
+void phcd_clean_periodic_ep(void){
+	periodic_ep[0] = NULL;
+	periodic_ep[1] = NULL;
+}
+
+/*
+ * phcd_clean_urb_pending - release PTD slots and sitd/itd structures
+ * still associated with an ISO urb that is being torn down.
+ *
+ * Drops the urb's endpoint from the periodic_ep cache, marks its PTD
+ * locations as skipped in the controller's ISO skip map, frees the
+ * payload memory and returns every mapped sitd/itd to the qha cache.
+ * Always returns 0.
+ */
+int
+phcd_clean_urb_pending(phci_hcd * hcd, struct urb *urb)
+{
+	unsigned int i = 0;
+	struct ehci_qh *qhead;
+	struct ehci_sitd *sitd;
+	struct ehci_itd *itd;
+	u16 skipmap = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_clean_urb_pending] : Enter\n");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead=urb->hcpriv;
+	if (periodic_ep[0] == qhead->ep) {
+		periodic_ep[0] = NULL;
+	}
+
+	if (periodic_ep[1] == qhead->ep) {
+		periodic_ep[1] = NULL;
+	}
+#else
+	qhead = urb->ep->hcpriv;
+	if (periodic_ep[0] == urb->ep) {
+		periodic_ep[0] = NULL;
+	}
+
+	if (periodic_ep[1] == urb->ep) {
+		periodic_ep[1] = NULL;
+	}
+#endif
+	if (!qhead) {
+		return 0;
+	}
+	/* Tell the controller to skip every PTD this urb occupies. */
+	skipmap = isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+	skipmap |= qhead->periodic_list.ptdlocation;
+	isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+#ifdef COMMON_MEMORY
+	phci_hcd_mem_free(&qhead->memory_addr);
+#endif
+
+	for (i = 0; i < 16 && qhead->periodic_list.ptdlocation; i++) {
+
+		/*
+		 * Test the bit BEFORE clearing it.  The original cleared
+		 * the location bit first and then tested it, so the
+		 * sitd/itd cleanup below could never execute and the map
+		 * entries leaked.
+		 */
+		if (qhead->periodic_list.ptdlocation & (0x1 << i)) {
+
+			printk("[phcd_clean_urb_pending] : %x \n",
+				qhead->periodic_list.high_speed);
+
+			if (qhead->periodic_list.high_speed == 0) {
+				/* full speed: the slot holds an sitd */
+				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+					map_list[i].sitd) {
+
+					sitd = td_ptd_map_buff
+						[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].sitd;
+#ifndef COMMON_MEMORY
+					phci_hcd_mem_free(&sitd->mem_addr);
+#endif
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].state = TD_PTD_NEW;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].sitd = NULL;
+					qha_free(qha_cache, sitd);
+				}
+			} else {
+				/* high speed: the slot holds an itd */
+				if (td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+					map_list[i].itd) {
+
+					itd = td_ptd_map_buff
+						[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].itd;
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&itd->mem_addr);
+#endif
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].state = TD_PTD_NEW;
+					td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL].
+						map_list[i].itd = NULL;
+					qha_free(qha_cache, itd);
+				}
+			}
+		}
+
+		qhead->periodic_list.ptdlocation &= ~(0x1 << i);
+	}
+	INIT_LIST_HEAD(&qhead->periodic_list.sitd_itd_head);
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_clean_urb_pending] : Exit\n");
+	return 0;
+}
+
+
+
+/*
+ * phcd_store_urb_pending - queue an ISO urb (or, when urb == NULL, pick
+ * up the next pending urb for endpoint slot `index`) and hand it to
+ * phcd_submit_iso / pehci_hcd_iso_schedule when it is at the head of
+ * its endpoint queue.
+ *
+ * hcd    - main host controller driver structure
+ * index  - 1-based pending-endpoint slot, used only when urb == NULL
+ * urb    - new request to queue, or NULL to service a queued one
+ * status - out: set to -1 on queueing errors, 0 when scheduling is
+ *          deferred for lack of PTD space
+ *
+ * Returns 0 normally, -1 when the urb was only queued or on error.
+ */
+int
+phcd_store_urb_pending(phci_hcd * hcd, int index, struct urb *urb, int *status)
+{
+	unsigned int uiNumofPTDs = 0;
+	unsigned int uiNumofSlots = 0;
+	unsigned int uiMult = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	/*
+	 * The original code passed ( unsigned long *) &status -- the
+	 * address of the local `int *` pointer itself -- into
+	 * phcd_submit_iso, which then wrote its result over the pointer
+	 * variable instead of through it.  Use a real unsigned long.
+	 * Note the submit result is still not propagated to *status,
+	 * matching the original's observable behavior.
+	 */
+	unsigned long iso_status = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_store_urb_pending] : Enter\n");
+	if (urb != NULL) {
+		if (periodic_ep[0] != urb->ep && periodic_ep[1] != urb->ep) {
+			if (periodic_ep[0] == NULL) {
+				periodic_ep[0] = urb->ep;
+			} else if (periodic_ep[1] == NULL) {
+				printk("storing in 1\n");
+				periodic_ep[1] = urb->ep;
+				usb_hcd_link_urb_to_ep(&(hcd->usb_hcd), urb);
+				return -1;
+			} else {
+				/* at most two concurrent ISO endpoints */
+				iso_dbg(ISO_DBG_ERR,
+					"Support only 2 ISO endpoints simultaneously \n");
+				*status = -1;
+				return -1;
+			}
+		}
+		usb_hcd_link_urb_to_ep(&(hcd->usb_hcd), urb);
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : Add an urb into gstUrb_pending array at index : %d\n",
+			giUrbCount);
+		giUrbCount++;
+	} else {
+		iso_dbg(ISO_DBG_ENTRY,
+			"[phcd_store_urb_pending] : getting urb from list \n");
+		if (index > 0 && index < 2) {
+			if (periodic_ep[index - 1]) {
+				urb = container_of(periodic_ep[index - 1]->
+					urb_list.next, struct urb,
+					urb_list);
+			}
+		} else {
+			iso_dbg(ISO_DBG_ERR, " Unknown enpoints Error \n");
+			*status = -1;
+			return -1;
+		}
+	}
+
+	/* Schedule only when this urb is at the head of its endpoint queue. */
+	if ((urb != NULL && (urb->ep->urb_list.next == &urb->urb_list))) {
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : periodic_sched : %d\n",
+			hcd->periodic_sched);
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : number_of_packets : %d\n",
+			urb->number_of_packets);
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : Maximum PacketSize : %d\n",
+			usb_maxpacket(urb->dev,urb->pipe, usb_pipeout(urb->pipe)));
+
+		if (urb->dev->speed == USB_SPEED_FULL) {	/*for FULL SPEED */
+			/*
+			 * The original if(1) wrapper (a disabled free-slot
+			 * check) and the always-false nested version #if that
+			 * textually injected a declaration into the argument
+			 * list have been removed; this file path is only
+			 * compiled for kernels >= 2.6.24.
+			 */
+			if (phcd_submit_iso(hcd, urb, &iso_status) == 0) {
+				pehci_hcd_iso_schedule(hcd, urb);
+			}
+		} else if (urb->dev->speed == USB_SPEED_HIGH) {	/*for HIGH SPEED */
+			/*number of slots for 1 PTD */
+			uiNumofSlots = NUMMICROFRAME / urb->interval;
+			/*max packets size */
+			uiMult = usb_maxpacket(urb->dev, urb->pipe,
+					usb_pipeout(urb->pipe));
+			/*mult */
+			uiMult = 1 + ((uiMult >> 11) & 0x3);
+			/*number of PTDs need to schedule for this PTD */
+			uiNumofPTDs =
+				(urb->number_of_packets / uiMult) /
+				uiNumofSlots;
+			if ((urb->number_of_packets / uiMult) % uiNumofSlots != 0) {
+				uiNumofPTDs += 1;
+			}
+
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_store_urb_pending] : interval : %d\n",
+				urb->interval);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_store_urb_pending] : uiMult : %d\n",
+				uiMult);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_store_urb_pending] : uiNumofPTDs : %d\n",
+				uiNumofPTDs);
+
+			if (hcd->periodic_sched <=
+				MAX_PERIODIC_SIZE - uiNumofPTDs) {
+
+				if (phcd_submit_iso(hcd, urb,
+						&iso_status) == 0) {
+					pehci_hcd_iso_schedule(hcd, urb);
+				}
+			} else {
+				/* not enough PTD space: defer, report 0 */
+				*status = 0;
+			}
+		}
+	} else {
+		iso_dbg(ISO_DBG_DATA,
+			"[phcd_store_urb_pending] : nextUrb is NULL\n");
+	}
+#endif
+	iso_dbg(ISO_DBG_ENTRY, "[phcd_store_urb_pending] : Exit\n");
+	return 0;
+}
+
+/*
+ * phcd_submit_iso - ISO transfer URB submit routine
+ *
+ * phci_hcd *hcd
+ *      - Main host controller driver structure
+ * struct urb *urb
+ *  - USB Request Block, contains information regarding the type and how much data
+ *    is requested to be transferred.
+ * unsigned long *status
+ *  - Variable provided by the calling routine that will contain the status of the
+ *        phcd_submit_iso actions
+ *
+ * API Description
+ * This is mainly responsible for:
+ *  - Allocating memory for the endpoint information structure (pQHead_st)
+ *  - Requesting for bus bandwidth from the USB core
+ *  - Allocating and initializing Payload and PTD memory
+ */
+unsigned long
+phcd_submit_iso(phci_hcd * hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+		struct urb *urb, unsigned long *status)
+{
+	struct _periodic_list *periodic_list;
+	struct hcd_dev *dev;
+	struct ehci_qh *qhead;
+	struct ehci_itd *itd, *prev_itd;
+	struct ehci_sitd *sitd, *prev_sitd;
+	struct list_head *sitd_itd_list;
+	unsigned long ep_in, max_pkt, mult;
+	unsigned long bus_time, high_speed, start_frame;
+	unsigned long temp;
+	unsigned long packets;
+	/*for high speed device */
+	unsigned int iMicroIndex = 0;
+	unsigned int iNumofSlots = 0;
+	unsigned int iNumofPTDs = 0;
+	unsigned int iPTDIndex = 0;
+	unsigned int iNumofPks = 0;
+	int iPG = 0;
+	dma_addr_t buff_dma;
+	unsigned long length, offset;
+	int i = 0;
+
+	iso_dbg(ISO_DBG_ENTRY, "phcd_submit_iso Entry\n");
+
+	*status = 0;
+	/* Local variable initialization */
+	high_speed = 0;
+	periodic_list = &hcd->periodic_list[0];
+	dev = (struct hcd_dev *) urb->hcpriv;
+	urb->hcpriv = (void *) 0;
+	prev_itd = (struct ehci_itd *) 0;
+	itd = (struct ehci_itd *) 0;
+	prev_sitd = (struct ehci_sitd *) 0;
+	sitd = (struct ehci_sitd *) 0;
+	start_frame = 0;
+
+	ep_in = usb_pipein(urb->pipe);
+
+	/*
+	 * Take the endpoint, if there is still no memory allocated
+	 * for it allocate some and indicate this is for ISO.
+	 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead = ep->hcpriv;
+#else
+	qhead = urb->ep->hcpriv;
+#endif
+	if (!qhead) {
+
+		qhead = phci_hcd_qh_alloc(hcd);
+		if (qhead == 0) {
+			iso_dbg(ISO_DBG_ERR,
+				"[phcd_submit_iso Error]: Not enough memory\n");
+			return -ENOMEM;
+		}
+
+		qhead->type = TD_PTD_BUFF_TYPE_ISTL;
+		INIT_LIST_HEAD(&qhead->periodic_list.sitd_itd_head);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		qhead->ep=ep;
+		ep->hcpriv = qhead;
+		urb->hcpriv=qhead;
+#else
+		urb->ep->hcpriv = qhead;
+#endif
+	}
+
+		urb->hcpriv=qhead;
+
+	/* if(!qhead) */
+	/*
+	 * Get the number of additional packets that the endpoint can support during a
+	 * single microframe.
+	 */
+	max_pkt = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+
+	/*
+	 * We need to add 1 since our Multi starts with 1 instead of the USB specs defined
+	 * zero (0).
+	 */
+	mult = 1 + ((max_pkt >> 11) & 0x3);
+
+	/* This is the actual length per for the whole transaction */
+	max_pkt *= mult;
+
+	/* Check bandwidth */
+	bus_time = 0;
+
+	if (urb->dev->speed == USB_SPEED_FULL) {
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		if (urb->bandwidth == 0) {
+			bus_time = usb_check_bandwidth(urb->dev, urb);
+			if (bus_time < 0) {
+				usb_dec_dev_use(urb->dev);
+				*status = bus_time;
+				return *status;
+			}
+		}
+#else
+#endif
+	} else {			/*HIGH SPEED */
+
+		high_speed = 1;
+
+		/*
+		 * Calculate bustime as dictated by the USB Specs Section 5.11.3
+		 * for high speed ISO
+		 */
+		bus_time = 633232L;
+		bus_time +=
+			(2083L * ((3167L + BitTime(max_pkt) * 1000L) / 1000L));
+		bus_time = bus_time / 1000L;
+		bus_time += BW_HOST_DELAY;
+		bus_time = NS_TO_US(bus_time);
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	usb_claim_bandwidth(urb->dev, urb, bus_time, 1);
+#else
+#endif
+
+	qhead->periodic_list.ptdlocation = 0;
+	/* Initialize the start split (ssplit) and complete split (csplit) variables of qhead */
+	if (phcd_iso_scheduling_info(hcd, qhead, max_pkt, high_speed, ep_in) <
+		0) {
+
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_submit_iso Error]: No space available\n");
+		return -ENOSPC;
+	}
+
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		iNumofSlots = NUMMICROFRAME / urb->interval;
+		/*number of PTDs need to schedule for this PTD */
+		iNumofPTDs = (urb->number_of_packets / mult) / iNumofSlots;
+		if ((urb->number_of_packets / mult) % iNumofSlots != 0){
+			/*get remainder */
+			iNumofPTDs += 1;
+		}
+	}
+	if (urb->iso_frame_desc[0].offset != 0) {
+		*status = -EINVAL;
+		iso_dbg(ISO_DBG_ERR,
+			"[phcd_submit_iso Error]: Invalid value\n");
+		return *status;
+	}
+	if (1) {
+		/* Calculate the current frame number */
+		if (0){
+			if (urb->transfer_flags & URB_ISO_ASAP){
+				start_frame =
+					isp1763_reg_read16(hcd->dev,
+						hcd->regs.frameindex,
+						start_frame);
+			} else {
+				start_frame = urb->start_frame;
+			}
+		}
+
+		start_frame =
+			isp1763_reg_read16(hcd->dev, hcd->regs.frameindex,
+				start_frame);
+
+		/* The only valid bits of the frame index is the lower 14 bits. */
+
+		/*
+		 * Remove the count for the micro frame (uSOF) and just leave the
+		 * count for the frame (SOF). Since 1 SOF is equal to 8 uSOF then
+		 * shift right by three is like dividing it by 8 (each shift is divide by two)
+		 */
+		start_frame >>= 3;
+		if (urb->dev->speed != USB_SPEED_HIGH){
+			start_frame += 1;
+		}else{
+			start_frame += 2;
+		}
+		start_frame = start_frame & PTD_FRAME_MASK;
+		temp = start_frame;
+		if (urb->dev->speed != USB_SPEED_HIGH) {
+			qhead->next_uframe =
+				start_frame + urb->number_of_packets;
+		} else {
+			qhead->next_uframe = start_frame + iNumofPTDs;
+		}
+		qhead->next_uframe %= PTD_FRAME_MASK;
+		iso_dbg(ISO_DBG_DATA, "[phcd_submit_iso]: startframe = %ld\n",
+			start_frame);
+	} else {
+		/*
+		 * The periodic frame list size is only 32 elements deep, so we need
+		 * the frame index to be less than or equal to 32 (actually 31 if we
+		 * start from 0)
+		 */
+		start_frame = (qhead->next_uframe) % PTD_FRAME_MASK;
+		if (urb->dev->speed != USB_SPEED_HIGH){
+			qhead->next_uframe =
+				start_frame + urb->number_of_packets;
+				iNumofPTDs=urb->number_of_packets;
+		} else {
+			qhead->next_uframe = start_frame + iNumofPTDs;
+		}
+
+		qhead->next_uframe %= PTD_FRAME_MASK;
+	}
+
+
+	iso_dbg(ISO_DBG_DATA, "[phcd_submit_iso]: Start frame index: %ld\n",
+		start_frame);
+	iso_dbg(ISO_DBG_DATA, "[phcd_submit_iso]: Max packet: %d\n",
+		(int) max_pkt);
+
+#ifdef COMMON_MEMORY
+	if(urb->number_of_packets>8 && urb->dev->speed!=USB_SPEED_HIGH)
+		phci_hcd_mem_alloc(8*max_pkt, &qhead->memory_addr, 0);
+	else
+	phci_hcd_mem_alloc(urb->transfer_buffer_length, &qhead->memory_addr, 0);
+	if (urb->transfer_buffer_length && ((qhead->memory_addr.phy_addr == 0)
+		|| (qhead->memory_addr.virt_addr ==0))) {
+		iso_dbg(ISO_DBG_ERR,
+			"[URB FILL MEMORY Error]: No payload memory available\n");
+		return -ENOMEM;
+	}
+#endif
+
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		iNumofPks = urb->number_of_packets;
+		qhead->totalptds=urb->number_of_packets;
+		qhead->actualptds=0;
+
+		/* Make as many tds as number of packets */
+		for (packets = 0; packets < urb->number_of_packets; packets++) {
+			/*
+			 * Allocate memory for the SITD data structure and initialize it.
+			 *
+			 * This data structure follows the format of the SITD
+			 * structure defined by the EHCI standard on the top part
+			 * but also contains specific elements in the bottom
+			 * part
+			 */
+			sitd = kmalloc(sizeof(*sitd), GFP_ATOMIC);
+			if (!sitd) {
+				*status = -ENOMEM;
+				if (((int)(qhead->next_uframe -
+					urb->number_of_packets)) < 0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;
+
+				}
+				qhead->next_uframe -= urb->number_of_packets;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_sitd_free_list(hcd, urb,
+						*status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: No memory available\n");
+				return *status;
+			}
+
+			memset(sitd, 0, sizeof(struct ehci_sitd));
+
+			INIT_LIST_HEAD(&sitd->sitd_list);
+
+			sitd->sitd_dma = (u32) (sitd);
+			sitd->urb = urb;
+
+			/*
+			 * Indicate that this SITD is the last in the list.
+			 *
+			 * Also set the itd_index to TD_PTD_INV_PTD_INDEX
+			 * (0xFFFFFFFF). This would indicate when we allocate
+			 * a PTD that this SITD did not have a PTD allocated
+			 * before.
+			 */
+
+			sitd->hw_next = EHCI_LIST_END;
+			sitd->sitd_index = TD_PTD_INV_PTD_INDEX;
+
+			/* This SITD will go into this frame */
+			sitd->framenumber = start_frame + packets;
+			sitd->start_frame = temp + packets;
+
+			/* Number of the packet */
+			sitd->index = packets;
+
+			sitd->framenumber = sitd->framenumber & PTD_FRAME_MASK;
+			sitd->ssplit = qhead->ssplit;
+			sitd->csplit = qhead->csplit;
+
+			/* Initialize the following elements of the ITS structure
+			 *      > sitd->length = length;                 -- the size of the request
+			 *      > sitd->multi = multi;                   -- the number of transactions for
+			 *                                         this EP per micro frame
+			 *      > sitd->hw_bufp[0] = buf_dma;    -- The base address of the buffer where
+			 *                                         to put the data (this base address was
+			 *                                         the buffer provided plus the offset)
+			 * And then, allocating memory from the PAYLOAD memory area, where the data
+			 * coming from the requesting party will be placed or data requested by the
+			 * requesting party will be retrieved when it is available.
+			 */
+			*status = phcd_iso_sitd_fill(hcd, sitd, urb, packets);
+
+			if (*status != 0) {
+				if (((int)(qhead->next_uframe -
+					urb->number_of_packets)) < 0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe +
+						PTD_PERIODIC_SIZE;
+				}
+				qhead->next_uframe -= urb->number_of_packets;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_sitd_free_list(hcd, urb,
+						*status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: Error in filling up SITD\n");
+				return *status;
+			}
+
+			/*
+			 * If this SITD is not the head/root SITD, link this SITD to the SITD
+			 * that came before it.
+			 */
+			if (prev_sitd) {
+				prev_sitd->hw_next = (u32) (sitd);
+			}
+
+			prev_sitd = sitd;
+
+			if(packets<8){  //bcs of memory constraint , we use only first 8 PTDs if number_of_packets is more than 8.
+			/*
+			 * Allocate an ISO PTD from the ISO PTD map list and
+			 * set the equivalent bit of the allocated PTD to active
+			 * in the bitmap so that this PTD will be included into
+			 * the periodic schedule
+			 */
+			phcd_iso_get_sitd_ptd_index(hcd, sitd);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso]: SITD index %d\n",
+				sitd->sitd_index);
+
+			/*if we dont have any space left */
+			if (sitd->sitd_index == TD_PTD_INV_PTD_INDEX) {
+				*status = -ENOSPC;
+				if (((int) (qhead->next_uframe -
+					urb->number_of_packets)) < 0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;
+				}
+				qhead->next_uframe -= urb->number_of_packets;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_sitd_free_list(hcd, urb,
+						*status);
+				}
+				return *status;
+			}
+					qhead->actualptds++;
+			}
+			/* Insert this td into the periodic list */
+
+			sitd_itd_list = &qhead->periodic_list.sitd_itd_head;
+			list_add_tail(&sitd->sitd_list, sitd_itd_list);
+			qhead->periodic_list.high_speed = 0;
+			if(sitd->sitd_index!=TD_PTD_INV_PTD_INDEX)
+			qhead->periodic_list.ptdlocation |=
+				0x1 << sitd->sitd_index;
+			/* Inidcate that a new SITD have been scheduled */
+			hcd->periodic_sched++;
+
+			/* Determine if there are any SITD scheduled before this one. */
+			if (urb->hcpriv == 0){
+				urb->hcpriv = sitd;
+			}
+		}	/* for(packets = 0; packets... */
+	} else if (urb->dev->speed == USB_SPEED_HIGH) {
+		iNumofPks = iNumofPTDs;
+
+		packets = 0;
+		iPTDIndex = 0;
+		while (packets < urb->number_of_packets) {
+			iNumofSlots = NUMMICROFRAME / urb->interval;
+			/*
+			 * Allocate memory for the ITD data structure and initialize it.
+			 *
+			 * This data structure follows the format of the ITD
+			 * structure defined by the EHCI standard on the top part
+			 * but also contains specific elements in the bottom
+			 * part
+			 */
+			itd = kmalloc(sizeof(*itd), GFP_ATOMIC);
+			if (!itd) {
+				*status = -ENOMEM;
+				if(((int) (qhead->next_uframe - iNumofPTDs))<0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe +
+						PTD_PERIODIC_SIZE;
+				}
+				qhead->next_uframe -= iNumofPTDs;
+
+				/* Handle ITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_itd_free_list(hcd, urb,
+							       *status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: No memory available\n");
+				return *status;
+			}
+			memset(itd, 0, sizeof(struct ehci_itd));
+
+			INIT_LIST_HEAD(&itd->itd_list);
+
+			itd->itd_dma = (u32) (itd);
+			itd->urb = urb;
+			/*
+			 * Indicate that this ITD is the last in the list.
+			 *
+			 * Also set the itd_index to TD_PTD_INV_PTD_INDEX
+			 * (0xFFFFFFFF). This would indicate when we allocate
+			 * a PTD that this SITD did not have a PTD allocated
+			 * before.
+			 */
+
+			itd->hw_next = EHCI_LIST_END;
+			itd->itd_index = TD_PTD_INV_PTD_INDEX;
+			/* This ITD will go into this frame */
+			itd->framenumber = start_frame + iPTDIndex;
+			/* Number of the packet */
+			itd->index = packets;
+
+			/* NOTE(review): the sitd path masks frame numbers with
+			 * PTD_FRAME_MASK; the bare 0x1F mask here looks
+			 * inconsistent -- confirm the intended ITD frame mask. */
+			itd->framenumber = itd->framenumber & 0x1F;
+
+			itd->ssplit = qhead->ssplit;
+			itd->csplit = qhead->csplit;
+
+			/*caculate the number of packets for this itd */
+			itd->num_of_pkts = iNumofSlots * mult;
+			/*for the case , urb number_of_packets is less than (number of slot*mult*x times) */
+			if (itd->num_of_pkts >= urb->number_of_packets)
+			{
+				itd->num_of_pkts = urb->number_of_packets;
+			}
+			else {
+				if (itd->num_of_pkts >
+					urb->number_of_packets - packets){
+					itd->num_of_pkts =
+						urb->number_of_packets -
+						packets;
+				}
+			}
+
+			/* Initialize the following elements of the ITS structure
+			 *      > itd->length = length;                 -- the size of the request
+			 *      > itd->multi = multi;                   -- the number of transactions for
+			 *                                         this EP per micro frame
+			 *      > itd->hw_bufp[0] = buf_dma;    -- The base address of the buffer where
+			 *                                         to put the data (this base address was
+			 *                                         the buffer provided plus the offset)
+			 * And then, allocating memory from the PAYLOAD memory area, where the data
+			 * coming from the requesting party will be placed or data requested by the
+			 * requesting party will be retrieved when it is available.
+			 */
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso] packets index = %ld itd->num_of_pkts = %d\n",
+				packets, itd->num_of_pkts);
+			*status =
+				phcd_iso_itd_fill(hcd, itd, urb, packets,
+						itd->num_of_pkts);
+			if (*status != 0) {
+				if (((int) (qhead->next_uframe - iNumofPTDs)) <
+					0) {
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;	/*plus max PTDs*/
+				}
+				qhead->next_uframe -= iNumofPTDs;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_itd_free_list(hcd, urb,
+						*status);
+				}
+				iso_dbg(ISO_DBG_ERR,
+					"[phcd_submit_iso Error]: Error in filling up ITD\n");
+				return *status;
+			}
+
+			iPG = 0;
+			iMicroIndex = 0;
+			while (iNumofSlots > 0) {
+				offset = urb->iso_frame_desc[packets].offset;
+				/* Buffer for this packet */
+				buff_dma =
+					(u32) ((unsigned char *) urb->
+						transfer_buffer + offset);
+
+				/*for the case mult is 2 or 3 */
+				length = 0;
+				for (i = packets; i < packets + mult; i++) {
+					length += urb->iso_frame_desc[i].length;
+				}
+				itd->hw_transaction[iMicroIndex] =
+					EHCI_ISOC_ACTIVE | (length &
+					EHCI_ITD_TRANLENGTH)
+					<< 16 | iPG << 12 | buff_dma;
+
+				/* presumably steps iPG to the next buffer page
+				 * when the payload address changes -- verify */
+				if (itd->hw_bufp[iPG] != buff_dma){
+					itd->hw_bufp[++iPG] = buff_dma;
+				}
+
+				iso_dbg(ISO_DBG_DATA,
+					"[%s] offset : %ld buff_dma : 0x%08x length : %ld\n",
+					__FUNCTION__, offset,
+					(unsigned int) buff_dma, length);
+
+				itd->ssplit |= 1 << iMicroIndex;
+				packets++;
+				iMicroIndex += urb->interval;
+				iNumofSlots--;
+
+				/*last packets or last slot */
+				if (packets == urb->number_of_packets
+					|| iNumofSlots == 0) {
+
+					/* NOTE(review): iMicroIndex has already
+					 * been advanced by urb->interval above,
+					 * so this may index one slot past the
+					 * last written hw_transaction entry --
+					 * confirm the bound against the array
+					 * size. */
+					itd->hw_transaction[iMicroIndex] |=
+						EHCI_ITD_IOC;
+
+					break;
+
+				}
+			}
+
+			/*
+			 * If this SITD is not the head/root SITD, link this SITD to the SITD
+			 * that came before it.
+			 */
+			if (prev_itd) {
+				prev_itd->hw_next = (u32) (itd);
+			}
+
+			prev_itd = itd;
+
+			/*
+			 * Allocate an ISO PTD from the ISO PTD map list and
+			 * set the equivalent bit of the allocated PTD to active
+			 * in the bitmap so that this PTD will be included into
+			 * the periodic schedule
+			 */
+
+
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso]: ITD index %d\n",
+				itd->framenumber);
+			phcd_iso_get_itd_ptd_index(hcd, itd);
+			iso_dbg(ISO_DBG_DATA,
+				"[phcd_submit_iso]: ITD index %d\n",
+				itd->itd_index);
+
+			/*if we dont have any space left */
+			if (itd->itd_index == TD_PTD_INV_PTD_INDEX) {
+				*status = -ENOSPC;
+				if (((int) (qhead->next_uframe - iNumofPTDs)) <
+					0){
+					/*plus max PTDs*/
+					qhead->next_uframe = qhead->next_uframe + PTD_PERIODIC_SIZE;
+				}
+				qhead->next_uframe -= iNumofPTDs;
+
+				/* Handle SITD list cleanup */
+				if (urb->hcpriv) {
+					phcd_iso_itd_free_list(hcd, urb,
+							       *status);
+				}
+				return *status;
+			}
+
+			sitd_itd_list = &qhead->periodic_list.sitd_itd_head;
+			list_add_tail(&itd->itd_list, sitd_itd_list);
+			qhead->periodic_list.high_speed = 1;
+			qhead->periodic_list.ptdlocation |=
+				0x1 << itd->itd_index;
+
+			/* Inidcate that a new SITD have been scheduled */
+			hcd->periodic_sched++;
+
+			/* Determine if there are any ITD scheduled before this one. */
+			if (urb->hcpriv == 0){
+				urb->hcpriv = itd;
+			}
+			iPTDIndex++;
+
+		}		/*end of while */
+	}
+
+	/*end of HIGH SPEED */
+	/* Last td of current transaction */
+	if (high_speed == 0){
+		sitd->hw_next = EHCI_LIST_END;
+	}
+	urb->error_count = 0;
+	return *status;
+}				/* phcd_submit_iso */
+#endif /* CONFIG_ISO_SUPPORT */
diff --git a/drivers/usb/host/pehci/host/mem.c b/drivers/usb/host/pehci/host/mem.c
new file mode 100644
index 0000000..dbf28a9
--- /dev/null
+++ b/drivers/usb/host/pehci/host/mem.c
@@ -0,0 +1,355 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file. Memory initialization, allocation, and 
+* deallocation are handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifdef CONFIG_ISO_SUPPORT
+
+/*memory utilization fuctions*/
+/*
+ * phci_hcd_mem_init - carve the ISP1763 payload memory into fixed-size
+ * blocks (128B .. 8KB) tracked in the memalloc[] table.
+ *
+ * The table layout is cumulative: BLK_128_ entries of 128 bytes, then
+ * BLK_256_ entries of 256 bytes, and so on; phy_addr advances
+ * contiguously from 0x1000.
+ */
+void
+phci_hcd_mem_init(void)
+{
+	int i = 0;
+	u32 start_addr = 0x1000;
+	struct isp1763_mem_addr *memaddr;
+	for (i = 0; i < BLK_TOTAL; i++) {
+		memaddr = &memalloc[i];
+		memset(memaddr, 0, sizeof *memaddr);
+	}
+	/*initialize block of 128bytes */
+	for (i = 0; i < BLK_128_; i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_128;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_128;
+	}
+	/*initialize block of 256bytes; the region ends at the cumulative
+	 *index BLK_128_ + BLK_256_ (the original loop stopped at BLK_256_,
+	 *leaving part of the 256-byte region uninitialized) */
+	for (i = BLK_128_; i < (BLK_128_ + BLK_256_); i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_256;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_256;
+	}
+	/*initialize block of 1024bytes */
+	for (i = BLK_128_ + BLK_256_; i < (BLK_128_ + BLK_256_ + BLK_1024_);
+		i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_1024;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_1024;
+	}
+
+	/*initialize block of  2kbytes */
+	for (i = (BLK_128_ + BLK_256_ + BLK_1024_);
+		i < (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_); i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_2048;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_2048;
+	}
+	/* initialize block of 4kbytes */
+	for (i = (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_);
+		i < (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_ + BLK_4096_);
+		i++){
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_4096;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_4096;
+	}
+	/* initialize block of 8kbytes; starts after the 4k region (the
+	 * original start index omitted BLK_4096_ and re-initialized the
+	 * 4k entries with 8k sizes/addresses) */
+	for (i = (BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_ + BLK_4096_); i <
+		(BLK_128_ + BLK_256_ + BLK_1024_ + BLK_2048_ + BLK_4096_ +
+		BLK_8196_); i++) {
+		memaddr = &memalloc[i];
+		memaddr->blk_num = i;
+		memaddr->used = 0;
+		memaddr->blk_size = BLK_SIZE_8192;
+		memaddr->phy_addr = start_addr;
+		start_addr += BLK_SIZE_8192;
+	}
+
+}
+
+
+/*free memory*/
+/* Return a payload block to the pool; no-op for stale or unused entries. */
+static void
+phci_hcd_mem_free(struct isp1763_mem_addr *memptr)
+{
+	int blk = memptr->blk_num;
+
+	if (blk >= BLK_TOTAL)
+		return;
+
+	if (memptr->blk_size == 0 || memalloc[blk].used == 0)
+		return;
+
+	memalloc[blk].used = 0;
+	memptr->used = 0;
+}
+
+
+/*allocate memory*/
+/*
+ * phci_hcd_mem_alloc - grab the first free payload block that can hold
+ * `size` bytes (first-fit scan over the whole memalloc[] table).
+ *
+ * On success memptr describes the block (virt_addr mirrors phy_addr in
+ * this driver); on failure, or for size == 0, memptr stays zeroed.
+ * `flag` is unused.
+ *
+ * Everything after the unconditional `return;` in the original body was
+ * unreachable dead code (a leftover region-based search) and has been
+ * removed; behavior is unchanged.
+ */
+static void
+phci_hcd_mem_alloc(u32 size, struct isp1763_mem_addr *memptr, u32 flag)
+{
+	u16 i;
+	struct isp1763_mem_addr *memaddr;
+
+	memset(memptr, 0, sizeof *memptr);
+
+	pehci_print("phci_hcd_mem_alloc(size = %d)\n", size);
+
+	/* size == 0: all fields already zeroed by the memset above */
+	if (size == 0) {
+		return;
+	}
+
+	for (i = 0; i < BLK_TOTAL; i++) {
+		memaddr = &memalloc[i];
+		if (!memaddr->used && size <= memaddr->blk_size) {
+			memaddr->used = 1;
+			memptr->used = 1;
+			memptr->blk_num = i;
+			memptr->blk_size = memaddr->blk_size;
+			memptr->phy_addr = memaddr->phy_addr;
+			memptr->virt_addr = memptr->phy_addr;
+			return;
+		}
+	}
+	/* No free block large enough: memptr remains zeroed. */
+}
+
+#else
+
+/*
+ * Carve the ISP1763 payload memory into three fixed-size regions
+ * (256B, 1KB, 4KB) tracked in memalloc[]; physical addresses run
+ * contiguously from 0x1000.
+ */
+void
+phci_hcd_mem_init(void)
+{
+	static const struct {
+		int count;
+		u32 size;
+	} regions[] = {
+		{ BLK_256_, BLK_SIZE_256 },
+		{ BLK_1024_, BLK_SIZE_1024 },
+		{ BLK_4096_, BLK_SIZE_4096 },
+	};
+	u32 next_addr = 0x1000;
+	int r, n, idx = 0;
+
+	for (n = 0; n < BLK_TOTAL; n++)
+		memset(&memalloc[n], 0, sizeof memalloc[n]);
+
+	/* Fill each region in order; idx is the global block number. */
+	for (r = 0; r < 3; r++) {
+		for (n = 0; n < regions[r].count; n++, idx++) {
+			memalloc[idx].blk_num = idx;
+			memalloc[idx].used = 0;
+			memalloc[idx].blk_size = regions[r].size;
+			memalloc[idx].phy_addr = next_addr;
+			next_addr += regions[r].size;
+		}
+	}
+}
+
+
+/*free memory*/
+/* Release a previously allocated payload block back to the pool. */
+static void
+phci_hcd_mem_free(struct isp1763_mem_addr *memptr)
+{
+	int blk = memptr->blk_num;
+
+	if (blk >= BLK_TOTAL)
+		return;
+
+	if ((memptr->blk_size != 0) && (memalloc[blk].used != 0)) {
+		memalloc[blk].used = 0;
+		memptr->used = 0;
+	}
+}
+
+
+/*allocate memory*/
+/*
+ * Allocate a payload block of at least `size` bytes.  The request is
+ * rounded up to the pool granularity (256B / 1KB / 4KB) and the scan
+ * begins in the matching region; if that region is exhausted, the scan
+ * simply continues into the larger blocks up to the end of the 4KB
+ * region -- exactly the two-pass search the original performed, merged
+ * into a single contiguous loop.  `flag` is unused.
+ */
+static void
+phci_hcd_mem_alloc(u32 size, struct isp1763_mem_addr *memptr, u32 flag)
+{
+	u32 rounded = size;
+	u32 first = 0, limit = 0;
+	u16 idx;
+
+	memset(memptr, 0, sizeof *memptr);
+
+	pehci_print("phci_hcd_mem_alloc(size = %d)\n", size);
+
+	if (rounded == 0) {
+		memptr->phy_addr = 0;
+		memptr->virt_addr = 0;
+		memptr->blk_size = 0;
+		memptr->num_alloc = 0;
+		memptr->blk_num = 0;
+		return;
+	}
+
+	/* Nothing beyond the end of the 4k region is ever searched. */
+	limit = BLK_256_ + BLK_1024_ + BLK_4096_;
+
+	if (rounded <= BLK_SIZE_256) {
+		rounded = BLK_SIZE_256;
+		first = 0;
+	} else if (rounded <= BLK_SIZE_1024) {
+		rounded = BLK_SIZE_1024;
+		first = BLK_256_;
+	} else {
+		rounded = BLK_SIZE_4096;
+		first = BLK_256_ + BLK_1024_;
+	}
+
+	for (idx = first; idx < limit; idx++) {
+		struct isp1763_mem_addr *slot = &memalloc[idx];
+
+		if (slot->used)
+			continue;
+
+		slot->used = 1;
+		memptr->used = 1;
+		memptr->blk_num = idx;
+		memptr->blk_size = rounded;
+		memptr->phy_addr = slot->phy_addr;
+		memptr->virt_addr = memptr->phy_addr;
+		return;
+	}
+}
+
+#endif
diff --git a/drivers/usb/host/pehci/host/otg.c b/drivers/usb/host/pehci/host/otg.c
new file mode 100755
index 0000000..546d9e9
--- /dev/null
+++ b/drivers/usb/host/pehci/host/otg.c
@@ -0,0 +1,189 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file. OTG related events are handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+
+/*hub device which connected with root port*/
+struct usb_device *hubdev = 0;
+/* hub interrupt urb*/
+struct urb *huburb;
+
+/*return otghub from here*/
+/*
+ * phci_register_otg_device - hand the enumerated OTG hub device to the caller
+ * @dev: controller instance (unused here)
+ *
+ * Returns the global hubdev once it has a real address (devnum >= 2),
+ * otherwise NULL.
+ *
+ * Fix: the original printk dereferenced hubdev->devnum BEFORE the NULL
+ * check, oopsing when no hub has been registered yet; the debug print is
+ * now issued only after hubdev is known to be valid.
+ */
+struct usb_device *
+phci_register_otg_device(struct isp1763_dev *dev)
+{
+	if (hubdev && hubdev->devnum >= 0x2) {
+		printk("OTG dev %x %d\n", (u32) hubdev, hubdev->devnum);
+		return hubdev;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(phci_register_otg_device);
+
+/*suspend the otg port(0)
+ * needed when port is switching
+ * from host to device
+ * */
+/*
+ * Marks the OTG hub as suspending and manually fires the hub interrupt
+ * URB's completion handler so the state change is processed.
+ * NOTE(review): hubdev/huburb are assumed non-NULL here -- confirm this
+ * cannot run before the OTG hub and its interrupt URB exist.
+ */
+int
+phci_suspend_otg_port(struct isp1763_dev *dev, u32 command)
+{
+	int status = 0;
+	hubdev->otgstate = USB_OTG_SUSPEND;
+	if (huburb->status == -EINPROGRESS) {
+		huburb->status = 0;
+	}
+
+	/* status is forced to 0 unconditionally, making the check above
+	 * redundant; kept as-is.  Unlike the enumerate/resume paths, the
+	 * URB status is NOT reset to -EINPROGRESS afterwards -- TODO(review):
+	 * confirm that is intentional */
+	huburb->status = 0;
+	huburb->complete(huburb);
+	return status;
+}
+EXPORT_SYMBOL(phci_suspend_otg_port);
+
+/*set the flag to enumerate the device*/
+/*
+ * Marks the OTG hub for enumeration and manually completes the hub
+ * interrupt URB so the hub worker picks up the state change; the URB
+ * status is then re-armed to -EINPROGRESS.
+ * NOTE(review): hubdev/huburb assumed non-NULL -- confirm callers.
+ */
+int
+phci_enumerate_otg_port(struct isp1763_dev *dev, u32 command)
+{
+	/*set the flag to enumerate */
+	/*connect change interrupt will happen from
+	 * phci_intl_worker only
+	 * */
+	hubdev->otgstate = USB_OTG_ENUMERATE;
+	if (huburb->status == -EINPROGRESS) {
+		huburb->status = 0;
+	}
+	/*complete the urb */
+
+	huburb->complete(huburb);
+
+	/*reset the otghub urb status */
+	huburb->status = -EINPROGRESS;
+	return 0;
+}
+EXPORT_SYMBOL(phci_enumerate_otg_port);
+
+/*host controller resume sequence at otg port*/
+/*
+ * Same pattern as phci_enumerate_otg_port(): set the OTG state, manually
+ * complete the hub interrupt URB, then re-arm its status.
+ * NOTE(review): hubdev/huburb assumed non-NULL -- confirm callers.
+ */
+int
+phci_resume_otg_port(struct isp1763_dev *dev, u32 command)
+{
+	printk("Resume is called\n");
+	hubdev->otgstate = USB_OTG_RESUME;
+	if (huburb->status == -EINPROGRESS) {
+		huburb->status = 0;
+	}
+	/*complete the urb */
+
+	huburb->complete(huburb);
+
+	/*reset the otghub urb status */
+	huburb->status = -EINPROGRESS;
+	return 0;
+}
+EXPORT_SYMBOL(phci_resume_otg_port);
+/*host controller remote wakeup sequence at otg port*/
+/*
+ * Remote-wakeup variant of the hub-URB kick.  The LINUX_269/LINUX_2611
+ * conditionals select the legacy two-argument urb completion callback
+ * signature used by those older kernels.
+ * NOTE(review): hubdev/huburb assumed non-NULL -- confirm callers.
+ */
+int
+phci_remotewakeup(struct isp1763_dev *dev)
+{
+    printk("phci_remotewakeup_otg_port is called\n");
+    hubdev->otgstate = USB_OTG_REMOTEWAKEUP;
+    if(huburb->status == -EINPROGRESS)
+        huburb->status = 0;
+    /*complete the urb*/
+#if ((defined LINUX_269) || defined (LINUX_2611))
+    huburb->complete(huburb,NULL);      
+#else
+	 huburb->complete(huburb);
+#endif
+    /*reset the otghub urb status*/
+    huburb->status = -EINPROGRESS;
+    return 0;
+}
+EXPORT_SYMBOL(phci_remotewakeup);
+
+/*host controller wakeup sequence at otg port*/
+/*
+ * Wakeup variant of the hub-URB kick.  CAUTION: because of the #if 0
+ * below, both the otgstate assignment and the -EINPROGRESS condition are
+ * compiled out, so `huburb->status = 0;` executes UNCONDITIONALLY despite
+ * its indentation suggesting it is guarded.
+ */
+int
+phci_resume_wakeup(struct isp1763_dev *dev)
+{
+    printk("phci_wakeup_otg_port is called\n");
+#if 0
+    hubdev->otgstate = USB_OTG_WAKEUP_ALL;
+    if(huburb->status == -EINPROGRESS)
+#endif
+        huburb->status = 0;
+    /*complete the urb*/
+#if ((defined LINUX_269) || defined (LINUX_2611))
+    huburb->complete(huburb,NULL);      
+#else
+	 huburb->complete(huburb);
+#endif
+    /*reset the otghub urb status*/
+    huburb->status = -EINPROGRESS;
+    return 0;
+}
+EXPORT_SYMBOL(phci_resume_wakeup);
+
+/* per-role driver handles; neither is referenced in the visible code of
+ * this file -- NOTE(review): confirm where they are assigned/used */
+struct isp1763_driver *host_driver;
+struct isp1763_driver *device_driver;
+
+/*
+ * pehci_delrhtimer - stop the root-hub status polling timer
+ * @dev: controller instance (unused; the hcd is derived from huburb)
+ *
+ * Recovers the usb_hcd from the hub URB's bus pointer and kills its
+ * rh_timer.  NOTE(review): the del_timer() after del_timer_sync() is
+ * redundant -- del_timer_sync() already deactivates the timer; kept as-is.
+ */
+void
+pehci_delrhtimer(struct isp1763_dev *dev)
+{
+
+	struct usb_hcd *usb_hcd =
+		container_of(huburb->dev->parent->bus, struct usb_hcd, self);
+	del_timer_sync(&usb_hcd->rh_timer);
+	del_timer(&usb_hcd->rh_timer);
+
+}
+EXPORT_SYMBOL(pehci_delrhtimer);
+
+/*
+ * pehci_Deinitialize - power down the host-controller HAL device
+ * @dev: a device entry assumed to sit two slots past the host entry
+ *
+ * NOTE(review): `dev -= 2` assumes @dev points into an array of
+ * isp1763_dev with the host controller (index 0) two slots earlier --
+ * confirm this layout against the HAL.  Calls the driver's powerdown
+ * hook if one is registered.  Always returns 0.
+ */
+int
+pehci_Deinitialize(struct isp1763_dev *dev)
+{
+	dev -= 2;
+	if (dev->index == 0) {
+		if (dev->driver) {
+			if (dev->driver->powerdown) {
+				dev->driver->powerdown(dev);
+			}
+		}
+	}
+return 0;
+}
+EXPORT_SYMBOL(pehci_Deinitialize);
+
+/*
+ * pehci_Reinitialize - power the host-controller HAL device back up
+ * @dev: a device entry assumed to sit two slots past the host entry
+ *
+ * Mirror of pehci_Deinitialize(): step back to the host-controller entry
+ * (index 0) and call its powerup hook.  Always returns 0.
+ *
+ * Fix: also check dev->driver before dereferencing it, exactly as
+ * pehci_Deinitialize() does -- the original oopsed on a NULL driver
+ * pointer.
+ */
+int
+pehci_Reinitialize(struct isp1763_dev *dev)
+{
+	dev -= 2;
+	if (dev->index == 0) {
+		if (dev->driver && dev->driver->powerup) {
+			dev->driver->powerup(dev);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(pehci_Reinitialize);
+
+
diff --git a/drivers/usb/host/pehci/host/pehci.c b/drivers/usb/host/pehci/host/pehci.c
new file mode 100644
index 0000000..19e9441
--- /dev/null
+++ b/drivers/usb/host/pehci/host/pehci.c
@@ -0,0 +1,6567 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* Refer to the following files in ~/drivers/usb/host for copyright owners:
+* ehci-dbg.c, ehci-hcd.c, ehci-hub.c, ehci-mem.c, ehci-q.c and ehci-sched.c (kernel version 2.6.9)
+* Code is modified for ST-Ericsson product 
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <stdarg.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <linux/version.h>
+
+#include "../hal/isp1763.h"
+#include "pehci.h"
+#include "../hal/hal_intf.h"
+#include <linux/platform_device.h>
+#include <linux/wakelock.h>
+
+extern int HostComplianceTest;
+extern int HostTest;
+extern int No_Data_Phase;
+extern int No_Status_Phase;
+#define	EHCI_TUNE_CERR		3
+#define	URB_NO_INTERRUPT	0x0080
+#define	EHCI_TUNE_RL_TT		0
+#define	EHCI_TUNE_MULT_TT	1
+#define	EHCI_TUNE_RL_HS		0
+#define	EHCI_TUNE_MULT_HS	1
+
+
+#define POWER_DOWN_CTRL_NORMAL_VALUE	0xffff1ba0
+#define POWER_DOWN_CTRL_SUSPEND_VALUE	0xffff08b0
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
+//This macro is not supported in linux-2.6.35
+#define	USB_PORT_FEAT_HIGHSPEED 10
+#endif
+
+#ifdef CONFIG_ISO_SUPPORT
+
+#define	FALSE 0
+#define	TRUE (!FALSE)
+extern void *phcd_iso_sitd_to_ptd(phci_hcd * hcd,
+	struct ehci_sitd *sitd,
+	struct urb *urb, void *ptd);
+extern void *phcd_iso_itd_to_ptd(phci_hcd * hcd,
+	struct	ehci_itd *itd,
+	struct	urb *urb, void *ptd);
+
+extern unsigned	long phcd_submit_iso(phci_hcd *	hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+	struct urb *urb, unsigned long *status);
+void pehci_hcd_iso_schedule(phci_hcd * hcd, struct urb *);
+unsigned long lgFrameIndex = 0;
+unsigned long lgScheduledPTDIndex = 0;
+int igNumOfPkts = 0;
+#endif /* CONFIG_ISO_SUPPORT */
+
+struct isp1763_dev *isp1763_hcd;
+
+#ifdef HCD_PACKAGE
+/*file operation*/
+struct fasync_struct *fasync_q;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map, struct pt_regs *regs);
+#else
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map);
+#endif
+
+#include "otg.c"  /*OTG and HCD package needs it */
+
+
+int hcdpowerdown = 0;
+int portchange=0; //for remotewakeup
+EXPORT_SYMBOL(hcdpowerdown);
+unsigned char otg_se0_enable;
+EXPORT_SYMBOL(otg_se0_enable);
+
+
+/*Enable all other interrupt.*/
+
+#ifdef MSEC_INT_BASED
+#ifdef THREAD_BASED//This is to test interrupt mapping problem
+//#define	INTR_ENABLE_MASK (HC_OPR_REG_INT|HC_CLK_RDY_INT )
+#define INTR_ENABLE_MASK (/*HC_MSEC_INT |*/ HC_INTL_INT | HC_ATL_INT| HC_ISO_INT /*| HC_EOT_INT | HC_ISO_INT*/)
+#else
+#define	INTR_ENABLE_MASK (HC_MSEC_INT|HC_OPR_REG_INT|HC_CLK_RDY_INT )
+#endif
+#else
+#define	INTR_ENABLE_MASK ( HC_INTL_INT | HC_ATL_INT |HC_ISO_INT| HC_EOT_INT|HC_OPR_REG_INT|HC_CLK_RDY_INT)
+#endif
+
+
+
+#ifdef THREAD_BASED
+
+#define NO_SOF_REQ_IN_TSK 		0x1
+#define NO_SOF_REQ_IN_ISR 		0x2
+#define NO_SOF_REQ_IN_REQ 	0x3
+#define MSEC_INTERVAL_CHECKING 5
+
+/* one queued interrupt event: the hcd it belongs to plus the latched
+ * interrupt status, linked into g_messList/g_enqueueMessList */
+typedef struct _st_UsbIt_Msg_Struc {
+	struct usb_hcd 		*usb_hcd;
+	u8				uIntStatus;
+	struct list_head 		list;
+} st_UsbIt_Msg_Struc, *pst_UsbIt_Msg_Struc ;
+
+/* context of the interrupt-handling kernel thread: wait queue, wakeup
+ * flag, task handle and the lock protecting the message lists */
+typedef struct _st_UsbIt_Thread {
+    wait_queue_head_t       	ulThrdWaitQhead;
+    int                           		lThrdWakeUpNeeded;
+    struct task_struct           	*phThreadTask;
+    spinlock_t              lock;
+} st_UsbIt_Thread, *pst_UsbIt_Thread;
+
+st_UsbIt_Thread g_stUsbItThreadHandler;
+
+/* list heads for pending / being-enqueued interrupt messages; enqueue_lock
+ * guards g_enqueueMessList */
+st_UsbIt_Msg_Struc 	g_messList;
+st_UsbIt_Msg_Struc 	g_enqueueMessList;
+spinlock_t              	enqueue_lock;
+
+int pehci_hcd_process_irq_it_handle(struct usb_hcd* usb_hcd_);
+int pehci_hcd_process_irq_in_thread(struct usb_hcd *usb_hcd_);
+
+#endif /*THREAD_BASED*/
+
+#ifdef THREAD_BASED
+phci_hcd *g_pehci_hcd;
+#endif
+
+
+struct wake_lock pehci_wake_lock;
+
+/*---------------------------------------------------
+ *    Globals for EHCI
+ -----------------------------------------------------*/
+
+/* used	when updating hcd data */
+static spinlock_t hcd_data_lock	= SPIN_LOCK_UNLOCKED;
+
+static const char hcd_name[] = "ST-Ericsson ISP1763";
+static td_ptd_map_buff_t td_ptd_map_buff[TD_PTD_TOTAL_BUFF_TYPES];	/* td-ptd map buffer for all 1362 buffers */
+
+static u8 td_ptd_pipe_x_buff_type[TD_PTD_TOTAL_BUFF_TYPES] = {
+	TD_PTD_BUFF_TYPE_ATL,
+	TD_PTD_BUFF_TYPE_INTL,
+	TD_PTD_BUFF_TYPE_ISTL
+};
+
+
+/*global memory	blocks*/
+isp1763_mem_addr_t memalloc[BLK_TOTAL];
+#include "mem.c"
+#include "qtdptd.c"
+
+#ifdef CONFIG_ISO_SUPPORT
+#include "itdptd.c"
+#endif /* CONFIG_ISO_SUPPORT */
+
+static int
+pehci_rh_control(struct	usb_hcd	*usb_hcd, u16 typeReq,
+		 u16 wValue, u16 wIndex, char *buf, u16	wLength);
+
+static int pehci_bus_suspend(struct usb_hcd *usb_hcd);
+static int pehci_bus_resume(struct usb_hcd *usb_hcd);
+/*----------------------------------------------------*/
+/*
+ * pehci_complete_device_removal - tear down an endpoint's qh on device removal
+ * @hcd: controller state
+ * @qh:  queue head being removed
+ *
+ * ISO endpoints only release their common payload memory.  For ATL/INTL,
+ * the td/ptd slot is flagged TD_PTD_REMOVE; if transfers are still queued,
+ * the first pending interrupt URB is skipped in hardware and completed
+ * before the qh is freed.
+ */
+static void
+pehci_complete_device_removal(phci_hcd * hcd, struct ehci_qh *qh)
+{
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *td_ptd_buff;
+	struct urb * urb;
+	urb_priv_t *urb_priv;
+	struct ehci_qtd	*qtd = 0;
+//	struct usb_hcd *usb_hcd=&hcd->usb_hcd;
+	u16 skipmap=0;
+
+	if (qh->type ==	TD_PTD_BUFF_TYPE_ISTL) {
+#ifdef COMMON_MEMORY
+		phci_hcd_mem_free(&qh->memory_addr);
+#endif
+		return;
+	}
+
+	td_ptd_buff = &td_ptd_map_buff[qh->type];
+	td_ptd_map = &td_ptd_buff->map_list[qh->qtd_ptd_index];
+
+	/*this flag should only	be set when device is going */
+	td_ptd_map->state = TD_PTD_REMOVE;
+	/*if nothing there */
+	if (list_empty(&qh->qtd_list)) {
+		/* NOTE(review): state was just set to TD_PTD_REMOVE above, so
+		 * this != TD_PTD_NEW test is always true */
+		if (td_ptd_map->state != TD_PTD_NEW) {
+			phci_hcd_release_td_ptd_index(qh);
+		}
+		qha_free(qha_cache, qh);
+		qh = 0;
+		return;
+	} else {
+	
+		if(!list_empty(&qh->qtd_list)){
+				qtd=NULL;
+				qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list);
+				if(qtd){
+					urb=qtd->urb;
+					/* NOTE(review): urb->hcpriv is read before the
+					 * if(urb) guard below -- NULL urb would oops here */
+					urb_priv= urb->hcpriv;
+					
+					if(urb)
+					switch (usb_pipetype(urb->pipe)) {
+						case PIPE_CONTROL:
+						case PIPE_BULK:
+							break;
+						case PIPE_INTERRUPT:
+							td_ptd_buff = &td_ptd_map_buff[TD_PTD_BUFF_TYPE_INTL];
+							td_ptd_map = &td_ptd_buff->map_list[qh->qtd_ptd_index];
+
+							/*urb is already been removed */
+						//	if (td_ptd_map->state == TD_PTD_NEW) {
+						//		kfree(urb_priv);
+						//		break;
+						//	}
+
+							/* These TDs are not pending anymore */
+							td_ptd_buff->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+							td_ptd_map->state = TD_PTD_REMOVE;
+							urb_priv->state	|= DELETE_URB;
+
+							/*read the skipmap, to see if this transfer has	to be rescheduled */
+							skipmap	=
+							isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+							skipmap);
+
+							isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap,
+							skipmap | td_ptd_map->ptd_bitmap);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+							pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+							pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+							break;
+					}
+
+					
+				}else{
+					//break;
+				}
+		}
+		qha_free(qha_cache, qh);
+		qh = 0;
+		return;
+	}
+	/*MUST not come	down below this	*/
+	/* NOTE(review): unreachable -- both branches above return */
+	err("Never Error: Should not come to this portion of code\n");
+
+	return;
+}
+
+/*functions looks for the values in register
+  specified in ptr, if register	values masked
+  with the mask	and result is equal to done,
+  operation is successful else fails with timeout*/
+/*
+ * Polls register @ptr every ~1us for up to @usec iterations until
+ * (value & mask) == done.  Returns 0 on success, -ENODEV if the register
+ * reads back all-ones (card removed), -ETIMEDOUT otherwise.
+ * NOTE(review): the KERN_NOTICE printk fires on every poll iteration --
+ * very noisy for a busy-wait loop.
+ */
+static int
+pehci_hcd_handshake(phci_hcd * hcd, u32	ptr, u32 mask, u32 done, int usec)
+{
+	u32 result = 0;
+	do {
+		result = isp1763_reg_read16(hcd->dev, ptr, result);
+		printk(KERN_NOTICE "Registr %x val is %x\n", ptr, result);
+		if (result == ~(u32) 0)	{/* card removed */
+			return -ENODEV;
+		}
+		result &= mask;
+		if (result == done) {
+			return 0;
+		}
+		udelay(1);
+		usec--;
+	} while	(usec >	0);
+
+	return -ETIMEDOUT;
+}
+
+#ifndef	MSEC_INT_BASED
+/*schedule atl and interrupt tds,
+  only when we are not running on sof interrupt
+ */
+/*
+ * pehci_hcd_td_ptd_submit_urb - push an endpoint's next qtd to the chip
+ * @hcd:      controller state
+ * @qh:       endpoint queue head owning the transfer
+ * @bufftype: TD_PTD_BUFF_TYPE_ATL / _INTL / _ISTL slot class
+ *
+ * Non-MSEC (direct) scheduling path.  Under hcd->lock: reads the slot-class
+ * skip/irq-mask registers, takes the first new qtd off the qh, allocates
+ * payload memory, writes the PTD header (and payload for OUT/SETUP) into
+ * controller memory, then unskips the slot and enables its buffer.
+ * Returns silently if the qh is not ready (QH_STATE_TAKE_NEXT clear) or the
+ * qtd is already scheduled.
+ */
+static void
+pehci_hcd_td_ptd_submit_urb(phci_hcd * hcd, struct ehci_qh *qh,	u8 bufftype)
+{
+	unsigned long flags=0;
+	struct ehci_qtd	*qtd = 0;
+	struct urb *urb	= 0;
+	struct _isp1763_qha *qha = 0;
+	u16 location = 0;
+	u16 skipmap = 0;
+	u16 buffstatus = 0;
+	u16 ormask = 0;
+	u16 intormask =	0;
+	u32 length = 0;
+	struct list_head *head;
+
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct isp1763_mem_addr	*mem_addr = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_print("Buuffer type %d\n", bufftype);
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	ptd_map_buff = &td_ptd_map_buff[bufftype];
+
+	qha = &hcd->qha;
+
+	/* snapshot the skipmap and irq-mask registers for this slot class */
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+					   skipmap);
+
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+					   skipmap);
+
+		intormask =
+			isp1763_reg_read16(hcd->dev, hcd->regs.int_irq_mask_or,
+					   intormask);
+		break;
+	default:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap,
+					   skipmap);
+		break;
+
+	}
+
+
+	buffstatus =
+		isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+				   buffstatus);
+
+	/*header, qtd, and urb of current transfer */
+	location = qh->qtd_ptd_index;
+	td_ptd_map = &ptd_map_buff->map_list[location];
+
+	/* the interrupt worker owns scheduling for this qh right now */
+	if (!(qh->qh_state & QH_STATE_TAKE_NEXT)) {
+		pehci_check("qh	will schdule from interrupt routine,map	%x\n",
+			    td_ptd_map->ptd_bitmap);
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		return;
+	}
+	head = &qh->qtd_list;
+	qtd = list_entry(head->next, struct ehci_qtd, qtd_list);
+
+	/*already scheduled, may be from interrupt */
+	if (!(qtd->state & QTD_STATE_NEW)) {
+		pehci_check("qtd already in, state %x\n", qtd->state);
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		return;
+	}
+
+	qtd->state &= ~QTD_STATE_NEW;
+	qtd->state |= QTD_STATE_SCHEDULED;
+
+	qh->qh_state &=	~QH_STATE_TAKE_NEXT;
+	/*take the first td */
+	td_ptd_map->qtd	= qtd;
+	/*take the urb */
+	urb = qtd->urb;
+	ptd_map_buff->active_ptds++;
+
+	/*trust	the atl	worker,	at this	location there wont be any td */
+	/*if this td is	the last one */
+	if (qtd->state & QTD_STATE_LAST) {
+		qh->hw_current = cpu_to_le32(0);
+		/*else update the hw_next of qh	to the next td */
+	} else {
+		qh->hw_current = qtd->hw_next;
+	}
+	memset(qha, 0, sizeof(isp1763_qha));
+
+	pehci_check("td	being scheduled	: length: %d, device: %d, map: %x\n",
+		    qtd->length, urb->dev->devnum, td_ptd_map->ptd_bitmap);
+	/*NEW, now need	to get the memory for this transfer */
+	length = qtd->length;
+	mem_addr = &qtd->mem_addr;
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	/* NOTE(review): allocation failure is only logged; the code falls
+	 * through and still programs the PTD */
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		err("Never Error: Can not allocate memory for the current td,length %d\n", length);
+		/*should not happen */
+		/*can happen only when we exceed the limit of devices we support
+		   MAX 4 mass storage at a time	*/
+	}
+	phci_hcd_qha_from_qtd(hcd, qtd, qtd->urb, (void *) qha,
+		td_ptd_map->ptd_ram_data_addr, qh);
+	if (qh->type ==	TD_PTD_BUFF_TYPE_INTL) {
+		phci_hcd_qhint_schedule(hcd, qh, qtd, (isp1763_qhint *)	qha,
+					qtd->urb);
+	}
+	/*write	qha into the header of the host	controller */
+	isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr, 0,
+			  (u32 *) (qha), PHCI_QHA_LENGTH, 0);
+
+	/*if this is SETUP/OUT token , then need to write into the buffer */
+	/*length should	be valid and supported by the ptd */
+	if (qtd->length && (qtd->length <= HC_ATL_PL_SIZE)){
+		switch (PTD_PID(qha->td_info2))	{
+		case OUT_PID:
+		case SETUP_PID:
+
+			isp1763_mem_write(hcd->dev, (u32) mem_addr->phy_addr, 0,
+					  (void	*) qtd->hw_buf[0], length, 0);
+
+
+#if 0
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+#endif
+
+			
+
+			break;
+		}
+	}
+
+	/*unskip the tds at this location */
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+		skipmap	&= ~td_ptd_map->ptd_bitmap;
+		/*enable atl interrupts	on donemap */
+		ormask |= td_ptd_map->ptd_bitmap;
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+				    ormask);
+		break;
+
+	case TD_PTD_BUFF_TYPE_INTL:
+		skipmap	&= ~td_ptd_map->ptd_bitmap;
+		intormask |= td_ptd_map->ptd_bitmap;
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or,
+				    intormask);
+		break;
+
+	case TD_PTD_BUFF_TYPE_ISTL:
+		skipmap	&= ~td_ptd_map->ptd_bitmap;
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap, skipmap);
+		break;
+	}
+
+	/*if any new schedule, enable the atl buffer */
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				    buffstatus | ATL_BUFFER);
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+		buffstatus |= ATL_BUFFER;
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				    buffstatus | INT_BUFFER);
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap, skipmap);
+		break;
+	case TD_PTD_BUFF_TYPE_ISTL:
+		/*not supposed to be seen here */
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				    buffstatus | ISO_BUFFER);
+		break;
+	}
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return;
+
+}
+#endif
+
+
+
+#ifdef MSEC_INT_BASED
+/*schedule next	(atl/int)tds and any pending tds*/
+/*
+ * pehci_hcd_schedule_pending_ptds - reload slots reported done (MSEC path)
+ * @hcd:      controller state
+ * @donemap:  bitmap of td/ptd slots whose headers need attention
+ * @bufftype: TD_PTD_BUFF_TYPE_ATL or _INTL (ISO is rejected)
+ * @only:     unused
+ *
+ * Walks @donemap bit by bit under hcd_data_lock; for each slot with work it
+ * loads the next qtd (or reloads a flagged one), allocates payload memory,
+ * writes the PTD header and OUT/SETUP payload to the chip, and finally
+ * unskips the scheduled slots and enables the slot-class buffer.
+ *
+ * Fixes vs. original:
+ *  1. the default (bogus bufftype) case returned while still holding
+ *     hcd_data_lock, leaking the spinlock and deadlocking later;
+ *  2. the payload-allocation failure path incremented `location` a second
+ *     time (it was already advanced), desynchronizing location from the
+ *     scan mask.
+ */
+static void
+pehci_hcd_schedule_pending_ptds(phci_hcd * hcd, u16 donemap, u8 bufftype,
+				u16 only)
+{
+	struct ehci_qtd	*qtd = 0;
+	struct ehci_qh *qh = 0;
+	struct list_head *qtd_list = 0;
+	struct _isp1763_qha allqha;
+	struct _isp1763_qha *qha = 0;
+	u16 mask = 0x1,	index =	0;
+	u16 location = 0;
+	u16 skipmap = 0;
+	u32 newschedule	= 0;
+	u16 buffstatus = 0;
+	u16 schedulemap	= 0;
+#ifndef	CONFIG_ISO_SUPPORT
+	u16 lasttd = 1;
+#endif
+	u16 lastmap = 0;
+	struct urb *urb	= 0;
+	urb_priv_t *urbpriv = 0;
+	int length = 0;
+	u16 ormask = 0,	andmask	= 0;
+	u16 intormask =	0;
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct isp1763_mem_addr	*mem_addr = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_print("Buffer type %d\n",	bufftype);
+
+	/*need to hold this lock if another interrupt is comming
+	   for previously scheduled transfer, while scheduling new tds
+	 */
+	spin_lock(&hcd_data_lock);
+	ptd_map_buff = &td_ptd_map_buff[bufftype];
+	qha = &allqha;
+	switch (bufftype) {
+	case TD_PTD_BUFF_TYPE_ATL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+					   skipmap);
+		rmb();
+
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+
+		andmask	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_and,
+					   andmask);
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+					   skipmap);
+		/*read the interrupt mask registers */
+
+		intormask =
+			isp1763_reg_read16(hcd->dev, hcd->regs.int_irq_mask_or,
+					   intormask);
+		break;
+	default:
+		err("Never Error: Bogus	type of	bufer\n");
+		/* fix: must drop hcd_data_lock before bailing out; the
+		 * original returned with the spinlock held */
+		spin_unlock(&hcd_data_lock);
+		return;
+	}
+
+	buffstatus =
+		isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+				   buffstatus);
+	/*td headers need attention */
+	schedulemap = donemap;
+	while (schedulemap) {
+		index =	schedulemap & mask;
+		schedulemap &= ~mask;
+		mask <<= 1;
+
+		if (!index) {
+			location++;
+			continue;
+		}
+
+		td_ptd_map = &ptd_map_buff->map_list[location];
+		/*	can happen if donemap comes after
+		   removal of the urb and associated tds
+		 */
+		if ((td_ptd_map->state == TD_PTD_NEW) ||
+			(td_ptd_map->state == TD_PTD_REMOVE)) {
+			qh = td_ptd_map->qh;
+			pehci_check
+				("should not come here,	map %x,pending map %x\n",
+				 td_ptd_map->ptd_bitmap,
+				 ptd_map_buff->pending_ptd_bitmap);
+
+			pehci_check("buffer type %s\n",
+				(bufftype == 0) ? "ATL" : "INTL");
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*clear	the pending map	*/
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*no endpoint at this location */
+		if (!(td_ptd_map->qh)) {
+			err("queue head	can not	be null	here\n");
+			/*move to the next location */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*current endpoint */
+		qh = td_ptd_map->qh;
+		if (!(skipmap &	td_ptd_map->ptd_bitmap)) {
+			/*should not happen, if	happening, then	*/
+			pehci_check("buffertype	%d,td_ptd_map %x,skipnap %x\n",
+				    bufftype, td_ptd_map->ptd_bitmap, skipmap);
+			lastmap	= td_ptd_map->ptd_bitmap;
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*if we	processed all the tds in ths transfer */
+		if (td_ptd_map->lasttd)	{
+			err("should not	show  map %x,qtd %p\n",
+			td_ptd_map->ptd_bitmap, td_ptd_map->qtd);
+			/*this can happen in case the transfer is not being
+			 * procesed by the host	, tho the transfer is there
+			 * */
+			qh->hw_current = cpu_to_le32(td_ptd_map->qtd);
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*if we	have ptd that is going for reload */
+		if ((td_ptd_map->qtd) && (td_ptd_map->state & TD_PTD_RELOAD)) {
+			warn("%s: reload td\n",	__FUNCTION__);
+			td_ptd_map->state &= ~TD_PTD_RELOAD;
+			qtd = td_ptd_map->qtd;
+			goto loadtd;
+		}
+
+		/* qh is there but no qtd so it	means fresh transfer */
+		if ((td_ptd_map->qh) &&	!(td_ptd_map->qtd)) {
+			if (list_empty(&qh->qtd_list)) {
+				/*should not hapen again, as it	comes here
+				   when	it has td in its map
+				 */
+				pehci_check
+					("must not come	here any more, td map %x\n",
+					 td_ptd_map->ptd_bitmap);
+				/*this location	is idle	and can	be free	next time if
+				   no new transfers are	comming	for this */
+				donemap	&= ~td_ptd_map->ptd_bitmap;
+				td_ptd_map->state |= TD_PTD_IDLE;
+				ptd_map_buff->pending_ptd_bitmap &=
+					~td_ptd_map->ptd_bitmap;
+				location++;
+				continue;
+			}
+			qtd_list = &qh->qtd_list;
+			qtd = td_ptd_map->qtd =
+				list_entry(qtd_list->next, struct ehci_qtd,
+					   qtd_list);
+			/*got the td, now goto reload */
+			goto loadtd;
+		}
+
+		/*if there is already one qtd there in the transfer */
+		if (td_ptd_map->qtd) {
+			/*new schedule */
+			qtd = td_ptd_map->qtd;
+		}
+		loadtd:
+		/*should not happen */
+		if (!qtd) {
+			err("this piece	of code	should not be executed\n");
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		ptd_map_buff->active_ptds++;
+		/*clear	the pending map	here */
+		ptd_map_buff->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+
+
+		/*if this td is	the last one */
+		if (qtd->state & QTD_STATE_LAST) {
+			/*no qtd anymore */
+			qh->hw_current = cpu_to_le32(0);
+
+			/*else update the hw_next of qh	to the next td */
+		} else {
+			qh->hw_current = qtd->hw_next;
+		}
+
+		if (location !=	qh->qtd_ptd_index) {
+			err("Never Error: Endpoint header location and scheduling information are not same\n");
+		}
+
+		/*next location	*/
+		location++;
+		/*found	new transfer */
+		newschedule = 1;
+		/*take the urb */
+		urb = qtd->urb;
+		/*sometimes we miss due	to skipmap
+		   so to make sure that	we dont	put again the
+		   same	stuff
+		 */
+		if (!(qtd->state & QTD_STATE_NEW)) {
+			err("Never Error: We should not	put the	same stuff\n");
+			continue;
+		}
+
+		urbpriv	= (urb_priv_t *) urb->hcpriv;
+		urbpriv->timeout = 0;
+
+		/*no more new */
+		qtd->state &= ~QTD_STATE_NEW;
+		qtd->state |= QTD_STATE_SCHEDULED;
+
+
+
+		/*NEW, now need	to get the memory for this transfer */
+		length = qtd->length;
+		mem_addr = &qtd->mem_addr;
+		phci_hcd_mem_alloc(length, mem_addr, 0);
+		if (length && ((mem_addr->phy_addr == 0)
+			       || (mem_addr->virt_addr == 0))) {
+
+			err("Never Error: Can not allocate memory for the current td,length %d\n", length);
+			/* fix: location was already advanced above; the
+			 * original's extra location++ here double-stepped
+			 * and desynchronized the slot scan */
+			continue;
+		}
+
+		pehci_check("qtd being scheduled %p, device %d,map %x\n", qtd,
+			    urb->dev->devnum, td_ptd_map->ptd_bitmap);
+
+
+		memset(qha, 0, sizeof(isp1763_qha));
+		/*convert qtd to qha */
+		phci_hcd_qha_from_qtd(hcd, qtd,	qtd->urb, (void	*) qha,
+			td_ptd_map->ptd_ram_data_addr, qh);
+
+		if (qh->type ==	TD_PTD_BUFF_TYPE_INTL) {
+			phci_hcd_qhint_schedule(hcd, qh, qtd,
+				(isp1763_qhint *) qha,
+				qtd->urb);
+
+		}
+
+
+		length = PTD_XFERRED_LENGTH(qha->td_info1 >> 3);
+		if (length > HC_ATL_PL_SIZE) {
+			err("Never Error: Bogus	length,length %d(max %d)\n",
+			qtd->length, HC_ATL_PL_SIZE);
+		}
+
+		/*write	qha into the header of the host	controller */
+		isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr, 0,
+			(u32 *) (qha), PHCI_QHA_LENGTH, 0);
+
+#ifdef PTD_DUMP_SCHEDULE
+		printk("SCHEDULE next (atl/int)tds PTD header\n");
+		printk("DW0: 0x%08X\n", qha->td_info1);
+		printk("DW1: 0x%08X\n", qha->td_info2);
+		printk("DW2: 0x%08X\n", qha->td_info3);
+		printk("DW3: 0x%08X\n", qha->td_info4);
+#endif
+		
+		/*if this is SETUP/OUT token , then need to write into the buffer */
+		/*length should	be valid */
+		if (qtd->length && (length <= HC_ATL_PL_SIZE)){
+			switch (PTD_PID(qha->td_info2))	{
+			case OUT_PID:
+			case SETUP_PID:
+
+				isp1763_mem_write(hcd->dev,
+					(u32)	mem_addr->phy_addr, 0,
+					(void	*) qtd->hw_buf[0],
+					length, 0);
+
+				break;
+			}
+		}
+
+		/*unskip the tds at this location */
+		switch (bufftype) {
+		case TD_PTD_BUFF_TYPE_ATL:
+			skipmap	&= ~td_ptd_map->ptd_bitmap;
+			lastmap	= td_ptd_map->ptd_bitmap;
+			/*try to reduce	the interrupts */
+			ormask |= td_ptd_map->ptd_bitmap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+			break;
+
+		case TD_PTD_BUFF_TYPE_INTL:
+			skipmap	&= ~td_ptd_map->ptd_bitmap;
+			lastmap	= td_ptd_map->ptd_bitmap;
+			intormask |= td_ptd_map->ptd_bitmap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or,
+					    intormask);
+			break;
+
+		case TD_PTD_BUFF_TYPE_ISTL:
+#ifdef CONFIG_ISO_SUPPORT
+			iso_dbg(ISO_DBG_INFO,
+				"Never Error: Should not come here\n");
+#else
+			skipmap	&= ~td_ptd_map->ptd_bitmap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap,
+					    skipmap);
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.isotdlastmap,
+				lasttd);
+#endif /* CONFIG_ISO_SUPPORT */
+			break;
+		}
+
+
+	}
+	/*if any new schedule, enable the atl buffer */
+
+	if (newschedule) {
+		switch (bufftype) {
+		case TD_PTD_BUFF_TYPE_ATL:
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+					    buffstatus | ATL_BUFFER);
+			/*i am comming here to only those tds that has to be scheduled */
+			/*so skip map must be in place */
+			if (skipmap & donemap) {
+				pehci_check
+					("must be both ones compliment of each other\n");
+				pehci_check
+					("problem, skipmap %x, donemap %x,\n",
+					 skipmap, donemap);
+
+			}
+			skipmap	&= ~donemap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+					    skipmap);
+
+			break;
+		case TD_PTD_BUFF_TYPE_INTL:
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+					    buffstatus | INT_BUFFER);
+			skipmap	&= ~donemap;
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap,
+					    skipmap);
+			break;
+		case TD_PTD_BUFF_TYPE_ISTL:
+#ifndef	CONFIG_ISO_SUPPORT
+
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+					    buffstatus | ISO_BUFFER);
+#endif
+			break;
+		}
+	}
+	spin_unlock(&hcd_data_lock);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+#endif
+
+
+
+/*
+ * pehci_hcd_qtd_schedule - program a single qtd into its td/ptd slot
+ * @hcd:        controller state
+ * @qtd:        transfer descriptor to schedule (no-op if already scheduled)
+ * @qh:         owning endpoint queue head
+ * @td_ptd_map: slot mapping (header address, bitmap) for this endpoint
+ *
+ * Advances qh->hw_current, allocates payload memory, converts the qtd to a
+ * PTD header, writes it (and OUT/SETUP payload) into controller memory and
+ * marks the qtd QTD_STATE_SCHEDULED.  Caller is expected to hold the
+ * appropriate lock and to unskip the slot afterwards.
+ */
+static void
+pehci_hcd_qtd_schedule(phci_hcd	* hcd, struct ehci_qtd *qtd,
+		       struct ehci_qh *qh, td_ptd_map_t	* td_ptd_map)
+{
+	struct urb *urb;
+	urb_priv_t *urbpriv = 0;
+	u32 length=0;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	struct _isp1763_qha *qha, qhtemp;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* idempotent: nothing to do if this qtd already went to the chip */
+	if (qtd->state & QTD_STATE_SCHEDULED) {
+		return;
+	}
+	/*redundant */
+	qha = &qhtemp;
+
+	/*if this td is	the last one */
+	if (qtd->state & QTD_STATE_LAST) {
+		/*no qtd anymore */
+		qh->hw_current = cpu_to_le32(0);
+
+		/*else update the hw_next of qh	to the next td */
+	} else {
+		qh->hw_current = qtd->hw_next;
+	}
+
+	urb = qtd->urb;
+	urbpriv	= (urb_priv_t *) urb->hcpriv;
+	urbpriv->timeout = 0;
+
+	/*NEW, now need	to get the memory for this transfer */
+	length = qtd->length;
+	mem_addr = &qtd->mem_addr;
+	phci_hcd_mem_alloc(length, mem_addr, 0);
+	/* NOTE(review): on allocation failure the qtd is silently left
+	 * unscheduled (still QTD_STATE_NEW) -- no error is propagated */
+	if (length && ((mem_addr->phy_addr == 0) || (mem_addr->virt_addr == 0))) {
+		err("Never Error: Cannot allocate memory for the current td,length %d\n", length);
+		return;
+	}
+
+	pehci_check("newqtd being scheduled, device: %d,map: %x\n",
+		    urb->dev->devnum, td_ptd_map->ptd_bitmap);
+
+	//udelay(100);
+
+	memset(qha, 0, sizeof(isp1763_qha));
+	/*convert qtd to qha */
+	phci_hcd_qha_from_qtd(hcd, qtd,	qtd->urb, (void	*) qha,
+			      td_ptd_map->ptd_ram_data_addr, qh
+			      /*td_ptd_map->datatoggle */ );
+
+	if (qh->type ==	TD_PTD_BUFF_TYPE_INTL) {
+		phci_hcd_qhint_schedule(hcd, qh, qtd, (isp1763_qhint *)	qha,
+					qtd->urb);
+	}
+
+
+	length = PTD_XFERRED_LENGTH(qha->td_info1 >> 3);
+	if (length > HC_ATL_PL_SIZE) {
+		err("Never Error: Bogus	length,length %d(max %d)\n",
+		qtd->length, HC_ATL_PL_SIZE);
+	}
+
+	/*write	qha into the header of the host	controller */
+	isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr, 0,
+			  (u32 *) (qha), PHCI_QHA_LENGTH, 0);
+	
+#if 0 //def PTD_DUMP_SCHEDULE
+		printk("SCHEDULE Next qtd\n");
+		printk("DW0: 0x%08X\n", qha->td_info1);
+		printk("DW1: 0x%08X\n", qha->td_info2);
+		printk("DW2: 0x%08X\n", qha->td_info3);
+		printk("DW3: 0x%08X\n", qha->td_info4);
+#endif
+	
+	/*if this is SETUP/OUT token , then need to write into the buffer */
+	/*length should	be valid */
+	if (qtd->length && (length <= HC_ATL_PL_SIZE)){
+		switch (PTD_PID(qha->td_info2))	{
+		case OUT_PID:
+		case SETUP_PID:
+
+			isp1763_mem_write(hcd->dev, (u32) mem_addr->phy_addr, 0,
+				(void	*) qtd->hw_buf[0], length, 0);
+
+#if 0
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+#endif
+
+
+			break;
+		}
+	}
+	/*qtd is scheduled */
+	qtd->state &= ~QTD_STATE_NEW;
+	qtd->state |= QTD_STATE_SCHEDULED;
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return;
+}
+#ifdef USBNET 
+
+/*
+ * pehci_hcd_urb_delayed_complete - defer giveback of a completed urb.
+ *
+ * Frees the urb's private td bookkeeping, detaches the urb from its
+ * endpoint, queues it on hcd->cleanup_urb, and enables the mSOF
+ * interrupt so the interrupt handler performs the actual giveback.
+ * Caller is assumed to hold hcd->lock with interrupts disabled (see
+ * note at the queueing site below).
+ *
+ * Fixes vs. original:
+ *  - 'remove' was a file-lifetime static scratch variable; completions
+ *    for different qhs can race, so it is a plain local now (the
+ *    original reset it to 0 at the end of every call, so a local is
+ *    behavior-equivalent).  The vestigial static 'qh_state' (written,
+ *    never read — its only reader was commented out) is dropped.
+ *  - The GFP_ATOMIC kmalloc result was dereferenced without a NULL
+ *    check; an atomic allocation failure would oops in interrupt
+ *    context.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_urb_delayed_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map, struct pt_regs *regs)
+#else
+static void
+pehci_hcd_urb_delayed_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map)
+#endif
+{
+	u32 remove = 0;
+
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+
+#ifdef USBNET 
+	struct isp1763_async_cleanup_urb *urb_st = 0;
+#endif
+
+	urb_priv->timeout = 0;
+
+	/* td being torn down, urb killed, or controller stopped: release
+	 the td/ptd slot once the qtd list has drained. */
+	if ((td_ptd_map->state == TD_PTD_REMOVE) ||
+			(urb_priv->state == DELETE_URB) ||
+			!HCD_IS_RUNNING(hcd->state)) {
+		remove = 1;
+	}
+	qh->qh_state = QH_STATE_COMPLETING;
+	/*remove the done tds */
+	spin_lock(&hcd_data_lock);
+	phci_hcd_urb_free_priv(hcd, urb_priv, qh);
+	spin_unlock(&hcd_data_lock);
+
+	kfree(urb_priv);
+	urb->hcpriv = 0;
+
+	/*if normal completion */
+	if (urb->status	== -EINPROGRESS) {
+		urb->status = 0;
+	}
+
+	if (remove && list_empty(&qh->qtd_list)) {
+		phci_hcd_release_td_ptd_index(qh);
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	if (!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb, 0))
+		usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd, urb);
+#endif
+
+	/* assume interrupt has been disabled and has acquired hcd->lock */
+	urb_st = kmalloc(sizeof(struct isp1763_async_cleanup_urb), GFP_ATOMIC);
+	if (urb_st) {
+		urb_st->urb = urb;
+		list_add_tail(&urb_st->urb_list, &(hcd->cleanup_urb.urb_list));
+	} else {
+		/* Atomic allocation failed: the giveback of this urb is
+		 lost, but we no longer dereference a NULL pointer. */
+		err("Never Error: no memory for async cleanup of urb\n");
+	}
+
+	/* Kick the mSOF interrupt so the cleanup list gets processed. */
+	isp1763_reg_write16(hcd->dev, hcd->regs.interruptenable, HC_MSOF_INT);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+#endif
+
+/*
+ * pehci_hcd_urb_complete - finish an urb and give it back to the core.
+ *
+ * Frees the urb's private td bookkeeping, optionally releases the
+ * td/ptd slot, unlinks the urb from its endpoint and calls
+ * usb_hcd_giveback_urb() with hcd->lock dropped around the callback.
+ * Caller must hold hcd->lock.
+ *
+ * Fix vs. original: 'remove' and 'qh_state' were file-lifetime static
+ * scratch variables; completions can run concurrently for different
+ * qhs, so statics could be clobbered across invocations.  'remove' is
+ * now a plain local (the original reset it to 0 on every call, so this
+ * is behavior-equivalent); 'qh_state' was written but never read and
+ * is dropped.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map, struct pt_regs *regs)
+#else
+static void
+pehci_hcd_urb_complete(phci_hcd * hcd, struct ehci_qh *qh, struct urb *urb,
+	td_ptd_map_t * td_ptd_map)
+#endif
+{
+	u32 remove = 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+	
+	if (urb_priv == NULL) {
+		printk("***************urb_priv is NULL ************ %s: Entered\n",	__FUNCTION__);
+		goto exit;
+	}
+	pehci_check("complete the td , length: %d\n", td_ptd_map->qtd->length);
+	urb_priv->timeout = 0;
+
+	/* td being torn down, urb killed, or controller stopped: release
+	 the td/ptd slot once the qtd list has drained. */
+	if ((td_ptd_map->state == TD_PTD_REMOVE) ||
+			(urb_priv->state == DELETE_URB) ||
+			!HCD_IS_RUNNING(hcd->state)) {
+		remove = 1;
+	}
+
+	qh->qh_state = QH_STATE_COMPLETING;
+	/*remove the done tds */
+	spin_lock(&hcd_data_lock);
+	phci_hcd_urb_free_priv(hcd, urb_priv, qh);
+	spin_unlock(&hcd_data_lock);
+
+	kfree(urb_priv);
+	urb->hcpriv = 0;
+
+	/*if normal completion */
+	if (urb->status	== -EINPROGRESS) {
+		urb->status = 0;
+	}
+
+	if (remove && list_empty(&qh->qtd_list)) {
+		phci_hcd_release_td_ptd_index(qh);
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	if (!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb, 0)) {
+		usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd, urb);
+	}
+#endif
+	/* The giveback callback may resubmit; never hold our lock across it. */
+	spin_unlock(&hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+	usb_hcd_giveback_urb(&hcd->usb_hcd, urb, urb->status);
+#endif
+	spin_lock(&hcd->lock);
+exit:
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	
+}
+
+/*
+ * pehci_hcd_update_error_status - map a halted PTD status word onto a
+ * USB error code in urb->status.
+ *
+ * Only acts when PTD_STATUS_HALTED is set.  Transaction errors with
+ * retries left become -EPIPE, with retries exhausted -EPROTO; babble
+ * becomes -EOVERFLOW; every other halt is reported as -EPIPE.
+ */
+static void
+pehci_hcd_update_error_status(u32 ptdstatus, struct urb	*urb)
+{
+	/* Not halted: leave urb->status untouched. */
+	if (!(ptdstatus & PTD_STATUS_HALTED))
+		return;
+
+	if (ptdstatus & PTD_XACT_ERROR) {
+		if (PTD_RETRY(ptdstatus)) {
+			/* halt the endpoint */
+			printk("transaction error , retries %d\n",
+				PTD_RETRY(ptdstatus));
+			urb->status = -EPIPE;
+		} else {
+			/* retry count exhausted: protocol error */
+			printk("transaction error , retries %d\n",
+				PTD_RETRY(ptdstatus));
+			urb->status = -EPROTO;
+		}
+	} else if (ptdstatus & PTD_BABBLE) {
+		printk("babble error, qha %x\n", ptdstatus);
+		urb->status = -EOVERFLOW;
+	} else if (PTD_RETRY(ptdstatus)) {
+		printk("endpoint halted with retrie remaining %d\n",
+			PTD_RETRY(ptdstatus));
+		urb->status = -EPIPE;
+	} else {
+		/* unknown halt cause; report it as a stalled endpoint */
+		printk("protocol error, qha %x\n", ptdstatus);
+		urb->status = -EPIPE;
+	}
+
+	/* Placeholder kept from the original: halted endpoints (-EPIPE)
+	 would need recovery handling here. */
+	if (urb->status	== -EPIPE) {
+	}
+}
+
+#ifdef CONFIG_ISO_SUPPORT	/* New code for	ISO support */
+
+/*******************************************************************
+ * pehci_hcd_iso_sitd_schedule - schedule one ISO split transfer
+ *
+ * phci_hcd *hcd,
+ *	Host controller	driver structure which contains	almost all data
+ *	needed by the host controller driver to	process	data and interact
+ *	with the host controller.
+ *
+ * struct urb *urb
+ *	The ISOCHRONOUS request the SITD belongs to.
+ *
+ * struct ehci_sitd *sitd
+ *	The split-transaction ISO descriptor to program.
+ *
+ * API Description
+ * Converts the given SITD into a PTD, writes the PTD header (and any
+ * OUT payload) into the controller memory slot allocated for it, and
+ * updates the skip-map, last-map and buffer-status registers so the
+ * controller will process it.
+ ************************************************************************/
+void 
+pehci_hcd_iso_sitd_schedule(phci_hcd *hcd,struct urb* urb,struct ehci_sitd* sitd){
+		td_ptd_map_t *td_ptd_map;
+		td_ptd_map_buff_t *ptd_map_buff;
+		struct _isp1763_isoptd *iso_ptd;
+		u32 ormask = 0, skip_map = 0,last_map=0,buff_stat=0;
+		struct isp1763_mem_addr *mem_addr;
+		ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+		
+		/* Get the PTD allocated for this SITD. */
+		td_ptd_map =
+				&ptd_map_buff->map_list[sitd->
+					sitd_index];
+		iso_ptd = &hcd->isotd;
+		
+		memset(iso_ptd, 0,	sizeof(struct _isp1763_isoptd));
+		/* Read buffer status register to check later if the ISO buffer is
+		filled or not */
+		buff_stat =
+			isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,buff_stat);
+
+		/* Read the contents of the ISO skipmap register */
+		skip_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap,
+				skip_map);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_sitd_schedule]: Read skip map: 0x%08x\n",
+			(unsigned int) skip_map);
+
+		/* Read the contents of the ISO lastmap  register */
+		last_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdlastmap,
+			last_map);
+
+		/* Read the contents of the ISO ormask  register.
+		 NOTE(review): 'ormask' is never used afterwards in this
+		 function — confirm whether the read is needed for a
+		 hardware side effect before removing. */
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.iso_irq_mask_or,
+			ormask);
+		
+		/* Create a PTD from an SITD */
+		phcd_iso_sitd_to_ptd(hcd, sitd, sitd->urb,
+				(void *) iso_ptd);	
+		/* Indicate that this SITD's PTD have been
+		filled up */
+		ptd_map_buff->pending_ptd_bitmap &=
+			~td_ptd_map->ptd_bitmap;		
+
+				/*
+				 * Place the newly initialized ISO PTD structure into
+				 the location allocated for this PTD in the ISO PTD
+				 memory region.
+				 */
+#ifdef SWAP
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd, PHCI_QHA_LENGTH, 0,
+					PTD_HED);
+#else /* NO_SWAP */
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0);
+#endif
+
+				/*
+ 				* Set this flag to avoid unlinking before
+ 				schedule at particular frame number
+				 */
+				td_ptd_map->state = TD_PTD_IN_SCHEDULE;
+
+				/*
+				 * If the length is not zero and the direction is
+				 OUT then  copy the  data to be transferred
+				 into the PAYLOAD memory area.
+				 */
+				if (sitd->length) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case OUT_PID:
+						/* Get the Payload memory
+						allocated for this PTD */
+						mem_addr = &sitd->mem_addr;
+#ifdef SWAP
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr-> phy_addr,
+							0, (u32*)
+							((sitd->hw_bufp[0])),
+							sitd->length, 0,
+							PTD_PAY);
+#else /* NO_SWAP */
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr,
+							0, (u32 *)
+							sitd->hw_bufp[0],
+							sitd->length, 0);
+#endif
+						break;
+					}
+					/* switch(PTD_PID(iso_ptd->td_info2))*/
+				}
+
+				/* if(sitd->length) */
+				/* If this is the last td, indicate to complete
+				the URB */
+				if (sitd->hw_next == EHCI_LIST_END) {
+					td_ptd_map->lasttd = 1;
+				}
+
+				/*
+				 * Clear the bit corresponding to this PTD in
+				 the skip map so that it will be processed on
+				 the next schedule traversal.
+				 */
+				skip_map &= ~td_ptd_map->ptd_bitmap;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_sitd_schedule]: Skip map:0x%08x\n",(unsigned int) skip_map);
+
+				/*
+				 * Update the last map register to indicate
+				 that the newly created PTD is the last PTD
+				 added only if it is larger than the previous
+				 bitmap.
+				 */
+				if (last_map < td_ptd_map->ptd_bitmap) {
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.isotdlastmap,
+						td_ptd_map->ptd_bitmap);
+					iso_dbg(ISO_DBG_DATA,
+						"[pehci_hcd_iso_sitd_schedule]:Last Map: 0x%08x\n",
+						td_ptd_map->ptd_bitmap);
+				}
+
+				/*
+				 * Set the ISO_BUF_FILL bit to 1 to indicate
+				 that there is a PTD for ISO that needs to
+				 * be processed.
+				 */
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.buffer_status,
+					(buff_stat | ISO_BUFFER));
+				
+				/* Finally publish the cleared skip bit so the
+				 controller picks this PTD up. */
+				isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap,skip_map);
+		
+}
+
+/*******************************************************************
+ * pehci_hcd_iso_schedule - ISOCHRONOUS transfer scheduler
+ *
+ * phci_hcd *hcd,
+ *	Host controller	driver structure which contains	almost all data
+ *	needed by the host controller driver to	process	data and interact
+ *	with the host controller.
+ *
+ * struct urb *urb
+ *	The ISOCHRONOUS request whose ITDs/SITDs are to be scheduled.
+ *
+ * API Description
+ * Walks the qhead's periodic list and, for every ITD (high speed) or
+ * SITD (full speed) belonging to this urb, converts it into a PTD the
+ * host controller can process, copies any OUT payload into controller
+ * memory, and updates the skip-map, last-map and buffer-status
+ * registers accordingly.
+ ************************************************************************/
+void
+pehci_hcd_iso_schedule(phci_hcd * hcd, struct urb *urb)
+{
+	struct list_head *sitd_itd_sched, *position;
+	struct ehci_itd *itd;
+	struct ehci_sitd *sitd;
+	td_ptd_map_t *td_ptd_map;
+	unsigned long last_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct _isp1763_isoptd *iso_ptd;
+	unsigned long buff_stat;
+	struct isp1763_mem_addr *mem_addr;
+	u32 ormask = 0, skip_map = 0;
+	u32 iNumofPkts;
+	unsigned int iNumofSlots = 0, mult = 0;
+	struct ehci_qh *qhead;
+
+	buff_stat = 0;
+	iso_dbg(ISO_DBG_ENTRY, "[pehci_hcd_iso_schedule]: Enter\n");
+	iso_ptd = &hcd->isotd;
+
+	last_map = 0;
+	/* Check if there are any ITDs scheduled  for processing */
+	if (hcd->periodic_sched == 0) {
+		return;
+	}
+	if (urb->dev->speed == USB_SPEED_HIGH) {
+		/* High-bandwidth: extract the transactions-per-microframe
+		 multiplier from bits 12:11 of wMaxPacketSize. */
+		mult = usb_maxpacket(urb->dev, urb->pipe,
+				usb_pipeout(urb->pipe));
+		mult = 1 + ((mult >> 11) & 0x3);
+		iNumofSlots = NUMMICROFRAME / urb->interval;
+		/*number of PTDs need to schedule for this PTD */
+		iNumofPkts = (urb->number_of_packets / mult) / iNumofSlots;
+		if ((urb->number_of_packets / mult) % iNumofSlots != 0){
+			/*get remainder */
+			iNumofPkts += 1;
+		}
+	} else{
+		iNumofPkts = urb->number_of_packets;
+	}
+
+	/* The endpoint's queue head was stashed in hcpriv at enqueue time;
+	 the field moved from the urb to the endpoint in 2.6.24. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qhead = urb->hcpriv;
+#else
+	qhead = urb->ep->hcpriv;
+#endif
+	if (!qhead) {
+		iso_dbg(ISO_DBG_ENTRY,
+			"[pehci_hcd_iso_schedule]: Qhead==NULL\n");
+		return ;
+	}
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+
+	/* NOTE(review): termination relies on the periodic list holding at
+	 least iNumofPkts schedulable descriptors per pass — confirm the
+	 enqueue path guarantees this, else the while loop could re-walk
+	 an exhausted list. */
+	while (iNumofPkts > 0) {
+	/* Read buffer status register to check later if the ISO buffer is
+	filled or not */
+	buff_stat =
+		isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,buff_stat);
+
+		/* Read the contents of the ISO skipmap register */
+		skip_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdskipmap,
+				skip_map);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_schedule]: Read skip map: 0x%08x\n",
+			(unsigned int) skip_map);
+
+		/* Read the contents of the ISO lastmap  register */
+		last_map =
+			isp1763_reg_read16(hcd->dev, hcd->regs.isotdlastmap,
+			last_map);
+
+		/* Read the contents of the ISO ormask  register.
+		 NOTE(review): 'ormask' is not used later in this function —
+		 confirm whether the read has a hardware side effect. */
+		ormask = isp1763_reg_read16(hcd->dev, hcd->regs.iso_irq_mask_or,
+			ormask);
+
+		/* Process ITDs linked to this frame, checking if there are any that needs to
+		be scheduled */
+		sitd_itd_sched = &qhead->periodic_list.sitd_itd_head;
+		if (list_empty(sitd_itd_sched)) {
+			iso_dbg(ISO_DBG_INFO,
+				"[pehci_hcd_iso_schedule]: ISO schedule list's empty. Nothing to schedule.\n");
+			return;
+		}
+
+		list_for_each(position, sitd_itd_sched) {
+			if (qhead->periodic_list.high_speed == 0){
+				/* Full speed: descriptors are SITDs. */
+				/* Get an SITD in the list for processing */
+				sitd = list_entry(position, struct ehci_sitd,
+					sitd_list);
+				iNumofPkts--;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: SITD Index:%d\n", sitd->sitd_index);
+				if(sitd->sitd_index==TD_PTD_INV_PTD_INDEX)
+					continue;
+				/* Get the PTD allocated for this SITD. */
+				td_ptd_map =
+					&ptd_map_buff->map_list[sitd->
+					sitd_index];
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/* Create a PTD from an SITD */
+				phcd_iso_sitd_to_ptd(hcd, sitd, sitd->urb,
+					(void *) iso_ptd);
+
+				/* Indicate that this SITD's PTD have been
+				filled up */
+				ptd_map_buff->pending_ptd_bitmap &=
+					~td_ptd_map->ptd_bitmap;
+
+				/*
+				 * Place the newly initialized ISO PTD structure into
+				 the location allocated for this PTD in the ISO PTD
+				 memory region.
+				 */
+#ifdef SWAP
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd, PHCI_QHA_LENGTH, 0,
+					PTD_HED);
+#else /* NO_SWAP */
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0);
+#endif
+
+				/*
+ 				* Set this flag to avoid unlinking before
+ 				schedule at particular frame number
+				 */
+				td_ptd_map->state = TD_PTD_IN_SCHEDULE;
+
+				/*
+				 * If the length is not zero and the direction is
+				 OUT then  copy the  data to be transferred
+				 into the PAYLOAD memory area.
+				 */
+				if (sitd->length) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case OUT_PID:
+						/* Get the Payload memory
+						allocated for this PTD */
+						mem_addr = &sitd->mem_addr;
+#ifdef SWAP
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr-> phy_addr,
+							0, (u32*)
+							((sitd->hw_bufp[0])),
+							sitd->length, 0,
+							PTD_PAY);
+#else /* NO_SWAP */
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr,
+							0, (u32 *)
+							sitd->hw_bufp[0],
+							sitd->length, 0);
+#endif
+						break;
+					}
+					/* switch(PTD_PID(iso_ptd->td_info2))*/
+				}
+
+				/* if(sitd->length) */
+				/* If this is the last td, indicate to complete
+				the URB */
+				if (sitd->hw_next == EHCI_LIST_END) {
+					td_ptd_map->lasttd = 1;
+				}
+
+				/*
+				 * Clear the bit corresponding to this PTD in
+				 the skip map so that it will be processed on
+				 the next schedule traversal.
+				 */
+				skip_map &= ~td_ptd_map->ptd_bitmap;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: Skip map:0x%08x\n",(unsigned int) skip_map);
+
+				/*
+				 * Update the last map register to indicate
+				 that the newly created PTD is the last PTD
+				 added only if it is larger than the previous
+				 bitmap.
+				 */
+				if (last_map < td_ptd_map->ptd_bitmap) {
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.isotdlastmap,
+						td_ptd_map->ptd_bitmap);
+					iso_dbg(ISO_DBG_DATA,
+						"[pehci_hcd_iso_schedule]:Last Map: 0x%08x\n",
+						td_ptd_map->ptd_bitmap);
+				}
+
+				/*
+				 * Set the ISO_BUF_FILL bit to 1 to indicate
+				 that there is a PTD for ISO that needs to
+				 * be processed.
+				 */
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.buffer_status,
+					(buff_stat | ISO_BUFFER));
+
+			} else {	/*HIGH SPEED */
+
+				/* Get an ITD in the list for processing */
+				itd = list_entry(position, struct ehci_itd,
+					itd_list);
+				iNumofPkts--;
+
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: ITD Index: %d\n",	itd->itd_index);
+				/* Get the PTD allocated for this ITD. */
+				td_ptd_map =
+					&ptd_map_buff->map_list[itd->itd_index];
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/* Create a PTD from an ITD */
+				phcd_iso_itd_to_ptd(hcd, itd, itd->urb,
+					(void *) iso_ptd);
+
+				/* Indicate that this SITD's PTD have been
+				filled up */
+				ptd_map_buff->pending_ptd_bitmap &=
+					~td_ptd_map->ptd_bitmap;
+
+				/*
+				 * Place the newly initialized ISO PTD
+				 structure into the location allocated
+				 * for this PTD in the ISO PTD memory region.
+				 */
+#ifdef SWAP
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0,
+					PTD_HED);
+#else /* NO_SWAP */
+				isp1763_mem_write(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) iso_ptd,PHCI_QHA_LENGTH, 0);
+#endif
+				/*
+				 * Set this flag to avoid unlinking before schedule
+				 * at particular frame number
+				 */
+				td_ptd_map->state = TD_PTD_IN_SCHEDULE;
+
+				/*
+				 * If the length is not zero and the direction
+				 is OUT then copy the data to be transferred
+				 into the PAYLOAD memory area.
+				 */
+				if (itd->length) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case OUT_PID:
+						/* Get the Payload memory
+						allocated for this PTD */
+						mem_addr = &itd->mem_addr;
+#ifdef SWAP
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr, 0,
+							(u32*)
+							((itd->hw_bufp[0])),
+							itd->length, 0,
+							PTD_PAY);
+#else /* NO_SWAP */
+						isp1763_mem_write(hcd->dev,
+							(unsigned long)
+							mem_addr->phy_addr, 0,
+							(u32 *)itd->hw_bufp[0],
+							itd->length, 0);
+#endif
+						break;
+					}
+					/* switch(PTD_PID(iso_ptd->td_info2)) */
+				}
+
+				
+				/* If this is the last td, indicate to
+				complete the URB */
+				if (itd->hw_next == EHCI_LIST_END) {
+					td_ptd_map->lasttd = 1;
+				}
+
+				/*
+				 * Clear the bit corresponding to this PT D
+				 in the skip map so that it will be processed
+				 on the next schedule traversal.
+				 */
+				skip_map &= ~td_ptd_map->ptd_bitmap;
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_schedule]: Skip map:0x%08x\n",(unsigned int) skip_map);
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.isotdskipmap,
+					skip_map);
+
+				/*
+				 * Update the last map register to indicate
+				 that the newly created PTD is the last PTD
+				 added only if it is larger than the previous
+				 bitmap.
+				 */
+				if (last_map < td_ptd_map->ptd_bitmap) {
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.isotdlastmap,
+						td_ptd_map->ptd_bitmap);
+					iso_dbg(ISO_DBG_DATA,
+						"[pehci_hcd_iso_schedule]:Last Map: 0x%08x\n",
+						td_ptd_map->ptd_bitmap);
+				}
+
+				/*
+				 * Set the ISO_BUF_FILL bit to 1 to indicate
+				 that there is a PTD for ISO that needs to
+				 * be processed.
+				 */
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.buffer_status,
+					(buff_stat | ISO_BUFFER));
+			}
+		}		/* list_for_each(position, itd_sched) */
+		/* Publish the cleared skip bits for the full-speed path
+		 (the high-speed branch already wrote them per-PTD). */
+		isp1763_reg_write16(hcd->dev, hcd->regs.isotdskipmap,skip_map);
+	}/*end of while (igNumOfPkts) */
+
+	iso_dbg(ISO_DBG_INFO,
+		"[pehci_hcd_iso_schedule]: ISO-Frame scheduling done\n");
+	iso_dbg(ISO_DBG_ENTRY, "[pehci_hcd_iso_schedule]: Exit\n");
+}
+
+/*******************************************************************
+ * phcd_iso_handler - ISOCHRONOUS Transfer handler
+ *
+ * phci_hcd *hcd,
+ *      Host controller driver structure which contains almost all data
+ *      needed by the host controller driver to process data and interact
+ *      with the host controller.
+ *
+ * struct pt_regs *regs
+ *
+ * API Description
+ * This is the ISOCHRONOUS Transfer handler, mainly responsible for:
+ *  - Checking the periodic list if there are any ITDs for scheduling or
+ *    removal.
+ *  - For ITD scheduling, converting an ITD into a PTD, which is the data
+ *    structure that the host contrtoller can understand and process.
+ *  - For ITD completion, checking the transfer status and performing the
+ *    required actions depending on status.
+ *  - Freeing up memory used by an ITDs once it is not needed anymore.
+ ************************************************************************/
+
+/* NOTE(review): module-scope ISO debug flag; no reader is visible in
+ this chunk — confirm where it is consulted before changing/removing. */
+int debugiso = 0;
+
+void
+pehci_hcd_iso_worker(phci_hcd * hcd)
+{
+	u32 donemap = 0, skipmap = 0; /*ormask = 0,  buff_stat = 0;*/
+	u32 pendingmap = 0;
+	u32 mask = 0x1, index = 0, donetoclear = 0;
+	u32 uFrIndex = 0;
+	unsigned char last_td = FALSE, iReject = 0;
+	struct isp1763_mem_addr *mem_addr;
+	struct _isp1763_isoptd *iso_ptd;
+	unsigned long length = 0, uframe_cnt, usof_stat;
+	struct ehci_qh *qhead;
+	struct ehci_itd *itd, *current_itd;
+	struct ehci_sitd *sitd=0, *current_sitd=0;
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct list_head *sitd_itd_remove, *position;// *lst_temp;	
+	struct urb *urb;
+	u8 i = 0;
+	unsigned long startAdd = 0;
+	int ret = 0;
+
+
+	iso_ptd = &hcd->isotd;
+
+	/* Check if there are any ITDs scheduled  for processing */
+	if (hcd->periodic_sched == 0) {
+		goto exit;
+	}
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ISTL]);
+	pendingmap = ptd_map_buff->pending_ptd_bitmap;
+
+
+	/*read the done map for interrupt transfers */
+	donemap = isp1763_reg_read16(hcd->dev, hcd->regs.isotddonemap, donemap);
+
+	iso_dbg(ISO_DBG_ENTRY, "[pehci_hcd_iso_worker]: Enter %x \n", donemap);
+	if (!donemap) {		/*there isnt any completed PTD */
+		goto exit;
+	}
+	donetoclear = donemap;
+	uFrIndex = 0;
+	while (donetoclear) {
+		mask = 0x1 << uFrIndex;
+		index = uFrIndex;
+		uFrIndex++;
+		if (!(donetoclear & mask))
+			continue;
+		donetoclear &= ~mask;
+		iso_dbg(ISO_DBG_DATA, "[pehci_hcd_iso_worker]: uFrIndex = %d\n", index);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_worker]:donetoclear = 0x%x mask = 0x%x\n",
+			donetoclear, mask);
+
+
+		if (ptd_map_buff->map_list[index].sitd) {
+			urb = ptd_map_buff->map_list[index].sitd->urb;
+			if (!urb) {
+				printk("ERROR : URB is NULL \n");
+				continue;
+			}
+			sitd = ptd_map_buff->map_list[index].sitd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			qhead=urb->hcpriv;
+#else
+			qhead = urb->ep->hcpriv;
+#endif
+			if (!qhead) {
+				printk("ERROR : Qhead is NULL \n");
+				continue;
+			}
+
+			sitd_itd_remove = &qhead->periodic_list.sitd_itd_head;
+		} else if (ptd_map_buff->map_list[index].itd) {
+			urb = ptd_map_buff->map_list[index].itd->urb;
+			if (!urb) {
+				printk("ERROR : URB is NULL \n");
+				continue;
+			}
+			itd = ptd_map_buff->map_list[index].itd;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			qhead=urb->hcpriv;
+#else
+			qhead = urb->ep->hcpriv;
+#endif
+			if (!qhead) {
+				printk("ERROR : Qhead is NULL \n");
+				continue;
+			}
+
+			sitd_itd_remove = &qhead->periodic_list.sitd_itd_head;
+
+		} else {
+			printk("ERROR : NO sitd in that PTD location : \n");
+			continue;
+		}
+		/* Process ITDs linked to this frame, checking for completed ITDs */
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_iso_worker]: Removal Frame number: %d\n",
+			(int) index);
+		if (list_empty(sitd_itd_remove)) {
+			continue;
+		}
+
+		if (urb) {
+			last_td = FALSE;
+			if (qhead->periodic_list.high_speed == 0)/*FULL SPEED*/
+			{
+
+				/* Get the PTD that was allocated for this
+				particular SITD*/
+				td_ptd_map =
+					&ptd_map_buff->map_list[sitd->
+								sitd_index];
+
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: PTD is done,%d\n",index);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: SITD Index: %d\n",sitd->sitd_index);
+				urb = sitd->urb;
+
+				/*
+				 * Get the base address of the memory allocated
+				 in the PAYLOAD region for this SITD
+				 */
+				mem_addr = &sitd->mem_addr;
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/*
+				 * Read this ptd from the ram address,
+				 address is in the td_ptd_map->ptd_header_addr
+				 */
+
+				isp1763_mem_read(hcd->dev,
+					td_ptd_map->ptd_header_addr,
+					0, (u32 *) iso_ptd,
+					PHCI_QHA_LENGTH, 0);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD0 = 0x%08x\n", iso_ptd->td_info1);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD1 = 0x%08x\n", iso_ptd->td_info2);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD2 = 0x%08x\n", iso_ptd->td_info3);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD3 = 0x%08x\n", iso_ptd->td_info4);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD4 = 0x%08x\n", iso_ptd->td_info5);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD5 = 0x%08x\n", iso_ptd->td_info6);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD6 = 0x%08x\n", iso_ptd->td_info7);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD7 = 0x%08x\n", iso_ptd->td_info8);
+
+				/* Go over the status of each of the 8 Micro Frames */
+				for (uframe_cnt = 0; uframe_cnt < 8;
+					uframe_cnt++) {
+					/*
+					 * We go over the status one at a time. The status bits and their
+					 * equivalent status are:
+					 * Bit 0 - Transaction Error (IN and OUT)
+					 * Bit 1 - Babble (IN token only)
+					 * Bit 2 - Underrun (OUT token only)
+					 */
+					usof_stat =
+						iso_ptd->td_info5 >> (8 +
+						(uframe_cnt * 3));
+
+					switch (usof_stat & 0x7) {
+					case INT_UNDERRUN:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Buffer underrun\n");
+							urb->error_count++;
+						break;
+					case INT_EXACT:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Transaction error\n");
+							printk("[pehci_hcd_iso_worker Error]: Transaction error\n");
+							urb->error_count++;
+						break;
+					case INT_BABBLE:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Babble error\n");
+							printk("[pehci_hcd_iso_worker Error]: Babble error\n");
+						urb->iso_frame_desc[sitd->sitd_index].status
+							= -EOVERFLOW;
+						urb->error_count++;
+						break;
+					}	/* switch(usof_stat & 0x7) */
+				}	/* end of for( ulMicroFrmCnt = 0; ulMicroFrmCnt < 8; ulMicroFrmCnt++) */
+
+				/*
+				 * Get the number of bytes transferred. This indicates the number of
+				 * bytes sent or received for this transaction.
+				 */
+				if (urb->dev->speed != USB_SPEED_HIGH) {
+					/* Length is 1K for full/low speed device */
+					length = PTD_XFERRED_NONHSLENGTH
+						(iso_ptd->td_info4);
+				} else {
+					/* Length is 32K for high speed device */
+					length = PTD_XFERRED_LENGTH(iso_ptd->
+						td_info4);
+				}
+
+				/* Halted, need to finish all the transfer on this endpoint */
+				if (iso_ptd->td_info4 & PTD_STATUS_HALTED) {
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error] PTD Halted\n");
+						printk("[pehci_hcd_iso_worker Error] PTD Halted\n");
+					/*
+					 * When there is an error, do not process the other PTDs.
+					 * Stop at the PTD with the error and remove all other PTDs.
+					 */
+					td_ptd_map->lasttd = 1;
+
+					/*
+					 * In case of halt, next transfer will start with toggle zero,
+					 * USB specs, 5.8.5
+					 */
+					td_ptd_map->datatoggle = 0;
+				}
+
+				/* if(iso_ptd->td_info4 & PTD_STATUS_HALTED) */
+				/* Update the actual length of the transfer from the data we got earlier */
+				urb->iso_frame_desc[sitd->index].actual_length =
+					length;
+
+				/* If the PTD have been executed properly the V bit should be cleared */
+				if (iso_ptd->td_info1 & QHA_VALID) {
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error]: Valid bit not cleared\n");
+						printk("[pehci_hcd_iso_worker Error]: Valid bit not cleared\n");
+					urb->iso_frame_desc[sitd->index].
+						status = -ENOSPC;
+				} else {
+					urb->iso_frame_desc[sitd->index].
+						status = 0;
+				}
+
+				/* Check if this is the last SITD either due to some error or normal completion */
+				if ((td_ptd_map->lasttd)
+					|| (sitd->hw_next == EHCI_LIST_END)) {
+					last_td = TRUE;
+				}
+
+				/* Copy data to/from */
+				if (length && (length <= MAX_PTD_BUFFER_SIZE)) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case IN_PID:
+						/*
+						 * Get the data from the PAYLOAD area and place it into
+						 * the buffer provided by the requestor.
+						 */
+
+						isp1763_mem_read(hcd->dev,
+							(unsigned long)mem_addr->
+							phy_addr, 0,(u32 *)
+							sitd->hw_bufp[0],
+							length, 0);
+
+					case OUT_PID:
+						/*
+						 * urb->actual length was initialized to zero, so for the first
+						 * uFrame having it incremented immediately is not a problem.
+						 */
+						urb->actual_length += length;
+						break;
+					}/* switch(PTD_PID(iso_ptd->td_info2)) */
+				}
+				/* if(length && (length <= MAX_PTD_BUFFER_SIZE)) */
+//				removesitd:
+				/*read skip-map */
+				skipmap =
+					isp1763_reg_read16(hcd->dev,
+						hcd->regs.isotdskipmap,
+						skipmap);
+				iso_dbg(ISO_DBG_DATA,
+					"[%s] : read skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+				if (last_td == TRUE) {
+					/* Start removing the ITDs in the list */
+					while (1) {
+						/*
+						 * This indicates that we are processing the tail PTD.
+						 * Perform cleanup procedure on this last PTD
+						 */
+						if (sitd->hw_next == EHCI_LIST_END) {
+							td_ptd_map =
+								&ptd_map_buff->
+								map_list[sitd->
+								sitd_index];
+
+							/*
+							 * Free up our allocation in the PAYLOAD area so that others can use
+							 * it.
+							 */
+#ifndef COMMON_MEMORY
+							phci_hcd_mem_free
+								(&sitd->
+								mem_addr);
+#endif
+							/* Remove this SITD entry in the SITD list */
+							list_del(&sitd->
+								sitd_list);
+
+							/* Free up the memory allocated for the SITD structure */
+							qha_free(qha_cache,
+								sitd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->ptd_bitmap;
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+
+							/* All ITDs in this list have been successfully removed. */
+							break;
+						} else {
+						/*
+						* This indicates that we stopped due to an error on a PTD that is
+						* not the last in the list. We need to free up this PTD as well as
+						* the PTDs after it.
+						*/
+						/*
+						 * Put the current SITD error onto this variable.
+						 * We will be unlinking this from the list and free up its
+						 * resources later.
+						 */
+							current_sitd = sitd;
+
+							td_ptd_map =
+								&ptd_map_buff->
+								map_list[sitd->
+								sitd_index];
+
+							/*
+							 * Get the next SITD, and place it to the sitd variable.
+							 * In a way we are moving forward in the SITD list.
+							 */
+							sitd = (struct ehci_sitd
+								*)
+								(current_sitd->
+								hw_next);
+							/* Free up the current SITD's resources */
+#ifndef COMMON_MEMORY
+							phci_hcd_mem_free
+								(&current_sitd->
+								 mem_addr);
+#endif
+							/* Remove this SITD entry in the SITD list */
+							list_del(&current_sitd->
+								sitd_list);
+
+							/* Free up the memory allocated for the SITD structure */
+							qha_free(qha_cache,
+								current_sitd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Since it is done, skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->
+								ptd_bitmap;
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+							/*
+							 * Start all over again until it gets to the tail of the
+							 * list of PTDs/ITDs
+							 */
+							continue;
+						}	/* else of if(sitd->hw_next == EHCI_LIST_END) */
+
+						/* It should never get here, but I put this as a precaution */
+						break;
+					}	/*end of while(1) */
+
+					/* Check if there were ITDs that were not processed due to the error */
+					if (urb->status == -EINPROGRESS) {
+						if ((urb->actual_length !=
+							urb->transfer_buffer_length)
+							&& (urb->transfer_flags &
+							URB_SHORT_NOT_OK)) {
+							iso_dbg(ISO_DBG_ERR,
+								"[pehci_hcd_iso_worker Error]: Short Packet\n");
+							urb->status =
+								-EREMOTEIO;
+						} else {
+							urb->status = 0;
+						}
+					}
+
+					urb->hcpriv = 0;
+					iso_dbg(ISO_DBG_DATA,
+						"[%s] : remain skipmap =0x%x\n",
+						__FUNCTION__, skipmap);
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&qhead->memory_addr);
+#endif
+					/* We need to unlock this here, since this was locked when we are called
+					 * from the interrupt handler */
+					spin_unlock(&hcd->lock);
+					/* Perform URB cleanup */
+					iso_dbg(ISO_DBG_INFO,
+						"[pehci_hcd_iso_worker] Complete a URB\n");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd,
+						urb);
+#endif
+					hcd->periodic_more_urb = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						qhead=urb->hcpriv;
+					if (!list_empty(&qhead->ep->urb_list))
+#else
+					if (!list_empty(&urb->ep->urb_list))
+#endif
+						hcd->periodic_more_urb = 1;
+					
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb, urb->status);
+#endif
+
+					spin_lock(&hcd->lock);
+					continue;
+				}
+
+				/* if( last_td == TRUE ) */
+				/*
+				 * If the last_td is not set then we do not need to check for errors and directly
+				 * proceed with the cleaning sequence.
+				 */
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: last_td is not set\n");
+				/*update skipmap */
+				skipmap |= td_ptd_map->ptd_bitmap;
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.isotdskipmap,
+					skipmap);
+				iso_dbg(ISO_DBG_DATA,
+					"%s : remain skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+
+				/* Decrement the count of active PTDs */
+				hcd->periodic_sched--;
+				/*schedule next PTD for this URB */
+				if(qhead->actualptds<qhead->totalptds)
+				{
+					sitd_itd_remove = &qhead->periodic_list.sitd_itd_head;
+					/* find sitd to schedule */
+					list_for_each(position, sitd_itd_remove) {
+						
+						if (qhead->periodic_list.high_speed == 0){
+						/* Get an SITD in the list for processing */
+							current_sitd= list_entry(position, struct ehci_sitd,
+									sitd_list);		
+							if(current_sitd->sitd_index==TD_PTD_INV_PTD_INDEX)
+								break;
+						}	
+					}
+				      if(current_sitd->sitd_index==TD_PTD_INV_PTD_INDEX){
+					  	qhead->actualptds++;
+					/*allocate memory and PTD index */
+						memcpy(&current_sitd->mem_addr,&sitd->mem_addr,sizeof(struct isp1763_mem_addr));
+//				printk("current %x\n",sitd->sitd_index);
+						current_sitd->sitd_index=sitd->sitd_index;
+					/*schedule PTD */
+						td_ptd_map->sitd = current_sitd;
+						hcd->periodic_sched++;
+						pehci_hcd_iso_sitd_schedule(hcd, urb,current_sitd);
+				      }
+
+				/* Remove this SITD from the list of active ITDs */
+				list_del(&sitd->sitd_list);
+
+				/* Free up the memory we allocated for the SITD structure */
+				qha_free(qha_cache, sitd);
+
+					
+				}else{
+#ifndef COMMON_MEMORY
+				phci_hcd_mem_free(&sitd->mem_addr);
+#endif
+				/* Remove this SITD from the list of active ITDs */
+				list_del(&sitd->sitd_list);
+
+				/* Free up the memory we allocated for the SITD structure */
+				qha_free(qha_cache, sitd);
+
+				/*
+				 * Clear the bit associated with this PTD from the grouptdmap and
+				 * make this PTD available for other transfers
+				 */
+				td_ptd_map->state = TD_PTD_NEW;
+				td_ptd_map->sitd = NULL;
+				td_ptd_map->itd = NULL;
+
+				}		
+
+				
+				
+			}	else {	/*HIGH SPEED */
+
+				/* Get an ITD in the list for processing */
+				itd = ptd_map_buff->map_list[index].itd;
+
+				/* Get the PTD that was allocated for this particular ITD. */
+				td_ptd_map =
+					&ptd_map_buff->map_list[itd->itd_index];
+
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: PTD is done , %d\n",
+					index);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: ITD Index: %d\n",
+					itd->itd_index);
+
+				urb = itd->urb;
+
+				/*
+				 * Get the base address of the memory allocated in the
+				 * PAYLOAD region for this ITD
+				 */
+				mem_addr = &itd->mem_addr;
+				memset(iso_ptd, 0,
+					sizeof(struct _isp1763_isoptd));
+
+				/*
+				 * Read this ptd from the ram address,address is in the
+				 * td_ptd_map->ptd_header_addr
+				 */
+
+				isp1763_mem_read(hcd->dev,
+					td_ptd_map->ptd_header_addr,
+					0, (u32 *) iso_ptd,
+					PHCI_QHA_LENGTH, 0);
+
+				/* 
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD0 =
+					0x%08x\n", iso_ptd->td_info1);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD1 =
+					0x%08x\n", iso_ptd->td_info2);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD2 =
+					0x%08x\n", iso_ptd->td_info3);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD3 =
+					0x%08x\n", iso_ptd->td_info4);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD4 =
+					0x%08x\n",iso_ptd->td_info5);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD5 =
+					0x%08x\n", iso_ptd->td_info6);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD6 =
+					0x%08x\n", iso_ptd->td_info7);
+				iso_dbg(ISO_DBG_DATA,
+					"[pehci_hcd_iso_worker]: DWORD7 =
+					0x%08x\n", iso_ptd->td_info8);
+				*/
+
+
+				/* If the PTD have been executed properly,
+				the V bit should be cleared */
+				if (iso_ptd->td_info1 & QHA_VALID) {
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error]: Valid bit not cleared\n");
+					for(i = 0; i<itd->num_of_pkts; i++){
+						urb->iso_frame_desc[itd->index
+							+ i].status = -ENOSPC;
+					}
+				} else {
+					for (i = 0; i<itd->num_of_pkts; i++){
+						urb->iso_frame_desc[itd->index
+							+i].status = 0;
+					}
+				}
+
+				/* Go over the status of each of the 8 Micro Frames */
+				for (uframe_cnt = 0; (uframe_cnt < 8)
+					&& (uframe_cnt < itd->num_of_pkts);
+					uframe_cnt++) {
+					/*
+					 * We go over the status one at a time. The status bits and their
+					 * equivalent status are:
+					 * Bit 0 - Transaction Error (IN and OUT)
+					 * Bit 1 - Babble (IN token only)
+					 * Bit 2 - Underrun (OUT token only)
+					 */
+					usof_stat =
+						iso_ptd->td_info5 >> (8 +
+						(uframe_cnt * 3));
+
+					switch (usof_stat & 0x7) {
+					case INT_UNDERRUN:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Buffer underrun\n");
+						urb->iso_frame_desc[itd->index +
+							uframe_cnt].
+						status = -ECOMM;
+						urb->error_count++;
+						break;
+					case INT_EXACT:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: %p Transaction error\n",
+							urb);
+						urb->iso_frame_desc[itd->index +
+							uframe_cnt].
+							status = -EPROTO;
+						urb->error_count++;
+						debugiso = 25;
+						break;
+					case INT_BABBLE:
+						iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Babble error\n");
+						urb->iso_frame_desc[itd->index +
+							uframe_cnt].
+							status = -EOVERFLOW;
+						urb->error_count++;
+						break;
+					}/* switch(usof_stat & 0x7) */
+				}/* end of for( ulMicroFrmCnt = 0; ulMicroFrmCnt < 8; ulMicroFrmCnt++) */
+
+				/*
+				 * Get the number of bytes transferred. This indicates the number of
+				 * bytes sent or received for this transaction.
+				 */
+
+				/* Length is 32K for high speed device */
+				length = PTD_XFERRED_LENGTH(iso_ptd->td_info4);
+
+				/* Halted, need to finish all the transfer on this endpoint */
+				if (iso_ptd->td_info4 & PTD_STATUS_HALTED) {
+
+					iso_dbg(ISO_DBG_ERR,
+						"[pehci_hcd_iso_worker Error] PTD Halted\n");
+					printk("[pehci_hcd_iso_worker Error] PTD Halted===============\n");
+					/*
+					 * When there is an error, do not process the other PTDs.
+					 * Stop at the PTD with the error and remove all other PTDs.
+					 */
+					td_ptd_map->lasttd = 1;
+
+					/*
+					 * In case of halt, next transfer will start with toggle zero,
+					 * USB specs, 5.8.5
+					 */
+					td_ptd_map->datatoggle = 0;
+				}
+				/* if(iso_ptd->td_info4 & PTD_STATUS_HALTED) */
+				/* Update the actual length of the transfer from the data we got earlier */
+				if (PTD_PID(iso_ptd->td_info2) == OUT_PID) {
+					for (i = 0; i < itd->num_of_pkts; i++){
+						urb->iso_frame_desc[itd->index +
+						i].actual_length =(unsigned int)
+						length / itd->num_of_pkts;
+					}
+				} else{
+					iso_dbg(ISO_DBG_DATA,
+						"itd->num_of_pkts = %d, itd->ssplit = %x\n",
+						itd->num_of_pkts, itd->ssplit);
+					urb->iso_frame_desc[itd->index +
+						0].actual_length =
+						iso_ptd->td_info6 & 0x00000FFF;
+					iso_dbg(ISO_DBG_DATA,
+						"actual length[0] = %d\n",
+						urb->iso_frame_desc[itd->index +0].
+						actual_length);
+
+					if((itd->num_of_pkts > 1)
+						&& ((itd->ssplit & 0x2) == 0x2)
+						&& (urb->iso_frame_desc[itd->index +
+						1].status ==0)) {
+						
+						urb->iso_frame_desc[itd->index +1].
+							actual_length =	(iso_ptd->
+							td_info6 & 0x00FFF000)>> 12;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[1] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index + 1].
+							actual_length);
+					}else{
+						urb->iso_frame_desc[itd->index +1].
+							actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 2)
+						&& ((itd->ssplit & 0x4) == 0x4)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						2].status ==0)) {
+						
+						urb->iso_frame_desc[itd->index +
+							2].actual_length =
+							((iso_ptd->td_info6 &
+							0xFF000000 )>> 24)
+							| ((iso_ptd->td_info7
+							& 0x0000000F)<< 8);
+						
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[2] = %d\n",
+							urb->iso_frame_desc[itd->
+							index + 2].actual_length);
+					} else{
+						urb->iso_frame_desc[itd->index +2].
+							actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 3)
+						&& ((itd->ssplit & 0x8) == 0x8)
+						&& (urb->iso_frame_desc[itd->index +
+						3].status == 0)) {
+
+						urb->iso_frame_desc[itd->index + 3].
+							actual_length =(iso_ptd->
+							td_info7 & 0x0000FFF0)>> 4;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[3] = %d\n",
+							urb->iso_frame_desc[itd->
+							index + 3].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +3].
+							actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 4)
+						&& ((itd->ssplit & 0x10) == 0x10)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						4].status ==0)) {
+
+						urb->iso_frame_desc[itd->index +
+							4].actual_length =
+							(iso_ptd->
+							td_info7 & 0x0FFF0000) >> 16;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[4] = %d\n",
+							urb->iso_frame_desc[itd->index +
+							4].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							4].actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 5)
+						&& ((itd->ssplit & 0x20) == 0x20)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						5].status ==
+						0)) {
+
+						urb->iso_frame_desc[itd->index +
+							5].actual_length =
+							((iso_ptd->
+							td_info7 & 0xF0000000) >> 28) | 
+							((iso_ptd->td_info8 &
+							0x000000FF)
+							<< 4);
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[5] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index +
+							5].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							5].actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 6)
+						&& ((itd->ssplit & 0x40) == 0x40)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						6].status ==0)) {
+
+						urb->iso_frame_desc[itd->index +
+							6].actual_length =
+							(iso_ptd->
+							td_info8 & 0x000FFF00)
+							>> 8;
+						
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[6] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index +
+							6].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							6].actual_length = 0;
+					}
+
+					if ((itd->num_of_pkts > 7)
+						&& ((itd->ssplit & 0x80) == 0x80)
+						&& (urb->
+						iso_frame_desc[itd->index +
+						7].status ==
+						0)) {
+
+						urb->iso_frame_desc[itd->index +
+							7].actual_length =
+							(iso_ptd->
+							td_info8 & 0xFFF00000) >> 20;
+
+						iso_dbg(ISO_DBG_DATA,
+							"actual length[7] = %d\n",
+							urb->
+							iso_frame_desc[itd->
+							index +
+							7].actual_length);
+					} else {
+						urb->iso_frame_desc[itd->index +
+							7].actual_length = 0;
+					}
+				}
+				/* Check if this is the last ITD either due to some error or normal completion */
+				if ((td_ptd_map->lasttd)
+					|| (itd->hw_next == EHCI_LIST_END)) {
+
+					last_td = TRUE;
+
+				}
+
+				/* Copy data to/from */
+				if (length && (length <= MAX_PTD_BUFFER_SIZE)) {
+					switch (PTD_PID(iso_ptd->td_info2)) {
+					case IN_PID:
+						/*
+						 * Get the data from the PAYLOAD area and place it into
+						 * the buffer provided by the requestor.
+						 */
+						/*for first packet*/
+						startAdd = mem_addr->phy_addr;
+						iso_dbg(ISO_DBG_DATA,
+							"start add = %ld hw_bufp[0] = 0x%08x length = %d\n",
+							startAdd,
+							itd->hw_bufp[0],
+							urb->
+							iso_frame_desc[itd->
+							index].actual_length);
+						if (urb->
+							iso_frame_desc[itd->index].
+							status == 0) {
+
+							if (itd->hw_bufp[0] ==0) {
+								dma_addr_t
+									buff_dma;
+
+								buff_dma =
+									(u32) ((unsigned char *) urb->transfer_buffer +
+									urb->iso_frame_desc[itd->index].offset);
+								itd->buf_dma =
+									buff_dma;
+								itd->hw_bufp[0]
+									=
+									buff_dma;
+							}
+							if (itd->hw_bufp[0] !=0) {
+
+								ret = isp1763_mem_read(hcd->dev, (unsigned long)
+									startAdd,
+									0,(u32*)itd->
+									hw_bufp[0],
+									urb->
+									iso_frame_desc
+									[itd->
+									index].
+									actual_length,
+									0);
+
+							} else {
+								printk("isp1763_mem_read data payload fail\n");
+								printk("start add = %ld hw_bufp[0] = 0x%08x length = %d\n",
+									startAdd, itd->hw_bufp[0],
+									urb->iso_frame_desc[itd->index].actual_length);
+								urb->iso_frame_desc[itd->index].status = -EPROTO;
+								urb->error_count++;
+							}
+						}
+
+
+						for (i = 1;
+							i < itd->num_of_pkts;
+							i++) {
+							startAdd +=
+								(unsigned
+								long) (urb->
+								iso_frame_desc
+								[itd->
+								index +
+								i - 1].
+								actual_length);
+
+							iso_dbg(ISO_DBG_DATA,
+								"start add = %ld hw_bufp[%d] = 0x%08x length = %d\n",
+								startAdd, i,
+								itd->hw_bufp[i],
+								urb->
+								iso_frame_desc
+								[itd->index +
+								i].
+								actual_length);
+							if (urb->
+								iso_frame_desc[itd->
+								index + i].
+								status == 0) {
+
+								isp1763_mem_read
+									(hcd->dev,
+									startAdd,
+									0,(u32*)
+									itd->
+									hw_bufp
+									[i],urb->
+									iso_frame_desc
+									[itd->
+									index + i].
+									actual_length,
+									0);
+
+								if (ret == -EINVAL){
+									printk("isp1763_mem_read data payload fail %d\n", i);
+								}
+							}
+						}
+
+					case OUT_PID:
+						/*
+						 * urb->actual length was initialized to zero, so for the first
+						 * uFrame having it incremented immediately is not a problem.
+						 */
+						urb->actual_length += length;
+						break;
+					}	/* switch(PTD_PID(iso_ptd->td_info2)) */
+				}
+
+				/* if(length && (length <= MAX_PTD_BUFFER_SIZE)) */
+//				removeitd:
+				/*read skip-map */
+				skipmap =
+					isp1763_reg_read16(hcd->dev,
+						hcd->regs.isotdskipmap,
+						skipmap);
+
+				iso_dbg(ISO_DBG_DATA,
+					"[%s] : read skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+				if (last_td == TRUE) {
+					/* Start removing the ITDs in the list */
+					while (1) {
+						/*
+						 * This indicates that we are processing the tail PTD.
+						 * Perform cleanup procedure on this last PTD
+						 */
+						if (itd->hw_next ==
+							EHCI_LIST_END) {
+							td_ptd_map =
+							&ptd_map_buff->
+							map_list[itd->
+							itd_index];
+
+							/*
+							 * Free up our allocation in the PAYLOAD area so that others can use
+							 * it.
+							 */
+#ifndef COMMON_MEMORY
+							phci_hcd_mem_free(&itd->
+								mem_addr);
+#endif
+
+							/* Remove this ITD entry in the ITD list */
+							list_del(&itd->
+								itd_list);
+
+							/* Free up the memory allocated for the ITD structure */
+							qha_free(qha_cache,
+								itd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->
+								ptd_bitmap;
+
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+
+							/* All ITDs in this list have been successfully removed. */
+							break;
+						}
+						/* if(itd->hw_next == EHCI_LIST_END) */
+						/*
+						 * This indicates that we stopped due to an error on a PTD that is
+						 * not the last in the list. We need to free up this PTD as well as
+						 * the PTDs after it.
+						 */
+						else {
+							/*
+							 * Put the current ITD error onto this variable.
+							 * We will be unlinking this from the list and free up its
+							 * resources later.
+							 */
+							current_itd = itd;
+
+							td_ptd_map =
+								&ptd_map_buff->
+								map_list[itd->
+								itd_index];
+
+							/*
+							 * Get the next ITD, and place it to the itd variable.
+							 * In a way we are moving forward in the ITD list.
+							 */
+							itd = (struct ehci_itd
+								*) (current_itd->
+								hw_next);
+#ifndef COMMON_MEMORY
+							/* Free up the current ITD's resources */
+							phci_hcd_mem_free
+								(&current_itd->
+								mem_addr);
+#endif
+
+							/* Remove this ITD entry in the ITD list */
+							list_del(&current_itd->
+								itd_list);
+
+							/* Free up the memory allocated for the ITD structure */
+							qha_free(qha_cache,
+								current_itd);
+
+							/* Indicate that the PTD we have used is now free */
+							td_ptd_map->state =
+								TD_PTD_NEW;
+							td_ptd_map->sitd = NULL;
+							td_ptd_map->itd = NULL;
+
+							/* Decrease the number of active PTDs scheduled */
+							hcd->periodic_sched--;
+
+							/* Since it is done, skip this PTD during the next PTD processing. */
+							skipmap |=
+								td_ptd_map->
+								ptd_bitmap;
+							isp1763_reg_write16
+								(hcd->dev,
+								hcd->regs.
+								isotdskipmap,
+								skipmap);
+							/*
+							 * Start all over again until it gets to the tail of the
+							 * list of PTDs/ITDs
+							 */
+							continue;
+						}/* else of if(itd->hw_next == EHCI_LIST_END) */
+						/* It should never get here, but I put this as a precaution */
+						break;
+					}	/*end of while(1) */
+					/* Check if there were ITDs that were not processed due to the error */
+					if (urb->status == -EINPROGRESS) {
+						if ((urb->actual_length !=
+							urb->transfer_buffer_length)
+							&& (urb->
+							transfer_flags &
+							URB_SHORT_NOT_OK)) {
+
+							iso_dbg(ISO_DBG_ERR,
+							"[pehci_hcd_iso_worker Error]: Short Packet\n");
+
+							urb->status =
+								-EREMOTEIO;
+						} else {
+							urb->status = 0;
+						}
+					}
+
+					urb->hcpriv = 0;
+					iso_dbg(ISO_DBG_DATA,
+						"[%s] : remain skipmap =0x%x\n",
+						__FUNCTION__, skipmap);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+//					if (urb->reject.counter) {
+					if (unlikely(atomic_read(&urb->reject))) {// kernel reference code hcd.c
+						iso_dbg("ISO_DBG_INFO, [%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#else
+					if (unlikely(urb->reject)) {
+						iso_dbg("ISO_DBG_INFO, [%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#endif
+
+/*
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)
+
+					if (urb->reject.counter) {
+						iso_dbg("ISO_DBG_INFO, [%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#else
+				        if (unlikely(urb->reject)) {					       
+				
+					
+						iso_dbg("ISO_DBG_INFO, [%s] urb reject\n", __FUNCTION__);
+						iReject = 1;
+					}
+#endif
+*/
+
+#ifdef COMMON_MEMORY
+					phci_hcd_mem_free(&qhead->memory_addr);
+#endif
+					/* We need to unlock this here, since this was locked when we are called */
+					/* from the interrupt handler */
+					spin_unlock(&hcd->lock);
+					/* Perform URB cleanup */
+					iso_dbg(ISO_DBG_INFO,
+						"[pehci_hcd_iso_worker] Complete a URB\n");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd, urb);
+#endif
+					hcd->periodic_more_urb = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						qhead=urb->hcpriv;
+					if (!list_empty(&qhead->ep->urb_list)){
+
+#else
+					if (!list_empty(&urb->ep->urb_list)){
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						if (urb->hcpriv== periodic_ep[0]){
+#else
+						if (urb->ep == periodic_ep[0]){
+#endif
+							hcd->periodic_more_urb =
+							1;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						} else if (urb->hcpriv==
+							 periodic_ep[1]){
+#else
+						} else if (urb->ep ==
+							 periodic_ep[1]){
+#endif							 
+							hcd->periodic_more_urb =
+							2;
+						} else {
+							hcd->periodic_more_urb =
+							0;
+						}
+
+
+					}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+				usb_hcd_giveback_urb(&hcd->usb_hcd, urb, 
+										urb->status);
+#endif
+
+					spin_lock(&hcd->lock);
+					continue;
+				}
+				/* if( last_td == TRUE ) */
+				/*
+				 * If the last_td is not set then we do not need to check for errors and directly
+				 * proceed with the cleaning sequence.
+				 */
+				iso_dbg(ISO_DBG_INFO,
+					"[pehci_hcd_iso_worker]: last_td is not set\n");
+				/*update skipmap */
+				skipmap |= td_ptd_map->ptd_bitmap;
+				isp1763_reg_write16(hcd->dev,
+					hcd->regs.isotdskipmap,
+					skipmap);
+				iso_dbg(ISO_DBG_DATA,
+					"%s : remain skipmap =0x%x\n",
+					__FUNCTION__, skipmap);
+
+				/* Decrement the count of active PTDs */
+				hcd->periodic_sched--;
+#ifndef COMMON_MEMORY
+				/* Free up the memory we allocated in the PAYLOAD area */
+				phci_hcd_mem_free(&itd->mem_addr);
+#endif
+				/* Remove this ITD from the list of active ITDs */
+				list_del(&itd->itd_list);
+
+				/* Free up the memory we allocated for the ITD structure */
+				qha_free(qha_cache, itd);
+				/*
+				 * Clear the bit associated with this PTD from the grouptdmap and
+				 * make this PTD available for other transfers
+				 */
+				td_ptd_map->state = TD_PTD_NEW;
+				td_ptd_map->sitd = NULL;
+				td_ptd_map->itd = NULL;
+			}	/*end of HIGH SPEED */
+		}		/* end of list_for_each_safe(position, lst_temp, itd_remove) */
+		iso_dbg(ISO_DBG_INFO,
+			"[pehci_hcd_iso_worker]: ISO-Frame removal done\n");
+
+
+	}			/* while donetoclear */
+
+
+	if (iReject) {
+		spin_unlock(&hcd->lock);
+		if (hcd->periodic_more_urb) {
+
+			if(periodic_ep[hcd->periodic_more_urb])
+			while (&periodic_ep[hcd->periodic_more_urb - 1]->
+				urb_list) {
+
+				urb = container_of(periodic_ep
+					[hcd->periodic_more_urb -
+					1]->urb_list.next,
+					struct urb, urb_list);
+				
+				if (urb) {
+					urb->status = -ENOENT;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		if(!usb_hcd_check_unlink_urb(&hcd->usb_hcd, urb,0))
+					usb_hcd_unlink_urb_from_ep(&hcd->
+					usb_hcd,urb);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+					usb_hcd_giveback_urb(&hcd->usb_hcd, urb);
+#else
+					usb_hcd_giveback_urb(&hcd->usb_hcd, urb,
+						urb->status);
+#endif
+				}
+			}
+		}
+
+		spin_lock(&hcd->lock);
+	}
+
+	/* When there is no more PTDs queued for scheduling or removal
+	 * clear the buffer status to indicate there are no more PTDs for
+	 * processing and set the skip map to 1 to indicate that the first
+	 * PTD is also the last PTD.
+	 */
+
+	if (hcd->periodic_more_urb) {
+		int status = 0;
+		iso_dbg(ISO_DBG_INFO,
+			"[phcd_iso_handler]: No more PTDs queued\n");
+		hcd->periodic_sched = 0;
+		phcd_store_urb_pending(hcd, hcd->periodic_more_urb, NULL,
+				       &status);
+		hcd->periodic_more_urb = 0;
+	}
+exit:
+	iso_dbg(ISO_DBG_ENTRY, "-- %s: Exit\n", __FUNCTION__);
+}				/* end of pehci_hcd_iso_worker */
+
+#endif /* CONFIG_ISO_SUPPORT */
+
+/*interrupt transfer handler*/
+/********************************************************
+  1. read done map
+  2. read the ptd to see any errors
+  3. copy the payload to and from
+  4. update ehci td
+  5. make new ptd if transfer there and earlier done
+  6. schedule
+ *********************************************************/
+static void
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+pehci_hcd_intl_worker(phci_hcd * hcd, struct pt_regs *regs)
+#else
+pehci_hcd_intl_worker(phci_hcd * hcd)
+#endif
+{
+	int i =	0;
+	u16 donemap = 0, donetoclear;
+	u16 mask = 0x1,	index =	0;
+	u16 pendingmap = 0;
+	u16 location = 0;
+	u32 length = 0;
+	u16 skipmap = 0;
+	u16 ormask = 0;
+	u32 usofstatus = 0;
+	struct urb *urb;
+	struct ehci_qtd	*qtd = 0;
+	struct ehci_qh *qh = 0;
+
+	struct _isp1763_qhint *qhint = &hcd->qhint;
+
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	u16 dontschedule = 0;
+
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_INTL]);
+	pendingmap = ptd_map_buff->pending_ptd_bitmap;
+
+	/*read the done	map for	interrupt transfers */
+	donetoclear = donemap =
+		isp1763_reg_read16(hcd->dev, hcd->regs.inttddonemap, donemap);
+	if (donemap) {
+		/*skip done tds	*/
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+			skipmap);
+		skipmap	|= donemap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap, skipmap);
+		donemap	|= pendingmap;
+	}
+	/*if sof interrupt is enabled */
+#ifdef MSEC_INT_BASED
+	else {
+		/*if there is something	pending	, put this transfer in */
+		if (ptd_map_buff->pending_ptd_bitmap) {
+			pehci_hcd_schedule_pending_ptds(hcd, pendingmap, (u8)
+				TD_PTD_BUFF_TYPE_INTL,
+				1);
+		}
+		//return 0;
+		goto exit;
+	}
+#else
+	else {
+	goto exit;	
+	//return 0;
+	}
+
+#endif
+
+
+	ormask = isp1763_reg_read16(hcd->dev, hcd->regs.int_irq_mask_or,
+		ormask);
+	/*process all the endpoints first those	are done */
+	donetoclear = donemap;
+	while (donetoclear) {
+		/*index	is the number of endpoints open	currently */
+		index =	donetoclear & mask;
+		donetoclear &= ~mask;
+		mask <<= 1;
+		/*what if we are in the	middle of schedule
+		   where nothing is done */
+		if (!index) {
+			location++;
+			continue;
+		}
+
+		/*read our td_ptd_map */
+		td_ptd_map = &ptd_map_buff->map_list[location];
+
+		/*if this one is already in the	removal	*/
+		if (td_ptd_map->state == TD_PTD_REMOVE ||
+			td_ptd_map->state == TD_PTD_NEW) {
+			pehci_check("interrupt td is being removed\n");
+			/*this will be handled by urb_remove */
+			/*if this is last urb no need to complete it again */
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*if there is something	pending	*/
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			continue;
+		}
+
+
+		/*if we	found something	already	in */
+		if (!(skipmap &	td_ptd_map->ptd_bitmap)) {
+			pehci_check("intr td_ptd_map %x,skipnap	%x\n",
+			td_ptd_map->ptd_bitmap, skipmap);
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;;
+			location++;
+			continue;
+		}
+
+
+		if (td_ptd_map->state == TD_PTD_NEW) {
+			pehci_check
+				("interrupt not	come here, map %x,location %d\n",
+				 td_ptd_map->ptd_bitmap, location);
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			location++;
+			continue;
+		}
+
+		/*move to the next schedule */
+		location++;
+		/*endpoint, td,	urb and	memory
+		 * for current transfer*/
+		qh = td_ptd_map->qh;
+		qtd = td_ptd_map->qtd;
+		if (qtd->state & QTD_STATE_NEW)	{
+			/*we need to schedule it */
+			goto schedule;
+		}
+		urb = qtd->urb;
+		mem_addr = &qtd->mem_addr;
+
+		/*clear	the irq	mask for this transfer */
+		ormask &= ~td_ptd_map->ptd_bitmap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or,
+			ormask);
+
+		ptd_map_buff->active_ptds--;
+		memset(qhint, 0, sizeof(struct _isp1763_qhint));
+
+		/*read this ptd	from the ram address,address is	in the
+		   td_ptd_map->ptd_header_addr */
+		isp1763_mem_read(hcd->dev, td_ptd_map->ptd_header_addr,	0,
+				 (u32 *) (qhint), PHCI_QHA_LENGTH, 0);
+
+#ifdef PTD_DUMP_COMPLETE
+		printk("INTL PTD header after COMPLETION\n");
+		printk("CDW0: 0x%08X\n", qhint->td_info1);
+		printk("CDW1: 0x%08X\n", qhint->td_info2);
+		printk("CDW2: 0x%08X\n", qhint->td_info3);
+		printk("CDW3: 0x%08X\n", qhint->td_info4);
+#endif
+
+		/*statuc of 8 uframes */
+		for (i = 0; i <	8; i++)	{
+			/*take care of errors */
+			usofstatus = qhint->td_info5 >>	(8 + i * 3);
+			switch (usofstatus & 0x7) {
+			case INT_UNDERRUN:
+				pehci_print("under run , %x\n",	usofstatus);
+				break;
+			case INT_EXACT:
+				pehci_print("transaction error,	%x\n",
+					    usofstatus);
+				break;
+			case INT_BABBLE:
+				pehci_print("babble error, %x\n", usofstatus);
+				break;
+			}
+		}
+
+		if (urb->dev->speed != USB_SPEED_HIGH) {
+			/*length is 1K for full/low speed device */
+			length = PTD_XFERRED_NONHSLENGTH(qhint->td_info4);
+		} else {
+			/*length is 32K	for high speed device */
+			length = PTD_XFERRED_LENGTH(qhint->td_info4);
+		}
+
+		pehci_hcd_update_error_status(qhint->td_info4, urb);
+		/*halted, need to finish all the transfer on this endpoint */
+		if (qhint->td_info4 & PTD_STATUS_HALTED) {
+			qtd->state |= QTD_STATE_LAST;
+			/*in case of halt, next	transfer will start with toggle	zero,
+			 *USB speck, 5.8.5*/
+			qh->datatoggle = td_ptd_map->datatoggle	= 0;
+			donemap	&= ~td_ptd_map->ptd_bitmap;
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			dontschedule = 1;
+			goto copylength;
+		}
+
+
+		copylength:
+		/*preserve the current data toggle */
+		qh->datatoggle = td_ptd_map->datatoggle	=
+			PTD_NEXTTOGGLE(qhint->td_info4);
+		/*copy data from the host */
+		switch (PTD_PID(qhint->td_info2)) {
+		case IN_PID:
+			if (length && (length <= MAX_PTD_BUFFER_SIZE))
+				/*do read only when there is somedata */
+				isp1763_mem_read(hcd->dev,
+					(u32) mem_addr->phy_addr, 0,
+					urb->transfer_buffer +
+					urb->actual_length, length, 0);
+
+		case OUT_PID:
+			urb->actual_length += length;
+			qh->hw_current = qtd->hw_next;
+			phci_hcd_mem_free(&qtd->mem_addr);
+			qtd->state &= ~QTD_STATE_NEW;
+			qtd->state |= QTD_STATE_DONE;
+			break;
+		}
+
+		if (qtd->state & QTD_STATE_LAST) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, regs);
+#else
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+			if (dontschedule) {	/*cleanup will start from drivers */
+				dontschedule = 0;
+				continue;
+			}
+
+			/*take the next	if in the queue	*/
+			if (!list_empty(&qh->qtd_list))	{
+				struct list_head *head;
+				/*last td of previous urb */
+				head = &qh->qtd_list;
+				qtd = list_entry(head->next, struct ehci_qtd,
+					qtd_list);
+				td_ptd_map->qtd	= qtd;
+				qh->hw_current = cpu_to_le32(qtd);
+				qh->qh_state = QH_STATE_LINKED;
+
+			} else {
+				td_ptd_map->qtd	=
+						 (struct ehci_qtd *) le32_to_cpu(0);
+				qh->hw_current = cpu_to_le32(0);
+				qh->qh_state = QH_STATE_IDLE;
+				donemap	&= ~td_ptd_map->ptd_bitmap;
+				ptd_map_buff->pending_ptd_bitmap &= 
+						~td_ptd_map->ptd_bitmap;
+	       			td_ptd_map->state=TD_PTD_NEW;
+				continue;
+			}
+
+		}
+
+		schedule:
+		{
+			/*current td comes from	qh->hw_current */
+			ptd_map_buff->pending_ptd_bitmap &=
+				~td_ptd_map->ptd_bitmap;
+			ormask |= td_ptd_map->ptd_bitmap;
+			ptd_map_buff->active_ptds++;
+			pehci_check
+				("inter	schedule next qtd %p, active tds %d\n",
+				 qtd, ptd_map_buff->active_ptds);
+			pehci_hcd_qtd_schedule(hcd, qtd, qh, td_ptd_map);
+		}
+
+	}			/*end of while */
+
+
+	/*clear	all the	tds inside this	routine	*/
+	skipmap	&= ~donemap;
+	isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap, skipmap);
+	ormask |= donemap;
+	isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or, ormask);
+exit:
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	
+//	return (int)0;
+}
+
+/*atl(bulk/control) transfer handler
+  Runs with hcd->lock held (from the IRQ handler or the msec-based worker).
+  1. read done map (or run off the skip map when MSEC_INT_BASED, since the
+     done map may not be set for a completed transfer in that mode)
+  2. read the ptd back from chip memory	to see any errors
+  3. copy the payload to and from chip payload memory
+  4. update ehci td
+  5. make new ptd if transfer there and	earlier	done
+  6. schedule
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_hcd_atl_worker(phci_hcd * hcd, struct pt_regs *regs)
+#else
+static void
+pehci_hcd_atl_worker(phci_hcd * hcd)
+#endif
+{
+	u16 donemap = 0, donetoclear = 0;
+	u16 pendingmap = 0;
+	u32 rl = 0;
+	u16 mask = 0x1,	index =	0;
+	u16 location = 0;
+	u32 nakcount = 0;
+	u32 active = 0;
+	u32 length = 0;
+	u16 skipmap = 0;
+	u16 tempskipmap	= 0;
+	u16 ormask = 0;
+	struct urb *urb;
+	struct ehci_qtd	*qtd = 0;
+	struct ehci_qh *qh;
+	struct _isp1763_qha atlqha;
+	struct _isp1763_qha *qha;
+	td_ptd_map_t *td_ptd_map;
+	td_ptd_map_buff_t *ptd_map_buff;
+	urb_priv_t *urbpriv = 0;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	u16 dontschedule = 0;
+	ptd_map_buff = &(td_ptd_map_buff[TD_PTD_BUFF_TYPE_ATL]);
+	pendingmap = ptd_map_buff->pending_ptd_bitmap;
+
+#ifdef MSEC_INT_BASED
+	/*running on skipmap rather donemap,
+	   some	cases donemap may not be set
+	   for complete	transfer
+	 */
+	skipmap	= isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+	tempskipmap = ~skipmap;
+	tempskipmap &= 0xffff;
+
+	if (tempskipmap) {
+		donemap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltddonemap,
+					   donemap);
+		skipmap	|= donemap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+		qha = &atlqha;
+		donemap	|= pendingmap;
+		tempskipmap &= ~donemap;
+	}  else {
+
+	/*if sof interrupt enabled */
+
+		/*if there is something	pending	, put this transfer in */
+		if (pendingmap)	{
+			pehci_hcd_schedule_pending_ptds(hcd, pendingmap, (u8)
+				TD_PTD_BUFF_TYPE_ATL,
+				1);
+		}
+		goto exit;
+	}
+#else
+
+	donemap	= isp1763_reg_read16(hcd->dev, hcd->regs.atltddonemap, donemap);
+	if (donemap) {
+
+
+		pehci_info("DoneMap Value in ATL Worker	%x\n", donemap);
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+					   skipmap);
+		skipmap	|= donemap;
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+		qha = &atlqha;
+	} else {
+		pehci_info("Done Map Value is 0x%X \n",	donemap);
+		pehci_entry("--	%s: Exit abnormally with DoneMap all zero \n",
+			    __FUNCTION__);
+		goto exit;
+
+	}
+#endif
+
+	/*read the interrupt mask registers */
+	ormask = isp1763_reg_read16(hcd->dev, hcd->regs.atl_irq_mask_or,
+				    ormask);
+
+
+	/*this map is used only	to update and
+	 * scheduling for the tds who are not
+	 * complete. the tds those are complete
+	 * new schedule	will happen from
+	 * td_ptd_submit_urb routine
+	 * */
+	donetoclear = donemap;
+	/*we will be processing	skipped	tds also */
+	donetoclear |= tempskipmap;
+	/*process all the endpoints first those	are done */
+	while (donetoclear) {
+		/*index	is the number of endpoint open currently */
+		index =	donetoclear & mask;
+		donetoclear &= ~mask;
+		mask <<= 1;
+		/*what if we are in the	middle of schedule
+		   where nothing is done
+		 */
+		if (!index) {
+			location++;
+			continue;
+		}
+
+		/*read our td_ptd_map */
+		td_ptd_map = &ptd_map_buff->map_list[location];
+
+		/*urb is in remove */
+		if (td_ptd_map->state == TD_PTD_NEW ||
+			td_ptd_map->state == TD_PTD_REMOVE)	{
+			pehci_check
+				("atl td is being removed,map %x, skipmap %x\n",
+				 td_ptd_map->ptd_bitmap, skipmap);
+			pehci_check("temp skipmap %x, pendign map %x,done %x\n",
+				    tempskipmap, pendingmap, donemap);
+
+			/*unlink urb will take care of this */
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			location++;
+			continue;
+		}
+
+
+		/*move to the next endpoint */
+		location++;
+		/*endpoint, td,	urb and	memory
+		 * for current endpoint*/
+		qh = td_ptd_map->qh;
+		qtd = td_ptd_map->qtd;
+		if (!qh	|| !qtd) {
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			continue;
+		}
+#ifdef MSEC_INT_BASED
+		/*new td must be scheduled */
+		if ((qtd->state	& QTD_STATE_NEW)	/*&&
+							   (pendingmap & td_ptd_map->ptd_bitmap) */ ) {
+			/*this td will come here first time from
+			 *pending tds, so its qh->hw_current needs to
+			 * adjusted
+			 */
+			qh->hw_current = QTD_NEXT(qtd->qtd_dma);
+			goto schedule;
+		}
+#endif
+		urb = qtd->urb;
+		if (urb	== NULL) {
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			continue;
+		}
+		urbpriv	= (urb_priv_t *) urb->hcpriv;
+		mem_addr = &qtd->mem_addr;
+
+#ifdef MSEC_INT_BASED
+		/*check	here for the td	if its done */
+		if (donemap & td_ptd_map->ptd_bitmap) {
+			/*nothing to do	*/
+			;
+		} else {
+			/*if td	is not done, lets check	how long
+			   its been scheduled
+			 */
+			if (tempskipmap	& td_ptd_map->ptd_bitmap) {
+				/*i will give 20 msec to complete */
+				if (urbpriv->timeout < 20) {
+					urbpriv->timeout++;
+					continue;
+				}
+				urbpriv->timeout++;
+				/*otherwise check its status */
+			}
+
+		}
+#endif
+		memset(qha, 0, sizeof(struct _isp1763_qha));
+
+		/*read this ptd	from the ram address,address is	in the
+		   td_ptd_map->ptd_header_addr */
+		isp1763_mem_read(hcd->dev, td_ptd_map->ptd_header_addr,	0,
+				 (u32 *) (qha),	PHCI_QHA_LENGTH, 0);
+
+#ifdef PTD_DUMP_COMPLETE
+		printk("ATL PTD header after COMPLETION\n");
+		printk("CDW0: 0x%08X\n", qha->td_info1);
+		printk("CDW1: 0x%08X\n", qha->td_info2);
+		printk("CDW2: 0x%08X\n", qha->td_info3);
+		printk("CDW3: 0x%08X\n", qha->td_info4);
+#endif
+
+#ifdef MSEC_INT_BASED
+		/*since	we are running on skipmap
+		   tds will be checked for completion state
+		 */
+		if ((qha->td_info1 & QHA_VALID)) {
+
+			pehci_check
+				("pendign map %x, donemap %x, tempskipmap %x\n",
+				 pendingmap, donemap, tempskipmap);
+			/*this could be	one of the unprotected urbs, clear it */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			/*here also we need to increment the tds timeout count */
+			urbpriv->timeout++;
+			continue;
+		} else {
+			/*this td is going to be done,
+			   this	td could be the	one un-skipped but no donemap or
+			   maybe it could be one of those where	we get unprotected urbs,
+			   so checking against tempskipmap may not give	us correct td
+			 */
+
+			skipmap	|= td_ptd_map->ptd_bitmap;
+			isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+					    skipmap);
+
+			/*of course this is going to be	as good
+			   as td that is done and donemap is set
+			   also	skipmap	is set
+			 */
+			donemap	|= td_ptd_map->ptd_bitmap;
+		}
+#endif
+		/*clear	the corrosponding mask register	*/
+		ormask &= ((~td_ptd_map->ptd_bitmap) & 0xffff);
+		isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+			ormask);
+
+		ptd_map_buff->active_ptds--;
+
+		urbpriv->timeout = 0;
+
+		/*take care of errors */
+		pehci_hcd_update_error_status(qha->td_info4, urb);
+		/*halted, need to finish all the transfer on this endpoint */
+		if (qha->td_info4 & PTD_STATUS_HALTED) {
+
+			printk(KERN_NOTICE "Endpoint is	halted\n");
+			qtd->state |= QTD_STATE_LAST;
+
+			donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			/*in case pending */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			/*in case of halt, next	transfer will start with toggle
+			   zero,USB speck, 5.8.5 */
+			qh->datatoggle = td_ptd_map->datatoggle	= 0;
+			/*cleanup the ping */
+			qh->ping = 0;
+			/*force	cleanup	after this */
+			dontschedule = 1;
+			goto copylength;
+		}
+
+
+
+		/*read the reload count	(retry counter reload value) */
+		rl = (qha->td_info3 >> 23);
+		rl &= 0xf;
+
+
+
+		/*if there is a	transaction error and the status is not	halted,
+		 * process whatever the	length we got.if the length is what we
+		 * expected complete the transfer*/
+		if ((qha->td_info4 & PTD_XACT_ERROR) &&
+			!(qha->td_info4 & PTD_STATUS_HALTED) &&
+			(qha->td_info4 & QHA_ACTIVE)) {
+
+			if (PTD_XFERRED_LENGTH(qha->td_info4) == qtd->length) {
+				;	/*nothing to do	its fake */
+			} else {
+
+				pehci_print
+					("xact error, info1 0x%08x,info4 0x%08x\n",
+					 qha->td_info1,	qha->td_info4);
+
+				/*if this is the case then we need to
+				   resubmit the	td again */
+				qha->td_info1 |= QHA_VALID;
+				skipmap	&= ~td_ptd_map->ptd_bitmap;
+				ormask |= td_ptd_map->ptd_bitmap;
+				donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+
+				/*set the retry	count to 3 again */
+				qha->td_info4 |= (rl <<	19);
+				/*set the active bit, if cleared, will be cleared if we	have some length */
+				qha->td_info4 |= QHA_ACTIVE;
+
+				/*clear	the xact error */
+				qha->td_info4 &= ~PTD_XACT_ERROR;
+				isp1763_reg_write16(hcd->dev,
+						    hcd->regs.atl_irq_mask_or,
+						    ormask);
+
+				/*copy back into the header, payload is	already
+				 * present no need to write again
+				 */
+				isp1763_mem_write(hcd->dev,
+						  td_ptd_map->ptd_header_addr,
+						  0, (u32 *) (qha),
+						  PHCI_QHA_LENGTH, 0);
+				/*unskip this td */
+				isp1763_reg_write16(hcd->dev,
+						    hcd->regs.atltdskipmap,
+						    skipmap);
+				continue;
+			}
+			goto copylength;
+		}
+
+		/*check	for the	nak count and active condition
+		 * to reload the ptd if	needed*/
+		nakcount = qha->td_info4 >> 19;
+		nakcount &= 0xf;
+		active = qha->td_info4 & QHA_ACTIVE;
+		/*if nak count is zero and active bit is set , it
+		 *means	that device is naking and need to reload
+		 *the same td*/
+		if (!nakcount && active) {
+			pehci_info("%s:	ptd is going for reload,length %d\n",
+				   __FUNCTION__, length);
+			/*make this td valid */
+			qha->td_info1 |= QHA_VALID;
+			donemap	&= ((~td_ptd_map->ptd_bitmap & 0xffff));
+			/*just like fresh td */
+
+			/*set the retry	count to 3 again */
+			qha->td_info4 |= (rl <<	19);
+			qha->td_info4 &= ~0x3;
+			qha->td_info4 |= (0x2 << 23);
+			ptd_map_buff->active_ptds++;
+			skipmap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+			ormask |= td_ptd_map->ptd_bitmap;
+			isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or,
+					    ormask);
+			/*copy back into the header, payload is	already
+			 * present no need to write again */
+			isp1763_mem_write(hcd->dev, td_ptd_map->ptd_header_addr,
+					  0, (u32 *) (qha), PHCI_QHA_LENGTH, 0);
+			/*unskip this td */
+			isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+					    skipmap);
+			continue;
+		}
+
+		copylength:
+		/*read the length transferred */
+		length = PTD_XFERRED_LENGTH(qha->td_info4);
+
+
+		/*short	complete in case of BULK only */
+		if ((length < qtd->length) && usb_pipebulk(urb->pipe)) {
+
+			/*if current ptd is not	able to	fetech enough data as
+			 * been	asked then device has no data, so complete this	transfer
+			 * */
+			/*can we complete our transfer here */
+			if ((urb->transfer_flags & URB_SHORT_NOT_OK)) {
+				pehci_check
+					("short	read, length %d(expected %d)\n",
+					 length, qtd->length);
+				urb->status = -EREMOTEIO;
+				/*if this is the only td,donemap will be cleared
+				   at completion, otherwise take the next one
+				 */
+				donemap	&= ((~td_ptd_map->ptd_bitmap) &	0xffff);
+				ptd_map_buff->pending_ptd_bitmap &=
+					((~td_ptd_map->ptd_bitmap) & 0xffff);
+				/*force	the cleanup from here */
+				dontschedule = 1;
+			}
+
+			/*this will be the last	td,in case of short read/write */
+			/*donemap, pending maps	will be	handled	at the while scheduling	or completion */
+			qtd->state |= QTD_STATE_LAST;
+
+		}
+		/*preserve the current data toggle */
+		qh->datatoggle = td_ptd_map->datatoggle	=
+			PTD_NEXTTOGGLE(qha->td_info4);
+		qh->ping = PTD_PING_STATE(qha->td_info4);
+		/*copy data from */
+		/* NOTE(review): IN_PID deliberately falls through to OUT_PID
+		   to share the length/bookkeeping code below. */
+		switch (PTD_PID(qha->td_info2))	{
+		case IN_PID:
+			qh->ping = 0;
+			/*do read only when there is some data */
+			if (length && (length <= HC_ATL_PL_SIZE)) {
+				isp1763_mem_read(hcd->dev,
+						 (u32) mem_addr->phy_addr, 0,
+						 (u32*) (le32_to_cpu(qtd->hw_buf[0])), length, 0);
+#if 0
+			//	printk("IN PayLoad length:%d\n", length); 
+			if(length<=4)	{
+					int i=0;
+					int *data_addr= qtd->hw_buf[0];
+					printk("\n");
+					for(i=0;i<length;i+=4) printk("[0x%X] ",*data_addr++);
+					printk("\n");
+				}
+#endif
+			}
+
+		case OUT_PID:
+			urb->actual_length += length;
+			qh->hw_current = qtd->hw_next;
+			phci_hcd_mem_free(&qtd->mem_addr);
+			qtd->state |= QTD_STATE_DONE;
+
+			break;
+		case SETUP_PID:
+			qh->hw_current = qtd->hw_next;
+			phci_hcd_mem_free(&qtd->mem_addr);
+			qtd->state |= QTD_STATE_DONE;
+			break;
+		}
+
+		if (qtd->state & QTD_STATE_LAST) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, regs);
+#else
+			pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+			if (dontschedule) {	/*cleanup will start from drivers */
+				dontschedule = 0;
+				/*so that we can take next one */
+				qh->qh_state = QH_STATE_TAKE_NEXT;
+				continue;
+			}
+			/*take the next	if in the queue	*/
+			if (!list_empty(&qh->qtd_list))	{
+				struct list_head *head;
+				/*last td of previous urb */
+				head = &qh->qtd_list;
+				qtd = list_entry(head->next, struct ehci_qtd,
+						 qtd_list);
+				td_ptd_map->qtd	= qtd;
+				qh->hw_current = cpu_to_le32(qtd);
+				qh->qh_state = QH_STATE_LINKED;
+
+			} else {
+				td_ptd_map->qtd	=
+					(struct	ehci_qtd *) le32_to_cpu(0);
+				qh->hw_current = cpu_to_le32(0);
+				qh->qh_state = QH_STATE_TAKE_NEXT;
+				donemap	&= ((~td_ptd_map->ptd_bitmap & 0xffff));
+				ptd_map_buff->pending_ptd_bitmap &=
+					((~td_ptd_map->ptd_bitmap) & 0xffff);
+				continue;
+			}
+		}
+
+#ifdef MSEC_INT_BASED
+		schedule:
+#endif
+		{
+			/*current td comes from	qh->hw_current */
+			ptd_map_buff->pending_ptd_bitmap &=
+				((~td_ptd_map->ptd_bitmap) & 0xffff);
+			td_ptd_map->qtd	=
+				(struct	ehci_qtd
+				 *) (le32_to_cpu(qh->hw_current));
+			qtd = td_ptd_map->qtd;
+			ormask |= td_ptd_map->ptd_bitmap;
+			ptd_map_buff->active_ptds++;
+			pehci_hcd_qtd_schedule(hcd, qtd, qh, td_ptd_map);
+		}
+
+	}			/*end of while */
+
+/*clear	all the	tds inside this	routine*/
+	skipmap	&= ((~donemap) & 0xffff);
+	isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap, skipmap);
+	ormask |= donemap;
+	isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or, ormask);
+exit:
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+/*--------------------------------------------------------*
+  root hub functions
+ *--------------------------------------------------------*/
+
+/*return root hub descriptor, can not fail
+ * Fills *desc with the emulated root-hub descriptor for the single
+ * downstream port of the ISP1763 (per-port power/overcurrent/indicator
+ * hub characteristics). */
+static void
+pehci_hub_descriptor(phci_hcd *	hcd, struct usb_hub_descriptor *desc)
+{
+	u32 ports = 0;
+	u16 temp = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* 0x11 & 0xf == 1: this controller exposes one port */
+	ports =	0x11;
+	ports =	ports &	0xf;
+
+	pehci_info("%s:	number of ports	%d\n", __FUNCTION__, ports);
+
+	desc->bDescriptorType =	0x29;	/* hub descriptor type */
+	desc->bPwrOn2PwrGood = 10;	/* 20 ms, in units of 2 ms */
+
+	desc->bHubContrCurrent = 0;
+
+	desc->bNbrPorts	= ports;
+	temp = 1 + (ports / 8);		/* bytes per port bitmap */
+	desc->bDescLength = 7 +	2 * temp;
+	/* two bitmaps:	 ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	memset(&desc->DeviceRemovable[0], 0, temp);
+	/* Bug fix: the 0xff fill must begin at PortPwrCtrlMask[0].  The old
+	 * code started at index 'temp' -- a stale offset carried over from
+	 * the contiguous ->bitmap[] descriptor layout -- which left
+	 * PortPwrCtrlMask[0] uninitialized and wrote past the intended
+	 * region. */
+	memset(&desc->PortPwrCtrlMask[0], 0xff, temp);
+
+	temp = 0x0008;		/* per-port overcurrent	reporting */
+	temp |=	0x0001;		/* per-port power control */
+	temp |=	0x0080;		/* per-port indicators (LEDs) */
+	desc->wHubCharacteristics = cpu_to_le16(temp);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+/*after	reset on root hub,
+ * device high speed or	non-high speed
+ *
+ * Returns the (possibly updated) port status.  If the port connected but
+ * did not enable after reset, the device is full/low speed: ownership is
+ * handed off to the companion controller by setting PORT_OWNER in the
+ * port status/control register.
+ * */
+static int
+phci_check_reset_complete(phci_hcd * hcd, int index, int port_status)
+{
+	pehci_print("check reset complete\n");
+	/* nothing attached: clear the pending reset bookkeeping */
+	if (!(port_status & PORT_CONNECT)) {
+		hcd->reset_done[index] = 0;
+		return port_status;
+	}
+
+	/* if reset finished and it's still not	enabled	-- handoff */
+	if (!(port_status & PORT_PE)) {
+		printk("port %d	full speed --> companion\n", index + 1);
+		port_status |= PORT_OWNER;
+		isp1763_reg_write32(hcd->dev, hcd->regs.ports[index],
+				    port_status);
+
+	} else {
+		pehci_print("port %d high speed\n", index + 1);
+	}
+
+	return port_status;
+
+}
+
+/*----------------------------------------------*
+  host controller initialization, removal functions
+ *----------------------------------------------*/
+
+
+/*initialize all three buffer(iso/atl/int) type	headers
+ * Resets the global td_ptd_map_buff[] bookkeeping: per-type counters and
+ * bitmaps, and every per-PTD map slot (no qh/qtd, state TD_PTD_NEW,
+ * invalid header address).  Called once at controller setup. */
+static void
+pehci_hcd_init_map_buffers(phci_hcd * phci)
+{
+	td_ptd_map_buff_t *ptd_map_buff;
+	u8 buff_type, ptd_index;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_print("phci_init_map_buffers(phci	= 0x%p)\n", phci);
+	/* initialize for each buffer type */
+	for (buff_type = 0; buff_type <	TD_PTD_TOTAL_BUFF_TYPES; buff_type++) {
+		ptd_map_buff = &(td_ptd_map_buff[buff_type]);
+		ptd_map_buff->buffer_type = buff_type;
+		ptd_map_buff->active_ptds = 0;
+		ptd_map_buff->total_ptds = 0;
+		/*each buffer type handles at most 16 ptds */
+		ptd_map_buff->max_ptds = 16;
+		ptd_map_buff->active_ptd_bitmap	= 0;
+		/*everything skipped */
+		/*nothing is pending */
+		ptd_map_buff->pending_ptd_bitmap = 0x00000000;
+
+		/* For each ptd	index of this buffer, reset the fields
+		 * (removed the old write-only 'bitmap' local -- it was
+		 * assigned 0x00000001 but never read) */
+		for (ptd_index = 0; ptd_index <	TD_PTD_MAX_BUFF_TDS;
+			ptd_index++) {
+			/*datatoggle zero */
+			ptd_map_buff->map_list[ptd_index].datatoggle = 0;
+			/*td state is not used */
+			ptd_map_buff->map_list[ptd_index].state	= TD_PTD_NEW;
+			/*no endpoint, no qtd */
+			ptd_map_buff->map_list[ptd_index].qh = NULL;
+			ptd_map_buff->map_list[ptd_index].qtd =	NULL;
+			ptd_map_buff->map_list[ptd_index].ptd_header_addr =
+				0xFFFF;
+		}		/* for(	ptd_index */
+	}			/* for(buff_type */
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}				/* phci_init_map_buffers */
+
+
+/*put the host controller into operational mode
+ * called phci_hcd_start routine,
+ * return 0, success else
+ * timeout, fails
+ *
+ * Sequence: set CMD_RUN in the command register, handshake until the
+ * controller reports running, then write 1 to the configflag register to
+ * switch the port routing into EHCI mode and handshake on that too. */
+
+static int
+pehci_hcd_start_controller(phci_hcd * hcd)
+{
+	u32 temp = 0;
+	u32 command = 0;
+	int retval = 0;
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	printk(KERN_NOTICE "++ %s: Entered\n", __FUNCTION__);
+
+
+	command	= isp1763_reg_read16(hcd->dev, hcd->regs.command, command);
+	printk(KERN_NOTICE "HC Command Reg val ...1 %x\n", command);
+
+	/*initialize the host controller: set the run/stop bit */
+	command	|= CMD_RUN;
+
+	isp1763_reg_write16(hcd->dev, hcd->regs.command, command);
+
+
+	command	&= 0;
+
+	/* read back to confirm the write took effect */
+	command	= isp1763_reg_read16(hcd->dev, hcd->regs.command, command);
+	printk(KERN_NOTICE "HC Command Reg val ...2 %x\n", command);
+
+	/*should be in operation in 1000 usecs */
+	if ((retval =
+		pehci_hcd_handshake(hcd, hcd->regs.command, CMD_RUN, CMD_RUN,
+		100000))) {
+		err("Host is not up(CMD_RUN) in	1000 usecs\n");
+		return retval;
+	}
+
+	printk(KERN_NOTICE "ISP1763 HC is running \n");
+
+
+	/*put the host controller to ehci mode (configflag = 1) */
+	command	&= 0;
+	command	|= 1;
+
+	isp1763_reg_write16(hcd->dev, hcd->regs.configflag, command);
+	mdelay(5);
+
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.configflag, temp);
+	pehci_print("%s: Config	Flag reg value:	0x%08x\n", __FUNCTION__, temp);
+
+	/*check	if ehci	mode switching is correct or not */
+	if ((retval =
+		pehci_hcd_handshake(hcd, hcd->regs.configflag, 1, 1, 100))) {
+		err("Host is not into ehci mode	in 100 usecs\n");
+		return retval;
+	}
+
+	mdelay(5);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	printk(KERN_NOTICE "-- %s: Exit\n", __FUNCTION__);
+	return retval;
+}
+
+
+/*enable the interrupts
+ *called phci_1763_start routine
+ * return void
+ *
+ * Order matters: pending interrupt latches are cleared first (the
+ * interrupt register is write-1-to-clear), the enable mask is programmed,
+ * the global/edge/polarity bits are set in hwmodecontrol, and finally the
+ * per-transfer-type (ATL/INT/ISO) AND/OR IRQ masks are initialized. */
+static void
+pehci_hcd_enable_interrupts(phci_hcd * hcd)
+{
+	u32 temp = 0;
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	printk(KERN_NOTICE "++ %s: Entered\n", __FUNCTION__);
+	/*disable the interrupt	source */
+	temp &=	0;
+	/*clear	all the	interrupts that	may be there */
+	temp |=	INTR_ENABLE_MASK;
+	isp1763_reg_write16(hcd->dev, hcd->regs.interrupt, temp);
+
+	/*enable interrupts */
+	temp = 0;
+	
+#ifdef OTG_PACKAGE
+	temp |= INTR_ENABLE_MASK | HC_OTG_INT;
+#else
+	temp |= INTR_ENABLE_MASK;
+#endif	
+	pehci_print("%s: enabled mask 0x%08x\n", __FUNCTION__, temp);
+	isp1763_reg_write16(hcd->dev, hcd->regs.interruptenable, temp);
+
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.interruptenable, temp);
+	pehci_print("%s: Intr enable reg value:	0x%08x\n", __FUNCTION__, temp);
+	
+#ifdef HCD_PACKAGE
+	temp = 0;
+	temp = isp1763_reg_read32(hcd->dev, HC_INT_THRESHOLD_REG, temp);
+//	temp |= 0x0800000F;
+	temp |= 0x0100000F;//125 micro second minimum width between two edge interrupts, 500ns int will remain low
+	//	15/30MHz=500 ns
+	isp1763_reg_write32(hcd->dev, HC_INT_THRESHOLD_REG, temp);
+#endif
+	/*enable the global interrupt */
+	temp &=	0;
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.hwmodecontrol, temp);
+	temp |=	0x01;		/*enable the global interrupt */
+#ifdef EDGE_INTERRUPT
+	temp |=	0x02;		/*enable the edge interrupt */
+#endif
+
+#ifdef POL_HIGH_INTERRUPT
+	temp |=	0x04;		/* enable interrupt polarity high */
+#endif
+
+	isp1763_reg_write16(hcd->dev, hcd->regs.hwmodecontrol, temp);
+
+	/*maximum rate is one msec */
+	/*enable the atl interrupts OR and AND mask */
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_and, temp);
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.atl_irq_mask_or, temp);
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_and, temp);
+	temp = 0x0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.int_irq_mask_or, temp);
+	temp = 0;
+	isp1763_reg_write16(hcd->dev, hcd->regs.iso_irq_mask_and, temp);
+	temp = 0xffff;
+	isp1763_reg_write16(hcd->dev, hcd->regs.iso_irq_mask_or, temp);
+
+	temp = isp1763_reg_read16(hcd->dev, hcd->regs.iso_irq_mask_or, temp);
+	pehci_print("%s:Iso irq	mask reg value:	0x%08x\n", __FUNCTION__, temp);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+/*initialize the host controller register map from Isp1763 to EHCI
+ * Pure bookkeeping: records the chip register offsets in hcd->regs so the
+ * rest of the driver can address registers symbolically.  No hardware
+ * access happens here. */
+static void
+pehci_hcd_init_reg(phci_hcd * hcd)
+{
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* scratch pad register, used for connectivity tests */
+	hcd->regs.scratch = HC_SCRATCH_REG;
+
+	/* EHCI-like operational registers */
+	hcd->regs.command = HC_USBCMD_REG;
+	hcd->regs.usbstatus = HC_USBSTS_REG;
+	hcd->regs.usbinterrupt = HC_INTERRUPT_REG_EHCI;
+	hcd->regs.hcsparams = HC_SPARAMS_REG;
+	hcd->regs.frameindex = HC_FRINDEX_REG;
+
+	/* chip mode, interrupt and per-transfer-type IRQ mask registers */
+	hcd->regs.hwmodecontrol	= HC_HWMODECTRL_REG;
+	hcd->regs.interrupt = HC_INTERRUPT_REG;
+	hcd->regs.interruptenable = HC_INTENABLE_REG;
+	hcd->regs.interruptthreshold = HC_INT_THRESHOLD_REG;
+	hcd->regs.atl_irq_mask_and = HC_ATL_IRQ_MASK_AND_REG;
+	hcd->regs.atl_irq_mask_or = HC_ATL_IRQ_MASK_OR_REG;
+	hcd->regs.int_irq_mask_and = HC_INT_IRQ_MASK_AND_REG;
+	hcd->regs.int_irq_mask_or = HC_INT_IRQ_MASK_OR_REG;
+	hcd->regs.iso_irq_mask_and = HC_ISO_IRQ_MASK_AND_REG;
+	hcd->regs.iso_irq_mask_or = HC_ISO_IRQ_MASK_OR_REG;
+	hcd->regs.buffer_status	= HC_BUFFER_STATUS_REG;
+
+	/* reset/initialization and port registers; only port 0 exists on
+	 * the ISP1763, the port1..3 status registers were removed */
+	hcd->regs.reset	= HC_RESET_REG;
+	hcd->regs.configflag = HC_CONFIGFLAG_REG;
+	hcd->regs.pwrdwn_ctrl =	HC_POWER_DOWN_CONTROL_REG;
+	hcd->regs.ports[0] = HC_PORTSC1_REG;
+	hcd->regs.ports[1] = 0;
+	hcd->regs.ports[2] = 0;
+	hcd->regs.ports[3] = 0;
+
+	/* PTD done/skip/last maps for the three transfer types */
+	hcd->regs.isotddonemap = HC_ISO_PTD_DONEMAP_REG;
+	hcd->regs.isotdskipmap = HC_ISO_PTD_SKIPMAP_REG;
+	hcd->regs.isotdlastmap = HC_ISO_PTD_LASTPTD_REG;
+	hcd->regs.inttddonemap = HC_INT_PTD_DONEMAP_REG;
+	hcd->regs.inttdskipmap = HC_INT_PTD_SKIPMAP_REG;
+	hcd->regs.inttdlastmap = HC_INT_PTD_LASTPTD_REG;
+	hcd->regs.atltddonemap = HC_ATL_PTD_DONEMAP_REG;
+	hcd->regs.atltdskipmap = HC_ATL_PTD_SKIPMAP_REG;
+	hcd->regs.atltdlastmap = HC_ATL_PTD_LASTPTD_REG;
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static void
+pehci_interrupt_handler(phci_hcd * hcd, struct pt_regs *regs)
+{
+	spin_lock(&hcd->lock);
+#ifdef CONFIG_ISO_SUPPORT
+	phcd_iso_handler(hcd, regs);
+#endif
+	pehci_hcd_intl_worker(hcd, regs);
+	pehci_hcd_atl_worker(hcd, regs);
+	spin_unlock(&hcd->lock);
+	return;
+}
+#else
+static void
+pehci_interrupt_handler(phci_hcd * hcd)
+{
+	spin_lock(&hcd->lock);
+#ifdef CONFIG_ISO_SUPPORT
+	pehci_hcd_iso_worker(hcd);
+#endif
+	pehci_hcd_intl_worker(hcd);
+	pehci_hcd_atl_worker(hcd);
+	spin_unlock(&hcd->lock);
+	return;
+}
+#endif
+/* Top-level hard IRQ handler for the ISP1763.
+ * Reads and acknowledges the chip interrupt register, detects
+ * remote-wakeup/resume on the port, and either runs the transfer workers
+ * inline (non-THREAD_BASED) or queues a message for the worker thread
+ * (THREAD_BASED).  The nuofsofs counter guards against re-entry. */
+irqreturn_t pehci_hcd_irq(struct usb_hcd *usb_hcd)
+{
+
+	int work = 0;
+	phci_hcd *pehci_hcd;
+	struct isp1763_dev *dev;
+	u32 intr = 0;
+	u32 resume=0;
+	u32 temp=0;
+	u32 irq_mask = 0;
+
+	if (!(usb_hcd->state & USB_STATE_READY)) {
+		info("interrupt	handler	state not ready	yet\n");
+	usb_hcd->state=USB_STATE_READY;
+	//	return IRQ_NONE;
+	}
+
+	/*our host */
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	dev = pehci_hcd->dev;
+
+	spin_lock(&pehci_hcd->lock);
+	dev->int_reg = isp1763_reg_read16(dev, HC_INTERRUPT_REG, dev->int_reg);
+	/*Clear the interrupt*/
+	isp1763_reg_write16(dev, HC_INTERRUPT_REG, dev->int_reg);
+
+	irq_mask = isp1763_reg_read16(dev, HC_INTENABLE_REG, irq_mask);
+	dev->int_reg &= irq_mask;
+
+	intr = dev->int_reg;
+
+
+	/* already inside the handler: bail out */
+	if (atomic_read(&pehci_hcd->nuofsofs)) {
+		spin_unlock(&pehci_hcd->lock);
+		return IRQ_HANDLED;
+	}
+	atomic_inc(&pehci_hcd->nuofsofs);
+
+	irq_mask=isp1763_reg_read32(dev,HC_USBSTS_REG,0);
+	isp1763_reg_write32(dev,HC_USBSTS_REG,irq_mask);
+	if(irq_mask & 0x4){  // port status register.
+		if(intr & 0x50) {   // OPR register change
+			temp=isp1763_reg_read32(dev,HC_PORTSC1_REG,0);
+			if(temp & 0x4){   // Force resume bit is set
+				if (dev) {
+					if (dev->driver) {
+						if (dev->driver->resume) {
+						dev->driver->resume(dev);
+							resume=1;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
+
+#ifndef THREAD_BASED
+/*-----------------------------------------------------------*/
+#ifdef MSEC_INT_BASED
+	work = 1;
+#else
+	if (intr & (HC_MSEC_INT	& INTR_ENABLE_MASK)) {
+		work = 1;	/* phci_iso_worker(hcd); */
+	}
+
+#ifdef USBNET 
+	if (intr & HC_MSOF_INT ) {
+		struct list_head *pos, *q;
+	
+		/* give back urbs queued for asynchronous cleanup; the lock
+		 * is dropped around usb_hcd_giveback_urb as required */
+		list_for_each_safe(pos, q, &pehci_hcd->cleanup_urb.urb_list) {
+		struct isp1763_async_cleanup_urb *tmp;
+		
+			tmp = list_entry(pos, struct isp1763_async_cleanup_urb, urb_list);
+			if (tmp) {
+				spin_unlock(&pehci_hcd->lock);
+				usb_hcd_giveback_urb(usb_hcd, tmp->urb, tmp->urb->status);
+				spin_lock(&pehci_hcd->lock);
+
+				list_del(pos);
+				if(tmp)
+				kfree(tmp);
+			}
+		}
+		isp1763_reg_write16(dev, HC_INTENABLE_REG, INTR_ENABLE_MASK );
+	}
+#endif
+
+
+	if (intr & (HC_INTL_INT	& INTR_ENABLE_MASK)) {
+	//	spin_lock(&pehci_hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_intl_worker(pehci_hcd, regs);
+#else
+		pehci_hcd_intl_worker(pehci_hcd);
+#endif
+	//	spin_unlock(&pehci_hcd->lock);
+		work = 0;	/*phci_intl_worker(hcd); */
+	}
+	
+	if (intr & (HC_ATL_INT & INTR_ENABLE_MASK)) {
+	//	spin_lock(&pehci_hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_atl_worker(pehci_hcd, regs);
+#else
+		pehci_hcd_atl_worker(pehci_hcd);
+#endif
+	//	spin_unlock(&pehci_hcd->lock);
+		work = 0;	/*phci_atl_worker(hcd);	*/
+	}
+#ifdef CONFIG_ISO_SUPPORT
+	if (intr & (HC_ISO_INT & INTR_ENABLE_MASK)) {
+	//	spin_lock(&pehci_hcd->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_iso_worker(pehci_hcd);
+#else
+		pehci_hcd_iso_worker(pehci_hcd);
+#endif
+	//	spin_unlock(&pehci_hcd->lock);
+		work = 0;	/*phci_atl_worker(hcd); */
+	}
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	if (work){
+		spin_unlock(&pehci_hcd->lock);
+		pehci_interrupt_handler(pehci_hcd, regs);
+		spin_lock(&pehci_hcd->lock);
+	}
+#else
+	if (work){
+		spin_unlock(&pehci_hcd->lock);
+		pehci_interrupt_handler(pehci_hcd);
+		spin_lock(&pehci_hcd->lock);
+	}
+#endif
+
+/*-----------------------------------------------------------*/
+#else
+	if ((intr & (HC_INTL_INT & INTR_ENABLE_MASK)) ||(intr & (HC_ATL_INT & INTR_ENABLE_MASK)))
+	{ //send
+		st_UsbIt_Msg_Struc *stUsbItMsgSnd ;
+		
+		stUsbItMsgSnd = (st_UsbIt_Msg_Struc *)kmalloc(sizeof(st_UsbIt_Msg_Struc), GFP_ATOMIC);
+		if (!stUsbItMsgSnd) {
+			/* Bug fix: previously 'return -ENOMEM', which is not
+			 * a valid irqreturn_t and leaked both the held
+			 * spinlock and the nuofsofs reference. */
+			atomic_dec(&pehci_hcd->nuofsofs);
+			spin_unlock(&pehci_hcd->lock);
+			return IRQ_NONE;
+		}
+		
+		/* Bug fix: zero the whole struct, not sizeof(pointer) bytes
+		 * as the old memset(..., sizeof(stUsbItMsgSnd)) did. */
+		memset(stUsbItMsgSnd, 0, sizeof(*stUsbItMsgSnd));
+		
+		stUsbItMsgSnd->usb_hcd = usb_hcd;
+		stUsbItMsgSnd->uIntStatus = NO_SOF_REQ_IN_ISR;
+		list_add_tail(&(stUsbItMsgSnd->list), &(g_messList.list));
+
+		pehci_print("\n------------- send mess : %d------------\n",stUsbItMsgSnd->uIntStatus);
+		if ((g_stUsbItThreadHandler.phThreadTask != NULL) && (g_stUsbItThreadHandler.lThrdWakeUpNeeded == 0))
+		{
+			pehci_print("\n------- wake up thread : %d-----\n",stUsbItMsgSnd->uIntStatus);
+			g_stUsbItThreadHandler.lThrdWakeUpNeeded = 1;
+			wake_up(&(g_stUsbItThreadHandler.ulThrdWaitQhead));
+		}
+	}
+/*-----------------------------------------------------------*/
+#endif
+
+	atomic_dec(&pehci_hcd->nuofsofs);
+	spin_unlock(&pehci_hcd->lock);
+		if(resume){
+			usb_hcd_poll_rh_status(usb_hcd);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * pehci_hcd_reset - hard-reset the ISP1763 host controller
+ * @usb_hcd: generic HCD whose private data is our phci_hcd
+ *
+ * Called from pehci_hcd_start().  Sequence: map the register window,
+ * reset the ATX transceiver (bit 3 of the reset register), then the
+ * whole chip (bit 0), then the EHCI block (bit 1) -- polling each
+ * reset bit until the hardware clears it -- and finally issue
+ * CMD_RESET through the command register.  The write ordering and the
+ * mdelay() settle times are hardware mandated; do not reorder.  Each
+ * poll loop gives up after 100 tries (~1 s) with only a log message,
+ * so the function always returns 0.
+ */
+static int
+pehci_hcd_reset(struct usb_hcd *usb_hcd)
+{
+	u32 command = 0;
+	u32 temp = 0;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+	printk(KERN_NOTICE "++ %s: Entered\n", __FUNCTION__);
+	/* program the register offsets before touching any register */
+	pehci_hcd_init_reg(hcd);
+	printk("chipid %x \n", isp1763_reg_read32(hcd->dev, HC_CHIP_ID_REG, temp)); //0x70
+
+	/*reset	the atx controller (bit 3 of the reset register) */
+	temp &=	0;
+	temp |=	8;
+	isp1763_reg_write16(hcd->dev, hcd->regs.reset, temp);
+	mdelay(10);
+	
+	/*reset	the host controller (bit 0); hardware clears the bit
+	 * when the reset completes */
+	temp &=	0;
+	temp |=	1;
+	isp1763_reg_write16(hcd->dev, hcd->regs.reset, temp);
+
+	command	= 0;
+	do {
+
+		temp = isp1763_reg_read16(hcd->dev, hcd->regs.reset, temp);
+		mdelay(10);
+		command++;
+		if (command > 100) {
+			/* give up after ~1 s; proceed anyway */
+			printk("not able to reset\n");
+			break;
+		}
+	} while	(temp &	0x01);
+
+
+	/*reset	the ehci controller registers (bit 1); poll likewise */
+	temp = 0;
+	temp |=	(1 << 1);
+	isp1763_reg_write16(hcd->dev, hcd->regs.reset, temp);
+	command	= 0;
+	do {
+		temp = isp1763_reg_read16(hcd->dev, hcd->regs.reset, temp);
+		mdelay(10);
+		command++;
+		if (command > 100) {
+			printk("not able to reset\n");
+			break;
+		}
+	} while	(temp &	0x02);
+
+	/*read the command register and request an EHCI-style reset */
+	command	= isp1763_reg_read16(hcd->dev, hcd->regs.command, command);
+
+	command	|= CMD_RESET;
+	/*write	back and wait for, 250 msec */
+	isp1763_reg_write16(hcd->dev, hcd->regs.command, command);
+	/*wait for maximum 250 msecs */
+	mdelay(200);
+	printk("command	%x\n",
+		isp1763_reg_read16(hcd->dev, hcd->regs.command, command));
+	printk(KERN_NOTICE "-- %s: Exit	\n", __FUNCTION__);
+	return 0;
+}
+
+/*
+ * pehci_hcd_start - one-time host controller initialisation
+ * @usb_hcd: generic HCD; our phci_hcd hangs off its private data
+ *
+ * Called by phci_hcd_probe.  Resets the chip, programs bus width and
+ * common-interrupt mode, sanity-checks the scratch register (bus
+ * wiring test), clears all PTD skip/done maps, routes the OTG ports
+ * for the selected package, enables interrupts and moves the
+ * controller to the operational state.  Returns 0 on success or the
+ * error from the reset/start helpers.  Register ordering is hardware
+ * mandated; do not reorder.
+ */
+static int
+pehci_hcd_start(struct usb_hcd *usb_hcd)
+{
+
+	int retval;
+	int count = 0;
+	phci_hcd *pehci_hcd = NULL;
+	u32 temp = 0;
+	u32 hwmodectrl = 0;
+	u32 ul_scratchval = 0;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+
+	spin_lock_init(&pehci_hcd->lock);
+	atomic_set(&pehci_hcd->nuofsofs, 0);
+	atomic_set(&pehci_hcd->missedsofs, 0);
+
+	/*Initialize host controller registers */
+	pehci_hcd_init_reg(pehci_hcd);
+
+	/*reset	the host controller */
+	retval = pehci_hcd_reset(usb_hcd);
+	if (retval) {
+		err("phci_1763_start: error failing with status	%x\n", retval);
+		return retval;
+	}
+
+	/* select data-bus width and enable the common interrupt,
+	 * read-modify-write on the HW mode control register */
+	hwmodectrl =
+		isp1763_reg_read16(pehci_hcd->dev,
+				   pehci_hcd->regs.hwmodecontrol, hwmodectrl);
+#ifdef DATABUS_WIDTH_16
+	printk(KERN_NOTICE "Mode Ctrl Value before 16width: %x\n", hwmodectrl);
+	hwmodectrl &= 0xFFEF;	/*enable the 16	bit bus	*/
+	hwmodectrl |= 0x0400;	/*enable common	int */
+#else
+	printk(KERN_NOTICE "Mode Ctrl Value before 8width : %x\n", hwmodectrl);
+	hwmodectrl |= 0x0010;	/*enable the 8 bit bus */
+	hwmodectrl |= 0x0400;	/*enable common	int */
+#endif
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.hwmodecontrol,
+			    hwmodectrl);
+
+	hwmodectrl =
+		isp1763_reg_read16(pehci_hcd->dev,
+				   pehci_hcd->regs.hwmodecontrol, hwmodectrl);
+	hwmodectrl |=0x9;  //lock interface and enable global interrupt.
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.hwmodecontrol,
+		hwmodectrl);	
+	printk(KERN_NOTICE "Mode Ctrl Value after buswidth: %x\n", hwmodectrl);
+
+	/* scratch-register write/read-back: a mismatch indicates a
+	 * CPU<->chip bus wiring or timing problem (logged only) */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.scratch, 0x3344);
+
+	ul_scratchval =
+		isp1763_reg_read16(pehci_hcd->dev, pehci_hcd->regs.scratch,
+				   ul_scratchval);
+	printk(KERN_NOTICE "Scratch Reg	Value :	%x\n", ul_scratchval);
+	if (ul_scratchval != 0x3344) {
+		printk(KERN_NOTICE "Scratch Reg	Value Mismatch:	%x\n",
+		       ul_scratchval);
+
+	}
+
+
+	/*initialize the host controller initial values	*/
+	/*disable all the buffer */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.buffer_status, 0);
+	/*skip all the transfers */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.atltdskipmap,
+			    NO_TRANSFER_ACTIVE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.inttdskipmap,
+			    NO_TRANSFER_ACTIVE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.isotdskipmap,
+			    NO_TRANSFER_ACTIVE);
+	/*clear	done map */
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.atltddonemap,
+			    NO_TRANSFER_DONE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.inttddonemap,
+			    NO_TRANSFER_DONE);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.isotddonemap,
+			    NO_TRANSFER_DONE);
+	
+#ifdef HCD_PACKAGE
+	/*port1 as Host */
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0400);
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_CLEAR_REG, 0x0080);
+	/*port2 as Host */
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0000);
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_CLEAR_REG, 0x8000);
+	
+	#if 0 /* do not use bit 1&2 for pure host application */
+	ul_scratchval =	isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,0);
+	ul_scratchval |= 0x006;	
+	isp1763_reg_write32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,ul_scratchval);
+	#endif
+	
+#elif defined(HCD_DCD_PACKAGE)
+
+	/*port1 as device */
+	isp1763_reg_write16(pehci_hcd->dev,OTG_CTRL_SET_REG, 
+			OTG_CTRL_DMPULLDOWN |OTG_CTRL_DPPULLDOWN | 
+			OTG_CTRL_SW_SEL_HC_DC |OTG_CTRL_OTG_DISABLE);	/* pure	Device Mode and	OTG disabled */
+
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0480);
+	/*port2 as host */
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_SET_REG, 0x0000);
+	isp1763_reg_write16(pehci_hcd->dev, OTG_CTRL_CLEAR_REG, 0x8000);
+	ul_scratchval =
+		isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,
+		0);
+#endif
+
+	/*enable interrupts */
+	pehci_hcd_enable_interrupts(pehci_hcd);
+
+	/*put controller into operational mode */
+	retval = pehci_hcd_start_controller(pehci_hcd);
+	if (retval) {
+		err("phci_1763_start: error failing with status	%x\n", retval);
+		return retval;
+	}
+
+	/*Init the phci	qtd <->	ptd map	buffers	*/
+	pehci_hcd_init_map_buffers(pehci_hcd);
+
+	/*set last maps, for iso its only 1, else 32 tds bitmap	*/
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.atltdlastmap,
+			    0x8000);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.inttdlastmap, 0x80);
+	isp1763_reg_write16(pehci_hcd->dev, pehci_hcd->regs.isotdlastmap, 0x01);
+	/*iso transfers	are not	active */
+	pehci_hcd->next_uframe = -1;
+	pehci_hcd->periodic_sched = 0;
+	hwmodectrl =
+		isp1763_reg_read16(pehci_hcd->dev,
+				   pehci_hcd->regs.hwmodecontrol, hwmodectrl);
+
+	/*initialize the periodic list */
+	for (count = 0; count < PTD_PERIODIC_SIZE; count++) {
+		pehci_hcd->periodic_list[count].framenumber = 0;
+		INIT_LIST_HEAD(&pehci_hcd->periodic_list[count].sitd_itd_head);
+	}
+
+
+	/*set the state	of the host to ready,
+	 * start processing interrupts
+	 * */
+
+	usb_hcd->state = HC_STATE_RUNNING;
+	pehci_hcd->state = HC_STATE_RUNNING;
+
+
+	/*initialize root hub timer */
+	init_timer(&pehci_hcd->rh_timer);
+	/*initialize watchdog */
+	init_timer(&pehci_hcd->watchdog);
+
+	temp = isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,
+				  temp);
+	
+	/* NOTE(review): magic power-down control value; individual bit
+	 * meanings are not documented here -- confirm against the
+	 * ISP1763 datasheet before changing */
+	temp = 0x3e81bA0;
+#if 0
+	temp |=	0x306;
+#endif
+	isp1763_reg_write32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG, temp);
+	temp = isp1763_reg_read32(pehci_hcd->dev, HC_POWER_DOWN_CONTROL_REG,
+				  temp);
+	printk(" Powerdown Reg Val: %x\n", temp);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+
+	return 0;
+}
+
+/*
+ * pehci_hcd_stop - shut the controller down
+ * @usb_hcd: the HCD being stopped
+ *
+ * Lets any in-flight work drain briefly, refuses to run from
+ * interrupt context, then removes power from the root-hub port and
+ * waits for it to actually go off.
+ */
+static void
+pehci_hcd_stop(struct usb_hcd *usb_hcd)
+{
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* give pending processing a moment to drain */
+	if (usb_hcd->state == USB_STATE_RUNNING)
+		mdelay(2);
+
+	/* stopping from interrupt context must never happen */
+	if (in_interrupt()) {
+		pehci_info("stopped in_interrupt!\n");
+		return;
+	}
+
+	/* power off our root hub ... */
+	pehci_rh_control(usb_hcd, ClearPortFeature, USB_PORT_FEAT_POWER,
+			 1, NULL, 0);
+
+	/* ... and let the port power actually decay */
+	mdelay(20);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+}
+
+
+/*
+ * pehci_hcd_urb_enqueue - queue a URB for any device but the root hub
+ * @usb_hcd:   the generic HCD
+ * @urb:       request to queue (control/bulk/interrupt/iso)
+ * @mem_flags: allocation flags for per-URB bookkeeping
+ * (pre-2.6.24 kernels additionally pass the endpoint, @ep)
+ *
+ * Sizes the qtd chain for the pipe type, allocates urb_priv (stored
+ * in urb->hcpriv), builds the qtds and hands them to the async,
+ * interrupt or iso scheduler.  Under THREAD_BASED operation the
+ * actual hardware submission is deferred to the worker thread via a
+ * message on g_enqueueMessList.  Returns 0 or a negative errno.
+ *
+ * Fixes vs. the original: the THREAD_BASED message path no longer
+ * returns with pehci_hcd->lock held on kmalloc failure, zeroes the
+ * whole message struct (was sizeof of the pointer), and frees the
+ * message when it is not queued; the control/bulk error path no
+ * longer leaks the irqsave lock.
+ */
+static int
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+pehci_hcd_urb_enqueue(struct usb_hcd *usb_hcd, struct usb_host_endpoint *ep,
+		struct urb *urb, gfp_t mem_flags)
+#else
+pehci_hcd_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb, gfp_t mem_flags)
+#endif
+{
+
+	struct list_head qtd_list;
+	struct ehci_qh *qh = 0;
+	phci_hcd *pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	int status = 0;
+	int temp = 0, max = 0, num_tds = 0, mult = 0;
+	urb_priv_t *urb_priv = NULL;
+	unsigned long  flags;
+	
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* core is already killing this URB: do not accept it */
+	if (unlikely(atomic_read(&urb->reject))) 
+		return -EINVAL;
+	
+	INIT_LIST_HEAD(&qtd_list);
+	urb->transfer_flags &= ~EHCI_STATE_UNLINK;
+
+	temp = usb_pipetype(urb->pipe);
+	max = usb_maxpacket(urb->dev, urb->pipe, !usb_pipein(urb->pipe));
+
+	if (hcdpowerdown == 1) {
+		printk("Enqueue	hcd power down\n");
+		return -EINVAL;
+	}
+
+	/*patch to track the OTG device: remember the last device seen
+	 * directly below the root hub */
+	if (!hubdev || 
+		(urb->dev->parent==usb_hcd->self.root_hub && 
+		hubdev!=urb->dev)) {
+		if(urb->dev->parent== usb_hcd->self.root_hub) {
+			hubdev = urb->dev;
+		}
+	}
+
+	switch (temp) {
+	case PIPE_INTERRUPT:
+		/*only one td; fold the high-bandwidth multiplier into
+		 * the max packet size */
+		num_tds	= 1;
+		mult = 1 + ((max >> 11)	& 0x03);
+		max &= 0x07ff;
+		max *= mult;
+
+		if (urb->transfer_buffer_length	> max) {
+			err("interrupt urb length is greater then %d\n", max);
+			return -EINVAL;
+		}
+
+		if (hubdev && urb->dev->parent == usb_hcd->self.root_hub) {
+			huburb = urb;
+		}
+
+		break;
+
+	case PIPE_CONTROL:
+		/*calculate the	number of tds, follow 1	pattern	*/
+		if (No_Data_Phase && No_Status_Phase) {
+			printk("Only SetUP Phase\n");
+			num_tds	= (urb->transfer_buffer_length == 0) ? 1 :
+				((urb->transfer_buffer_length -
+				  1) / HC_ATL_PL_SIZE +	1);
+		} else if (!No_Data_Phase && No_Status_Phase) {
+			printk("SetUP Phase and	Data Phase\n");
+			num_tds	= (urb->transfer_buffer_length == 0) ? 2 :
+				((urb->transfer_buffer_length -
+				  1) / HC_ATL_PL_SIZE +	3);
+		} else if (!No_Data_Phase && !No_Status_Phase) {
+			num_tds	= (urb->transfer_buffer_length == 0) ? 2 :
+				((urb->transfer_buffer_length -
+				  1) / HC_ATL_PL_SIZE +	3);
+		}
+		
+		break;
+		
+	case PIPE_BULK:
+		num_tds	=
+			(urb->transfer_buffer_length - 1) / HC_ATL_PL_SIZE + 1;
+		/* extra zero-length td when the transfer is an exact
+		 * multiple of the max packet size */
+		if ((urb->transfer_flags & URB_ZERO_PACKET)
+			&& !(urb->transfer_buffer_length % max)) {
+			num_tds++;
+		}
+		
+		break;
+		
+#ifdef CONFIG_ISO_SUPPORT
+	case PIPE_ISOCHRONOUS:
+		/* Don't need to do anything here */
+		break;
+#endif
+	default:
+		return -EINVAL;	/*not supported	isoc transfers */
+
+
+	}
+
+#ifdef CONFIG_ISO_SUPPORT
+	if (temp != PIPE_ISOCHRONOUS) {
+#endif
+		/*allocate urb_priv with room for num_tds qtd pointers */
+		urb_priv = kmalloc(sizeof(urb_priv_t) +
+				   num_tds * sizeof(struct ehci_qtd),
+				   mem_flags);
+		if (!urb_priv) {
+			err("memory   allocation error\n");
+			return -ENOMEM;
+		}
+
+		memset(urb_priv, 0, sizeof(urb_priv_t) +
+			num_tds * sizeof(struct ehci_qtd));
+		INIT_LIST_HEAD(&urb_priv->qtd_list);
+		urb_priv->qtd[0] = NULL;
+		urb_priv->length = num_tds;
+		{
+			int i =	0;
+			/*allocate number of tds here. better to do this in qtd_make routine */
+			for (i = 0; i <	num_tds; i++) {
+				urb_priv->qtd[i] =
+					phci_hcd_qtd_allocate(mem_flags);
+				if (!urb_priv->qtd[i]) {
+					phci_hcd_urb_free_priv(pehci_hcd,
+							       urb_priv, NULL);
+					return -ENOMEM;
+				}
+			}
+		}
+		/*keep a copy of this */
+		urb->hcpriv = urb_priv;
+#ifdef CONFIG_ISO_SUPPORT
+	}
+#endif
+
+	switch (temp) {
+	case PIPE_INTERRUPT:
+		phci_hcd_make_qtd(pehci_hcd, &urb_priv->qtd_list,	urb, &status);
+		if (status < 0)	{
+			/* NOTE(review): urb_priv is not freed here; it is
+			 * still reachable via urb->hcpriv -- confirm the
+			 * cleanup path reclaims it */
+			return status;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		qh = phci_hcd_submit_interrupt(pehci_hcd, ep, &urb_priv->qtd_list, urb,
+			&status);
+#else
+		qh = phci_hcd_submit_interrupt(pehci_hcd, &urb_priv->qtd_list, urb,
+			&status);
+#endif
+		if (status < 0)
+			return status;
+		break;
+
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+
+#ifdef THREAD_BASED
+	spin_lock_irqsave (&pehci_hcd->lock, flags);
+#endif
+		phci_hcd_make_qtd(pehci_hcd, &qtd_list,	urb, &status);
+		if (status < 0) {
+#ifdef THREAD_BASED
+			/* bugfix: do not return with the lock held */
+			spin_unlock_irqrestore (&pehci_hcd->lock, flags);
+#endif
+			return status;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		qh = phci_hcd_submit_async(pehci_hcd, ep, &qtd_list, urb,
+			&status);
+#else
+		qh = phci_hcd_submit_async(pehci_hcd, &qtd_list, urb, &status);
+#endif
+
+#ifdef THREAD_BASED
+	spin_unlock_irqrestore (&pehci_hcd->lock, flags);
+#endif
+
+		if (status < 0) {
+			return status;
+		}
+		break;
+#ifdef CONFIG_ISO_SUPPORT
+	case PIPE_ISOCHRONOUS:
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_urb_enqueue]: URB Transfer buffer: 0x%08lx\n",
+			(long) urb->transfer_buffer);
+		iso_dbg(ISO_DBG_DATA,
+			"[pehci_hcd_urb_enqueue]: URB Buffer Length: %ld\n",
+			(long) urb->transfer_buffer_length);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		phcd_submit_iso(pehci_hcd, ep, urb, (unsigned long *) &status);
+#else
+		spin_lock_irqsave(&pehci_hcd->lock, flags);
+		phcd_store_urb_pending(pehci_hcd, 0, urb, (int *) &status);
+		spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+#endif
+
+		return status;
+
+		break;
+#endif
+	default:
+		return -ENODEV;
+	}			/*end of switch	*/
+
+#if (defined MSEC_INT_BASED)
+	return 0;
+#elif (defined THREAD_BASED)
+{ //send a message to the worker thread; it performs the hardware submit
+		st_UsbIt_Msg_Struc *stUsbItMsgSnd ;
+		int queued = 0;
+		unsigned long flags;
+		spin_lock_irqsave(&pehci_hcd->lock,flags);
+	
+		stUsbItMsgSnd = (st_UsbIt_Msg_Struc *)kmalloc(sizeof(st_UsbIt_Msg_Struc), GFP_ATOMIC);
+		if (!stUsbItMsgSnd)
+		{
+			/* bugfix: the original returned here with
+			 * pehci_hcd->lock still held */
+			spin_unlock_irqrestore(&pehci_hcd->lock,flags);
+			return -ENOMEM;
+		}
+		
+		/* bugfix: was sizeof(stUsbItMsgSnd), which cleared only
+		 * pointer-size bytes of the structure */
+		memset(stUsbItMsgSnd, 0, sizeof(*stUsbItMsgSnd));
+		
+		stUsbItMsgSnd->usb_hcd = usb_hcd;
+		stUsbItMsgSnd->uIntStatus = NO_SOF_REQ_IN_REQ;
+		spin_lock(&enqueue_lock);
+		if(list_empty(&g_enqueueMessList.list)) {
+			list_add_tail(&(stUsbItMsgSnd->list), &(g_enqueueMessList.list));
+			queued = 1;
+		}
+		spin_unlock(&enqueue_lock);
+		
+		pehci_print("\n------------- send mess : %d------------\n",stUsbItMsgSnd->uIntStatus);
+
+		spin_lock(&g_stUsbItThreadHandler.lock);
+		if ((g_stUsbItThreadHandler.phThreadTask != NULL) && (g_stUsbItThreadHandler.lThrdWakeUpNeeded == 0))
+		{
+			pehci_print("\n------- wake up thread : %d-----\n",stUsbItMsgSnd->uIntStatus);
+			g_stUsbItThreadHandler.lThrdWakeUpNeeded = 1;
+			wake_up(&(g_stUsbItThreadHandler.ulThrdWaitQhead));
+		}
+		spin_unlock(&g_stUsbItThreadHandler.lock);
+
+		/* bugfix: the original leaked the message whenever the
+		 * list already had a pending entry */
+		if (!queued)
+			kfree(stUsbItMsgSnd);
+
+		spin_unlock_irqrestore(&pehci_hcd->lock,flags);
+	}
+	pehci_entry("-- %s: Exit\n",__FUNCTION__);
+    return 0;
+#else
+	/*submit tds but iso */
+    if (temp != PIPE_ISOCHRONOUS)
+	pehci_hcd_td_ptd_submit_urb(pehci_hcd, qh, qh->type);
+#endif
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return 0;
+
+}
+
+/*---------------------------------------------*
+  io request handlers
+ *---------------------------------------------*/
+
+/*
+ * pehci_hcd_urb_dequeue - cancel a previously queued URB
+ *
+ * Control/bulk: mark the PTD skipped in hardware (with a special
+ * stop-PTD workaround for full-speed devices), wait for the skip to
+ * take effect, optionally disable the ATL buffer, and give the URB
+ * back.  Interrupt: skip the PTD and give the URB back.  Isochronous
+ * (CONFIG_ISO_SUPPORT): unlink every URB still pending on the
+ * endpoint and give each back with -ESHUTDOWN.
+ * Pre-2.6.24 kernels derive @status locally; later kernels pass it in.
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+static int
+pehci_hcd_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb)
+#else
+static int
+pehci_hcd_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	int status = 0;
+#endif
+	int retval = 0;
+	td_ptd_map_buff_t *td_ptd_buf;
+	td_ptd_map_t *td_ptd_map;
+	struct ehci_qh *qh = 0;
+	u32 skipmap = 0;
+	u32 buffstatus = 0;
+	unsigned long flags;
+	struct ehci_qtd	*qtd = 0;
+	struct usb_host_endpoint *ep;
+
+	struct ehci_qtd	*cancel_qtd = 0;	/*added	for stopping ptd*/
+	struct urb *cancel_urb = 0;	/*added	for stopping ptd*/
+	urb_priv_t *cancel_urb_priv = 0;	/* added for stopping ptd */
+	struct _isp1763_qha atlqha;
+	struct _isp1763_qha *qha;
+	struct isp1763_mem_addr	*mem_addr = 0;
+	u32 ormask = 0;
+	struct list_head *qtd_list = 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	pehci_info("device %d\n", urb->dev->devnum);
+
+	/* nothing queued for this urb: nothing to unlink */
+	if(urb_priv==NULL){
+		printk("*******urb_priv is NULL*******	%s: Entered\n",	__FUNCTION__);
+		return 0;
+		}
+	spin_lock_irqsave(&hcd->lock, flags);
+
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+	//	status = 0;
+		qh = urb_priv->qh;
+		if(qh==NULL)
+			break;
+
+		td_ptd_buf = &td_ptd_map_buff[TD_PTD_BUFF_TYPE_ATL];
+		td_ptd_map = &td_ptd_buf->map_list[qh->qtd_ptd_index];
+
+		/*if its already been removed */
+		if (td_ptd_map->state == TD_PTD_NEW) {
+			break;
+		}
+/* patch added for stopping Full speed PTD */
+/* patch starts	ere */
+		if (urb->dev->speed != USB_SPEED_HIGH) {
+
+			cancel_qtd = td_ptd_map->qtd;
+			if (!qh	|| !cancel_qtd)	{
+				err("Never Error:QH and	QTD must not be	zero\n");
+			} else {
+				cancel_urb = cancel_qtd->urb;
+				cancel_urb_priv	=
+					(urb_priv_t *) cancel_urb->hcpriv;
+				mem_addr = &cancel_qtd->mem_addr;
+				qha = &atlqha;
+				memset(qha, 0, sizeof(struct _isp1763_qha));
+
+				/* skip the PTD first, then patch its header
+				 * in chip memory so the transfer retires */
+				skipmap	=
+					isp1763_reg_read16(hcd->dev,
+							   hcd->regs.
+							   atltdskipmap,
+							   skipmap);
+				skipmap	|= td_ptd_map->ptd_bitmap;
+				isp1763_reg_write16(hcd->dev,
+						    hcd->regs.atltdskipmap,
+						    skipmap);
+
+				/*read this ptd	from the ram address,address is	in the
+				   td_ptd_map->ptd_header_addr */
+				isp1763_mem_read(hcd->dev,
+						 td_ptd_map->ptd_header_addr, 0,
+						 (u32 *) (qha),	PHCI_QHA_LENGTH,
+						 0);
+				if ((qha->td_info1 & QHA_VALID)
+					|| (qha->td_info4 &	QHA_ACTIVE)) {
+
+					qha->td_info2 |= 0x00008000;
+					qha->td_info1 |= QHA_VALID;
+					qha->td_info4 |= QHA_ACTIVE;
+					skipmap	&= ~td_ptd_map->ptd_bitmap;
+					ormask |= td_ptd_map->ptd_bitmap;
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.
+						atl_irq_mask_or,
+						ormask);
+					/* copy back into the header, payload is	already
+					 * present no need to write again */
+					isp1763_mem_write(hcd->dev,
+						td_ptd_map->
+						ptd_header_addr, 0,
+						(u32 *) (qha),
+						PHCI_QHA_LENGTH, 0);
+					/*unskip this td */
+					isp1763_reg_write16(hcd->dev,
+						hcd->regs.
+						atltdskipmap,
+						skipmap);
+					udelay(100);
+				}
+
+				/* re-read the header to verify retirement */
+				isp1763_mem_read(hcd->dev,
+					td_ptd_map->ptd_header_addr, 0,
+					(u32 *) (qha),	PHCI_QHA_LENGTH,
+					0);
+				if (!(qha->td_info1 & QHA_VALID)
+					&& !(qha->td_info4 & QHA_ACTIVE)) {
+					printk(KERN_NOTICE
+					"ptd has	been retired \n");
+				}
+
+			}
+		}
+
+/*   Patch Ends	*/
+		/* These TDs are not pending anymore */
+		td_ptd_buf->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+		/*tell atl worker this urb is going to be removed */
+		td_ptd_map->state = TD_PTD_REMOVE;
+		/* NOTE(review): the two statements above are repeated
+		 * verbatim below -- harmless duplication */
+		/* These TDs are not pending anymore */
+		td_ptd_buf->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+		/*tell atl worker this urb is going to be removed */
+		td_ptd_map->state = TD_PTD_REMOVE;
+		urb_priv->state	|= DELETE_URB;
+
+		/*read the skipmap, to see if this transfer has	to be rescheduled */
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.atltdskipmap,
+			skipmap);
+		pehci_check("remove skip map %x, ptd map %x\n",	skipmap,
+			td_ptd_map->ptd_bitmap);
+
+		buffstatus =
+			isp1763_reg_read16(hcd->dev, hcd->regs.buffer_status,
+			buffstatus);
+
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.atltdskipmap,
+			skipmap | td_ptd_map->ptd_bitmap);
+
+		/* busy-wait until the controller acknowledges the skip */
+		while (!(skipmap & td_ptd_map->ptd_bitmap)) {
+			udelay(125);
+
+			skipmap	= isp1763_reg_read16(hcd->dev,
+				hcd->regs.atltdskipmap,
+				skipmap);
+		}
+
+		/* if all  transfers skipped,
+		 * then	disable	the atl	buffer,
+		 * so that new transfer	can come in
+		 * need	to see the side	effects
+		 * */
+		if (skipmap == NO_TRANSFER_ACTIVE) {
+			/*disable the buffer */
+			pehci_info("disable the	atl buffer\n");
+			buffstatus &= ~ATL_BUFFER;
+			isp1763_reg_write16(hcd->dev, hcd->regs.buffer_status,
+				buffstatus);
+		}
+
+		qtd_list = &qh->qtd_list;
+		/*this should remove all pending transfers */
+		pehci_check("num tds %d, urb length %d,device %d\n",
+			urb_priv->length, urb->transfer_buffer_length,
+			urb->dev->devnum);
+
+		pehci_check("remove first qtd address %p\n", urb_priv->qtd[0]);
+		pehci_check("length of the urb %d, completed %d\n",
+			urb->transfer_buffer_length, urb->actual_length);
+		qtd = urb_priv->qtd[urb_priv->length - 1];
+		pehci_check("qtd state is %x\n", qtd->state);
+
+
+		urb->status=status;
+		status = 0;
+#ifdef USBNET 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_urb_delayed_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+		pehci_hcd_urb_delayed_complete(hcd, qh, urb, td_ptd_map);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+
+#endif
+		break;
+
+	case PIPE_INTERRUPT:
+		pehci_check("phci_1763_urb_dequeue: INTR needs to be done\n");
+		urb->status = status; //-ENOENT;//This will allow to suspend the system. in auto suspend mode
+		status = 0;
+		qh = urb_priv->qh;
+		if(qh==NULL)
+			break;
+
+		td_ptd_buf = &td_ptd_map_buff[TD_PTD_BUFF_TYPE_INTL];
+		td_ptd_map = &td_ptd_buf->map_list[qh->qtd_ptd_index];
+
+		/*urb is already been removed */
+		if (td_ptd_map->state == TD_PTD_NEW) {
+			kfree(urb_priv);
+			break;
+		}
+
+		/* These TDs are not pending anymore */
+		td_ptd_buf->pending_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+		td_ptd_map->state = TD_PTD_REMOVE;
+		urb_priv->state	|= DELETE_URB;
+
+		/*read the skipmap, to see if this transfer has	to be rescheduled */
+		skipmap	=
+			isp1763_reg_read16(hcd->dev, hcd->regs.inttdskipmap,
+			skipmap);
+
+		isp1763_reg_write16(hcd->dev, hcd->regs.inttdskipmap,
+			skipmap | td_ptd_map->ptd_bitmap);
+		qtd_list = &qh->qtd_list;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map, NULL);
+#else
+		pehci_hcd_urb_complete(hcd, qh, urb, td_ptd_map);
+#endif
+		break;
+#ifdef CONFIG_ISO_SUPPORT
+	case PIPE_ISOCHRONOUS:
+		/* NOTE(review): %x with pointer arguments -- debug only */
+		pehci_info("urb dequeue %x %x\n", urb,urb->pipe);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	if(urb->dev->speed==USB_SPEED_HIGH){
+		retval = usb_hcd_check_unlink_urb(usb_hcd, urb, status);
+		if (!retval) {
+			pehci_info("[pehci_hcd_urb_dequeue] usb_hcd_unlink_urb_from_ep with status = %d\n", status);
+			usb_hcd_unlink_urb_from_ep(usb_hcd, urb);
+
+
+		}
+	}
+#endif
+
+		
+		status = 0;
+		ep=urb->ep;
+		/* drop the lock: the giveback below may re-enter the HCD */
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		mdelay(100);
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+						if (urb->hcpriv!= periodic_ep[0]){
+#else
+						if (urb->ep != periodic_ep[0]){
+#endif
+	/* not the tracked periodic endpoint: flush everything queued */
+	if(!list_empty(&ep->urb_list)){	
+		while(!list_empty(&ep->urb_list)){
+			urb=container_of(ep->urb_list.next,struct urb,urb_list);
+			pehci_info("list is not empty %x %x\n",urb,urb->dev->state);
+			if(urb){
+		retval = usb_hcd_check_unlink_urb(usb_hcd, urb,0);
+		if (!retval) {
+			pehci_info("[pehci_hcd_urb_dequeue] usb_hcd_unlink_urb_from_ep with status = %d\n", status);
+			usb_hcd_unlink_urb_from_ep(usb_hcd, urb);
+		}
+			urb->status=-ESHUTDOWN;
+	#if LINUX_VERSION_CODE <KERNEL_VERSION(2,6,24)
+			usb_hcd_giveback_urb(usb_hcd,urb);
+	#else
+			usb_hcd_giveback_urb(usb_hcd,urb,urb->status);
+	#endif
+				
+			}
+		}
+		}else{
+	if(urb){
+		pehci_info("list empty %x\n",urb->dev->state);
+		phcd_clean_urb_pending(hcd, urb);
+		retval = usb_hcd_check_unlink_urb(usb_hcd, urb,0);
+		if (!retval) {
+			pehci_info("[pehci_hcd_urb_dequeue] usb_hcd_unlink_urb_from_ep with status = %d\n", status);
+			usb_hcd_unlink_urb_from_ep(usb_hcd, urb);
+		}
+			urb->status=-ESHUTDOWN;
+	#if LINUX_VERSION_CODE <KERNEL_VERSION(2,6,24)
+			usb_hcd_giveback_urb(usb_hcd,urb);
+	#else
+			usb_hcd_giveback_urb(usb_hcd,urb,urb->status);
+	#endif
+				
+			}
+			
+		}
+	}	
+#endif
+		/* lock already dropped above -- return directly */
+		return 0;
+		/*nothing to do	here, wait till	all transfers are done in iso worker */
+		break;
+	}
+
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	pehci_info("status %d\n", status);
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return status;
+}
+
+/* bulk	qh holds the data toggle */
+
+/*
+ * pehci_hcd_endpoint_disable - tear down an endpoint's HCD state
+ * @usb_hcd: the HCD
+ * @ep:      endpoint whose qh (ep->hcpriv) is to be destroyed
+ *
+ * Caller guarantees no URBs are being submitted for @ep any more.
+ * For iso endpoints, any URBs still on ep->urb_list are given back
+ * with -ESHUTDOWN (the lock is dropped around each giveback, since
+ * the completion may re-enter the HCD).  Then the qh is completed
+ * and ep->hcpriv cleared.
+ */
+static void
+pehci_hcd_endpoint_disable(struct usb_hcd *usb_hcd,
+			   struct usb_host_endpoint *ep)
+{
+	phci_hcd *ehci = usb_hcd_to_pehci_hcd(usb_hcd);
+	struct urb *urb;
+
+	unsigned long flags;
+	struct ehci_qh *qh;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	/* ASSERT:  any	requests/urbs are being	unlinked */
+	/* ASSERT:  nobody can be submitting urbs for this any more */
+	
+#ifdef CONFIG_ISO_SUPPORT
+	mdelay(100);  //delay for ISO
+#endif
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	qh = ep->hcpriv;
+
+	if (!qh) {
+		goto done;
+	} else {
+#ifdef CONFIG_ISO_SUPPORT
+		pehci_info("disable endpoint %x %x\n", ep->desc.bEndpointAddress,qh->type);
+
+		
+		if (qh->type == TD_PTD_BUFF_TYPE_ISTL) {
+
+			/*wait for urb to get complete*/
+			pehci_info("disable %x \n", list_empty(&ep->urb_list));
+			while (!list_empty(&ep->urb_list)) {
+			
+				urb = container_of(ep->urb_list.next,
+					struct urb, urb_list);
+				if (urb) {
+					phcd_clean_urb_pending(ehci, urb);
+					/* unlock around the giveback: the
+					 * completion handler may re-enter */
+					spin_unlock_irqrestore(&ehci->lock,
+						flags);
+
+					urb->status = -ESHUTDOWN;
+					
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+					usb_hcd_giveback_urb(usb_hcd, urb);
+#else
+					usb_hcd_giveback_urb(usb_hcd, urb,
+						urb->status);
+#endif
+					spin_lock_irqsave(&ehci->lock, flags);
+
+				}
+
+			}
+		}
+#endif
+		/*i will complete whatever left	on this	endpoint */
+		pehci_complete_device_removal(ehci, qh);
+#ifdef CONFIG_ISO_SUPPORT
+		phcd_clean_periodic_ep();
+#endif
+		/* NOTE(review): hcpriv is cleared here and again after
+		 * the label below -- redundant but harmless */
+		ep->hcpriv = NULL;
+
+		goto done;
+	}
+	done:
+
+	ep->hcpriv = NULL;
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+	printk("disable endpoint exit\n");
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	return;
+}
+
+/* Hook for the USB core's get_frame_number request: report the
+ * current frame index straight from the controller's FRAMEINDEX
+ * register. */
+static int
+pehci_hcd_get_frame_number(struct usb_hcd *usb_hcd)
+{
+	phci_hcd *hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	u32 frindex = 0;
+
+	frindex = isp1763_reg_read16(hcd->dev, hcd->regs.frameindex,
+				     frindex);
+	return frindex;
+}
+
+/*
+ * pehci_rh_status_data - root-hub status poll (hub timer callback)
+ * @usb_hcd: the HCD
+ * @buf:     bitmap of ports with pending changes (bit 0 = hub,
+ *           bit N = port N)
+ *
+ * Returns non-zero when a port reported a change (or a remote wakeup
+ * is pending), 0 when nothing changed.
+ */
+static int
+pehci_rh_status_data(struct usb_hcd *usb_hcd, char *buf)
+{
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+	unsigned long flags;
+	u32 portsc = 0;
+	u32 changed = 0;
+	u32 nports, port;
+
+	if (hcdpowerdown == 1)
+		return 0;
+
+	buf[0] = 0;
+	if (portchange == 1) {
+		/* remote wakeup: tell the core to re-enumerate port 1 */
+		printk("Remotewakeup-enumerate again \n");
+		buf[0] |= 2;
+		hcd->reset_done[0] = 0;
+		return 1;
+	}
+
+	/* the ISP1763 exposes a single downstream port */
+	nports = 0x1;
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	for (port = 0; port < nports; port++) {
+		portsc = isp1763_reg_read32(hcd->dev, hcd->regs.ports[port],
+					    portsc);
+
+		if (portsc & PORT_OWNER) {
+			/* port belongs to the companion controller: just
+			 * acknowledge a connect-status change, report
+			 * nothing */
+			if (portsc & PORT_CSC) {
+				portsc &= ~PORT_CSC;
+				isp1763_reg_write32(hcd->dev,
+						    hcd->regs.ports[port],
+						    portsc);
+				continue;
+			}
+		}
+
+		if (!(portsc & PORT_CONNECT))
+			hcd->reset_done[port] = 0;
+
+		if ((portsc & (PORT_CSC | PORT_PEC | PORT_OCC)) != 0) {
+			if (port < 7)
+				buf[0] |= 1 << (port + 1);
+			else
+				buf[1] |= 1 << (port - 7);
+			changed = STS_PCD;
+		}
+	}
+	spin_unlock_irqrestore(&hcd->lock, flags);
+
+	return changed ? 1 : 0;
+}
+
+/*root hub control requests*/
+static int
+pehci_rh_control(struct	usb_hcd	*usb_hcd, u16 typeReq, u16 wValue,
+		 u16 wIndex, char *buf,	u16 wLength)
+{
+	u32 ports = 0;
+	u32 temp = 0, status;
+	unsigned long flags;
+	int retval = 0;
+	phci_hcd *hcd =	usb_hcd_to_pehci_hcd(usb_hcd);
+
+	ports =	0x11;
+
+	printk("%s: request %x,wValuse:0x%x, wIndex:0x%x \n",__func__, typeReq,wValue,wIndex);
+	
+	spin_lock_irqsave(&hcd->lock, flags);
+	switch (typeReq) {
+	case ClearHubFeature:
+		switch (wValue)	{
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case ClearPortFeature:
+		pehci_print("ClearPortFeature:0x%x\n", ClearPortFeature);
+		if (!wIndex || wIndex >	(ports & 0xf)) {
+			pehci_info
+				("ClearPortFeature not valid port number %d, should be %d\n",
+				 wIndex, (ports	& 0xf));
+			goto error;
+		}
+		wIndex--;
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[wIndex],
+					  temp);
+		if (temp & PORT_OWNER) {
+			printk("port is	owned by the CC	host\n");
+			break;
+		}
+
+		switch (wValue)	{
+		case USB_PORT_FEAT_ENABLE:
+			pehci_print("enable the	port\n");
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp & ~PORT_PE);
+
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			printk("disable	the port\n");
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp | PORT_PEC);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+		case USB_PORT_FEAT_C_SUSPEND:
+			printk("clear feature suspend  \n");
+			break;
+		case USB_PORT_FEAT_POWER:
+			if (ports & 0x10) {	/*port has has power control switches */
+				isp1763_reg_write32(hcd->dev,
+						    hcd->regs.ports[wIndex],
+						    temp & ~PORT_POWER);
+			}
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			pehci_print("connect change, status is 0x%08x\n", temp);
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp | PORT_CSC);
+			break;
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp | PORT_OCC);
+			break;
+		default:
+			goto error;
+
+		}
+		break;
+
+	case GetHubDescriptor:
+		pehci_hub_descriptor(hcd, (struct usb_hub_descriptor *)	buf);
+		break;
+
+	case GetHubStatus:
+		pehci_print("GetHubStatus:0x%x\n", GetHubStatus);
+		/* no hub-wide feature/status flags */
+		memset(buf, 0, 4);
+		break;
+	case GetPortStatus:
+		pehci_print("GetPortStatus:0x%x\n", GetPortStatus);
+		if (!wIndex || wIndex >	(ports & 0xf)) {
+			pehci_info
+				("GetPortStatus,not valid port number %d, should be %d\n",
+				 wIndex, (ports	& 0xf));
+			goto error;
+		}
+		wIndex--;
+		status = 0;
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[wIndex],
+					  temp);
+		printk("root port status:0x%x\n", temp);
+		/*connect status chnage	*/
+		if (temp & PORT_CSC) {
+			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+			pehci_print("feature CSC 0x%08x	and status 0x%08x  \n",
+				    temp, status);
+		}
+		if(portchange){
+			portchange=0;
+			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+		}
+		/*port enable change */
+		if (temp & PORT_PEC) {
+			status |= 1 << USB_PORT_FEAT_C_ENABLE;
+			pehci_print("feature PEC  0x%08x and status 0x%08x  \n",
+				    temp, status);
+		}
+		/*port over-current */
+		if (temp & PORT_OCC) {
+			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+			pehci_print("feature OCC 0x%08x	and status 0x%08x  \n",
+				    temp, status);
+		}
+
+		/* whoever resets must GetPortStatus to	complete it!! */
+		if ((temp & PORT_RESET)	&& jiffies > hcd->reset_done[wIndex]) {
+			status |= 1 << USB_PORT_FEAT_C_RESET;
+			pehci_print("feature reset 0x%08x and status 0x%08x\n",
+				temp, status);
+			printk(KERN_NOTICE
+				"feature	reset 0x%08x and status	0x%08x\n", temp,
+				status);
+			/* force reset to complete */
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+					    temp & ~PORT_RESET);
+			do {
+				mdelay(20);
+				temp = isp1763_reg_read32(hcd->dev,
+							  hcd->regs.
+							  ports[wIndex], temp);
+			} while	(temp &	PORT_RESET);
+
+			/* see what we found out */
+			printk(KERN_NOTICE "after portreset: %x\n", temp);
+
+			temp = phci_check_reset_complete(hcd, wIndex, temp);
+			printk(KERN_NOTICE "after checkportreset: %x\n", temp);
+		}
+
+		/* don't show wPortStatus if it's owned	by a companion hc */
+
+		if (!(temp & PORT_OWNER)) {
+
+			if (temp & PORT_CONNECT) {
+				status |= 1 << USB_PORT_FEAT_CONNECTION;
+				status |= 1 << USB_PORT_FEAT_HIGHSPEED;
+			}
+			if (temp & PORT_PE) {
+				status |= 1 << USB_PORT_FEAT_ENABLE;
+			}
+			if (temp & PORT_SUSPEND) {
+				status |= 1 << USB_PORT_FEAT_SUSPEND;
+			}
+			if (temp & PORT_OC) {
+				status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+			}
+			if (temp & PORT_RESET) {
+				status |= 1 << USB_PORT_FEAT_RESET;
+			}
+			if (temp & PORT_POWER) {
+				status |= 1 << USB_PORT_FEAT_POWER;
+			}
+		}
+
+		/* This	alignment is good, caller used kmalloc() */
+		*((u32 *) buf) = cpu_to_le32(status);
+		break;
+
+	case SetHubFeature:
+		pehci_print("SetHubFeature:0x%x\n", SetHubFeature);
+		switch (wValue)	{
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* no hub-wide feature/status flags */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case SetPortFeature:
+		pehci_print("SetPortFeature:%x\n", SetPortFeature);
+		if (!wIndex || wIndex >	(ports & 0xf)) {
+			pehci_info
+				("SetPortFeature not valid port	number %d, should be %d\n",
+				 wIndex, (ports	& 0xf));
+			goto error;
+		}
+		wIndex--;
+		temp = isp1763_reg_read32(hcd->dev, hcd->regs.ports[wIndex],
+					  temp);
+		pehci_print("SetPortFeature:PortSc Val 0x%x\n",	temp);
+		if (temp & PORT_OWNER) {
+			break;
+		}
+		switch (wValue)	{
+		case USB_PORT_FEAT_ENABLE:
+			/*enable the port */
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+				temp | PORT_PE);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			
+			#if 0 /* Port suspend will be added in suspend function */
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+				temp | PORT_SUSPEND);
+			#endif
+			
+			break;
+		case USB_PORT_FEAT_POWER:
+			pehci_print("Set Port Power 0x%x and Ports %x\n",
+				USB_PORT_FEAT_POWER, ports);
+			if (ports & 0x10) {
+				printk(KERN_NOTICE
+					"PortSc Reg %x an Value %x\n",
+					hcd->regs.ports[wIndex],
+					(temp | PORT_POWER));
+
+				isp1763_reg_write32(hcd->dev,
+					hcd->regs.ports[wIndex],
+					temp | PORT_POWER);
+			}
+			break;
+		case USB_PORT_FEAT_RESET:
+			pehci_print("Set Port Reset 0x%x\n",
+				USB_PORT_FEAT_RESET);
+			if ((temp & (PORT_PE | PORT_CONNECT)) == PORT_CONNECT
+				&& PORT_USB11(temp)) {
+				printk("error:port %d low speed	--> companion\n", wIndex + 1);
+				temp |=	PORT_OWNER;
+			} else {
+				temp |=	PORT_RESET;
+				temp &=	~PORT_PE;
+
+				/*
+				 * caller must wait, then call GetPortStatus
+				 * usb 2.0 spec	says 50	ms resets on root
+				 */
+				hcd->reset_done[wIndex]	= jiffies
+					+ ((50 /* msec */  * HZ) / 1000);
+			}
+			isp1763_reg_write32(hcd->dev, hcd->regs.ports[wIndex],
+				temp);
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+		pehci_print("this request doesnt fit anywhere\n");
+	error:
+		/* "stall" on error */
+		pehci_info
+			("unhandled root hub request: typereq 0x%08x, wValue %d, wIndex	%d\n",
+			 typeReq, wValue, wIndex);
+		retval = -EPIPE;
+	}
+
+	pehci_info("rh_control:exit\n");
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	return retval;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/* hc_driver operations table: the callbacks through which the USB core
+ * drives this ISP1763 host controller (lifecycle, URB queueing, root hub).
+ * Registered indirectly via usb_create_hcd() in pehci_hcd_probe(). */
+static const struct hc_driver pehci_driver = {
+	.description = hcd_name,
+	.product_desc =	"ST-ERICSSON ISP1763",
+	.hcd_priv_size = sizeof(phci_hcd),
+#ifdef LINUX_2620
+	.irq = NULL,
+#else
+	.irq = pehci_hcd_irq,
+#endif
+	/*
+	 * generic hardware linkage
+	 */
+	.flags = HCD_USB2 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset = pehci_hcd_reset,
+	.start = pehci_hcd_start,
+	.bus_suspend = pehci_bus_suspend,
+	.bus_resume  = pehci_bus_resume,
+	.stop =	pehci_hcd_stop,
+	/*
+	 * managing i/o	requests and associated	device resources
+	 */
+	.urb_enqueue = pehci_hcd_urb_enqueue,
+	.urb_dequeue = pehci_hcd_urb_dequeue,
+	.endpoint_disable = pehci_hcd_endpoint_disable,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number = pehci_hcd_get_frame_number,
+
+	/*
+	 * root	hub support
+	 */
+	.hub_status_data = pehci_rh_status_data,
+	.hub_control = pehci_rh_control,
+};
+
+/* Optional thread-based interrupt handling (THREAD_BASED) */
+
+#ifdef THREAD_BASED
+int pehci_hcd_process_irq_it_handle(struct usb_hcd* usb_hcd_)
+{
+	/* Worker-thread main loop for THREAD_BASED interrupt handling.
+	 * Waits for messages queued elsewhere (or a timeout) and runs
+	 * pehci_interrupt_handler() in process context.  The loop exits
+	 * when the thread receives SIGKILL/SIGINT/SIGTERM (sent from
+	 * pehci_module_cleanup()).  Always returns 0.
+	 */
+	int istatus;
+	
+	struct usb_hcd 		*usb_hcd;
+	char					uIntStatus;
+	phci_hcd    *pehci_hcd;
+
+	struct list_head *pos, *lst_tmp;
+	st_UsbIt_Msg_Struc *mess;
+	unsigned long flags;
+	
+	g_stUsbItThreadHandler.phThreadTask = current;
+	/* Block all signals except the ones used to stop this thread. */
+	siginitsetinv(&((g_stUsbItThreadHandler.phThreadTask)->blocked), sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));		
+	pehci_info("pehci_hcd_process_irq_it_thread ID : %d\n", g_stUsbItThreadHandler.phThreadTask->pid);
+	
+	while (1)
+	{
+		if (signal_pending(g_stUsbItThreadHandler.phThreadTask))
+		{
+	       	printk("thread handler:  Thread received signal\n");
+	       	break;
+		}
+
+		spin_lock(&g_stUsbItThreadHandler.lock);
+		g_stUsbItThreadHandler.lThrdWakeUpNeeded = 0;
+		spin_unlock(&g_stUsbItThreadHandler.lock);
+		
+		/* Wait until a signal arrives or we are woken up or timeout (5second)*/
+		istatus = wait_event_interruptible_timeout(g_stUsbItThreadHandler.ulThrdWaitQhead, (g_stUsbItThreadHandler.lThrdWakeUpNeeded== 1), msecs_to_jiffies(MSEC_INTERVAL_CHECKING));
+
+		local_irq_save(flags); /*disable interrupt*/
+		spin_lock(&g_stUsbItThreadHandler.lock);
+		g_stUsbItThreadHandler.lThrdWakeUpNeeded = 1;
+		spin_unlock(&g_stUsbItThreadHandler.lock);
+		//receive mess	
+		if (!list_empty(&g_messList.list)) //mess list not empty
+		{
+			/* Drain and service every queued interrupt message. */
+			list_for_each_safe(pos, lst_tmp, &(g_messList.list))
+			{
+				mess = list_entry(pos, st_UsbIt_Msg_Struc, list);
+
+				usb_hcd = mess->usb_hcd;
+				uIntStatus = mess->uIntStatus;
+				//pehci_print("-------------receive mess : %d------------\n",uIntStatus);
+				pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+				if((uIntStatus & NO_SOF_REQ_IN_TSK)  || (uIntStatus & NO_SOF_REQ_IN_ISR) || (uIntStatus & NO_SOF_REQ_IN_REQ))
+					pehci_interrupt_handler(pehci_hcd);
+				spin_lock(&g_stUsbItThreadHandler.lock);
+				list_del(pos);
+				kfree(mess);
+				spin_unlock(&g_stUsbItThreadHandler.lock);				
+			}
+		}
+		else if(!list_empty(&g_enqueueMessList.list))
+		{
+			/* Service a single message queued by the enqueue path. */
+			mess = list_first_entry(&(g_enqueueMessList.list), st_UsbIt_Msg_Struc, list);
+			usb_hcd = mess->usb_hcd;
+			uIntStatus = mess->uIntStatus;
+
+			pehci_print("-------------receive mess : %d------------\n",uIntStatus);
+			pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+			if((uIntStatus & NO_SOF_REQ_IN_REQ))
+			{
+				pehci_interrupt_handler(pehci_hcd);
+			}	
+
+			{
+				spin_lock(&enqueue_lock);
+				list_del((g_enqueueMessList.list).next);
+				kfree(mess);
+				spin_unlock(&enqueue_lock);
+			}	
+		}
+		else if(istatus == 0) //timeout
+		{
+			/* Nothing queued: poll the controller anyway so a
+			 * missed wakeup cannot stall interrupt servicing. */
+			pehci_hcd = NULL;
+			pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd_);
+			pehci_interrupt_handler(pehci_hcd);
+
+		}
+		local_irq_restore(flags);  /*enable interrupt*/
+	}
+
+	flush_signals(g_stUsbItThreadHandler.phThreadTask);
+	g_stUsbItThreadHandler.phThreadTask = NULL;
+	return 0;
+	
+}
+
+int pehci_hcd_process_irq_in_thread(struct usb_hcd* usb_hcd_)
+{
+	/* One-time setup for THREAD_BASED interrupt handling: initialise
+	 * the message lists, locks and wait queue, then spawn the worker
+	 * thread.  Always returns 0.
+	 * NOTE(review): usb_hcd_ is handed straight to kernel_thread(),
+	 * whose start routine takes void * — confirm the implicit
+	 * pointer conversion is intended here.
+	 */
+	
+	//status = msgq_create("usb_it_queue", 10, sizeof(st_UsbIt_Msg_Struc), &uUsbIt_MsgQueId);
+	INIT_LIST_HEAD(&g_messList.list);
+	INIT_LIST_HEAD(&g_enqueueMessList.list);
+	spin_lock_init(&enqueue_lock);
+
+	memset(&g_stUsbItThreadHandler, 0, sizeof(st_UsbIt_Thread));
+	init_waitqueue_head(&(g_stUsbItThreadHandler.ulThrdWaitQhead));
+	g_stUsbItThreadHandler.lThrdWakeUpNeeded = 0;
+	spin_lock_init(&g_stUsbItThreadHandler.lock);
+	kernel_thread(pehci_hcd_process_irq_it_handle, usb_hcd_, 0);
+	
+    return 0;
+}
+#endif
+
+
+/* probe: bind this HCD to an isp1763 controller device */
+int
+pehci_hcd_probe(struct isp1763_dev *isp1763_dev, isp1763_id * ids)
+{
+	/* Create and register a usb_hcd for the ISP1763 device.
+	 * Returns 0 on success or a negative errno.  On success the hcd is
+	 * stashed in isp1763_dev->driver_data and a wake lock is held so
+	 * the system stays awake while the host stack is live.
+	 */
+#ifdef NON_PCI
+	struct platform_device *dev = isp1763_dev->dev;
+#else /* PCI */
+	struct pci_dev *dev = isp1763_dev->pcidev;
+#endif
+	struct usb_hcd *usb_hcd;
+	phci_hcd *pehci_hcd;
+	int status = 0;
+
+#ifndef NON_PCI
+	u32 intcsr = 0;
+#endif
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	if (usb_disabled()) {
+		return -ENODEV;
+	}
+
+	usb_hcd	= usb_create_hcd(&pehci_driver, &dev->dev, "ISP1763");
+	if (usb_hcd == NULL) {
+		status = -ENOMEM;
+		goto clean;
+	}
+
+	/* this is our host */
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	pehci_hcd->dev = isp1763_dev;
+	pehci_hcd->iobase = (u8 *) isp1763_dev->baseaddress;
+	pehci_hcd->iolength = isp1763_dev->length;
+
+	/* lets keep our host here */
+	isp1763_dev->driver_data = usb_hcd;
+#ifdef NON_PCI
+/* Nothing to do: no PLX PCI bridge on a platform bus. */
+#else
+	/* Enable the interrupts from PLX to PCI */
+	/* CONFIGURE PCI/PLX interrupt */
+#ifdef DATABUS_WIDTH_16
+	wvalue1	= readw(pehci_hcd->plxiobase + 0x68);
+	wvalue2	= readw(pehci_hcd->plxiobase + 0x68 + 2);
+	intcsr |= wvalue2;
+	intcsr <<= 16;
+	intcsr |= wvalue1;
+	printk(KERN_NOTICE "Enable PCI Intr: %x	\n", intcsr);
+	intcsr |= 0x900;
+	writew((u16) intcsr, pehci_hcd->plxiobase + 0x68);
+	writew((u16) (intcsr >>	16), pehci_hcd->plxiobase + 0x68 + 2);
+#else
+	bvalue1	= readb(pehci_hcd->plxiobase + 0x68);
+	bvalue2	= readb(pehci_hcd->plxiobase + 0x68 + 1);
+	bvalue3	= readb(pehci_hcd->plxiobase + 0x68 + 2);
+	bvalue4	= readb(pehci_hcd->plxiobase + 0x68 + 3);
+	intcsr |= bvalue4;
+	intcsr <<= 8;
+	intcsr |= bvalue3;
+	intcsr <<= 8;
+	intcsr |= bvalue2;
+	intcsr <<= 8;
+	intcsr |= bvalue1;
+	writeb((u8) intcsr, pehci_hcd->plxiobase + 0x68);
+	writeb((u8) (intcsr >> 8), pehci_hcd->plxiobase	+ 0x68 + 1);
+	writeb((u8) (intcsr >> 16), pehci_hcd->plxiobase + 0x68	+ 2);
+	writeb((u8) (intcsr >> 24), pehci_hcd->plxiobase + 0x68	+ 3);
+#endif
+#endif
+
+	No_Data_Phase =	0;
+	No_Status_Phase	= 0;
+	usb_hcd->self.controller->dma_mask = 0;
+	usb_hcd->self.otg_port = 1;
+#if 0
+#ifndef THREAD_BASED 	
+	status = isp1763_request_irq(pehci_hcd_irq, isp1763_dev, usb_hcd);
+#endif
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	if (status == 0) {
+		status = usb_add_hcd(usb_hcd, isp1763_dev->irq, SA_SHIRQ);
+	}
+#else /* Linux 2.6.28*/
+	usb_hcd->self.uses_dma = 0;
+	if (status == 0){
+		status = usb_add_hcd(usb_hcd, isp1763_dev->irq,
+		IRQF_SHARED | IRQF_DISABLED | IRQF_TRIGGER_LOW);
+	}
+#endif
+	if (status != 0) {
+		/* usb_add_hcd() failed: drop the reference taken by
+		 * usb_create_hcd() (previously leaked here) and clear the
+		 * stale back-pointer so nobody uses a dead hcd.  Also skip
+		 * the wake lock, which must only be held on success. */
+		isp1763_dev->driver_data = NULL;
+		usb_put_hcd(usb_hcd);
+		return status;
+	}
+
+#ifdef THREAD_BASED 	
+	g_pehci_hcd = pehci_hcd;
+#endif
+
+#ifdef USBNET 
+	/* initialize clean up urb list */
+	INIT_LIST_HEAD(&(pehci_hcd->cleanup_urb.urb_list));
+#endif
+	/* Keep the IRQ armed across system suspend and hold the system
+	 * awake while the controller is active. */
+	enable_irq_wake(isp1763_dev->irq);
+	wake_lock_init(&pehci_wake_lock, WAKE_LOCK_SUSPEND,
+						dev_name(&dev->dev));
+	wake_lock(&pehci_wake_lock);
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+	isp1763_hcd = isp1763_dev;
+	return status;
+
+clean:
+	return status;
+}
+/*--------------------------------------------------------------*
+ *
+ *  Module details: pehci_hcd_powerup
+ *
+ *  Powers the chip back up after a powerdown by clearing the
+ *  powerdown flag and re-running the driver's probe routine.
+ *
+ *  Input: struct isp1763_dev *
+ *
+ *  Called by: IOCTL function
+ *
+ --------------------------------------------------------------*/
+/* Bring the controller back from powerdown: clear the powerdown flag
+ * first (probe may consult it), then re-run the driver's probe hook. */
+void
+pehci_hcd_powerup(struct isp1763_dev *dev)
+{
+	printk("%s\n", __FUNCTION__);
+	hcdpowerdown = 0;
+	dev->driver->probe(dev, dev->driver->id);
+}
+void
+pehci_hcd_powerdown(struct	isp1763_dev *dev)
+{
+	/* Put the ISP1763A into its lowest-power state: stop the HC, remove
+	 * the hcd from the USB core, suspend the root port and device
+	 * controller, then write the power-down control register.  Reversed
+	 * by pehci_hcd_powerup().  No-op if no hcd is bound.
+	 */
+	struct usb_hcd *usb_hcd;
+
+	phci_hcd *hcd = NULL;
+	u32 temp;
+	usb_hcd = (struct usb_hcd *) dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+	
+	printk("%s\n", __FUNCTION__);
+	hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		/* stop the controller first */
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+	printk("++ %s: Entered\n", __FUNCTION__);
+
+//	isp1763_free_irq(dev,usb_hcd);
+	usb_remove_hcd(usb_hcd);
+	dev->driver_data = NULL;
+	
+
+	temp = isp1763_reg_read16(dev, HC_INTENABLE_REG, temp); //0xD6
+	temp &= ~0x400;		/*disable otg interrupt*/
+	isp1763_reg_write16(dev, HC_INTENABLE_REG, temp); //0xD6
+
+	isp1763_reg_write16(dev, HC_UNLOCK_DEVICE, 0xAA37);	/*unlock the device 0x7c*/
+	mdelay(1);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+
+	/* Port powered, connected and enabled: cycle it through a
+	 * disable/re-enable sequence before suspending (magic values are
+	 * PORTSC1 bit patterns — see the ISP1763A data sheet). */
+	if ((temp & 0x1005) == 0x1005) {
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1000);
+		temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+		mdelay(10);
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1104);
+		mdelay(10);
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1007);
+		temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+		mdelay(10);
+		isp1763_reg_write32(dev, HC_PORTSC1_REG, 0x1005);
+
+		temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	}
+	
+	printk("port status %x\n ", temp);
+	temp &= ~0x2;
+	temp &= ~0x40;		/*force port resume*/
+	temp |= 0x80;		/*suspend*/
+
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	printk("port status %x\n ", temp);
+	mdelay(200);
+
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);	/*suspend the device first 0xc*/
+	temp |= 0x2c;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp); //0xc
+	mdelay(20);
+
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0); //0xc
+	temp = 0xc;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp); //0xc
+
+	isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, 0xffff0800);
+
+	/* Probe took this wake lock; release and destroy it now that the
+	 * controller is fully down. */
+	wake_unlock(&pehci_wake_lock);
+	wake_lock_destroy(&pehci_wake_lock);
+
+	hcdpowerdown = 1;
+	
+}
+
+static int pehci_bus_suspend(struct usb_hcd *usb_hcd)
+{
+	/* hc_driver .bus_suspend hook: stop the controller, suspend the
+	 * root port and both controllers of the ISP1763A, then drop the
+	 * wake lock.  Idempotent via the hcdpowerdown flag.  Returns 0 on
+	 * success, -EBUSY if no hcd was supplied.
+	 */
+	u32 temp=0;
+	unsigned long flags;
+	phci_hcd *pehci_hcd = NULL;
+	struct isp1763_dev *dev = NULL;
+
+	
+	if (!usb_hcd) {
+		return -EBUSY;
+	}
+	
+	printk("++ %s \n",__FUNCTION__);
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+
+	dev = pehci_hcd->dev;
+	
+	spin_lock_irqsave(&pehci_hcd->lock, flags);
+	if(hcdpowerdown){
+		/* Already suspended/powered down: nothing to do. */
+		spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+		return 0;
+	}
+
+	/* Acknowledge pending status and re-arm the interrupt mask. */
+	isp1763_reg_write32(dev, HC_USBSTS_REG, 0x4); //0x90
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG_EHCI, 0x4); //0x94
+	isp1763_reg_write16(dev, HC_INTERRUPT_REG, INTR_ENABLE_MASK); //0xd4
+
+	temp=isp1763_reg_read16(dev, HC_INTERRUPT_REG, 0); //0xd4
+
+	isp1763_reg_write16(dev,HC_INTENABLE_REG,INTR_ENABLE_MASK);
+	temp=isp1763_reg_read16(dev,HC_INTENABLE_REG,0);
+
+	hcdpowerdown = 1;
+	
+	/* stop the controller first */
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+
+	/* suspend root port which will suspend host controller of the ISP1763A */
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp |= (PORT_SUSPEND);//0x80
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	
+	/* suspend device controller of the ISP1763a*/
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);
+	temp |= 0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);
+	mdelay(1); // make sure there will not be huge delay here max is 1 ms
+	temp &= ~0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);
+	/* put host controoler into low power mode */
+	isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, POWER_DOWN_CTRL_SUSPEND_VALUE);
+
+//	usb_hcd->state = HC_STATE_SUSPENDED;
+
+	spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+
+	printk("-- %s \n",__FUNCTION__);
+
+	/* Allow the system to sleep while the bus is suspended. */
+	wake_unlock(&pehci_wake_lock);
+
+	return 0;
+	
+
+}
+
+static int pehci_bus_resume(struct usb_hcd *usb_hcd)
+{
+	/* hc_driver .bus_resume hook: wait for the chip to come back,
+	 * restore the power-down control register, restart the controller
+	 * and drive the PORTSC1 resume signalling, then re-take the wake
+	 * lock.  Returns 0 on success, -EBUSY for a NULL hcd, -1 if the
+	 * chip never acknowledges POWER_DOWN_CTRL_NORMAL_VALUE.
+	 */
+	u32 temp,i;
+	phci_hcd *pehci_hcd = NULL;
+	struct isp1763_dev *dev = NULL;
+	unsigned long flags;
+	u32 portsc1;
+
+	printk("%s Enter \n",__func__);
+
+	if (!usb_hcd) {
+		return -EBUSY;
+	}
+
+	if(hcdpowerdown ==0){
+		/* Bus was never suspended: nothing to resume. */
+		printk("%s already executed\n ",__func__);
+		return 0;
+	}
+
+	pehci_hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	dev = pehci_hcd->dev;
+	spin_lock_irqsave(&pehci_hcd->lock, flags);
+
+	/* Poll (up to ~200 ms) until the chip-ID register reads back,
+	 * proving the chip is awake and responding. */
+	for (temp = 0; temp < 100; temp++)
+	{
+		i = isp1763_reg_read32(dev, HC_CHIP_ID_REG, 0);
+		if(i==0x176320)
+			break;
+		mdelay(2);
+	}
+	printk("temp=%d, chipid:0x%x \n",temp,i);
+	mdelay(10);
+	isp1763_reg_write16(dev, HC_UNLOCK_DEVICE, 0xAA37);	/*unlock the device 0x7c*/
+	i = isp1763_reg_read32(dev, HC_POWER_DOWN_CONTROL_REG, 0);
+	printk("POWER DOWN CTRL REG value during suspend =0x%x\n", i);
+	/* Keep writing the normal power-control value until it sticks. */
+	for (temp = 0; temp < 100; temp++) {
+		mdelay(1);
+		isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, POWER_DOWN_CTRL_NORMAL_VALUE);
+		mdelay(1);
+		i = isp1763_reg_read32(dev, HC_POWER_DOWN_CONTROL_REG, 0);
+		if(i==POWER_DOWN_CTRL_NORMAL_VALUE)
+			break;
+	}
+	if (temp == 100) {
+		/* Chip never accepted the value: resume failed. */
+		spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+		pr_err("%s:isp1763a failed to resume\n", __func__);
+		return -1;
+	}
+
+	wake_lock(&pehci_wake_lock);
+
+	printk("%s: Powerdown Reg Val: 0x%08x -- %d\n", __func__, i, temp);
+
+	/* Clear stale status and mask interrupts while resignalling. */
+	isp1763_reg_write32(dev, HC_USBSTS_REG,0x0); //0x90
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG_EHCI, 0x0); //0x94
+	isp1763_reg_write16(dev, HC_INTENABLE_REG,0); //0xD6
+
+	portsc1 = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("%s PORTSC1: 0x%x\n", __func__, portsc1);
+
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp |= 0x01;		/* Start the controller */
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+	mdelay(10);
+
+	/* Resume sequence: assert suspend, then resume, then clear resume
+	 * (timings per the ISP1763A port-resume procedure). */
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	if (temp & PORT_SUSPEND)
+		pr_err("%s: HC_PORTSC1_REG: 0x%08x\n", __func__, temp);
+	temp |= PORT_SUSPEND;    //0x80;
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	mdelay(50);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp |= PORT_RESUME;     //0x40;
+	temp &= ~(PORT_SUSPEND); //0x80;		/*suspend*/
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp &= ~(PORT_RESUME);  //0x40;
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+
+	temp = INTR_ENABLE_MASK;
+	isp1763_reg_write16(dev, HC_INTENABLE_REG, temp); //0xD6
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("%s resume port status: 0x%x\n", __func__, temp);
+	if(!(temp & 0x4)){ //port is disabled
+		isp1763_reg_write16(dev, HC_INTENABLE_REG, 0x1005); //0xD6
+		mdelay(10);
+	}
+//	phci_resume_wakeup(dev);
+
+	hcdpowerdown = 0;
+	if(hubdev){
+		/* Clear the suspend-context hooks installed for the hub. */
+		hubdev->hcd_priv    = NULL;
+		hubdev->hcd_suspend = NULL;
+	}
+
+	spin_unlock_irqrestore(&pehci_hcd->lock, flags);
+	printk("%s Leave\n",__func__);
+
+	return 0;
+}
+
+void
+pehci_hcd_resume(struct	isp1763_dev *dev)
+{
+	/* Bus-layer resume callback (registered in pehci_hcd_pci_driver):
+	 * wake the chip, restore the power-control register, restart the
+	 * controller, clear suspend/resume bits on the root port and kick
+	 * the wakeup path.  No-op unless pehci_hcd_suspend() ran first.
+	 */
+	struct usb_hcd *usb_hcd;
+	u32 temp,i;
+	usb_hcd = (struct usb_hcd *) dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+
+	if(hcdpowerdown ==0){
+		return ;
+	}
+
+	printk("%s \n",__FUNCTION__);
+
+	/* Poll (up to ~10 ms) for the chip-ID register to read back. */
+	for (temp = 0; temp < 10; temp++)
+	{
+	i = isp1763_reg_read32(dev, HC_CHIP_ID_REG, 0);
+	printk("temp=%d, chipid:0x%x \n",temp,i);
+	if(i==0x176320)
+	break;
+	mdelay(1);
+	}
+
+	/* Start the controller */
+	temp = 0x01;		
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+
+	/* update power down control reg value */
+	for (temp = 0; temp < 100; temp++) {
+		isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, POWER_DOWN_CTRL_NORMAL_VALUE);
+		i = isp1763_reg_read32(dev, HC_POWER_DOWN_CONTROL_REG, 0);
+		if(i==POWER_DOWN_CTRL_NORMAL_VALUE)
+		break;
+	}
+	
+	if (temp == 100) {
+		/* Chip never accepted the value: give up. */
+		pr_err("%s:isp1763a failed to resume\n", __func__);
+		return;
+	}
+
+	wake_lock(&pehci_wake_lock);
+
+	/* Mask interrupts and clear stale status before resignalling. */
+	isp1763_reg_write16(dev, HC_INTENABLE_REG,0); //0xD6
+	isp1763_reg_write32(dev,HC_INTERRUPT_REG_EHCI,0x4); //0x94 
+	isp1763_reg_write32(dev,HC_INTERRUPT_REG,0xFFFF); //0x94 
+	/* clear suspend bit and resume bit */	
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	temp &= ~(PORT_SUSPEND); //0x80;		/*suspend*/
+	temp &= ~(PORT_RESUME);  // 0x40;
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+	
+	isp1763_reg_write16(dev, HC_INTENABLE_REG, INTR_ENABLE_MASK); //0xD6
+	/*this is just make sure port is resumed back */	
+	mdelay(1);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("after hcd resume :port status %x\n ", temp);
+	
+	hcdpowerdown = 0;	
+
+	phci_resume_wakeup(dev);
+
+	if(hubdev){
+		/* Clear the suspend-context hooks installed for the hub. */
+		hubdev->hcd_priv=NULL;
+		hubdev->hcd_suspend=NULL;
+	}
+//	usb_hcd->state = HC_STATE_RUNNING;
+
+}
+
+
+void
+pehci_hcd_suspend(struct isp1763_dev *dev)
+{
+	/* Bus-layer suspend callback (registered in pehci_hcd_pci_driver):
+	 * stop the controller, suspend the root port and the device
+	 * controller, then write the suspend value to the power-down
+	 * control register and release the wake lock.  Idempotent via
+	 * the hcdpowerdown flag.
+	 */
+	struct usb_hcd *usb_hcd;
+	u32 temp;
+	usb_hcd = (struct usb_hcd *) dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+	printk("%s \n",__FUNCTION__);
+	if(hcdpowerdown){
+		return ;
+	}
+
+	temp = isp1763_reg_read16(dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		/* stop the controller first */
+	isp1763_reg_write16(dev, HC_USBCMD_REG, temp);
+
+	/* Acknowledge pending status and arm the interrupt mask so a
+	 * wakeup event can be delivered while suspended. */
+	isp1763_reg_write32(dev, HC_USBSTS_REG, 0x4); //0x90
+	isp1763_reg_write32(dev, HC_INTERRUPT_REG_EHCI, 0x4); //0x94
+	isp1763_reg_write16(dev, HC_INTERRUPT_REG, INTR_ENABLE_MASK); //0xd4
+	
+	temp=isp1763_reg_read16(dev, HC_INTERRUPT_REG, 0); //0xd4
+
+	printk("suspend :Interrupt Status %x\n",temp);
+	isp1763_reg_write16(dev,HC_INTENABLE_REG,INTR_ENABLE_MASK);
+	temp=isp1763_reg_read16(dev,HC_INTENABLE_REG,0);
+	printk("suspend :Interrupt Enable %x\n",temp);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	
+	printk("suspend :port status %x\n ", temp);
+	temp &= ~0x2;
+	temp &= ~0x40;		/*force port resume*/
+	temp |= 0x80;		/*suspend*/
+//	temp |= 0x700000;	/*WKCNNT_E,WKDSCNNT_E,WKOC_E*/
+	isp1763_reg_write32(dev, HC_PORTSC1_REG, temp);
+  //  mdelay(10);
+	temp = isp1763_reg_read32(dev, HC_PORTSC1_REG, 0);
+	printk("suspend :port status %x\n ", temp);
+	hcdpowerdown = 1;
+
+	/* Pulse the device-controller suspend bit in the hw-mode register
+	 * (set, wait, clear) per the ISP1763A suspend sequence. */
+	temp = isp1763_reg_read16(dev,HC_HW_MODE_REG, 0);	/*suspend the device first 0xc*/
+	temp&=0xff7b;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp); //0xc
+
+
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);	/*suspend the device first 0xc*/
+	temp |= 0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);//0xc
+	mdelay(2);
+	temp = isp1763_reg_read16(dev, HC_HW_MODE_REG, 0);//0xc
+	temp &= 0xffdf;
+	temp &= ~0x20;
+	isp1763_reg_write16(dev, HC_HW_MODE_REG, temp);//0xc
+
+	isp1763_reg_write32(dev, HC_POWER_DOWN_CONTROL_REG, 0xffff0830);
+
+	/* Allow the system to sleep while the controller is suspended. */
+	wake_unlock(&pehci_wake_lock);
+	
+}
+
+/* Remote-wakeup callback: record this device on the hub so the hub's
+ * suspend hook can reach the chip, then run the wakeup sequence. */
+void
+pehci_hcd_remotewakeup(struct isp1763_dev *dev)
+{
+	if (hubdev) {
+		hubdev->hcd_priv = dev;
+		hubdev->hcd_suspend = (void *)pehci_hcd_suspend;
+	}
+	phci_remotewakeup(dev);
+}
+
+/* Detach and tear down the host controller bound to @isp1763_dev:
+ * quiesce the chip (hw-mode/interrupt-enable cleared, run bit stopped),
+ * unregister the hcd from the USB core and drop the probe-time wake lock.
+ * No-op if no hcd is bound. */
+static void
+pehci_hcd_remove(struct	isp1763_dev *isp1763_dev)
+{
+
+	struct usb_hcd *usb_hcd;
+	
+#ifdef NON_PCI
+#else	/* PCI */
+//	struct pci_dev *dev = isp1763_dev->pcidev;
+#endif
+
+	phci_hcd *hcd =	NULL;
+	u32 temp;
+	usb_hcd	= (struct usb_hcd *) isp1763_dev->driver_data;
+	if (!usb_hcd) {
+		return;
+	}
+	hcd = usb_hcd_to_pehci_hcd(usb_hcd);
+	isp1763_reg_write32(hcd->dev, hcd->regs.hwmodecontrol, 0);
+	isp1763_reg_write32(hcd->dev, hcd->regs.interruptenable, 0);
+	/* hubdev is a pointer (dereferenced elsewhere): use NULL, not 0. */
+	hubdev = NULL;
+	huburb = 0;	/* NOTE(review): likely a pointer too — use NULL if so */
+	temp = isp1763_reg_read16(hcd->dev, HC_USBCMD_REG, 0);
+	temp &= ~0x01;		/* stop the controller first */
+	isp1763_reg_write16(hcd->dev, HC_USBCMD_REG, temp);
+//	isp1763_free_irq(isp1763_dev,usb_hcd);
+	usb_remove_hcd(usb_hcd);
+
+	wake_unlock(&pehci_wake_lock);
+	wake_lock_destroy(&pehci_wake_lock);
+}
+
+
+/* Device identity used to match this HCD against an ISP1763 controller. */
+static isp1763_id ids =	{
+	.idVendor = 0x04CC,	/*st ericsson isp1763 vendor_id	*/
+	.idProduct = 0x1A64,	/*st ericsson isp1763 product_id */
+	.driver_info = (unsigned long) &pehci_driver,
+};
+
+/* isp1763 bus glue: hooks this HCD into the isp1763 transport driver layer */
+static struct isp1763_driver pehci_hcd_pci_driver = {
+	.name		= (char *) hcd_name,
+	.index		= 0,
+	.id		= &ids,
+	/* lifecycle */
+	.probe		= pehci_hcd_probe,
+	.remove		= pehci_hcd_remove,
+	/* power management */
+	.suspend	= pehci_hcd_suspend,
+	.resume		= pehci_hcd_resume,
+	.remotewakeup	= pehci_hcd_remotewakeup,
+	.powerup	= pehci_hcd_powerup,
+	.powerdown	= pehci_hcd_powerdown,
+};
+
+#ifdef HCD_PACKAGE
+/* Char-device open hook: no per-open state to set up. */
+int
+usb_hcddev_open(struct inode *inode, struct file *fp)
+{
+	return 0;
+}
+
+/* Char-device release hook: nothing to tear down. */
+int
+usb_hcddev_close(struct inode *inode, struct file *fp)
+{
+	return 0;
+}
+
+int
+usb_hcddev_fasync(int fd, struct file *fp, int mode)
+{
+	/* Maintain the async-notification (SIGIO) subscriber list. */
+	return fasync_helper(fd, fp, mode, &fasync_q);
+}
+
+long
+usb_hcddev_ioctl(struct file *fp,
+		 unsigned int cmd, unsigned long arg)
+{
+	/*
+	 * User-space control entry point.  Compliance-test commands only
+	 * latch a selector into the HostComplianceTest/HostTest globals;
+	 * power and bus commands forward to the isp1763 bus-driver hooks.
+	 * Unknown commands are ignored.  Always returns 0.
+	 */
+	switch (cmd) {
+	case HCD_IOC_POWERDOWN:		/* set HCD deep-suspend mode */
+		printk("HCD IOC POWERDOWN MODE\n");
+		if (isp1763_hcd->driver->powerdown)
+			isp1763_hcd->driver->powerdown(isp1763_hcd);
+		break;
+	case HCD_IOC_POWERUP:		/* power the HCD back up */
+		printk("HCD IOC POWERUP MODE\n");
+		if (isp1763_hcd->driver->powerup)
+			isp1763_hcd->driver->powerup(isp1763_hcd);
+		break;
+	case HCD_IOC_TESTSE0_NACK:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_SE0_NAK;
+		break;
+	case HCD_IOC_TEST_J:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_J;
+		break;
+	case HCD_IOC_TEST_K:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_K;
+		break;
+	case HCD_IOC_TEST_TESTPACKET:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_PACKET;
+		break;
+	case HCD_IOC_TEST_FORCE_ENABLE:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_TEST_FORCE_ENABLE;
+		break;
+	case HCD_IOC_TEST_SUSPEND_RESUME:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_HS_HOST_PORT_SUSPEND_RESUME;
+		break;
+	case HCD_IOC_TEST_SINGLE_STEP_GET_DEV_DESC:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_SINGLE_STEP_GET_DEV_DESC;
+		break;
+	case HCD_IOC_TEST_SINGLE_STEP_SET_FEATURE:
+		HostComplianceTest = HOST_COMPILANCE_TEST_ENABLE;
+		HostTest = HOST_COMP_SINGLE_STEP_SET_FEATURE;
+		break;
+	case HCD_IOC_TEST_STOP:		/* leave compliance-test mode */
+		HostComplianceTest = 0;
+		HostTest = 0;
+		break;
+	case HCD_IOC_SUSPEND_BUS:
+		printk("isp1763:SUSPEND bus\n");
+		if (isp1763_hcd->driver->suspend)
+			isp1763_hcd->driver->suspend(isp1763_hcd);
+		break;
+	case HCD_IOC_RESUME_BUS:
+		printk("isp1763:RESUME bus\n");
+		if (isp1763_hcd->driver->resume)
+			isp1763_hcd->driver->resume(isp1763_hcd);
+		break;
+	case HCD_IOC_REMOTEWAKEUP_BUS:
+		/* NOTE(review): message says SUSPEND but this is the
+		 * remote-wakeup path; kept byte-identical on purpose. */
+		printk("isp1763:SUSPEND bus\n");
+		if (isp1763_hcd->driver->remotewakeup)
+			isp1763_hcd->driver->remotewakeup(isp1763_hcd);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+
+/* HCD file operations */
+/* File operations for the HCD control char device.  Converted from the
+ * obsolete GNU "label:" initializer syntax to standard C99 designated
+ * initializers (the kernel's required style); same members, same values. */
+static struct file_operations usb_hcddev_fops = {
+	.owner		= THIS_MODULE,
+	.read		= NULL,
+	.write		= NULL,
+	.poll		= NULL,
+	.unlocked_ioctl	= usb_hcddev_ioctl,
+	.open		= usb_hcddev_open,
+	.release	= usb_hcddev_close,
+	.fasync		= usb_hcddev_fasync,
+};
+
+#endif
+
+
+static int __init
+pehci_module_init(void)
+{
+	/* Module entry point: initialise the HCD memory pools, register
+	 * with the isp1763 bus layer and, when configured, start the IRQ
+	 * worker thread and the HCD control character device.  Returns the
+	 * last registration result (0 on success, negative on error).
+	 */
+	int result = 0;
+	phci_hcd_mem_init();
+
+	/*register driver */
+	result = isp1763_register_driver(&pehci_hcd_pci_driver);
+	if (!result) {
+		info("Host Driver has been Registered");
+	} else {
+		err("Host Driver has not been Registered with errors : %x",
+			result);
+	}
+
+#ifdef THREAD_BASED 	
+	/* NOTE(review): g_pehci_hcd is only assigned in probe; if no
+	 * device has been probed by now this dereferences NULL — confirm. */
+	pehci_hcd_process_irq_in_thread(&(g_pehci_hcd->usb_hcd));
+   	printk("kernel_thread() Enter\n"); 
+#endif
+	
+#ifdef HCD_PACKAGE
+	printk("Register Char Driver for HCD\n");
+	result = register_chrdev(USB_HCD_MAJOR, USB_HCD_MODULE_NAME,
+		&usb_hcddev_fops);
+	
+#endif
+	return result;
+
+}
+
+static void __exit
+pehci_module_cleanup(void)
+{
+	/* Module exit: stop the optional IRQ worker thread, remove the
+	 * optional control char device and unregister from the isp1763
+	 * bus layer. */
+#ifdef THREAD_BASED	
+	printk("module exit:  Sending signal to stop thread\n");
+	if (g_stUsbItThreadHandler.phThreadTask != NULL)
+	{
+		send_sig(SIGKILL, g_stUsbItThreadHandler.phThreadTask, 1);
+		mdelay(6);	/* give the thread time to leave its loop */
+	}
+#endif
+
+#ifdef HCD_PACKAGE
+	unregister_chrdev(USB_HCD_MAJOR, USB_HCD_MODULE_NAME);
+#endif
+	isp1763_unregister_driver(&pehci_hcd_pci_driver);
+}
+
+/* Standard module metadata and entry/exit hooks. */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+module_init(pehci_module_init);
+module_exit(pehci_module_cleanup);
diff --git a/drivers/usb/host/pehci/host/pehci.h b/drivers/usb/host/pehci/host/pehci.h
new file mode 100644
index 0000000..cc6a06b
--- /dev/null
+++ b/drivers/usb/host/pehci/host/pehci.h
@@ -0,0 +1,752 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* Refer to file ~/drivers/usb/host/ehci-dbg.h for copyright owners (kernel version 2.6.9)
+* Code is modified for ST-Ericsson product 
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+#ifndef	__PEHCI_H__
+#define	__PEHCI_H__
+
+
+#define	DRIVER_AUTHOR	"ST-ERICSSON	  "
+#define	DRIVER_DESC "ISP1763 'Enhanced'	Host Controller	(EHCI) Driver"
+
+/*    bus related stuff	*/
+#define	__ACTIVE		0x01
+#define	__SLEEPY		0x02
+#define	__SUSPEND		0x04
+#define	__TRANSIENT		0x80
+
+#define	USB_STATE_HALT		0
+#define	USB_STATE_RUNNING	(__ACTIVE)
+#define	USB_STATE_READY		(__ACTIVE|__SLEEPY)
+#define	USB_STATE_QUIESCING	(__SUSPEND|__TRANSIENT|__ACTIVE)
+#define	USB_STATE_RESUMING	(__SUSPEND|__TRANSIENT)
+#define	USB_STATE_SUSPENDED	(__SUSPEND)
+
+/* System flags	 */
+#define	HCD_MEMORY		0x0001
+#define	HCD_USB2		0x0020
+#define	HCD_USB11		0x0010
+
+#define	HCD_IS_RUNNING(state) ((state) & __ACTIVE)
+#define	HCD_IS_SUSPENDED(state)	((state) & __SUSPEND)
+
+
+/*---------------------------------------------------
+ *    Host controller related
+ -----------------------------------------------------*/
+/* IRQ line for	the ISP1763 */
+#define	HCD_IRQ			IRQ_GPIO(25)
+#define	CMD_RESET		(1<<1)	/* reset HC not	bus */
+#define	CMD_RUN			(1<<0)	/* start/stop HC */
+#define	STS_PCD			(1<<2)	/* port	change detect */
+/* NOTE:  urb->transfer_flags expected to not use this bit !!! */
+#define	EHCI_STATE_UNLINK	0x8000	/* urb being unlinked */
+
+/*  Bits definations for qha*/
+/* Bits	PID*/
+#define	SETUP_PID		(2)
+#define	OUT_PID			(0)
+#define	IN_PID			(1)
+
+/* Bits	MULTI*/
+#define	MULTI(x)		((x)<< 29)
+#define	XFER_PER_UFRAME(x)	(((x) >> 29) & 0x3)
+
+/*Active, EP type and speed bits */
+#define	QHA_VALID		(1<<0)
+#define	QHA_ACTIVE		(1<<31)
+
+/*1763 error bit maps*/
+#define	HC_MSOF_INT		(1<< 0)
+#define	HC_MSEC_INT		(1 << 1)
+#define	HC_EOT_INT		(1 << 3)
+#define     HC_OPR_REG_INT	(1<<4)
+#define     HC_CLK_RDY_INT	(1<<6)
+#define	HC_INTL_INT		(1 << 7)
+#define	HC_ATL_INT		(1 << 8)
+#define	HC_ISO_INT		(1 << 9)
+#define	HC_OTG_INT		(1 << 10)
+
+/*PTD error codes*/
+#define	PTD_STATUS_HALTED	(1 << 30)
+#define	PTD_XACT_ERROR		(1 << 28)
+#define	PTD_BABBLE		(1 << 29)
+#define PTD_ERROR		(PTD_STATUS_HALTED | PTD_XACT_ERROR | PTD_BABBLE)
+/*ep types*/
+#define	EPTYPE_BULK		(2 << 12)
+#define	EPTYPE_CONTROL		(0 << 12)
+#define	EPTYPE_INT		(3 << 12)
+#define	EPTYPE_ISO		(1 << 12)
+
+#define	PHCI_QHA_LENGTH		32
+
+#define usb_inc_dev_use		usb_get_dev
+#define usb_dec_dev_use		usb_put_dev
+#define usb_free_dev		usb_put_dev
+/*1763 host controller periodic	size*/
+#define PTD_PERIODIC_SIZE	16
+#define MAX_PERIODIC_SIZE	16
+#define PTD_FRAME_MASK		0x1f
+/*periodic list*/
+struct _periodic_list {
+	int framenumber;
+	struct list_head sitd_itd_head;
+	char high_speed;	/*1 - HS ; 0 - FS*/
+	u16 ptdlocation;
+};
+typedef	struct _periodic_list periodic_list;
+
+
+/*iso ptd*/
+struct _isp1763_isoptd {
+	u32 td_info1;
+	u32 td_info2;
+	u32 td_info3;
+	u32 td_info4;
+	u32 td_info5;
+	u32 td_info6;
+	u32 td_info7;
+	u32 td_info8;
+} __attribute__	((aligned(32)));
+
+typedef	struct _isp1763_isoptd isp1763_isoptd;
+
+struct _isp1763_qhint {
+	u32 td_info1;
+	u32 td_info2;
+	u32 td_info3;
+	u32 td_info4;
+	u32 td_info5;
+#define	INT_UNDERRUN (1	<< 2)
+#define	INT_BABBLE    (1 << 1)
+#define	INT_EXACT     (1 << 0)
+	u32 td_info6;
+	u32 td_info7;
+	u32 td_info8;
+} __attribute__	((aligned(32)));
+
+typedef	struct _isp1763_qhint isp1763_qhint;
+
+
+struct _isp1763_qha {
+	u32 td_info1;		/* First 32 bit	*/
+	u32 td_info2;		/* Second 32 bit */
+	u32 td_info3;		/* third 32 bit	*/
+	u32 td_info4;		/* fourth 32 bit */
+	u32 reserved[4];
+};
+typedef	struct _isp1763_qha isp1763_qha, *pisp1763_qha;
+
+
+
+
+/*this does not	cover all interrupts in	1763 chip*/
+typedef	struct _ehci_regs {
+
+	/*standard ehci	registers */
+	u32 command;
+	u32 usbinterrupt;
+	u32 usbstatus;
+	u32 hcsparams;
+	u32 frameindex;
+
+	/*isp1763 interrupt specific registers */
+	u16 hwmodecontrol;
+	u16 interrupt;
+	u16 interruptenable;
+	u32 interruptthreshold;
+	u16 iso_irq_mask_or;
+	u16 int_irq_mask_or;
+	u16 atl_irq_mask_or;
+	u16 iso_irq_mask_and;
+	u16 int_irq_mask_and;
+	u16 atl_irq_mask_and;
+	u16 buffer_status;
+
+	/*isp1763 initialization registers */
+	u32 reset;
+	u32 configflag;
+	u32 ports[4];
+	u32 pwrdwn_ctrl;
+
+	/*isp1763 transfer specific registers */
+	u16 isotddonemap;
+	u16 inttddonemap;
+	u16 atltddonemap;
+	u16 isotdskipmap;
+	u16 inttdskipmap;
+	u16 atltdskipmap;
+	u16 isotdlastmap;
+	u16 inttdlastmap;
+	u16 atltdlastmap;
+	u16 scratch;
+
+} ehci_regs, *pehci_regs;
+
+/*memory management structures*/
+#define MEM_KV
+#ifdef MEM_KV
+typedef struct isp1763_mem_addr {
+	u32 phy_addr;		/* Physical address of the memory */
+	u32 virt_addr;		/* after ioremap() function call */
+	u8 num_alloc;		/* In case n*smaller size is allocated then for clearing purpose */
+	u32 blk_size;		/*block size */
+	u8 blk_num;		/* number of the block */
+	u8 used;		/*used/free */
+} isp1763_mem_addr_t;
+#else
+typedef struct isp1763_mem_addr {
+	void *phy_addr;		/* Physical address of the memory */
+	void *virt_addr;	/* after ioremap() function call */
+	u8 usage;
+	u32 blk_size;		/*block size */
+} isp1763_mem_addr_t;
+
+#endif
+/* type	tag from {qh,itd,sitd,fstn}->hw_next */
+#define	Q_NEXT_TYPE(dma) ((dma)	& __constant_cpu_to_le32 (3 << 1))
+
+/* values for that type	tag */
+#define	Q_TYPE_ITD	__constant_cpu_to_le32 (0 << 1)
+#define	Q_TYPE_QH	__constant_cpu_to_le32 (1 << 1)
+#define	Q_TYPE_SITD	__constant_cpu_to_le32 (2 << 1)
+#define	Q_TYPE_FSTN	__constant_cpu_to_le32 (3 << 1)
+
+/*next queuehead in execution*/
+#define	QH_NEXT(dma)	cpu_to_le32((u32)dma)
+
+struct ehci_qh {
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.6.1 */
+	u32 hw_info1;		/* see EHCI 3.6.2 */
+
+	u32 hw_info2;		/* see EHCI 3.6.2 */
+	u32 hw_current;		/* qtd list - see EHCI 3.6.4 */
+
+	/* qtd overlay (hardware parts of a struct ehci_qtd) */
+	u32 hw_qtd_next;
+	u32 hw_alt_next;
+	u32 hw_token;
+	u32 hw_buf[5];
+	u32 hw_buf_hi[5];
+	
+	/* the rest is HCD-private */
+	dma_addr_t qh_dma;	/* address of qh */
+	struct list_head qtd_list;	/* sw qtd list */
+	struct ehci_qtd	*dummy;
+	struct ehci_qh *reclaim;	/* next	to reclaim */
+
+	atomic_t refcount;
+	wait_queue_head_t waitforcomplete;
+	unsigned stamp;
+
+	u8 qh_state;
+
+	/* periodic schedule info */
+	u8 usecs;		/* intr	bandwidth */
+	u8 gap_uf;		/* uframes split/csplit	gap */
+	u8 c_usecs;		/* ... split completion	bw */
+	unsigned short period;	/* polling interval */
+	unsigned short start;	/* where polling starts	*/
+	u8 datatoggle;		/*data toggle */
+
+	/*handling the ping stuffs */
+	u8 ping;		/*ping bit */
+
+	/*qtd <-> ptd management */
+
+	u32 qtd_ptd_index;	/* Td-PTD map index for	this ptd */
+	u32 type;		/* endpoint type */
+
+	/*iso stuffs */
+	struct usb_host_endpoint *ep;
+	int next_uframe;	/*next uframe for this endpoint	*/
+	struct list_head itd_list;	/*list of tds to this endpoint */
+	isp1763_mem_addr_t memory_addr;
+	struct _periodic_list periodic_list;
+	/*scheduling requirements for this endpoint */
+	u32 ssplit;
+	u32 csplit;
+	u8 totalptds;   // total number of PTDs needed for current URB
+	u8 actualptds;	// scheduled PTDs until now for current URB
+};
+
+/* urb private part for	the driver. */
+typedef	struct {
+	struct ehci_qh *qh;
+	u16 length;		/* number of tds associated with this request */
+	u16 td_cnt;		/* number of tds already serviced */
+	int state;		/* State machine state when URB	is deleted  */
+	int timeout;		/* timeout for bulk transfers */
+	wait_queue_head_t wait;	/* wait	State machine state when URB is	deleted	*/
+	/*FIX solve the	full speed dying */
+	struct timer_list urb_timer;
+	struct list_head qtd_list;
+	struct ehci_qtd	*qtd[0];	/* list	pointer	to all corresponding TDs associated with this request */
+
+} urb_priv_t;
+
+/*
+ * EHCI	Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt	endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear	in both	the async and (for interrupt) periodic schedules.
+ */
+
+
+/* Definitions required for the EHCI queue head */
+#define	QH_HEAD			0x00008000
+#define	QH_STATE_LINKED		1	/* HC sees this	*/
+#define	QH_STATE_UNLINK		2	/* HC may still	see this */
+#define	QH_STATE_IDLE		3	/* HC doesn't see this */
+#define	QH_STATE_UNLINK_WAIT	4	/* LINKED and on reclaim q */
+#define	QH_STATE_COMPLETING	5	/* don't touch token.HALT */
+#define	QH_STATE_TAKE_NEXT	8	/*take the new transfer	from */
+#define	NO_FRAME ((unsigned short)~0)	/* pick	new start */
+
+
+#define EHCI_ITD_TRANLENGTH	0x0fff0000	/*transaction length */
+#define EHCI_ITD_PG		0x00007000	/*page select */
+#define EHCI_ITD_TRANOFFSET	0x00000fff	/*transaction offset */
+#define EHCI_ITD_BUFFPTR	0xfffff000	/*buffer pointer */
+
+struct ehci_sitd {
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.3.1 */
+	u32 hw_transaction[8];	/* see EHCI 3.3.2 */
+#define EHCI_ISOC_ACTIVE	(1<<31)	/* activate transfer this slot */
+#define EHCI_ISOC_BUF_ERR	(1<<30)	/* Data buffer error */
+#define EHCI_ISOC_BABBLE	(1<<29)	/* babble detected */
+#define EHCI_ISOC_XACTERR	(1<<28)	/* XactErr - transaction error */
+
+#define EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define EHCI_ITD_IOC		(1 << 15)	/* interrupt on complete */
+
+	u32 hw_bufp[7];		/* see EHCI 3.3.3 */
+	u32 hw_bufp_hi[7];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t sitd_dma;	/* for this itd */
+	struct urb *urb;
+	struct list_head sitd_list;	/* list of urb frames' itds */
+	dma_addr_t buf_dma;	/* frame's buffer address */
+
+	/* for now, only one hw_transaction per itd */
+	u32 transaction;
+	u16 index;		/* in urb->iso_frame_desc */
+	u16 uframe;		/* in periodic schedule */
+	u16 usecs;
+	/*memory address */
+	struct isp1763_mem_addr mem_addr;
+	int length;
+	u32 framenumber;
+	u32 ptdframe;
+	int sitd_index;
+	/*scheduling fields */
+	u32 ssplit;
+	u32 csplit;
+	u32 start_frame;
+};
+
+struct ehci_itd	{
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.3.1 */
+	u32 hw_transaction[8];	/* see EHCI 3.3.2 */
+#define	EHCI_ISOC_ACTIVE	(1<<31)	/* activate transfer this slot */
+#define	EHCI_ISOC_BUF_ERR	(1<<30)	/* Data	buffer error */
+#define	EHCI_ISOC_BABBLE	(1<<29)	/* babble detected */
+#define	EHCI_ISOC_XACTERR	(1<<28)	/* XactErr - transaction error */
+
+#define	EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
+#define	EHCI_ITD_IOC		(1 << 15)	/* interrupt on	complete */
+
+	u32 hw_bufp[7];		/* see EHCI 3.3.3 */
+	u32 hw_bufp_hi[7];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t itd_dma;	/* for this itd	*/
+	struct urb *urb;
+	struct list_head itd_list;	/* list	of urb frames' itds */
+	dma_addr_t buf_dma;	/* frame's buffer address */
+	u8 num_of_pkts;		/*number of packets for this ITD */
+	/* for now, only one hw_transaction per	itd */
+	u32 transaction;
+	u16 index;		/* in urb->iso_frame_desc */
+	u16 uframe;		/* in periodic schedule	*/
+	u16 usecs;
+	/*memory address */
+	struct isp1763_mem_addr	mem_addr;
+	int length;
+	u32 multi;
+	u32 framenumber;
+	u32 ptdframe;
+	int itd_index;
+	/*scheduling fields */
+	u32 ssplit;
+	u32 csplit;
+};
+
+/*
+ * EHCI	Specification 0.95 Section 3.5
+ * QTD:	describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block	Diagram".
+ *
+ * These are associated	only with "QH" (Queue Head) structures,
+ * used	with control, bulk, and	interrupt transfers.
+ */
+struct ehci_qtd	{
+	/* first part defined by EHCI spec */
+	u32 hw_next;		/* see EHCI 3.5.1 */
+	u32 hw_alt_next;	/* see EHCI 3.5.2 */
+	u32 hw_token;		/* see EHCI 3.5.3 */
+
+	u32 hw_buf[5];		/* see EHCI 3.5.4 */
+	u32 hw_buf_hi[5];	/* Appendix B */
+
+	/* the rest is HCD-private */
+	dma_addr_t qtd_dma;	/* qtd address */
+	struct list_head qtd_list;	/* sw qtd list */
+	struct urb *urb;	/* qtd's urb */
+	size_t length;		/* length of buffer */
+	u32 state;		/*state	of the qtd */
+#define	QTD_STATE_NEW			0x100
+#define	QTD_STATE_DONE			0x200
+#define	QTD_STATE_SCHEDULED		0x400
+#define	QTD_STATE_LAST			0x800
+	struct isp1763_mem_addr	mem_addr;
+};
+
+#define	QTD_TOGGLE			(1 << 31)	/* data	toggle */
+#define	QTD_LENGTH(tok)			(((tok)>>16) & 0x7fff)
+#define	QTD_IOC				(1 << 15)	/* interrupt on	complete */
+#define	QTD_CERR(tok)			(((tok)>>10) & 0x3)
+#define	QTD_PID(tok)			(((tok)>>8) & 0x3)
+#define	QTD_STS_ACTIVE			(1 << 7)	/* HC may execute this */
+#define	QTD_STS_HALT			(1 << 6)	/* halted on error */
+#define	QTD_STS_DBE			(1 << 5)	/* data	buffer error (in HC) */
+#define	QTD_STS_BABBLE			(1 << 4)	/* device was babbling (qtd halted) */
+#define	QTD_STS_XACT			(1 << 3)	/* device gave illegal response	*/
+#define	QTD_STS_MMF			(1 << 2)	/* incomplete split transaction	*/
+#define	QTD_STS_STS			(1 << 1)	/* split transaction state */
+#define	QTD_STS_PING			(1 << 0)	/* issue PING? */
+
+/* for periodic/async schedules	and qtd	lists, mark end	of list	*/
+#define	EHCI_LIST_END	__constant_cpu_to_le32(1)	/* "null pointer" to hw	*/
+#define	QTD_NEXT(dma)	cpu_to_le32((u32)dma)
+
+struct _phci_driver;
+struct _isp1763_hcd;
+#define	EHCI_MAX_ROOT_PORTS 1
+
+#include <linux/usb/hcd.h>
+
+#define USBNET
+#ifdef USBNET 
+struct isp1763_async_cleanup_urb {
+        struct list_head urb_list;
+        struct urb *urb;
+};
+#endif
+
+
+/*host controller*/
+typedef	struct _phci_hcd {
+
+	struct usb_hcd usb_hcd;
+	spinlock_t lock;
+
+	/* async schedule support */
+	struct ehci_qh *async;
+	struct ehci_qh *reclaim;
+	/* periodic schedule support */
+	unsigned periodic_size;
+	int next_uframe;	/* scan	periodic, start	here */
+	int periodic_sched;	/* periodic activity count */
+	int periodic_more_urb;
+	struct usb_device *otgdev;	/*otg deice, with address 2 */
+	struct timer_list rh_timer;	/* drives root hub */
+	struct list_head dev_list;	/* devices on this bus */
+	struct list_head urb_list;	/*iso testing */
+
+	/*msec break in	interrupts */
+	atomic_t nuofsofs;
+	atomic_t missedsofs;
+
+	struct isp1763_dev *dev;
+	/*hw info */
+	u8 *iobase;
+	u32 iolength;
+	u8 *plxiobase;
+	u32 plxiolength;
+
+	int irq;		/* irq allocated */
+	int state;		/*state	of the host controller */
+	unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
+	ehci_regs regs;
+
+	struct _isp1763_qha qha;
+	struct _isp1763_qhint qhint;
+	struct _isp1763_isoptd isotd;
+
+	struct tasklet_struct tasklet;
+	/*this timer is	going to run every 20 msec */
+	struct timer_list watchdog;
+	void (*worker_function)	(struct	_phci_hcd * hcd);
+	struct _periodic_list periodic_list[PTD_PERIODIC_SIZE];
+#ifdef USBNET 
+	struct isp1763_async_cleanup_urb cleanup_urb;
+#endif
+} phci_hcd, *pphci_hcd;
+
+/*usb_device->hcpriv, points to	this structure*/
+typedef	struct hcd_dev {
+	struct list_head dev_list;
+	struct list_head urb_list;
+} hcd_dev;
+
+#define	usb_hcd_to_pehci_hcd(hcd)   container_of(hcd, struct _phci_hcd,	usb_hcd)
+
+/*td allocation*/
+#ifdef CONFIG_PHCI_MEM_SLAB
+
+#define	qha_alloc(t,c) kmem_cache_alloc(c,ALLOC_FLAGS)
+#define	qha_free(c,x) kmem_cache_free(c,x)
+static kmem_cache_t *qha_cache,	*qh_cache, *qtd_cache;
+/*
+ * Create the slab caches used for qha/qh/qtd allocation.
+ * Returns 0 on success or -ENOMEM.  The original registered all three
+ * caches under the same name ("phci_ptd") and leaked already-created
+ * caches when a later kmem_cache_create() failed; both are fixed.
+ */
+static int
+phci_hcd_mem_init(void)
+{
+	/* qha TDs accessed by controllers and host */
+	qha_cache = kmem_cache_create("phci_qha", sizeof(isp1763_qha), 0,
+				      SLAB_HWCACHE_ALIGN, NULL,	NULL);
+	if (!qha_cache)	{
+		printk("no TD cache?");
+		return -ENOMEM;
+	}
+
+	/* qh TDs accessed by controllers and host */
+	qh_cache = kmem_cache_create("phci_qh", sizeof(isp1763_qha), 0,
+				     SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!qh_cache) {
+		printk("no TD cache?");
+		goto fail_qha;
+	}
+
+	/* qtd accessed by controllers and host */
+	qtd_cache = kmem_cache_create("phci_qtd", sizeof(isp1763_qha), 0,
+				      SLAB_HWCACHE_ALIGN, NULL,	NULL);
+	if (!qtd_cache)	{
+		printk("no TD cache?");
+		goto fail_qh;
+	}
+	return 0;
+
+fail_qh:
+	kmem_cache_destroy(qh_cache);
+	qh_cache = 0;
+fail_qha:
+	kmem_cache_destroy(qha_cache);
+	qha_cache = 0;
+	return -ENOMEM;
+}
+/*
+ * Destroy all three slab caches created by phci_hcd_mem_init().
+ * The original released only qha_cache, leaking qh_cache and
+ * qtd_cache on module unload.
+ */
+static void
+phci_mem_cleanup(void)
+{
+	if (qtd_cache && kmem_cache_destroy(qtd_cache))
+		err("qtd_cache remained");
+	qtd_cache = 0;
+	if (qh_cache && kmem_cache_destroy(qh_cache))
+		err("qh_cache remained");
+	qh_cache = 0;
+	if (qha_cache && kmem_cache_destroy(qha_cache))
+		err("td_cache remained");
+	qha_cache = 0;
+}
+#else
+
+#define	qha_alloc(t,c)			kmalloc(t,ALLOC_FLAGS)
+#define	qha_free(c,x)			kfree(x)
+#define	qha_cache			0
+
+
+#ifdef CONFIG_ISO_SUPPORT
+/*memory constants*/
+#define BLK_128_	2
+#define BLK_256_	3
+#define BLK_1024_	1
+#define BLK_2048_	3
+#define BLK_4096_	3 //1
+#define BLK_8196_	0 //1
+#define BLK_TOTAL	(BLK_128_+BLK_256_ + BLK_1024_ +BLK_2048_+ BLK_4096_+BLK_8196_)
+
+#define BLK_SIZE_128	128
+#define BLK_SIZE_256	256
+#define BLK_SIZE_1024	1024
+#define BLK_SIZE_2048	2048
+#define BLK_SIZE_4096	4096
+#define BLK_SIZE_8192	8192
+
+#define  COMMON_MEMORY	1
+
+#else
+#define BLK_256_	8
+#define BLK_1024_	6
+#define BLK_4096_	3
+#define BLK_TOTAL	(BLK_256_ + BLK_1024_ + BLK_4096_)
+#define BLK_SIZE_256	256
+#define BLK_SIZE_1024	1024
+#define BLK_SIZE_4096	4096
+#endif
+static void phci_hcd_mem_init(void);
+static inline void
+phci_mem_cleanup(void)
+{
+	return;
+}
+
+#endif
+
+#define	PORT_WKOC_E			(1<<22)	/* wake	on overcurrent (enable)	*/
+#define	PORT_WKDISC_E			(1<<21)	/* wake	on disconnect (enable) */
+#define	PORT_WKCONN_E			(1<<20)	/* wake	on connect (enable) */
+/* 19:16 for port testing */
+/* 15:14 for using port	indicator leds (if HCS_INDICATOR allows) */
+#define	PORT_OWNER			(1<<13)	/* true: companion hc owns this	port */
+#define	PORT_POWER			(1<<12)	/* true: has power (see	PPC) */
+#define	PORT_USB11(x)			(((x)&(3<<10))==(1<<10))	/* USB 1.1 device */
+/* 11:10 for detecting lowspeed	devices	(reset vs release ownership) */
+/* 9 reserved */
+#define	PORT_RESET			(1<<8)	/* reset port */
+#define	PORT_SUSPEND			(1<<7)	/* suspend port	*/
+#define	PORT_RESUME			(1<<6)	/* resume it */
+#define	PORT_OCC			(1<<5)	/* over	current	change */
+
+#define	PORT_OC				(1<<4)	/* over	current	active */
+#define	PORT_PEC			(1<<3)	/* port	enable change */
+#define	PORT_PE				(1<<2)	/* port	enable */
+#define	PORT_CSC			(1<<1)	/* connect status change */
+#define	PORT_CONNECT			(1<<0)	/* device connected */
+#define PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_OCC)	
+/*Legends,
+ * ATL	  control, bulk	transfer
+ * INTL	  interrupt transfer
+ * ISTL	  iso transfer
+ * */
+
+/*buffer(transfer) bitmaps*/
+#define	ATL_BUFFER			0x1
+#define	INT_BUFFER			0x2
+#define	ISO_BUFFER			0x4
+#define	BUFFER_MAP			0x7
+
+/* buffer type for ST-ERICSSON HC */
+#define	TD_PTD_BUFF_TYPE_ATL		0	/* ATL buffer */
+#define	TD_PTD_BUFF_TYPE_INTL		1	/* INTL	buffer */
+#define	TD_PTD_BUFF_TYPE_ISTL		2	/* ISO buffer */
+#define	TD_PTD_TOTAL_BUFF_TYPES		(TD_PTD_BUFF_TYPE_ISTL +1)
+/*maximum number of tds	per transfer type*/
+#define	TD_PTD_MAX_BUFF_TDS		16
+
+/*invalid td index in the headers*/
+#define	TD_PTD_INV_PTD_INDEX		0xFFFF
+/*Host controller buffer definition*/
+#define	INVALID_FRAME_NUMBER		0xFFFFFFFF
+/*per td transfer size*/
+#define	HC_ATL_PL_SIZE			4096
+#define	HC_ISTL_PL_SIZE			1024
+#define	HC_INTL_PL_SIZE			1024
+
+/*TD_PTD_MAP states*/
+#define	TD_PTD_NEW			0x0000
+#define	TD_PTD_ACTIVE			0x0001
+#define	TD_PTD_IDLE			0x0002
+#define	TD_PTD_REMOVE			0x0004
+#define	TD_PTD_RELOAD			0x0008
+#define	TD_PTD_IN_SCHEDULE		0x0010
+#define	TD_PTD_DONE			0x0020
+
+#define	PTD_RETRY(x)			(((x) >> 23) & 0x3)
+#define	PTD_PID(x)			(((x) >> 10) & (0x3))
+#define	PTD_NEXTTOGGLE(x)		(((x) >> 25) & (0x1))
+#define	PTD_XFERRED_LENGTH(x)		((x) & 0x7fff)
+#define	PTD_XFERRED_NONHSLENGTH(x)	((x) & 0x7ff)
+#define	PTD_PING_STATE(x)		(((x) >> 26) & (0x1))
+
+/* urb state*/
+#define	DELETE_URB			0x0008
+#define	NO_TRANSFER_ACTIVE		0xFFFF
+#define	NO_TRANSFER_DONE		0x0000
+#define	MAX_PTD_BUFFER_SIZE		4096	/*max ptd size */
+
+/*information of the td	in headers of host memory*/
+typedef	struct td_ptd_map {
+	u32 state;		/* ACTIVE, NEW,	TO_BE_REMOVED */
+	u8 datatoggle;		/*to preserve the data toggle for ATL/ISTL transfers */
+	u32 ptd_bitmap;		/* Bitmap of this ptd in HC headers */
+	u32 ptd_header_addr;	/* headers address of  this td */
+	u32 ptd_data_addr;	/*data address of this td to write in and read from */
+	/*this is address is actual RAM	address	not the	CPU address
+	 * RAM address = (CPU ADDRESS-0x400) >>	3
+	 * */
+	u32 ptd_ram_data_addr;
+	u8 lasttd;		/*last td , complete the transfer */
+	struct ehci_qh *qh;	/* endpoint */
+	struct ehci_qtd	*qtd;	/* qtds	for this endpoint */
+	struct ehci_itd	*itd;	/*itd pointer */
+	struct ehci_sitd *sitd;	/*itd pointer */
+	/*iso specific only */
+	u32 grouptdmap;		/*if td	need to	complete with error, then process all the tds
+				   in the groupmap    */
+} td_ptd_map_t;
+
+/*buffer(ATL/ISTL/INTL) management*/
+typedef	struct td_ptd_map_buff {
+	u8 buffer_type;		/* Buffer type:	BUFF_TYPE_ATL/INTL/ISTL0/ISTL1 */
+	u8 active_ptds;		/* number of active td's in the	buffer */
+	u8 total_ptds;		/* Total number	of td's	present	in the buffer (active +	tobe removed + skip) */
+	u8 max_ptds;		/* Maximum number of ptd's(32) this buffer can withstand */
+	u16 active_ptd_bitmap;	/* Active PTD's	bitmap */
+	u16 pending_ptd_bitmap;	/* skip	PTD's bitmap */
+	td_ptd_map_t map_list[TD_PTD_MAX_BUFF_TDS];	/* td_ptd_map list */
+} td_ptd_map_buff_t;
+
+
+#define     USB_HCD_MAJOR           0
+#define     USB_HCD_MODULE_NAME     "isp1763hcd"
+/* static char devpath[] = "/dev/isp1763hcd"; */
+
+#define HCD_IOC_MAGIC	'h'
+
+#define     HCD_IOC_POWERDOWN							_IO(HCD_IOC_MAGIC, 1)
+#define     HCD_IOC_POWERUP								_IO(HCD_IOC_MAGIC, 2)
+#define     HCD_IOC_TESTSE0_NACK						_IO(HCD_IOC_MAGIC, 3)
+#define     HCD_IOC_TEST_J								_IO(HCD_IOC_MAGIC,4)
+#define     HCD_IOC_TEST_K								_IO(HCD_IOC_MAGIC,5)
+#define     HCD_IOC_TEST_TESTPACKET						_IO(HCD_IOC_MAGIC,6)
+#define     HCD_IOC_TEST_FORCE_ENABLE					_IO(HCD_IOC_MAGIC,7)
+#define	  HCD_IOC_TEST_SUSPEND_RESUME				_IO(HCD_IOC_MAGIC,8)
+#define     HCD_IOC_TEST_SINGLE_STEP_GET_DEV_DESC		_IO(HCD_IOC_MAGIC,9)
+#define     HCD_IOC_TEST_SINGLE_STEP_SET_FEATURE		_IO(HCD_IOC_MAGIC,10)
+#define     HCD_IOC_TEST_STOP							_IO(HCD_IOC_MAGIC,11)
+#define     HCD_IOC_SUSPEND_BUS							_IO(HCD_IOC_MAGIC,12)
+#define     HCD_IOC_RESUME_BUS							_IO(HCD_IOC_MAGIC,13)
+#define     HCD_IOC_REMOTEWAKEUP_BUS					_IO(HCD_IOC_MAGIC,14)
+
+#define HOST_COMPILANCE_TEST_ENABLE	1
+#define HOST_COMP_TEST_SE0_NAK	1
+#define HOST_COMP_TEST_J	2
+#define HOST_COMP_TEST_K	3
+#define HOST_COMP_TEST_PACKET		4
+#define HOST_COMP_TEST_FORCE_ENABLE	5
+#define HOST_COMP_HS_HOST_PORT_SUSPEND_RESUME	6
+#define HOST_COMP_SINGLE_STEP_GET_DEV_DESC	7
+#define HOST_COMP_SINGLE_STEP_SET_FEATURE	8
+
+#endif
diff --git a/drivers/usb/host/pehci/host/qtdptd.c b/drivers/usb/host/pehci/host/qtdptd.c
new file mode 100644
index 0000000..093800e
--- /dev/null
+++ b/drivers/usb/host/pehci/host/qtdptd.c
@@ -0,0 +1,1315 @@
+/* 
+* Copyright (C) ST-Ericsson AP Pte Ltd 2010 
+*
+* ISP1763 Linux OTG Controller driver : host
+* 
+* This program is free software; you can redistribute it and/or modify it under the terms of 
+* the GNU General Public License as published by the Free Software Foundation; version 
+* 2 of the License. 
+* 
+* This program is distributed in the hope that it will be useful, but WITHOUT ANY  
+* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS  
+* FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more  
+* details. 
+* 
+* You should have received a copy of the GNU General Public License 
+* along with this program; if not, write to the Free Software 
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+* 
+* This is a host controller driver file.  QTD processing is handled here.
+* 
+* Author : wired support <wired.support@stericsson.com>
+*
+*/
+
+
+/*   Td management routines  */
+
+#define	QUEUE_HEAD_NOT_EMPTY	0x001
+
+
+/*
+ * Free the TD/PTD map slot used by a removed urb/endpoint.
+ * Marks the slot TD_PTD_NEW, idles the QH, detaches the qh/qtd
+ * pointers (the scheduler keys off those, not the state field), and
+ * clears the slot's bit from both the active and the pending bitmaps
+ * of the buffer selected by qh->type.
+ * NOTE(review): comment below says the global lock must be held --
+ * confirm every caller does so.
+ */
+static void
+phci_hcd_release_td_ptd_index(struct ehci_qh *qh)
+{
+	td_ptd_map_buff_t *td_ptd_buff = &td_ptd_map_buff[qh->type];
+	td_ptd_map_t *td_ptd_map = &td_ptd_buff->map_list[qh->qtd_ptd_index];
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+	/*hold the global lock here */
+	td_ptd_map->state = TD_PTD_NEW;
+	qh->qh_state = QH_STATE_IDLE;
+	/*
+	   set these values to NULL as schedule
+	   is based on these values,
+	   rather td_ptd_map state
+	 */
+	td_ptd_map->qh = NULL;
+	td_ptd_map->qtd	= NULL;
+
+	td_ptd_buff->active_ptd_bitmap &= ~td_ptd_map->ptd_bitmap;
+
+	/* Only	pending	transfers on current QH	must be	cleared	*/
+	td_ptd_buff->pending_ptd_bitmap	&= ~td_ptd_map->ptd_bitmap;
+
+	pehci_entry("--	%s: Exit\n", __FUNCTION__);
+
+}
+
+/* Debug helper: dump the hardware fields (next/alt-next pointers,
+ * token, length and first buffer pointer) of one qtd. */
+static void
+print_ehci_qtd(struct ehci_qtd *qtd)
+{
+	pehci_print("hwnext 0x%08x, altnext 0x%08x,token 0x%08x, length	%d\n",
+		    qtd->hw_next, qtd->hw_alt_next,
+		    le32_to_cpu(qtd->hw_token),	qtd->length);
+
+	pehci_print("buf[0] 0x%08x\n", qtd->hw_buf[0]);
+
+}
+
+/*
+ * Delete and free every qtd linked on this urb's qtd_list.
+ * The list_empty() guard tolerates entries that were already
+ * detached elsewhere.  (The guard was mis-indented in the original,
+ * making it look like part of the statement above; indentation fixed,
+ * behavior unchanged.)
+ */
+static void
+phci_hcd_qtd_list_free(phci_hcd	* ehci,
+		       struct urb *urb,	struct list_head *qtd_list)
+{
+	struct list_head *entry, *temp;
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	list_for_each_safe(entry, temp,	qtd_list) {
+		struct ehci_qtd	*qtd;
+
+		qtd = list_entry(entry,	struct ehci_qtd, qtd_list);
+		/* unlink before freeing; entry may already be detached */
+		if (!list_empty(&qtd->qtd_list))
+			list_del_init(&qtd->qtd_list);
+		qha_free(qha_cache, qtd);
+	}
+
+	pehci_entry("--	%s: Exit \n", __FUNCTION__);
+}
+
+
+/*
+ * Release every qtd belonging to one transfer: unlink it, hand its
+ * host-memory block back to the pool, free the qtd itself, and clear
+ * the slot in the urb-private qtd array.
+ */
+static void
+phci_hcd_urb_free_priv(phci_hcd	* hcd,
+		       urb_priv_t * urb_priv_to_remove,	struct ehci_qh *qh)
+{
+	int idx;
+
+	for (idx = 0; idx < urb_priv_to_remove->length; idx++) {
+		struct ehci_qtd *qtd = urb_priv_to_remove->qtd[idx];
+
+		if (!qtd)
+			continue;
+
+		if (!list_empty(&qtd->qtd_list))
+			list_del_init(&qtd->qtd_list);
+
+		/* This is required when the device is abruptly disconnected
+		 * and the PTDs are not completely processed */
+		if (qtd->length)
+			phci_hcd_mem_free(&qtd->mem_addr);
+
+		qha_free(qha_cache, qtd);
+		urb_priv_to_remove->qtd[idx] = 0;
+	}
+}
+
+
+/*
+ * Allocate and initialise one qtd.  Returns NULL on allocation
+ * failure.  Uses kzalloc() instead of the original kmalloc()+memset()
+ * pair; behavior is otherwise identical.
+ */
+struct ehci_qtd	*
+phci_hcd_qtd_allocate(int mem_flags)
+{
+	struct ehci_qtd	*qtd;
+
+	qtd = kzalloc(sizeof *qtd, mem_flags);
+	if (!qtd)
+		return NULL;
+
+	/* NOTE(review): the CPU pointer is stored as the "dma" handle;
+	 * presumably no real DMA mapping is done on this controller --
+	 * confirm this is intentional for the PIO-based ISP1763 */
+	qtd->qtd_dma = cpu_to_le32(qtd);
+	qtd->hw_next = EHCI_LIST_END;
+	qtd->hw_alt_next = EHCI_LIST_END;
+	qtd->state = QTD_STATE_NEW;
+	INIT_LIST_HEAD(&qtd->qtd_list);
+	return qtd;
+}
+
+/*
+ * Fill one qtd for up to HC_ATL_PL_SIZE (4K, custom limit) bytes of
+ * the transfer: stores the buffer address in hw_buf[0] and
+ * (count << 16) | token in hw_token, records count in qtd->length,
+ * and returns the number of bytes this qtd covers.
+ * NOTE(review): *status is never written here even though callers
+ * test the return value against < 0 -- confirm no caller relies on
+ * this function setting *status.
+ */
+static int
+phci_hcd_qtd_fill(struct urb *urb,
+		  struct ehci_qtd *qtd,
+		  dma_addr_t buf, size_t len, int token, int *status)
+{
+	int count = 0;
+
+	qtd->hw_buf[0] = (u32) buf;
+	/*max length is HC_ATL_PL_SIZE	*/
+	if (len	> HC_ATL_PL_SIZE) {
+		count =	HC_ATL_PL_SIZE;
+	} else {
+		count =	len;
+	}
+	qtd->hw_token =	cpu_to_le32((count << 16) | token);
+	qtd->length = count;
+
+	pehci_print("%s:qtd %p,	token %8x bytes	%d dma %x\n",
+		__FUNCTION__, qtd, le32_to_cpu(qtd->hw_token), count,
+		qtd->hw_buf[0]);
+
+	return count;
+}
+
+
+/*
+ * Build the chain of qtds for an interrupt/bulk/control transfer and
+ * link them onto "head".  The qtds themselves were pre-allocated into
+ * urb->hcpriv (urb_priv->qtd[]) by the submit path; this routine only
+ * fills and links them: an optional SETUP stage, the data stage split
+ * into 4K qtds with DATA toggle tracking, and an optional terminating
+ * status/zero-length packet.  Returns "head" on success or 0 on
+ * failure, in which case *status may be set and the partial qtd list
+ * is freed via phci_hcd_qtd_list_free().
+ */
+struct list_head *
+phci_hcd_make_qtd(phci_hcd * hcd,
+		  struct list_head *head, struct urb *urb, int *status)
+{
+
+	struct ehci_qtd	*qtd, *qtd_prev;
+	dma_addr_t buf,	map_buf;
+	int len, maxpacket;
+	int is_input;
+	u32 token;
+	int cnt	= 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+
+	pehci_entry("++	%s, Entered\n",	__FUNCTION__);
+
+	/*take the qtd from already allocated
+	   structure from hcd_submit_urb
+	 */
+	qtd = urb_priv->qtd[cnt];
+	if (unlikely(!qtd)) {
+		*status	= -ENOMEM;
+		return 0;
+	}
+
+	qtd_prev = 0;
+	list_add_tail(&qtd->qtd_list, head);
+
+	qtd->urb = urb;
+
+	token =	QTD_STS_ACTIVE;
+	token |= (EHCI_TUNE_CERR << 10);
+
+	len = urb->transfer_buffer_length;
+
+	is_input = usb_pipein(urb->pipe);
+
+	if (usb_pipecontrol(urb->pipe))	{
+		/* SETUP pid */
+		if (phci_hcd_qtd_fill(urb, qtd,	cpu_to_le32(urb->setup_packet),
+			sizeof(struct usb_ctrlrequest),
+			token |	(2 /* "setup" */	<< 8),
+			status)	<	0) {
+			goto cleanup;
+		}
+
+		cnt++;		/* increment the index */
+		print_ehci_qtd(qtd);
+		/* ... and always at least one more pid	*/
+		token ^= QTD_TOGGLE;
+		qtd_prev = qtd;
+		qtd = urb_priv->qtd[cnt];
+		if (unlikely(!qtd)) {
+			*status	= -ENOMEM;
+			goto cleanup;
+		}
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/*
+	 * data	transfer stage:	 buffer	setup
+	 */
+	len = urb->transfer_buffer_length;
+	if (likely(len > 0)) {
+		/*update the buffer address */
+		buf = cpu_to_le32(urb->transfer_buffer);
+	} else {
+		buf = map_buf =	cpu_to_le32(0);	/*set-up stage has no data. */
+	}
+
+	/* So are we waiting for the ack only or there is a data stage with out. */
+	if (!buf || usb_pipein(urb->pipe)) {
+		token |= (1 /* "in" */	<< 8);
+	}
+	/* else	it's already initted to	"out" pid (0 <<	8) */
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe,
+				  usb_pipeout(urb->pipe)) & 0x07ff;
+
+
+	/*
+	 * buffer gets wrapped in one or more qtds;
+	 * last	one may	be "short" (including zero len)
+	 * and may serve as a control status ack
+	 */
+
+	for (;;) {
+		int this_qtd_len;
+		this_qtd_len =
+			phci_hcd_qtd_fill(urb, qtd, buf, len, token, status);
+		if (this_qtd_len < 0)
+			goto cleanup;
+		print_ehci_qtd(qtd);
+		len -= this_qtd_len;
+		buf += this_qtd_len;
+		cnt++;
+		/* qh makes control packets use	qtd toggle; maybe switch it */
+		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) {
+			token ^= QTD_TOGGLE;
+		}
+
+		if (likely(len <= 0)) {
+			break;
+		}
+		qtd_prev = qtd;
+		qtd = urb_priv->qtd[cnt];
+		if (unlikely(!qtd)) {
+			goto cleanup;
+		}
+		qtd->urb = urb;
+		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+		list_add_tail(&qtd->qtd_list, head);
+	}
+
+	/*
+	 * control requests may	need a terminating data	"status" ack;
+	 * bulk	ones may need a	terminating short packet (zero length).
+	 */
+	if (likely(buf != 0)) {
+		int one_more = 0;
+		if (usb_pipecontrol(urb->pipe))	{
+			one_more = 1;
+			token ^= 0x0100;	/* "in"	<--> "out"  */
+			token |= QTD_TOGGLE;	/* force DATA1 */
+
+		} else if (usb_pipebulk(urb->pipe)	/* bulk	data exactly terminated	on zero	length */
+			&&(urb->transfer_flags & URB_ZERO_PACKET)
+			&& !(urb->transfer_buffer_length % maxpacket)) {
+			one_more = 1;
+		}
+		if (one_more) {
+			qtd_prev = qtd;
+			qtd = urb_priv->qtd[cnt];
+			if (unlikely(!qtd)) {
+				goto cleanup;
+			}
+
+			qtd->urb = urb;
+			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
+			list_add_tail(&qtd->qtd_list, head);
+			phci_hcd_qtd_fill(urb, qtd, 0, 0, token, status);
+			print_ehci_qtd(qtd);
+			cnt++;
+		}
+	}
+
+	/*this is our last td for current transfer */
+	qtd->state |= QTD_STATE_LAST;
+
+	/*number of tds	*/
+	if (urb_priv->length !=	cnt) {
+		err("Never Error: number of tds	allocated %d exceeding %d\n",
+		    urb_priv->length, cnt);
+	}
+	/* by default, enable interrupt	on urb completion */
+	if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) {
+		qtd->hw_token |= __constant_cpu_to_le32(QTD_IOC);
+	}
+
+	pehci_entry("--	%s, Exit\n", __FUNCTION__);
+	return head;
+
+	cleanup:
+	phci_hcd_qtd_list_free(hcd, urb, head);
+	return 0;
+}
+
+/*
+ * Allocate and initialise a queue head (one per endpoint).
+ * Uses GFP_ATOMIC because callers may hold spinlocks / run in
+ * non-sleeping context.  Returns NULL on allocation failure.
+ */
+struct ehci_qh *
+phci_hcd_qh_alloc(phci_hcd * hcd)
+{
+	/* kzalloc() replaces the original kmalloc()+memset() pair */
+	struct ehci_qh *qh = kzalloc(sizeof(struct ehci_qh), GFP_ATOMIC);
+
+	if (!qh)
+		return NULL;
+
+	atomic_set(&qh->refcount, 1);
+	init_waitqueue_head(&qh->waitforcomplete);
+	/* NOTE(review): storing a CPU pointer in a u32 truncates on
+	 * 64-bit hosts; this controller is only used on 32-bit — confirm */
+	qh->qh_dma = (u32) qh;
+	INIT_LIST_HEAD(&qh->qtd_list);
+	INIT_LIST_HEAD(&qh->itd_list);
+	qh->next_uframe	= -1;
+	return qh;
+}
+
+/*
+ * Compute the on-chip memory addresses for one PTD slot.
+ *
+ * Fills @td_ptd_map with the PTD header address, the payload address
+ * (one 4KB payload area per slot) and the hardware-encoded data start
+ * address for slot @index of the given buffer type (ATL/INTL/ISTL).
+ * Returns the payload location (callers currently ignore it).
+ */
+static int
+phci_hcd_fill_ptd_addresses(td_ptd_map_t * td_ptd_map, int index, int bufftype)
+{
+	int i =	0;
+	unsigned long tdlocation = 0;
+	/*
+	 * the below payloadlocation and
+	 * payloadsize are redundant
+	 * */
+	unsigned long payloadlocation =	0;
+	unsigned long payloadsize = 0;
+	pehci_entry("++	%s: enter\n", __FUNCTION__);
+	switch (bufftype) {
+		/*atl header starts at 0xc00 */
+	case TD_PTD_BUFF_TYPE_ATL:
+		tdlocation = 0x0c00;
+		/*redundant */
+		payloadsize = 0x1000;
+		payloadlocation	= 0x1000;
+		break;
+	case TD_PTD_BUFF_TYPE_INTL:
+		/*interrupt header
+		 * starts at 0x800
+		 * */
+		tdlocation = 0x0800;
+		/*redundant */
+		payloadlocation	= 0x1000;
+		payloadsize = 0x1000;
+		break;
+
+	case TD_PTD_BUFF_TYPE_ISTL:
+		/*iso header starts
+		 * at 0x400*/
+
+		tdlocation = 0x0400;
+		/*redunndant */
+		payloadlocation	= 0x1000;
+		payloadsize = 0x1000;
+
+		break;
+	}
+
+
+	i = index;
+	payloadlocation	+= (i) * payloadsize;	/*each payload is of 4096 bytes	*/
+	tdlocation += (i) * PHCI_QHA_LENGTH;	/*each td is of	32 bytes */
+	td_ptd_map->ptd_header_addr = tdlocation;
+	td_ptd_map->ptd_data_addr = payloadlocation;
+	/* hardware encoding of the data start: (addr - 0x400) >> 3 */
+	td_ptd_map->ptd_ram_data_addr =	((payloadlocation - 0x0400) >> 3);
+	pehci_print
+		("Index: %d, Header: 0x%08x, Payload: 0x%08x,Data start	address: 0x%08x\n",
+		 index,	td_ptd_map->ptd_header_addr, td_ptd_map->ptd_data_addr,
+		 td_ptd_map->ptd_ram_data_addr);
+	pehci_entry("--	%s: Exit", __FUNCTION__);
+	return payloadlocation;
+}
+
+
+/*--------------------------------------------------------------*
+ * Find and claim a free PTD slot for this endpoint.
+ *
+ * Scans the map list of the qh's buffer type for the first slot in
+ * TD_PTD_NEW state, claims it (state -> TD_PTD_ACTIVE), records the
+ * qtd/itd/qh in the slot, computes its on-chip addresses and sets its
+ * bit in active_ptd_bitmap.  On success qh->qtd_ptd_index holds the
+ * slot; if no slot is free it stays at TD_PTD_INV_PTD_INDEX.
+ -----------------------------------------------------------*/
+static void
+phci_hcd_get_qtd_ptd_index(struct ehci_qh *qh,
+			   struct ehci_qtd *qtd, struct	ehci_itd *itd)
+{
+	u8 buff_type = td_ptd_pipe_x_buff_type[qh->type];
+	u8 qtd_ptd_index;	/*, index; */
+	/*this is the location of the ptd's skip map/done map, also
+	   calculating the td header, payload, data start address
+	   location */
+	u8 bitmap = 0x1;
+	u8 max_ptds;
+
+	td_ptd_map_buff_t *ptd_map_buff	= &(td_ptd_map_buff[buff_type]);
+	pehci_entry("++	%s, Entered, buffer type %d\n",	__FUNCTION__,
+		    buff_type);
+
+	/* ATL PTDs can	wait */
+	max_ptds = (buff_type == TD_PTD_BUFF_TYPE_ATL)
+		? TD_PTD_MAX_BUFF_TDS :	ptd_map_buff->max_ptds;
+
+	for (qtd_ptd_index = 0;	qtd_ptd_index <	max_ptds; qtd_ptd_index++) {	/* Find	the first free slot */
+		if (ptd_map_buff->map_list[qtd_ptd_index].state	== TD_PTD_NEW) {
+			/* Found a free	slot */
+			if (qh->qtd_ptd_index == TD_PTD_INV_PTD_INDEX) {
+				qh->qtd_ptd_index = qtd_ptd_index;
+			}
+			ptd_map_buff->map_list[qtd_ptd_index].datatoggle = 0;
+			/*put the ptd_index into operational state */
+			ptd_map_buff->map_list[qtd_ptd_index].state =
+				TD_PTD_ACTIVE;
+			ptd_map_buff->map_list[qtd_ptd_index].qtd = qtd;
+			/* No td transfer is in	progress */
+			ptd_map_buff->map_list[qtd_ptd_index].itd = itd;
+			/*initialize endpoint(queuehead) */
+			ptd_map_buff->map_list[qtd_ptd_index].qh = qh;
+			ptd_map_buff->map_list[qtd_ptd_index].ptd_bitmap =
+				bitmap << qtd_ptd_index;
+			phci_hcd_fill_ptd_addresses(&ptd_map_buff->
+				map_list[qtd_ptd_index],
+				qh->qtd_ptd_index,
+				buff_type);
+			ptd_map_buff->map_list[qtd_ptd_index].lasttd = 0;
+			ptd_map_buff->total_ptds++;	/* update # of total td's */
+			/*make the queuehead map, to process in	the phci_schedule_ptds */
+			ptd_map_buff->active_ptd_bitmap	|=
+				(bitmap	<< qtd_ptd_index);
+			break;
+		}
+	}
+	pehci_entry("--	%s, Exit\n", __FUNCTION__);
+	return;
+
+}				/* phci_get_td_ptd_index */
+
+
+
+/*
+ * Assign a PTD slot to this endpoint and move it to the linked state.
+ * All tds on this endpoint share the same header location for every
+ * transfer.  On failure to find a slot, *status is set to -ENOSPC and
+ * the qh is left unlinked (caller removes the pending tds).
+ */
+static void
+phci_hcd_qh_link_async(phci_hcd	* hcd, struct ehci_qh *qh, int *status)
+{
+	struct ehci_qtd	*qtd = 0;
+	struct list_head *qtd_list = &qh->qtd_list;
+
+#ifdef MSEC_INT_BASED
+	td_ptd_map_buff_t *ptd_map_buff;
+	td_ptd_map_t *td_ptd_map;
+#endif
+
+	/*  take the first td, in case we are not able to schedule the new td
+	   and this is going for remove
+	 */
+	qtd = list_entry(qtd_list->next, struct	ehci_qtd, qtd_list);
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	/* Assign a td-ptd index for this ed so	that we	can put	ptd's in the HC	buffers	*/
+
+	qh->qtd_ptd_index = TD_PTD_INV_PTD_INDEX;
+	phci_hcd_get_qtd_ptd_index(qh, qtd, NULL);	/* Get a td-ptd	index */
+	if (qh->qtd_ptd_index == TD_PTD_INV_PTD_INDEX) {
+		err("can not find the location in our buffer\n");
+		*status	= -ENOSPC;
+		return;
+	}
+#ifdef MSEC_INT_BASED
+	/*first	transfers in sof interrupt goes	into pending */
+	ptd_map_buff = &(td_ptd_map_buff[qh->type]);
+	td_ptd_map = &ptd_map_buff->map_list[qh->qtd_ptd_index];
+	ptd_map_buff->pending_ptd_bitmap |= td_ptd_map->ptd_bitmap;
+
+#endif
+	/* clear the halt bit so the endpoint can be accessed again */
+	qh->hw_token &=	~__constant_cpu_to_le32(QTD_STS_HALT);
+	qh->qh_state = QH_STATE_LINKED;
+	qh->qh_state |=	QH_STATE_TAKE_NEXT;
+	pehci_entry("--	%s: Exit , qh %p\n", __FUNCTION__, qh);
+
+
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Point the qh at @qtd as its current/next transfer descriptor.
+ * Clears every hw_token bit except data toggle and PING, so a new or
+ * previously halted endpoint becomes schedulable again.
+ */
+static inline void
+phci_hcd_qh_update(phci_hcd * ehci, struct ehci_qh *qh,	struct ehci_qtd	*qtd)
+{
+	/*make this current td */
+	qh->hw_current = QTD_NEXT(qtd->qtd_dma);
+	qh->hw_qtd_next	= QTD_NEXT(qtd->qtd_dma);
+	qh->hw_alt_next	= EHCI_LIST_END;
+	/* HC must see latest qtd and qh data before we	clear ACTIVE+HALT */
+	wmb();
+	qh->hw_token &=	__constant_cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
+}
+
+/*
+ * Create and initialise a queue head (endpoint) for ATL (control/bulk)
+ * and INT transfers.
+ *
+ * Picks the PTD buffer type from the pipe type, computes interrupt
+ * scheduling parameters (period, usecs, split timing for FS/LS), and
+ * fills hw_info1/hw_info2 from the device address, endpoint, speed and
+ * max packet size.  On failure sets *status and returns NULL.
+ */
+struct ehci_qh *
+phci_hcd_make_qh(phci_hcd * hcd,
+		 struct	urb *urb, struct list_head *qtd_list, int *status)
+{
+	struct ehci_qh *qh = 0;
+	u32 info1 = 0, info2 = 0;
+	int is_input, type;
+	int maxp = 0;
+	int mult = 0;
+	int bustime = 0;
+	struct ehci_qtd	*qtd =
+		list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	qh = phci_hcd_qh_alloc(hcd);
+	if (!qh) {
+		*status	= -ENOMEM;
+		return 0;
+	}
+
+	/*
+	 * init	endpoint/device	data for this QH
+	 */
+	info1 |= usb_pipeendpoint(urb->pipe) <<	8;
+	info1 |= usb_pipedevice(urb->pipe) << 0;
+
+	is_input = usb_pipein(urb->pipe);
+	type = usb_pipetype(urb->pipe);
+	/* bits 12:11 of maxpacket carry the high-bandwidth multiplier */
+	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+	mult = 1 + ((maxp >> 11) & 0x3);
+
+	/*set this queueheads index to invalid */
+	qh->qtd_ptd_index = TD_PTD_INV_PTD_INDEX;
+
+	switch (type) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		qh->type = TD_PTD_BUFF_TYPE_ATL;
+		break;
+
+	case PIPE_INTERRUPT:
+		qh->type = TD_PTD_BUFF_TYPE_INTL;
+		break;
+	case PIPE_ISOCHRONOUS:
+		qh->type = TD_PTD_BUFF_TYPE_ISTL;
+		break;
+
+	}
+
+
+
+	if (type == PIPE_INTERRUPT) {
+		/*for this interrupt transfer check how	much bustime in	usecs required */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		bustime = usb_check_bandwidth(urb->dev, urb);
+
+		if (bustime < 0) {
+			*status = -ENOSPC;
+			goto done;
+		}
+
+		usb_claim_bandwidth(urb->dev, urb, bustime,
+			usb_pipeisoc(urb->pipe));
+#else
+#endif
+		/* NOTE(review): on >= 2.6.24 kernels bustime stays 0 here */
+		qh->usecs = bustime;
+
+		qh->start = NO_FRAME;
+
+		if (urb->dev->speed == USB_SPEED_HIGH) {
+			qh->c_usecs = 0;
+			qh->gap_uf = 0;
+			/*after	how many uframes this interrupt	is to be executed */
+			qh->period = urb->interval >> 3;
+			if (qh->period < 1) {
+				printk("intr period %d uframes,\n",
+				urb->interval);
+			}
+			/*restore the original urb->interval in	qh->period */
+			qh->period = urb->interval;
+
+		} else {
+			/* gap is f(FS/LS transfer times) */
+			qh->gap_uf = 1 + 7;	/*usb_calc_bus_time (urb->dev->speed,
+						   is_input, 0,	maxp) /	(125 * 1000); */
+
+			if (is_input) {	/* SPLIT, gap, CSPLIT+DATA */
+
+				qh->c_usecs = qh->usecs	+ 1;	/*HS_USECS (0);	*/
+				qh->usecs = 10;	/*HS_USECS (1);	*/
+			} else {	/* SPLIT+DATA, gap, CSPLIT */
+				qh->usecs += 10;	/*HS_USECS (1);	*/
+				qh->c_usecs = 1;	/*HS_USECS (0);	*/
+			}
+
+
+			/*take the period ss/cs	scheduling will	be
+			   handled by submit urb
+			 */
+			qh->period = urb->interval;
+		}
+	}
+
+	/* using TT? */
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		info1 |= (1 << 12);	/* EPS "low" */
+		/* FALL	THROUGH	*/
+
+	case USB_SPEED_FULL:
+		/* EPS 0 means "full" */
+		if (type != PIPE_INTERRUPT) {
+			info1 |= (EHCI_TUNE_RL_TT << 28);
+		}
+		if (type == PIPE_CONTROL) {
+			info1 |= (1 << 27);	/* for TT */
+			info1 |= 1 << 14;	/* toggle from qtd */
+		}
+		info1 |= maxp << 16;
+
+		info2 |= (EHCI_TUNE_MULT_TT << 30);
+		/* NOTE(review): assumes a transaction translator exists
+		 * (urb->dev->tt non-NULL) for every FS/LS device — confirm */
+		info2 |= urb->dev->ttport << 23;
+		info2 |= urb->dev->tt->hub->devnum << 16;
+		break;
+
+
+	case USB_SPEED_HIGH:	/* no TT involved */
+		info1 |= (2 << 12);	/* EPS "high" */
+		if (type == PIPE_CONTROL) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 64 << 16;	/* usb2	fixed maxpacket	*/
+
+			info1 |= 1 << 14;	/* toggle from qtd */
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else if (type	== PIPE_BULK) {
+			info1 |= (EHCI_TUNE_RL_HS << 28);
+			info1 |= 512 <<	16;	/* usb2	fixed maxpacket	*/
+			info2 |= (EHCI_TUNE_MULT_HS << 30);
+		} else {	/* PIPE_INTERRUPT */
+			info1 |= (maxp & 0x7ff)	/*max_packet (maxp) */ <<16;
+			info2 |= mult /*hb_mult	(maxp) */  << 30;
+		}
+		break;
+
+	default:
+		pehci_print("bogus dev %p speed	%d", urb->dev, urb->dev->speed);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	done:
+#else
+#endif
+		/* NOTE(review): qh came from kmalloc in phci_hcd_qh_alloc();
+		 * qha_free() must resolve to kfree() for this to pair up */
+		qha_free(qha_cache, qh);
+		return 0;
+	}			/*end of switch	*/
+
+	/* NOTE:  if (PIPE_INTERRUPT) {	scheduler sets s-mask }	*/
+
+	/* init	as halted, toggle clear, advance to dummy */
+	qh->qh_state = QH_STATE_IDLE;
+	qh->hw_info1 = cpu_to_le32(info1);
+	qh->hw_info2 = cpu_to_le32(info2);
+	/*link the tds here */
+	list_splice(qtd_list, &qh->qtd_list);
+	phci_hcd_qh_update(hcd,	qh, qtd);
+	qh->hw_token = cpu_to_le32(QTD_STS_HALT);
+	if (!usb_pipecontrol(urb->pipe)) {
+		usb_settoggle(urb->dev,	usb_pipeendpoint(urb->pipe), !is_input,
+			1);
+	}
+	pehci_entry("--	%s: Exit, qh %p\n", __FUNCTION__, qh);
+	return qh;
+}
+
+
+/*-----------------------------------------------------------*/
+/*
+ * Hardware maintains data toggle (like	OHCI) ... here we (re)initialize
+ * the hardware	data toggle in the QH, and set the pseudo-toggle in udev
+ * so we can see if usb_clear_halt() was called.  NOP for control, since
+ * we set up qh->hw_info1 to always use	the QTD	toggle bits.
+ */
+static inline void
+phci_hcd_clear_toggle(struct usb_device	*udev, int ep, int is_out,
+		      struct ehci_qh *qh,
+{
+	pehci_print("clear toggle, dev %d ep 0x%x-%s\n",
+		    udev->devnum, ep, is_out ? "out" : "in");
+	/* clear the hardware toggle and mark the pseudo-toggle as seen */
+	qh->hw_token &=	~__constant_cpu_to_le32(QTD_TOGGLE);
+	usb_settoggle(udev, ep,	is_out,	1);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs	appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null	if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+/*
+ * For control/bulk/interrupt: append these TDs to the endpoint's QH,
+ * allocating and initialising the QH on first use (cached in *ptr).
+ *
+ * Returns the QH, or NULL when one could not be allocated.  *status is
+ * set to QUEUE_HEAD_NOT_EMPTY when an URB is already queued on this
+ * endpoint (the new tds are simply chained behind the last one).
+ */
+struct ehci_qh *
+phci_hcd_qh_append_tds(phci_hcd	* hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+	struct urb *urb,	struct list_head *qtd_list,
+	void **ptr, int *status)
+{
+
+	int epnum;
+
+	struct ehci_qh *qh = 0;
+	struct ehci_qtd	*qtd =
+		list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
+	td_ptd_map_buff_t *ptd_map_buff;
+	td_ptd_map_t *td_ptd_map;
+
+
+
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	epnum = ep->desc.bEndpointAddress;
+#else
+	epnum = urb->ep->desc.bEndpointAddress;
+#endif
+
+	qh = (struct ehci_qh *)	*ptr;
+	if (likely(qh != 0)) {
+		u32 hw_next = QTD_NEXT(qtd->qtd_dma);
+		/* fixed: a stray '%' made the first conversion invalid */
+		pehci_print("Queue head already %p\n",	qh);
+
+		ptd_map_buff = &(td_ptd_map_buff[qh->type]);
+		td_ptd_map = &ptd_map_buff->map_list[qh->qtd_ptd_index];
+
+		/* maybe patch the qh used for set_address.  The device
+		 * address (bits 6:0) must be masked AFTER the little-endian
+		 * conversion; masking the raw __le32 picked the wrong byte
+		 * on big-endian hosts. */
+		if (unlikely
+			(epnum == 0	&& (le32_to_cpu(qh->hw_info1) & 0x7f) == 0)) {
+			qh->hw_info1 |=	cpu_to_le32(usb_pipedevice(urb->pipe));
+		}
+
+		/* is an URB is	queued to this qh already? */
+		if (unlikely(!list_empty(&qh->qtd_list))) {
+			struct ehci_qtd	*last_qtd;
+			/* update the last qtd's "next"	pointer	*/
+			last_qtd = list_entry(qh->qtd_list.prev,
+				struct ehci_qtd, qtd_list);
+
+			/*queue	head is	not empty just add the
+			   td at the end of it , and return from here
+			 */
+			last_qtd->hw_next = hw_next;
+
+			/*set the status as positive */
+			*status	= (u32)	QUEUE_HEAD_NOT_EMPTY;
+
+			/* no URB queued */
+		} else {
+
+	//		qh->qh_state = QH_STATE_IDLE;
+
+
+			/* usb_clear_halt() means qh data toggle gets reset */
+			if (usb_pipebulk(urb->pipe)
+				&& unlikely(!usb_gettoggle(urb->dev, (epnum	& 0x0f),
+				!(epnum & 0x80)))) {
+
+				phci_hcd_clear_toggle(urb->dev,
+					epnum & 0x0f,
+					!(epnum &	0x80), qh);
+
+				/*reset	our data toggle	*/
+
+				qh->datatoggle = 0;
+				qh->ping = 0;
+
+			}
+			phci_hcd_qh_update(hcd,	qh, qtd);
+		}
+		/*put everything in pending, will be cleared during scheduling */
+		ptd_map_buff->pending_ptd_bitmap |= td_ptd_map->ptd_bitmap;
+		list_splice(qtd_list, qh->qtd_list.prev);
+	} else {
+		qh = phci_hcd_make_qh(hcd, urb,	qtd_list, status);
+		*ptr = qh;
+	}
+	pehci_entry("--	%s: Exit qh %p\n", __FUNCTION__, qh);
+	return qh;
+}
+
+/*
+ * Queue a list of qtds on an async (control/bulk) endpoint.
+ *
+ * Links the urb to the endpoint, appends the qtds to the (possibly
+ * newly created) queue head and, when the qh is idle, assigns it a PTD
+ * slot via phci_hcd_qh_link_async().  On any failure the qtd list is
+ * freed and *status carries the error; returns the qh or NULL.
+ */
+struct ehci_qh *
+phci_hcd_submit_async(phci_hcd * hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+		      struct list_head *qtd_list, struct urb *urb, int *status)
+{
+	struct ehci_qtd	*qtd;
+	struct hcd_dev *dev;
+	int epnum;
+
+#ifndef THREAD_BASED
+	unsigned long flags;
+#endif
+
+	
+	struct ehci_qh *qh = 0;
+
+	urb_priv_t *urb_priv = urb->hcpriv;
+
+	qtd = list_entry(qtd_list->next, struct	ehci_qtd, qtd_list);
+	dev = (struct hcd_dev *) urb->hcpriv;
+	epnum =	usb_pipeendpoint(urb->pipe);
+	/* driver-private encoding: tag non-control IN endpoints */
+	if (usb_pipein(urb->pipe) && !usb_pipecontrol(urb->pipe)) {
+		epnum |= 0x10;
+	}
+
+	pehci_entry("++	%s, enter\n", __FUNCTION__);
+
+	/* ehci_hcd->lock guards shared	data against other CPUs:
+	 *   ehci_hcd:	    async, reclaim, periodic (and shadow), ...
+	 *   hcd_dev:	    ep[]
+
+	 *   ehci_qh:	    qh_next, qtd_list
+
+	 *   ehci_qtd:	    qtd_list
+	 *
+	 * Also, hold this lock	when talking to	HC registers or
+	 * when	updating hw_* fields in	shared qh/qtd/... structures.
+	 */
+#ifndef THREAD_BASED
+	spin_lock_irqsave(&hcd->lock, flags);
+#endif
+
+	spin_lock(&hcd_data_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+	usb_hcd_link_urb_to_ep(&hcd->usb_hcd, urb);
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	qh = phci_hcd_qh_append_tds(hcd, ep, urb, qtd_list, &ep->hcpriv,
+		status);
+#else
+	qh = phci_hcd_qh_append_tds(hcd, urb, qtd_list, &urb->ep->hcpriv,
+		status);
+#endif
+	if (!qh	|| *status < 0) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+		usb_hcd_unlink_urb_from_ep(&hcd->usb_hcd, urb);
+#endif
+		goto cleanup;
+	}
+	/* Control/bulk	operations through TTs don't need scheduling,
+	 * the HC and TT handle	it when	the TT has a buffer ready.
+	 */
+
+	/* now the quehead can not be in the unlink state */
+
+//	printk("qh->qh_state:0x%x \n",qh->qh_state);
+	if (qh->qh_state == QH_STATE_UNLINK) {
+		pehci_info("%s:	free the urb,qh->state %x\n", __FUNCTION__,
+			   qh->qh_state);
+		phci_hcd_qtd_list_free(hcd, urb, &qh->qtd_list);
+		spin_unlock(&hcd_data_lock);
+		
+#ifndef THREAD_BASED			
+		spin_unlock_irqrestore(&hcd->lock, flags);
+#endif
+		*status	= -ENODEV;
+		return 0;
+	}
+
+	if (likely(qh != 0)) {
+		urb_priv->qh = qh;
+		if (likely(qh->qh_state	== QH_STATE_IDLE))
+			phci_hcd_qh_link_async(hcd, qh,	status);
+	}
+
+	cleanup:
+	spin_unlock(&hcd_data_lock);
+
+#ifndef THREAD_BASED			
+	/* free	it from	lock systme can	sleep now */
+	spin_unlock_irqrestore(&hcd->lock, flags);
+#endif
+	
+	/* could not get the QH	terminate and clean. */
+	if (unlikely(qh	== 0) || *status < 0) {
+		phci_hcd_qtd_list_free(hcd, urb, qtd_list);
+		return qh;
+	}
+	return qh;
+}
+
+/*
+ * initilaize the s-mask c-mask	for
+ * interrupt transfers.
+ */
+/*
+ * Initialise the start/complete-split (s-mask/c-mask) and period
+ * fields of an interrupt PTD (qhint).
+ *
+ * Returns the number of microframes needed per interval.
+ */
+static int
+phci_hcd_qhint_schedule(phci_hcd * hcd,
+			struct ehci_qh *qh,
+			struct ehci_qtd	*qtd,
+			struct _isp1763_qhint *qha, struct urb *urb)
+{
+	int i =	0;
+	u32 td_info3 = 0;
+	u32 period = 0;
+	u32 usof = 0;
+	u32 csplit = 0xFF;
+	int maxpacket;
+	u32 numberofusofs = 0;
+
+	/*and since whole msec frame is empty, i can schedule in any uframe */
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, !usb_pipein(urb->pipe));
+	maxpacket &= 0x7ff;
+	/*length of the	data per uframe	*/
+	maxpacket = XFER_PER_UFRAME(qha->td_info1) * maxpacket;
+
+	/*calculate the number of uframes required, rounding up */
+	numberofusofs =	urb->transfer_buffer_length / maxpacket;
+	if (urb->transfer_buffer_length	% maxpacket) {
+		numberofusofs += 1;
+	}
+
+	/* build a contiguous uframe mask with bits 0..numberofusofs-1.
+	 * (The original "usofmask <<= i" accumulated the shift amount and
+	 * marked uframes 0,1,3,6,... instead of consecutive ones.)
+	 */
+	for (i = 0; i <	numberofusofs; i++) {
+		usof |=	(u32) 0x1 << i;
+	}
+
+	/*
+	   for full/low	speed devices, as we
+	   have	seperate location for all the endpoints
+	   let the start split goto the	first uframe, means 0 uframe
+	 */
+	if (urb->dev->speed != USB_SPEED_HIGH && usb_pipeint(urb->pipe)) {
+		/*set the complete splits */
+		/*but this should be set based on the maximum packet size */
+		csplit = 0x1C;
+		qha->td_info6 =	csplit;
+		period = qh->period;
+		if (period >= 32) {
+			period = qh->period / 2;
+		}
+		td_info3 = period;
+		goto done;
+
+	} else {
+		if (qh->period >= 8) {
+			period = qh->period / 8;
+		} else {
+			period = qh->period;
+		}
+	}
+	/*hardware limitation: 5 bits, maximum of 31 */
+	if (period >= 32) {
+		period = 32;
+		/*divide by 2 */
+		period >>= 1;
+	}
+	if (qh->period >= 8) {
+		/*millisecond period */
+		td_info3 = (period << 3);
+	} else {
+		/*usof based transfers, minimum 4 usofs */
+		td_info3 = period;
+		usof = 0x11;
+	}
+
+	done:
+	qha->td_info3 |= td_info3;
+	qha->td_info5 |= usof;
+	return numberofusofs;
+}
+
+/*
+ * Queue interrupt qtds on their endpoint.
+ *
+ * Only one URB may be in flight per interrupt endpoint: if the qh
+ * already holds queued qtds the call fails with -EBUSY.  Creates the
+ * qh on first submission and links it to a PTD slot when idle.
+ */
+struct ehci_qh *
+phci_hcd_submit_interrupt(phci_hcd * hcd,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	struct usb_host_endpoint *ep,
+#else
+#endif
+			  struct list_head *qtd_list,
+			  struct urb *urb, int *status)
+{
+	struct ehci_qtd	*qtd;
+	struct _hcd_dev	*dev;
+	int epnum;
+	unsigned long flags;
+	struct ehci_qh *qh = 0;
+	urb_priv_t *urb_priv = (urb_priv_t *) urb->hcpriv;
+
+	qtd = list_entry(qtd_list->next, struct	ehci_qtd, qtd_list);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+	dev = (struct hcd_dev *) urb->hcpriv;
+	epnum = ep->desc.bEndpointAddress;
+
+	pehci_entry("++ %s, enter\n", __FUNCTION__);
+
+
+	/*check for more than one urb queued for this endpoint */
+	qh = ep->hcpriv;
+#else
+	dev = (struct _hcd_dev *) (urb->hcpriv);
+	epnum = urb->ep->desc.bEndpointAddress;
+
+	pehci_entry("++ %s, enter\n", __FUNCTION__);
+
+
+	/*check for more than one urb queued for this endpoint */
+	qh = (struct ehci_qh *) urb->ep->hcpriv;
+#endif
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	if (unlikely(qh	!= 0)) {
+		if (!list_empty(&qh->qtd_list))	{
+			*status	= -EBUSY;
+			goto done;
+		} else {
+			td_ptd_map_buff_t *ptd_map_buff;
+			td_ptd_map_t *td_ptd_map;
+			ptd_map_buff = &(td_ptd_map_buff[qh->type]);
+			td_ptd_map = &ptd_map_buff->map_list[qh->qtd_ptd_index];
+			ptd_map_buff->pending_ptd_bitmap |=
+				td_ptd_map->ptd_bitmap;
+			 /*NEW*/ td_ptd_map->qtd = qtd;
+			/* maybe reset hardware's data toggle in the qh	*/
+			if (unlikely(!usb_gettoggle(urb->dev, epnum & 0x0f,
+				!(epnum & 0x80)))) {
+
+				/*reset	our data toggle	*/
+				td_ptd_map->datatoggle = 0;
+				usb_settoggle(urb->dev,	epnum &	0x0f,
+					!(epnum &	0x80), 1);
+				qh->datatoggle = 0;
+			}
+			/* trust the QH	was set	up as interrupt	... */
+			list_splice(qtd_list, &qh->qtd_list);
+		}
+	}
+
+
+	if (!qh) {
+		qh = phci_hcd_make_qh(hcd, urb,	qtd_list, status);
+		/* NOTE(review): branch hint looks inverted — allocation
+		 * failure should be the unlikely case; harmless to behavior */
+		if (likely(qh == 0)) {
+			*status	= -ENOMEM;
+			goto done;
+		}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+		ep->hcpriv = qh;
+#else
+		urb->ep->hcpriv = qh;
+#endif
+	}
+
+	if (likely(qh != 0)) {
+		urb_priv->qh = qh;
+		if (likely(qh->qh_state	== QH_STATE_IDLE)) {
+			phci_hcd_qh_link_async(hcd, qh,	status);
+		}
+	}
+
+
+	done:
+	/* free	it from	lock systme can	sleep now */
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	/* could not get the QH	terminate and clean. */
+	if (unlikely(qh	== 0) || *status < 0) {
+		phci_hcd_qtd_list_free(hcd, urb, qtd_list);
+		return qh;
+	}
+	return qh;
+}
+
+
+
+
+/*
+ * Convert an EHCI-style qtd into the controller's proprietary transfer
+ * descriptor (PTD, also called "qha"), used for ATL and INT transfers.
+ *
+ * Builds the four PTD dwords: dword0 = valid/length/maxpacket/endpoint,
+ * dword1 = PID, device address and split-transaction routing, dword2 =
+ * data start address and reload count, dword3 = active bit, data
+ * toggle, nak and retry counts.  Returns @ptd.
+ */
+void *
+phci_hcd_qha_from_qtd(phci_hcd * hcd,
+	struct ehci_qtd *qtd,
+	struct urb *urb,
+	void *ptd, u32 ptd_data_addr, struct ehci_qh *qh)
+{
+	u8 toggle = qh->datatoggle;
+	u32 token = 0;
+	u32 td_info1 = 0;
+	u32 td_info3 = 0;
+	u32 td_info4 = 0;
+	int maxpacket =	0;
+	u32 length = 0,	temp = 0;
+	/*for non high speed devices */
+	u32 portnum = 0;
+	u32 hubnum = 0;
+	u32 se = 0, rl = 0x0, nk = 0x0;
+	u8 datatoggle =	0;
+	struct isp1763_mem_addr	*mem_addr = &qtd->mem_addr;
+	u32 data_addr =	0;
+	u32 multi = 0;
+	struct _isp1763_qha *qha = (isp1763_qha	*) ptd;
+	pehci_entry("++	%s: Entered\n",	__FUNCTION__);
+
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+
+	/* bits 12:11 carry the high-bandwidth multiplier */
+	multi =	1 + ((maxpacket	>> 11) & 0x3);
+
+	maxpacket &= 0x7ff;
+
+	/************************first word*********************************/
+	length = qtd->length;
+	td_info1 = QHA_VALID;
+	td_info1 |= (length << 3);
+	td_info1 |= (maxpacket << 18);
+	td_info1 |= (usb_pipeendpoint(urb->pipe) << 31);
+	td_info1 |= MULTI(multi);
+	/*set the first	dword */
+	qha->td_info1 =	td_info1;
+
+	pehci_print("%s: length	%d, 1st	word 0x%08x\n",	__FUNCTION__, length,
+		    qha->td_info1);
+
+	/*******************second word***************************************/
+	/* NOTE(review): extracts the PID from bits 9:8 of hw_token without
+	 * a le32_to_cpu conversion — assumes a little-endian CPU; confirm
+	 * for big-endian hosts */
+	temp = qtd->hw_token;
+
+	/*take the pid,	thats of only interest to me from qtd,
+	 */
+
+	temp = temp & 0x0300;
+	temp = temp >> 8;
+	/*take the endpoint and	its 3 bits */
+	token =	(usb_pipeendpoint(urb->pipe) & 0xE) >> 1;
+	token |= usb_pipedevice(urb->pipe) << 3;
+
+	if (urb->dev->speed != USB_SPEED_HIGH) {
+		pehci_print("device is full/low	speed, %d\n", urb->dev->speed);
+		token |= 1 << 14;
+		portnum	= urb->dev->ttport;
+		 /*IMMED*/ hubnum = urb->dev->tt->hub->devnum;
+		token |= portnum << 18;
+		token |= hubnum	<< 25;
+		/*for non-high speed transfer
+		   reload and nak counts are zero
+		 */
+		rl = 0x0;
+		nk = 0x0;
+
+	}
+
+	/*se should be 0x2 for only low	speed devices */
+	if (urb->dev->speed == USB_SPEED_LOW) {
+		se = 0x2;
+	}
+
+	if (usb_pipeint(urb->pipe)) {
+		/*	reload count and nakcount is
+		   required for	only async transfers
+		 */
+		rl = 0x0;
+	}
+
+	/*set the se field, should be zero for all
+	   but low speed devices
+	 */
+	token |= se << 16;
+	/*take the pid */
+	token |= temp << 10;
+
+	if (usb_pipebulk(urb->pipe)) {
+		token |= EPTYPE_BULK;
+	} else if (usb_pipeint(urb->pipe)) {
+		token |= EPTYPE_INT;
+	} else if (usb_pipeisoc(urb->pipe)) {
+		token |= EPTYPE_ISO;
+	}
+
+
+	qha->td_info2 =	token;
+
+	pehci_print("%s: second	word 0x%08x, qtd token 0x%08x\n",
+		    __FUNCTION__, qha->td_info2, temp);
+
+	/***********************Third word*************************************/
+
+	/*calculate the	data start address from	mem_addr for qha */
+
+	/* hardware encoding of the payload start: (addr - 0x400) >> 3 */
+	data_addr = ((u32) (mem_addr->phy_addr)	& 0xffff) - 0x400;
+	data_addr >>= 3;
+	pehci_print("data start	address	%x\n", data_addr);
+	/*use this field only if there
+	 * is something	to transfer
+	 * */
+	if (length) {
+		td_info3 = data_addr <<	8;
+	}
+	/*RL Count, 16 */
+	td_info3 |= (rl	<< 25);
+	qha->td_info3 =	td_info3;
+
+	pehci_print("%s: third word 0x%08x, tdinfo 0x%08x\n",
+		__FUNCTION__, qha->td_info3, td_info3);
+
+
+	/**************************fourt word*************************************/
+
+	if (usb_pipecontrol(urb->pipe))	{
+		/* control stages carry the toggle in bit 31 of hw_token */
+		datatoggle = qtd->hw_token >> 31;
+	} else {
+		/*take the data	toggle from the	previous completed transfer
+		   or zero in case of fresh */
+		datatoggle = toggle;
+	}
+
+	td_info4 = QHA_ACTIVE;
+	/*dt */
+	td_info4 |= datatoggle << 25;	/*QHA_DATA_TOGGLE; */
+	/*3 retry count	for setup else forever */
+	if (PTD_PID(qha->td_info2) == SETUP_PID) {
+		td_info4 |= (3 << 23);
+	} else {
+		td_info4 |= (0 << 23);
+	}
+	
+	/*nak count */
+	td_info4 |= (nk	<< 19);
+
+	td_info4 |= (qh->ping << 26);
+	qha->td_info4 =	td_info4;
+#ifdef PTD_DUMP_SCHEDULE
+	printk("SCHEDULE PTD DUMPE\n") ;
+	printk("SDW0: 0x%08x\n",qha->td_info1);
+	printk("SDW1: 0x%08x\n",qha->td_info2);
+	printk("SDW2: 0x%08x\n",qha->td_info3);
+	printk("SDW3: 0x%08x\n",qha->td_info4);
+#endif
+	pehci_print("%s: fourt word 0x%08x\n", __FUNCTION__, qha->td_info4);
+	pehci_entry("--	%s: Exit, qha %p\n", __FUNCTION__, qha);
+	return qha;
+
+}
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 1bfcd02..cf5d452 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -218,6 +218,21 @@
 	  See <http://www.linux-usb.org/usbtest/> for more information,
 	  including sample test device firmware and "how to use it".
 
+config USB_EHSET_TEST_FIXTURE
+	tristate "USB EHSET Test Fixture Driver"
+	depends on USB && USB_EHCI_EHSET
+	default n
+	help
+	  Say Y here if you want to use EHSET Test Fixture device for host
+	  compliance testing.
+
+	  This driver initiates test modes on the downstream port to which the
+	  test fixture is attached.
+
+	  See <http://www.usb.org/developers/onthego/EHSET_v1.01.pdf>
+	  for more information.
+
+
 config USB_ISIGHTFW
 	tristate "iSight firmware loading support"
 	depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 796ce7e..c8e777a 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_USB_LEGOTOWER)		+= legousbtower.o
 obj-$(CONFIG_USB_RIO500)		+= rio500.o
 obj-$(CONFIG_USB_TEST)			+= usbtest.o
+obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)	+= ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)	+= trancevibrator.o
 obj-$(CONFIG_USB_USS720)		+= uss720.o
 obj-$(CONFIG_USB_SEVSEG)		+= usbsevseg.o
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
new file mode 100644
index 0000000..30879e0
--- /dev/null
+++ b/drivers/usb/misc/ehset.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/hcd.h>
+
+#define TEST_SE0_NAK_PID		0x0101
+#define TEST_J_PID			0x0102
+#define TEST_K_PID			0x0103
+#define TEST_PACKET_PID			0x0104
+#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106
+#define TEST_SINGLE_STEP_GET_DEV_DESC	0x0107
+#define TEST_SINGLE_STEP_SET_FEATURE	0x0108
+
+/*
+ * Issue a SET_FEATURE(PORT_TEST) to @hub_udev for port @port1 with the
+ * given test selector (USB 2.0 spec, table 9-7: 1=J, 2=K, 3=SE0_NAK,
+ * 4=PACKET).  Returns the usb_control_msg() result.
+ */
+static int ehset_set_port_test(struct usb_device *hub_udev, int port1,
+			       int selector)
+{
+	return usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+		USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+		(selector << 8) | port1, NULL, 0, 1000);
+}
+
+/*
+ * Probe: the EHSET fixture encodes the requested test mode in its
+ * product ID.  Put the upstream hub port (or the root hub) into the
+ * matching test state.  Returns 0 on success or a negative errno.
+ */
+static int ehset_probe(struct usb_interface *intf,
+		       const struct usb_device_id *id)
+{
+	int status = -1;
+	struct usb_device *dev = interface_to_usbdev(intf);
+	struct usb_device *rh_udev = dev->bus->root_hub;
+	struct usb_device *hub_udev = dev->parent;
+	int port1 = dev->portnum;
+	int test_mode = le16_to_cpu(dev->descriptor.idProduct);
+
+	switch (test_mode) {
+	case TEST_SE0_NAK_PID:
+		status = ehset_set_port_test(hub_udev, port1, 3);
+		break;
+	case TEST_J_PID:
+		status = ehset_set_port_test(hub_udev, port1, 1);
+		break;
+	case TEST_K_PID:
+		status = ehset_set_port_test(hub_udev, port1, 2);
+		break;
+	case TEST_PACKET_PID:
+		status = ehset_set_port_test(hub_udev, port1, 4);
+		break;
+	case TEST_HS_HOST_PORT_SUSPEND_RESUME:
+		/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
+		msleep(15 * 1000);
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT,
+			USB_PORT_FEAT_SUSPEND, port1, NULL, 0, 1000);
+		if (status < 0)
+			break;
+		msleep(15 * 1000);
+		status = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+			USB_REQ_CLEAR_FEATURE, USB_RT_PORT,
+			USB_PORT_FEAT_SUSPEND, port1, NULL, 0, 1000);
+		break;
+	case TEST_SINGLE_STEP_GET_DEV_DESC: {
+		/* Test: wait for 15secs -> GetDescriptor request */
+		struct usb_device_descriptor *buf;
+
+		msleep(15 * 1000);
+		buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+			USB_DT_DEVICE << 8, 0,
+			buf, USB_DT_DEVICE_SIZE,
+			USB_CTRL_GET_TIMEOUT);
+		kfree(buf);
+		break;
+	}
+	case TEST_SINGLE_STEP_SET_FEATURE:
+		/* GetDescriptor's SETUP request -> 15secs delay -> IN & STATUS
+		 * Issue request to ehci root hub driver with portnum = 1
+		 * (selector 6 is the EHCI single-step set-feature test)
+		 */
+		status = usb_control_msg(rh_udev, usb_sndctrlpipe(rh_udev, 0),
+			USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST,
+			(6 << 8) | 1, NULL, 0, 60 * 1000);
+		break;
+	default:
+		pr_err("%s: undefined test mode ( %X )\n", __func__, test_mode);
+		return -EINVAL;
+	}
+
+	return (status < 0) ? status : 0;
+}
+
+/* Nothing to tear down: probe() leaves no per-interface state behind */
+static void ehset_disconnect(struct usb_interface *intf)
+{
+}
+
+/* Match table: EHSET vendor ID 0x1a0a, one product ID per test mode */
+static struct usb_device_id ehset_id_table[] = {
+	{ USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_J_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_K_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_PACKET_PID) },
+	{ USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) },
+	{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) },
+	{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) },
+	{ }			/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ehset_id_table);
+
+/* Standard USB interface driver; all the work happens in probe() */
+static struct usb_driver ehset_driver = {
+	.name =		"usb_ehset_test",
+	.probe =	ehset_probe,
+	.disconnect =	ehset_disconnect,
+	.id_table =	ehset_id_table,
+};
+
+/* Register the test-fixture driver with the USB core */
+static int __init ehset_init(void)
+{
+	return usb_register(&ehset_driver);
+}
+
+/* Unregister on module unload */
+static void __exit ehset_exit(void)
+{
+	usb_deregister(&ehset_driver);
+}
+
+module_init(ehset_init);
+module_exit(ehset_exit);
+
+MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 1309348..9cc6cb0 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -14,7 +14,7 @@
 	select TWL4030_USB if MACH_OMAP_3430SDP
 	select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
 	select USB_OTG_UTILS
-	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+	bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on the Mentor Graphics silicon IP.  Then
@@ -30,8 +30,8 @@
 
 	  If you do not know what this is, please say N.
 
-	  To compile this driver as a module, choose M here; the
-	  module will be called "musb-hdrc".
+#	  To compile this driver as a module, choose M here; the
+#	  module will be called "musb-hdrc".
 
 choice
 	prompt "Platform Glue Layer"
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index cd77719..0081182 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -74,6 +74,34 @@
 	  This transceiver supports high and full speed devices plus,
 	  in host mode, low speed.
 
+config USB_MSM_OTG_72K
+	bool "OTG support for Legacy Qualcomm on-chip USB controller"
+	depends on ARCH_MSM
+	select USB_OTG_UTILS
+	default USB_MSM_72K
+	help
+	  Enable this to support the USB OTG transceiver on MSM chips. It
+	  handles PHY initialization, clock management, low power mode and
+	  workarounds required after resetting the hardware. This driver is
+	  required even for peripheral-only or host-only mode configurations.
+	  Supports SRP and HNP when both gadget and host are selected.
+
+config MSM_OTG_ENABLE_A_WAIT_BCON_TIMEOUT
+	bool "Enable A-device timeout for B-device connection"
+	depends on USB_MSM_OTG_72K
+	default n
+	help
+	   OTG specification allows A-device to turn off VBUS if B-device
+	   fails to signal connect event before TA_WAIT_BCON (1.1 - 30 sec).
+	   SRP detection is enabled and hardware is put into low power mode
+	   upon this timeout.
+
+	   If you say yes, VBUS will be turned off if B-device does not signal
+	   connect in 30 sec. Otherwise VBUS is not turned off when Micro-A
+	   cable is connected. But hardware is put into LPM. Say no if leakage
+	   currents in your system are minimal.
+
+
 config TWL6030_USB
 	tristate "TWL6030 USB Transceiver Driver"
 	depends on TWL4030_CORE
@@ -121,6 +149,15 @@
 	  This driver is not supported on boards like trout which
 	  has an external PHY.
 
+config USB_MSM_ACA
+	bool "Support for Accessory Charger Adapter (ACA)"
+	depends on (USB_MSM_OTG || USB_MSM_OTG_72K) && ARCH_MSM
+	default n
+	help
+	  Accessory Charger Adapter is a charger specified in the USB Battery
+	  Charging Specification (1.1). It enables OTG devices to charge
+	  while operating as a host or peripheral at the same time.
+
 config AB8500_USB
         tristate "AB8500 USB Transceiver Driver"
         depends on AB8500_CORE
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index d2c0a7b..2984ee1 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_NOP_USB_XCEIV)	+= nop-usb-xceiv.o
 obj-$(CONFIG_USB_ULPI)		+= ulpi.o
 obj-$(CONFIG_USB_ULPI_VIEWPORT)	+= ulpi_viewport.o
+obj-$(CONFIG_USB_MSM_OTG_72K)	+= msm72k_otg.o
 obj-$(CONFIG_USB_MSM_OTG)	+= msm_otg.o
 obj-$(CONFIG_AB8500_USB)	+= ab8500-usb.o
 fsl_usb2_otg-objs		:= fsl_otg.o otg_fsm.o
diff --git a/drivers/usb/otg/msm72k_otg.c b/drivers/usb/otg/msm72k_otg.c
new file mode 100644
index 0000000..dddfa33
--- /dev/null
+++ b/drivers/usb/otg/msm72k_otg.c
@@ -0,0 +1,2957 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/device.h>
+#include <linux/pm_qos_params.h>
+#include <mach/msm_hsusb_hw.h>
+#include <mach/msm72k_otg.h>
+#include <mach/msm_hsusb.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <mach/clk.h>
+#include <mach/msm_xo.h>
+
+#define MSM_USB_BASE	(dev->regs)
+#define USB_LINK_RESET_TIMEOUT	(msecs_to_jiffies(10))
+#define DRIVER_NAME	"msm_otg"
+static void otg_reset(struct otg_transceiver *xceiv, int phy_reset);
+static void msm_otg_set_vbus_state(int online);
+static void msm_otg_set_id_state(int online);
+
+struct msm_otg *the_msm_otg;
+
+static int is_host(void)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	if (dev->pmic_id_notif_supp)
+		return dev->pmic_id_status ? 0 : 1;
+	else if (dev->pdata->otg_mode == OTG_ID)
+		return (OTGSC_ID & readl(USB_OTGSC)) ? 0 : 1;
+	else
+		return !test_bit(ID, &dev->inputs);
+}
+
+static int is_b_sess_vld(void)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	if (dev->pdata->otg_mode == OTG_ID)
+		return (OTGSC_BSV & readl(USB_OTGSC)) ? 1 : 0;
+	else
+		return test_bit(B_SESS_VLD, &dev->inputs);
+}
+
+static unsigned ulpi_read(struct msm_otg *dev, unsigned reg)
+{
+	unsigned ret, timeout = 100000;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* initiate read operation */
+	writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout))
+		cpu_relax();
+
+	if (timeout == 0) {
+		pr_err("%s: timeout %08x\n", __func__,
+				 readl(USB_ULPI_VIEWPORT));
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return 0xffffffff;
+	}
+	ret = ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return ret;
+}
+
+static int ulpi_write(struct msm_otg *dev, unsigned val, unsigned reg)
+{
+	unsigned timeout = 10000;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* initiate write operation */
+	writel(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout))
+		;
+
+	if (timeout == 0) {
+		pr_err("%s: timeout\n", __func__);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -1;
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+static int usb_ulpi_write(struct otg_transceiver *xceiv, u32 val, u32 reg)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	return ulpi_write(dev, val, reg);
+}
+
+static int usb_ulpi_read(struct otg_transceiver *xceiv, u32 reg)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	return ulpi_read(dev, reg);
+}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+static void enable_idgnd(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<4), 0x0E);
+	ulpi_write(dev, (1<<4), 0x11);
+	writel(readl(USB_OTGSC) | OTGSC_IDIE, USB_OTGSC);
+}
+
+static void disable_idgnd(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<4), 0x0F);
+	ulpi_write(dev, (1<<4), 0x12);
+	writel(readl(USB_OTGSC) & ~OTGSC_IDIE, USB_OTGSC);
+}
+#endif
+
+static void enable_idabc(struct msm_otg *dev)
+{
+#ifdef CONFIG_USB_MSM_ACA
+	ulpi_write(dev, (1<<5), 0x0E);
+	ulpi_write(dev, (1<<5), 0x11);
+#endif
+}
+static void disable_idabc(struct msm_otg *dev)
+{
+#ifdef CONFIG_USB_MSM_ACA
+	ulpi_write(dev, (1<<5), 0x0F);
+	ulpi_write(dev, (1<<5), 0x12);
+#endif
+}
+
+static void enable_sess_valid(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<2), 0x0E);
+	ulpi_write(dev, (1<<2), 0x11);
+	writel(readl(USB_OTGSC) | OTGSC_BSVIE, USB_OTGSC);
+}
+
+static void disable_sess_valid(struct msm_otg *dev)
+{
+	/* Do nothing if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return;
+
+	ulpi_write(dev, (1<<2), 0x0F);
+	ulpi_write(dev, (1<<2), 0x12);
+	writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+}
+#ifdef CONFIG_USB_MSM_ACA
+static void set_aca_id_inputs(struct msm_otg *dev)
+{
+	u8		phy_ints;
+
+	phy_ints = ulpi_read(dev, 0x13);
+	if (phy_ints == -ETIMEDOUT)
+		return;
+
+	pr_debug("phy_ints = %x\n", phy_ints);
+	clear_bit(ID_A, &dev->inputs);
+	clear_bit(ID_B, &dev->inputs);
+	clear_bit(ID_C, &dev->inputs);
+	if (phy_id_state_a(phy_ints)) {
+		pr_debug("ID_A set\n");
+		set_bit(ID_A, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+	} else if (phy_id_state_b(phy_ints)) {
+		pr_debug("ID_B set\n");
+		set_bit(ID_B, &dev->inputs);
+	} else if (phy_id_state_c(phy_ints)) {
+		pr_debug("ID_C set\n");
+		set_bit(ID_C, &dev->inputs);
+	}
+	if (is_b_sess_vld())
+		set_bit(B_SESS_VLD, &dev->inputs);
+	else
+		clear_bit(B_SESS_VLD, &dev->inputs);
+}
+#define get_aca_bmaxpower(dev)		(dev->b_max_power)
+#define set_aca_bmaxpower(dev, power)	(dev->b_max_power = power)
+#else
+#define get_aca_bmaxpower(dev)		0
+#define set_aca_bmaxpower(dev, power)
+#endif
+static inline void set_pre_emphasis_level(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->pemp_level == PRE_EMPHASIS_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_CONFIG_REG3);
+	res &= ~(ULPI_PRE_EMPHASIS_MASK);
+	if (dev->pdata->pemp_level != PRE_EMPHASIS_DISABLE)
+		res |= dev->pdata->pemp_level;
+	ulpi_write(dev, res, ULPI_CONFIG_REG3);
+}
+
+static inline void set_hsdrv_slope(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->hsdrvslope == HS_DRV_SLOPE_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_CONFIG_REG3);
+	res &= ~(ULPI_HSDRVSLOPE_MASK);
+	res |= (dev->pdata->hsdrvslope & ULPI_HSDRVSLOPE_MASK);
+	ulpi_write(dev, res, ULPI_CONFIG_REG3);
+}
+
+static inline void set_cdr_auto_reset(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->cdr_autoreset == CDR_AUTO_RESET_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_DIGOUT_CTRL);
+	if (dev->pdata->cdr_autoreset == CDR_AUTO_RESET_ENABLE)
+		res &=  ~ULPI_CDR_AUTORESET;
+	else
+		res |=  ULPI_CDR_AUTORESET;
+	ulpi_write(dev, res, ULPI_DIGOUT_CTRL);
+}
+
+static inline void set_se1_gating(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->se1_gating == SE1_GATING_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_DIGOUT_CTRL);
+	if (dev->pdata->se1_gating == SE1_GATING_ENABLE)
+		res &=  ~ULPI_SE1_GATE;
+	else
+		res |=  ULPI_SE1_GATE;
+	ulpi_write(dev, res, ULPI_DIGOUT_CTRL);
+}
+static inline void set_driver_amplitude(struct msm_otg *dev)
+{
+	unsigned res = 0;
+
+	if (!dev->pdata || dev->pdata->drv_ampl == HS_DRV_AMPLITUDE_DEFAULT)
+		return;
+
+	res = ulpi_read(dev, ULPI_CONFIG_REG2);
+	res &= ~ULPI_DRV_AMPL_MASK;
+	if (dev->pdata->drv_ampl != HS_DRV_AMPLITUDE_ZERO_PERCENT)
+		res |= dev->pdata->drv_ampl;
+	ulpi_write(dev, res, ULPI_CONFIG_REG2);
+}
+
+static const char *state_string(enum usb_otg_state state)
+{
+	switch (state) {
+	case OTG_STATE_A_IDLE:		return "a_idle";
+	case OTG_STATE_A_WAIT_VRISE:	return "a_wait_vrise";
+	case OTG_STATE_A_WAIT_BCON:	return "a_wait_bcon";
+	case OTG_STATE_A_HOST:		return "a_host";
+	case OTG_STATE_A_SUSPEND:	return "a_suspend";
+	case OTG_STATE_A_PERIPHERAL:	return "a_peripheral";
+	case OTG_STATE_A_WAIT_VFALL:	return "a_wait_vfall";
+	case OTG_STATE_A_VBUS_ERR:	return "a_vbus_err";
+	case OTG_STATE_B_IDLE:		return "b_idle";
+	case OTG_STATE_B_SRP_INIT:	return "b_srp_init";
+	case OTG_STATE_B_PERIPHERAL:	return "b_peripheral";
+	case OTG_STATE_B_WAIT_ACON:	return "b_wait_acon";
+	case OTG_STATE_B_HOST:		return "b_host";
+	default:			return "UNDEFINED";
+	}
+}
+
+static const char *timer_string(int bit)
+{
+	switch (bit) {
+	case A_WAIT_VRISE:		return "a_wait_vrise";
+	case A_WAIT_VFALL:		return "a_wait_vfall";
+	case B_SRP_FAIL:		return "b_srp_fail";
+	case A_WAIT_BCON:		return "a_wait_bcon";
+	case A_AIDL_BDIS:		return "a_aidl_bdis";
+	case A_BIDL_ADIS:		return "a_bidl_adis";
+	case B_ASE0_BRST:		return "b_ase0_brst";
+	default:			return "UNDEFINED";
+	}
+}
+
+/* Prevent idle power collapse(pc) while operating in peripheral mode */
+static void otg_pm_qos_update_latency(struct msm_otg *dev, int vote)
+{
+	struct msm_otg_platform_data *pdata = dev->pdata;
+	u32 swfi_latency = 0;
+
+	if (pdata)
+		swfi_latency = pdata->swfi_latency + 1;
+
+	if (vote)
+		pm_qos_update_request(&pdata->pm_qos_req_dma,
+				swfi_latency);
+	else
+		pm_qos_update_request(&pdata->pm_qos_req_dma,
+				PM_QOS_DEFAULT_VALUE);
+}
+
+/* If USB Core is running its protocol engine based on PCLK,
+ * PCLK must be running at >60Mhz for correct HSUSB operation and
+ * USB core cannot tolerate frequency changes on PCLK. For such
+ * USB cores, vote for maximum clk frequency on pclk source
+ */
+static void msm_otg_vote_for_pclk_source(struct msm_otg *dev, int vote)
+{
+	if (dev->pclk_src && pclk_requires_voting(&dev->otg)) {
+
+		if (vote)
+			clk_enable(dev->pclk_src);
+		else
+			clk_disable(dev->pclk_src);
+	}
+}
+
+/* Controller gives an interrupt every 1 msec if 1MSIE is set in OTGSC.
+ * This interrupt can be used as a timer source and OTG timers can be
+ * implemented. But hrtimers on MSM hardware can give at least 1/32 KHz
+ * precision. This precision is more than enough for OTG timers.
+ */
+static enum hrtimer_restart msm_otg_timer_func(struct hrtimer *_timer)
+{
+	struct msm_otg *dev = container_of(_timer, struct msm_otg, timer);
+
+	/* Phy lockup issues are observed when VBUS Valid interrupt is
+	 * enabled. Hence set A_VBUS_VLD upon timer expiration.
+	 */
+	if (dev->active_tmout == A_WAIT_VRISE)
+		set_bit(A_VBUS_VLD, &dev->inputs);
+	else
+		set_bit(dev->active_tmout, &dev->tmouts);
+
+	pr_debug("expired %s timer\n", timer_string(dev->active_tmout));
+	queue_work(dev->wq, &dev->sm_work);
+	return HRTIMER_NORESTART;
+}
+
+static void msm_otg_del_timer(struct msm_otg *dev)
+{
+	int bit = dev->active_tmout;
+
+	pr_debug("deleting %s timer. remaining %lld msec \n", timer_string(bit),
+			div_s64(ktime_to_us(hrtimer_get_remaining(&dev->timer)),
+					1000));
+	hrtimer_cancel(&dev->timer);
+	clear_bit(bit, &dev->tmouts);
+}
+
+static void msm_otg_start_timer(struct msm_otg *dev, int time, int bit)
+{
+	clear_bit(bit, &dev->tmouts);
+	dev->active_tmout = bit;
+	pr_debug("starting %s timer\n", timer_string(bit));
+	hrtimer_start(&dev->timer,
+			ktime_set(time / 1000, (time % 1000) * 1000000),
+			HRTIMER_MODE_REL);
+}
+
+/* No two otg timers run in parallel. So one hrtimer is sufficient */
+static void msm_otg_init_timer(struct msm_otg *dev)
+{
+	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	dev->timer.function = msm_otg_timer_func;
+}
+
+static const char *event_string(enum usb_otg_event event)
+{
+	switch (event) {
+	case OTG_EVENT_DEV_CONN_TMOUT:
+		return "DEV_CONN_TMOUT";
+	case OTG_EVENT_NO_RESP_FOR_HNP_ENABLE:
+		return "NO_RESP_FOR_HNP_ENABLE";
+	case OTG_EVENT_HUB_NOT_SUPPORTED:
+		return "HUB_NOT_SUPPORTED";
+	case OTG_EVENT_DEV_NOT_SUPPORTED:
+		return "DEV_NOT_SUPPORTED,";
+	case OTG_EVENT_HNP_FAILED:
+		return "HNP_FAILED";
+	case OTG_EVENT_NO_RESP_FOR_SRP:
+		return "NO_RESP_FOR_SRP";
+	default:
+		return "UNDEFINED";
+	}
+}
+
+static int msm_otg_send_event(struct otg_transceiver *xceiv,
+				enum usb_otg_event event)
+{
+	char module_name[16];
+	char udev_event[128];
+	char *envp[] = { module_name, udev_event, NULL };
+	int ret;
+
+	pr_debug("sending %s event\n", event_string(event));
+
+	snprintf(module_name, 16, "MODULE=%s", DRIVER_NAME);
+	snprintf(udev_event, 128, "EVENT=%s", event_string(event));
+	ret = kobject_uevent_env(&xceiv->dev->kobj, KOBJ_CHANGE, envp);
+	if (ret < 0)
+		pr_info("uevent sending failed with ret = %d\n", ret);
+	return ret;
+}
+
+static int msm_otg_start_hnp(struct otg_transceiver *xceiv)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state != OTG_STATE_A_HOST) {
+		pr_err("HNP can not be initiated in %s state\n",
+				state_string(state));
+		return -EINVAL;
+	}
+
+	pr_debug("A-Host: HNP initiated\n");
+	clear_bit(A_BUS_REQ, &dev->inputs);
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+	return 0;
+}
+
+static int msm_otg_start_srp(struct otg_transceiver *xceiv)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	u32	val;
+	int ret = 0;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state != OTG_STATE_B_IDLE) {
+		pr_err("SRP can not be initiated in %s state\n",
+				state_string(state));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((jiffies - dev->b_last_se0_sess) < msecs_to_jiffies(TB_SRP_INIT)) {
+		pr_debug("initial conditions of SRP are not met. Try again"
+				"after some time\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/* Hardware auto assist data pulsing: Data pulse is given
+	 * for 7msec; wait for vbus
+	 */
+	val = readl(USB_OTGSC);
+	writel((val & ~OTGSC_INTR_STS_MASK) | OTGSC_HADP, USB_OTGSC);
+
+	/* VBUS pulsing is obsoleted in OTG 2.0 supplement */
+out:
+	return ret;
+}
+
+static int msm_otg_set_power(struct otg_transceiver *xceiv, unsigned mA)
+{
+	static enum chg_type 	curr_chg = USB_CHG_TYPE__INVALID;
+	struct msm_otg		*dev = container_of(xceiv, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = dev->pdata;
+	enum chg_type 		new_chg = atomic_read(&dev->chg_type);
+	unsigned 		charge = mA;
+
+	/* Call chg_connected only if the charger has changed */
+	if (new_chg != curr_chg && pdata->chg_connected) {
+		curr_chg = new_chg;
+		pdata->chg_connected(new_chg);
+	}
+
+	/* Always use USB_IDCHG_MAX for charging in ID_B and ID_C */
+	if (test_bit(ID_C, &dev->inputs) ||
+				test_bit(ID_B, &dev->inputs))
+		charge = USB_IDCHG_MAX;
+
+	pr_debug("Charging with %dmA current\n", charge);
+	/* Call vbus_draw only if the charger is of known type and also
+	 * ignore request to stop charging as a result of suspend interrupt
+	 * when wall-charger is used.
+	 */
+	if (pdata->chg_vbus_draw && new_chg != USB_CHG_TYPE__INVALID &&
+		(charge || new_chg != USB_CHG_TYPE__WALLCHARGER))
+			pdata->chg_vbus_draw(charge);
+
+	if (new_chg == USB_CHG_TYPE__WALLCHARGER) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return 0;
+}
+
+static int msm_otg_set_clk(struct otg_transceiver *xceiv, int on)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	if (on)
+		/* enable clocks */
+		clk_enable(dev->hs_clk);
+	else
+		clk_disable(dev->hs_clk);
+
+	return 0;
+}
+static void msm_otg_start_peripheral(struct otg_transceiver *xceiv, int on)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = dev->pdata;
+
+	if (!xceiv->gadget)
+		return;
+
+	if (on) {
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_PERIPHERAL);
+		/* vote for minimum dma_latency to prevent idle
+		 * power collapse(pc) while running in peripheral mode.
+		 */
+		otg_pm_qos_update_latency(dev, 1);
+
+		/* increment the clk reference count so that
+		 * it would be still on when disabled from
+		 * low power mode routine
+		 */
+		if (dev->pdata->pclk_required_during_lpm)
+			clk_enable(dev->hs_pclk);
+
+		usb_gadget_vbus_connect(xceiv->gadget);
+	} else {
+		atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
+		usb_gadget_vbus_disconnect(xceiv->gadget);
+
+		/* decrement the clk reference count so that
+		 * it would be off when disabled from
+		 * low power mode routine
+		 */
+		if (dev->pdata->pclk_required_during_lpm)
+			clk_disable(dev->hs_pclk);
+
+		otg_pm_qos_update_latency(dev, 0);
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_DISABLE);
+	}
+}
+
+static void msm_otg_start_host(struct otg_transceiver *xceiv, int on)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = dev->pdata;
+
+	if (!xceiv->host)
+		return;
+
+	if (dev->start_host) {
+		/* Some targets, e.g. ST1.5, use GPIO to choose b/w connector */
+		if (on && pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_HOST);
+
+		/* increment or decrement the clk reference count
+		 * to avoid usb h/w lockup issues when low power
+		 * mode is initiated and vbus is on.
+		 */
+		if (dev->pdata->pclk_required_during_lpm) {
+			if (on)
+				clk_enable(dev->hs_pclk);
+			else
+				clk_disable(dev->hs_pclk);
+		}
+
+		dev->start_host(xceiv->host, on);
+
+		if (!on && pdata->setup_gpio)
+			pdata->setup_gpio(USB_SWITCH_DISABLE);
+	}
+}
+
+static int msm_otg_suspend(struct msm_otg *dev)
+{
+	unsigned long timeout;
+	bool host_bus_suspend;
+	unsigned ret;
+	enum chg_type chg_type = atomic_read(&dev->chg_type);
+	unsigned long flags;
+
+	disable_irq(dev->irq);
+	if (atomic_read(&dev->in_lpm))
+		goto out;
+#ifdef CONFIG_USB_MSM_ACA
+	/*
+	 * ACA interrupts are disabled before entering into LPM.
+	 * If LPM is allowed in host mode with accessory charger
+	 * connected or only accessory charger is connected,
+	 * there is a chance that charger is removed and we will
+	 * not know about it.
+	 *
+	 * REVISIT
+	 *
+	 * Allowing LPM in case of gadget bus suspend is tricky.
+	 * Bus suspend can happen in two states.
+	 * 1. ID_float:  Allowing LPM has pros and cons. If LPM is allowed
+	 * and accessory charger is connected, we miss ID_float --> ID_C
+	 * transition where we could draw large amount of current
+	 * compared to the suspend current.
+	 * 2. ID_C: We can not allow LPM. If accessory charger is removed
+	 * we should not draw more than what host could supply which will
+	 * be less compared to accessory charger.
+	 *
+	 * For simplicity, LPM is not allowed in bus suspend.
+	 */
+#ifndef CONFIG_USB_MSM_STANDARD_ACA
+	/*
+	 * RID_A and IdGnd states are only possible with standard ACA.  We can
+	 * exit from low power mode with !BSV or IdGnd interrupt.  Hence LPM
+	 * is allowed.
+	 */
+	if ((test_bit(ID, &dev->inputs) && test_bit(B_SESS_VLD, &dev->inputs) &&
+			chg_type != USB_CHG_TYPE__WALLCHARGER) ||
+			test_bit(ID_A, &dev->inputs))
+		goto out;
+#endif
+	/* Disable ID_abc interrupts else it causes spurious interrupt */
+	disable_idabc(dev);
+#endif
+	ulpi_read(dev, 0x14);/* clear PHY interrupt latch register */
+
+	/*
+	 * Turn on PHY comparators if,
+	 * 1. USB wall charger is connected (bus suspend is not supported)
+	 * 2. Host bus suspend
+	 * 3. host is supported, but, id is not routed to pmic
+	 * 4. peripheral is supported, but, vbus is not routed to pmic
+	 */
+	host_bus_suspend = dev->otg.host && is_host();
+	if ((dev->otg.gadget && chg_type == USB_CHG_TYPE__WALLCHARGER) ||
+		host_bus_suspend ||
+		(dev->otg.host && !dev->pmic_id_notif_supp) ||
+		(dev->otg.gadget && !dev->pmic_vbus_notif_supp)) {
+		ulpi_write(dev, 0x01, 0x30);
+	}
+
+	ulpi_write(dev, 0x08, 0x09);/* turn off PLL on integrated phy */
+
+	timeout = jiffies + msecs_to_jiffies(500);
+	disable_phy_clk();
+	while (!is_phy_clk_disabled()) {
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Unable to suspend phy\n", __func__);
+			/*
+			 * Start otg state machine in default state upon
+			 * phy suspend failure*/
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_UNDEFINED;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			queue_work(dev->wq, &dev->sm_work);
+			goto out;
+		}
+		msleep(1);
+		/* check if there are any pending interrupts*/
+		if (((readl(USB_OTGSC) & OTGSC_INTR_MASK) >> 8) &
+				readl(USB_OTGSC)) {
+			enable_idabc(dev);
+			goto out;
+		}
+	}
+
+	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
+	/* Ensure that above operation is completed before turning off clocks */
+	mb();
+
+	if (dev->hs_pclk)
+		clk_disable(dev->hs_pclk);
+	if (dev->hs_cclk)
+		clk_disable(dev->hs_cclk);
+	/* usb phy no more require TCXO clock, hence vote for TCXO disable*/
+	ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_OFF);
+	if (ret)
+		pr_err("%s failed to devote for"
+			"TCXO D1 buffer%d\n", __func__, ret);
+
+	if (device_may_wakeup(dev->otg.dev)) {
+		enable_irq_wake(dev->irq);
+		if (dev->vbus_on_irq)
+			enable_irq_wake(dev->vbus_on_irq);
+	}
+
+	msm_otg_vote_for_pclk_source(dev, 0);
+
+	atomic_set(&dev->in_lpm, 1);
+
+	if (!host_bus_suspend && dev->pmic_vbus_notif_supp) {
+		pr_debug("phy can power collapse: (%d)\n",
+			can_phy_power_collapse(dev));
+		if (can_phy_power_collapse(dev) && dev->pdata->ldo_enable) {
+			pr_debug("disabling the regulators\n");
+			dev->pdata->ldo_enable(0);
+		}
+	}
+
+	/* phy can interrupt when vddcx is at 0.75, so irrespective
+	 * of pmic notification support, configure vddcx @0.75
+	 */
+	if (dev->pdata->config_vddcx)
+		dev->pdata->config_vddcx(0);
+	pr_info("%s: usb in low power mode\n", __func__);
+
+out:
+	enable_irq(dev->irq);
+
+	return 0;
+}
+
+static int msm_otg_resume(struct msm_otg *dev)
+{
+	unsigned temp;
+	unsigned ret;
+
+	if (!atomic_read(&dev->in_lpm))
+		return 0;
+	/* vote for vddcx, as PHY cannot tolerate vddcx below 1.0V */
+	if (dev->pdata->config_vddcx) {
+		ret = dev->pdata->config_vddcx(1);
+		if (ret) {
+			pr_err("%s: unable to enable vddcx digital core:%d\n",
+				__func__, ret);
+		}
+	}
+	if (dev->pdata->ldo_set_voltage)
+		dev->pdata->ldo_set_voltage(3400);
+
+	/* Vote for TCXO when waking up the phy */
+	ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_ON);
+	if (ret)
+		pr_err("%s failed to vote for"
+			"TCXO D1 buffer%d\n", __func__, ret);
+
+	msm_otg_vote_for_pclk_source(dev, 1);
+
+	if (dev->hs_pclk)
+		clk_enable(dev->hs_pclk);
+	if (dev->hs_cclk)
+		clk_enable(dev->hs_cclk);
+
+	temp = readl(USB_USBCMD);
+	temp &= ~ASYNC_INTR_CTRL;
+	temp &= ~ULPI_STP_CTRL;
+	writel(temp, USB_USBCMD);
+
+	if (device_may_wakeup(dev->otg.dev)) {
+		disable_irq_wake(dev->irq);
+		if (dev->vbus_on_irq)
+			disable_irq_wake(dev->vbus_on_irq);
+	}
+
+	atomic_set(&dev->in_lpm, 0);
+
+	pr_info("%s: usb exited from low power mode\n", __func__);
+
+	return 0;
+}
+
+static void msm_otg_get_resume(struct msm_otg *dev)
+{
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_get_noresume(dev->otg.dev);
+	pm_runtime_resume(dev->otg.dev);
+#else
+	msm_otg_resume(dev);
+#endif
+}
+
+static void msm_otg_put_suspend(struct msm_otg *dev)
+{
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put_sync(dev->otg.dev);
+#else
+	msm_otg_suspend(dev);
+#endif
+}
+
+static void msm_otg_resume_w(struct work_struct *w)
+{
+	struct msm_otg	*dev = container_of(w, struct msm_otg, otg_resume_work);
+	unsigned long timeout;
+
+	msm_otg_get_resume(dev);
+
+	if (!is_phy_clk_disabled())
+		goto phy_resumed;
+
+	timeout = jiffies + usecs_to_jiffies(100);
+	enable_phy_clk();
+	while (is_phy_clk_disabled() || !is_phy_active()) {
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Unable to wakeup phy. is_phy_active: %x\n",
+				 __func__, !!is_phy_active());
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			break;
+		}
+		udelay(10);
+	}
+
+phy_resumed:
+	/* Enable Idabc interrupts as these were disabled before entering LPM */
+	enable_idabc(dev);
+
+	/* If resume signalling finishes before lpm exit, PCD is not set in
+	 * USBSTS register. Drive resume signal to the downstream device now
+	 * so that host driver can process the upcoming port change interrupt.*/
+	if (is_host() || test_bit(ID_A, &dev->inputs)) {
+		writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC);
+		msm_otg_start_host(&dev->otg, REQUEST_RESUME);
+	}
+
+	/* Enable irq which was disabled before scheduling this work.
+	 * But don't release wake_lock, as we got async interrupt and
+	 * there will be some work pending for OTG state machine.
+	 */
+	enable_irq(dev->irq);
+}
+
+static int msm_otg_set_suspend(struct otg_transceiver *xceiv, int suspend)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	pr_debug("suspend request in state: %s\n",
+			state_string(state));
+
+	if (suspend) {
+		switch (state) {
+#ifndef CONFIG_MSM_OTG_ENABLE_A_WAIT_BCON_TIMEOUT
+		case OTG_STATE_A_WAIT_BCON:
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(xceiv, USB_IDCHG_MIN - 100);
+			msm_otg_put_suspend(dev);
+			break;
+#endif
+		case OTG_STATE_A_HOST:
+			clear_bit(A_BUS_REQ, &dev->inputs);
+			wake_lock(&dev->wlock);
+			queue_work(dev->wq, &dev->sm_work);
+			break;
+		case OTG_STATE_B_PERIPHERAL:
+			if (xceiv->gadget->b_hnp_enable) {
+				set_bit(A_BUS_SUSPEND, &dev->inputs);
+				set_bit(B_BUS_REQ, &dev->inputs);
+				wake_lock(&dev->wlock);
+				queue_work(dev->wq, &dev->sm_work);
+			}
+			break;
+		case OTG_STATE_A_PERIPHERAL:
+			msm_otg_start_timer(dev, TA_BIDL_ADIS,
+					A_BIDL_ADIS);
+			break;
+		default:
+			break;
+		}
+	} else {
+		unsigned long timeout;
+
+		switch (state) {
+		case OTG_STATE_A_PERIPHERAL:
+			/* A-peripheral observed activity on bus.
+			 * clear A_BIDL_ADIS timer.
+			 */
+			msm_otg_del_timer(dev);
+			break;
+		case OTG_STATE_A_SUSPEND:
+			/* Remote wakeup or resume */
+			set_bit(A_BUS_REQ, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_HOST;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (test_bit(ID_A, &dev->inputs) &&
+				(get_aca_bmaxpower(dev) < USB_IDCHG_MIN))
+				msm_otg_set_power(xceiv,
+					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
+			break;
+		default:
+			break;
+		}
+
+		if (suspend == atomic_read(&dev->in_lpm))
+			return 0;
+
+		disable_irq(dev->irq);
+		if (dev->pmic_vbus_notif_supp)
+			if (can_phy_power_collapse(dev) &&
+					dev->pdata->ldo_enable)
+				dev->pdata->ldo_enable(1);
+
+		msm_otg_get_resume(dev);
+
+		if (!is_phy_clk_disabled())
+			goto out;
+
+		timeout = jiffies + usecs_to_jiffies(100);
+		enable_phy_clk();
+		while (is_phy_clk_disabled() || !is_phy_active()) {
+			if (time_after(jiffies, timeout)) {
+				pr_err("%s: Unable to wakeup phy. "
+					"is_phy_active: %x\n",
+					__func__, !!is_phy_active());
+				/* Reset both phy and link */
+				otg_reset(&dev->otg, 1);
+				break;
+			}
+			udelay(10);
+		}
+out:
+		enable_idabc(dev);
+		enable_irq(dev->irq);
+
+	}
+
+	return 0;
+}
+
+static int msm_otg_set_peripheral(struct otg_transceiver *xceiv,
+			struct usb_gadget *gadget)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	if (!gadget) {
+		msm_otg_start_peripheral(xceiv, 0);
+		dev->otg.gadget = 0;
+		disable_sess_valid(dev);
+		if (!dev->otg.host)
+			disable_idabc(dev);
+		return 0;
+	}
+	dev->otg.gadget = gadget;
+	pr_info("peripheral driver registered w/ tranceiver\n");
+
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+	return 0;
+}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+static int usbdev_notify(struct notifier_block *self,
+			unsigned long action, void *device)
+{
+	enum usb_otg_state state;
+	struct msm_otg *dev = container_of(self, struct msm_otg, usbdev_nb);
+	struct usb_device *udev = device;
+	int work = 1;
+	unsigned long flags;
+
+	/* Interested only in devices connected
+	 * directly to the root hub.
+	 */
+	if (!udev->parent || udev->parent->parent)
+		goto out;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	switch (state) {
+	case OTG_STATE_A_WAIT_BCON:
+		if (action == USB_DEVICE_ADD) {
+			pr_debug("B_CONN set\n");
+			set_bit(B_CONN, &dev->inputs);
+			if (udev->actconfig) {
+				set_aca_bmaxpower(dev,
+					udev->actconfig->desc.bMaxPower * 2);
+				goto do_work;
+			}
+			if (udev->portnum == udev->bus->otg_port)
+				set_aca_bmaxpower(dev, USB_IB_UNCFG);
+			else
+				set_aca_bmaxpower(dev, 100);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		if (action == USB_DEVICE_REMOVE) {
+			pr_debug("B_CONN clear\n");
+			clear_bit(B_CONN, &dev->inputs);
+			set_aca_bmaxpower(dev, 0);
+		}
+		break;
+	default:
+		work = 0;
+		break;
+	}
+do_work:
+	if (work) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+out:
+	return NOTIFY_OK;
+}
+
+/*
+ * msm_otg_set_host - register/unregister a host controller driver
+ * with this transceiver.
+ *
+ * @xceiv: transceiver; must be the single registered msm_otg instance.
+ * @host:  host bus to bind, or NULL to unbind.
+ *
+ * On unbind the host is stopped, the usbdev notifier is removed and
+ * ID-ground detection disabled; ID/ACA detection is disabled too when
+ * no gadget remains bound.  On bind the root-hub port 1 is marked as
+ * the OTG port (CONFIG_USB_OTG), the usbdev notifier installed, and —
+ * unless the 72K peripheral driver is also built in — the state
+ * machine worker is queued.
+ *
+ * Returns 0 on success, -ENODEV when @xceiv is not the registered
+ * instance or no start_host callback has been provided.
+ */
+static int msm_otg_set_host(struct otg_transceiver *xceiv, struct usb_bus *host)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+
+	if (!dev || (dev != the_msm_otg))
+		return -ENODEV;
+
+	if (!dev->start_host)
+		return -ENODEV;
+
+	if (!host) {
+		msm_otg_start_host(xceiv, REQUEST_STOP);
+		usb_unregister_notify(&dev->usbdev_nb);
+		dev->otg.host = 0;
+		dev->start_host = 0;
+		disable_idgnd(dev);
+		/* No gadget bound either: nothing left that needs ID/ACA */
+		if (!dev->otg.gadget)
+			disable_idabc(dev);
+		return 0;
+	}
+#ifdef CONFIG_USB_OTG
+	host->otg_port = 1;
+#endif
+	dev->usbdev_nb.notifier_call = usbdev_notify;
+	usb_register_notify(&dev->usbdev_nb);
+	dev->otg.host = host;
+	pr_info("host driver registered w/ tranceiver\n");
+
+#ifndef CONFIG_USB_MSM_72K
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+#endif
+	return 0;
+}
+#endif
+
+/*
+ * msm_otg_set_id_state - feed a PMIC-reported ID pin change into the
+ * OTG state machine.
+ *
+ * @id: non-zero when the ID pin is high (B-device), zero when grounded
+ *      (A-device).
+ *
+ * Duplicate notifications (same value as the cached pmic_id_status)
+ * are ignored.  When ID goes low, A_BUS_REQ is also asserted so VBUS
+ * is supplied without user intervention.  The worker is only queued
+ * once the state machine has left OTG_STATE_UNDEFINED.
+ */
+void msm_otg_set_id_state(int id)
+{
+	struct msm_otg *dev = the_msm_otg;
+	unsigned long flags;
+
+	if (id == dev->pmic_id_status)
+		return;
+
+	if (id) {
+		set_bit(ID, &dev->inputs);
+		dev->pmic_id_status = 1;
+	} else {
+		clear_bit(ID, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+		dev->pmic_id_status = 0;
+	}
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->otg.state != OTG_STATE_UNDEFINED) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/*
+ * msm_otg_set_vbus_state - external (PMIC) notification that VBUS came up.
+ *
+ * @online: non-zero when VBUS is present.
+ *
+ * Only acted upon when the controller is in low power mode and VBUS is
+ * on; in all other cases this is a no-op (presumably the BSV interrupt
+ * path handles VBUS changes while the link is active — confirm).
+ * Sets B_SESS_VLD and queues the state machine worker under a wakelock.
+ */
+void msm_otg_set_vbus_state(int online)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	if (!atomic_read(&dev->in_lpm) || !online)
+		return;
+
+	wake_lock(&dev->wlock);
+	set_bit(B_SESS_VLD, &dev->inputs);
+	queue_work(dev->wq, &dev->sm_work);
+}
+
+/*
+ * msm_otg_irq - top-level interrupt handler for the OTG controller.
+ *
+ * If the controller is in low power mode the IRQ line is masked and a
+ * resume work item is queued instead (registers are not accessible in
+ * LPM).  Otherwise the OTGSC and USBSTS registers are decoded into
+ * state-machine inputs (ID, B_SESS_VLD, A_SRP_DET, A_CONN, B_CONN...)
+ * and, when an input changed, the state machine worker is queued
+ * under a wakelock.
+ *
+ * Returns IRQ_HANDLED when the interrupt was consumed here, IRQ_NONE
+ * when it belongs to someone else (user-controlled mode switch, no
+ * enabled status bit set, or a port-change interrupt that the HCD
+ * must acknowledge).
+ */
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	struct msm_otg *dev = data;
+	u32 otgsc, sts, pc, sts_mask;
+	irqreturn_t ret = IRQ_HANDLED;
+	int work = 0;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	if (atomic_read(&dev->in_lpm)) {
+		disable_irq_nosync(dev->irq);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->otg_resume_work);
+		goto out;
+	}
+
+	/* Return immediately if instead of ID pin, USER controls mode switch */
+	if (dev->pdata->otg_mode == OTG_USER_CONTROL)
+		return IRQ_NONE;
+
+
+	otgsc = readl(USB_OTGSC);
+	sts = readl(USB_USBSTS);
+
+	/* Shift the OTGSC interrupt-enable bits down into the status bit
+	 * positions so a single AND tells us which enabled events fired.
+	 */
+	sts_mask = (otgsc & OTGSC_INTR_MASK) >> 8;
+
+	if (!((otgsc & sts_mask) || (sts & STS_PCI))) {
+		ret = IRQ_NONE;
+		goto out;
+	}
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	pr_debug("IRQ state: %s\n", state_string(state));
+	pr_debug("otgsc = %x\n", otgsc);
+
+	if ((otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
+		/* ID pin change interrupt */
+		if (otgsc & OTGSC_ID) {
+			pr_debug("Id set\n");
+			set_bit(ID, &dev->inputs);
+		} else {
+			pr_debug("Id clear\n");
+			/* Assert a_bus_req to supply power on
+			 * VBUS when Micro/Mini-A cable is connected
+			 * with out user intervention.
+			 */
+			set_bit(A_BUS_REQ, &dev->inputs);
+			clear_bit(ID, &dev->inputs);
+		}
+		/* Write back OTGSC to ack the interrupt status bits */
+		writel(otgsc, USB_OTGSC);
+		work = 1;
+	} else if (otgsc & OTGSC_BSVIS) {
+		writel(otgsc, USB_OTGSC);
+		/* BSV interrupt comes when operating as an A-device
+		 * (VBUS on/off).
+		 * But, handle BSV when charger is removed from ACA in ID_A
+		 */
+		if ((state >= OTG_STATE_A_IDLE) &&
+			!test_bit(ID_A, &dev->inputs))
+			goto out;
+		if (otgsc & OTGSC_BSV) {
+			pr_debug("BSV set\n");
+			set_bit(B_SESS_VLD, &dev->inputs);
+		} else {
+			pr_debug("BSV clear\n");
+			clear_bit(B_SESS_VLD, &dev->inputs);
+		}
+		work = 1;
+	} else if (otgsc & OTGSC_DPIS) {
+		/* Data-pulse interrupt: B-device performed SRP */
+		pr_debug("DPIS detected\n");
+		writel(otgsc, USB_OTGSC);
+		set_bit(A_SRP_DET, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+		work = 1;
+	} else if (sts & STS_PCI) {
+		pc = readl(USB_PORTSC);
+		pr_debug("portsc = %x\n", pc);
+		ret = IRQ_NONE;
+		/* HCD Acks PCI interrupt. We use this to switch
+		 * between different OTG states.
+		 */
+		work = 1;
+		switch (state) {
+		case OTG_STATE_A_SUSPEND:
+			if (dev->otg.host->b_hnp_enable && (pc & PORTSC_CSC) &&
+					!(pc & PORTSC_CCS)) {
+				pr_debug("B_CONN clear\n");
+				clear_bit(B_CONN, &dev->inputs);
+			}
+			break;
+		case OTG_STATE_B_WAIT_ACON:
+			if ((pc & PORTSC_CSC) && (pc & PORTSC_CCS)) {
+				pr_debug("A_CONN set\n");
+				set_bit(A_CONN, &dev->inputs);
+				/* Clear ASE0_BRST timer */
+				msm_otg_del_timer(dev);
+			}
+			break;
+		case OTG_STATE_B_HOST:
+			if ((pc & PORTSC_CSC) && !(pc & PORTSC_CCS)) {
+				pr_debug("A_CONN clear\n");
+				clear_bit(A_CONN, &dev->inputs);
+			}
+			break;
+		default:
+			work = 0;
+			break;
+		}
+	}
+	if (work) {
+#ifdef CONFIG_USB_MSM_ACA
+		/* With ACA, ID can change bcoz of BSVIS as well, so update */
+		if ((otgsc & OTGSC_IDIS) || (otgsc & OTGSC_BSVIS))
+			set_aca_id_inputs(dev);
+#endif
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+out:
+	return ret;
+}
+
+#define ULPI_VERIFY_MAX_LOOP_COUNT  5
+#define PHY_CALIB_RETRY_COUNT 10
+/*
+ * phy_clk_reset - pulse the PHY reset clock: assert, wait ~1 ms,
+ * de-assert, wait ~1 ms.
+ *
+ * The assert/de-assert polarity is flipped on boards that declare
+ * phy_reset_sig_inverted in platform data.  Failures from clk_reset()
+ * are logged and the sequence aborted; no error is propagated to the
+ * caller.
+ */
+static void phy_clk_reset(struct msm_otg *dev)
+{
+	unsigned rc;
+	enum clk_reset_action assert = CLK_RESET_ASSERT;
+
+	if (dev->pdata->phy_reset_sig_inverted)
+		assert = CLK_RESET_DEASSERT;
+
+	rc = clk_reset(dev->phy_reset_clk, assert);
+	if (rc) {
+		pr_err("%s: phy clk assert failed\n", __func__);
+		return;
+	}
+
+	msleep(1);
+
+	rc = clk_reset(dev->phy_reset_clk, !assert);
+	if (rc) {
+		pr_err("%s: phy clk deassert failed\n", __func__);
+		return;
+	}
+
+	msleep(1);
+}
+
+/*
+ * ulpi_read_with_reset - ulpi_read() with PHY-reset retries.
+ *
+ * Retries the read up to ULPI_VERIFY_MAX_LOOP_COUNT times, pulsing the
+ * PHY reset clock between attempts.  A raw read value of 0xffffffff is
+ * treated as a failed attempt.  Returns the register value on success,
+ * or (unsigned)-1 (== 0xffffffff, which callers test for) after all
+ * retries fail.
+ */
+static unsigned ulpi_read_with_reset(struct msm_otg *dev, unsigned reg)
+{
+	int temp;
+	unsigned res;
+
+	for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) {
+		res = ulpi_read(dev, reg);
+		if (res != 0xffffffff)
+			return res;
+
+		phy_clk_reset(dev);
+	}
+
+	pr_err("%s: ulpi read failed for %d times\n",
+			__func__, ULPI_VERIFY_MAX_LOOP_COUNT);
+
+	return -1;
+}
+
+/*
+ * ulpi_write_with_reset - ulpi_write() with PHY-reset retries.
+ *
+ * Retries the write up to ULPI_VERIFY_MAX_LOOP_COUNT times, pulsing
+ * the PHY reset clock between attempts.  Returns 0 on success, -1
+ * after all retries fail.
+ */
+static int ulpi_write_with_reset(struct msm_otg *dev,
+unsigned val, unsigned reg)
+{
+	int temp, res;
+
+	for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) {
+		res = ulpi_write(dev, val, reg);
+		if (!res)
+			return 0;
+		phy_clk_reset(dev);
+	}
+	pr_err("%s: ulpi write failed for %d times\n",
+		__func__, ULPI_VERIFY_MAX_LOOP_COUNT);
+
+	return -1;
+}
+
+/* some of the older targets does not turn off the PLL
+ * if onclock bit is set and clocksuspendM bit is on,
+ * hence clear them too and initiate the suspend mode
+ * by clearing SupendM bit.
+ */
+/*
+ * turn_off_phy_pll - force the PHY PLL off via ULPI register writes:
+ * clear ONCLOCK in CONFIG_REG1, clear CLOCK_SUSPENDM in IFC_CTRL, then
+ * clear SUSPENDM in FUNC_CTRL to enter suspend mode.
+ *
+ * Returns 0 on success, -ETIMEDOUT if any ULPI access fails after its
+ * internal retries.
+ */
+static inline int turn_off_phy_pll(struct msm_otg *dev)
+{
+	unsigned res;
+
+	res = ulpi_read_with_reset(dev, ULPI_CONFIG_REG1);
+	if (res == 0xffffffff)
+		return -ETIMEDOUT;
+
+	res = ulpi_write_with_reset(dev,
+		res & ~(ULPI_ONCLOCK), ULPI_CONFIG_REG1);
+	if (res)
+		return -ETIMEDOUT;
+
+	res = ulpi_write_with_reset(dev,
+		ULPI_CLOCK_SUSPENDM, ULPI_IFC_CTRL_CLR);
+	if (res)
+		return -ETIMEDOUT;
+
+	/*Clear SuspendM bit to initiate suspend mode */
+	res = ulpi_write_with_reset(dev,
+		ULPI_SUSPENDM, ULPI_FUNC_CTRL_CLR);
+	if (res)
+		return -ETIMEDOUT;
+
+	return res;
+}
+
+/*
+ * check_phy_caliberation - read the ULPI_DEBUG register and decide
+ * whether PHY calibration completed.
+ *
+ * Success requires the calibration-status bit to be clear and the
+ * calibration value field to be non-zero.  Returns 0 on success,
+ * -ETIMEDOUT when the ULPI read itself fails, -1 when calibration is
+ * not (yet) done.
+ */
+static inline int check_phy_caliberation(struct msm_otg *dev)
+{
+	unsigned res;
+
+	res = ulpi_read_with_reset(dev, ULPI_DEBUG);
+
+	if (res == 0xffffffff)
+		return -ETIMEDOUT;
+
+	if (!(res & ULPI_CALIB_STS) && ULPI_CALIB_VAL(res))
+		return 0;
+
+	return -1;
+}
+
+/*
+ * msm_otg_phy_caliberate - run the PHY calibration sequence, retrying
+ * up to PHY_CALIB_RETRY_COUNT times.
+ *
+ * Each attempt turns off the PHY PLL (entering suspend), pulses the
+ * PHY reset clock to bring the PHY back out of suspend, then checks
+ * the calibration status.  Returns 0 on success, -ETIMEDOUT when the
+ * PLL shutdown fails, or the last check result after all retries.
+ *
+ * NOTE(review): 'res' is declared unsigned long while the helpers
+ * return negative error codes; non-zero tests still work, but the
+ * final return narrows a huge unsigned value to int — confirm this is
+ * intentional (callers appear to only test for non-zero).
+ */
+static int msm_otg_phy_caliberate(struct msm_otg *dev)
+{
+	int i = 0;
+	unsigned long res;
+
+	do {
+		res = turn_off_phy_pll(dev);
+		if (res)
+			return -ETIMEDOUT;
+
+		/* bring phy out of suspend */
+		phy_clk_reset(dev);
+
+		res = check_phy_caliberation(dev);
+		if (!res)
+			return res;
+		i++;
+
+	} while (i < PHY_CALIB_RETRY_COUNT);
+
+	return res;
+}
+
+/*
+ * msm_otg_phy_reset - hard-reset the PHY and USB link.
+ *
+ * Sequence: assert the HS clock reset around a PHY reset-clock pulse,
+ * reset the link controller via USBCMD, select the ULPI PHY in PORTSC,
+ * run PHY calibration (skipped for wall chargers), and finally reset
+ * the link again, polling until the RESET bit self-clears.
+ *
+ * Returns 0 on success, a negative value on clock-reset, calibration,
+ * or link-reset failure.
+ */
+static int msm_otg_phy_reset(struct msm_otg *dev)
+{
+	unsigned rc;
+	unsigned temp;
+	unsigned long timeout;
+
+	rc = clk_reset(dev->hs_clk, CLK_RESET_ASSERT);
+	if (rc) {
+		pr_err("%s: usb hs clk assert failed\n", __func__);
+		return -1;
+	}
+
+	phy_clk_reset(dev);
+
+	rc = clk_reset(dev->hs_clk, CLK_RESET_DEASSERT);
+	if (rc) {
+		pr_err("%s: usb hs clk deassert failed\n", __func__);
+		return -1;
+	}
+	/* Observing ulpi timeouts as part of PHY calibration. On resetting
+	 * the HW link explicity by setting the RESET bit in the USBCMD
+	 * register before PHY calibration fixes the ulpi timeout issue.
+	 * This workaround is required for unicorn target
+	 */
+	writel_relaxed(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	do {
+		if (time_after(jiffies, timeout)) {
+			pr_err("msm_otg: usb link reset timeout\n");
+			break;
+		}
+		usleep_range(1000, 1200);
+	} while (readl_relaxed(USB_USBCMD) & USBCMD_RESET);
+
+	/* select ULPI phy */
+	temp = (readl(USB_PORTSC) & ~PORTSC_PTS);
+	writel(temp | PORTSC_PTS_ULPI, USB_PORTSC);
+
+	/* Calibration is skipped for wall chargers; only the link reset
+	 * below is needed in that case.
+	 */
+	if (atomic_read(&dev->chg_type) !=
+				USB_CHG_TYPE__WALLCHARGER) {
+		rc = msm_otg_phy_caliberate(dev);
+		if (rc)
+			return rc;
+	}
+
+	/* TBD: There are two link resets. One is below and other one
+	 * is done immediately after this function. See if we can
+	 * eliminate one of these.
+	 */
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	do {
+		if (time_after(jiffies, timeout)) {
+			pr_err("msm_otg: usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	} while (readl(USB_USBCMD) & USBCMD_RESET);
+
+	if (readl(USB_USBCMD) & USBCMD_RESET) {
+		pr_err("%s: usb core reset failed\n", __func__);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * otg_reset - reset the controller and optionally the PHY, then
+ * re-program the link for the current OTG role.
+ *
+ * @xceiv:     transceiver instance.
+ * @phy_reset: non-zero to also reset/re-initialize the PHY (and mask
+ *             all PHY interrupts) before the link reset.
+ *
+ * After the link reset the ULPI PHY is re-selected, electrical tuning
+ * parameters re-applied, USBMODE set to host or device according to
+ * the current ID input / a_peripheral role, and session-valid /
+ * ID-ground / ID-ACA detection re-enabled as appropriate.  Because
+ * PHY events can be lost across the reset, the current BSV and
+ * ID-ground levels are re-sampled and any missed transitions are
+ * injected as inputs, kicking the state machine worker.
+ */
+static void otg_reset(struct otg_transceiver *xceiv, int phy_reset)
+{
+	struct msm_otg *dev = container_of(xceiv, struct msm_otg, otg);
+	unsigned long timeout;
+	u32 mode, work = 0;
+
+	clk_enable(dev->hs_clk);
+
+	if (!phy_reset)
+		goto reset_link;
+
+	/* Board-specific PHY reset hook takes precedence when provided */
+	if (dev->pdata->phy_reset)
+		dev->pdata->phy_reset(dev->regs);
+	else
+		msm_otg_phy_reset(dev);
+
+	/*disable all phy interrupts*/
+	ulpi_write(dev, 0xFF, 0x0F);
+	ulpi_write(dev, 0xFF, 0x12);
+	msleep(100);
+
+reset_link:
+	writel(USBCMD_RESET, USB_USBCMD);
+	timeout = jiffies + USB_LINK_RESET_TIMEOUT;
+	do {
+		if (time_after(jiffies, timeout)) {
+			pr_err("msm_otg: usb link reset timeout\n");
+			break;
+		}
+		msleep(1);
+	} while (readl(USB_USBCMD) & USBCMD_RESET);
+
+	/* select ULPI phy */
+	writel(0x80000000, USB_PORTSC);
+
+	/* Re-apply PHY electrical tuning lost across the reset */
+	set_pre_emphasis_level(dev);
+	set_hsdrv_slope(dev);
+	set_cdr_auto_reset(dev);
+	set_driver_amplitude(dev);
+	set_se1_gating(dev);
+
+	writel(0x0, USB_AHB_BURST);
+	writel(0x00, USB_AHB_MODE);
+	/* Ensure that RESET operation is completed before turning off clock */
+	mb();
+
+	clk_disable(dev->hs_clk);
+
+	/* Choose device vs host mode from the current role/ID input */
+	if ((xceiv->gadget && xceiv->gadget->is_a_peripheral) ||
+			test_bit(ID, &dev->inputs))
+		mode = USBMODE_SDIS | USBMODE_DEVICE;
+	else
+		mode = USBMODE_SDIS | USBMODE_HOST;
+	writel(mode, USB_USBMODE);
+
+	writel_relaxed((readl_relaxed(USB_OTGSC) | OTGSC_IDPU), USB_OTGSC);
+	if (dev->otg.gadget) {
+		enable_sess_valid(dev);
+		/* Due to the above 100ms delay, interrupts from PHY are
+		 * sometimes missed during fast plug-in/plug-out of cable.
+		 * Check for such cases here.
+		 */
+		if (is_b_sess_vld() && !test_bit(B_SESS_VLD, &dev->inputs)) {
+			pr_debug("%s: handle missing BSV event\n", __func__);
+			set_bit(B_SESS_VLD, &dev->inputs);
+			work = 1;
+		} else if (!is_b_sess_vld() && test_bit(B_SESS_VLD,
+				&dev->inputs)) {
+			pr_debug("%s: handle missing !BSV event\n", __func__);
+			clear_bit(B_SESS_VLD, &dev->inputs);
+			work = 1;
+		}
+	}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+	if (dev->otg.host && !dev->pmic_id_notif_supp) {
+		enable_idgnd(dev);
+		/* Handle missing ID_GND interrupts during fast PIPO */
+		if (is_host() && test_bit(ID, &dev->inputs)) {
+			pr_debug("%s: handle missing ID_GND event\n", __func__);
+			clear_bit(ID, &dev->inputs);
+			work = 1;
+		} else if (!is_host() && !test_bit(ID, &dev->inputs)) {
+			pr_debug("%s: handle missing !ID_GND event\n",
+						__func__);
+			set_bit(ID, &dev->inputs);
+			work = 1;
+		}
+	} else {
+		disable_idgnd(dev);
+	}
+#endif
+
+	enable_idabc(dev);
+
+	if (work) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+}
+
+static void msm_otg_sm_work(struct work_struct *w)
+{
+	struct msm_otg	*dev = container_of(w, struct msm_otg, sm_work);
+	enum chg_type	chg_type = atomic_read(&dev->chg_type);
+	int ret;
+	int work = 0;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	if (atomic_read(&dev->in_lpm))
+		msm_otg_set_suspend(&dev->otg, 0);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	switch (state) {
+	case OTG_STATE_UNDEFINED:
+
+		/*
+		 * We can come here when LPM fails with wall charger
+		 * connected. Increment the PM usage counter to reflect
+		 * the actual device state. Change the state to
+		 * B_PERIPHERAL and schedule the work which takes care
+		 * of resetting the PHY and putting the hardware in
+		 * low power mode.
+		 */
+		if (atomic_read(&dev->chg_type) ==
+				USB_CHG_TYPE__WALLCHARGER) {
+			msm_otg_get_resume(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			work = 1;
+			break;
+		}
+
+		/* Reset both phy and link */
+		otg_reset(&dev->otg, 1);
+
+#ifdef CONFIG_USB_MSM_ACA
+		set_aca_id_inputs(dev);
+#endif
+		if (dev->pdata->otg_mode == OTG_USER_CONTROL) {
+			if ((dev->pdata->usb_mode == USB_PERIPHERAL_MODE) ||
+					!dev->otg.host) {
+				set_bit(ID, &dev->inputs);
+				set_bit(B_SESS_VLD, &dev->inputs);
+			}
+		} else {
+			if (!dev->otg.host || !is_host())
+				set_bit(ID, &dev->inputs);
+
+			if (dev->otg.gadget && is_b_sess_vld())
+				set_bit(B_SESS_VLD, &dev->inputs);
+		}
+		spin_lock_irqsave(&dev->lock, flags);
+		if ((test_bit(ID, &dev->inputs)) &&
+				!test_bit(ID_A, &dev->inputs)) {
+			dev->otg.state = OTG_STATE_B_IDLE;
+		} else {
+			set_bit(A_BUS_REQ, &dev->inputs);
+			dev->otg.state = OTG_STATE_A_IDLE;
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		work = 1;
+		break;
+	case OTG_STATE_B_IDLE:
+		dev->otg.default_a = 0;
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs)) {
+			pr_debug("!id || id_A\n");
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			otg_reset(&dev->otg, 0);
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_set_power(&dev->otg, 0);
+			work = 1;
+		} else if (test_bit(B_SESS_VLD, &dev->inputs) &&
+				!test_bit(ID_B, &dev->inputs)) {
+			pr_debug("b_sess_vld\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_set_power(&dev->otg, 0);
+			msm_otg_start_peripheral(&dev->otg, 1);
+		} else if (test_bit(B_BUS_REQ, &dev->inputs)) {
+			pr_debug("b_sess_end && b_bus_req\n");
+			ret = msm_otg_start_srp(&dev->otg);
+			if (ret < 0) {
+				/* notify user space */
+				clear_bit(B_BUS_REQ, &dev->inputs);
+				work = 1;
+				break;
+			}
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_SRP_INIT;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_timer(dev, TB_SRP_FAIL, B_SRP_FAIL);
+			break;
+		} else if (test_bit(ID_B, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		} else {
+			msm_otg_set_power(&dev->otg, 0);
+			pr_debug("entering into lpm\n");
+			msm_otg_put_suspend(dev);
+
+			if (dev->pdata->ldo_set_voltage)
+				dev->pdata->ldo_set_voltage(3075);
+		}
+		break;
+	case OTG_STATE_B_SRP_INIT:
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs) ||
+				test_bit(ID_C, &dev->inputs) ||
+				(test_bit(B_SESS_VLD, &dev->inputs) &&
+				!test_bit(ID_B, &dev->inputs))) {
+			pr_debug("!id || id_a/c || b_sess_vld+!id_b\n");
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			work = 1;
+		} else if (test_bit(B_SRP_FAIL, &dev->tmouts)) {
+			pr_debug("b_srp_fail\n");
+			/* notify user space */
+			msm_otg_send_event(&dev->otg,
+				OTG_EVENT_NO_RESP_FOR_SRP);
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			clear_bit(B_SRP_FAIL, &dev->tmouts);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			dev->b_last_se0_sess = jiffies;
+			work = 1;
+		}
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs) ||
+				test_bit(ID_B, &dev->inputs) ||
+				!test_bit(B_SESS_VLD, &dev->inputs)) {
+			pr_debug("!id  || id_a/b || !b_sess_vld\n");
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->b_last_se0_sess = jiffies;
+
+			/* Workaround: Reset phy after session */
+			otg_reset(&dev->otg, 1);
+			work = 1;
+		} else if (test_bit(B_BUS_REQ, &dev->inputs) &&
+				dev->otg.gadget->b_hnp_enable &&
+				test_bit(A_BUS_SUSPEND, &dev->inputs)) {
+			pr_debug("b_bus_req && b_hnp_en && a_bus_suspend\n");
+			msm_otg_start_timer(dev, TB_ASE0_BRST, B_ASE0_BRST);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_WAIT_ACON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* start HCD even before A-device enable
+			 * pull-up to meet HNP timings.
+			 */
+			dev->otg.host->is_b_host = 1;
+			msm_otg_start_host(&dev->otg, REQUEST_START);
+
+		} else if (test_bit(ID_C, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		} else if (chg_type == USB_CHG_TYPE__WALLCHARGER) {
+#ifdef CONFIG_USB_MSM_ACA
+			del_timer_sync(&dev->id_timer);
+#endif
+			/* Workaround: Reset PHY in SE1 state */
+			otg_reset(&dev->otg, 1);
+			pr_debug("entering into lpm with wall-charger\n");
+			msm_otg_put_suspend(dev);
+			/* Allow idle power collapse */
+			otg_pm_qos_update_latency(dev, 0);
+		}
+		break;
+	case OTG_STATE_B_WAIT_ACON:
+		if (!test_bit(ID, &dev->inputs) ||
+				test_bit(ID_A, &dev->inputs) ||
+				test_bit(ID_B, &dev->inputs) ||
+				!test_bit(B_SESS_VLD, &dev->inputs)) {
+			pr_debug("!id || id_a/b || !b_sess_vld\n");
+			msm_otg_del_timer(dev);
+			/* A-device is physically disconnected during
+			 * HNP. Remove HCD.
+			 */
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->otg.host->is_b_host = 0;
+
+			clear_bit(B_BUS_REQ, &dev->inputs);
+			clear_bit(A_BUS_SUSPEND, &dev->inputs);
+			dev->b_last_se0_sess = jiffies;
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* Workaround: Reset phy after session */
+			otg_reset(&dev->otg, 1);
+			work = 1;
+		} else if (test_bit(A_CONN, &dev->inputs)) {
+			pr_debug("a_conn\n");
+			clear_bit(A_BUS_SUSPEND, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_HOST;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (test_bit(ID_C, &dev->inputs)) {
+				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+				msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+			}
+		} else if (test_bit(B_ASE0_BRST, &dev->tmouts)) {
+			/* TODO: A-device may send reset after
+			 * enabling HNP; a_bus_resume case is
+			 * not handled for now.
+			 */
+			pr_debug("b_ase0_brst_tmout\n");
+			msm_otg_send_event(&dev->otg,
+				OTG_EVENT_HNP_FAILED);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->otg.host->is_b_host = 0;
+			clear_bit(B_ASE0_BRST, &dev->tmouts);
+			clear_bit(A_BUS_SUSPEND, &dev->inputs);
+			clear_bit(B_BUS_REQ, &dev->inputs);
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 1);
+		} else if (test_bit(ID_C, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		}
+		break;
+	case OTG_STATE_B_HOST:
+		/* B_BUS_REQ is not exposed to user space. So
+		 * it must be A_CONN for now.
+		 */
+		if (!test_bit(B_BUS_REQ, &dev->inputs) ||
+				!test_bit(A_CONN, &dev->inputs)) {
+			pr_debug("!b_bus_req || !a_conn\n");
+			clear_bit(A_CONN, &dev->inputs);
+			clear_bit(B_BUS_REQ, &dev->inputs);
+
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->otg.host->is_b_host = 0;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Workaround: Reset phy after session */
+			otg_reset(&dev->otg, 1);
+			work = 1;
+		} else if (test_bit(ID_C, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+		}
+		break;
+	case OTG_STATE_A_IDLE:
+		dev->otg.default_a = 1;
+		if (test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) {
+			pr_debug("id && !id_a\n");
+			dev->otg.default_a = 0;
+			otg_reset(&dev->otg, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_B_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_set_power(&dev->otg, 0);
+			work = 1;
+		} else if (!test_bit(A_BUS_DROP, &dev->inputs) &&
+				(test_bit(A_SRP_DET, &dev->inputs) ||
+				 test_bit(A_BUS_REQ, &dev->inputs))) {
+			pr_debug("!a_bus_drop && (a_srp_det || a_bus_req)\n");
+
+			clear_bit(A_SRP_DET, &dev->inputs);
+			/* Disable SRP detection */
+			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) &
+					~OTGSC_DPIE, USB_OTGSC);
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VRISE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* ACA: ID_A: Stop charging untill enumeration */
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(&dev->otg, 0);
+			else
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+			msm_otg_start_timer(dev, TA_WAIT_VRISE, A_WAIT_VRISE);
+			/* no need to schedule work now */
+		} else {
+			pr_debug("No session requested\n");
+
+			/* A-device is not providing power on VBUS.
+			 * Enable SRP detection.
+			 */
+			writel((readl(USB_OTGSC) & ~OTGSC_INTR_STS_MASK) |
+					OTGSC_DPIE, USB_OTGSC);
+			msm_otg_put_suspend(dev);
+
+		}
+		break;
+	case OTG_STATE_A_WAIT_VRISE:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_WAIT_VRISE, &dev->tmouts)) {
+			pr_debug("id || a_bus_drop || a_wait_vrise_tmout\n");
+			clear_bit(A_BUS_REQ, &dev->inputs);
+			msm_otg_del_timer(dev);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+		} else if (test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("a_vbus_vld\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+			/* Start HCD to detect peripherals. */
+			msm_otg_start_host(&dev->otg, REQUEST_START);
+		}
+		break;
+	case OTG_STATE_A_WAIT_BCON:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_WAIT_BCON, &dev->tmouts)) {
+			pr_debug("id_f/b/c || a_bus_drop ||"
+					"a_wait_bcon_tmout\n");
+			if (test_bit(A_WAIT_BCON, &dev->tmouts))
+				msm_otg_send_event(&dev->otg,
+					OTG_EVENT_DEV_CONN_TMOUT);
+			msm_otg_del_timer(dev);
+			clear_bit(A_BUS_REQ, &dev->inputs);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			/* ACA: ID_A with NO accessory, just the A plug is
+			 * attached to ACA: Use IDCHG_MAX for charging
+			 */
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(&dev->otg, USB_IDCHG_MAX);
+			else
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+		} else if (test_bit(B_CONN, &dev->inputs)) {
+			pr_debug("b_conn\n");
+			msm_otg_del_timer(dev);
+			/* HCD is added already. just move to
+			 * A_HOST state.
+			 */
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_HOST;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (test_bit(ID_A, &dev->inputs)) {
+				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+				msm_otg_set_power(&dev->otg,
+					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
+			}
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			msm_otg_del_timer(dev);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+		} else if (!test_bit(ID, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs)) {
+			pr_debug("id_f/b/c || a_bus_drop\n");
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			/* no work */
+		} else if (!test_bit(A_BUS_REQ, &dev->inputs)) {
+			/* a_bus_req is de-asserted when root hub is
+			 * suspended or HNP is in progress.
+			 */
+			pr_debug("!a_bus_req\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_SUSPEND;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (dev->otg.host->b_hnp_enable) {
+				msm_otg_start_timer(dev, TA_AIDL_BDIS,
+						A_AIDL_BDIS);
+			} else {
+				/* No HNP. Root hub suspended */
+				msm_otg_put_suspend(dev);
+			}
+			if (test_bit(ID_A, &dev->inputs))
+				msm_otg_set_power(&dev->otg,
+						USB_IDCHG_MIN - USB_IB_UNCFG);
+		} else if (!test_bit(B_CONN, &dev->inputs)) {
+			pr_debug("!b_conn\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_set_power(&dev->otg,
+					USB_IDCHG_MIN - get_aca_bmaxpower(dev));
+		} else if (!test_bit(ID, &dev->inputs)) {
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
+			msm_otg_set_power(&dev->otg, 0);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_SUSPEND:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_AIDL_BDIS, &dev->tmouts)) {
+			pr_debug("id_f/b/c || a_bus_drop ||"
+					"a_aidl_bdis_tmout\n");
+			if (test_bit(A_AIDL_BDIS, &dev->tmouts))
+				msm_otg_send_event(&dev->otg,
+					OTG_EVENT_HNP_FAILED);
+			msm_otg_del_timer(dev);
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			msm_otg_del_timer(dev);
+			clear_bit(B_CONN, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+		} else if (!test_bit(B_CONN, &dev->inputs) &&
+				dev->otg.host->b_hnp_enable) {
+			pr_debug("!b_conn && b_hnp_enable");
+			/* Clear AIDL_BDIS timer */
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_PERIPHERAL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			msm_otg_start_host(&dev->otg, REQUEST_HNP_SUSPEND);
+
+			/* We may come here even when B-dev is physically
+			 * disconnected during HNP. We go back to host
+			 * role if bus is idle for BIDL_ADIS time.
+			 */
+			dev->otg.gadget->is_a_peripheral = 1;
+			msm_otg_start_peripheral(&dev->otg, 1);
+			/* If ID_A: we can charge in a_peripheral as well */
+			if (test_bit(ID_A, &dev->inputs)) {
+				atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+				msm_otg_set_power(&dev->otg,
+					 USB_IDCHG_MIN - USB_IB_UNCFG);
+			}
+		} else if (!test_bit(B_CONN, &dev->inputs) &&
+				!dev->otg.host->b_hnp_enable) {
+			pr_debug("!b_conn && !b_hnp_enable");
+			/* bus request is dropped during suspend.
+			 * acquire again for next device.
+			 */
+			set_bit(A_BUS_REQ, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg,
+					 USB_IDCHG_MIN - USB_IB_UNCFG);
+		} else if (!test_bit(ID, &dev->inputs)) {
+			msm_otg_set_power(&dev->otg, 0);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_PERIPHERAL:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs)) {
+			pr_debug("id _f/b/c || a_bus_drop\n");
+			/* Clear BIDL_ADIS timer */
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->otg.gadget->is_a_peripheral = 0;
+			/* HCD was suspended before. Stop it now */
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+
+			/* Reset both phy and link */
+			otg_reset(&dev->otg, 1);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (!test_bit(A_VBUS_VLD, &dev->inputs)) {
+			pr_debug("!a_vbus_vld\n");
+			/* Clear BIDL_ADIS timer */
+			msm_otg_del_timer(dev);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_VBUS_ERR;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->otg.gadget->is_a_peripheral = 0;
+			/* HCD was suspended before. Stop it now */
+			msm_otg_start_host(&dev->otg, REQUEST_STOP);
+		} else if (test_bit(A_BIDL_ADIS, &dev->tmouts)) {
+			pr_debug("a_bidl_adis_tmout\n");
+			msm_otg_start_peripheral(&dev->otg, 0);
+			dev->otg.gadget->is_a_peripheral = 0;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_BCON;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			set_bit(A_BUS_REQ, &dev->inputs);
+			msm_otg_start_host(&dev->otg, REQUEST_HNP_RESUME);
+			if (TA_WAIT_BCON > 0)
+				msm_otg_start_timer(dev, TA_WAIT_BCON,
+					A_WAIT_BCON);
+			msm_otg_set_power(&dev->otg, 0);
+		} else if (test_bit(ID_A, &dev->inputs)) {
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			atomic_set(&dev->chg_type, USB_CHG_TYPE__SDP);
+			msm_otg_set_power(&dev->otg,
+					 USB_IDCHG_MIN - USB_IB_UNCFG);
+		} else if (!test_bit(ID, &dev->inputs)) {
+			msm_otg_set_power(&dev->otg, 0);
+			dev->pdata->vbus_power(USB_PHY_INTEGRATED, 1);
+		}
+		break;
+	case OTG_STATE_A_WAIT_VFALL:
+		if (test_bit(A_WAIT_VFALL, &dev->tmouts)) {
+			clear_bit(A_VBUS_VLD, &dev->inputs);
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_IDLE;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			work = 1;
+		}
+		break;
+	case OTG_STATE_A_VBUS_ERR:
+		if ((test_bit(ID, &dev->inputs) &&
+				!test_bit(ID_A, &dev->inputs)) ||
+				test_bit(A_BUS_DROP, &dev->inputs) ||
+				test_bit(A_CLR_ERR, &dev->inputs)) {
+			spin_lock_irqsave(&dev->lock, flags);
+			dev->otg.state = OTG_STATE_A_WAIT_VFALL;
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!test_bit(ID_A, &dev->inputs))
+				dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+			msm_otg_start_timer(dev, TA_WAIT_VFALL, A_WAIT_VFALL);
+			msm_otg_set_power(&dev->otg, 0);
+		}
+		break;
+	default:
+		pr_err("invalid OTG state\n");
+	}
+
+	if (work)
+		queue_work(dev->wq, &dev->sm_work);
+
+#ifdef CONFIG_USB_MSM_ACA
+	/* Start id_polling if (ID_FLOAT&BSV) || ID_A/B/C */
+	if ((test_bit(ID, &dev->inputs) &&
+			test_bit(B_SESS_VLD, &dev->inputs) &&
+			chg_type != USB_CHG_TYPE__WALLCHARGER) ||
+			test_bit(ID_A, &dev->inputs)) {
+		mod_timer(&dev->id_timer, jiffies +
+				 msecs_to_jiffies(OTG_ID_POLL_MS));
+		return;
+	}
+	del_timer(&dev->id_timer);
+#endif
+	/* IRQ/sysfs may queue work. Check work_pending, otherwise
+	 * we might end up releasing wakelock after it is acquired
+	 * in IRQ/sysfs.
+	 */
+	if (!work_pending(&dev->sm_work) && !hrtimer_active(&dev->timer) &&
+			!work_pending(&dev->otg_resume_work))
+		wake_unlock(&dev->wlock);
+}
+
+#ifdef CONFIG_USB_MSM_ACA
+static void msm_otg_id_func(unsigned long _dev)
+{
+	struct msm_otg	*dev = (struct msm_otg *) _dev;
+	int		phy_ints;
+
+#ifdef CONFIG_USB_MSM_STANDARD_ACA
+	/*
+	 * When standard ACA is attached RID_A and RID_GND states are only
+	 * possible.  RID_A-->RID_GND transition generates IdGnd interrupt
+	 * from PHY.  Hence polling is disabled.
+	 */
+	if (test_bit(ID_A, &dev->inputs))
+		goto out;
+#endif
+
+	if (atomic_read(&dev->in_lpm))
+		msm_otg_set_suspend(&dev->otg, 0);
+
+	phy_ints = ulpi_read(dev, 0x13);
+
+	/*
+	 * ACA timer will be kicked again after the PHY
+	 * state is recovered.
+	 */
+	if (phy_ints == -ETIMEDOUT)
+		return;
+
+
+	/* If id_gnd happened then stop and let isr take care of this */
+	if (phy_id_state_gnd(phy_ints))
+		goto out;
+
+	if ((test_bit(ID_A, &dev->inputs) == phy_id_state_a(phy_ints)) &&
+	    (test_bit(ID_B, &dev->inputs) == phy_id_state_b(phy_ints)) &&
+	    (test_bit(ID_C, &dev->inputs) == phy_id_state_c(phy_ints))) {
+		mod_timer(&dev->id_timer,
+				jiffies + msecs_to_jiffies(OTG_ID_POLL_MS));
+		goto out;
+	} else {
+		set_aca_id_inputs(dev);
+	}
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+out:
+	/* OOPS: running while !BSV, schedule work to initiate LPM */
+	if (!is_b_sess_vld()) {
+		clear_bit(B_SESS_VLD, &dev->inputs);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+	return;
+}
+#endif
+#ifdef CONFIG_USB_OTG
+static ssize_t
+set_pwr_down(struct device *_dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct msm_otg *dev = the_msm_otg;
+	int value;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* Applicable for only A-Device */
+	if (state <= OTG_STATE_A_IDLE)
+		return -EINVAL;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+	if (test_bit(A_BUS_DROP, &dev->inputs) != !!value) {
+		change_bit(A_BUS_DROP, &dev->inputs);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(pwr_down, S_IRUGO | S_IWUSR, NULL, set_pwr_down);
+
+static ssize_t
+set_srp_req(struct device *_dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct msm_otg *dev = the_msm_otg;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state != OTG_STATE_B_IDLE)
+		return -EINVAL;
+
+	set_bit(B_BUS_REQ, &dev->inputs);
+	wake_lock(&dev->wlock);
+	queue_work(dev->wq, &dev->sm_work);
+
+	return count;
+}
+static DEVICE_ATTR(srp_req, S_IRUGO | S_IWUSR, NULL, set_srp_req);
+
+static ssize_t
+set_clr_err(struct device *_dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct msm_otg *dev = the_msm_otg;
+	enum usb_otg_state state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	state = dev->otg.state;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (state == OTG_STATE_A_VBUS_ERR) {
+		set_bit(A_CLR_ERR, &dev->inputs);
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+
+	return count;
+}
+static DEVICE_ATTR(clr_err, S_IRUGO | S_IWUSR, NULL, set_clr_err);
+
+static struct attribute *msm_otg_attrs[] = {
+	&dev_attr_pwr_down.attr,
+	&dev_attr_srp_req.attr,
+	&dev_attr_clr_err.attr,
+	NULL,
+};
+
+static struct attribute_group msm_otg_attr_grp = {
+	.attrs = msm_otg_attrs,
+};
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int otg_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+static ssize_t otg_mode_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct msm_otg *dev = file->private_data;
+	int ret = count;
+	int work = 0;
+	unsigned long flags;
+	char kbuf[16];
+	if (!count || count >= sizeof(kbuf) || copy_from_user(kbuf, buf, count))
+		return -EFAULT;
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->pdata->otg_mode = OTG_USER_CONTROL;
+	if (!memcmp(kbuf, "none", count - 1)) {
+		clear_bit(B_SESS_VLD, &dev->inputs);
+		set_bit(ID, &dev->inputs);
+		work = 1;
+	} else if (!memcmp(kbuf, "peripheral", count - 1)) {
+		set_bit(B_SESS_VLD, &dev->inputs);
+		set_bit(ID, &dev->inputs);
+		work = 1;
+	} else if (!memcmp(kbuf, "host", count - 1)) {
+		clear_bit(B_SESS_VLD, &dev->inputs);
+		clear_bit(ID, &dev->inputs);
+		set_bit(A_BUS_REQ, &dev->inputs);
+		work = 1;
+	} else {
+		pr_info("%s: unknown mode specified\n", __func__);
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	if (work) {
+		wake_lock(&dev->wlock);
+		queue_work(dev->wq, &dev->sm_work);
+	}
+	return ret;
+}
+const struct file_operations otgfs_fops = {
+	.open	= otg_open,
+	.write	= otg_mode_write,
+};
+
+#define OTG_INFO_SIZE 512
+static ssize_t otg_info_read(struct file *file, char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	char *buf;
+	int temp = 0;
+	int ret;
+	struct msm_otg *dev = file->private_data;
+
+	buf = kzalloc(sizeof(char) * OTG_INFO_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	temp += scnprintf(buf + temp, OTG_INFO_SIZE - temp,
+			"OTG State:             %s\n"
+			"OTG Mode:              %d\n"
+			"OTG Inputs:            0x%lx\n"
+			"Charger Type:          %d\n"
+			"PMIC VBUS Support:     %u\n"
+			"PMIC ID Support:       %u\n"
+			"Core Clock:            %u\n"
+			"USB In SPS:            %d\n"
+			"pre_emphasis_level:    0x%x\n"
+			"cdr_auto_reset:        0x%x\n"
+			"hs_drv_amplitude:      0x%x\n"
+			"se1_gate_state:        0x%x\n"
+			"swfi_latency:          0x%x\n"
+			"PHY Powercollapse:     0x%x\n"
+			"PCLK Voting:           0x%x\n",
+			state_string(dev->otg.state),
+			dev->pdata->otg_mode,
+			dev->inputs,
+			atomic_read(&dev->chg_type),
+			dev->pmic_vbus_notif_supp,
+			dev->pmic_id_notif_supp,
+			dev->pdata->core_clk,
+			dev->pdata->usb_in_sps,
+			dev->pdata->pemp_level,
+			dev->pdata->cdr_autoreset,
+			dev->pdata->drv_ampl,
+			dev->pdata->se1_gating,
+			dev->pdata->swfi_latency,
+			dev->pdata->phy_can_powercollapse,
+			pclk_requires_voting(&dev->otg));
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+
+	kfree(buf);
+
+	return ret;
+}
+
+const struct file_operations otgfs_info_fops = {
+	.open	= otg_open,
+	.read	= otg_info_read,
+};
+
+struct dentry *otg_debug_root;
+struct dentry *otg_debug_mode;
+struct dentry *otg_debug_info;
+#endif
+
+static int otg_debugfs_init(struct msm_otg *dev)
+{
+#ifdef CONFIG_DEBUG_FS
+	otg_debug_root = debugfs_create_dir("otg", NULL);
+	if (!otg_debug_root)
+		return -ENOENT;
+
+	otg_debug_mode = debugfs_create_file("mode", 0222,
+						otg_debug_root, dev,
+						&otgfs_fops);
+	if (!otg_debug_mode)
+		goto free_root;
+
+	otg_debug_info = debugfs_create_file("info", 0444,
+						otg_debug_root, dev,
+						&otgfs_info_fops);
+	if (!otg_debug_info)
+		goto free_mode;
+
+	return 0;
+
+free_mode:
+	debugfs_remove(otg_debug_mode);
+	otg_debug_mode = NULL;
+
+free_root:
+	debugfs_remove(otg_debug_root);
+	otg_debug_root = NULL;
+	return -ENOENT;
+#endif
+	return 0;
+}
+
+static void otg_debugfs_cleanup(void)
+{
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove(otg_debug_info);
+	debugfs_remove(otg_debug_mode);
+	debugfs_remove(otg_debug_root);
+#endif
+}
+
+struct otg_io_access_ops msm_otg_io_ops = {
+	.read = usb_ulpi_read,
+	.write = usb_ulpi_write,
+};
+
+static int __init msm_otg_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	struct msm_otg *dev;
+
+	dev = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	the_msm_otg = dev;
+	dev->otg.dev = &pdev->dev;
+	dev->pdata = pdev->dev.platform_data;
+
+	if (!dev->pdata) {
+		ret = -ENODEV;
+		goto free_dev;
+	}
+
+#ifdef CONFIG_USB_EHCI_MSM_72K
+	if (!dev->pdata->vbus_power) {
+		ret = -ENODEV;
+		goto free_dev;
+	} else
+		dev->pdata->vbus_power(USB_PHY_INTEGRATED, 0);
+
+#endif
+
+
+	if (dev->pdata->rpc_connect) {
+		ret = dev->pdata->rpc_connect(1);
+		pr_debug("%s: rpc_connect(%d)\n", __func__, ret);
+		if (ret) {
+			pr_err("%s: rpc connect failed\n", __func__);
+			ret = -ENODEV;
+			goto free_dev;
+		}
+	}
+
+	dev->hs_clk = clk_get(&pdev->dev, "usb_hs_clk");
+	if (IS_ERR(dev->hs_clk)) {
+		pr_err("%s: failed to get usb_hs_clk\n", __func__);
+		ret = PTR_ERR(dev->hs_clk);
+		goto rpc_fail;
+	}
+	clk_set_rate(dev->hs_clk, 60000000);
+
+	/* pm qos request to prevent apps idle power collapse */
+	pm_qos_add_request(&dev->pdata->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
+
+	/* If USB Core runs its protocol engine based on PCLK, PCLK must be
+	 * running at >60Mhz for correct HSUSB operation and the core cannot
+	 * tolerate frequency changes. Vote for maximum clk frequency here.
+	 */
+	if (dev->pdata->pclk_src_name) {
+		dev->pclk_src = clk_get(0, dev->pdata->pclk_src_name);
+		if (IS_ERR(dev->pclk_src)) {
+			ret = PTR_ERR(dev->pclk_src);
+			goto put_hs_clk;
+		}
+		clk_set_rate(dev->pclk_src, INT_MAX);
+		msm_otg_vote_for_pclk_source(dev, 1);
+	}
+	if (!dev->pdata->pclk_is_hw_gated) {
+		dev->hs_pclk = clk_get(&pdev->dev, "usb_hs_pclk");
+		if (IS_ERR(dev->hs_pclk)) {
+			pr_err("%s: failed to get usb_hs_pclk\n", __func__);
+			ret = PTR_ERR(dev->hs_pclk);
+			goto put_pclk_src;
+		}
+		clk_enable(dev->hs_pclk);
+	}
+
+	if (dev->pdata->core_clk) {
+		dev->hs_cclk = clk_get(&pdev->dev, "usb_hs_core_clk");
+		if (IS_ERR(dev->hs_cclk)) {
+			pr_err("%s: failed to get usb_hs_core_clk\n", __func__);
+			ret = PTR_ERR(dev->hs_cclk);
+			goto put_hs_pclk;
+		}
+		clk_enable(dev->hs_cclk);
+	}
+
+	if (!dev->pdata->phy_reset) {
+		dev->phy_reset_clk = clk_get(&pdev->dev, "usb_phy_clk");
+		if (IS_ERR(dev->phy_reset_clk)) {
+			pr_err("%s: failed to get usb_phy_clk\n", __func__);
+			ret = PTR_ERR(dev->phy_reset_clk);
+			goto put_hs_cclk;
+		}
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("%s: failed to get platform resource mem\n", __func__);
+		ret = -ENODEV;
+		goto put_phy_clk;
+	}
+
+	dev->regs = ioremap(res->start, resource_size(res));
+	if (!dev->regs) {
+		pr_err("%s: ioremap failed\n", __func__);
+		ret = -ENOMEM;
+		goto put_phy_clk;
+	}
+	dev->irq = platform_get_irq(pdev, 0);
+	if (dev->irq < 0) {
+		pr_err("%s: platform_get_irq failed\n", __func__);
+		ret = -ENODEV;
+		goto free_regs;
+	}
+	dev->xo_handle = msm_xo_get(MSM_XO_TCXO_D1, "usb");
+	if (IS_ERR(dev->xo_handle)) {
+		pr_err(" %s not able to get the handle"
+			"to vote for TCXO D1 buffer\n", __func__);
+		ret = PTR_ERR(dev->xo_handle);
+		goto free_regs;
+	}
+
+	ret = msm_xo_mode_vote(dev->xo_handle, MSM_XO_MODE_ON);
+	if (ret) {
+		pr_err("%s failed to vote for TCXO"
+			"D1 buffer%d\n", __func__, ret);
+		goto free_xo_handle;
+	}
+
+
+	msm_otg_init_timer(dev);
+	INIT_WORK(&dev->sm_work, msm_otg_sm_work);
+	INIT_WORK(&dev->otg_resume_work, msm_otg_resume_w);
+	spin_lock_init(&dev->lock);
+	wake_lock_init(&dev->wlock, WAKE_LOCK_SUSPEND, "msm_otg");
+
+	dev->wq = alloc_workqueue("k_otg", WQ_NON_REENTRANT, 0);
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto free_wlock;
+	}
+
+	if (dev->pdata->init_gpio) {
+		ret = dev->pdata->init_gpio(1);
+		if (ret) {
+			pr_err("%s: gpio init failed with err:%d\n",
+					__func__, ret);
+			goto free_wq;
+		}
+	}
+	/* To reduce phy power consumption and to avoid external LDO
+	 * on the board, PMIC comparators can be used to detect VBUS
+	 * session change.
+	 */
+	if (dev->pdata->pmic_vbus_notif_init) {
+		ret = dev->pdata->pmic_vbus_notif_init
+			(&msm_otg_set_vbus_state, 1);
+		if (!ret) {
+			dev->pmic_vbus_notif_supp = 1;
+		} else if (ret != -ENOTSUPP) {
+			pr_err("%s: pmic_vbus_notif_init() failed, err:%d\n",
+					__func__, ret);
+			goto free_gpio;
+		}
+	}
+
+	if (dev->pdata->pmic_id_notif_init) {
+		ret = dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 1);
+		if (!ret) {
+			dev->pmic_id_notif_supp = 1;
+		} else if (ret != -ENOTSUPP) {
+			pr_err("%s: pmic_id_ notif_init failed err:%d",
+					__func__, ret);
+			goto free_pmic_vbus_notif;
+		}
+	}
+
+	if (dev->pdata->pmic_vbus_irq)
+		dev->vbus_on_irq = dev->pdata->pmic_vbus_irq;
+
+	/* vote for vddcx, as PHY cannot tolerate vddcx below 1.0V */
+	if (dev->pdata->init_vddcx) {
+		ret = dev->pdata->init_vddcx(1);
+		if (ret) {
+			pr_err("%s: unable to enable vddcx digital core:%d\n",
+				__func__, ret);
+			goto free_pmic_id_notif;
+		}
+	}
+
+	if (dev->pdata->ldo_init) {
+		ret = dev->pdata->ldo_init(1);
+		if (ret) {
+			pr_err("%s: ldo_init failed with err:%d\n",
+					__func__, ret);
+			goto free_config_vddcx;
+		}
+	}
+
+	if (dev->pdata->ldo_enable) {
+		ret = dev->pdata->ldo_enable(1);
+		if (ret) {
+			pr_err("%s: ldo_enable failed with err:%d\n",
+					__func__, ret);
+			goto free_ldo_init;
+		}
+	}
+
+
+	/* Ack all pending interrupts and clear interrupt enable registers */
+	writel((readl(USB_OTGSC) & ~OTGSC_INTR_MASK), USB_OTGSC);
+	writel(readl(USB_USBSTS), USB_USBSTS);
+	writel(0, USB_USBINTR);
+	/* Ensure that above STOREs are completed before enabling interrupts */
+	mb();
+
+	ret = request_irq(dev->irq, msm_otg_irq, IRQF_SHARED,
+					"msm_otg", dev);
+	if (ret) {
+		pr_err("%s: request irq failed\n", __func__);
+		goto free_ldo_enable;
+	}
+
+	dev->otg.set_peripheral = msm_otg_set_peripheral;
+#ifdef CONFIG_USB_EHCI_MSM_72K
+	dev->otg.set_host = msm_otg_set_host;
+#endif
+	dev->otg.set_suspend = msm_otg_set_suspend;
+	dev->otg.start_hnp = msm_otg_start_hnp;
+	dev->otg.send_event = msm_otg_send_event;
+	dev->otg.set_power = msm_otg_set_power;
+	dev->set_clk = msm_otg_set_clk;
+	dev->reset = otg_reset;
+	dev->otg.io_ops = &msm_otg_io_ops;
+	if (WARN_ON(otg_set_transceiver(&dev->otg))) {
+		ret = -ENODEV;
+		goto free_otg_irq;
+	}
+#ifdef CONFIG_USB_MSM_ACA
+	/* Link doesn't support id_a/b/c interrupts, hence polling
+	 * needs to be done to support ACA charger
+	 */
+	init_timer(&dev->id_timer);
+	dev->id_timer.function = msm_otg_id_func;
+	dev->id_timer.data = (unsigned long) dev;
+#endif
+
+	atomic_set(&dev->chg_type, USB_CHG_TYPE__INVALID);
+	if (dev->pdata->chg_init && dev->pdata->chg_init(1))
+		pr_err("%s: chg_init failed\n", __func__);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	ret = pm_runtime_set_active(&pdev->dev);
+	if (ret < 0)
+		pr_err("%s: pm_runtime: Fail to set active\n", __func__);
+
+	ret = 0;
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get(&pdev->dev);
+
+
+	ret = otg_debugfs_init(dev);
+	if (ret) {
+		pr_err("%s: otg_debugfs_init failed\n", __func__);
+		goto chg_deinit;
+	}
+
+#ifdef CONFIG_USB_OTG
+	ret = sysfs_create_group(&pdev->dev.kobj, &msm_otg_attr_grp);
+	if (ret < 0) {
+		pr_err("%s: Failed to create the sysfs entry\n", __func__);
+		otg_debugfs_cleanup();
+		goto chg_deinit;
+	}
+#endif
+
+
+	return 0;
+
+chg_deinit:
+	if (dev->pdata->chg_init)
+		dev->pdata->chg_init(0);
+free_otg_irq:
+	free_irq(dev->irq, dev);
+free_ldo_enable:
+	if (dev->pdata->ldo_enable)
+		dev->pdata->ldo_enable(0);
+	if (dev->pdata->setup_gpio)
+		dev->pdata->setup_gpio(USB_SWITCH_DISABLE);
+free_ldo_init:
+	if (dev->pdata->ldo_init)
+		dev->pdata->ldo_init(0);
+free_config_vddcx:
+	if (dev->pdata->init_vddcx)
+		dev->pdata->init_vddcx(0);
+free_pmic_id_notif:
+	if (dev->pdata->pmic_id_notif_init && dev->pmic_id_notif_supp)
+		dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 0);
+free_pmic_vbus_notif:
+	if (dev->pdata->pmic_vbus_notif_init && dev->pmic_vbus_notif_supp)
+		dev->pdata->pmic_vbus_notif_init(&msm_otg_set_vbus_state, 0);
+free_gpio:
+	if (dev->pdata->init_gpio)
+		dev->pdata->init_gpio(0);
+free_wq:
+	destroy_workqueue(dev->wq);
+free_wlock:
+	wake_lock_destroy(&dev->wlock);
+free_xo_handle:
+	msm_xo_put(dev->xo_handle);
+free_regs:
+	iounmap(dev->regs);
+put_phy_clk:
+	if (dev->phy_reset_clk)
+		clk_put(dev->phy_reset_clk);
+put_hs_cclk:
+	if (dev->hs_cclk) {
+		clk_disable(dev->hs_cclk);
+		clk_put(dev->hs_cclk);
+	}
+put_hs_pclk:
+	if (dev->hs_pclk) {
+		clk_disable(dev->hs_pclk);
+		clk_put(dev->hs_pclk);
+	}
+put_pclk_src:
+	if (dev->pclk_src) {
+		msm_otg_vote_for_pclk_source(dev, 0);
+		clk_put(dev->pclk_src);
+	}
+put_hs_clk:
+	if (dev->hs_clk)
+		clk_put(dev->hs_clk);
+rpc_fail:
+	if (dev->pdata->rpc_connect)
+		dev->pdata->rpc_connect(0);
+free_dev:
+	kfree(dev);
+	return ret;
+}
+
+static int __exit msm_otg_remove(struct platform_device *pdev)
+{
+	struct msm_otg *dev = the_msm_otg;
+
+	otg_debugfs_cleanup();
+#ifdef CONFIG_USB_OTG
+	sysfs_remove_group(&pdev->dev.kobj, &msm_otg_attr_grp);
+#endif
+	destroy_workqueue(dev->wq);
+	wake_lock_destroy(&dev->wlock);
+
+	if (dev->pdata->setup_gpio)
+		dev->pdata->setup_gpio(USB_SWITCH_DISABLE);
+
+	if (dev->pdata->init_vddcx)
+		dev->pdata->init_vddcx(0);
+	if (dev->pdata->ldo_enable)
+		dev->pdata->ldo_enable(0);
+
+	if (dev->pdata->ldo_init)
+		dev->pdata->ldo_init(0);
+
+	if (dev->pmic_vbus_notif_supp)
+		dev->pdata->pmic_vbus_notif_init(&msm_otg_set_vbus_state, 0);
+
+	if (dev->pmic_id_notif_supp)
+		dev->pdata->pmic_id_notif_init(&msm_otg_set_id_state, 0);
+
+#ifdef CONFIG_USB_MSM_ACA
+	del_timer_sync(&dev->id_timer);
+#endif
+	if (dev->pdata->chg_init)
+		dev->pdata->chg_init(0);
+	free_irq(dev->irq, dev);
+	iounmap(dev->regs);
+	if (dev->hs_cclk) {
+		clk_disable(dev->hs_cclk);
+		clk_put(dev->hs_cclk);
+	}
+	if (dev->hs_pclk) {
+		clk_disable(dev->hs_pclk);
+		clk_put(dev->hs_pclk);
+	}
+	if (dev->hs_clk)
+		clk_put(dev->hs_clk);
+	if (dev->phy_reset_clk)
+		clk_put(dev->phy_reset_clk);
+	if (dev->pdata->rpc_connect)
+		dev->pdata->rpc_connect(0);
+	msm_xo_put(dev->xo_handle);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	pm_qos_remove_request(&dev->pdata->pm_qos_req_dma);
+	clk_put(dev->pclk_src);
+	kfree(dev);
+	return 0;
+}
+
+static int msm_otg_runtime_suspend(struct device *dev)
+{
+	struct msm_otg *otg = the_msm_otg;
+
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	msm_otg_suspend(otg);
+	return  0;
+}
+
+static int msm_otg_runtime_resume(struct device *dev)
+{
+	struct msm_otg *otg = the_msm_otg;
+
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	msm_otg_resume(otg);
+	return  0;
+}
+
+static int msm_otg_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: idling...\n");
+	return  0;
+}
+
+static struct dev_pm_ops msm_otg_dev_pm_ops = {
+	.runtime_suspend = msm_otg_runtime_suspend,
+	.runtime_resume = msm_otg_runtime_resume,
+	.runtime_idle = msm_otg_runtime_idle,
+};
+
+static struct platform_driver msm_otg_driver = {
+	.remove = __exit_p(msm_otg_remove),
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_otg_dev_pm_ops,
+	},
+};
+
+static int __init msm_otg_init(void)
+{
+	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
+}
+
+static void __exit msm_otg_exit(void)
+{
+	platform_driver_unregister(&msm_otg_driver);
+}
+
+module_init(msm_otg_init);
+module_exit(msm_otg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM usb transceiver driver");
+MODULE_VERSION("1.00");
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index b276f8f..ead2976 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -9,11 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
  */
 
 #include <linux/module.h>
@@ -39,12 +34,34 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/usb/msm_hsusb_hw.h>
 #include <linux/regulator/consumer.h>
+#include <linux/mfd/pm8xxx/pm8921-charger.h>
 
 #include <mach/clk.h>
 
 #define MSM_USB_BASE	(motg->regs)
 #define DRIVER_NAME	"msm_otg"
 
+#ifdef CONFIG_USB_MSM_ACA
+static void msm_chg_enable_aca_det(struct msm_otg *motg);
+static void msm_chg_enable_aca_intr(struct msm_otg *motg);
+#else
+static inline bool msm_chg_aca_detect(struct msm_otg *motg)
+{
+	return false;
+}
+
+static inline void msm_chg_enable_aca_det(struct msm_otg *motg)
+{
+}
+static inline void msm_chg_enable_aca_intr(struct msm_otg *motg)
+{
+}
+static inline bool msm_chg_check_aca_intr(struct msm_otg *motg)
+{
+	return false;
+}
+#endif
+
 #define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
 
 #define USB_PHY_3P3_VOL_MIN	3050000 /* uV */
@@ -60,6 +77,8 @@
 #define USB_PHY_VDD_DIG_VOL_MIN	1000000 /* uV */
 #define USB_PHY_VDD_DIG_VOL_MAX	1320000 /* uV */
 
+static struct msm_otg *the_msm_otg;
+
 static struct regulator *hsusb_3p3;
 static struct regulator *hsusb_1p8;
 static struct regulator *hsusb_vddcx;
@@ -87,18 +106,28 @@
 
 		ret = regulator_enable(hsusb_vddcx);
 		if (ret) {
-			dev_err(motg->otg.dev, "unable to enable hsusb vddcx\n");
+			regulator_set_voltage(hsusb_vddcx, 0,
+			USB_PHY_VDD_DIG_VOL_MIN);
 			regulator_put(hsusb_vddcx);
+			dev_err(motg->otg.dev, "unable to enable the hsusb vddcx\n");
+			return ret;
 		}
+
 	} else {
-		ret = regulator_set_voltage(hsusb_vddcx, 0,
-			USB_PHY_VDD_DIG_VOL_MAX);
-		if (ret)
-			dev_err(motg->otg.dev, "unable to set the voltage "
-					"for hsusb vddcx\n");
+
 		ret = regulator_disable(hsusb_vddcx);
-		if (ret)
+		if (ret) {
 			dev_err(motg->otg.dev, "unable to disable hsusb vddcx\n");
+			return ret;
+		}
+
+		ret = regulator_set_voltage(hsusb_vddcx, 0,
+			USB_PHY_VDD_DIG_VOL_MIN);
+		if (ret) {
+			dev_err(motg->otg.dev, "unable to set the voltage"
+					"for hsusb vddcx\n");
+			return ret;
+		}
 
 		regulator_put(hsusb_vddcx);
 	}
@@ -120,42 +149,32 @@
 		rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN,
 				USB_PHY_3P3_VOL_MAX);
 		if (rc) {
-			dev_err(motg->otg.dev, "unable to set voltage level "
-					"for hsusb 3p3\n");
-			goto put_3p3;
-		}
-		rc = regulator_enable(hsusb_3p3);
-		if (rc) {
-			dev_err(motg->otg.dev, "unable to enable the hsusb 3p3\n");
+			dev_err(motg->otg.dev, "unable to set voltage level for"
+					"hsusb 3p3\n");
 			goto put_3p3;
 		}
 		hsusb_1p8 = regulator_get(motg->otg.dev, "HSUSB_1p8");
 		if (IS_ERR(hsusb_1p8)) {
 			dev_err(motg->otg.dev, "unable to get hsusb 1p8\n");
 			rc = PTR_ERR(hsusb_1p8);
-			goto disable_3p3;
+			goto put_3p3_lpm;
 		}
 		rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN,
 				USB_PHY_1P8_VOL_MAX);
 		if (rc) {
-			dev_err(motg->otg.dev, "unable to set voltage level "
-					"for hsusb 1p8\n");
-			goto put_1p8;
-		}
-		rc = regulator_enable(hsusb_1p8);
-		if (rc) {
-			dev_err(motg->otg.dev, "unable to enable the hsusb 1p8\n");
+			dev_err(motg->otg.dev, "unable to set voltage level for"
+					"hsusb 1p8\n");
 			goto put_1p8;
 		}
 
 		return 0;
 	}
 
-	regulator_disable(hsusb_1p8);
 put_1p8:
+	regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX);
 	regulator_put(hsusb_1p8);
-disable_3p3:
-	regulator_disable(hsusb_3p3);
+put_3p3_lpm:
+	regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX);
 put_3p3:
 	regulator_put(hsusb_3p3);
 	return rc;
@@ -187,7 +206,7 @@
 }
 #endif
 
-static int msm_hsusb_ldo_set_mode(int on)
+static int msm_hsusb_ldo_enable(struct msm_otg *motg, int on)
 {
 	int ret = 0;
 
@@ -205,29 +224,61 @@
 		ret = regulator_set_optimum_mode(hsusb_1p8,
 				USB_PHY_1P8_HPM_LOAD);
 		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator "
+			pr_err("%s: Unable to set HPM of the regulator:"
 				"HSUSB_1p8\n", __func__);
 			return ret;
 		}
+
+		ret = regulator_enable(hsusb_1p8);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to enable the hsusb 1p8\n",
+				__func__);
+			regulator_set_optimum_mode(hsusb_1p8, 0);
+			return ret;
+		}
+
 		ret = regulator_set_optimum_mode(hsusb_3p3,
 				USB_PHY_3P3_HPM_LOAD);
 		if (ret < 0) {
-			pr_err("%s: Unable to set HPM of the regulator "
+			pr_err("%s: Unable to set HPM of the regulator:"
 				"HSUSB_3p3\n", __func__);
-			regulator_set_optimum_mode(hsusb_1p8,
-				USB_PHY_1P8_LPM_LOAD);
+			regulator_set_optimum_mode(hsusb_1p8, 0);
+			regulator_disable(hsusb_1p8);
 			return ret;
 		}
+
+		ret = regulator_enable(hsusb_3p3);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to enable the hsusb 3p3\n",
+				__func__);
+			regulator_set_optimum_mode(hsusb_3p3, 0);
+			regulator_set_optimum_mode(hsusb_1p8, 0);
+			regulator_disable(hsusb_1p8);
+			return ret;
+		}
+
 	} else {
-		ret = regulator_set_optimum_mode(hsusb_1p8,
-				USB_PHY_1P8_LPM_LOAD);
+		ret = regulator_disable(hsusb_1p8);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to disable the hsusb 1p8\n",
+				__func__);
+			return ret;
+		}
+
+		ret = regulator_set_optimum_mode(hsusb_1p8, 0);
 		if (ret < 0)
-			pr_err("%s: Unable to set LPM of the regulator "
+			pr_err("%s: Unable to set LPM of the regulator:"
 				"HSUSB_1p8\n", __func__);
-		ret = regulator_set_optimum_mode(hsusb_3p3,
-				USB_PHY_3P3_LPM_LOAD);
+
+		ret = regulator_disable(hsusb_3p3);
+		if (ret) {
+			dev_err(motg->otg.dev, "%s: unable to disable the hsusb 3p3\n",
+				 __func__);
+			return ret;
+		}
+		ret = regulator_set_optimum_mode(hsusb_3p3, 0);
 		if (ret < 0)
-			pr_err("%s: Unable to set LPM of the regulator "
+			pr_err("%s: Unable to set LPM of the regulator:"
 				"HSUSB_3p3\n", __func__);
 	}
 
@@ -399,6 +450,7 @@
 	u32 val = 0;
 	u32 ulpi_val = 0;
 
+	clk_enable(motg->clk);
 	ret = msm_otg_phy_reset(motg);
 	if (ret) {
 		dev_err(otg->dev, "phy_reset failed\n");
@@ -425,19 +477,24 @@
 	writel(0x0, USB_AHBBURST);
 	writel(0x00, USB_AHBMODE);
 
-	if (pdata->otg_control == OTG_PHY_CONTROL) {
-		val = readl(USB_OTGSC);
-		if (pdata->mode == USB_OTG) {
-			ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
-			val |= OTGSC_IDIE | OTGSC_BSVIE;
-		} else if (pdata->mode == USB_PERIPHERAL) {
-			ulpi_val = ULPI_INT_SESS_VALID;
-			val |= OTGSC_BSVIE;
-		}
-		writel(val, USB_OTGSC);
-		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_RISE);
-		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_FALL);
+	/* Ensure that RESET operation is completed before turning off clock */
+	mb();
+	clk_disable(motg->clk);
+
+	val = readl_relaxed(USB_OTGSC);
+	if (pdata->mode == USB_OTG) {
+		ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
+		val |= OTGSC_IDIE | OTGSC_BSVIE;
+	} else if (pdata->mode == USB_PERIPHERAL) {
+		ulpi_val = ULPI_INT_SESS_VALID;
+		val |= OTGSC_BSVIE;
 	}
+	writel_relaxed(val, USB_OTGSC);
+	ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_RISE);
+	ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_FALL);
+
+	msm_chg_enable_aca_det(motg);
+	msm_chg_enable_aca_intr(motg);
 
 	return 0;
 }
@@ -452,11 +509,14 @@
 	struct usb_bus *bus = otg->host;
 	struct msm_otg_platform_data *pdata = motg->pdata;
 	int cnt = 0;
+	bool session_active;
 
 	if (atomic_read(&motg->in_lpm))
 		return 0;
 
 	disable_irq(motg->irq);
+	session_active = (otg->host && !test_bit(ID, &motg->inputs)) ||
+				test_bit(B_SESS_VLD, &motg->inputs);
 	/*
 	 * Chipidea 45-nm PHY suspend sequence:
 	 *
@@ -482,6 +542,16 @@
 	}
 
 	/*
+	 * Turn off the OTG comparators, if depends on PMIC for
+	 * VBUS and ID notifications.
+	 */
+	if ((motg->caps & ALLOW_PHY_COMP_DISABLE) && !session_active) {
+		ulpi_write(otg, OTG_COMP_DISABLE,
+			ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+		motg->lpm_flags |= PHY_OTG_COMP_DISABLED;
+	}
+
+	/*
 	 * PHY may take some time or even fail to enter into low power
 	 * mode (LPM). Hence poll for 500 msec and reset the PHY and link
 	 * in failure case.
@@ -510,31 +580,40 @@
 	 */
 	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
 
-	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
-			motg->pdata->otg_control == OTG_PMIC_CONTROL)
-		writel(readl(USB_PHY_CTRL) | PHY_RETEN, USB_PHY_CTRL);
+	if (motg->caps & ALLOW_PHY_RETENTION && !session_active) {
+		writel_relaxed(readl_relaxed(USB_PHY_CTRL) & ~PHY_RETEN,
+				USB_PHY_CTRL);
+		motg->lpm_flags |= PHY_RETENTIONED;
+	}
 
+	/* Ensure that above operation is completed before turning off clocks */
+	mb();
 	clk_disable(motg->pclk);
-	clk_disable(motg->clk);
 	if (motg->core_clk)
 		clk_disable(motg->core_clk);
 
 	if (!IS_ERR(motg->pclk_src))
 		clk_disable(motg->pclk_src);
 
-	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
-			motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-		msm_hsusb_ldo_set_mode(0);
-		msm_hsusb_config_vddcx(0);
+	if (motg->caps & ALLOW_PHY_POWER_COLLAPSE && !session_active) {
+		msm_hsusb_ldo_enable(motg, 0);
+		motg->lpm_flags |= PHY_PWR_COLLAPSED;
 	}
 
-	if (device_may_wakeup(otg->dev))
+	if (motg->lpm_flags & PHY_RETENTIONED)
+		msm_hsusb_config_vddcx(0);
+
+	if (device_may_wakeup(otg->dev)) {
 		enable_irq_wake(motg->irq);
+		if (motg->pdata->pmic_id_irq)
+			enable_irq_wake(motg->pdata->pmic_id_irq);
+	}
 	if (bus)
 		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
 
 	atomic_set(&motg->in_lpm, 1);
 	enable_irq(motg->irq);
+	wake_unlock(&motg->wlock);
 
 	dev_info(otg->dev, "USB in low power mode\n");
 
@@ -551,19 +630,24 @@
 	if (!atomic_read(&motg->in_lpm))
 		return 0;
 
+	wake_lock(&motg->wlock);
 	if (!IS_ERR(motg->pclk_src))
 		clk_enable(motg->pclk_src);
 
 	clk_enable(motg->pclk);
-	clk_enable(motg->clk);
 	if (motg->core_clk)
 		clk_enable(motg->core_clk);
 
-	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
-			motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-		msm_hsusb_ldo_set_mode(1);
+	if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
+		msm_hsusb_ldo_enable(motg, 1);
+		motg->lpm_flags &= ~PHY_PWR_COLLAPSED;
+	}
+
+	if (motg->lpm_flags & PHY_RETENTIONED) {
 		msm_hsusb_config_vddcx(1);
-		writel(readl(USB_PHY_CTRL) & ~PHY_RETEN, USB_PHY_CTRL);
+		writel_relaxed(readl_relaxed(USB_PHY_CTRL) | PHY_RETEN,
+				USB_PHY_CTRL);
+		motg->lpm_flags &= ~PHY_RETENTIONED;
 	}
 
 	temp = readl(USB_USBCMD);
@@ -598,8 +682,17 @@
 	}
 
 skip_phy_resume:
-	if (device_may_wakeup(otg->dev))
+	/* Turn on the OTG comparators on resume */
+	if (motg->lpm_flags & PHY_OTG_COMP_DISABLED) {
+		ulpi_write(otg, OTG_COMP_DISABLE,
+			ULPI_CLR(ULPI_PWR_CLK_MNG_REG));
+		motg->lpm_flags &= ~PHY_OTG_COMP_DISABLED;
+	}
+	if (device_may_wakeup(otg->dev)) {
 		disable_irq_wake(motg->irq);
+		if (motg->pdata->pmic_id_irq)
+			disable_irq_wake(motg->pdata->pmic_id_irq);
+	}
 	if (bus)
 		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
 
@@ -622,8 +715,8 @@
 	if (motg->cur_power == mA)
 		return;
 
-	/* TODO: Notify PMIC about available current */
 	dev_info(motg->otg.dev, "Avail curr from USB = %u\n", mA);
+	pm8921_charger_vbus_draw(mA);
 	motg->cur_power = mA;
 }
 
@@ -658,8 +751,6 @@
 	if (on) {
 		dev_dbg(otg->dev, "host on\n");
 
-		if (pdata->vbus_power)
-			pdata->vbus_power(1);
 		/*
 		 * Some boards have a switch cotrolled by gpio
 		 * to enable/disable internal HUB. Enable internal
@@ -667,22 +758,49 @@
 		 */
 		if (pdata->setup_gpio)
 			pdata->setup_gpio(OTG_STATE_A_HOST);
-#ifdef CONFIG_USB
 		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
-#endif
 	} else {
 		dev_dbg(otg->dev, "host off\n");
 
-#ifdef CONFIG_USB
 		usb_remove_hcd(hcd);
-#endif
 		if (pdata->setup_gpio)
 			pdata->setup_gpio(OTG_STATE_UNDEFINED);
-		if (pdata->vbus_power)
-			pdata->vbus_power(0);
 	}
 }
 
+static int msm_otg_usbdev_notify(struct notifier_block *self,
+			unsigned long action, void *priv)
+{
+	struct msm_otg *motg = container_of(self, struct msm_otg, usbdev_nb);
+	struct usb_device *udev;
+
+	switch (action) {
+	case USB_DEVICE_ADD:
+	case USB_DEVICE_CONFIG:
+		udev = priv;
+		/*
+		 * Interested in devices connected directly to the root hub.
+		 * An ACA dock can supply IDEV_CHG irrespective of the devices
+		 * connected on the accessory port.
+		 */
+		if (!udev->parent || udev->parent->parent ||
+				motg->chg_type == USB_ACA_DOCK_CHARGER)
+			break;
+		if (udev->actconfig)
+			motg->mA_port = udev->actconfig->desc.bMaxPower * 2;
+		else
+			motg->mA_port = IUNIT;
+
+		if (test_bit(ID_A, &motg->inputs))
+			msm_otg_notify_charger(motg, IDEV_CHG_MIN -
+					motg->mA_port);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 static int msm_otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
 {
 	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
@@ -700,7 +818,10 @@
 	if (!host) {
 		if (otg->state == OTG_STATE_A_HOST) {
 			pm_runtime_get_sync(otg->dev);
+			usb_unregister_notify(&motg->usbdev_nb);
 			msm_otg_start_host(otg, 0);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(0);
 			otg->host = NULL;
 			otg->state = OTG_STATE_UNDEFINED;
 			schedule_work(&motg->sm_work);
@@ -714,6 +835,8 @@
 	hcd = bus_to_hcd(host);
 	hcd->power_budget = motg->pdata->power_budget;
 
+	motg->usbdev_nb.notifier_call = msm_otg_usbdev_notify;
+	usb_register_notify(&motg->usbdev_nb);
 	otg->host = host;
 	dev_dbg(otg->dev, "host driver registered w/ tranceiver\n");
 
@@ -798,6 +921,108 @@
 	return 0;
 }
 
+#ifdef CONFIG_USB_MSM_ACA
+static bool msm_chg_aca_detect(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+	u32 int_sts;
+	bool ret = false;
+
+	if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY)
+		goto out;
+
+	int_sts = ulpi_read(otg, 0x87);
+	switch (int_sts & 0x1C) {
+	case 0x08:
+		if (!test_and_set_bit(ID_A, &motg->inputs)) {
+			dev_dbg(otg->dev, "ID_A\n");
+			motg->chg_type = USB_ACA_A_CHARGER;
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			clear_bit(ID_B, &motg->inputs);
+			clear_bit(ID_C, &motg->inputs);
+			ret = true;
+		}
+		break;
+	case 0x0C:
+		if (!test_and_set_bit(ID_B, &motg->inputs)) {
+			dev_dbg(otg->dev, "ID_B\n");
+			motg->chg_type = USB_ACA_B_CHARGER;
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			clear_bit(ID_A, &motg->inputs);
+			clear_bit(ID_C, &motg->inputs);
+			ret = true;
+		}
+		break;
+	case 0x10:
+		if (!test_and_set_bit(ID_C, &motg->inputs)) {
+			dev_dbg(otg->dev, "ID_C\n");
+			motg->chg_type = USB_ACA_C_CHARGER;
+			motg->chg_state = USB_CHG_STATE_DETECTED;
+			clear_bit(ID_A, &motg->inputs);
+			clear_bit(ID_B, &motg->inputs);
+			ret = true;
+		}
+		break;
+	default:
+		ret = test_and_clear_bit(ID_A, &motg->inputs) |
+			test_and_clear_bit(ID_B, &motg->inputs) |
+			test_and_clear_bit(ID_C, &motg->inputs);
+		if (ret) {
+			dev_dbg(otg->dev, "ID A/B/C is no more\n");
+			motg->chg_type = USB_INVALID_CHARGER;
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+		}
+	}
+out:
+	return ret;
+}
+
+static void msm_chg_enable_aca_det(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_28NM_INTEGRATED_PHY:
+		/* ACA ID pin resistance detection enable */
+		ulpi_write(otg, 0x20, 0x85);
+		break;
+	default:
+		break;
+	}
+}
+
+static void msm_chg_enable_aca_intr(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_28NM_INTEGRATED_PHY:
+		/* Enables ACA Detection interrupt (on any RID change) */
+		ulpi_write(otg, 0x20, 0x94);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool msm_chg_check_aca_intr(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+	bool ret = false;
+
+	switch (motg->pdata->phy_type) {
+	case SNPS_28NM_INTEGRATED_PHY:
+		if (ulpi_read(otg, 0x91) & 1) {
+			dev_dbg(otg->dev, "RID change\n");
+			ulpi_write(otg, 0x01, 0x92);
+			ret = msm_chg_aca_detect(motg);
+		}
+	default:
+		break;
+	}
+	return ret;
+}
+#endif
 static bool msm_chg_check_secondary_det(struct msm_otg *motg)
 {
 	struct otg_transceiver *otg = &motg->otg;
@@ -1039,7 +1264,7 @@
 {
 	struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
 	struct otg_transceiver *otg = &motg->otg;
-	bool is_dcd, tmout, vout;
+	bool is_dcd, tmout, vout, is_aca;
 	unsigned long delay;
 
 	dev_dbg(otg->dev, "chg detection work\n");
@@ -1048,11 +1273,25 @@
 		pm_runtime_get_sync(otg->dev);
 		msm_chg_block_on(motg);
 		msm_chg_enable_dcd(motg);
+		msm_chg_enable_aca_det(motg);
 		motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
 		motg->dcd_retries = 0;
 		delay = MSM_CHG_DCD_POLL_TIME;
 		break;
 	case USB_CHG_STATE_WAIT_FOR_DCD:
+		is_aca = msm_chg_aca_detect(motg);
+		if (is_aca) {
+			/*
+			 * ID_A can be an ACA dock too. Continue
+			 * primary detection after DCD.
+			 */
+			if (test_bit(ID_A, &motg->inputs)) {
+				motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+			} else {
+				delay = 0;
+				break;
+			}
+		}
 		is_dcd = msm_chg_check_dcd(motg);
 		tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
 		if (is_dcd || tmout) {
@@ -1066,6 +1305,13 @@
 		break;
 	case USB_CHG_STATE_DCD_DONE:
 		vout = msm_chg_check_primary_det(motg);
+		is_aca = msm_chg_aca_detect(motg);
+		if (is_aca) {
+			if (vout && test_bit(ID_A, &motg->inputs))
+				motg->chg_type = USB_ACA_DOCK_CHARGER;
+			delay = 0;
+			break;
+		}
 		if (vout) {
 			msm_chg_enable_secondary_det(motg);
 			delay = MSM_CHG_SECONDARY_DET_TIME;
@@ -1088,6 +1334,8 @@
 		motg->chg_state = USB_CHG_STATE_DETECTED;
 	case USB_CHG_STATE_DETECTED:
 		msm_chg_block_off(motg);
+		msm_chg_enable_aca_det(motg);
+		msm_chg_enable_aca_intr(motg);
 		dev_dbg(otg->dev, "charger = %d\n", motg->chg_type);
 		schedule_work(&motg->sm_work);
 		return;
@@ -1112,17 +1360,7 @@
 
 	switch (pdata->mode) {
 	case USB_OTG:
-		if (pdata->otg_control == OTG_PHY_CONTROL) {
-			if (otgsc & OTGSC_ID)
-				set_bit(ID, &motg->inputs);
-			else
-				clear_bit(ID, &motg->inputs);
-
-			if (otgsc & OTGSC_BSV)
-				set_bit(B_SESS_VLD, &motg->inputs);
-			else
-				clear_bit(B_SESS_VLD, &motg->inputs);
-		} else if (pdata->otg_control == OTG_USER_CONTROL) {
+		if (pdata->otg_control == OTG_USER_CONTROL) {
 			if (pdata->default_mode == USB_HOST) {
 				clear_bit(ID, &motg->inputs);
 			} else if (pdata->default_mode == USB_PERIPHERAL) {
@@ -1132,6 +1370,16 @@
 				set_bit(ID, &motg->inputs);
 				clear_bit(B_SESS_VLD, &motg->inputs);
 			}
+		} else {
+			if (otgsc & OTGSC_ID)
+				set_bit(ID, &motg->inputs);
+			else
+				clear_bit(ID, &motg->inputs);
+
+			if (otgsc & OTGSC_BSV)
+				set_bit(B_SESS_VLD, &motg->inputs);
+			else
+				clear_bit(B_SESS_VLD, &motg->inputs);
 		}
 		break;
 	case USB_HOST:
@@ -1163,9 +1411,16 @@
 		/* FALL THROUGH */
 	case OTG_STATE_B_IDLE:
 		dev_dbg(otg->dev, "OTG_STATE_B_IDLE state\n");
-		if (!test_bit(ID, &motg->inputs) && otg->host) {
+		if ((!test_bit(ID, &motg->inputs) ||
+				test_bit(ID_A, &motg->inputs)) && otg->host) {
 			/* disable BSV bit */
 			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->chg_type == USB_ACA_DOCK_CHARGER)
+				msm_otg_notify_charger(motg,
+						IDEV_CHG_MAX);
+			else if (!test_bit(ID_A, &motg->inputs) &&
+					motg->pdata->vbus_power)
+				motg->pdata->vbus_power(1);
 			msm_otg_start_host(otg, 1);
 			otg->state = OTG_STATE_A_HOST;
 		} else if (test_bit(B_SESS_VLD, &motg->inputs)) {
@@ -1176,10 +1431,12 @@
 			case USB_CHG_STATE_DETECTED:
 				switch (motg->chg_type) {
 				case USB_DCP_CHARGER:
+				case USB_ACA_B_CHARGER:
 					msm_otg_notify_charger(motg,
 							IDEV_CHG_MAX);
 					break;
 				case USB_CDP_CHARGER:
+				case USB_ACA_C_CHARGER:
 					msm_otg_notify_charger(motg,
 							IDEV_CHG_MAX);
 					msm_otg_start_peripheral(otg, 1);
@@ -1216,23 +1473,51 @@
 	case OTG_STATE_B_PERIPHERAL:
 		dev_dbg(otg->dev, "OTG_STATE_B_PERIPHERAL state\n");
 		if (!test_bit(B_SESS_VLD, &motg->inputs) ||
-				!test_bit(ID, &motg->inputs)) {
+				!test_bit(ID, &motg->inputs) ||
+				!test_bit(ID_C, &motg->inputs)) {
 			msm_otg_notify_charger(motg, 0);
 			msm_otg_start_peripheral(otg, 0);
+			if (!test_bit(ID_B, &motg->inputs) &&
+				!test_bit(ID_A, &motg->inputs)) {
+				motg->chg_state = USB_CHG_STATE_UNDEFINED;
+				motg->chg_type = USB_INVALID_CHARGER;
+			}
+			otg->state = OTG_STATE_B_IDLE;
+			msm_otg_reset(otg);
+			schedule_work(w);
+		} else if (test_bit(ID_C, &motg->inputs)) {
+			msm_otg_notify_charger(motg, IDEV_CHG_MAX);
+			pm_runtime_put_sync(otg->dev);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
+		if (test_bit(ID, &motg->inputs) &&
+				!test_bit(ID_A, &motg->inputs)) {
+			msm_otg_start_host(otg, 0);
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(0);
 			motg->chg_state = USB_CHG_STATE_UNDEFINED;
 			motg->chg_type = USB_INVALID_CHARGER;
 			otg->state = OTG_STATE_B_IDLE;
 			msm_otg_reset(otg);
 			schedule_work(w);
-		}
-		break;
-	case OTG_STATE_A_HOST:
-		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
-		if (test_bit(ID, &motg->inputs)) {
-			msm_otg_start_host(otg, 0);
-			otg->state = OTG_STATE_B_IDLE;
-			msm_otg_reset(otg);
-			schedule_work(w);
+		} else if (test_bit(ID_A, &motg->inputs)) {
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(0);
+			msm_otg_notify_charger(motg,
+					IDEV_CHG_MIN - motg->mA_port);
+			pm_runtime_put_sync(otg->dev);
+		} else if (!test_bit(ID, &motg->inputs)) {
+			motg->chg_state = USB_CHG_STATE_UNDEFINED;
+			motg->chg_type = USB_INVALID_CHARGER;
+			msm_otg_notify_charger(motg, 0);
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			if (motg->pdata->vbus_power)
+				motg->pdata->vbus_power(1);
+			pm_runtime_put_sync(otg->dev);
 		}
 		break;
 	default:
@@ -1244,7 +1529,7 @@
 {
 	struct msm_otg *motg = data;
 	struct otg_transceiver *otg = &motg->otg;
-	u32 otgsc = 0;
+	u32 otgsc = 0, usbsts;
 
 	if (atomic_read(&motg->in_lpm)) {
 		disable_irq_nosync(irq);
@@ -1253,6 +1538,16 @@
 		return IRQ_HANDLED;
 	}
 
+	usbsts = readl(USB_USBSTS);
+	if ((usbsts & PHY_ALT_INT)) {
+		writel(PHY_ALT_INT, USB_USBSTS);
+		if (msm_chg_check_aca_intr(motg)) {
+			pm_runtime_get_noresume(otg->dev);
+			schedule_work(&motg->sm_work);
+		}
+		return IRQ_HANDLED;
+	}
+
 	otgsc = readl(USB_OTGSC);
 	if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
 		return IRQ_NONE;
@@ -1263,6 +1558,7 @@
 		else
 			clear_bit(ID, &motg->inputs);
 		dev_dbg(otg->dev, "ID set/clear\n");
+		schedule_work(&motg->sm_work);
 		pm_runtime_get_noresume(otg->dev);
 	} else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
 		if (otgsc & OTGSC_BSV)
@@ -1270,11 +1566,36 @@
 		else
 			clear_bit(B_SESS_VLD, &motg->inputs);
 		dev_dbg(otg->dev, "BSV set/clear\n");
+		schedule_work(&motg->sm_work);
 		pm_runtime_get_noresume(otg->dev);
 	}
 
 	writel(otgsc, USB_OTGSC);
-	schedule_work(&motg->sm_work);
+	return IRQ_HANDLED;
+}
+
+static void msm_otg_set_vbus_state(int online)
+{
+	struct msm_otg *motg = the_msm_otg;
+
+	/* We depend on the PMIC only for the VBUS ON interrupt */
+	if (!atomic_read(&motg->in_lpm) || !online)
+		return;
+
+	/*
+	 * Let interrupt handler take care of resuming
+	 * the hardware.
+	 */
+	msm_otg_irq(motg->irq, (void *) motg);
+}
+
+static irqreturn_t msm_pmic_id_irq(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+
+	if (atomic_read(&motg->in_lpm) && !motg->async_int)
+		msm_otg_irq(motg->irq, motg);
+
 	return IRQ_HANDLED;
 }
 
@@ -1428,6 +1749,7 @@
 		return -ENOMEM;
 	}
 
+	the_msm_otg = motg;
 	motg->pdata = pdev->dev.platform_data;
 	otg = &motg->otg;
 	otg->dev = &pdev->dev;
@@ -1503,24 +1825,30 @@
 		goto free_regs;
 	}
 
-	clk_enable(motg->clk);
 	clk_enable(motg->pclk);
 
 	ret = msm_hsusb_init_vddcx(motg, 1);
 	if (ret) {
-		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
+		dev_err(&pdev->dev, "hsusb vddcx init failed\n");
 		goto free_regs;
 	}
 
+	ret = msm_hsusb_config_vddcx(1);
+	if (ret) {
+		dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
+		goto free_init_vddcx;
+	}
+
 	ret = msm_hsusb_ldo_init(motg, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
-		goto vddcx_exit;
+		goto free_init_vddcx;
 	}
-	ret = msm_hsusb_ldo_set_mode(1);
+
+	ret = msm_hsusb_ldo_enable(motg, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "hsusb vreg enable failed\n");
-		goto ldo_exit;
+		goto free_ldo_init;
 	}
 
 	if (motg->core_clk)
@@ -1528,14 +1856,17 @@
 
 	writel(0, USB_USBINTR);
 	writel(0, USB_OTGSC);
+	/* Ensure that above STOREs are completed before enabling interrupts */
+	mb();
 
+	wake_lock_init(&motg->wlock, WAKE_LOCK_SUSPEND, "msm_otg");
 	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
 	INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
 	ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
 					"msm_otg", motg);
 	if (ret) {
 		dev_err(&pdev->dev, "request irq failed\n");
-		goto disable_clks;
+		goto destroy_wlock;
 	}
 
 	otg->init = msm_otg_reset;
@@ -1551,8 +1882,27 @@
 		goto free_irq;
 	}
 
+	if (motg->pdata->otg_control == OTG_PMIC_CONTROL) {
+		if (motg->pdata->pmic_id_irq) {
+			ret = request_irq(motg->pdata->pmic_id_irq,
+						msm_pmic_id_irq,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING,
+						"msm_otg", motg);
+			if (ret) {
+				dev_err(&pdev->dev, "request irq failed for PMIC ID\n");
+				goto remove_otg;
+			}
+		} else {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "PMIC IRQ for ID notifications doesn't exist\n");
+			goto remove_otg;
+		}
+	}
+
 	platform_set_drvdata(pdev, motg);
 	device_init_wakeup(&pdev->dev, 1);
+	motg->mA_port = IUNIT;
 
 	if (motg->pdata->mode == USB_OTG &&
 			motg->pdata->otg_control == OTG_USER_CONTROL) {
@@ -1562,25 +1912,39 @@
 					"not available\n");
 	}
 
+	if (motg->pdata->otg_control == OTG_PMIC_CONTROL)
+		pm8921_charger_register_vbus_sn(&msm_otg_set_vbus_state);
+
+	if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
+			motg->pdata->otg_control == OTG_PMIC_CONTROL &&
+			motg->pdata->pmic_id_irq)
+		motg->caps = ALLOW_PHY_POWER_COLLAPSE |
+				ALLOW_PHY_RETENTION |
+				ALLOW_PHY_COMP_DISABLE;
+
+	wake_lock(&motg->wlock);
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
 	return 0;
+
+remove_otg:
+	otg_set_transceiver(NULL);
 free_irq:
 	free_irq(motg->irq, motg);
-disable_clks:
+destroy_wlock:
+	wake_lock_destroy(&motg->wlock);
 	clk_disable(motg->pclk);
-	clk_disable(motg->clk);
-ldo_exit:
+	msm_hsusb_ldo_enable(motg, 0);
+free_ldo_init:
 	msm_hsusb_ldo_init(motg, 0);
-vddcx_exit:
+free_init_vddcx:
 	msm_hsusb_init_vddcx(motg, 0);
 free_regs:
 	iounmap(motg->regs);
 put_core_clk:
 	if (motg->core_clk)
 		clk_put(motg->core_clk);
-	clk_put(motg->pclk);
 put_pclk_src:
 	if (!IS_ERR(motg->pclk_src)) {
 		clk_disable(motg->pclk_src);
@@ -1604,6 +1968,8 @@
 	if (otg->host || otg->gadget)
 		return -EBUSY;
 
+	if (motg->pdata->otg_control == OTG_PMIC_CONTROL)
+		pm8921_charger_unregister_vbus_sn(0);
 	msm_otg_debugfs_cleanup();
 	cancel_delayed_work_sync(&motg->chg_work);
 	cancel_work_sync(&motg->sm_work);
@@ -1612,7 +1978,10 @@
 
 	device_init_wakeup(&pdev->dev, 0);
 	pm_runtime_disable(&pdev->dev);
+	wake_lock_destroy(&motg->wlock);
 
+	if (motg->pdata->pmic_id_irq)
+		free_irq(motg->pdata->pmic_id_irq, motg);
 	otg_set_transceiver(NULL);
 	free_irq(motg->irq, motg);
 
@@ -1633,14 +2002,15 @@
 		dev_err(otg->dev, "Unable to suspend PHY\n");
 
 	clk_disable(motg->pclk);
-	clk_disable(motg->clk);
 	if (motg->core_clk)
 		clk_disable(motg->core_clk);
 	if (!IS_ERR(motg->pclk_src)) {
 		clk_disable(motg->pclk_src);
 		clk_put(motg->pclk_src);
 	}
+	msm_hsusb_ldo_enable(motg, 0);
 	msm_hsusb_ldo_init(motg, 0);
+	msm_hsusb_init_vddcx(motg, 0);
 
 	iounmap(motg->regs);
 	pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/usb/otg/otg.c b/drivers/usb/otg/otg.c
index fb7adef..40a34ec 100644
--- a/drivers/usb/otg/otg.c
+++ b/drivers/usb/otg/otg.c
@@ -99,3 +99,18 @@
 	}
 }
 EXPORT_SYMBOL(otg_state_string);
+
+int otg_send_event(enum usb_otg_event event)
+{
+	struct otg_transceiver *otg = otg_get_transceiver();
+	int ret = -ENOTSUPP;
+
+	if (otg && otg->send_event)
+		ret = otg->send_event(otg, event);
+
+	if (otg)
+		otg_put_transceiver(otg);
+
+	return ret;
+}
+EXPORT_SYMBOL(otg_send_event);
\ No newline at end of file
diff --git a/drivers/usb/otg/otg_id.c b/drivers/usb/otg/otg_id.c
index ce22b46..64e1bd4 100644
--- a/drivers/usb/otg/otg_id.c
+++ b/drivers/usb/otg/otg_id.c
@@ -42,7 +42,7 @@
 
 static void __otg_id_notify(void)
 {
-	int ret;
+	int ret = 0;
 	struct otg_id_notifier_block *otg_id_nb;
 	bool proxy_wait = false;
 	if (plist_head_empty(&otg_id_plist))