Merge 3.16-rc6 into driver-core-next

We want the platform changes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-platform b/Documentation/ABI/testing/sysfs-bus-platform
new file mode 100644
index 0000000..5172a61
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-platform
@@ -0,0 +1,20 @@
+What:		/sys/bus/platform/devices/.../driver_override
+Date:		April 2014
+Contact:	Kim Phillips <kim.phillips@freescale.com>
+Description:
+		This file allows the driver for a device to be specified,
+		overriding standard OF, ACPI, ID table, and name matching.
+		When specified, only a driver with a name matching the value
+		written to driver_override will have an opportunity to bind
+		to the device.  The override is specified by writing a string
+		to the driver_override file (echo vfio-platform > \
+		driver_override) and may be cleared with an empty string
+		(echo > driver_override).  This returns the device to standard
+		matching and binding rules.  Writing to driver_override does not
+		automatically unbind the device from its current driver or make
+		any attempt to automatically load the specified driver.  If no
+		driver with a matching name is currently loaded in the kernel,
+		the device will not bind to any driver.  This also allows
+		devices to opt-out of driver binding using a driver_override
+		name such as "none".  Only a single driver may be specified in
+		the override; there is no support for parsing delimiters.
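
For illustration, a userspace sketch of the sequence this description
implies; the device name "fff51000.ethernet" is hypothetical:

	#include <stdio.h>

	/* write a string to a sysfs attribute, returning 0 on success */
	static int write_sysfs(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%s", val);
		return fclose(f);
	}

	int main(void)
	{
		const char *dev = "fff51000.ethernet";

		/* from now on, only a driver named vfio-platform may bind */
		write_sysfs("/sys/bus/platform/devices/fff51000.ethernet/driver_override",
			    "vfio-platform");
		/* detach the current driver, if any */
		write_sysfs("/sys/bus/platform/devices/fff51000.ethernet/driver/unbind",
			    dev);
		/* ask the bus to match the device again */
		write_sysfs("/sys/bus/platform/drivers_probe", dev);
		return 0;
	}
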
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index cc63f30..dd3f278 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -128,8 +128,12 @@
 !Edrivers/base/bus.c
      </sect1>
      <sect1><title>Device Drivers DMA Management</title>
-!Edrivers/base/dma-buf.c
-!Edrivers/base/reservation.c
+!Edrivers/dma-buf/dma-buf.c
+!Edrivers/dma-buf/fence.c
+!Edrivers/dma-buf/seqno-fence.c
+!Iinclude/linux/fence.h
+!Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/reservation.c
 !Iinclude/linux/reservation.h
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 1525e30..d14710b 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -233,66 +233,78 @@
   6. List of managed interfaces
   -----------------------------
 
-MEM
-  devm_kzalloc()
-  devm_kfree()
-  devm_kmemdup()
-  devm_get_free_pages()
-  devm_free_pages()
+CLOCK
+  devm_clk_get()
+  devm_clk_put()
+
+DMA
+  dmam_alloc_coherent()
+  dmam_alloc_noncoherent()
+  dmam_declare_coherent_memory()
+  dmam_free_coherent()
+  dmam_free_noncoherent()
+  dmam_pool_create()
+  dmam_pool_destroy()
+
+GPIO
+  devm_gpiod_get()
+  devm_gpiod_get_index()
+  devm_gpiod_get_index_optional()
+  devm_gpiod_get_optional()
+  devm_gpiod_put()
 
 IIO
   devm_iio_device_alloc()
   devm_iio_device_free()
-  devm_iio_trigger_alloc()
-  devm_iio_trigger_free()
   devm_iio_device_register()
   devm_iio_device_unregister()
+  devm_iio_trigger_alloc()
+  devm_iio_trigger_free()
 
 IO region
-  devm_request_region()
-  devm_request_mem_region()
-  devm_release_region()
   devm_release_mem_region()
-
-IRQ
-  devm_request_irq()
-  devm_free_irq()
-
-DMA
-  dmam_alloc_coherent()
-  dmam_free_coherent()
-  dmam_alloc_noncoherent()
-  dmam_free_noncoherent()
-  dmam_declare_coherent_memory()
-  dmam_pool_create()
-  dmam_pool_destroy()
-
-PCI
-  pcim_enable_device()	: after success, all PCI ops become managed
-  pcim_pin_device()	: keep PCI device enabled after release
+  devm_release_region()
+  devm_request_mem_region()
+  devm_request_region()
 
 IOMAP
   devm_ioport_map()
   devm_ioport_unmap()
   devm_ioremap()
   devm_ioremap_nocache()
-  devm_iounmap()
   devm_ioremap_resource() : checks resource, requests memory region, ioremaps
-  devm_request_and_ioremap() : obsoleted by devm_ioremap_resource()
+  devm_iounmap()
   pcim_iomap()
-  pcim_iounmap()
-  pcim_iomap_table()	: array of mapped addresses indexed by BAR
   pcim_iomap_regions()	: do request_region() and iomap() on multiple BARs
+  pcim_iomap_table()	: array of mapped addresses indexed by BAR
+  pcim_iounmap()
 
-REGULATOR
-  devm_regulator_get()
-  devm_regulator_put()
-  devm_regulator_bulk_get()
-  devm_regulator_register()
+IRQ
+  devm_free_irq()
+  devm_request_irq()
 
-CLOCK
-  devm_clk_get()
-  devm_clk_put()
+MDIO
+  devm_mdiobus_alloc()
+  devm_mdiobus_alloc_size()
+  devm_mdiobus_free()
+
+MEM
+  devm_free_pages()
+  devm_get_free_pages()
+  devm_kcalloc()
+  devm_kfree()
+  devm_kmalloc()
+  devm_kmalloc_array()
+  devm_kmemdup()
+  devm_kzalloc()
+
+PCI
+  pcim_enable_device()	: after success, all PCI ops become managed
+  pcim_pin_device()	: keep PCI device enabled after release
+
+PHY
+  devm_usb_get_phy()
+  devm_usb_put_phy()
 
 PINCTRL
   devm_pinctrl_get()
@@ -302,24 +314,14 @@
   devm_pwm_get()
   devm_pwm_put()
 
-PHY
-  devm_usb_get_phy()
-  devm_usb_put_phy()
+REGULATOR
+  devm_regulator_bulk_get()
+  devm_regulator_get()
+  devm_regulator_put()
+  devm_regulator_register()
 
 SLAVE DMA ENGINE
   devm_acpi_dma_controller_register()
 
 SPI
   devm_spi_register_master()
-
-GPIO
-  devm_gpiod_get()
-  devm_gpiod_get_index()
-  devm_gpiod_get_optional()
-  devm_gpiod_get_index_optional()
-  devm_gpiod_put()
-
-MDIO
-  devm_mdiobus_alloc()
-  devm_mdiobus_alloc_size()
-  devm_mdiobus_free()
diff --git a/Documentation/firmware_class/README b/Documentation/firmware_class/README
index 43fada9..71f8685 100644
--- a/Documentation/firmware_class/README
+++ b/Documentation/firmware_class/README
@@ -64,7 +64,7 @@
 
 	 if(request_firmware(&fw_entry, $FIRMWARE, device) == 0)
 	 	copy_fw_to_device(fw_entry->data, fw_entry->size);
-	 release(fw_entry);
+	 release_firmware(fw_entry);
 
  Sample/simple hotplug script:
  ============================
@@ -74,7 +74,7 @@
 	HOTPLUG_FW_DIR=/usr/lib/hotplug/firmware/
 
 	echo 1 > /sys/$DEVPATH/loading
-	cat $HOTPLUG_FW_DIR/$FIRMWARE > /sysfs/$DEVPATH/data
+	cat $HOTPLUG_FW_DIR/$FIRMWARE > /sys/$DEVPATH/data
 	echo 0 > /sys/$DEVPATH/loading
 
  Random notes:
@@ -123,6 +123,6 @@
  --------------------
  After firmware cache mechanism is introduced during system sleep,
  request_firmware can be called safely inside device's suspend and
- resume callback, and callers need't cache the firmware by
+ resume callback, and callers needn't cache the firmware by
  themselves any more for dealing with firmware loss during system
  resume.
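
A sketch of the pattern this note describes; mydev_resume(),
mydev_load_config() and the firmware name are hypothetical:

	static int mydev_resume(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		/* served from the firmware cache while userspace is frozen */
		err = request_firmware(&fw, "mydev/config.bin", dev);
		if (err)
			return err;

		mydev_load_config(dev, fw->data, fw->size);
		release_firmware(fw);
		return 0;
	}
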
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index cbc2f03..aee73e7 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -29,6 +29,9 @@
 
 Procedure for submitting patches to the -stable tree:
 
+ - If the patch covers files in net/ or drivers/net please follow netdev stable
+   submission guidelines as described in
+   Documentation/networking/netdev-FAQ.txt
  - Send the patch, after verifying that it follows the above rules, to
    stable@vger.kernel.org.  You must note the upstream commit ID in the
    changelog of your submission, as well as the kernel version you wish
diff --git a/MAINTAINERS b/MAINTAINERS
index 61a8f48..11665a8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2913,8 +2913,8 @@
 L:	linux-media@vger.kernel.org
 L:	dri-devel@lists.freedesktop.org
 L:	linaro-mm-sig@lists.linaro.org
-F:	drivers/base/dma-buf*
-F:	include/linux/dma-buf*
+F:	drivers/dma-buf/
+F:	include/linux/dma-buf* include/linux/reservation.h include/linux/*fence.h
 F:	Documentation/dma-buf-sharing.txt
 T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 
diff --git a/drivers/Makefile b/drivers/Makefile
index f98b50d..c00337b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -61,6 +61,7 @@
 
 obj-$(CONFIG_PARPORT)		+= parport/
 obj-y				+= base/ block/ misc/ mfd/ nfc/
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 23b8726..88500fe 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -149,15 +149,21 @@
 	  some other directory containing the firmware files.
 
 config FW_LOADER_USER_HELPER
+	bool
+
+config FW_LOADER_USER_HELPER_FALLBACK
 	bool "Fallback user-helper invocation for firmware loading"
 	depends on FW_LOADER
-	default y
+	select FW_LOADER_USER_HELPER
 	help
 	  This option enables / disables the invocation of user-helper
 	  (e.g. udev) for loading firmware files as a fallback after the
 	  direct file loading in kernel fails.  The user-mode helper is
 	  no longer required unless you have a special firmware file that
-	  resides in a non-standard path.
+	  resides in a non-standard path. Moreover, the udev support has
+	  been deprecated upstream.
+
+	  If you are unsure about this, say N here.
 
 config DEBUG_DRIVER
 	bool "Driver Core verbose debug messages"
@@ -208,6 +214,15 @@
 	  APIs extension; the file's descriptor can then be passed on to other
 	  driver.
 
+config FENCE_TRACE
+	bool "Enable verbose FENCE_TRACE messages"
+	depends on DMA_SHARED_BUFFER
+	help
+	  Enable the FENCE_TRACE printks. This will add extra
+	  spam to the console log, but will make it easier to diagnose
+	  lockup related problems for dma-buffers shared across multiple
+	  devices.
+
 config DMA_CMA
 	bool "DMA Contiguous Memory Allocator"
 	depends on HAVE_DMA_CONTIGUOUS && CMA
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 04b314e..4aab26e 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,6 @@
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o
 obj-$(CONFIG_ISA)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c477899..f748430 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -18,6 +18,15 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 
+struct component_match {
+	size_t alloc;
+	size_t num;
+	struct {
+		void *data;
+		int (*fn)(struct device *, void *);
+	} compare[0];
+};
+
 struct master {
 	struct list_head node;
 	struct list_head components;
@@ -25,6 +34,7 @@
 
 	const struct component_master_ops *ops;
 	struct device *dev;
+	struct component_match *match;
 };
 
 struct component {
@@ -69,6 +79,11 @@
 	c->master = NULL;
 }
 
+/*
+ * Add a component to a master, finding the component via the compare
+ * function and compare data.  This is safe to call for duplicate matches
+ * and will not result in the same component being added multiple times.
+ */
 int component_master_add_child(struct master *master,
 	int (*compare)(struct device *, void *), void *compare_data)
 {
@@ -76,11 +91,12 @@
 	int ret = -ENXIO;
 
 	list_for_each_entry(c, &component_list, node) {
-		if (c->master)
+		if (c->master && c->master != master)
 			continue;
 
 		if (compare(c->dev, compare_data)) {
-			component_attach_master(master, c);
+			if (!c->master)
+				component_attach_master(master, c);
 			ret = 0;
 			break;
 		}
@@ -90,6 +106,34 @@
 }
 EXPORT_SYMBOL_GPL(component_master_add_child);
 
+static int find_components(struct master *master)
+{
+	struct component_match *match = master->match;
+	size_t i;
+	int ret = 0;
+
+	if (!match) {
+		/*
+		 * Search the list of components, looking for components that
+		 * belong to this master, and attach them to the master.
+		 */
+		return master->ops->add_components(master->dev, master);
+	}
+
+	/*
+	 * Scan the array of match functions and attach
+	 * any components which are found to this master.
+	 */
+	for (i = 0; i < match->num; i++) {
+		ret = component_master_add_child(master,
+						 match->compare[i].fn,
+						 match->compare[i].data);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
 /* Detach all attached components from this master */
 static void master_remove_components(struct master *master)
 {
@@ -113,44 +157,44 @@
 static int try_to_bring_up_master(struct master *master,
 	struct component *component)
 {
-	int ret = 0;
+	int ret;
 
-	if (!master->bound) {
-		/*
-		 * Search the list of components, looking for components that
-		 * belong to this master, and attach them to the master.
-		 */
-		if (master->ops->add_components(master->dev, master)) {
-			/* Failed to find all components */
-			master_remove_components(master);
-			ret = 0;
-			goto out;
-		}
+	if (master->bound)
+		return 0;
 
-		if (component && component->master != master) {
-			master_remove_components(master);
-			ret = 0;
-			goto out;
-		}
-
-		if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/* Found all components */
-		ret = master->ops->bind(master->dev);
-		if (ret < 0) {
-			devres_release_group(master->dev, NULL);
-			dev_info(master->dev, "master bind failed: %d\n", ret);
-			master_remove_components(master);
-			goto out;
-		}
-
-		master->bound = true;
-		ret = 1;
+	/*
+	 * Search the list of components, looking for components that
+	 * belong to this master, and attach them to the master.
+	 */
+	if (find_components(master)) {
+		/* Failed to find all components */
+		ret = 0;
+		goto out;
 	}
+
+	if (component && component->master != master) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Found all components */
+	ret = master->ops->bind(master->dev);
+	if (ret < 0) {
+		devres_release_group(master->dev, NULL);
+		dev_info(master->dev, "master bind failed: %d\n", ret);
+		goto out;
+	}
+
+	master->bound = true;
+	return 1;
+
 out:
+	master_remove_components(master);
 
 	return ret;
 }
@@ -180,18 +224,89 @@
 	master_remove_components(master);
 }
 
-int component_master_add(struct device *dev,
-	const struct component_master_ops *ops)
+static size_t component_match_size(size_t num)
+{
+	return offsetof(struct component_match, compare[num]);
+}
+
+static struct component_match *component_match_realloc(struct device *dev,
+	struct component_match *match, size_t num)
+{
+	struct component_match *new;
+
+	if (match && match->alloc == num)
+		return match;
+
+	new = devm_kmalloc(dev, component_match_size(num), GFP_KERNEL);
+	if (!new)
+		return ERR_PTR(-ENOMEM);
+
+	if (match) {
+		memcpy(new, match, component_match_size(min(match->num, num)));
+		devm_kfree(dev, match);
+	} else {
+		new->num = 0;
+	}
+
+	new->alloc = num;
+
+	return new;
+}
+
+/*
+ * Add a component to be matched.
+ *
+ * The match array is first created or extended if necessary.
+ */
+void component_match_add(struct device *dev, struct component_match **matchptr,
+	int (*compare)(struct device *, void *), void *compare_data)
+{
+	struct component_match *match = *matchptr;
+
+	if (IS_ERR(match))
+		return;
+
+	if (!match || match->num == match->alloc) {
+		size_t new_size = match ? match->alloc + 16 : 15;
+
+		match = component_match_realloc(dev, match, new_size);
+
+		*matchptr = match;
+
+		if (IS_ERR(match))
+			return;
+	}
+
+	match->compare[match->num].fn = compare;
+	match->compare[match->num].data = compare_data;
+	match->num++;
+}
+EXPORT_SYMBOL(component_match_add);
+
+int component_master_add_with_match(struct device *dev,
+	const struct component_master_ops *ops,
+	struct component_match *match)
 {
 	struct master *master;
 	int ret;
 
+	if (ops->add_components && match)
+		return -EINVAL;
+
+	if (match) {
+		/* Reallocate the match array for its true size */
+		match = component_match_realloc(dev, match, match->num);
+		if (IS_ERR(match))
+			return PTR_ERR(match);
+	}
+
 	master = kzalloc(sizeof(*master), GFP_KERNEL);
 	if (!master)
 		return -ENOMEM;
 
 	master->dev = dev;
 	master->ops = ops;
+	master->match = match;
 	INIT_LIST_HEAD(&master->components);
 
 	/* Add to the list of available masters. */
@@ -209,6 +324,13 @@
 
 	return ret < 0 ? ret : 0;
 }
+EXPORT_SYMBOL_GPL(component_master_add_with_match);
+
+int component_master_add(struct device *dev,
+	const struct component_master_ops *ops)
+{
+	return component_master_add_with_match(dev, ops, NULL);
+}
 EXPORT_SYMBOL_GPL(component_master_add);
 
 void component_master_del(struct device *dev,
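
For illustration, a minimal sketch of a master driver built on the new
match-array API instead of an add_components callback; all my_* names
are hypothetical:

	static int my_compare_of(struct device *dev, void *data)
	{
		return dev->of_node == data;
	}

	static int my_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;
		struct device_node *child;

		for_each_available_child_of_node(pdev->dev.of_node, child)
			component_match_add(&pdev->dev, &match,
					    my_compare_of, child);

		/* ops->add_components must be NULL when a match array is used */
		return component_master_add_with_match(&pdev->dev,
						       &my_master_ops, match);
	}

component_match_add() records allocation failures inside the match
pointer itself, so the loop needs no error handling; a failed match
array is reported later by component_master_add_with_match().
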
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d276e33..124d50c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -100,10 +100,16 @@
 #define FW_OPT_UEVENT	(1U << 0)
 #define FW_OPT_NOWAIT	(1U << 1)
 #ifdef CONFIG_FW_LOADER_USER_HELPER
-#define FW_OPT_FALLBACK	(1U << 2)
+#define FW_OPT_USERHELPER	(1U << 2)
 #else
-#define FW_OPT_FALLBACK	0
+#define FW_OPT_USERHELPER	0
 #endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
+#define FW_OPT_FALLBACK		FW_OPT_USERHELPER
+#else
+#define FW_OPT_FALLBACK		0
+#endif
+#define FW_OPT_NO_WARN	(1U << 3)
 
 struct firmware_cache {
 	/* firmware_buf instance will be added into the below list */
@@ -279,26 +285,15 @@
 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
 
-/* Don't inline this: 'struct kstat' is biggish */
-static noinline_for_stack int fw_file_size(struct file *file)
-{
-	struct kstat st;
-	if (vfs_getattr(&file->f_path, &st))
-		return -1;
-	if (!S_ISREG(st.mode))
-		return -1;
-	if (st.size != (int)st.size)
-		return -1;
-	return st.size;
-}
-
 static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
 {
 	int size;
 	char *buf;
 	int rc;
 
-	size = fw_file_size(file);
+	if (!S_ISREG(file_inode(file)->i_mode))
+		return -EINVAL;
+	size = i_size_read(file_inode(file));
 	if (size <= 0)
 		return -EINVAL;
 	buf = vmalloc(size);
@@ -718,7 +713,7 @@
 static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
 {
 	struct firmware_buf *buf = fw_priv->buf;
-	int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
+	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;
 
 	/* If the array of pages is too small, grow it... */
 	if (buf->page_array_size < pages_needed) {
@@ -1111,10 +1106,11 @@
 
 	ret = fw_get_filesystem_firmware(device, fw->priv);
 	if (ret) {
-		if (opt_flags & FW_OPT_FALLBACK) {
+		if (!(opt_flags & FW_OPT_NO_WARN))
 			dev_warn(device,
-				 "Direct firmware load failed with error %d\n",
-				 ret);
+				 "Direct firmware load for %s failed with error %d\n",
+				 name, ret);
+		if (opt_flags & FW_OPT_USERHELPER) {
 			dev_warn(device, "Falling back to user helper\n");
 			ret = fw_load_from_user_helper(fw, name, device,
 						       opt_flags, timeout);
@@ -1171,7 +1167,6 @@
 }
 EXPORT_SYMBOL(request_firmware);
 
-#ifdef CONFIG_FW_LOADER_USER_HELPER
 /**
  * request_firmware_direct: - load firmware directly without usermode helper
  * @firmware_p: pointer to firmware image
@@ -1188,12 +1183,12 @@
 {
 	int ret;
 	__module_get(THIS_MODULE);
-	ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT);
+	ret = _request_firmware(firmware_p, name, device,
+				FW_OPT_UEVENT | FW_OPT_NO_WARN);
 	module_put(THIS_MODULE);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(request_firmware_direct);
-#endif
 
 /**
  * release_firmware: - release the resource associated with a firmware image
@@ -1277,7 +1272,7 @@
 	fw_work->context = context;
 	fw_work->cont = cont;
 	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
-		(uevent ? FW_OPT_UEVENT : 0);
+		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
 
 	if (!try_module_get(module)) {
 		kfree(fw_work);
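
A sketch of a caller using request_firmware_direct() now that it is
built unconditionally; the firmware name and mydev_apply_patch() are
hypothetical:

	const struct firmware *fw;

	/*
	 * FW_OPT_NO_WARN keeps a missing optional image quiet, and
	 * there is never a fallback to the user-mode helper.
	 */
	if (request_firmware_direct(&fw, "mydev/patch.bin", dev) == 0) {
		mydev_apply_patch(dev, fw->data, fw->size);
		release_firmware(fw);
	}
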
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index eee48c4..7cc7440 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -23,6 +23,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/idr.h>
 #include <linux/acpi.h>
+#include <linux/limits.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -175,7 +176,7 @@
 
 struct platform_object {
 	struct platform_device pdev;
-	char name[1];
+	char name[];
 };
 
 /**
@@ -201,6 +202,7 @@
 	kfree(pa->pdev.dev.platform_data);
 	kfree(pa->pdev.mfd_cell);
 	kfree(pa->pdev.resource);
+	kfree(pa->pdev.driver_override);
 	kfree(pa);
 }
 
@@ -216,7 +218,7 @@
 {
 	struct platform_object *pa;
 
-	pa = kzalloc(sizeof(struct platform_object) + strlen(name), GFP_KERNEL);
+	pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
 	if (pa) {
 		strcpy(pa->name, name);
 		pa->pdev.name = pa->name;
@@ -708,8 +710,49 @@
 }
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t driver_override_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	char *driver_override, *old = pdev->driver_override, *cp;
+
+	if (count > PATH_MAX)
+		return -EINVAL;
+
+	driver_override = kstrndup(buf, count, GFP_KERNEL);
+	if (!driver_override)
+		return -ENOMEM;
+
+	cp = strchr(driver_override, '\n');
+	if (cp)
+		*cp = '\0';
+
+	if (strlen(driver_override)) {
+		pdev->driver_override = driver_override;
+	} else {
+		kfree(driver_override);
+		pdev->driver_override = NULL;
+	}
+
+	kfree(old);
+
+	return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+
+	return sprintf(buf, "%s\n", pdev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
+
 static struct attribute *platform_dev_attrs[] = {
 	&dev_attr_modalias.attr,
+	&dev_attr_driver_override.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(platform_dev);
@@ -765,6 +808,10 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct platform_driver *pdrv = to_platform_driver(drv);
 
+	/* When driver_override is set, only bind to the matching driver */
+	if (pdev->driver_override)
+		return !strcmp(pdev->driver_override, drv->name);
+
 	/* Attempt an OF style match first */
 	if (of_driver_match_device(dev, drv))
 		return 1;
diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c
deleted file mode 100644
index a73fbf3..0000000
--- a/drivers/base/reservation.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2012-2013 Canonical Ltd
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/reservation.h>
-#include <linux/export.h>
-
-DEFINE_WW_CLASS(reservation_ww_class);
-EXPORT_SYMBOL(reservation_ww_class);
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 6159b77..f2cd6a2d 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -212,9 +212,9 @@
 	mutex_init(&gdev->lock);
 	INIT_LIST_HEAD(&gdev->next);
 
-	gdev->base = devm_request_and_ioremap(&pdev->dev, r);
-	if (!gdev->base)
-		return -ENOMEM;
+	gdev->base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(gdev->base))
+		return PTR_ERR(gdev->base);
 
 	err = devm_request_irq(&pdev->dev, timeout_irq,
 				brcmstb_gisb_timeout_handler, 0, pdev->name,
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
new file mode 100644
index 0000000..57a675f
--- /dev/null
+++ b/drivers/dma-buf/Makefile
@@ -0,0 +1 @@
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
diff --git a/drivers/base/dma-buf.c b/drivers/dma-buf/dma-buf.c
similarity index 81%
rename from drivers/base/dma-buf.c
rename to drivers/dma-buf/dma-buf.c
index 840c7fa..f3014c4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -25,10 +25,13 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
+#include <linux/fence.h>
 #include <linux/anon_inodes.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/poll.h>
+#include <linux/reservation.h>
 
 static inline int is_dma_buf_file(struct file *);
 
@@ -50,12 +53,25 @@
 
 	BUG_ON(dmabuf->vmapping_counter);
 
+	/*
+	 * Any fences that a dma-buf poll can wait on should be signaled
+	 * before releasing dma-buf. This is the responsibility of each
+	 * driver that uses the reservation objects.
+	 *
+	 * If you hit this BUG() it means someone dropped their ref to the
+	 * dma-buf while still having pending operation to the buffer.
+	 */
+	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+
 	dmabuf->ops->release(dmabuf);
 
 	mutex_lock(&db_list.lock);
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
+	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
+		reservation_object_fini(dmabuf->resv);
+
 	kfree(dmabuf);
 	return 0;
 }
@@ -103,10 +119,141 @@
 	return base + offset;
 }
 
+static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcb->poll->lock, flags);
+	wake_up_locked_poll(dcb->poll, dcb->active);
+	dcb->active = 0;
+	spin_unlock_irqrestore(&dcb->poll->lock, flags);
+}
+
+static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+{
+	struct dma_buf *dmabuf;
+	struct reservation_object *resv;
+	struct reservation_object_list *fobj;
+	struct fence *fence_excl;
+	unsigned long events;
+	unsigned shared_count, seq;
+
+	dmabuf = file->private_data;
+	if (!dmabuf || !dmabuf->resv)
+		return POLLERR;
+
+	resv = dmabuf->resv;
+
+	poll_wait(file, &dmabuf->poll, poll);
+
+	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+	if (!events)
+		return 0;
+
+retry:
+	seq = read_seqcount_begin(&resv->seq);
+	rcu_read_lock();
+
+	fobj = rcu_dereference(resv->fence);
+	if (fobj)
+		shared_count = fobj->shared_count;
+	else
+		shared_count = 0;
+	fence_excl = rcu_dereference(resv->fence_excl);
+	if (read_seqcount_retry(&resv->seq, seq)) {
+		rcu_read_unlock();
+		goto retry;
+	}
+
+	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
+		unsigned long pevents = POLLIN;
+
+		if (shared_count == 0)
+			pevents |= POLLOUT;
+
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active) {
+			dcb->active |= pevents;
+			events &= ~pevents;
+		} else
+			dcb->active = pevents;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (events & pevents) {
+			if (!fence_get_rcu(fence_excl)) {
+				/* force a recheck */
+				events &= ~pevents;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			} else if (!fence_add_callback(fence_excl, &dcb->cb,
+						       dma_buf_poll_cb)) {
+				events &= ~pevents;
+				fence_put(fence_excl);
+			} else {
+				/*
+				 * No callback queued, wake up any additional
+				 * waiters.
+				 */
+				fence_put(fence_excl);
+				dma_buf_poll_cb(NULL, &dcb->cb);
+			}
+		}
+	}
+
+	if ((events & POLLOUT) && shared_count > 0) {
+		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
+		int i;
+
+		/* Only queue a new callback if no event has fired yet */
+		spin_lock_irq(&dmabuf->poll.lock);
+		if (dcb->active)
+			events &= ~POLLOUT;
+		else
+			dcb->active = POLLOUT;
+		spin_unlock_irq(&dmabuf->poll.lock);
+
+		if (!(events & POLLOUT))
+			goto out;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
+
+			if (!fence_get_rcu(fence)) {
+				/*
+				 * fence refcount dropped to zero, this means
+				 * that fobj has been freed
+				 *
+				 * call dma_buf_poll_cb and force a recheck!
+				 */
+				events &= ~POLLOUT;
+				dma_buf_poll_cb(NULL, &dcb->cb);
+				break;
+			}
+			if (!fence_add_callback(fence, &dcb->cb,
+						dma_buf_poll_cb)) {
+				fence_put(fence);
+				events &= ~POLLOUT;
+				break;
+			}
+			fence_put(fence);
+		}
+
+		/* No callback queued, wake up any additional waiters. */
+		if (i == shared_count)
+			dma_buf_poll_cb(NULL, &dcb->cb);
+	}
+
+out:
+	rcu_read_unlock();
+	return events;
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
 	.llseek		= dma_buf_llseek,
+	.poll		= dma_buf_poll,
 };
 
 /*
@@ -128,6 +275,7 @@
  * @size:	[in]	Size of the buffer
  * @flags:	[in]	mode flags for the file.
  * @exp_name:	[in]	name of the exporting module - useful for debugging.
+ * @resv:	[in]	reservation-object, NULL to allocate default one.
  *
  * Returns, on success, a newly created dma_buf object, which wraps the
  * supplied private data and operations for dma_buf_ops. On either missing
@@ -135,10 +283,17 @@
  *
  */
 struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
-				size_t size, int flags, const char *exp_name)
+				size_t size, int flags, const char *exp_name,
+				struct reservation_object *resv)
 {
 	struct dma_buf *dmabuf;
 	struct file *file;
+	size_t alloc_size = sizeof(struct dma_buf);
+	if (!resv)
+		alloc_size += sizeof(struct reservation_object);
+	else
+		/* prevent &dma_buf[1] == dma_buf->resv */
+		alloc_size += 1;
 
 	if (WARN_ON(!priv || !ops
 			  || !ops->map_dma_buf
@@ -150,7 +305,7 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (dmabuf == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -158,6 +313,15 @@
 	dmabuf->ops = ops;
 	dmabuf->size = size;
 	dmabuf->exp_name = exp_name;
+	init_waitqueue_head(&dmabuf->poll);
+	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
+	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+
+	if (!resv) {
+		resv = (struct reservation_object *)&dmabuf[1];
+		reservation_object_init(resv);
+	}
+	dmabuf->resv = resv;
 
 	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
 	if (IS_ERR(file)) {
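
For illustration, a userspace sketch of the new poll() semantics on a
dma-buf file descriptor: POLLIN becomes ready once the exclusive fence
signals, POLLOUT once all shared and exclusive fences have signaled:

	#include <poll.h>

	/* returns >0 when the buffer is safe to write, 0 on timeout */
	static int wait_for_writable(int dmabuf_fd, int timeout_ms)
	{
		struct pollfd pfd = {
			.fd = dmabuf_fd,
			.events = POLLOUT,
		};

		return poll(&pfd, 1, timeout_ms);
	}
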
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
new file mode 100644
index 0000000..4222cb2
--- /dev/null
+++ b/drivers/dma-buf/fence.c
@@ -0,0 +1,431 @@
+/*
+ * Fence mechanism for dma-buf and to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/atomic.h>
+#include <linux/fence.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fence.h>
+
+EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
+EXPORT_TRACEPOINT_SYMBOL(fence_emit);
+
+/**
+ * fence context counter: each execution context should have its own
+ * fence context, this allows checking if fences belong to the same
+ * context or not. One device can have multiple separate contexts,
+ * and they're used if some engine can run independently of another.
+ */
+static atomic_t fence_context_counter = ATOMIC_INIT(0);
+
+/**
+ * fence_context_alloc - allocate an array of fence contexts
+ * @num:	[in]	amount of contexts to allocate
+ *
+ * This function returns the first index of the allocated fence contexts.
+ * The fence context is used for setting fence->context to a unique number.
+ */
+unsigned fence_context_alloc(unsigned num)
+{
+	BUG_ON(!num);
+	return atomic_add_return(num, &fence_context_counter) - num;
+}
+EXPORT_SYMBOL(fence_context_alloc);
+
+/**
+ * fence_signal_locked - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * fence_wait() calls and run all the callbacks added with
+ * fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from unsignaled to signaled state, it will only be effective
+ * the first time.
+ *
+ * Unlike fence_signal, this function must be called with fence->lock held.
+ */
+int fence_signal_locked(struct fence *fence)
+{
+	struct fence_cb *cur, *tmp;
+	int ret = 0;
+
+	if (WARN_ON(!fence))
+		return -EINVAL;
+
+	if (!ktime_to_ns(fence->timestamp)) {
+		fence->timestamp = ktime_get();
+		smp_mb__before_atomic();
+	}
+
+	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		ret = -EINVAL;
+
+		/*
+		 * we might have raced with the unlocked fence_signal,
+		 * still run through all callbacks
+		 */
+	} else
+		trace_fence_signaled(fence);
+
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		list_del_init(&cur->node);
+		cur->func(fence, cur);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(fence_signal_locked);
+
+/**
+ * fence_signal - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * fence_wait() calls and run all the callbacks added with
+ * fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from unsignaled to signaled state, it will only be effective
+ * the first time.
+ */
+int fence_signal(struct fence *fence)
+{
+	unsigned long flags;
+
+	if (!fence)
+		return -EINVAL;
+
+	if (!ktime_to_ns(fence->timestamp)) {
+		fence->timestamp = ktime_get();
+		smp_mb__before_atomic();
+	}
+
+	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return -EINVAL;
+
+	trace_fence_signaled(fence);
+
+	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+		struct fence_cb *cur, *tmp;
+
+		spin_lock_irqsave(fence->lock, flags);
+		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+			list_del_init(&cur->node);
+			cur->func(fence, cur);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(fence_signal);
+
+/**
+ * fence_wait_timeout - sleep until the fence gets signaled
+ * or until timeout elapses
+ * @fence:	[in]	the fence to wait on
+ * @intr:	[in]	if true, do an interruptible wait
+ * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
+ * remaining timeout in jiffies on success. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly (buf-mgr between reservation and committing)
+ * holds a reference to the fence, otherwise the fence might be
+ * freed before return, resulting in undefined behavior.
+ */
+signed long
+fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
+{
+	signed long ret;
+
+	if (WARN_ON(timeout < 0))
+		return -EINVAL;
+
+	trace_fence_wait_start(fence);
+	ret = fence->ops->wait(fence, intr, timeout);
+	trace_fence_wait_end(fence);
+	return ret;
+}
+EXPORT_SYMBOL(fence_wait_timeout);
+
+void fence_release(struct kref *kref)
+{
+	struct fence *fence =
+			container_of(kref, struct fence, refcount);
+
+	trace_fence_destroy(fence);
+
+	BUG_ON(!list_empty(&fence->cb_list));
+
+	if (fence->ops->release)
+		fence->ops->release(fence);
+	else
+		fence_free(fence);
+}
+EXPORT_SYMBOL(fence_release);
+
+void fence_free(struct fence *fence)
+{
+	kfree_rcu(fence, rcu);
+}
+EXPORT_SYMBOL(fence_free);
+
+/**
+ * fence_enable_sw_signaling - enable signaling on fence
+ * @fence:	[in]	the fence to enable
+ *
+ * this will request sw signaling to be enabled, to make the fence
+ * complete as soon as possible
+ */
+void fence_enable_sw_signaling(struct fence *fence)
+{
+	unsigned long flags;
+
+	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
+	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		trace_fence_enable_signal(fence);
+
+		spin_lock_irqsave(fence->lock, flags);
+
+		if (!fence->ops->enable_signaling(fence))
+			fence_signal_locked(fence);
+
+		spin_unlock_irqrestore(fence->lock, flags);
+	}
+}
+EXPORT_SYMBOL(fence_enable_sw_signaling);
+
+/**
+ * fence_add_callback - add a callback to be called when the fence
+ * is signaled
+ * @fence:	[in]	the fence to wait on
+ * @cb:		[in]	the callback to register
+ * @func:	[in]	the function to call
+ *
+ * cb will be initialized by fence_add_callback, no initialization
+ * by the caller is required. Any number of callbacks can be registered
+ * to a fence, but a callback can only be registered to one fence at a time.
+ *
+ * Note that the callback can be called from an atomic context.  If
+ * fence is already signaled, this function will return -ENOENT (and
+ * *not* call the callback)
+ *
+ * Add a software callback to the fence. Same restrictions apply to
+ * refcount as it does to fence_wait, however the caller doesn't need to
+ * keep a refcount to fence afterwards: when software access is enabled,
+ * the creator of the fence is required to keep the fence alive until
+ * after it signals with fence_signal. The callback itself can be called
+ * from irq context.
+ *
+ */
+int fence_add_callback(struct fence *fence, struct fence_cb *cb,
+		       fence_func_t func)
+{
+	unsigned long flags;
+	int ret = 0;
+	bool was_set;
+
+	if (WARN_ON(!fence || !func))
+		return -EINVAL;
+
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+		INIT_LIST_HEAD(&cb->node);
+		return -ENOENT;
+	}
+
+	spin_lock_irqsave(fence->lock, flags);
+
+	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		ret = -ENOENT;
+	else if (!was_set) {
+		trace_fence_enable_signal(fence);
+
+		if (!fence->ops->enable_signaling(fence)) {
+			fence_signal_locked(fence);
+			ret = -ENOENT;
+		}
+	}
+
+	if (!ret) {
+		cb->func = func;
+		list_add_tail(&cb->node, &fence->cb_list);
+	} else
+		INIT_LIST_HEAD(&cb->node);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(fence_add_callback);
+
+/**
+ * fence_remove_callback - remove a callback from the signaling list
+ * @fence:	[in]	the fence to wait on
+ * @cb:		[in]	the callback to remove
+ *
+ * Remove a previously queued callback from the fence. This function returns
+ * true if the callback is successfully removed, or false if the fence has
+ * already been signaled.
+ *
+ * *WARNING*:
+ * Cancelling a callback should only be done if you really know what you're
+ * doing, since deadlocks and race conditions could occur all too easily. For
+ * this reason, it should only ever be done on hardware lockup recovery,
+ * with a reference held to the fence.
+ */
+bool
+fence_remove_callback(struct fence *fence, struct fence_cb *cb)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(fence->lock, flags);
+
+	ret = !list_empty(&cb->node);
+	if (ret)
+		list_del_init(&cb->node);
+
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(fence_remove_callback);
+
+struct default_wait_cb {
+	struct fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct default_wait_cb *wait =
+		container_of(cb, struct default_wait_cb, base);
+
+	wake_up_state(wait->task, TASK_NORMAL);
+}
+
+/**
+ * fence_default_wait - default sleep until the fence gets signaled
+ * or until timeout elapses
+ * @fence:	[in]	the fence to wait on
+ * @intr:	[in]	if true, do an interruptible wait
+ * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
+ * remaining timeout in jiffies on success.
+ */
+signed long
+fence_default_wait(struct fence *fence, bool intr, signed long timeout)
+{
+	struct default_wait_cb cb;
+	unsigned long flags;
+	signed long ret = timeout;
+	bool was_set;
+
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return timeout;
+
+	spin_lock_irqsave(fence->lock, flags);
+
+	if (intr && signal_pending(current)) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
+	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
+
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		goto out;
+
+	if (!was_set) {
+		trace_fence_enable_signal(fence);
+
+		if (!fence->ops->enable_signaling(fence)) {
+			fence_signal_locked(fence);
+			goto out;
+		}
+	}
+
+	cb.base.func = fence_default_wait_cb;
+	cb.task = current;
+	list_add(&cb.base.node, &fence->cb_list);
+
+	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+		if (intr)
+			__set_current_state(TASK_INTERRUPTIBLE);
+		else
+			__set_current_state(TASK_UNINTERRUPTIBLE);
+		spin_unlock_irqrestore(fence->lock, flags);
+
+		ret = schedule_timeout(ret);
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (ret > 0 && intr && signal_pending(current))
+			ret = -ERESTARTSYS;
+	}
+
+	if (!list_empty(&cb.base.node))
+		list_del(&cb.base.node);
+	__set_current_state(TASK_RUNNING);
+
+out:
+	spin_unlock_irqrestore(fence->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(fence_default_wait);
+
+/**
+ * fence_init - Initialize a custom fence.
+ * @fence:	[in]	the fence to initialize
+ * @ops:	[in]	the fence_ops for operations on this fence
+ * @lock:	[in]	the irqsafe spinlock to use for locking this fence
+ * @context:	[in]	the execution context this fence is run on
+ * @seqno:	[in]	a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence, the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if fence_ops.enable_signaling gets called. This can
+ * be used for implementing other types of fence.
+ *
+ * context and seqno are used for easy comparison between fences, allowing
+ * one to check which fence is later by simply using fence_later.
+ */
+void
+fence_init(struct fence *fence, const struct fence_ops *ops,
+	     spinlock_t *lock, unsigned context, unsigned seqno)
+{
+	BUG_ON(!lock);
+	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
+	       !ops->get_driver_name || !ops->get_timeline_name);
+
+	kref_init(&fence->refcount);
+	fence->ops = ops;
+	INIT_LIST_HEAD(&fence->cb_list);
+	fence->lock = lock;
+	fence->context = context;
+	fence->seqno = seqno;
+	fence->flags = 0UL;
+
+	trace_fence_init(fence);
+}
+EXPORT_SYMBOL(fence_init);
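
As an illustration of the ops contract that fence_init() enforces, a
minimal sketch of a software-signaled fence; all my_* names are
hypothetical and the context would normally come from
fence_context_alloc():

	#include <linux/fence.h>
	#include <linux/slab.h>

	static const char *my_get_driver_name(struct fence *f)
	{
		return "my-driver";
	}

	static const char *my_get_timeline_name(struct fence *f)
	{
		return "my-timeline";
	}

	static bool my_enable_signaling(struct fence *f)
	{
		/* completion is driven from software via fence_signal() */
		return true;
	}

	static const struct fence_ops my_fence_ops = {
		.get_driver_name = my_get_driver_name,
		.get_timeline_name = my_get_timeline_name,
		.enable_signaling = my_enable_signaling,
		.wait = fence_default_wait,
	};

	static DEFINE_SPINLOCK(my_fence_lock);

	static struct fence *my_fence_create(unsigned context, unsigned seqno)
	{
		struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			fence_init(f, &my_fence_ops, &my_fence_lock,
				   context, seqno);
		return f;
	}

The producer later calls fence_signal() on the returned fence;
consumers wait with fence_wait_timeout() or fence_add_callback().
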
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
new file mode 100644
index 0000000..3c97c8f
--- /dev/null
+++ b/drivers/dma-buf/reservation.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/reservation.h>
+#include <linux/export.h>
+
+DEFINE_WW_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
+
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
+/*
+ * Reserve space to add a shared fence to a reservation_object,
+ * must be called with obj->lock held.
+ */
+int reservation_object_reserve_shared(struct reservation_object *obj)
+{
+	struct reservation_object_list *fobj, *old;
+	u32 max;
+
+	old = reservation_object_get_list(obj);
+
+	if (old && old->shared_max) {
+		if (old->shared_count < old->shared_max) {
+			/* perform an in-place update */
+			kfree(obj->staged);
+			obj->staged = NULL;
+			return 0;
+		} else
+			max = old->shared_max * 2;
+	} else
+		max = 4;
+
+	/*
+	 * resize obj->staged or allocate if it doesn't exist,
+	 * noop if already correct size
+	 */
+	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
+			GFP_KERNEL);
+	if (!fobj)
+		return -ENOMEM;
+
+	obj->staged = fobj;
+	fobj->shared_max = max;
+	return 0;
+}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
+
+static void
+reservation_object_add_shared_inplace(struct reservation_object *obj,
+				      struct reservation_object_list *fobj,
+				      struct fence *fence)
+{
+	u32 i;
+
+	fence_get(fence);
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+
+	for (i = 0; i < fobj->shared_count; ++i) {
+		struct fence *old_fence;
+
+		old_fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(obj));
+
+		if (old_fence->context == fence->context) {
+			/* memory barrier is added by write_seqcount_begin */
+			RCU_INIT_POINTER(fobj->shared[i], fence);
+			write_seqcount_end(&obj->seq);
+			preempt_enable();
+
+			fence_put(old_fence);
+			return;
+		}
+	}
+
+	/*
+	 * memory barrier is added by write_seqcount_begin,
+	 * fobj->shared_count is protected by this lock too
+	 */
+	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+	fobj->shared_count++;
+
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+}
+
+static void
+reservation_object_add_shared_replace(struct reservation_object *obj,
+				      struct reservation_object_list *old,
+				      struct reservation_object_list *fobj,
+				      struct fence *fence)
+{
+	unsigned i;
+	struct fence *old_fence = NULL;
+
+	fence_get(fence);
+
+	if (!old) {
+		RCU_INIT_POINTER(fobj->shared[0], fence);
+		fobj->shared_count = 1;
+		goto done;
+	}
+
+	/*
+	 * no need to bump fence refcounts, rcu_read access
+	 * requires the use of kref_get_unless_zero, and the
+	 * references from the old struct are carried over to
+	 * the new.
+	 */
+	fobj->shared_count = old->shared_count;
+
+	for (i = 0; i < old->shared_count; ++i) {
+		struct fence *check;
+
+		check = rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj));
+
+		if (!old_fence && check->context == fence->context) {
+			old_fence = check;
+			RCU_INIT_POINTER(fobj->shared[i], fence);
+		} else
+			RCU_INIT_POINTER(fobj->shared[i], check);
+	}
+	if (!old_fence) {
+		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
+		fobj->shared_count++;
+	}
+
+done:
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/*
+	 * RCU_INIT_POINTER can be used here,
+	 * seqcount provides the necessary barriers
+	 */
+	RCU_INIT_POINTER(obj->fence, fobj);
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	if (old)
+		kfree_rcu(old, rcu);
+
+	if (old_fence)
+		fence_put(old_fence);
+}
+
+/*
+ * Add a fence to a shared slot, obj->lock must be held, and
+ * reservation_object_reserve_shared_fence has been called.
+ */
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+					 struct fence *fence)
+{
+	struct reservation_object_list *old, *fobj = obj->staged;
+
+	old = reservation_object_get_list(obj);
+	obj->staged = NULL;
+
+	if (!fobj) {
+		BUG_ON(old->shared_count >= old->shared_max);
+		reservation_object_add_shared_inplace(obj, old, fence);
+	} else
+		reservation_object_add_shared_replace(obj, old, fobj, fence);
+}
+EXPORT_SYMBOL(reservation_object_add_shared_fence);
+
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+				       struct fence *fence)
+{
+	struct fence *old_fence = reservation_object_get_excl(obj);
+	struct reservation_object_list *old;
+	u32 i = 0;
+
+	old = reservation_object_get_list(obj);
+	if (old)
+		i = old->shared_count;
+
+	if (fence)
+		fence_get(fence);
+
+	preempt_disable();
+	write_seqcount_begin(&obj->seq);
+	/* write_seqcount_begin provides the necessary memory barrier */
+	RCU_INIT_POINTER(obj->fence_excl, fence);
+	if (old)
+		old->shared_count = 0;
+	write_seqcount_end(&obj->seq);
+	preempt_enable();
+
+	/* inplace update, no shared fences */
+	while (i--)
+		fence_put(rcu_dereference_protected(old->shared[i],
+						reservation_object_held(obj)));
+
+	if (old_fence)
+		fence_put(old_fence);
+}
+EXPORT_SYMBOL(reservation_object_add_excl_fence);
+
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+				      struct fence **pfence_excl,
+				      unsigned *pshared_count,
+				      struct fence ***pshared)
+{
+	unsigned shared_count = 0;
+	unsigned retry = 1;
+	struct fence **shared = NULL, *fence_excl = NULL;
+	int ret = 0;
+
+	while (retry) {
+		struct reservation_object_list *fobj;
+		unsigned seq;
+
+		seq = read_seqcount_begin(&obj->seq);
+
+		rcu_read_lock();
+
+		fobj = rcu_dereference(obj->fence);
+		if (fobj) {
+			struct fence **nshared;
+			size_t sz = sizeof(*shared) * fobj->shared_max;
+
+			nshared = krealloc(shared, sz,
+					   GFP_NOWAIT | __GFP_NOWARN);
+			if (!nshared) {
+				rcu_read_unlock();
+				nshared = krealloc(shared, sz, GFP_KERNEL);
+				if (nshared) {
+					shared = nshared;
+					continue;
+				}
+
+				ret = -ENOMEM;
+				shared_count = 0;
+				break;
+			}
+			shared = nshared;
+			memcpy(shared, fobj->shared, sz);
+			shared_count = fobj->shared_count;
+		} else
+			shared_count = 0;
+		fence_excl = rcu_dereference(obj->fence_excl);
+
+		retry = read_seqcount_retry(&obj->seq, seq);
+		if (retry)
+			goto unlock;
+
+		if (!fence_excl || fence_get_rcu(fence_excl)) {
+			unsigned i;
+
+			for (i = 0; i < shared_count; ++i) {
+				if (fence_get_rcu(shared[i]))
+					continue;
+
+				/* uh oh, refcount failed, abort and retry */
+				while (i--)
+					fence_put(shared[i]);
+
+				if (fence_excl) {
+					fence_put(fence_excl);
+					fence_excl = NULL;
+				}
+
+				retry = 1;
+				break;
+			}
+		} else
+			retry = 1;
+
+unlock:
+		rcu_read_unlock();
+	}
+	*pshared_count = shared_count;
+	if (shared_count)
+		*pshared = shared;
+	else {
+		*pshared = NULL;
+		kfree(shared);
+	}
+	*pfence_excl = fence_excl;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+					 bool wait_all, bool intr,
+					 unsigned long timeout)
+{
+	struct fence *fence;
+	unsigned seq, shared_count, i = 0;
+	long ret = timeout;
+
+retry:
+	fence = NULL;
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+
+	if (wait_all) {
+		struct reservation_object_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *lfence = rcu_dereference(fobj->shared[i]);
+
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
+				continue;
+
+			if (!fence_get_rcu(lfence))
+				goto unlock_retry;
+
+			if (fence_is_signaled(lfence)) {
+				fence_put(lfence);
+				continue;
+			}
+
+			fence = lfence;
+			break;
+		}
+	}
+
+	if (!shared_count) {
+		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		if (fence_excl &&
+		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
+			if (!fence_get_rcu(fence_excl))
+				goto unlock_retry;
+
+			if (fence_is_signaled(fence_excl))
+				fence_put(fence_excl);
+			else
+				fence = fence_excl;
+		}
+	}
+
+	rcu_read_unlock();
+	if (fence) {
+		ret = fence_wait_timeout(fence, intr, ret);
+		fence_put(fence);
+		if (ret > 0 && wait_all && (i + 1 < shared_count))
+			goto retry;
+	}
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+
+
+static inline int
+reservation_object_test_signaled_single(struct fence *passed_fence)
+{
+	struct fence *fence, *lfence = passed_fence;
+	int ret = 1;
+
+	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+		fence = fence_get_rcu(lfence);
+		if (!fence)
+			return -1;
+
+		ret = !!fence_is_signaled(fence);
+		fence_put(fence);
+	}
+	return ret;
+}
+
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+					  bool test_all)
+{
+	unsigned seq, shared_count;
+	int ret = true;
+
+retry:
+	shared_count = 0;
+	seq = read_seqcount_begin(&obj->seq);
+	rcu_read_lock();
+
+	if (test_all) {
+		unsigned i;
+
+		struct reservation_object_list *fobj = rcu_dereference(obj->fence);
+
+		if (fobj)
+			shared_count = fobj->shared_count;
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		for (i = 0; i < shared_count; ++i) {
+			struct fence *fence = rcu_dereference(fobj->shared[i]);
+
+			ret = reservation_object_test_signaled_single(fence);
+			if (ret < 0)
+				goto unlock_retry;
+			else if (!ret)
+				break;
+		}
+
+		/*
+		 * There could be a read_seqcount_retry here, but nothing cares
+		 * about whether it's the old or newer fence pointers that are
+		 * signaled. That race could still have happened after checking
+		 * read_seqcount_retry. If you care, use ww_mutex_lock.
+		 */
+	}
+
+	if (!shared_count) {
+		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto unlock_retry;
+
+		if (fence_excl) {
+			ret = reservation_object_test_signaled_single(fence_excl);
+			if (ret < 0)
+				goto unlock_retry;
+		}
+	}
+
+	rcu_read_unlock();
+	return ret;
+
+unlock_retry:
+	rcu_read_unlock();
+	goto retry;
+}
+EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
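
A sketch of the write-side contract; my_attach_shared() is
hypothetical, and with a NULL ww_acquire_ctx the lock cannot fail with
-EDEADLK:

	static int my_attach_shared(struct reservation_object *resv,
				    struct fence *fence)
	{
		int ret;

		ww_mutex_lock(&resv->lock, NULL);
		ret = reservation_object_reserve_shared(resv);
		if (!ret)
			reservation_object_add_shared_fence(resv, fence);
		ww_mutex_unlock(&resv->lock);
		return ret;
	}

On the read side, reservation_object_wait_timeout_rcu(resv, true, true,
MAX_SCHEDULE_TIMEOUT) waits for all fences without taking the lock.
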
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
new file mode 100644
index 0000000..7d12a39
--- /dev/null
+++ b/drivers/dma-buf/seqno-fence.c
@@ -0,0 +1,73 @@
+/*
+ * seqno-fence, using a dma-buf to synchronize fencing
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2012-2014 Canonical Ltd
+ * Authors:
+ *   Rob Clark <robdclark@gmail.com>
+ *   Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/seqno-fence.h>
+
+static const char *seqno_fence_get_driver_name(struct fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+	return seqno_fence->ops->get_driver_name(fence);
+}
+
+static const char *seqno_fence_get_timeline_name(struct fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+	return seqno_fence->ops->get_timeline_name(fence);
+}
+
+static bool seqno_enable_signaling(struct fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+	return seqno_fence->ops->enable_signaling(fence);
+}
+
+static bool seqno_signaled(struct fence *fence)
+{
+	struct seqno_fence *seqno_fence = to_seqno_fence(fence);
+	return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
+}
+
+static void seqno_release(struct fence *fence)
+{
+	struct seqno_fence *f = to_seqno_fence(fence);
+
+	dma_buf_put(f->sync_buf);
+	if (f->ops->release)
+		f->ops->release(fence);
+	else
+		fence_free(&f->base);
+}
+
+static signed long seqno_wait(struct fence *fence, bool intr, signed long timeout)
+{
+	struct seqno_fence *f = to_seqno_fence(fence);
+	return f->ops->wait(fence, intr, timeout);
+}
+
+const struct fence_ops seqno_fence_ops = {
+	.get_driver_name = seqno_fence_get_driver_name,
+	.get_timeline_name = seqno_fence_get_timeline_name,
+	.enable_signaling = seqno_enable_signaling,
+	.signaled = seqno_signaled,
+	.wait = seqno_wait,
+	.release = seqno_release,
+};
+EXPORT_SYMBOL(seqno_fence_ops);
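struct seqno_fence embeds a struct fence together with a pointer to the
driver's own struct fence_ops, so every callback above is a trampoline that
recovers the wrapper with to_seqno_fence() and forwards to the driver. A
driver therefore supplies only its own ops table and initializes the wrapper;
a hedged sketch, assuming the seqno_fence_init() helper and the
SEQNO_FENCE_WAIT_GEQUAL condition declared in include/linux/seqno-fence.h,
with all mydrv_* names hypothetical:

    static const struct fence_ops mydrv_fence_ops = {
            .get_driver_name = mydrv_get_driver_name,
            .get_timeline_name = mydrv_get_timeline_name,
            .enable_signaling = mydrv_enable_signaling,
            .wait = fence_default_wait,
            /* .signaled and .release are optional; see seqno_signaled()
             * and seqno_release() above. */
    };

    /* sync_buf holds the memory the seqno is read from; the init helper
     * is assumed to take its own dma-buf reference, dropped again in
     * seqno_release() above. */
    seqno_fence_init(f, &mydrv_lock, sync_buf, context, seqno_ofs,
                     seqno, SEQNO_FENCE_WAIT_GEQUAL, &mydrv_fence_ops);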
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 81c34f9..3aedf9e 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1039,11 +1039,9 @@
 	if (ret)
 		return ret;
 
-	base = devm_request_and_ioremap(dev->dev, res);
-	if (!base) {
-		DRM_ERROR("failed to ioremap register\n");
-		return -ENOMEM;
-	}
+	base = devm_ioremap_resource(dev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
 
 	dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
 	if (!dcrtc) {
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index bb9b642..7496f55 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -539,7 +539,7 @@
 	int flags)
 {
 	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
-			      O_RDWR);
+			      O_RDWR, NULL);
 }
 
 struct drm_gem_object *
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 304ca8c..99d578b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -336,7 +336,13 @@
 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 				     struct drm_gem_object *obj, int flags)
 {
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+	struct reservation_object *robj = NULL;
+
+	if (dev->driver->gem_prime_res_obj)
+		robj = dev->driver->gem_prime_res_obj(obj);
+
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+			      flags, robj);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
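The new hook lets an exporter hand out the reservation object that already
guards its buffer, so fences attached through one device become visible to
every importer. TTM-based drivers return the object embedded in their buffer
object, as the nouveau and radeon hunks below do; a hypothetical non-TTM
driver that embeds its own reservation_object would look much the same (all
mydrv_* names are assumptions):

    static struct reservation_object *
    mydrv_gem_prime_res_obj(struct drm_gem_object *obj)
    {
            struct mydrv_gem_object *bo = to_mydrv_gem_object(obj);

            return &bo->resv;       /* shared with dma_buf_export() */
    }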
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 2a3ad24..60192ed 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -187,7 +187,7 @@
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 
 	return dma_buf_export(obj, &exynos_dmabuf_ops,
-				exynos_gem_obj->base.size, flags);
+				exynos_gem_obj->base.size, flags, NULL);
 }
 
 struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 580aa42..82a1f4b 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -237,7 +237,8 @@
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
+	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
+			      NULL);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5425ffe..c9428c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -845,6 +845,7 @@
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = nouveau_gem_prime_pin,
+	.gem_prime_res_obj = nouveau_gem_prime_res_obj,
 	.gem_prime_unpin = nouveau_gem_prime_unpin,
 	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 7caca05..ddab762 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,6 +35,7 @@
 				  struct drm_file *);
 
 extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
 extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
 extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
 extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 51a2cb1..1f51008 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -102,3 +102,10 @@
 
 	nouveau_bo_unpin(nvbo);
 }
+
+struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+	return nvbo->bo.resv;
+}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 4fcca8d..a2dbfb1 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -171,7 +171,7 @@
 struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
 		struct drm_gem_object *obj, int flags)
 {
-	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
+	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
 }
 
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb14213..5fa62a1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -132,6 +132,7 @@
 							struct sg_table *sg);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
 void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
 void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -566,6 +567,7 @@
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = radeon_gem_prime_pin,
 	.gem_prime_unpin = radeon_gem_prime_unpin,
+	.gem_prime_res_obj = radeon_gem_prime_res_obj,
 	.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
 	.gem_prime_vmap = radeon_gem_prime_vmap,
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 2007456..28d7107 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -103,3 +103,11 @@
 	radeon_bo_unpin(bo);
 	radeon_bo_unreserve(bo);
 }
+
+struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+	return bo->tbo.resv;
+}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index aa85b7b..78cc814 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -420,7 +420,7 @@
 				       int flags)
 {
 	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
-			      flags);
+			      flags, NULL);
 }
 
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index d2a0533..12c8711 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -695,7 +695,7 @@
 		}
 
 		dma_buf = dma_buf_export(prime, &tdev->ops,
-					 prime->size, flags);
+					 prime->size, flags, NULL);
 		if (IS_ERR(dma_buf)) {
 			ret = PTR_ERR(dma_buf);
 			ttm_mem_global_free(tdev->mem_glob,
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 880be07..c4e4dfa 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -404,7 +404,7 @@
 	if (WARN_ON(!buf->sgt_base))
 		return NULL;
 
-	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
+	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
 	if (IS_ERR(dbuf))
 		return NULL;
 
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 99e484f..51607e9 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -88,6 +88,7 @@
 	bool "Synchronization framework"
 	default n
 	select ANON_INODES
+	select DMA_SHARED_BUFFER
 	---help---
 	  This option enables the framework for synchronization between multiple
 	  drivers.  Sync implementations can take advantage of hardware
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 0a01e191..517ad5f 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -9,5 +9,5 @@
 obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
 obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o
-obj-$(CONFIG_SYNC)			+= sync.o
+obj-$(CONFIG_SYNC)			+= sync.o sync_debug.o
 obj-$(CONFIG_SW_SYNC)			+= sw_sync.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 389b8f6..2703609 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1120,7 +1120,8 @@
 	ion_buffer_get(buffer);
 	mutex_unlock(&client->lock);
 
-	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR,
+				NULL);
 	if (IS_ERR(dmabuf)) {
 		ion_buffer_put(buffer);
 		return dmabuf;
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index 12a136e..a76db3f 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -50,7 +50,7 @@
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt->parent;
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
 
 	return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
 }
@@ -59,7 +59,7 @@
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt->parent;
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
 
 	return sw_sync_cmp(obj->value, pt->value) >= 0;
 }
@@ -97,7 +97,6 @@
 				       char *str, int size)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
 	snprintf(str, size, "%d", pt->value);
 }
 
@@ -157,7 +156,6 @@
 static int sw_sync_release(struct inode *inode, struct file *file)
 {
 	struct sw_sync_timeline *obj = file->private_data;
-
 	sync_timeline_destroy(&obj->obj);
 	return 0;
 }
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 18174f7..c9a0c2c 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -31,22 +31,13 @@
 #define CREATE_TRACE_POINTS
 #include "trace/sync.h"
 
-static void sync_fence_signal_pt(struct sync_pt *pt);
-static int _sync_pt_has_signaled(struct sync_pt *pt);
-static void sync_fence_free(struct kref *kref);
-static void sync_dump(void);
-
-static LIST_HEAD(sync_timeline_list_head);
-static DEFINE_SPINLOCK(sync_timeline_list_lock);
-
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
+static const struct fence_ops android_fence_ops;
+static const struct file_operations sync_fence_fops;
 
 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
 					   int size, const char *name)
 {
 	struct sync_timeline *obj;
-	unsigned long flags;
 
 	if (size < sizeof(struct sync_timeline))
 		return NULL;
@@ -57,17 +48,14 @@
 
 	kref_init(&obj->kref);
 	obj->ops = ops;
+	obj->context = fence_context_alloc(1);
 	strlcpy(obj->name, name, sizeof(obj->name));
 
 	INIT_LIST_HEAD(&obj->child_list_head);
+	INIT_LIST_HEAD(&obj->active_list_head);
 	spin_lock_init(&obj->child_list_lock);
 
-	INIT_LIST_HEAD(&obj->active_list_head);
-	spin_lock_init(&obj->active_list_lock);
-
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
-	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+	sync_timeline_debug_add(obj);
 
 	return obj;
 }
@@ -77,11 +65,8 @@
 {
 	struct sync_timeline *obj =
 		container_of(kref, struct sync_timeline, kref);
-	unsigned long flags;
 
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
-	list_del(&obj->sync_timeline_list);
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+	sync_timeline_debug_remove(obj);
 
 	if (obj->ops->release_obj)
 		obj->ops->release_obj(obj);
@@ -89,6 +74,16 @@
 	kfree(obj);
 }
 
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+	kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+	kref_put(&obj->kref, sync_timeline_free);
+}
+
 void sync_timeline_destroy(struct sync_timeline *obj)
 {
 	obj->destroyed = true;
@@ -102,75 +97,33 @@
 	 * signal any children that their parent is going away.
 	 */
 	sync_timeline_signal(obj);
-
-	kref_put(&obj->kref, sync_timeline_free);
+	sync_timeline_put(obj);
 }
 EXPORT_SYMBOL(sync_timeline_destroy);
 
-static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
-{
-	unsigned long flags;
-
-	pt->parent = obj;
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	list_add_tail(&pt->child_list, &obj->child_list_head);
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_timeline_remove_pt(struct sync_pt *pt)
-{
-	struct sync_timeline *obj = pt->parent;
-	unsigned long flags;
-
-	spin_lock_irqsave(&obj->active_list_lock, flags);
-	if (!list_empty(&pt->active_list))
-		list_del_init(&pt->active_list);
-	spin_unlock_irqrestore(&obj->active_list_lock, flags);
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	if (!list_empty(&pt->child_list))
-		list_del_init(&pt->child_list);
-
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
 void sync_timeline_signal(struct sync_timeline *obj)
 {
 	unsigned long flags;
 	LIST_HEAD(signaled_pts);
-	struct list_head *pos, *n;
+	struct sync_pt *pt, *next;
 
 	trace_sync_timeline(obj);
 
-	spin_lock_irqsave(&obj->active_list_lock, flags);
+	spin_lock_irqsave(&obj->child_list_lock, flags);
 
-	list_for_each_safe(pos, n, &obj->active_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, active_list);
-
-		if (_sync_pt_has_signaled(pt)) {
-			list_del_init(pos);
-			list_add(&pt->signaled_list, &signaled_pts);
-			kref_get(&pt->fence->kref);
-		}
+	list_for_each_entry_safe(pt, next, &obj->active_list_head,
+				 active_list) {
+		if (fence_is_signaled_locked(&pt->base))
+			list_del(&pt->active_list);
 	}
 
-	spin_unlock_irqrestore(&obj->active_list_lock, flags);
-
-	list_for_each_safe(pos, n, &signaled_pts) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, signaled_list);
-
-		list_del_init(pos);
-		sync_fence_signal_pt(pt);
-		kref_put(&pt->fence->kref, sync_fence_free);
-	}
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 }
 EXPORT_SYMBOL(sync_timeline_signal);
 
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
+struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
 {
+	unsigned long flags;
 	struct sync_pt *pt;
 
 	if (size < sizeof(struct sync_pt))
@@ -180,87 +133,28 @@
 	if (pt == NULL)
 		return NULL;
 
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	sync_timeline_get(obj);
+	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
+		   obj->context, ++obj->value);
+	list_add_tail(&pt->child_list, &obj->child_list_head);
 	INIT_LIST_HEAD(&pt->active_list);
-	kref_get(&parent->kref);
-	sync_timeline_add_pt(parent, pt);
-
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
 	return pt;
 }
 EXPORT_SYMBOL(sync_pt_create);
 
 void sync_pt_free(struct sync_pt *pt)
 {
-	if (pt->parent->ops->free_pt)
-		pt->parent->ops->free_pt(pt);
-
-	sync_timeline_remove_pt(pt);
-
-	kref_put(&pt->parent->kref, sync_timeline_free);
-
-	kfree(pt);
+	fence_put(&pt->base);
 }
 EXPORT_SYMBOL(sync_pt_free);
 
-/* call with pt->parent->active_list_lock held */
-static int _sync_pt_has_signaled(struct sync_pt *pt)
-{
-	int old_status = pt->status;
-
-	if (!pt->status)
-		pt->status = pt->parent->ops->has_signaled(pt);
-
-	if (!pt->status && pt->parent->destroyed)
-		pt->status = -ENOENT;
-
-	if (pt->status != old_status)
-		pt->timestamp = ktime_get();
-
-	return pt->status;
-}
-
-static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
-{
-	return pt->parent->ops->dup(pt);
-}
-
-/* Adds a sync pt to the active queue.  Called when added to a fence */
-static void sync_pt_activate(struct sync_pt *pt)
-{
-	struct sync_timeline *obj = pt->parent;
-	unsigned long flags;
-	int err;
-
-	spin_lock_irqsave(&obj->active_list_lock, flags);
-
-	err = _sync_pt_has_signaled(pt);
-	if (err != 0)
-		goto out;
-
-	list_add_tail(&pt->active_list, &obj->active_list_head);
-
-out:
-	spin_unlock_irqrestore(&obj->active_list_lock, flags);
-}
-
-static int sync_fence_release(struct inode *inode, struct file *file);
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
-			     unsigned long arg);
-
-
-static const struct file_operations sync_fence_fops = {
-	.release = sync_fence_release,
-	.poll = sync_fence_poll,
-	.unlocked_ioctl = sync_fence_ioctl,
-	.compat_ioctl = sync_fence_ioctl,
-};
-
-static struct sync_fence *sync_fence_alloc(const char *name)
+static struct sync_fence *sync_fence_alloc(int size, const char *name)
 {
 	struct sync_fence *fence;
-	unsigned long flags;
 
-	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
+	fence = kzalloc(size, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
 
@@ -272,16 +166,8 @@
 	kref_init(&fence->kref);
 	strlcpy(fence->name, name, sizeof(fence->name));
 
-	INIT_LIST_HEAD(&fence->pt_list_head);
-	INIT_LIST_HEAD(&fence->waiter_list_head);
-	spin_lock_init(&fence->waiter_list_lock);
-
 	init_waitqueue_head(&fence->wq);
 
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-
 	return fence;
 
 err:
@@ -289,121 +175,43 @@
 	return NULL;
 }
 
+static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+{
+	struct sync_fence_cb *check;
+	struct sync_fence *fence;
+
+	check = container_of(cb, struct sync_fence_cb, cb);
+	fence = check->fence;
+
+	if (atomic_dec_and_test(&fence->status))
+		wake_up_all(&fence->wq);
+}
+
+/* TODO: implement a create which takes more than one sync_pt */
 struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
 {
 	struct sync_fence *fence;
 
-	if (pt->fence)
-		return NULL;
-
-	fence = sync_fence_alloc(name);
+	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
 	if (fence == NULL)
 		return NULL;
 
-	pt->fence = fence;
-	list_add(&pt->pt_list, &fence->pt_list_head);
-	sync_pt_activate(pt);
+	fence->num_fences = 1;
+	atomic_set(&fence->status, 1);
 
-	/*
-	 * signal the fence in case pt was activated before
-	 * sync_pt_activate(pt) was called
-	 */
-	sync_fence_signal_pt(pt);
+	fence_get(&pt->base);
+	fence->cbs[0].sync_pt = &pt->base;
+	fence->cbs[0].fence = fence;
+	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
+			       fence_check_cb_func))
+		atomic_dec(&fence->status);
+
+	sync_fence_debug_add(fence);
 
 	return fence;
 }
 EXPORT_SYMBOL(sync_fence_create);
 
-static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
-{
-	struct list_head *pos;
-
-	list_for_each(pos, &src->pt_list_head) {
-		struct sync_pt *orig_pt =
-			container_of(pos, struct sync_pt, pt_list);
-		struct sync_pt *new_pt = sync_pt_dup(orig_pt);
-
-		if (new_pt == NULL)
-			return -ENOMEM;
-
-		new_pt->fence = dst;
-		list_add(&new_pt->pt_list, &dst->pt_list_head);
-	}
-
-	return 0;
-}
-
-static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
-{
-	struct list_head *src_pos, *dst_pos, *n;
-
-	list_for_each(src_pos, &src->pt_list_head) {
-		struct sync_pt *src_pt =
-			container_of(src_pos, struct sync_pt, pt_list);
-		bool collapsed = false;
-
-		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
-			struct sync_pt *dst_pt =
-				container_of(dst_pos, struct sync_pt, pt_list);
-			/* collapse two sync_pts on the same timeline
-			 * to a single sync_pt that will signal at
-			 * the later of the two
-			 */
-			if (dst_pt->parent == src_pt->parent) {
-				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
-						 == -1) {
-					struct sync_pt *new_pt =
-						sync_pt_dup(src_pt);
-					if (new_pt == NULL)
-						return -ENOMEM;
-
-					new_pt->fence = dst;
-					list_replace(&dst_pt->pt_list,
-						     &new_pt->pt_list);
-					sync_pt_free(dst_pt);
-				}
-				collapsed = true;
-				break;
-			}
-		}
-
-		if (!collapsed) {
-			struct sync_pt *new_pt = sync_pt_dup(src_pt);
-
-			if (new_pt == NULL)
-				return -ENOMEM;
-
-			new_pt->fence = dst;
-			list_add(&new_pt->pt_list, &dst->pt_list_head);
-		}
-	}
-
-	return 0;
-}
-
-static void sync_fence_detach_pts(struct sync_fence *fence)
-{
-	struct list_head *pos, *n;
-
-	list_for_each_safe(pos, n, &fence->pt_list_head) {
-		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
-
-		sync_timeline_remove_pt(pt);
-	}
-}
-
-static void sync_fence_free_pts(struct sync_fence *fence)
-{
-	struct list_head *pos, *n;
-
-	list_for_each_safe(pos, n, &fence->pt_list_head) {
-		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
-
-		sync_pt_free(pt);
-	}
-}
-
 struct sync_fence *sync_fence_fdget(int fd)
 {
 	struct file *file = fget(fd);
@@ -434,197 +242,155 @@
 }
 EXPORT_SYMBOL(sync_fence_install);
 
-static int sync_fence_get_status(struct sync_fence *fence)
+static void sync_fence_add_pt(struct sync_fence *fence,
+			      int *i, struct fence *pt)
 {
-	struct list_head *pos;
-	int status = 1;
+	fence->cbs[*i].sync_pt = pt;
+	fence->cbs[*i].fence = fence;
 
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
-		int pt_status = pt->status;
-
-		if (pt_status < 0) {
-			status = pt_status;
-			break;
-		} else if (status == 1) {
-			status = pt_status;
-		}
+	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
+		fence_get(pt);
+		(*i)++;
 	}
-
-	return status;
 }
 
 struct sync_fence *sync_fence_merge(const char *name,
 				    struct sync_fence *a, struct sync_fence *b)
 {
+	int num_fences = a->num_fences + b->num_fences;
 	struct sync_fence *fence;
-	struct list_head *pos;
-	int err;
+	int i, i_a, i_b;
+	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
 
-	fence = sync_fence_alloc(name);
+	fence = sync_fence_alloc(size, name);
 	if (fence == NULL)
 		return NULL;
 
-	err = sync_fence_copy_pts(fence, a);
-	if (err < 0)
-		goto err;
-
-	err = sync_fence_merge_pts(fence, b);
-	if (err < 0)
-		goto err;
-
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, pt_list);
-		sync_pt_activate(pt);
-	}
+	atomic_set(&fence->status, num_fences);
 
 	/*
-	 * signal the fence in case one of it's pts were activated before
-	 * they were activated
+	 * Assume sync_fence a and b are both ordered and have no
+	 * duplicates with the same context.
+	 *
+	 * If a sync_fence can only be created with sync_fence_merge
+	 * and sync_fence_create, this is a reasonable assumption.
 	 */
-	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
-					      struct sync_pt,
-					      pt_list));
+	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
+		struct fence *pt_a = a->cbs[i_a].sync_pt;
+		struct fence *pt_b = b->cbs[i_b].sync_pt;
 
+		if (pt_a->context < pt_b->context) {
+			sync_fence_add_pt(fence, &i, pt_a);
+
+			i_a++;
+		} else if (pt_a->context > pt_b->context) {
+			sync_fence_add_pt(fence, &i, pt_b);
+
+			i_b++;
+		} else {
+			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+				sync_fence_add_pt(fence, &i, pt_a);
+			else
+				sync_fence_add_pt(fence, &i, pt_b);
+
+			i_a++;
+			i_b++;
+		}
+	}
+
+	for (; i_a < a->num_fences; i_a++)
+		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
+
+	for (; i_b < b->num_fences; i_b++)
+		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
+
+	if (num_fences > i)
+		atomic_sub(num_fences - i, &fence->status);
+	fence->num_fences = i;
+
+	sync_fence_debug_add(fence);
 	return fence;
-err:
-	sync_fence_free_pts(fence);
-	kfree(fence);
-	return NULL;
 }
 EXPORT_SYMBOL(sync_fence_merge);
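When both input fences carry a point on the same context, the merge keeps
whichever signals later. Since seqnos are unsigned 32-bit values that may
wrap, "later" cannot be a plain comparison; the unsigned distance test above
(pt_a->seqno - pt_b->seqno <= INT_MAX) is the usual wraparound-safe ordering.
A sketch of the predicate in isolation, with a hypothetical helper name:

    /* True when a equals b or signals after b: the unsigned distance
     * from b to a must land in the positive half of the 32-bit range. */
    static bool seqno_later_or_equal(u32 a, u32 b)
    {
            return a - b <= INT_MAX;
    }

    /* e.g. seqno_later_or_equal(2, 0xfffffffeU) is true: 2 - 0xfffffffe
     * wraps to 4, so a point numbered 2 after a wrap is the later one. */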
 
-static void sync_fence_signal_pt(struct sync_pt *pt)
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+				 int wake_flags, void *key)
 {
-	LIST_HEAD(signaled_waiters);
-	struct sync_fence *fence = pt->fence;
-	struct list_head *pos;
-	struct list_head *n;
-	unsigned long flags;
-	int status;
+	struct sync_fence_waiter *wait;
 
-	status = sync_fence_get_status(fence);
+	wait = container_of(curr, struct sync_fence_waiter, work);
+	list_del_init(&wait->work.task_list);
 
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
-	/*
-	 * this should protect against two threads racing on the signaled
-	 * false -> true transition
-	 */
-	if (status && !fence->status) {
-		list_for_each_safe(pos, n, &fence->waiter_list_head)
-			list_move(pos, &signaled_waiters);
-
-		fence->status = status;
-	} else {
-		status = 0;
-	}
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
-
-	if (status) {
-		list_for_each_safe(pos, n, &signaled_waiters) {
-			struct sync_fence_waiter *waiter =
-				container_of(pos, struct sync_fence_waiter,
-					     waiter_list);
-
-			list_del(pos);
-			waiter->callback(fence, waiter);
-		}
-		wake_up(&fence->wq);
-	}
+	wait->callback(wait->work.private, wait);
+	return 1;
 }
 
 int sync_fence_wait_async(struct sync_fence *fence,
 			  struct sync_fence_waiter *waiter)
 {
+	int err = atomic_read(&fence->status);
 	unsigned long flags;
-	int err = 0;
 
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
+	if (err < 0)
+		return err;
 
-	if (fence->status) {
-		err = fence->status;
-		goto out;
-	}
+	if (!err)
+		return 1;
 
-	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
-out:
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
+	waiter->work.private = fence;
 
-	return err;
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	err = atomic_read(&fence->status);
+	if (err > 0)
+		__add_wait_queue_tail(&fence->wq, &waiter->work);
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
+
+	if (err < 0)
+		return err;
+
+	return !err;
 }
 EXPORT_SYMBOL(sync_fence_wait_async);
 
 int sync_fence_cancel_async(struct sync_fence *fence,
 			     struct sync_fence_waiter *waiter)
 {
-	struct list_head *pos;
-	struct list_head *n;
 	unsigned long flags;
-	int ret = -ENOENT;
+	int ret = 0;
 
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
-	/*
-	 * Make sure waiter is still in waiter_list because it is possible for
-	 * the waiter to be removed from the list while the callback is still
-	 * pending.
-	 */
-	list_for_each_safe(pos, n, &fence->waiter_list_head) {
-		struct sync_fence_waiter *list_waiter =
-			container_of(pos, struct sync_fence_waiter,
-				     waiter_list);
-		if (list_waiter == waiter) {
-			list_del(pos);
-			ret = 0;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	if (!list_empty(&waiter->work.task_list))
+		list_del_init(&waiter->work.task_list);
+	else
+		ret = -ENOENT;
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(sync_fence_cancel_async);
 
-static bool sync_fence_check(struct sync_fence *fence)
-{
-	/*
-	 * Make sure that reads to fence->status are ordered with the
-	 * wait queue event triggering
-	 */
-	smp_rmb();
-	return fence->status != 0;
-}
-
 int sync_fence_wait(struct sync_fence *fence, long timeout)
 {
-	int err = 0;
-	struct sync_pt *pt;
+	long ret;
+	int i;
+
+	if (timeout < 0)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout);
 
 	trace_sync_wait(fence, 1);
-	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
-		trace_sync_pt(pt);
-
-	if (timeout > 0) {
-		timeout = msecs_to_jiffies(timeout);
-		err = wait_event_interruptible_timeout(fence->wq,
-						       sync_fence_check(fence),
-						       timeout);
-	} else if (timeout < 0) {
-		err = wait_event_interruptible(fence->wq,
-					       sync_fence_check(fence));
-	}
+	for (i = 0; i < fence->num_fences; ++i)
+		trace_sync_pt(fence->cbs[i].sync_pt);
+	ret = wait_event_interruptible_timeout(fence->wq,
+					       atomic_read(&fence->status) <= 0,
+					       timeout);
 	trace_sync_wait(fence, 0);
 
-	if (err < 0)
-		return err;
-
-	if (fence->status < 0) {
-		pr_info("fence error %d on [%p]\n", fence->status, fence);
-		sync_dump();
-		return fence->status;
-	}
-
-	if (fence->status == 0) {
-		if (timeout > 0) {
+	if (ret < 0)
+		return ret;
+	else if (ret == 0) {
+		if (timeout) {
 			pr_info("fence timeout on [%p] after %dms\n", fence,
 				jiffies_to_msecs(timeout));
 			sync_dump();
@@ -632,15 +398,136 @@
 		return -ETIME;
 	}
 
-	return 0;
+	ret = atomic_read(&fence->status);
+	if (ret) {
+		pr_info("fence error %ld on [%p]\n", ret, fence);
+		sync_dump();
+	}
+	return ret;
 }
 EXPORT_SYMBOL(sync_fence_wait);
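With the fence conversion, the wait reduces to a single wait_event on the
atomic count of unsignaled points: a negative timeout still means wait
forever, a non-negative one is in milliseconds. A hedged usage sketch, where
mydrv_wait_on_fd() is hypothetical and sync_fence_put() is assumed to be the
existing counterpart to sync_fence_fdget():

    static int mydrv_wait_on_fd(int fd)
    {
            struct sync_fence *f = sync_fence_fdget(fd);
            int err;

            if (!f)
                    return -EINVAL;

            err = sync_fence_wait(f, 50);   /* 50 ms; a value < 0 waits forever */

            sync_fence_put(f);
            return err;     /* 0 on success, -ETIME on timeout, or a fence error */
    }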
 
+static const char *android_fence_get_driver_name(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	return parent->ops->driver_name;
+}
+
+static const char *android_fence_get_timeline_name(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	return parent->name;
+}
+
+static void android_fence_release(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+	unsigned long flags;
+
+	spin_lock_irqsave(fence->lock, flags);
+	list_del(&pt->child_list);
+	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+		list_del(&pt->active_list);
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	if (parent->ops->free_pt)
+		parent->ops->free_pt(pt);
+
+	sync_timeline_put(parent);
+	fence_free(&pt->base);
+}
+
+static bool android_fence_signaled(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+	int ret;
+
+	ret = parent->ops->has_signaled(pt);
+	if (ret < 0)
+		fence->status = ret;
+	return ret;
+}
+
+static bool android_fence_enable_signaling(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (android_fence_signaled(fence))
+		return false;
+
+	list_add_tail(&pt->active_list, &parent->active_list_head);
+	return true;
+}
+
+static int android_fence_fill_driver_data(struct fence *fence,
+					  void *data, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->fill_driver_data)
+		return 0;
+	return parent->ops->fill_driver_data(pt, data, size);
+}
+
+static void android_fence_value_str(struct fence *fence,
+				    char *str, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->pt_value_str) {
+		if (size)
+			*str = 0;
+		return;
+	}
+	parent->ops->pt_value_str(pt, str, size);
+}
+
+static void android_fence_timeline_value_str(struct fence *fence,
+					     char *str, int size)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (!parent->ops->timeline_value_str) {
+		if (size)
+			*str = 0;
+		return;
+	}
+	parent->ops->timeline_value_str(parent, str, size);
+}
+
+static const struct fence_ops android_fence_ops = {
+	.get_driver_name = android_fence_get_driver_name,
+	.get_timeline_name = android_fence_get_timeline_name,
+	.enable_signaling = android_fence_enable_signaling,
+	.signaled = android_fence_signaled,
+	.wait = fence_default_wait,
+	.release = android_fence_release,
+	.fill_driver_data = android_fence_fill_driver_data,
+	.fence_value_str = android_fence_value_str,
+	.timeline_value_str = android_fence_timeline_value_str,
+};
+
 static void sync_fence_free(struct kref *kref)
 {
 	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+	int i, status = atomic_read(&fence->status);
 
-	sync_fence_free_pts(fence);
+	for (i = 0; i < fence->num_fences; ++i) {
+		if (status)
+			fence_remove_callback(fence->cbs[i].sync_pt,
+					      &fence->cbs[i].cb);
+		fence_put(fence->cbs[i].sync_pt);
+	}
 
 	kfree(fence);
 }
@@ -648,44 +535,25 @@
 static int sync_fence_release(struct inode *inode, struct file *file)
 {
 	struct sync_fence *fence = file->private_data;
-	unsigned long flags;
 
-	/*
-	 * We need to remove all ways to access this fence before droping
-	 * our ref.
-	 *
-	 * start with its membership in the global fence list
-	 */
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_del(&fence->sync_fence_list);
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-
-	/*
-	 * remove its pts from their parents so that sync_timeline_signal()
-	 * can't reference the fence.
-	 */
-	sync_fence_detach_pts(fence);
+	sync_fence_debug_remove(fence);
 
 	kref_put(&fence->kref, sync_fence_free);
-
 	return 0;
 }
 
 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
 {
 	struct sync_fence *fence = file->private_data;
+	int status;
 
 	poll_wait(file, &fence->wq, wait);
 
-	/*
-	 * Make sure that reads to fence->status are ordered with the
-	 * wait queue event triggering
-	 */
-	smp_rmb();
+	status = atomic_read(&fence->status);
 
-	if (fence->status == 1)
+	if (!status)
 		return POLLIN;
-	else if (fence->status < 0)
+	else if (status < 0)
 		return POLLERR;
 	else
 		return 0;
@@ -750,7 +618,7 @@
 	return err;
 }
 
-static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+static int sync_fill_pt_info(struct fence *fence, void *data, int size)
 {
 	struct sync_pt_info *info = data;
 	int ret;
@@ -760,20 +628,24 @@
 
 	info->len = sizeof(struct sync_pt_info);
 
-	if (pt->parent->ops->fill_driver_data) {
-		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
-							size - sizeof(*info));
+	if (fence->ops->fill_driver_data) {
+		ret = fence->ops->fill_driver_data(fence, info->driver_data,
+						   size - sizeof(*info));
 		if (ret < 0)
 			return ret;
 
 		info->len += ret;
 	}
 
-	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
-	strlcpy(info->driver_name, pt->parent->ops->driver_name,
+	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+		sizeof(info->obj_name));
+	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
 		sizeof(info->driver_name));
-	info->status = pt->status;
-	info->timestamp_ns = ktime_to_ns(pt->timestamp);
+	if (fence_is_signaled(fence))
+		info->status = fence->status >= 0 ? 1 : fence->status;
+	else
+		info->status = 0;
+	info->timestamp_ns = ktime_to_ns(fence->timestamp);
 
 	return info->len;
 }
@@ -782,10 +654,9 @@
 					unsigned long arg)
 {
 	struct sync_fence_info_data *data;
-	struct list_head *pos;
 	__u32 size;
 	__u32 len = 0;
-	int ret;
+	int ret, i;
 
 	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
 		return -EFAULT;
@@ -801,12 +672,14 @@
 		return -ENOMEM;
 
 	strlcpy(data->name, fence->name, sizeof(data->name));
-	data->status = fence->status;
+	data->status = atomic_read(&fence->status);
+	if (data->status >= 0)
+		data->status = !data->status;
+
 	len = sizeof(struct sync_fence_info_data);
 
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, pt_list);
+	for (i = 0; i < fence->num_fences; ++i) {
+		struct fence *pt = fence->cbs[i].sync_pt;
 
 		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
 
@@ -833,7 +706,6 @@
 			     unsigned long arg)
 {
 	struct sync_fence *fence = file->private_data;
-
 	switch (cmd) {
 	case SYNC_IOC_WAIT:
 		return sync_fence_ioctl_wait(fence, arg);
@@ -849,181 +721,10 @@
 	}
 }
 
-#ifdef CONFIG_DEBUG_FS
-static const char *sync_status_str(int status)
-{
-	if (status > 0)
-		return "signaled";
-	else if (status == 0)
-		return "active";
-	else
-		return "error";
-}
-
-static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
-{
-	int status = pt->status;
-
-	seq_printf(s, "  %s%spt %s",
-		   fence ? pt->parent->name : "",
-		   fence ? "_" : "",
-		   sync_status_str(status));
-	if (pt->status) {
-		struct timeval tv = ktime_to_timeval(pt->timestamp);
-
-		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
-	}
-
-	if (pt->parent->ops->timeline_value_str &&
-	    pt->parent->ops->pt_value_str) {
-		char value[64];
-
-		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
-		seq_printf(s, ": %s", value);
-		if (fence) {
-			pt->parent->ops->timeline_value_str(pt->parent, value,
-						    sizeof(value));
-			seq_printf(s, " / %s", value);
-		}
-	} else if (pt->parent->ops->print_pt) {
-		seq_puts(s, ": ");
-		pt->parent->ops->print_pt(s, pt);
-	}
-
-	seq_puts(s, "\n");
-}
-
-static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
-{
-	struct list_head *pos;
-	unsigned long flags;
-
-	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
-	if (obj->ops->timeline_value_str) {
-		char value[64];
-
-		obj->ops->timeline_value_str(obj, value, sizeof(value));
-		seq_printf(s, ": %s", value);
-	} else if (obj->ops->print_obj) {
-		seq_puts(s, ": ");
-		obj->ops->print_obj(s, obj);
-	}
-
-	seq_puts(s, "\n");
-
-	spin_lock_irqsave(&obj->child_list_lock, flags);
-	list_for_each(pos, &obj->child_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, child_list);
-		sync_print_pt(s, pt, false);
-	}
-	spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
-{
-	struct list_head *pos;
-	unsigned long flags;
-
-	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
-		   sync_status_str(fence->status));
-
-	list_for_each(pos, &fence->pt_list_head) {
-		struct sync_pt *pt =
-			container_of(pos, struct sync_pt, pt_list);
-		sync_print_pt(s, pt, true);
-	}
-
-	spin_lock_irqsave(&fence->waiter_list_lock, flags);
-	list_for_each(pos, &fence->waiter_list_head) {
-		struct sync_fence_waiter *waiter =
-			container_of(pos, struct sync_fence_waiter,
-				     waiter_list);
-
-		seq_printf(s, "waiter %pF\n", waiter->callback);
-	}
-	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
-}
-
-static int sync_debugfs_show(struct seq_file *s, void *unused)
-{
-	unsigned long flags;
-	struct list_head *pos;
-
-	seq_puts(s, "objs:\n--------------\n");
-
-	spin_lock_irqsave(&sync_timeline_list_lock, flags);
-	list_for_each(pos, &sync_timeline_list_head) {
-		struct sync_timeline *obj =
-			container_of(pos, struct sync_timeline,
-				     sync_timeline_list);
-
-		sync_print_obj(s, obj);
-		seq_puts(s, "\n");
-	}
-	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
-
-	seq_puts(s, "fences:\n--------------\n");
-
-	spin_lock_irqsave(&sync_fence_list_lock, flags);
-	list_for_each(pos, &sync_fence_list_head) {
-		struct sync_fence *fence =
-			container_of(pos, struct sync_fence, sync_fence_list);
-
-		sync_print_fence(s, fence);
-		seq_puts(s, "\n");
-	}
-	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
-	return 0;
-}
-
-static int sync_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_debugfs_fops = {
-	.open           = sync_debugfs_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = single_release,
+static const struct file_operations sync_fence_fops = {
+	.release = sync_fence_release,
+	.poll = sync_fence_poll,
+	.unlocked_ioctl = sync_fence_ioctl,
+	.compat_ioctl = sync_fence_ioctl,
 };
 
-static __init int sync_debugfs_init(void)
-{
-	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
-	return 0;
-}
-late_initcall(sync_debugfs_init);
-
-#define DUMP_CHUNK 256
-static char sync_dump_buf[64 * 1024];
-static void sync_dump(void)
-{
-	struct seq_file s = {
-		.buf = sync_dump_buf,
-		.size = sizeof(sync_dump_buf) - 1,
-	};
-	int i;
-
-	sync_debugfs_show(&s, NULL);
-
-	for (i = 0; i < s.count; i += DUMP_CHUNK) {
-		if ((s.count - i) > DUMP_CHUNK) {
-			char c = s.buf[i + DUMP_CHUNK];
-
-			s.buf[i + DUMP_CHUNK] = 0;
-			pr_cont("%s", s.buf + i);
-			s.buf[i + DUMP_CHUNK] = c;
-		} else {
-			s.buf[s.count] = 0;
-			pr_cont("%s", s.buf + i);
-		}
-	}
-}
-#else
-static void sync_dump(void)
-{
-}
-#endif
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index eaf57ccc..66b0f43 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -19,6 +19,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
+#include <linux/fence.h>
 
 #include "uapi/sync.h"
 
@@ -40,8 +41,6 @@
  *			 -1 if a will signal before b
  * @free_pt:		called before sync_pt is freed
  * @release_obj:	called before sync_timeline is freed
- * @print_obj:		deprecated
- * @print_pt:		deprecated
  * @fill_driver_data:	write implementation specific driver data to data.
  *			  should return an error if there is not enough room
  *			  as specified by size.  This information is returned
@@ -67,13 +66,6 @@
 	/* optional */
 	void (*release_obj)(struct sync_timeline *sync_timeline);
 
-	/* deprecated */
-	void (*print_obj)(struct seq_file *s,
-			  struct sync_timeline *sync_timeline);
-
-	/* deprecated */
-	void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
-
 	/* optional */
 	int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
 
@@ -104,19 +96,21 @@
 
 	/* protected by child_list_lock */
 	bool			destroyed;
+	int			context, value;
 
 	struct list_head	child_list_head;
 	spinlock_t		child_list_lock;
 
 	struct list_head	active_list_head;
-	spinlock_t		active_list_lock;
 
+#ifdef CONFIG_DEBUG_FS
 	struct list_head	sync_timeline_list;
+#endif
 };
 
 /**
  * struct sync_pt - sync point
- * @parent:		sync_timeline to which this sync_pt belongs
+ * @fence:		base fence class
  * @child_list:		membership in sync_timeline.child_list_head
  * @active_list:	membership in sync_timeline.active_list_head
- * @signaled_list:	membership in temporary signaled_list on stack
@@ -127,19 +121,22 @@
  *			  signaled or error.
  */
 struct sync_pt {
-	struct sync_timeline		*parent;
+	struct fence base;
+
 	struct list_head	child_list;
-
 	struct list_head	active_list;
-	struct list_head	signaled_list;
+};
 
-	struct sync_fence	*fence;
-	struct list_head	pt_list;
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+	return container_of(pt->base.lock, struct sync_timeline,
+			    child_list_lock);
+}
 
-	/* protected by parent->active_list_lock */
-	int			status;
-
-	ktime_t			timestamp;
+struct sync_fence_cb {
+	struct fence_cb cb;
+	struct fence *sync_pt;
+	struct sync_fence *fence;
 };
 
 /**
@@ -149,9 +146,7 @@
  * @name:		name of sync_fence.  Useful for debugging
- * @pt_list_head:	list of sync_pts in the fence.  immutable once fence
- *			  is created
- * @waiter_list_head:	list of asynchronous waiters on this fence
- * @waiter_list_lock:	lock protecting @waiter_list_head and @status
- * @status:		1: signaled, 0:active, <0: error
+ * @status:		0: signaled, >0:active, <0: error
  *
  * @wq:			wait queue for fence signaling
  * @sync_fence_list:	membership in global fence list
@@ -160,17 +155,15 @@
 	struct file		*file;
 	struct kref		kref;
 	char			name[32];
-
-	/* this list is immutable once the fence is created */
-	struct list_head	pt_list_head;
-
-	struct list_head	waiter_list_head;
-	spinlock_t		waiter_list_lock; /* also protects status */
-	int			status;
+#ifdef CONFIG_DEBUG_FS
+	struct list_head	sync_fence_list;
+#endif
+	int num_fences;
 
 	wait_queue_head_t	wq;
+	atomic_t		status;
 
-	struct list_head	sync_fence_list;
+	struct sync_fence_cb	cbs[];
 };
 
 struct sync_fence_waiter;
@@ -184,14 +177,14 @@
  * @callback_data:	pointer to pass to @callback
  */
 struct sync_fence_waiter {
-	struct list_head	waiter_list;
-
-	sync_callback_t		callback;
+	wait_queue_t work;
+	sync_callback_t callback;
 };
 
 static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
 					  sync_callback_t callback)
 {
+	INIT_LIST_HEAD(&waiter->work.task_list);
 	waiter->callback = callback;
 }
 
@@ -341,4 +334,22 @@
  */
 int sync_fence_wait(struct sync_fence *fence, long timeout);
 
+#ifdef CONFIG_DEBUG_FS
+
+extern void sync_timeline_debug_add(struct sync_timeline *obj);
+extern void sync_timeline_debug_remove(struct sync_timeline *obj);
+extern void sync_fence_debug_add(struct sync_fence *fence);
+extern void sync_fence_debug_remove(struct sync_fence *fence);
+extern void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_fence_debug_add(fence)
+# define sync_fence_debug_remove(fence)
+# define sync_dump()
+#endif
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+				 int wake_flags, void *key);
+
 #endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
new file mode 100644
index 0000000..257fc91
--- /dev/null
+++ b/drivers/staging/android/sync_debug.c
@@ -0,0 +1,252 @@
+/*
+ * drivers/staging/android/sync_debug.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include "sync.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+void sync_timeline_debug_add(struct sync_timeline *obj)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_timeline_debug_remove(struct sync_timeline *obj)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_del(&obj->sync_timeline_list);
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_fence_debug_add(struct sync_fence *fence)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
+void sync_fence_debug_remove(struct sync_fence *fence)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_del(&fence->sync_fence_list);
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
+static const char *sync_status_str(int status)
+{
+	if (status == 0)
+		return "signaled";
+
+	if (status > 0)
+		return "active";
+
+	return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+	int status = 1;
+	struct sync_timeline *parent = sync_pt_parent(pt);
+
+	if (fence_is_signaled_locked(&pt->base))
+		status = pt->base.status;
+
+	seq_printf(s, "  %s%spt %s",
+		   fence ? parent->name : "",
+		   fence ? "_" : "",
+		   sync_status_str(status));
+
+	if (status <= 0) {
+		struct timeval tv = ktime_to_timeval(pt->base.timestamp);
+
+		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+	}
+
+	if (parent->ops->timeline_value_str &&
+	    parent->ops->pt_value_str) {
+		char value[64];
+
+		parent->ops->pt_value_str(pt, value, sizeof(value));
+		seq_printf(s, ": %s", value);
+		if (fence) {
+			parent->ops->timeline_value_str(parent, value,
+						    sizeof(value));
+			seq_printf(s, " / %s", value);
+		}
+	}
+
+	seq_puts(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+	struct list_head *pos;
+	unsigned long flags;
+
+	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+	if (obj->ops->timeline_value_str) {
+		char value[64];
+
+		obj->ops->timeline_value_str(obj, value, sizeof(value));
+		seq_printf(s, ": %s", value);
+	}
+
+	seq_puts(s, "\n");
+
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	list_for_each(pos, &obj->child_list_head) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, child_list);
+		sync_print_pt(s, pt, false);
+	}
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+	wait_queue_t *pos;
+	unsigned long flags;
+	int i;
+
+	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+		   sync_status_str(atomic_read(&fence->status)));
+
+	for (i = 0; i < fence->num_fences; ++i) {
+		struct sync_pt *pt =
+			container_of(fence->cbs[i].sync_pt,
+				     struct sync_pt, base);
+
+		sync_print_pt(s, pt, true);
+	}
+
+	spin_lock_irqsave(&fence->wq.lock, flags);
+	list_for_each_entry(pos, &fence->wq.task_list, task_list) {
+		struct sync_fence_waiter *waiter;
+
+		if (pos->func != &sync_fence_wake_up_wq)
+			continue;
+
+		waiter = container_of(pos, struct sync_fence_waiter, work);
+
+		seq_printf(s, "waiter %pF\n", waiter->callback);
+	}
+	spin_unlock_irqrestore(&fence->wq.lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+	unsigned long flags;
+	struct list_head *pos;
+
+	seq_puts(s, "objs:\n--------------\n");
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_for_each(pos, &sync_timeline_list_head) {
+		struct sync_timeline *obj =
+			container_of(pos, struct sync_timeline,
+				     sync_timeline_list);
+
+		sync_print_obj(s, obj);
+		seq_puts(s, "\n");
+	}
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+	seq_puts(s, "fences:\n--------------\n");
+
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_for_each(pos, &sync_fence_list_head) {
+		struct sync_fence *fence =
+			container_of(pos, struct sync_fence, sync_fence_list);
+
+		sync_print_fence(s, fence);
+		seq_puts(s, "\n");
+	}
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+	return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+	.open           = sync_debugfs_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+	return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+	struct seq_file s = {
+		.buf = sync_dump_buf,
+		.size = sizeof(sync_dump_buf) - 1,
+	};
+	int i;
+
+	sync_debugfs_show(&s, NULL);
+
+	for (i = 0; i < s.count; i += DUMP_CHUNK) {
+		if ((s.count - i) > DUMP_CHUNK) {
+			char c = s.buf[i + DUMP_CHUNK];
+
+			s.buf[i + DUMP_CHUNK] = 0;
+			pr_cont("%s", s.buf + i);
+			s.buf[i + DUMP_CHUNK] = c;
+		} else {
+			s.buf[s.count] = 0;
+			pr_cont("%s", s.buf + i);
+		}
+	}
+}
+
+#endif
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
index 9546235..77edb97 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/staging/android/trace/sync.h
@@ -45,7 +45,7 @@
 
 	TP_fast_assign(
 			__assign_str(name, fence->name);
-			__entry->status = fence->status;
+			__entry->status = atomic_read(&fence->status);
 			__entry->begin = begin;
 	),
 
@@ -54,19 +54,19 @@
 );
 
 TRACE_EVENT(sync_pt,
-	TP_PROTO(struct sync_pt *pt),
+	TP_PROTO(struct fence *pt),
 
 	TP_ARGS(pt),
 
 	TP_STRUCT__entry(
-		__string(timeline, pt->parent->name)
+		__string(timeline, pt->ops->get_timeline_name(pt))
 		__array(char, value, 32)
 	),
 
 	TP_fast_assign(
-		__assign_str(timeline, pt->parent->name);
-		if (pt->parent->ops->pt_value_str) {
-			pt->parent->ops->pt_value_str(pt, __entry->value,
+		__assign_str(timeline, pt->ops->get_timeline_name(pt));
+		if (pt->ops->fence_value_str) {
+			pt->ops->fence_value_str(pt, __entry->value,
 							sizeof(__entry->value));
 		} else {
 			__entry->value[0] = '\0';
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 6314629..76c08c2 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -451,7 +451,7 @@
 {
 	char buf[3];
 	u32 *val = file->private_data;
-	
+
 	if (*val)
 		buf[0] = 'Y';
 	else
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 8c41b52..1e3b99d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -66,7 +66,7 @@
 			break;
 		}
 	}
-	return inode; 
+	return inode;
 }
 
 /* SMP-safe */
@@ -317,7 +317,7 @@
 		goto exit;
 
 	/* If the parent is not specified, we create it in the root.
-	 * We need the root dentry to do this, which is in the super 
+	 * We need the root dentry to do this, which is in the super
 	 * block. A pointer to that is in the struct vfsmount that we
 	 * have around.
 	 */
@@ -330,7 +330,7 @@
 		switch (mode & S_IFMT) {
 		case S_IFDIR:
 			error = debugfs_mkdir(parent->d_inode, dentry, mode);
-					      
+
 			break;
 		case S_IFLNK:
 			error = debugfs_link(parent->d_inode, dentry, mode,
@@ -534,7 +534,7 @@
  */
 void debugfs_remove_recursive(struct dentry *dentry)
 {
-	struct dentry *child, *next, *parent;
+	struct dentry *child, *parent;
 
 	if (IS_ERR_OR_NULL(dentry))
 		return;
@@ -546,30 +546,49 @@
 	parent = dentry;
  down:
 	mutex_lock(&parent->d_inode->i_mutex);
-	list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ loop:
+	/*
+	 * The parent->d_subdirs is protected by the d_lock. Outside that
+	 * lock, the child can be unlinked and set to be freed, which can
+	 * use the d_u.d_child as the RCU head and corrupt this list.
+	 */
+	spin_lock(&parent->d_lock);
+	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
 		if (!debugfs_positive(child))
 			continue;
 
 		/* perhaps simple_empty(child) makes more sense */
 		if (!list_empty(&child->d_subdirs)) {
+			spin_unlock(&parent->d_lock);
 			mutex_unlock(&parent->d_inode->i_mutex);
 			parent = child;
 			goto down;
 		}
- up:
+
+		spin_unlock(&parent->d_lock);
+
 		if (!__debugfs_remove(child, parent))
 			simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+
+		/*
+		 * The parent->d_lock protects against the child unlinking
+		 * itself from d_subdirs. When releasing the parent->d_lock we can
+		 * no longer trust that the next pointer is valid.
+		 * Restart the loop. We'll skip this one with the
+		 * debugfs_positive() check.
+		 */
+		goto loop;
 	}
+	spin_unlock(&parent->d_lock);
 
 	mutex_unlock(&parent->d_inode->i_mutex);
 	child = parent;
 	parent = parent->d_parent;
 	mutex_lock(&parent->d_inode->i_mutex);
 
-	if (child != dentry) {
-		next = list_next_entry(child, d_u.d_child);
-		goto up;
-	}
+	if (child != dentry)
+		/* go up */
+		goto loop;
 
 	if (!__debugfs_remove(child, parent))
 		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
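The rewritten walk never holds d_lock across __debugfs_remove(): each removal
drops the lock and jumps back to the loop label, rescanning from the list head
and relying on debugfs_positive() to skip entries already unhashed. In
isolation the idiom looks like the following sketch (generic names, not the
kernel API):

    restart:
            spin_lock(&lock);
            list_for_each_entry(e, &head, node) {
                    if (!still_live(e))     /* skip entries already removed */
                            continue;
                    spin_unlock(&lock);
                    remove(e);              /* list may mutate while unlocked */
                    goto restart;           /* cached iterators are now stale */
            }
            spin_unlock(&lock);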
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index d895b4b..4429d6d 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -896,7 +896,7 @@
  * @ops: kernfs operations for the file
  * @priv: private data for the file
  * @ns: optional namespace tag of the file
- * @static_name: don't copy file name
+ * @name_is_static: don't copy file name
  * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
  *
  * Returns the created node on success, ERR_PTR() value on error.
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8af71a8..e41f17e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -83,6 +83,7 @@
 
 struct device_node;
 struct videomode;
+struct reservation_object;
 
 #include <drm/drm_os_linux.h>
 #include <drm/drm_hashtab.h>
@@ -923,6 +924,8 @@
 	/* low-level interface used by drm_gem_prime_{import,export} */
 	int (*gem_prime_pin)(struct drm_gem_object *obj);
 	void (*gem_prime_unpin)(struct drm_gem_object *obj);
+	struct reservation_object * (*gem_prime_res_obj)(
+				struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
 				struct drm_device *dev, size_t size,
diff --git a/include/linux/component.h b/include/linux/component.h
index 6887018..c00dcc3 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -29,4 +29,11 @@
 int component_master_add_child(struct master *master,
 	int (*compare)(struct device *, void *), void *compare_data);
 
+struct component_match;
+
+int component_master_add_with_match(struct device *,
+	const struct component_master_ops *, struct component_match *);
+void component_match_add(struct device *, struct component_match **,
+	int (*compare)(struct device *, void *), void *compare_data);
+
 #endif
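
The match-based interface above lets a master driver collect its component
devices up front instead of scanning the bus at bind time. A minimal sketch
of a master's probe using it; compare_dev(), component_dev and my_master_ops
are hypothetical names, not part of this patch:

	#include <linux/component.h>
	#include <linux/platform_device.h>

	static struct device *component_dev;	/* set elsewhere (assumption) */
	static const struct component_master_ops my_master_ops;
						/* bind/unbind callbacks (assumption) */

	static int compare_dev(struct device *dev, void *data)
	{
		return dev == data;	/* match on the device pointer */
	}

	static int my_master_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;

		/* a NULL match list is allocated on the first add */
		component_match_add(&pdev->dev, &match, compare_dev,
				    component_dev);
		return component_master_add_with_match(&pdev->dev,
						       &my_master_ops, match);
	}
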
diff --git a/include/linux/device.h b/include/linux/device.h
index af424ac..921fa0a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -631,8 +631,6 @@
 extern void devm_free_pages(struct device *dev, unsigned long addr);
 
 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
-void __iomem *devm_request_and_ioremap(struct device *dev,
-			struct resource *res);
 
 /* allows to add/remove a custom action to devres stack */
 int devm_add_action(struct device *dev, void (*action)(void *), void *data);
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index f886985..694e1fe 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -30,6 +30,8 @@
 #include <linux/list.h>
 #include <linux/dma-mapping.h>
 #include <linux/fs.h>
+#include <linux/fence.h>
+#include <linux/wait.h>
 
 struct device;
 struct dma_buf;
@@ -115,6 +117,7 @@
  * @exp_name: name of the exporter; useful for debugging.
  * @list_node: node for dma_buf accounting and debugging.
  * @priv: exporter specific private data for this buffer object.
+ * @resv: reservation object linked to this dma-buf
  */
 struct dma_buf {
 	size_t size;
@@ -128,6 +131,17 @@
 	const char *exp_name;
 	struct list_head list_node;
 	void *priv;
+	struct reservation_object *resv;
+
+	/* poll support */
+	wait_queue_head_t poll;
+
+	struct dma_buf_poll_cb_t {
+		struct fence_cb cb;
+		wait_queue_head_t *poll;
+
+		unsigned long active;
+	} cb_excl, cb_shared;
 };
 
 /**
@@ -168,10 +182,11 @@
 				struct dma_buf_attachment *dmabuf_attach);
 
 struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
-			       size_t size, int flags, const char *);
+			       size_t size, int flags, const char *,
+			       struct reservation_object *);
 
-#define dma_buf_export(priv, ops, size, flags)	\
-	dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
+#define dma_buf_export(priv, ops, size, flags, resv)	\
+	dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
 
 int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
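
With the reservation object plumbed through, an exporter now passes its resv
at export time. A hedged sketch, assuming that a NULL resv asks the dma-buf
core to allocate one embedded in the struct dma_buf (my_dmabuf_ops and obj
are made-up exporter names):

	struct dma_buf *dmabuf;

	/* NULL resv: let the core embed and init a reservation object */
	dmabuf = dma_buf_export(obj, &my_dmabuf_ops, obj->size, O_RDWR, NULL);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);
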
diff --git a/include/linux/fence.h b/include/linux/fence.h
new file mode 100644
index 0000000..d174585
--- /dev/null
+++ b/include/linux/fence.h
@@ -0,0 +1,360 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_FENCE_H
+#define __LINUX_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct fence;
+struct fence_ops;
+struct fence_cb;
+
+/**
+ * struct fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ *           fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
+ * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * *) Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
+ * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct fence {
+	struct kref refcount;
+	const struct fence_ops *ops;
+	struct rcu_head rcu;
+	struct list_head cb_list;
+	spinlock_t *lock;
+	unsigned context, seqno;
+	unsigned long flags;
+	ktime_t timestamp;
+	int status;
+};
+
+enum fence_flag_bits {
+	FENCE_FLAG_SIGNALED_BIT,
+	FENCE_FLAG_ENABLE_SIGNAL_BIT,
+	FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
+
+/**
+ * struct fence_cb - callback for fence_add_callback
+ * @node: used by fence_add_callback to append this struct to fence::cb_list
+ * @func: fence_func_t to call
+ *
+ * This struct will be initialized by fence_add_callback, additional
+ * data can be passed along by embedding fence_cb in another struct.
+ */
+struct fence_cb {
+	struct list_head node;
+	fence_func_t func;
+};
+
+/**
+ * struct fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns the number of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc.  This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL, set to fence_default_wait for default implementation.
+ * The fence_default_wait implementation should work for any fence, as long
+ * as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if called with intr set to true and the wait
+ * was interrupted, the remaining jiffies if the fence has signaled, or 0
+ * if the wait timed out. Can also return other error values on custom
+ * implementations, which should be treated as if the fence is signaled.
+ * For example, a hardware lockup could be reported that way.
+ *
+ * Notes on release:
+ * Can be NULL; this callback allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If the pointer is NULL, kfree will be called instead.
+ */
+
+struct fence_ops {
+	const char * (*get_driver_name)(struct fence *fence);
+	const char * (*get_timeline_name)(struct fence *fence);
+	bool (*enable_signaling)(struct fence *fence);
+	bool (*signaled)(struct fence *fence);
+	signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
+	void (*release)(struct fence *fence);
+
+	int (*fill_driver_data)(struct fence *fence, void *data, int size);
+	void (*fence_value_str)(struct fence *fence, char *str, int size);
+	void (*timeline_value_str)(struct fence *fence, char *str, int size);
+};
+
+void fence_init(struct fence *fence, const struct fence_ops *ops,
+		spinlock_t *lock, unsigned context, unsigned seqno);
+
+void fence_release(struct kref *kref);
+void fence_free(struct fence *fence);
+
+/**
+ * fence_get - increases refcount of the fence
+ * @fence:	[in]	fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct fence *fence_get(struct fence *fence)
+{
+	if (fence)
+		kref_get(&fence->refcount);
+	return fence;
+}
+
+/**
+ * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
+ * @fence:	[in]	fence to increase refcount of
+ *
+ * Returns the fence if a refcount could be obtained, or NULL otherwise.
+ */
+static inline struct fence *fence_get_rcu(struct fence *fence)
+{
+	if (kref_get_unless_zero(&fence->refcount))
+		return fence;
+	else
+		return NULL;
+}
+
+/**
+ * fence_put - decreases refcount of the fence
+ * @fence:	[in]	fence to reduce refcount of
+ */
+static inline void fence_put(struct fence *fence)
+{
+	if (fence)
+		kref_put(&fence->refcount, fence_release);
+}
+
+int fence_signal(struct fence *fence);
+int fence_signal_locked(struct fence *fence);
+signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
+int fence_add_callback(struct fence *fence, struct fence_cb *cb,
+		       fence_func_t func);
+bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
+void fence_enable_sw_signaling(struct fence *fence);
+
+/**
+ * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
+ * @fence:	[in]	the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
+ * haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+fence_is_signaled_locked(struct fence *fence)
+{
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return true;
+
+	if (fence->ops->signaled && fence->ops->signaled(fence)) {
+		fence_signal_locked(fence);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence:	[in]	the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
+ * haven't been called before.
+ *
+ * It's recommended for seqno fences to call fence_signal when the
+ * operation is complete; this makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+fence_is_signaled(struct fence *fence)
+{
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+		return true;
+
+	if (fence->ops->signaled && fence->ops->signaled(fence)) {
+		fence_signal(fence);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * fence_later - return the chronologically later fence
+ * @f1:	[in]	the first fence from the same context
+ * @f2:	[in]	the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
+{
+	if (WARN_ON(f1->context != f2->context))
+		return NULL;
+
+	/*
+	 * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
+	 * set if enable_signaling wasn't called, and enabling that here is
+	 * overkill.
+	 */
+	if (f2->seqno - f1->seqno <= INT_MAX)
+		return fence_is_signaled(f2) ? NULL : f2;
+	else
+		return fence_is_signaled(f1) ? NULL : f1;
+}
+
+signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
+
+
+/**
+ * fence_wait - sleep until the fence gets signaled
+ * @fence:	[in]	the fence to wait on
+ * @intr:	[in]	if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long fence_wait(struct fence *fence, bool intr)
+{
+	signed long ret;
+
+	/* Since fence_wait_timeout cannot timeout with
+	 * MAX_SCHEDULE_TIMEOUT, only valid return values are
+	 * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+	 */
+	ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+	return ret < 0 ? ret : 0;
+}
+
+unsigned fence_context_alloc(unsigned num);
+
+#define FENCE_TRACE(f, fmt, args...) \
+	do {								\
+		struct fence *__ff = (f);				\
+		if (config_enabled(CONFIG_FENCE_TRACE))			\
+			pr_info("f %u#%u: " fmt,			\
+				__ff->context, __ff->seqno, ##args);	\
+	} while (0)
+
+#define FENCE_WARN(f, fmt, args...) \
+	do {								\
+		struct fence *__ff = (f);				\
+		pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno,	\
+			 ##args);					\
+	} while (0)
+
+#define FENCE_ERR(f, fmt, args...) \
+	do {								\
+		struct fence *__ff = (f);				\
+		pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno,	\
+			##args);					\
+	} while (0)
+
+#endif /* __LINUX_FENCE_H */
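
As a reading aid for the ops contract above, here is a minimal sketch of a
purely software-signaled fence; every demo_* name is hypothetical and the
driver is assumed to call fence_signal() from its own completion path:

	#include <linux/fence.h>
	#include <linux/slab.h>

	static DEFINE_SPINLOCK(demo_lock);
	static unsigned demo_context;	/* from fence_context_alloc(1) */

	static const char *demo_get_driver_name(struct fence *f)
	{
		return "demo";
	}

	static const char *demo_get_timeline_name(struct fence *f)
	{
		return "demo-timeline";
	}

	static bool demo_enable_signaling(struct fence *f)
	{
		return true;	/* software only: nothing to arm yet */
	}

	static const struct fence_ops demo_fence_ops = {
		.get_driver_name   = demo_get_driver_name,
		.get_timeline_name = demo_get_timeline_name,
		.enable_signaling  = demo_enable_signaling,
		.wait              = fence_default_wait,
	};

	static struct fence *demo_fence_create(unsigned seqno)
	{
		struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		fence_init(f, &demo_fence_ops, &demo_lock,
			   demo_context, seqno);
		return f;	/* later: fence_signal(f); fence_put(f); */
	}
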
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 5952933..5c41c5e 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -45,6 +45,8 @@
 	struct module *module, bool uevent,
 	const char *name, struct device *device, gfp_t gfp, void *context,
 	void (*cont)(const struct firmware *fw, void *context));
+int request_firmware_direct(const struct firmware **fw, const char *name,
+			    struct device *device);
 
 void release_firmware(const struct firmware *fw);
 #else
@@ -66,13 +68,12 @@
 {
 }
 
-#endif
+static inline int request_firmware_direct(const struct firmware **fw,
+					  const char *name,
+					  struct device *device)
+{
+	return -EINVAL;
+}
 
-#ifdef CONFIG_FW_LOADER_USER_HELPER
-int request_firmware_direct(const struct firmware **fw, const char *name,
-			    struct device *device);
-#else
-#define request_firmware_direct	request_firmware
 #endif
-
 #endif
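
request_firmware_direct() now has a declaration in both configurations; it
behaves like request_firmware() but never falls back to the usermode helper.
A short sketch ("vendor/demo.bin" is a made-up firmware name):

	const struct firmware *fw;
	int err;

	err = request_firmware_direct(&fw, "vendor/demo.bin", dev);
	if (err)
		return err;	/* no usermode-helper fallback was tried */
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
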
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 16f6654..153d303 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -28,6 +28,7 @@
 	struct resource	*resource;
 
 	const struct platform_device_id	*id_entry;
+	char *driver_override; /* Driver name to force a match */
 
 	/* MFD cell pointer */
 	struct mfd_cell *mfd_cell;
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 813dae9..5a0b64c 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -6,7 +6,7 @@
  * Copyright (C) 2012 Texas Instruments
  *
  * Authors:
- * Rob Clark <rob.clark@linaro.org>
+ * Rob Clark <robdclark@gmail.com>
  * Maarten Lankhorst <maarten.lankhorst@canonical.com>
  * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  *
@@ -40,23 +40,103 @@
 #define _LINUX_RESERVATION_H
 
 #include <linux/ww_mutex.h>
+#include <linux/fence.h>
+#include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
+
+struct reservation_object_list {
+	struct rcu_head rcu;
+	u32 shared_count, shared_max;
+	struct fence __rcu *shared[];
+};
 
 struct reservation_object {
 	struct ww_mutex lock;
+	seqcount_t seq;
+
+	struct fence __rcu *fence_excl;
+	struct reservation_object_list __rcu *fence;
+	struct reservation_object_list *staged;
 };
 
+#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define reservation_object_assert_held(obj) \
+	lockdep_assert_held(&(obj)->lock.base)
+
 static inline void
 reservation_object_init(struct reservation_object *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+	__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
+	RCU_INIT_POINTER(obj->fence, NULL);
+	RCU_INIT_POINTER(obj->fence_excl, NULL);
+	obj->staged = NULL;
 }
 
 static inline void
 reservation_object_fini(struct reservation_object *obj)
 {
+	int i;
+	struct reservation_object_list *fobj;
+	struct fence *excl;
+
+	/*
+	 * This object should be dead and all references must have
+	 * been released to it, so no need to be protected with rcu.
+	 */
+	excl = rcu_dereference_protected(obj->fence_excl, 1);
+	if (excl)
+		fence_put(excl);
+
+	fobj = rcu_dereference_protected(obj->fence, 1);
+	if (fobj) {
+		for (i = 0; i < fobj->shared_count; ++i)
+			fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+
+		kfree(fobj);
+	}
+	kfree(obj->staged);
+
 	ww_mutex_destroy(&obj->lock);
 }
 
+static inline struct reservation_object_list *
+reservation_object_get_list(struct reservation_object *obj)
+{
+	return rcu_dereference_protected(obj->fence,
+					 reservation_object_held(obj));
+}
+
+static inline struct fence *
+reservation_object_get_excl(struct reservation_object *obj)
+{
+	return rcu_dereference_protected(obj->fence_excl,
+					 reservation_object_held(obj));
+}
+
+int reservation_object_reserve_shared(struct reservation_object *obj);
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+					 struct fence *fence);
+
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+				       struct fence *fence);
+
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+				      struct fence **pfence_excl,
+				      unsigned *pshared_count,
+				      struct fence ***pshared);
+
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+					 bool wait_all, bool intr,
+					 unsigned long timeout);
+
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+					  bool test_all);
+
 #endif /* _LINUX_RESERVATION_H */
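
To illustrate the split between the locked write side and the new RCU read
side, a hedged sketch (obj, fence and the calling context are assumed):

	long ret;

	/* write side: publish fences while holding the reservation lock */
	ww_mutex_lock(&obj->lock, NULL);
	reservation_object_add_excl_fence(obj, fence);
	ww_mutex_unlock(&obj->lock);

	/* read side: wait for all fences without taking the ww_mutex */
	ret = reservation_object_wait_timeout_rcu(obj, true, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;	/* e.g. -ERESTARTSYS when interrupted */
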
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
new file mode 100644
index 0000000..3d6003d
--- /dev/null
+++ b/include/linux/seqno-fence.h
@@ -0,0 +1,116 @@
+/*
+ * seqno-fence, using a dma-buf to synchronize fencing
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2012 Canonical Ltd
+ * Authors:
+ *   Rob Clark <robdclark@gmail.com>
+ *   Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_SEQNO_FENCE_H
+#define __LINUX_SEQNO_FENCE_H
+
+#include <linux/fence.h>
+#include <linux/dma-buf.h>
+
+enum seqno_fence_condition {
+	SEQNO_FENCE_WAIT_GEQUAL,
+	SEQNO_FENCE_WAIT_NONZERO
+};
+
+struct seqno_fence {
+	struct fence base;
+
+	const struct fence_ops *ops;
+	struct dma_buf *sync_buf;
+	uint32_t seqno_ofs;
+	enum seqno_fence_condition condition;
+};
+
+extern const struct fence_ops seqno_fence_ops;
+
+/**
+ * to_seqno_fence - cast a fence to a seqno_fence
+ * @fence: fence to cast to a seqno_fence
+ *
+ * Returns NULL if the fence is not a seqno_fence,
+ * or the seqno_fence otherwise.
+ */
+static inline struct seqno_fence *
+to_seqno_fence(struct fence *fence)
+{
+	if (fence->ops != &seqno_fence_ops)
+		return NULL;
+	return container_of(fence, struct seqno_fence, base);
+}
+
+/**
+ * seqno_fence_init - initialize a seqno fence
+ * @fence: seqno_fence to initialize
+ * @lock: pointer to spinlock to use for fence
+ * @sync_buf: buffer containing the memory location to signal on
+ * @context: the execution context this fence is a part of
+ * @seqno_ofs: the offset within @sync_buf
+ * @seqno: the sequence # to signal on
+ * @ops: the fence_ops for operations on this seqno fence
+ *
+ * This function initializes a struct seqno_fence with passed parameters,
+ * and takes a reference on sync_buf which is released on fence destruction.
+ *
+ * A seqno_fence is a dma_fence which can complete in software when
+ * enable_signaling is called, but it also completes when
+ * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
+ *
+ * The seqno_fence will take a refcount on the sync_buf until it's
+ * destroyed, but the actual lifetime of sync_buf may be longer if one of
+ * the callers takes a reference to it.
+ *
+ * Certain hardware has instructions to insert this type of wait condition
+ * in the command stream, so no intervention from software would be needed.
+ * This type of fence can be destroyed before it completes; however, a
+ * reference on the sync_buf dma-buf can be taken. It is encouraged to
+ * re-use the same dma-buf for sync_buf, since mapping or unmapping the
+ * sync_buf to the device's vm can be expensive.
+ *
+ * It is recommended for creators of seqno_fence to call fence_signal
+ * before destruction. This will prevent possible issues from wraparound at
+ * time of issue vs time of check, since users can check fence_is_signaled
+ * before submitting instructions for the hardware to wait on the fence.
+ * However, when ops.enable_signaling is not called, it doesn't have to be
+ * done as soon as possible, just before there's any real danger of seqno
+ * wraparound.
+ */
+static inline void
+seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
+		 struct dma_buf *sync_buf,  uint32_t context,
+		 uint32_t seqno_ofs, uint32_t seqno,
+		 enum seqno_fence_condition cond,
+		 const struct fence_ops *ops)
+{
+	BUG_ON(!fence || !sync_buf || !ops);
+	BUG_ON(!ops->wait || !ops->enable_signaling ||
+	       !ops->get_driver_name || !ops->get_timeline_name);
+
+	/*
+	 * ops is used in fence_init for get_driver_name, so needs to be
+	 * initialized first
+	 */
+	fence->ops = ops;
+	fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+	get_dma_buf(sync_buf);
+	fence->sync_buf = sync_buf;
+	fence->seqno_ofs = seqno_ofs;
+	fence->condition = cond;
+}
+
+#endif /* __LINUX_SEQNO_FENCE_H */
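
A hedged sketch of seqno_fence_init() in use: the fence below completes once
the u32 at offset 64 inside sync_buf reaches (or passes) 42. demo_lock,
sync_buf, ctx and demo_seqno_ops are assumptions supplied by the caller:

	struct seqno_fence *sf = kzalloc(sizeof(*sf), GFP_KERNEL);

	if (!sf)
		return -ENOMEM;
	seqno_fence_init(sf, &demo_lock, sync_buf, ctx, 64, 42,
			 SEQNO_FENCE_WAIT_GEQUAL, &demo_seqno_ops);
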
diff --git a/include/trace/events/fence.h b/include/trace/events/fence.h
new file mode 100644
index 0000000..98feb1b
--- /dev/null
+++ b/include/trace/events/fence.h
@@ -0,0 +1,128 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fence
+
+#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FENCE_H
+
+#include <linux/tracepoint.h>
+
+struct fence;
+
+TRACE_EVENT(fence_annotate_wait_on,
+
+	/* fence: the fence waiting on f1, f1: the fence to be waited on. */
+	TP_PROTO(struct fence *fence, struct fence *f1),
+
+	TP_ARGS(fence, f1),
+
+	TP_STRUCT__entry(
+		__string(driver, fence->ops->get_driver_name(fence))
+		__string(timeline, fence->ops->get_timeline_name(fence))
+		__field(unsigned int, context)
+		__field(unsigned int, seqno)
+
+		__string(waiting_driver, f1->ops->get_driver_name(f1))
+		__string(waiting_timeline, f1->ops->get_timeline_name(f1))
+		__field(unsigned int, waiting_context)
+		__field(unsigned int, waiting_seqno)
+	),
+
+	TP_fast_assign(
+		__assign_str(driver, fence->ops->get_driver_name(fence))
+		__assign_str(timeline, fence->ops->get_timeline_name(fence))
+		__entry->context = fence->context;
+		__entry->seqno = fence->seqno;
+
+		__assign_str(waiting_driver, f1->ops->get_driver_name(f1))
+		__assign_str(waiting_timeline, f1->ops->get_timeline_name(f1))
+		__entry->waiting_context = f1->context;
+		__entry->waiting_seqno = f1->seqno;
+
+	),
+
+	TP_printk("driver=%s timeline=%s context=%u seqno=%u "	\
+		  "waits on driver=%s timeline=%s context=%u seqno=%u",
+		  __get_str(driver), __get_str(timeline), __entry->context,
+		  __entry->seqno,
+		  __get_str(waiting_driver), __get_str(waiting_timeline),
+		  __entry->waiting_context, __entry->waiting_seqno)
+);
+
+DECLARE_EVENT_CLASS(fence,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence),
+
+	TP_STRUCT__entry(
+		__string(driver, fence->ops->get_driver_name(fence))
+		__string(timeline, fence->ops->get_timeline_name(fence))
+		__field(unsigned int, context)
+		__field(unsigned int, seqno)
+	),
+
+	TP_fast_assign(
+		__assign_str(driver, fence->ops->get_driver_name(fence))
+		__assign_str(timeline, fence->ops->get_timeline_name(fence))
+		__entry->context = fence->context;
+		__entry->seqno = fence->seqno;
+	),
+
+	TP_printk("driver=%s timeline=%s context=%u seqno=%u",
+		  __get_str(driver), __get_str(timeline), __entry->context,
+		  __entry->seqno)
+);
+
+DEFINE_EVENT(fence, fence_emit,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(fence, fence_init,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(fence, fence_destroy,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(fence, fence_enable_signal,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(fence, fence_signaled,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(fence, fence_wait_start,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+DEFINE_EVENT(fence, fence_wait_end,
+
+	TP_PROTO(struct fence *fence),
+
+	TP_ARGS(fence)
+);
+
+#endif /*  _TRACE_FENCE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
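
Each DEFINE_EVENT above becomes a trace_<name>() call for the fence core to
emit. A sketch of how a core file is expected to instantiate and use them
(the call sites here are an assumption, not part of this header):

	/* in exactly one .c file, instantiate the tracepoints */
	#define CREATE_TRACE_POINTS
	#include <trace/events/fence.h>

	void example_emit(struct fence *fence)
	{
		trace_fence_init(fence);	/* on creation */
		trace_fence_signaled(fence);	/* when it signals */
	}
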
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7a638aa..213cd9f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1649,6 +1649,19 @@
 
 	  If unsure, say N.
 
+config TEST_FIRMWARE
+	tristate "Test firmware loading via userspace interface"
+	default n
+	depends on FW_LOADER
+	help
+	  This builds the "test_firmware" module that creates a userspace
+	  interface for testing firmware loading. This can be used to
+	  control the triggering of firmware loading without needing an
+	  actual firmware-using device. The contents can be rechecked by
+	  userspace.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index ba967a1..230b4b1 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_TEST_MODULE) += test_module.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/devres.c b/lib/devres.c
index f562bf6..6a4aee8 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -142,34 +142,6 @@
 }
 EXPORT_SYMBOL(devm_ioremap_resource);
 
-/**
- * devm_request_and_ioremap() - Check, request region, and ioremap resource
- * @dev: Generic device to handle the resource for
- * @res: resource to be handled
- *
- * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
- * everything is undone on driver detach. Checks arguments, so you can feed
- * it the result from e.g. platform_get_resource() directly. Returns the
- * remapped pointer or NULL on error. Usage example:
- *
- *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- *	base = devm_request_and_ioremap(&pdev->dev, res);
- *	if (!base)
- *		return -EADDRNOTAVAIL;
- */
-void __iomem *devm_request_and_ioremap(struct device *dev,
-				       struct resource *res)
-{
-	void __iomem *dest_ptr;
-
-	dest_ptr = devm_ioremap_resource(dev, res);
-	if (IS_ERR(dest_ptr))
-		return NULL;
-
-	return dest_ptr;
-}
-EXPORT_SYMBOL(devm_request_and_ioremap);
-
 #ifdef CONFIG_HAS_IOPORT_MAP
 /*
  * Generic iomap devres
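
With the deprecated wrapper removed, callers use devm_ioremap_resource() and
the IS_ERR() convention directly (the same rewrite the coccinelle script
deleted below used to apply). A sketch, with pdev coming from the caller's
probe function:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
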
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 0000000..86374c1
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,117 @@
+/*
+ * This module provides an interface to trigger and test firmware loading.
+ *
+ * It is designed to be used for basic evaluation of the firmware loading
+ * subsystem (for example when validating firmware verification). It lacks
+ * any extra dependencies, and will not normally be loaded by the system
+ * unless explicitly requested by name.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+static DEFINE_MUTEX(test_fw_mutex);
+static const struct firmware *test_firmware;
+
+static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
+				 size_t size, loff_t *offset)
+{
+	ssize_t rc = 0;
+
+	mutex_lock(&test_fw_mutex);
+	if (test_firmware)
+		rc = simple_read_from_buffer(buf, size, offset,
+					     test_firmware->data,
+					     test_firmware->size);
+	mutex_unlock(&test_fw_mutex);
+	return rc;
+}
+
+static const struct file_operations test_fw_fops = {
+	.owner          = THIS_MODULE,
+	.read           = test_fw_misc_read,
+};
+
+static struct miscdevice test_fw_misc_device = {
+	.minor          = MISC_DYNAMIC_MINOR,
+	.name           = "test_firmware",
+	.fops           = &test_fw_fops,
+};
+
+static ssize_t trigger_request_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	int rc;
+	char *name;
+
+	name = kzalloc(count + 1, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+	memcpy(name, buf, count);
+
+	pr_info("loading '%s'\n", name);
+
+	mutex_lock(&test_fw_mutex);
+	release_firmware(test_firmware);
+	test_firmware = NULL;
+	rc = request_firmware(&test_firmware, name, dev);
+	if (rc)
+		pr_info("load of '%s' failed: %d\n", name, rc);
+	pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
+	mutex_unlock(&test_fw_mutex);
+
+	kfree(name);
+
+	return count;
+}
+static DEVICE_ATTR_WO(trigger_request);
+
+static int __init test_firmware_init(void)
+{
+	int rc;
+
+	rc = misc_register(&test_fw_misc_device);
+	if (rc) {
+		pr_err("could not register misc device: %d\n", rc);
+		return rc;
+	}
+	rc = device_create_file(test_fw_misc_device.this_device,
+				&dev_attr_trigger_request);
+	if (rc) {
+		pr_err("could not create sysfs interface: %d\n", rc);
+		goto dereg;
+	}
+
+	pr_warn("interface ready\n");
+
+	return 0;
+dereg:
+	misc_deregister(&test_fw_misc_device);
+	return rc;
+}
+
+module_init(test_firmware_init);
+
+static void __exit test_firmware_exit(void)
+{
+	release_firmware(test_firmware);
+	device_remove_file(test_fw_misc_device.this_device,
+			   &dev_attr_trigger_request);
+	misc_deregister(&test_fw_misc_device);
+	pr_warn("removed interface\n");
+}
+
+module_exit(test_firmware_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/scripts/coccinelle/api/devm_ioremap_resource.cocci b/scripts/coccinelle/api/devm_ioremap_resource.cocci
deleted file mode 100644
index 495daa3..0000000
--- a/scripts/coccinelle/api/devm_ioremap_resource.cocci
+++ /dev/null
@@ -1,90 +0,0 @@
-virtual patch
-virtual report
-
-@depends on patch@
-expression base, dev, res;
-@@
-
--base = devm_request_and_ioremap(dev, res);
-+base = devm_ioremap_resource(dev, res);
- ...
- if (
--base == NULL
-+IS_ERR(base)
- || ...) {
-<...
--	return ...;
-+	return PTR_ERR(base);
-...>
- }
-
-@depends on patch@
-expression e, E, ret;
-identifier l;
-@@
-
- e = devm_ioremap_resource(...);
- ...
- if (IS_ERR(e) || ...) {
- 	... when any
--	ret = E;
-+	ret = PTR_ERR(e);
- 	...
-(
- 	return ret;
-|
- 	goto l;
-)
- }
-
-@depends on patch@
-expression e;
-@@
-
- e = devm_ioremap_resource(...);
- ...
- if (IS_ERR(e) || ...) {
- 	...
--	\(dev_dbg\|dev_err\|pr_debug\|pr_err\|DRM_ERROR\)(...);
- 	...
- }
-
-@depends on patch@
-expression e;
-identifier l;
-@@
-
- e = devm_ioremap_resource(...);
- ...
- if (IS_ERR(e) || ...)
--{
-(
- 	return ...;
-|
- 	goto l;
-)
--}
-
-@r depends on report@
-expression e;
-identifier l;
-position p1;
-@@
-
-*e = devm_request_and_ioremap@p1(...);
- ...
- if (e == NULL || ...) {
- 	...
-(
- 	return ...;
-|
- 	goto l;
-)
- }
-
-@script:python depends on r@
-p1 << r.p1;
-@@
-
-msg = "ERROR: deprecated devm_request_and_ioremap() API used on line %s" % (p1[0].line)
-coccilib.report.print_report(p1[0], msg)
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index e66e710..5c2bf8e 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -11,6 +11,7 @@
 TARGETS += powerpc
 TARGETS += user
 TARGETS += sysctl
+TARGETS += firmware
 
 all:
 	for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
new file mode 100644
index 0000000..e23cce0
--- /dev/null
+++ b/tools/testing/selftests/firmware/Makefile
@@ -0,0 +1,27 @@
+# Makefile for firmware loading selftests
+
+# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+all:
+
+fw_filesystem:
+	@if /bin/sh ./fw_filesystem.sh ; then \
+                echo "fw_filesystem: ok"; \
+        else \
+                echo "fw_filesystem: [FAIL]"; \
+                exit 1; \
+        fi
+
+fw_userhelper:
+	@if /bin/sh ./fw_userhelper.sh ; then \
+                echo "fw_userhelper: ok"; \
+        else \
+                echo "fw_userhelper: [FAIL]"; \
+                exit 1; \
+        fi
+
+run_tests: all fw_filesystem fw_userhelper
+
+# Nothing to clean up.
+clean:
+
+.PHONY: all clean run_tests fw_filesystem fw_userhelper
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
new file mode 100644
index 0000000..3fc6c10
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+# This validates that the kernel will load firmware out of its list of
+# firmware locations on disk. Since the user helper does similar work,
+# we reset the custom load directory to a location the user helper doesn't
+# know so we can be sure we're not accidentally testing the user helper.
+set -e
+
+modprobe test_firmware
+
+DIR=/sys/devices/virtual/misc/test_firmware
+
+OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+OLD_FWPATH=$(cat /sys/module/firmware_class/parameters/path)
+
+FWPATH=$(mktemp -d)
+FW="$FWPATH/test-firmware.bin"
+
+test_finish()
+{
+	echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
+	echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path
+	rm -f "$FW"
+	rmdir "$FWPATH"
+}
+
+trap "test_finish" EXIT
+
+# Turn down the timeout so failures don't take so long.
+echo 1 >/sys/class/firmware/timeout
+# Set the kernel search path.
+echo -n "$FWPATH" >/sys/module/firmware_class/parameters/path
+
+# This is an unlikely real-world firmware content. :)
+echo "ABCD0123" >"$FW"
+
+NAME=$(basename "$FW")
+
+# Request a firmware that doesn't exist, it should fail.
+echo -n "nope-$NAME" >"$DIR"/trigger_request
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+	echo "$0: firmware was not expected to match" >&2
+	exit 1
+else
+	echo "$0: timeout works"
+fi
+
+# This should succeed via the kernel load, or fail after 1 second once it
+# has been handed over to the user helper, which won't find the fw either.
+if ! echo -n "$NAME" >"$DIR"/trigger_request ; then
+	echo "$0: could not trigger request" >&2
+	exit 1
+fi
+
+# Verify the contents are what we expect.
+if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+	echo "$0: firmware was not loaded" >&2
+	exit 1
+else
+	echo "$0: filesystem loading works"
+fi
+
+exit 0
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
new file mode 100644
index 0000000..6efbade
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
@@ -0,0 +1,89 @@
+#!/bin/sh
+# This validates that the kernel will fall back to using the user helper
+# to load firmware it can't find on disk itself. We must request a firmware
+# that the kernel won't find, and any installed helper (e.g. udev) also
+# won't find so that we can do the load ourselves manually.
+set -e
+
+modprobe test_firmware
+
+DIR=/sys/devices/virtual/misc/test_firmware
+
+OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+
+FWPATH=$(mktemp -d)
+FW="$FWPATH/test-firmware.bin"
+
+test_finish()
+{
+	echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
+	rm -f "$FW"
+	rmdir "$FWPATH"
+}
+
+load_fw()
+{
+	local name="$1"
+	local file="$2"
+
+	# This will block until our load (below) has finished.
+	echo -n "$name" >"$DIR"/trigger_request &
+
+	# Give kernel a chance to react.
+	local timeout=10
+	while [ ! -e "$DIR"/"$name"/loading ]; do
+		sleep 0.1
+		timeout=$(( $timeout - 1 ))
+		if [ "$timeout" -eq 0 ]; then
+			echo "$0: firmware interface never appeared" >&2
+			exit 1
+		fi
+	done
+
+	echo 1 >"$DIR"/"$name"/loading
+	cat "$file" >"$DIR"/"$name"/data
+	echo 0 >"$DIR"/"$name"/loading
+
+	# Wait for request to finish.
+	wait
+}
+
+trap "test_finish" EXIT
+
+# This is an unlikely real-world firmware content. :)
+echo "ABCD0123" >"$FW"
+NAME=$(basename "$FW")
+
+# Test failure when doing nothing (timeout works).
+echo 1 >/sys/class/firmware/timeout
+echo -n "$NAME" >"$DIR"/trigger_request
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+	echo "$0: firmware was not expected to match" >&2
+	exit 1
+else
+	echo "$0: timeout works"
+fi
+
+# Put timeout high enough for us to do work but not so long that failures
+# slow down this test too much.
+echo 4 >/sys/class/firmware/timeout
+
+# Load this script instead of the desired firmware.
+load_fw "$NAME" "$0"
+if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+	echo "$0: firmware was not expected to match" >&2
+	exit 1
+else
+	echo "$0: firmware comparison works"
+fi
+
+# Do a proper load, which should work correctly.
+load_fw "$NAME" "$FW"
+if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
+	echo "$0: firmware was not loaded" >&2
+	exit 1
+else
+	echo "$0: user helper firmware loading works"
+fi
+
+exit 0