Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux

Pull virtio/lguest fixes from Rusty Russell:
 "Missing license tag and some fallout from the lguest pagetable rework"

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  lguest: clear cached last cpu when guest_set_pgd() called.
  Add missing module license tag to vring helpers.
diff --git a/Documentation/ABI/testing/sysfs-block-bcache b/Documentation/ABI/testing/sysfs-block-bcache
new file mode 100644
index 0000000..9e4bbc5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-bcache
@@ -0,0 +1,156 @@
+What:		/sys/block/<disk>/bcache/unregister
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		A write to this file causes the backing device or cache to be
+		unregistered. If a backing device had dirty data in the cache,
+		writeback mode is automatically disabled and all dirty data is
+		flushed before the device is unregistered. Caches unregister
+		all associated backing devices before unregistering themselves.
+
+What:		/sys/block/<disk>/bcache/clear_stats
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		Writing to this file resets all the statistics for the device.
+
+What:		/sys/block/<disk>/bcache/cache
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a backing device that has a cache, a symlink to
+		the bcache/ dir of that cache.
+
+What:		/sys/block/<disk>/bcache/cache_hits
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: integer number of full cache hits,
+		counted per bio. A partial cache hit counts as a miss.
+
+What:		/sys/block/<disk>/bcache/cache_misses
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: integer number of cache misses.
+
+What:		/sys/block/<disk>/bcache/cache_hit_ratio
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: cache hits as a percentage.
+
+What:		/sys/block/<disk>/bcache/sequential_cutoff
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: Threshold past which sequential IO will
+		skip the cache. Read and written as bytes in human readable
+		units (e.g. echo 10M > sequential_cutoff).
+
+What:		/sys/block/<disk>/bcache/bypassed
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		Sum of all reads and writes that have bypassed the cache (due
+		to the sequential cutoff).  Expressed as bytes in human
+		readable units.
+
+What:		/sys/block/<disk>/bcache/writeback
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: When on, writeback caching is enabled and
+		writes will be buffered in the cache. When off, caching is in
+		writethrough mode; reads and writes will be added to the
+		cache but no write buffering will take place.
+
+What:		/sys/block/<disk>/bcache/writeback_running
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: when off, dirty data will not be written
+		from the cache to the backing device. The cache will still be
+		used to buffer writes until it is mostly full, at which point
+		writes transparently revert to writethrough mode. Intended only
+		for benchmarking/testing.
+
+What:		/sys/block/<disk>/bcache/writeback_delay
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: In writeback mode, when dirty data is
+		written to the cache and the cache held no dirty data for that
+		backing device, writeback from cache to backing device starts
+		after this delay, expressed as an integer number of seconds.
+
+What:		/sys/block/<disk>/bcache/writeback_percent
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For backing devices: If nonzero, writeback from cache to
+		backing device only takes place when more than this percentage
+		of the cache is used, allowing more write coalescing to take
+		place and reducing total number of writes sent to the backing
+		device. Integer between 0 and 40.
+
+What:		/sys/block/<disk>/bcache/synchronous
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, a boolean that allows synchronous mode to be
+		switched on and off. In synchronous mode all writes are ordered
+		such that the cache can reliably recover from unclean shutdown;
+		if disabled, bcache will not generally wait for writes to
+		complete, but if the cache is not shut down cleanly, all data
+		will be discarded from the cache. Should not be turned off with
+		writeback caching enabled.
+
+What:		/sys/block/<disk>/bcache/discard
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, a boolean allowing discard/TRIM to be turned off
+		or back on if the device supports it.
+
+What:		/sys/block/<disk>/bcache/bucket_size
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, bucket size in human readable units, as set at
+		cache creation time; should match the erase block size of the
+		SSD for optimal performance.
+
+What:		/sys/block/<disk>/bcache/nbuckets
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, the number of usable buckets.
+
+What:		/sys/block/<disk>/bcache/tree_depth
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, height of the btree excluding leaf nodes (i.e. a
+		one node tree will have a depth of 0).
+
+What:		/sys/block/<disk>/bcache/btree_cache_size
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		Number of btree buckets/nodes that are currently cached in
+		memory; cache dynamically grows and shrinks in response to
+		memory pressure from the rest of the system.
+
+What:		/sys/block/<disk>/bcache/written
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, total amount of data in human readable units
+		written to the cache, excluding all metadata.
+
+What:		/sys/block/<disk>/bcache/btree_written
+Date:		November 2010
+Contact:	Kent Overstreet <kent.overstreet@gmail.com>
+Description:
+		For a cache, sum of all btree writes in human readable units.
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index 938ef71..3105644 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -14,8 +14,7 @@
 		The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond
 		to each /dev/mtdX character device.  These may represent
 		physical/simulated flash devices, partitions on a flash
-		device, or concatenated flash devices.  They exist regardless
-		of whether CONFIG_MTD_CHAR is actually enabled.
+		device, or concatenated flash devices.
 
 What:		/sys/class/mtd/mtdXro/
 Date:		April 2009
@@ -23,8 +22,7 @@
 Contact:	linux-mtd@lists.infradead.org
 Description:
 		These directories provide the corresponding read-only device
-		nodes for /sys/class/mtd/mtdX/ .  They are only created
-		(for the benefit of udev) if CONFIG_MTD_CHAR is enabled.
+		nodes for /sys/class/mtd/mtdX/ .
 
 What:		/sys/class/mtd/mtdX/dev
 Date:		April 2009
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index b0d5410..d9be7a9 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -66,6 +66,83 @@
 drivers/acpi/acpi_platform.c. This limitation is only for the platform
 devices, SPI and I2C devices are created automatically as described below.
 
+DMA support
+~~~~~~~~~~~
+DMA controllers enumerated via ACPI should be registered in the system to
+provide generic access to their resources. For example, a driver that would
+like to be accessible to slave devices via the generic API call
+dma_request_slave_channel() must register itself at the end of the probe
+function like this:
+
+	err = devm_acpi_dma_controller_register(dev, xlate_func, dw);
+	/* Handle the error if it's not a case of !CONFIG_ACPI */
+
+and implement a custom xlate function if needed (usually acpi_dma_simple_xlate()
+is enough) which converts the FixedDMA resource provided by struct
+acpi_dma_spec into the corresponding DMA channel. A piece of code for that case
+could look like:
+
+	#ifdef CONFIG_ACPI
+	struct filter_args {
+		/* Provide necessary information for the filter_func */
+		...
+	};
+
+	static bool filter_func(struct dma_chan *chan, void *param)
+	{
+		/* Choose the proper channel */
+		...
+	}
+
+	static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec,
+			struct acpi_dma *adma)
+	{
+		dma_cap_mask_t cap;
+		struct filter_args args;
+
+		/* Prepare arguments for filter_func */
+		...
+		return dma_request_channel(cap, filter_func, &args);
+	}
+	#else
+	static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec,
+			struct acpi_dma *adma)
+	{
+		return NULL;
+	}
+	#endif
+
+dma_request_slave_channel() will call xlate_func() for each registered DMA
+controller. In the xlate function the proper channel must be chosen based on
+information in struct acpi_dma_spec and the properties of the controller
+provided by struct acpi_dma.
+
+Clients must call dma_request_slave_channel() with the string parameter that
+corresponds to a specific FixedDMA resource. By default "tx" means the first
+entry of the FixedDMA resource array, "rx" means the second entry. The ASL
+excerpt below shows the layout:
+
+	Device (I2C0)
+	{
+		...
+		Method (_CRS, 0, NotSerialized)
+		{
+			Name (DBUF, ResourceTemplate ()
+			{
+				FixedDMA (0x0018, 0x0004, Width32bit, _Y48)
+				FixedDMA (0x0019, 0x0005, Width32bit, )
+			})
+		...
+		}
+	}
+
+So, the FixedDMA with request line 0x0018 is "tx" and the next one is "rx" in
+this example.
+
+In more complex cases the client unfortunately needs to call
+acpi_dma_request_slave_chan_by_index() directly and therefore choose the
+specific FixedDMA resource by its index.
+
 SPI serial bus support
 ~~~~~~~~~~~~~~~~~~~~~~
 Slave devices behind SPI bus have SpiSerialBus resource attached to them.
diff --git a/Documentation/bcache.txt b/Documentation/bcache.txt
new file mode 100644
index 0000000..77db880
--- /dev/null
+++ b/Documentation/bcache.txt
@@ -0,0 +1,431 @@
+Say you've got a big slow raid 6, and an X-25E or three. Wouldn't it be
+nice if you could use them as cache... Hence bcache.
+
+Wiki and git repositories are at:
+  http://bcache.evilpiepirate.org
+  http://evilpiepirate.org/git/linux-bcache.git
+  http://evilpiepirate.org/git/bcache-tools.git
+
+It's designed around the performance characteristics of SSDs - it only allocates
+in erase block sized buckets, and it uses a hybrid btree/log to track cached
+extents (which can be anywhere from a single sector to the bucket size). It's
+designed to avoid random writes at all costs; it fills up an erase block
+sequentially, then issues a discard before reusing it.
+
+Both writethrough and writeback caching are supported. Writeback defaults to
+off, but can be switched on and off arbitrarily at runtime. Bcache goes to
+great lengths to protect your data - it reliably handles unclean shutdown. (It
+doesn't even have a notion of a clean shutdown; bcache simply doesn't return
+writes as completed until they're on stable storage).
+
+Writeback caching can use most of the cache for buffering writes - writing
+dirty data to the backing device is always done sequentially, scanning from the
+start to the end of the index.
+
+Since random IO is what SSDs excel at, there generally won't be much benefit
+to caching large sequential IO. Bcache detects sequential IO and skips it;
+it also keeps a rolling average of the IO sizes per task, and as long as the
+average is above the cutoff it will skip all IO from that task - instead of
+caching the first 512k after every seek. Backups and large file copies should
+thus entirely bypass the cache.
+
+In the event of a data IO error on the flash it will try to recover by reading
+from disk or invalidating cache entries.  For unrecoverable errors (metadata
+or dirty data), caching is automatically disabled; if dirty data was present
+in the cache it first disables writeback caching and waits for all dirty data
+to be flushed.
+
+Getting started:
+You'll need make-bcache from the bcache-tools repository. Both the cache device
+and backing device must be formatted before use.
+  make-bcache -B /dev/sdb
+  make-bcache -C /dev/sdc
+
+make-bcache can format multiple devices at the same time - if you format
+your backing devices and cache device together, you won't have to attach
+manually:
+  make-bcache -B /dev/sda /dev/sdb -C /dev/sdc
+
+To make bcache devices known to the kernel, echo them to /sys/fs/bcache/register:
+
+  echo /dev/sdb > /sys/fs/bcache/register
+  echo /dev/sdc > /sys/fs/bcache/register
+
+To register your bcache devices automatically, you could add something like
+this to an init script:
+
+  echo /dev/sd* > /sys/fs/bcache/register_quiet
+
+It'll look for bcache superblocks and ignore everything that doesn't have one.
+
+Registering the backing device makes the bcache device show up in /dev; you
+can now format it and use it as normal. But the first time you use a new
+bcache device, it'll be running in passthrough mode until you attach it to a
+cache. See the section on attaching.
+
+The devices show up at /dev/bcacheN, and can be controlled via sysfs from
+/sys/block/bcacheN/bcache:
+
+  mkfs.ext4 /dev/bcache0
+  mount /dev/bcache0 /mnt
+
+Cache devices are managed as sets; multiple caches per set isn't supported yet
+but will allow for mirroring of metadata and dirty data in the future. Your new
+cache set shows up as /sys/fs/bcache/<UUID>
+
+ATTACHING:
+
+After your cache device and backing device are registered, the backing device
+must be attached to your cache set to enable caching. Attaching a backing
+device to a cache set is done thusly, with the UUID of the cache set in
+/sys/fs/bcache:
+
+  echo <UUID> > /sys/block/bcache0/bcache/attach
+
+This only has to be done once. The next time you reboot, just reregister all
+your bcache devices. If a backing device has data in a cache somewhere, the
+/dev/bcache# device won't be created until the cache shows up - this is
+particularly important if you have writeback caching turned on.
+
+If you're booting up and your cache device is gone and never coming back, you
+can force run the backing device:
+
+  echo 1 > /sys/block/sdb/bcache/running
+
+(You need to use /sys/block/sdb (or whatever your backing device is called), not
+/sys/block/bcache0, because bcache0 doesn't exist yet. If you're using a
+partition, the bcache directory would be at /sys/block/sdb/sdb2/bcache)
+
+The backing device will still use that cache set if it shows up in the future,
+but all the cached data will be invalidated. If there was dirty data in the
+cache, don't expect the filesystem to be recoverable - you will have massive
+filesystem corruption, though ext4's fsck does work miracles.
+
+ERROR HANDLING:
+
+Bcache tries to transparently handle IO errors to/from the cache device without
+affecting normal operation; if it sees too many errors (the threshold is
+configurable, and defaults to 0) it shuts down the cache device and switches all
+the backing devices to passthrough mode.
+
+ - For reads from the cache, if they error we just retry the read from the
+   backing device.
+
+ - For writethrough writes, if the write to the cache errors we just switch to
+   invalidating the data at that LBA in the cache (i.e. the same thing we do
+   for a write that bypasses the cache).
+
+ - For writeback writes, we currently pass that error back up to the
+   filesystem/userspace. This could be improved - we could retry it as a write
+   that skips the cache so we don't have to error the write.
+
+ - When we detach, we first try to flush any dirty data (if we were running in
+   writeback mode). It currently doesn't do anything intelligent if it fails to
+   read some of the dirty data, though.
+
+TROUBLESHOOTING PERFORMANCE:
+
+Bcache has a bunch of config options and tunables. The defaults are intended to
+be reasonable for typical desktop and server workloads, but they're not what you
+want for getting the best possible numbers when benchmarking.
+
+ - Bad write performance
+
+   If write performance is not what you expected, you probably wanted to be
+   running in writeback mode, which isn't the default (not due to a lack of
+   maturity, but simply because in writeback mode you'll lose data if something
+   happens to your SSD)
+
+   # echo writeback > /sys/block/bcache0/bcache/cache_mode
+
+ - Bad performance, or traffic not going to the SSD that you'd expect
+
+   By default, bcache doesn't cache everything. It tries to skip sequential IO -
+   because you really want to be caching the random IO, and if you copy a 10
+   gigabyte file you probably don't want that pushing 10 gigabytes of randomly
+   accessed data out of your cache.
+
+   But if you want to benchmark reads from cache, and you start out with fio
+   writing an 8 gigabyte test file, then you want to disable that:
+
+   # echo 0 > /sys/block/bcache0/bcache/sequential_cutoff
+
+   To set it back to the default (4 MB), do
+
+   # echo 4M > /sys/block/bcache0/bcache/sequential_cutoff
+
+ - Traffic's still going to the spindle/still getting cache misses
+
+   In the real world, SSDs don't always keep up with disks - particularly with
+   slower SSDs, many disks being cached by one SSD, or mostly sequential IO. So
+   you want to avoid being bottlenecked by the SSD and having it slow everything
+   down.
+
+   To avoid that, bcache tracks latency to the cache device, and gradually
+   throttles traffic if the latency exceeds a threshold (it does this by
+   cranking down the sequential bypass).
+
+   You can disable this if you need to by setting the thresholds to 0:
+
+   # echo 0 > /sys/fs/bcache/<cache set>/congested_read_threshold_us
+   # echo 0 > /sys/fs/bcache/<cache set>/congested_write_threshold_us
+
+   The default is 2000 us (2 milliseconds) for reads, and 20000 for writes.
+
+ - Still getting cache misses, of the same data
+
+   One last issue that sometimes trips people up is actually an old bug, due to
+   the way cache coherency is handled for cache misses. If a btree node is full,
+   a cache miss won't be able to insert a key for the new data and the data
+   won't be written to the cache.
+
+   In practice this isn't an issue because as soon as a write comes along it'll
+   cause the btree node to be split, and you need almost no write traffic for
+   this to not show up enough to be noticeable (especially since bcache's btree
+   nodes are huge and index large regions of the device). But when you're
+   benchmarking, if you're trying to warm the cache by reading a bunch of data
+   and there's no other traffic - that can be a problem.
+
+   Solution: warm the cache by doing writes, or use the testing branch (there's
+   a fix for the issue there).
+
+SYSFS - BACKING DEVICE:
+
+attach
+  Echo the UUID of a cache set to this file to enable caching.
+
+cache_mode
+  Can be one of writethrough, writeback, writearound or none.
+
+clear_stats
+  Writing to this file resets the running total stats (not the day/hour/5 minute
+  decaying versions).
+
+detach
+  Write to this file to detach from a cache set. If there is dirty data in the
+  cache, it will be flushed first.
+
+dirty_data
+  Amount of dirty data for this backing device in the cache. Continuously
+  updated unlike the cache set's version, but may be slightly off.
+
+label
+  Name of underlying device.
+
+readahead
+  Size of readahead that should be performed.  Defaults to 0.  If set to e.g.
+  1M, it will round cache miss reads up to that size, but without overlapping
+  existing cache entries.
+
+running
+  1 if bcache is running (i.e. whether the /dev/bcache device exists, whether
+  it's in passthrough mode or caching).
+
+sequential_cutoff
+  A sequential IO will bypass the cache once it passes this threshold; the
+  most recent 128 IOs are tracked so sequential IO can be detected even when
+  it isn't all done at once.
+
+sequential_merge
+  If nonzero, bcache keeps a list of the last 128 requests submitted to compare
+  against all new requests to determine which new requests are sequential
+  continuations of previous requests for the purpose of determining sequential
+  cutoff. This is necessary if the sequential cutoff value is greater than the
+  maximum acceptable sequential size for any single request. 
+
+state
+  The backing device can be in one of four different states:
+
+  no cache: Has never been attached to a cache set.
+
+  clean: Part of a cache set, and there is no cached dirty data.
+
+  dirty: Part of a cache set, and there is cached dirty data.
+
+  inconsistent: The backing device was forcibly run by the user when there was
+  dirty data cached but the cache set was unavailable; whatever data was on the
+  backing device has likely been corrupted.
+
+stop
+  Write to this file to shut down the bcache device and close the backing
+  device.
+
+writeback_delay
+  When dirty data is written to the cache and the cache previously contained
+  no dirty data, wait this many seconds before initiating writeback. Defaults
+  to 30.
+
+writeback_percent
+  If nonzero, bcache tries to keep around this percentage of the cache dirty
+  by throttling background writeback and using a PD controller to smoothly
+  adjust the rate (see the example following this list).
+
+writeback_rate
+  Rate in sectors per second - if writeback_percent is nonzero, background
+  writeback is throttled to this rate. Continuously adjusted by bcache but may
+  also be set by the user.
+
+writeback_running
+  If off, writeback of dirty data will not take place at all. Dirty data will
+  still be added to the cache until it is mostly full; only meant for
+  benchmarking. Defaults to on.
+
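+As a quick example of the writeback tunables above (assuming the bcache
+device is bcache0):
+
+  # echo writeback > /sys/block/bcache0/bcache/cache_mode
+  # echo 10 > /sys/block/bcache0/bcache/writeback_percent
+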
+SYSFS - BACKING DEVICE STATS:
+
+There are directories with these numbers for a running total, as well as
+versions that decay over the past day, hour and 5 minutes; they're also
+aggregated in the cache set directory.
+
+bypassed
+  Amount of IO (both reads and writes) that has bypassed the cache
+
+cache_hits
+cache_misses
+cache_hit_ratio
+  Hits and misses are counted per individual IO as bcache sees them; a
+  partial hit is counted as a miss.
+
+cache_bypass_hits
+cache_bypass_misses
+  Hits and misses for IO that is intended to skip the cache are still counted,
+  but broken out here.
+
+cache_miss_collisions
+  Counts instances where data was going to be inserted into the cache from a
+  cache miss, but raced with a write and data was already present (usually 0
+  since the synchronization for cache misses was rewritten)
+
+cache_readaheads
+  Count of times readahead occurred.
+
+SYSFS - CACHE SET:
+
+average_key_size
+  Average data per key in the btree.
+
+bdev<0..n>
+  Symlink to each of the attached backing devices.
+
+block_size
+  Block size of the cache devices.
+
+btree_cache_size
+  Amount of memory currently used by the btree cache
+
+bucket_size
+  Size of buckets
+
+cache<0..n>
+  Symlink to each of the cache devices comprising this cache set. 
+
+cache_available_percent
+  Percentage of cache device free.
+
+clear_stats
+  Clears the statistics associated with this cache
+
+dirty_data
+  Amount of dirty data in the cache (updated when garbage collection runs).
+
+flash_vol_create
+  Echoing a size to this file (in human readable units, k/M/G) creates a thinly
+  provisioned volume backed by the cache set.
+
+io_error_halflife
+io_error_limit
+  These determine how many errors we accept before disabling the cache.
+  Each error is decayed by the half life (in number of IOs).  If the decaying
+  count reaches io_error_limit, dirty data is written out and the cache is
+  disabled.
+
+journal_delay_ms
+  Journal writes will delay for up to this many milliseconds, unless a cache
+  flush happens sooner. Defaults to 100.
+
+root_usage_percent
+  Percentage of the root btree node in use.  If this gets too high the node
+  will split, increasing the tree depth.
+
+stop
+  Write to this file to shut down the cache set - waits until all attached
+  backing devices have been shut down.
+
+tree_depth
+  Depth of the btree (A single node btree has depth 0).
+
+unregister
+  Detaches all backing devices and closes the cache devices; if dirty data is
+  present it will disable writeback caching and wait for it to be flushed.
+
+SYSFS - CACHE SET INTERNAL:
+
+This directory also exposes timings for a number of internal operations, with
+separate files for average duration, average frequency, last occurrence and max
+duration: garbage collection, btree read, btree node sorts and btree splits.
+
+active_journal_entries
+  Number of journal entries that are newer than the index.
+
+btree_nodes
+  Total nodes in the btree.
+
+btree_used_percent
+  Average fraction of btree in use.
+
+bset_tree_stats
+  Statistics about the auxiliary search trees
+
+btree_cache_max_chain
+  Longest chain in the btree node cache's hash table
+
+cache_read_races
+  Counts instances where while data was being read from the cache, the bucket
+  was reused and invalidated - i.e. where the pointer was stale after the read
+  completed. When this occurs the data is reread from the backing device.
+
+trigger_gc
+  Writing to this file forces garbage collection to run.
+
+SYSFS - CACHE DEVICE:
+
+block_size
+  Minimum granularity of writes - should match hardware sector size.
+
+btree_written
+  Sum of all btree writes, in (kilo/mega/giga) bytes
+
+bucket_size
+  Size of buckets
+
+cache_replacement_policy
+  One of lru, fifo or random.
+
+discard
+  Boolean; if on a discard/TRIM will be issued to each bucket before it is
+  reused. Defaults to off, since SATA TRIM is an unqueued command (and thus
+  slow).
+
+freelist_percent
+  Size of the freelist as a percentage of nbuckets. Can be written to in
+  order to increase the number of buckets kept on the freelist, which lets
+  you artificially reduce the size of the cache at runtime. Mostly for
+  testing purposes (i.e. testing how different size caches affect your hit
+  rate), but since buckets are discarded as they move onto the freelist, it
+  will also make the SSD's garbage collection easier by effectively giving
+  it more reserved space.
+
+io_errors
+  Number of errors that have occurred, decayed by io_error_halflife.
+
+metadata_written
+  Sum of all non-data writes (btree writes and all other metadata).
+
+nbuckets
+  Total buckets in this cache
+
+priority_stats
+  Statistics about how recently data in the cache has been accessed.  This can
+  reveal your working set size.
+
+written
+  Sum of all data that has been written to the cache; comparison with
+  btree_written gives the amount of write inflation in bcache.
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index a5eb7d1..9887f04 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -5,7 +5,7 @@
 I/O bandwidth for all the processes which requests an I/O operation.
 
 CFQ maintains the per process queue for the processes which request I/O
-operation(syncronous requests). In case of asynchronous requests, all the
+operation(synchronous requests). In case of asynchronous requests, all the
 requests from all the processes are batched together according to their
 process's I/O priority.
 
@@ -66,6 +66,47 @@
 value of this is 124ms. In case to favor synchronous requests over asynchronous
 one, this value should be decreased relative to fifo_expire_async.
 
+group_idle
+----------
+This parameter forces idling at the CFQ group level instead of the CFQ
+queue level. It was introduced after a bottleneck was observed in higher
+end storage, caused by idling on a sequential queue while allowing dispatch
+from only a single queue. The idea with this parameter is that it can be
+run with slice_idle=0 and group_idle=8, so that idling does not happen on
+individual queues in the group but happens overall on the group and thus
+still keeps the IO controller working.
+Not idling on individual queues in the group will dispatch requests from
+multiple queues in the group at the same time and achieve higher throughput
+on higher end storage.
+
+Default value for this parameter is 8ms.
+
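+As a sketch of the configuration described above (assuming cfq's tunables
+are exposed under /sys/block/<device>/queue/iosched/ and the device is sda):
+
+	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
+	# echo 8 > /sys/block/sda/queue/iosched/group_idle
+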
+latency
+-------
+This parameter is used to enable/disable the latency mode of the CFQ
+scheduler. If latency mode (called low_latency) is enabled, CFQ tries
+to recompute the slice time for each process based on the target_latency set
+for the system. This favors fairness over throughput. Disabling low
+latency (setting it to 0) ignores target latency, allowing each process in the
+system to get a full time slice.
+
+By default low latency mode is enabled.
+
+target_latency
+--------------
+This parameter is used to calculate the time slice for a process if cfq's
+latency mode is enabled. It will ensure that sync requests have an estimated
+latency. But if the sequential workload is higher (e.g. sequential reads),
+then to meet the latency constraints, throughput may decrease because each
+process has less time to issue I/O requests before the cfq queue is switched.
+
+Though this can be overcome by disabling the latency mode, it may increase
+the read latency for some applications. This parameter allows changing
+target_latency through the sysfs interface, which can provide balanced
+throughput and read latency.
+
+Default value for target_latency is 300ms.
+
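+For example, to trade some throughput for tighter read latencies,
+target_latency can be lowered (again assuming the tunables live under
+/sys/block/<device>/queue/iosched/ and the device is sda):
+
+	# echo 200 > /sys/block/sda/queue/iosched/target_latency
+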
 slice_async
 -----------
 This parameter is same as of slice_sync but for asynchronous queue. The
@@ -98,8 +139,8 @@
 request.
 
 In case of storage with several disk, this setting can limit the parallel
-processing of request. Therefore, increasing the value can imporve the
-performace although this can cause the latency of some I/O to increase due
+processing of request. Therefore, increasing the value can improve the
+performance although this can cause the latency of some I/O to increase due
 to more number of requests.
 
 CFQ Group scheduling
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 09027a9..ddf4f93 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -480,7 +480,9 @@
 
 # per-memory cgroup local status
 cache		- # of bytes of page cache memory.
-rss		- # of bytes of anonymous and swap cache memory.
+rss		- # of bytes of anonymous and swap cache memory (includes
+		transparent hugepages).
+rss_huge	- # of bytes of anonymous transparent hugepages.
 mapped_file	- # of bytes of mapped file (includes tmpfs/shmem)
 pgpgin		- # of charging events to the memory cgroup. The charging
 		event happens each time a page is accounted as either mapped
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
index 3c046ee..c80e8a3 100644
--- a/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt
@@ -1,14 +1,39 @@
 * Atmel Direct Memory Access Controller (DMA)
 
 Required properties:
-- compatible: Should be "atmel,<chip>-dma"
-- reg: Should contain DMA registers location and length
-- interrupts: Should contain DMA interrupt
+- compatible: Should be "atmel,<chip>-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain DMA interrupt.
+- #dma-cells: Must be <2>, used to represent the number of integer cells in
+the dmas property of client devices.
 
-Examples:
+Example:
 
-dma@ffffec00 {
+dma0: dma@ffffec00 {
 	compatible = "atmel,at91sam9g45-dma";
 	reg = <0xffffec00 0x200>;
 	interrupts = <21>;
+	#dma-cells = <2>;
+};
+
+DMA clients connected to the Atmel DMA controller must use the format
+described in the dma.txt file, using a three-cell specifier for each channel:
+a phandle plus two integer cells.
+The three cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. The memory interface (16 most significant bits), the peripheral interface
+(16 least significant bits).
+3. The peripheral identifier for the hardware handshaking interface. The
+identifier can be different for tx and rx.
+
+Example:
+
+i2c0: i2c@f8010000 {
+	compatible = "atmel,at91sam9x5-i2c";
+	reg = <0xf8010000 0x100>;
+	interrupts = <9 4 6>;
+	dmas = <&dma0 1 7>,
+	       <&dma0 1 8>;
+	dma-names = "tx", "rx";
 };
diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644
index 0000000..b35a8d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/ralink.txt
@@ -0,0 +1,17 @@
+Ralink MIPS SoC device tree bindings
+
+1. SoCs
+
+Each device tree must specify a compatible value for the Ralink SoC
+it uses in the compatible property of the root node. The compatible
+value must be one of the following values:
+
+  ralink,rt2880-soc
+  ralink,rt3050-soc
+  ralink,rt3052-soc
+  ralink,rt3350-soc
+  ralink,rt3352-soc
+  ralink,rt3883-soc
+  ralink,rt5350-soc
+  ralink,mt7620a-soc
+  ralink,mt7620n-soc
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index e7f8d7e..6a983c1 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -56,20 +56,20 @@
 			nand-bus-width = <16>;
 			ti,nand-ecc-opt = "bch8";
 
-			gpmc,sync-clk = <0>;
-			gpmc,cs-on = <0>;
-			gpmc,cs-rd-off = <44>;
-			gpmc,cs-wr-off = <44>;
-			gpmc,adv-on = <6>;
-			gpmc,adv-rd-off = <34>;
-			gpmc,adv-wr-off = <44>;
-			gpmc,we-off = <40>;
-			gpmc,oe-off = <54>;
-			gpmc,access = <64>;
-			gpmc,rd-cycle = <82>;
-			gpmc,wr-cycle = <82>;
-			gpmc,wr-access = <40>;
-			gpmc,wr-data-mux-bus = <0>;
+			gpmc,sync-clk-ps = <0>;
+			gpmc,cs-on-ns = <0>;
+			gpmc,cs-rd-off-ns = <44>;
+			gpmc,cs-wr-off-ns = <44>;
+			gpmc,adv-on-ns = <6>;
+			gpmc,adv-rd-off-ns = <34>;
+			gpmc,adv-wr-off-ns = <44>;
+			gpmc,we-off-ns = <40>;
+			gpmc,oe-off-ns = <54>;
+			gpmc,access-ns = <64>;
+			gpmc,rd-cycle-ns = <82>;
+			gpmc,wr-cycle-ns = <82>;
+			gpmc,wr-access-ns = <40>;
+			gpmc,wr-data-mux-bus-ns = <0>;
 
 			#address-cells = <1>;
 			#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index 6e1f61f1..9315ac9 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -5,8 +5,12 @@
 used for what purposes, but which don't use an on-flash partition table such
 as RedBoot.
 
-#address-cells & #size-cells must both be present in the mtd device and be
-equal to 1.
+#address-cells & #size-cells must both be present in the mtd device. There are
+two valid values for both:
+<1>: for partitions that require a single 32-bit cell to represent their
+     size/address (i.e. the value is below 4 GiB)
+<2>: for partitions that require two 32-bit cells to represent their
+     size/address (i.e. the value is 4 GiB or greater).
 
 Required properties:
 - reg : The partition's offset and size within the mtd bank.
@@ -36,3 +40,31 @@
 		reg = <0x0100000 0x200000>;
 	};
 };
+
+flash@1 {
+	#address-cells = <1>;
+	#size-cells = <2>;
+
+	/* a 4 GiB partition */
+	partition@0 {
+		label = "filesystem";
+		reg = <0x00000000 0x1 0x00000000>;
+	};
+};
+
+flash@2 {
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	/* an 8 GiB partition */
+	partition@0 {
+		label = "filesystem #1";
+		reg = <0x0 0x00000000 0x2 0x00000000>;
+	};
+
+	/* a 4 GiB partition */
+	partition@200000000 {
+		label = "filesystem #2";
+		reg = <0x2 0x00000000 0x1 0x00000000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt
index 24cb4e4..ace4a64 100644
--- a/Documentation/devicetree/bindings/net/gpmc-eth.txt
+++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt
@@ -26,16 +26,16 @@
 - bank-width: 		Address width of the device in bytes. GPMC supports 8-bit
 			and 16-bit devices and so must be either 1 or 2 bytes.
 - compatible:		Compatible string property for the ethernet child device.
-- gpmc,cs-on:		Chip-select assertion time
-- gpmc,cs-rd-off:	Chip-select de-assertion time for reads
-- gpmc,cs-wr-off:	Chip-select de-assertion time for writes
-- gpmc,oe-on:		Output-enable assertion time
-- gpmc,oe-off		Output-enable de-assertion time
-- gpmc,we-on:		Write-enable assertion time
-- gpmc,we-off:		Write-enable de-assertion time
-- gpmc,access:		Start cycle to first data capture (read access)
-- gpmc,rd-cycle:	Total read cycle time
-- gpmc,wr-cycle:	Total write cycle time
+- gpmc,cs-on-ns:	Chip-select assertion time
+- gpmc,cs-rd-off-ns:	Chip-select de-assertion time for reads
+- gpmc,cs-wr-off-ns:	Chip-select de-assertion time for writes
+- gpmc,oe-on-ns:	Output-enable assertion time
+- gpmc,oe-off-ns:	Output-enable de-assertion time
+- gpmc,we-on-ns:	Write-enable assertion time
+- gpmc,we-off-ns:	Write-enable de-assertion time
+- gpmc,access-ns:	Start cycle to first data capture (read access)
+- gpmc,rd-cycle-ns:	Total read cycle time
+- gpmc,wr-cycle-ns:	Total write cycle time
 - reg:			Chip-select, base address (relative to chip-select)
 			and size of the memory mapped for the device.
 			Note that base address will be typically 0 as this
@@ -65,24 +65,24 @@
 		bank-width = <2>;
 
 		gpmc,mux-add-data;
-		gpmc,cs-on = <0>;
-		gpmc,cs-rd-off = <186>;
-		gpmc,cs-wr-off = <186>;
-		gpmc,adv-on = <12>;
-		gpmc,adv-rd-off = <48>;
-		gpmc,adv-wr-off = <48>;
-		gpmc,oe-on = <54>;
-		gpmc,oe-off = <168>;
-		gpmc,we-on = <54>;
-		gpmc,we-off = <168>;
-		gpmc,rd-cycle = <186>;
-		gpmc,wr-cycle = <186>;
-		gpmc,access = <114>;
-		gpmc,page-burst-access = <6>;
-		gpmc,bus-turnaround = <12>;
-		gpmc,cycle2cycle-delay = <18>;
-		gpmc,wr-data-mux-bus = <90>;
-		gpmc,wr-access = <186>;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <186>;
+		gpmc,cs-wr-off-ns = <186>;
+		gpmc,adv-on-ns = <12>;
+		gpmc,adv-rd-off-ns = <48>;
+		gpmc,adv-wr-off-ns = <48>;
+		gpmc,oe-on-ns = <54>;
+		gpmc,oe-off-ns = <168>;
+		gpmc,we-on-ns = <54>;
+		gpmc,we-off-ns = <168>;
+		gpmc,rd-cycle-ns = <186>;
+		gpmc,wr-cycle-ns = <186>;
+		gpmc,access-ns = <114>;
+		gpmc,page-burst-access-ns = <6>;
+		gpmc,bus-turnaround-ns = <12>;
+		gpmc,cycle2cycle-delay-ns = <18>;
+		gpmc,wr-data-mux-bus-ns = <90>;
+		gpmc,wr-access-ns = <186>;
 		gpmc,cycle2cycle-samecsen;
 		gpmc,cycle2cycle-diffcsen;
 
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
new file mode 100644
index 0000000..fff93d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -0,0 +1,22 @@
+* Marvell Armada 370/XP thermal management
+
+Required properties:
+
+- compatible:	Should be set to one of the following:
+		marvell,armada370-thermal
+		marvell,armadaxp-thermal
+
+- reg:		Device's register space.
+		Two entries are expected, see the examples below.
+		The first one is required for the sensor register;
+		the second one is required for the control register
+		to be used for sensor initialization (a.k.a. calibration).
+
+Example:
+
+	thermal@d0018300 {
+		compatible = "marvell,armada370-thermal";
+		reg = <0xd0018300 0x4
+		       0xd0018304 0x4>;
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d1919b..6931c43 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -42,6 +42,7 @@
 picochip	Picochip Ltd
 powervr	PowerVR (deprecated, use img)
 qcom	Qualcomm, Inc.
+ralink	Mediatek/Ralink Technology Corp.
 ramtron	Ramtron International
 realtek Realtek Semiconductor Corp.
 renesas	Renesas Electronics Corporation
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
new file mode 100644
index 0000000..279ac0a
--- /dev/null
+++ b/Documentation/dmatest.txt
@@ -0,0 +1,81 @@
+				DMA Test Guide
+				==============
+
+		Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+This small document introduces how to test DMA drivers using the dmatest module.
+
+	Part 1 - How to build the test module
+
+The menuconfig contains an option that can be found at the following path:
+	Device Drivers -> DMA Engine support -> DMA Test client
+
+In the configuration file the option is called CONFIG_DMATEST. The dmatest
+module can be built either as a module or into the kernel. Let's consider
+both cases.
+
+	Part 2 - When dmatest is built as a module...
+
+After mounting debugfs and loading the module, a /sys/kernel/debug/dmatest
+folder with control nodes will be created. The nodes are the same as the
+module parameters, with the addition of the 'run' node that controls the run
+and stop phases of the test.
+
+Note that in this case the test will not run automatically on load.
+
+Example of usage:
+	% echo dma0chan0 > /sys/kernel/debug/dmatest/channel
+	% echo 2000 > /sys/kernel/debug/dmatest/timeout
+	% echo 1 > /sys/kernel/debug/dmatest/iterations
+	% echo 1 > /sys/kernel/debug/dmatest/run
+
+Hint: the list of available channels can be obtained by running the following
+command:
+	% ls -1 /sys/class/dma/
+
+After a while you will start to get messages about the current status or
+errors, like in the original code.
+
+Note that running a new test will stop any in-progress test.
+
+The following command returns the actual state of the test:
+	% cat /sys/kernel/debug/dmatest/run
+
+To wait for the test to finish, the user may perform a busy loop that checks
+the state:
+
+	% while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ]
+	> do
+	> 	echo -n "."
+	> 	sleep 1
+	> done
+	> echo
+
+	Part 3 - When built-in in the kernel...
+
+The module parameters supplied on the kernel command line will be used for
+the first test performed. After the user gets control, the test can be
+interrupted or re-run with the same or different parameters. For details see
+the section above, "Part 2 - When dmatest is built as a module..."
+
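+For example, the following hypothetical boot parameters mirror the debugfs
+nodes shown above (the parameter names are assumed to match the nodes):
+
+	dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1
+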
+In both cases the module parameters are used as initial values for the test
+case. You can always check them at run time by running:
+	% grep -H . /sys/module/dmatest/parameters/*
+
+	Part 4 - Gathering the test results
+
+The module provides storage for the test results in memory. The gathered
+data can be inspected after the test is done.
+
+The special file 'results' in debugfs represents the gathered data of the
+in-progress test. The messages collected are printed to the kernel log as
+well.
+
+Example of output:
+	% cat /sys/kernel/debug/dmatest/results
+	dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
+
+The message format is unified across the different types of errors. A number in
+the parens represents additional information, e.g. error code, error counter,
+or status.
+
+The result of comparing the buffers is stored in a dedicated structure.
+
+Note that the verify result is now accessible only via the 'results' file in
+debugfs.
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index 7671352..b349d57 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -1,8 +1,8 @@
 
-	BTRFS
-	=====
+BTRFS
+=====
 
-Btrfs is a new copy on write filesystem for Linux aimed at
+Btrfs is a copy on write filesystem for Linux aimed at
 implementing advanced features while focusing on fault tolerance,
 repair and easy administration. Initially developed by Oracle, Btrfs
 is licensed under the GPL and open for contribution from anyone.
@@ -34,9 +34,175 @@
     * Online filesystem defragmentation
 
 
+Mount Options
+=============
 
-	MAILING LIST
-	============
+When mounting a btrfs filesystem, the following options are accepted.
+Unless otherwise specified, all options default to off.
+
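+As an illustration, several of the options described below can be combined
+in a single mount invocation (device and mount point are placeholders):
+
+	# mount -o compress=lzo,space_cache,autodefrag /dev/sda /mnt
+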
+  alloc_start=<bytes>
+	Debugging option to force all block allocations above a certain
+	byte threshold on each block device.  The value is specified in
+	bytes, optionally with a K, M, or G suffix, case insensitive.
+	Default is 1MB.
+
+  autodefrag
+	Detect small random writes into files and queue them up for the
+	defrag process.  Works best for small files; not well suited for
+	large database workloads.
+
+  check_int
+  check_int_data
+  check_int_print_mask=<value>
+	These debugging options control the behavior of the integrity checking
+	module (the BTRFS_FS_CHECK_INTEGRITY config option is required).
+
+	check_int enables the integrity checker module, which examines all
+	block write requests to ensure on-disk consistency, at a large
+	memory and CPU cost.  
+
+	check_int_data includes extent data in the integrity checks, and
+	implies the check_int option.
+
+	check_int_print_mask takes a bitmask of BTRFSIC_PRINT_MASK_* values
+	as defined in fs/btrfs/check-integrity.c, to control the integrity
+	checker module behavior.
+
+	See comments at the top of fs/btrfs/check-integrity.c for more info.
+
+  compress
+  compress=<type>
+  compress-force
+  compress-force=<type>
+	Control BTRFS file data compression.  Type may be specified as "zlib",
+	"lzo" or "no" (for no compression, used for remounting).  If no type
+	is specified, zlib is used.  If compress-force is specified,
+	all files will be compressed, whether or not they compress well.
+	If compression is enabled, nodatacow and nodatasum are disabled.
+
+  degraded
+	Allow mounts to continue with missing devices.  A read-write mount may
+	fail with too many devices missing, for example if a stripe member
+	is completely missing.
+
+  device=<devicepath>
+	Specify a device during mount so that ioctls on the control device
+	can be avoided.  Especially useful when trying to mount a multi-device
+	setup as root.  May be specified multiple times for multiple devices.
+
+  discard
+	Issue frequent commands to let the block device reclaim space freed by
+	the filesystem.  This is useful for SSD devices, thinly provisioned
+	LUNs and virtual machine images, but may have a significant
+	performance impact.  (The fstrim command is also available to
+	initiate batch trims from userspace).
+
+  enospc_debug
+	Debugging option to be more verbose in some ENOSPC conditions.
+
+  fatal_errors=<action>
+	Action to take when encountering a fatal error: 
+	  "bug" - BUG() on a fatal error.  This is the default.
+	  "panic" - panic() on a fatal error.
+
+  flushoncommit
+	The 'flushoncommit' mount option forces any data dirtied by a write in a
+	prior transaction to commit as part of the current commit.  This makes
+	the committed state a fully consistent view of the file system from the
+	application's perspective (i.e., it includes all completed file system
+	operations).  This was previously the behavior only when a snapshot is
+	created.
+
+  inode_cache
+	Enable free inode number caching.   Defaults to off due to an overflow
+	problem when the free space crcs don't fit inside a single page.
+
+  max_inline=<bytes>
+	Specify the maximum amount of space, in bytes, that can be inlined in
+	a metadata B-tree leaf.  The value is specified in bytes, optionally 
+	with a K, M, or G suffix, case insensitive.  In practice, this value
+	is limited by the root sector size, with some space unavailable due
+	to leaf headers.  For a 4k sectorsize, max inline data is ~3900 bytes.
+
+  metadata_ratio=<value>
+	Specify that 1 metadata chunk should be allocated after every <value>
+	data chunks.  Off by default.
+
+  noacl
+	Disable support for Posix Access Control Lists (ACLs).  See the
+	acl(5) manual page for more information about ACLs.
+
+  nobarrier
+        Disables the use of block layer write barriers.  Write barriers ensure
+	that certain IOs make it through the device cache and are on persistent
+	storage.  If used on a device with a volatile (non-battery-backed)
+	write-back cache, this option will lead to filesystem corruption on a
+	system crash or power loss.
+
+  nodatacow
+	Disable data copy-on-write for newly created files.  Implies nodatasum,
+	and disables all compression.
+
+  nodatasum
+	Disable data checksumming for newly created files.
+
+  notreelog
+	Disable the tree logging used for fsync and O_SYNC writes.
+
+  recovery
+	Enable autorecovery attempts if a bad tree root is found at mount time.
+	Currently this scans a list of several previous tree roots and tries to
+	use the first readable one.
+
+  skip_balance
+	Skip automatic resume of interrupted balance operation after mount.
+	May be resumed with "btrfs balance resume."
+
+  space_cache (*)
+	Enable the on-disk freespace cache.
+  nospace_cache
+	Disable freespace cache loading without clearing the cache.
+  clear_cache
+	Force clearing and rebuilding of the disk space cache if something
+	has gone wrong.
+
+  ssd
+  nossd
+  ssd_spread
+	Options to control ssd allocation schemes.  By default, BTRFS will
+	enable or disable ssd allocation heuristics depending on whether a
+	rotational or nonrotational disk is in use.  The ssd and nossd options
+	can override this autodetection.
+
+	The ssd_spread mount option attempts to allocate into big chunks
+	of unused space, and may perform better on low-end ssds.  ssd_spread
+	implies ssd, enabling all other ssd heuristics as well.
+
+  subvol=<path>
+	Mount subvolume at <path> rather than the root subvolume.  <path> is
+	relative to the top level subvolume.
+
+  subvolid=<ID>
+	Mount subvolume specified by an ID number rather than the root subvolume.
+	This allows mounting of subvolumes which are not in the root of the mounted
+	filesystem.
+	You can use "btrfs subvolume list" to see subvolume ID numbers.
+
+  subvolrootid=<objectid> (deprecated)
+	Mount subvolume specified by <objectid> rather than the root subvolume.
+	This allows mounting of subvolumes which are not in the root of the mounted
+	filesystem.
+	You can use "btrfs subvolume show" to see the object ID for a subvolume.
+
+  thread_pool=<number>
+	The number of worker threads to allocate.  The default number is equal
+	to the number of CPUs + 2, or 8, whichever is smaller.
+
+  user_subvol_rm_allowed
+	Allow subvolumes to be deleted by a non-root user. Use with caution. 
+
+MAILING LIST
+============
 
 There is a Btrfs mailing list hosted on vger.kernel.org. You can
 find details on how to subscribe here:
@@ -49,8 +215,8 @@
 
 
 
-	IRC
-	===
+IRC
+===
 
 Discussion of Btrfs also occurs on the #btrfs channel of the Freenode
 IRC network.
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index dcf338e..bd3c56c 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -146,7 +146,7 @@
 
 Format options
 --------------
--l [label]   : Give a volume label, up to 256 unicode name.
+-l [label]   : Give a volume label, up to 512 unicode characters.
 -a [0 or 1]  : Split start location of each area for heap-based allocation.
                1 is set by default, which performs this.
 -o [int]     : Set overprovision ratio in percent over volume size.
@@ -156,6 +156,8 @@
 -z [int]     : Set the number of sections per zone.
                1 is set by default.
 -e [str]     : Set basic extension list. e.g. "mp3,gif,mov"
+-t [0 or 1]  : Enable or disable the discard command.
+               1 is set by default, which issues discard commands.
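+
+For example, a hypothetical mkfs invocation that sets a label and keeps
+discard enabled (the device name is a placeholder):
+
+	# mkfs.f2fs -l f2fs-vol -t 1 /dev/sdb1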
 
 ================================================================================
 DESIGN
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 77a1d11..6f83fa9 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -72,11 +72,11 @@
 not care how it's implemented.)
 
 That said, if the convention is supported on their platform, drivers should
-use it when possible.  Platforms must declare GENERIC_GPIO support in their
-Kconfig (boolean true), and provide an <asm/gpio.h> file.  Drivers that can't
-work without standard GPIO calls should have Kconfig entries which depend
-on GENERIC_GPIO.  The GPIO calls are available, either as "real code" or as
-optimized-away stubs, when drivers use the include file:
+use it when possible.  Platforms must select ARCH_REQUIRE_GPIOLIB or
+ARCH_WANT_OPTIONAL_GPIOLIB in their Kconfig.  Drivers that can't work without
+standard GPIO calls should have Kconfig entries which depend on GPIOLIB.  The
+GPIO calls are available, either as "real code" or as optimized-away stubs,
+when drivers use the include file:
 
 	#include <linux/gpio.h>
 
diff --git a/Documentation/thermal/exynos_thermal_emulation b/Documentation/thermal/exynos_thermal_emulation
index b73bbfb..36a3e79 100644
--- a/Documentation/thermal/exynos_thermal_emulation
+++ b/Documentation/thermal/exynos_thermal_emulation
@@ -13,11 +13,11 @@
 manually with software code and TMU will read current temperature from user value not from
 sensor's value.
 
-Enabling CONFIG_EXYNOS_THERMAL_EMUL option will make this support in available.
-When it's enabled, sysfs node will be created under
-/sys/bus/platform/devices/'exynos device name'/ with name of 'emulation'.
+Enabling CONFIG_THERMAL_EMULATION option will make this support available.
+When it's enabled, sysfs node will be created as
+/sys/devices/virtual/thermal/thermal_zone'zone id'/emul_temp.
 
-The sysfs node, 'emulation', will contain value 0 for the initial state. When you input any
+The sysfs node, 'emul_temp', will contain value 0 for the initial state. When you input any
 temperature you want to update to sysfs node, it automatically enable emulation mode and
 current temperature will be changed into it.
 (Exynos also supports user changable delay time which would be used to delay of
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index 6859661..a71bd5b 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -31,15 +31,17 @@
 1. thermal sysfs driver interface functions
 
 1.1 thermal zone device interface
-1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name,
+1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *type,
 		int trips, int mask, void *devdata,
-		struct thermal_zone_device_ops *ops)
+		struct thermal_zone_device_ops *ops,
+		const struct thermal_zone_params *tzp,
+		int passive_delay, int polling_delay)
 
     This interface function adds a new thermal zone device (sensor) to
     /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
     thermal cooling devices registered at the same time.
 
-    name: the thermal zone name.
+    type: the thermal zone type.
     trips: the total number of trip points this thermal zone supports.
     mask: Bit string: If 'n'th bit is set, then trip point 'n' is writeable.
     devdata: device private data
@@ -57,6 +59,12 @@
 			will be fired.
 	.set_emul_temp: set the emulation temperature which helps in debugging
 			different threshold temperature points.
+    tzp: thermal zone platform parameters.
+    passive_delay: number of milliseconds to wait between polls when
+	performing passive cooling.
+    polling_delay: number of milliseconds to wait between polls when checking
+	whether trip points have been crossed (0 for interrupt driven systems).
+
 
 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
@@ -265,6 +273,10 @@
 	Unit: millidegree Celsius
 	WO, Optional
 
+	  WARNING: Be careful while enabling this option on production systems,
+	  because userland can easily disable the thermal policy by simply
+	  flooding this sysfs node with low temperature values.
+
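+	  For example, to emulate a reading of 25 degrees Celsius on the
+	  first zone (zone numbering is system dependent):
+
+	  # echo 25000 > /sys/class/thermal/thermal_zone0/emul_temp
+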
 *****************************
 * Cooling device attributes *
 *****************************
@@ -363,7 +375,7 @@
 {thermal_zone, cooling_device, trip_point} combination. Returns NULL
 if such an instance does not exist.
 
-5.3:notify_thermal_framework:
+5.3:thermal_notify_framework:
 This function handles the trip events from sensor drivers. It starts
 throttling the cooling devices according to the policy configured.
 For CRITICAL and HOT trip points, this notifies the respective drivers,
@@ -375,11 +387,3 @@
 This function serves as an arbitrator to set the state of a cooling
 device. It sets the cooling device to the deepest cooling state if
 possible.
-
-5.5:thermal_register_governor:
-This function lets the various thermal governors to register themselves
-with the Thermal framework. At run time, depending on a zone's platform
-data, a particular governor is used for throttling.
-
-5.6:thermal_unregister_governor:
-This function unregisters a governor from the thermal framework.
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
new file mode 100644
index 0000000..2b1af76
--- /dev/null
+++ b/Documentation/xtensa/mmu.txt
@@ -0,0 +1,46 @@
+MMUv3 initialization sequence.
+
+The code in the initialize_mmu macro sets up MMUv3 memory mapping
+identically to the MMUv2 fixed memory mapping. Depending on the
+CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol, this code is
+located in one of the following address ranges:
+
+    0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout;
+    			 typically ROM)
+    0x00000000..0x07FFFFFF (system RAM; this code is actually linked
+    			 at 0xD0000000..0xD7FFFFFF [cached]
+    			 or 0xD8000000..0xDFFFFFFF [uncached];
+    			 in any case, initially runs elsewhere
+    			 than linked, so we have to be careful)
+
+The code has the following assumptions:
+  This code fragment is run only on an MMU v3.
+  TLBs are in their reset state.
+  ITLBCFG and DTLBCFG are zero (reset state).
+  RASID is 0x04030201 (reset state).
+  PS.RING is zero (reset state).
+  LITBASE is zero (reset state, PC-relative literals); required to be PIC.
+
+TLB setup proceeds along the following steps.
+
+  Legend:
+    VA = virtual address (two upper nibbles of it);
+    PA = physical address (two upper nibbles of it);
+    pc = physical range that contains this code;
+
+After step 2, we jump to a virtual address in 0x40000000..0x5fffffff
+that corresponds to the next instruction to execute in this code.
+After step 4, we jump to the intended (linked) address of this code.
+
+    Step 0     Step1     Step 2     Step3     Step 4     Step5
+ ============  =====  ============  =====  ============  =====
+   VA      PA     PA    VA      PA     PA    VA      PA     PA
+ ------    --     --  ------    --     --  ------    --     --
+ E0..FF -> E0  -> E0  E0..FF -> E0         F0..FF -> F0  -> F0
+ C0..DF -> C0  -> C0  C0..DF -> C0         E0..EF -> F0  -> F0
+ A0..BF -> A0  -> A0  A0..BF -> A0         D8..DF -> 00  -> 00
+ 80..9F -> 80  -> 80  80..9F -> 80         D0..D7 -> 00  -> 00
+ 60..7F -> 60  -> 60  60..7F -> 60
+ 40..5F -> 40         40..5F -> pc  -> pc  40..5F -> pc
+ 20..3F -> 20  -> 20  20..3F -> 20
+ 00..1F -> 00  -> 00  00..1F -> 00
diff --git a/Documentation/zh_CN/gpio.txt b/Documentation/zh_CN/gpio.txt
index 4fa7b4e..d5b8f01 100644
--- a/Documentation/zh_CN/gpio.txt
+++ b/Documentation/zh_CN/gpio.txt
@@ -84,10 +84,10 @@
 controller abstraction functions. (Some optional code can support this
 strategy's implementation, as described later in this document, but drivers
 acting as clients of the GPIO interface must be independent of it.)
 
-That is to say, drivers should use this convention whenever their platform
-supports it. Platforms must declare support for GENERIC_GPIO in Kconfig
-(boolean true) and provide an <asm/gpio.h> file. Drivers that call the
-standard GPIO functions should declare a dependency on GENERIC_GPIO in
-their Kconfig entry. When a driver includes the file:
+That is to say, drivers should use this convention whenever their platform
+supports it. In addition, platforms must select either the ARCH_REQUIRE_GPIOLIB
+or the ARCH_WANT_OPTIONAL_GPIOLIB option in Kconfig. Drivers that call the
+standard GPIO functions should declare a dependency on GENERIC_GPIO in their
+Kconfig entry. When a driver includes the file:
 
 	#include <linux/gpio.h>
 
diff --git a/MAINTAINERS b/MAINTAINERS
index e73c374..3d7782b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1620,6 +1620,13 @@
 S:	Maintained
 F:	drivers/net/hamradio/baycom*
 
+BCACHE (BLOCK LAYER CACHE)
+M:	Kent Overstreet <koverstreet@google.com>
+L:	linux-bcache@vger.kernel.org
+W:	http://bcache.evilpiepirate.org
+S:	Maintained
+F:	drivers/md/bcache/
+
 BEFS FILE SYSTEM
 S:	Orphan
 F:	Documentation/filesystems/befs.txt
@@ -8022,11 +8029,14 @@
 
 THERMAL
 M:      Zhang Rui <rui.zhang@intel.com>
+M:      Eduardo Valentin <eduardo.valentin@ti.com>
 L:      linux-pm@vger.kernel.org
 T:      git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+Q:      https://patchwork.kernel.org/project/linux-pm/list/
 S:      Supported
 F:      drivers/thermal/
 F:      include/linux/thermal.h
+F:      include/linux/cpu_cooling.h
 
 THINGM BLINK(1) USB RGB LED DRIVER
 M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
diff --git a/Makefile b/Makefile
index a3a834b..cd11e88 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 9
+PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8629127..837a1f2 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -55,9 +55,6 @@
 	bool
 	default y
 
-config GENERIC_GPIO
-	bool
-
 config ZONE_DMA
 	bool
 	default y
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e6f4eca..5917099 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -16,8 +16,6 @@
 	select GENERIC_FIND_FIRST_BIT
 	# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_SHOW
-	select GENERIC_KERNEL_EXECVE
-	select GENERIC_KERNEL_THREAD
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
@@ -61,9 +59,6 @@
 config GENERIC_HWEIGHT
 	def_bool y
 
-config BINFMT_ELF
-	def_bool y
-
 config STACKTRACE_SUPPORT
 	def_bool y
 	select STACKTRACE
@@ -82,6 +77,7 @@
 menu "ARC Platform/SoC/Board"
 
 source "arch/arc/plat-arcfpga/Kconfig"
+source "arch/arc/plat-tb10x/Kconfig"
 #New platforms are added here
 
 endmenu
@@ -134,9 +130,6 @@
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_HAS_COH_LLSC
-	def_bool n
-
 config ARC_HAS_COH_RTSC
 	def_bool n
 
@@ -189,6 +182,10 @@
 	  Note that Global I/D ENABLE + Per Page DISABLE works but corollary
 	  Global DISABLE + Per Page ENABLE won't work
 
+config ARC_CACHE_VIPT_ALIASING
+	bool "Support VIPT Aliasing D$"
+	default n
+
 endif	#ARC_CACHE
 
 config ARC_HAS_ICCM
@@ -304,6 +301,9 @@
 	  based on actual usage of the FPU by a task. Thus our implementation does
 	  this for all tasks in the system.
 
+config ARC_CANT_LLSC
+	def_bool n
+
 menuconfig ARC_CPU_REL_4_10
 	bool "Enable support for Rel 4.10 features"
 	default n
@@ -314,9 +314,7 @@
 config ARC_HAS_LLSC
 	bool "Insn: LLOCK/SCOND (efficient atomic ops)"
 	default y
-	depends on ARC_CPU_770
-	# if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
-	depends on !SMP || ARC_HAS_COH_LLSC
+	depends on ARC_CPU_770 && !ARC_CANT_LLSC
 
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
@@ -415,13 +413,6 @@
 	  Counts number of I and D TLB Misses and exports them via Debugfs
 	  The counters can be cleared via Debugfs as well
 
-config CMDLINE
-	string "Kernel command line to built-in"
-	default "print-fatal-signals=1"
-	help
-	  The default command line which will be appended to the optional
-	  u-boot provided command line (see below)
-
 config CMDLINE_UBOOT
 	bool "Support U-boot kernel command line passing"
 	default n
@@ -430,8 +421,8 @@
 	  command line from the U-boot environment to the Linux kernel then
 	  switch this option on.
 	  ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
-	  to it. kernel startup code will copy the string into cmdline buffer
-	  and also append CONFIG_CMDLINE.
+	  to it. Kernel startup code will append this to the cmdline
+	  args provided via DeviceTree /bootargs.
 
 config ARC_BUILTIN_DTB_NAME
 	string "Built in DTB"
@@ -441,6 +432,10 @@
 
 source "kernel/Kconfig.preempt"
 
+menu "Executable file formats"
+source "fs/Kconfig.binfmt"
+endmenu
+
 endmenu	 # "ARC Architecture Configuration"
 
 source "mm/Kconfig"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 92379c7..183397f 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -8,6 +8,10 @@
 
 UTS_MACHINE := arc
 
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE := arc-elf32-
+endif
+
 KBUILD_DEFCONFIG := fpga_defconfig
 
 cflags-y	+= -mA7 -fno-common -pipe -fno-builtin -D__linux__
@@ -87,20 +91,23 @@
 core-y		+= arch/arc/boot/dts/
 
 core-$(CONFIG_ARC_PLAT_FPGA_LEGACY)	+= arch/arc/plat-arcfpga/
+core-$(CONFIG_ARC_PLAT_TB10X)		+= arch/arc/plat-tb10x/
 
 drivers-$(CONFIG_OPROFILE)	+= arch/arc/oprofile/
 
 libs-y		+= arch/arc/lib/ $(LIBGCC)
 
+boot		:= arch/arc/boot
+
 #default target for make without any arguments.
-KBUILD_IMAGE := bootpImage
+KBUILD_IMAGE	:= bootpImage
 
 all:	$(KBUILD_IMAGE)
-boot	:= arch/arc/boot
-
 bootpImage: vmlinux
 
-uImage: vmlinux
+boot_targets += uImage uImage.bin uImage.gz
+
+$(boot_targets): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 %.dtb %.dtb.S %.dtb.o: scripts
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index 7d514c2..e597cb34 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -3,7 +3,6 @@
 # uImage build relies on mkimage being available on your host for ARC target
 # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
 # and make sure it's reachable from your PATH
-MKIMAGE := $(srctree)/scripts/mkuboot.sh
 
 OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
 
@@ -12,7 +11,12 @@
 
 UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
 UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
-UIMAGE_COMPRESSION = gzip
+
+suffix-y := bin
+suffix-$(CONFIG_KERNEL_GZIP)	:= gz
+
+targets += uImage uImage.bin uImage.gz
+extra-y += vmlinux.bin vmlinux.bin.gz
 
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
@@ -20,7 +24,12 @@
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
-$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
-	$(call if_changed,uimage)
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,uimage,none)
 
-PHONY += FORCE
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
+	$(call if_changed,uimage,gzip)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
+	@ln -sf $(notdir $<) $@
+	@echo '  Image $@ is ready'
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index 5776835..faf240e 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -8,6 +8,8 @@
 obj-y   += $(builtindtb-y).dtb.o
 targets += $(builtindtb-y).dtb
 
+.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
+
 dtbs:  $(addprefix  $(obj)/, $(builtindtb-y).dtb)
 
-clean-files := *.dtb
+clean-files := *.dtb  *.dtb.S
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
new file mode 100644
index 0000000..941ad11
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -0,0 +1,340 @@
+/*
+ * Abilis Systems TB100 SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+/include/ "abilis_tb10x.dtsi"
+
+/* interrupt specifiers
+ * --------------------
+ * 0: rising, 1: low, 2: high, 3: falling,
+ */
+
+/ {
+	clock-frequency		= <500000000>;	/* 500 MHz */
+
+	soc100 {
+		bus-frequency	= <166666666>;
+
+		pll0: oscillator {
+			clock-frequency  = <1000000000>;
+		};
+		cpu_clk: clkdiv_cpu {
+			clock-mult = <1>;
+			clock-div = <2>;
+		};
+		ahb_clk: clkdiv_ahb {
+			clock-mult = <1>;
+			clock-div = <6>;
+		};
+
+		iomux: iomux@FF10601c {
+			/* Port 1 */
+			pctl_tsin_s0: pctl-tsin-s0 {   /* Serial TS-in 0 */
+				pingrp = "mis0_pins";
+			};
+			pctl_tsin_s1: pctl-tsin-s1 {   /* Serial TS-in 1 */
+				pingrp = "mis1_pins";
+			};
+			pctl_gpio_a: pctl-gpio-a {     /* GPIO bank A */
+				pingrp = "gpioa_pins";
+			};
+			pctl_tsin_p1: pctl-tsin-p1 {   /* Parallel TS-in 1 */
+				pingrp = "mip1_pins";
+			};
+			/* Port 2 */
+			pctl_tsin_s2: pctl-tsin-s2 {   /* Serial TS-in 2 */
+				pingrp = "mis2_pins";
+			};
+			pctl_tsin_s3: pctl-tsin-s3 {   /* Serial TS-in 3 */
+				pingrp = "mis3_pins";
+			};
+			pctl_gpio_c: pctl-gpio-c {     /* GPIO bank C */
+				pingrp = "gpioc_pins";
+			};
+			pctl_tsin_p3: pctl-tsin-p3 {   /* Parallel TS-in 3 */
+				pingrp = "mip3_pins";
+			};
+			/* Port 3 */
+			pctl_tsin_s4: pctl-tsin-s4 {   /* Serial TS-in 4 */
+				pingrp = "mis4_pins";
+			};
+			pctl_tsin_s5: pctl-tsin-s5 {   /* Serial TS-in 5 */
+				pingrp = "mis5_pins";
+			};
+			pctl_gpio_e: pctl-gpio-e {     /* GPIO bank E */
+				pingrp = "gpioe_pins";
+			};
+			pctl_tsin_p5: pctl-tsin-p5 {   /* Parallel TS-in 5 */
+				pingrp = "mip5_pins";
+			};
+			/* Port 4 */
+			pctl_tsin_s6: pctl-tsin-s6 {   /* Serial TS-in 6 */
+				pingrp = "mis6_pins";
+			};
+			pctl_tsin_s7: pctl-tsin-s7 {   /* Serial TS-in 7 */
+				pingrp = "mis7_pins";
+			};
+			pctl_gpio_g: pctl-gpio-g {     /* GPIO bank G */
+				pingrp = "gpiog_pins";
+			};
+			pctl_tsin_p7: pctl-tsin-p7 {   /* Parallel TS-in 7 */
+				pingrp = "mip7_pins";
+			};
+			/* Port 5 */
+			pctl_gpio_j: pctl-gpio-j {     /* GPIO bank J */
+				pingrp = "gpioj_pins";
+			};
+			pctl_gpio_k: pctl-gpio-k {     /* GPIO bank K */
+				pingrp = "gpiok_pins";
+			};
+			pctl_ciplus: pctl-ciplus {     /* CI+ interface */
+				pingrp = "ciplus_pins";
+			};
+			pctl_mcard: pctl-mcard {       /* M-Card interface */
+				pingrp = "mcard_pins";
+			};
+			/* Port 6 */
+			pctl_tsout_p: pctl-tsout-p {   /* Parallel TS-out */
+				pingrp = "mop_pins";
+			};
+			pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
+				pingrp = "mos0_pins";
+			};
+			pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
+				pingrp = "mos1_pins";
+			};
+			pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
+				pingrp = "mos2_pins";
+			};
+			pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
+				pingrp = "mos3_pins";
+			};
+			/* Port 7 */
+			pctl_uart0: pctl-uart0 {       /* UART 0 */
+				pingrp = "uart0_pins";
+			};
+			pctl_uart1: pctl-uart1 {       /* UART 1 */
+				pingrp = "uart1_pins";
+			};
+			pctl_gpio_l: pctl-gpio-l {     /* GPIO bank L */
+				pingrp = "gpiol_pins";
+			};
+			pctl_gpio_m: pctl-gpio-m {     /* GPIO bank M */
+				pingrp = "gpiom_pins";
+			};
+			/* Port 8 */
+			pctl_spi3: pctl-spi3 {
+				pingrp = "spi3_pins";
+			};
+			/* Port 9 */
+			pctl_spi1: pctl-spi1 {
+				pingrp = "spi1_pins";
+			};
+			pctl_gpio_n: pctl-gpio-n {
+				pingrp = "gpion_pins";
+			};
+			/* Unmuxed GPIOs */
+			pctl_gpio_b: pctl-gpio-b {
+				pingrp = "gpiob_pins";
+			};
+			pctl_gpio_d: pctl-gpio-d {
+				pingrp = "gpiod_pins";
+			};
+			pctl_gpio_f: pctl-gpio-f {
+				pingrp = "gpiof_pins";
+			};
+			pctl_gpio_h: pctl-gpio-h {
+				pingrp = "gpioh_pins";
+			};
+			pctl_gpio_i: pctl-gpio-i {
+				pingrp = "gpioi_pins";
+			};
+		};
+
+		gpioa: gpio@FF140000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF140000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <0>;
+			gpio-pins = <&pctl_gpio_a>;
+		};
+		gpiob: gpio@FF141000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF141000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <3>;
+			gpio-pins = <&pctl_gpio_b>;
+		};
+		gpioc: gpio@FF142000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF142000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <5>;
+			gpio-pins = <&pctl_gpio_c>;
+		};
+		gpiod: gpio@FF143000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF143000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <8>;
+			gpio-pins = <&pctl_gpio_d>;
+		};
+		gpioe: gpio@FF144000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF144000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <10>;
+			gpio-pins = <&pctl_gpio_e>;
+		};
+		gpiof: gpio@FF145000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF145000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <13>;
+			gpio-pins = <&pctl_gpio_f>;
+		};
+		gpiog: gpio@FF146000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF146000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <15>;
+			gpio-pins = <&pctl_gpio_g>;
+		};
+		gpioh: gpio@FF147000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF147000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <18>;
+			gpio-pins = <&pctl_gpio_h>;
+		};
+		gpioi: gpio@FF148000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF148000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <20>;
+			gpio-pins = <&pctl_gpio_i>;
+		};
+		gpioj: gpio@FF149000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF149000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <32>;
+			gpio-pins = <&pctl_gpio_j>;
+		};
+		gpiok: gpio@FF14a000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14A000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <64>;
+			gpio-pins = <&pctl_gpio_k>;
+		};
+		gpiol: gpio@FF14b000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14B000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <86>;
+			gpio-pins = <&pctl_gpio_l>;
+		};
+		gpiom: gpio@FF14c000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14C000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <90>;
+			gpio-pins = <&pctl_gpio_m>;
+		};
+		gpion: gpio@FF14d000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14D000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <94>;
+			gpio-pins = <&pctl_gpio_n>;
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
new file mode 100644
index 0000000..c0fd362
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -0,0 +1,127 @@
+/*
+ * Abilis Systems TB100 Development Kit PCB device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+/dts-v1/;
+
+/include/ "abilis_tb100.dtsi"
+
+/ {
+	chosen {
+		bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8";
+	};
+
+	aliases { };
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x08000000>;	/* 128M */
+	};
+
+	soc100 {
+		uart@FF100000 {
+			pinctrl-names = "abilis,simple-default";
+			pinctrl-0 = <&pctl_uart0>;
+		};
+		ethernet@FE100000 {
+			phy-mode = "rgmii";
+		};
+
+		i2c0: i2c@FF120000 {
+			sda-hold-time = <432>;
+		};
+		i2c1: i2c@FF121000 {
+			sda-hold-time = <432>;
+		};
+		i2c2: i2c@FF122000 {
+			sda-hold-time = <432>;
+		};
+		i2c3: i2c@FF123000 {
+			sda-hold-time = <432>;
+		};
+		i2c4: i2c@FF124000 {
+			sda-hold-time = <432>;
+		};
+
+		leds {
+			compatible = "gpio-leds";
+			power {
+				label = "Power";
+				gpios = <&gpioi 0>;
+				linux,default-trigger = "default-on";
+			};
+			heartbeat {
+				label = "Heartbeat";
+				gpios = <&gpioi 1>;
+				linux,default-trigger = "heartbeat";
+			};
+			led2 {
+				label = "LED2";
+				gpios = <&gpioi 2>;
+				default-state = "off";
+			};
+			led3 {
+				label = "LED3";
+				gpios = <&gpioi 3>;
+				default-state = "off";
+			};
+			led4 {
+				label = "LED4";
+				gpios = <&gpioi 4>;
+				default-state = "off";
+			};
+			led5 {
+				label = "LED5";
+				gpios = <&gpioi 5>;
+				default-state = "off";
+			};
+			led6 {
+				label = "LED6";
+				gpios = <&gpioi 6>;
+				default-state = "off";
+			};
+			led7 {
+				label = "LED7";
+				gpios = <&gpioi 7>;
+				default-state = "off";
+			};
+			led8 {
+				label = "LED8";
+				gpios = <&gpioi 8>;
+				default-state = "off";
+			};
+			led9 {
+				label = "LED9";
+				gpios = <&gpioi 9>;
+				default-state = "off";
+			};
+			led10 {
+				label = "LED10";
+				gpios = <&gpioi 10>;
+				default-state = "off";
+			};
+			led11 {
+				label = "LED11";
+				gpios = <&gpioi 11>;
+				default-state = "off";
+			};
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
new file mode 100644
index 0000000..fd25c212
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -0,0 +1,349 @@
+/*
+ * Abilis Systems TB101 SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+/include/ "abilis_tb10x.dtsi"
+
+/* interrupt specifiers
+ * --------------------
+ * 0: rising, 1: low, 2: high, 3: falling,
+ */
+
+/ {
+	clock-frequency		= <500000000>;	/* 500 MHz */
+
+	soc100 {
+		bus-frequency	= <166666666>;
+
+		pll0: oscillator {
+			clock-frequency  = <1000000000>;
+		};
+		cpu_clk: clkdiv_cpu {
+			clock-mult = <1>;
+			clock-div = <2>;
+		};
+		ahb_clk: clkdiv_ahb {
+			clock-mult = <1>;
+			clock-div = <6>;
+		};
+
+		iomux: iomux@FF10601c {
+			/* Port 1 */
+			pctl_tsin_s0: pctl-tsin-s0 {   /* Serial TS-in 0 */
+				pingrp = "mis0_pins";
+			};
+			pctl_tsin_s1: pctl-tsin-s1 {   /* Serial TS-in 1 */
+				pingrp = "mis1_pins";
+			};
+			pctl_gpio_a: pctl-gpio-a {     /* GPIO bank A */
+				pingrp = "gpioa_pins";
+			};
+			pctl_tsin_p1: pctl-tsin-p1 {   /* Parallel TS-in 1 */
+				pingrp = "mip1_pins";
+			};
+			/* Port 2 */
+			pctl_tsin_s2: pctl-tsin-s2 {   /* Serial TS-in 2 */
+				pingrp = "mis2_pins";
+			};
+			pctl_tsin_s3: pctl-tsin-s3 {   /* Serial TS-in 3 */
+				pingrp = "mis3_pins";
+			};
+			pctl_gpio_c: pctl-gpio-c {     /* GPIO bank C */
+				pingrp = "gpioc_pins";
+			};
+			pctl_tsin_p3: pctl-tsin-p3 {   /* Parallel TS-in 3 */
+				pingrp = "mip3_pins";
+			};
+			/* Port 3 */
+			pctl_tsin_s4: pctl-tsin-s4 {   /* Serial TS-in 4 */
+				pingrp = "mis4_pins";
+			};
+			pctl_tsin_s5: pctl-tsin-s5 {   /* Serial TS-in 5 */
+				pingrp = "mis5_pins";
+			};
+			pctl_gpio_e: pctl-gpio-e {     /* GPIO bank E */
+				pingrp = "gpioe_pins";
+			};
+			pctl_tsin_p5: pctl-tsin-p5 {   /* Parallel TS-in 5 */
+				pingrp = "mip5_pins";
+			};
+			/* Port 4 */
+			pctl_tsin_s6: pctl-tsin-s6 {   /* Serial TS-in 6 */
+				pingrp = "mis6_pins";
+			};
+			pctl_tsin_s7: pctl-tsin-s7 {   /* Serial TS-in 7 */
+				pingrp = "mis7_pins";
+			};
+			pctl_gpio_g: pctl-gpio-g {     /* GPIO bank G */
+				pingrp = "gpiog_pins";
+			};
+			pctl_tsin_p7: pctl-tsin-p7 {   /* Parallel TS-in 7 */
+				pingrp = "mip7_pins";
+			};
+			/* Port 5 */
+			pctl_gpio_j: pctl-gpio-j {     /* GPIO bank J */
+				pingrp = "gpioj_pins";
+			};
+			pctl_gpio_k: pctl-gpio-k {     /* GPIO bank K */
+				pingrp = "gpiok_pins";
+			};
+			pctl_ciplus: pctl-ciplus {     /* CI+ interface */
+				pingrp = "ciplus_pins";
+			};
+			pctl_mcard: pctl-mcard {       /* M-Card interface */
+				pingrp = "mcard_pins";
+			};
+			pctl_stc0: pctl-stc0 {         /* Smart card I/F 0 */
+				pingrp = "stc0_pins";
+			};
+			pctl_stc1: pctl-stc1 {         /* Smart card I/F 1 */
+				pingrp = "stc1_pins";
+			};
+			/* Port 6 */
+			pctl_tsout_p: pctl-tsout-p {   /* Parallel TS-out */
+				pingrp = "mop_pins";
+			};
+			pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */
+				pingrp = "mos0_pins";
+			};
+			pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */
+				pingrp = "mos1_pins";
+			};
+			pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */
+				pingrp = "mos2_pins";
+			};
+			pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */
+				pingrp = "mos3_pins";
+			};
+			/* Port 7 */
+			pctl_uart0: pctl-uart0 {       /* UART 0 */
+				pingrp = "uart0_pins";
+			};
+			pctl_uart1: pctl-uart1 {       /* UART 1 */
+				pingrp = "uart1_pins";
+			};
+			pctl_gpio_l: pctl-gpio-l {     /* GPIO bank L */
+				pingrp = "gpiol_pins";
+			};
+			pctl_gpio_m: pctl-gpio-m {     /* GPIO bank M */
+				pingrp = "gpiom_pins";
+			};
+			/* Port 8 */
+			pctl_spi3: pctl-spi3 {
+				pingrp = "spi3_pins";
+			};
+			pctl_jtag: pctl-jtag {
+				pingrp = "jtag_pins";
+			};
+			/* Port 9 */
+			pctl_spi1: pctl-spi1 {
+				pingrp = "spi1_pins";
+			};
+			pctl_gpio_n: pctl-gpio-n {
+				pingrp = "gpion_pins";
+			};
+			/* Unmuxed GPIOs */
+			pctl_gpio_b: pctl-gpio-b {
+				pingrp = "gpiob_pins";
+			};
+			pctl_gpio_d: pctl-gpio-d {
+				pingrp = "gpiod_pins";
+			};
+			pctl_gpio_f: pctl-gpio-f {
+				pingrp = "gpiof_pins";
+			};
+			pctl_gpio_h: pctl-gpio-h {
+				pingrp = "gpioh_pins";
+			};
+			pctl_gpio_i: pctl-gpio-i {
+				pingrp = "gpioi_pins";
+			};
+		};
+
+		gpioa: gpio@FF140000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF140000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <0>;
+			gpio-pins = <&pctl_gpio_a>;
+		};
+		gpiob: gpio@FF141000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF141000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <3>;
+			gpio-pins = <&pctl_gpio_b>;
+		};
+		gpioc: gpio@FF142000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF142000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <5>;
+			gpio-pins = <&pctl_gpio_c>;
+		};
+		gpiod: gpio@FF143000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF143000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <8>;
+			gpio-pins = <&pctl_gpio_d>;
+		};
+		gpioe: gpio@FF144000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF144000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <10>;
+			gpio-pins = <&pctl_gpio_e>;
+		};
+		gpiof: gpio@FF145000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF145000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <13>;
+			gpio-pins = <&pctl_gpio_f>;
+		};
+		gpiog: gpio@FF146000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF146000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <15>;
+			gpio-pins = <&pctl_gpio_g>;
+		};
+		gpioh: gpio@FF147000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF147000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <18>;
+			gpio-pins = <&pctl_gpio_h>;
+		};
+		gpioi: gpio@FF148000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF148000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <20>;
+			gpio-pins = <&pctl_gpio_i>;
+		};
+		gpioj: gpio@FF149000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF149000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <32>;
+			gpio-pins = <&pctl_gpio_j>;
+		};
+		gpiok: gpio@FF14a000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14A000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <64>;
+			gpio-pins = <&pctl_gpio_k>;
+		};
+		gpiol: gpio@FF14b000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14B000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <86>;
+			gpio-pins = <&pctl_gpio_l>;
+		};
+		gpiom: gpio@FF14c000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14C000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <90>;
+			gpio-pins = <&pctl_gpio_m>;
+		};
+		gpion: gpio@FF14d000 {
+			compatible = "abilis,tb10x-gpio";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <27 1>;
+			reg = <0xFF14D000 0x1000>;
+			gpio-controller;
+			#gpio-cells = <1>;
+			gpio-base  = <94>;
+			gpio-pins = <&pctl_gpio_n>;
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
new file mode 100644
index 0000000..6f8c381
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -0,0 +1,127 @@
+/*
+ * Abilis Systems TB101 Development Kit PCB device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+/dts-v1/;
+
+/include/ "abilis_tb101.dtsi"
+
+/ {
+	chosen {
+		bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8";
+	};
+
+	aliases { };
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x08000000>;	/* 128M */
+	};
+
+	soc100 {
+		uart@FF100000 {
+			pinctrl-names = "abilis,simple-default";
+			pinctrl-0 = <&pctl_uart0>;
+		};
+		ethernet@FE100000 {
+			phy-mode = "rgmii";
+		};
+
+		i2c0: i2c@FF120000 {
+			sda-hold-time = <432>;
+		};
+		i2c1: i2c@FF121000 {
+			sda-hold-time = <432>;
+		};
+		i2c2: i2c@FF122000 {
+			sda-hold-time = <432>;
+		};
+		i2c3: i2c@FF123000 {
+			sda-hold-time = <432>;
+		};
+		i2c4: i2c@FF124000 {
+			sda-hold-time = <432>;
+		};
+
+		leds {
+			compatible = "gpio-leds";
+			power {
+				label = "Power";
+				gpios = <&gpioi 0>;
+				linux,default-trigger = "default-on";
+			};
+			heartbeat {
+				label = "Heartbeat";
+				gpios = <&gpioi 1>;
+				linux,default-trigger = "heartbeat";
+			};
+			led2 {
+				label = "LED2";
+				gpios = <&gpioi 2>;
+				default-state = "off";
+			};
+			led3 {
+				label = "LED3";
+				gpios = <&gpioi 3>;
+				default-state = "off";
+			};
+			led4 {
+				label = "LED4";
+				gpios = <&gpioi 4>;
+				default-state = "off";
+			};
+			led5 {
+				label = "LED5";
+				gpios = <&gpioi 5>;
+				default-state = "off";
+			};
+			led6 {
+				label = "LED6";
+				gpios = <&gpioi 6>;
+				default-state = "off";
+			};
+			led7 {
+				label = "LED7";
+				gpios = <&gpioi 7>;
+				default-state = "off";
+			};
+			led8 {
+				label = "LED8";
+				gpios = <&gpioi 8>;
+				default-state = "off";
+			};
+			led9 {
+				label = "LED9";
+				gpios = <&gpioi 9>;
+				default-state = "off";
+			};
+			led10 {
+				label = "LED10";
+				gpios = <&gpioi 10>;
+				default-state = "off";
+			};
+			led11 {
+				label = "LED11";
+				gpios = <&gpioi 11>;
+				default-state = "off";
+			};
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
new file mode 100644
index 0000000..a6139fc
--- /dev/null
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -0,0 +1,247 @@
+/*
+ * Abilis Systems TB10X SOC device tree
+ *
+ * Copyright (C) Abilis Systems 2013
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+/* interrupt specifiers
+ * --------------------
+ * 0: rising, 1: low, 2: high, 3: falling,
+ */
+
+/ {
+	compatible		= "abilis,arc-tb10x";
+	#address-cells		= <1>;
+	#size-cells		= <1>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "snps,arc770d";
+			reg = <0>;
+		};
+	};
+
+	soc100 {
+		#address-cells	= <1>;
+		#size-cells	= <1>;
+		device_type	= "soc";
+		ranges		= <0xfe000000 0xfe000000 0x02000000
+				0x000F0000 0x000F0000 0x00010000>;
+		compatible	= "abilis,tb10x", "simple-bus";
+
+		pll0: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-output-names = "pll0";
+		};
+		cpu_clk: clkdiv_cpu {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clocks = <&pll0>;
+			clock-output-names = "cpu_clk";
+		};
+		ahb_clk: clkdiv_ahb {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clocks = <&pll0>;
+			clock-output-names = "ahb_clk";
+		};
+
+		iomux: iomux@FF10601c {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "abilis,tb10x-iomux";
+			reg = <0xFF10601c 0x4>;
+		};
+
+		intc: interrupt-controller {
+			compatible = "snps,arc700-intc";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+		tb10x_ictl: pic@fe002000 {
+			compatible = "abilis,tb10x_ictl";
+			reg = <0xFE002000 0x20>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			interrupt-parent = <&intc>;
+			interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+					20 21 22 23 24 25 26 27 28 29 30 31>;
+		};
+
+		uart@FF100000 {
+			compatible = "snps,dw-apb-uart",
+					"abilis,simple-pinctrl";
+			reg = <0xFF100000 0x100>;
+			clock-frequency = <166666666>;
+			interrupts = <25 1>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			interrupt-parent = <&tb10x_ictl>;
+		};
+		ethernet@FE100000 {
+			compatible = "snps,dwmac-3.70a","snps,dwmac";
+			reg = <0xFE100000 0x1058>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <6 1>;
+			interrupt-names = "macirq";
+			clocks = <&ahb_clk>;
+			clock-names = "stmmaceth";
+		};
+		dma@FE000000 {
+			compatible = "snps,dma-spear1340";
+			reg = <0xFE000000 0x400>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <14 1>;
+			dma-channels = <6>;
+			dma-requests = <0>;
+			dma-masters = <1>;
+			#dma-cells = <3>;
+			chan_allocation_order = <0>;
+			chan_priority = <1>;
+			block_size = <0x7ff>;
+			data_width = <2 0 0 0>;
+			clocks = <&ahb_clk>;
+			clock-names = "hclk";
+		};
+
+		i2c0: i2c@FF120000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "snps,designware-i2c";
+			reg = <0xFF120000 0x1000>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <12 1>;
+			clocks = <&ahb_clk>;
+		};
+		i2c1: i2c@FF121000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "snps,designware-i2c";
+			reg = <0xFF121000 0x1000>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <12 1>;
+			clocks = <&ahb_clk>;
+		};
+		i2c2: i2c@FF122000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "snps,designware-i2c";
+			reg = <0xFF122000 0x1000>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <12 1>;
+			clocks = <&ahb_clk>;
+		};
+		i2c3: i2c@FF123000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "snps,designware-i2c";
+			reg = <0xFF123000 0x1000>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <12 1>;
+			clocks = <&ahb_clk>;
+		};
+		i2c4: i2c@FF124000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "snps,designware-i2c";
+			reg = <0xFF124000 0x1000>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <12 1>;
+			clocks = <&ahb_clk>;
+		};
+
+		spi0: spi@0xFE010000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			cell-index = <0>;
+			compatible = "abilis,tb100-spi";
+			num-cs = <1>;
+			reg = <0xFE010000 0x20>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <26 1>;
+			clocks = <&ahb_clk>;
+		};
+		spi1: spi@0xFE011000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			cell-index = <1>;
+			compatible = "abilis,tb100-spi",
+					"abilis,simple-pinctrl";
+			num-cs = <2>;
+			reg = <0xFE011000 0x20>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <10 1>;
+			clocks = <&ahb_clk>;
+		};
+
+		tb10x_tsm: tb10x-tsm@ff316000 {
+			compatible = "abilis,tb100-tsm";
+			reg = <0xff316000 0x400>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <17 1>;
+			output-clkdiv = <4>;
+			global-packet-delay = <0x21>;
+			port-packet-delay = <0>;
+		};
+		tb10x_stream_proc: tb10x-stream-proc {
+			compatible = "abilis,tb100-streamproc";
+			reg =   <0xfff00000 0x200>,
+				<0x000f0000 0x10000>,
+				<0xfff00200 0x105>,
+				<0xff10600c 0x1>,
+				<0xfe001018 0x1>;
+			reg-names =     "mbox",
+					"sp_iccm",
+					"mbox_irq",
+					"cpuctrl",
+					"a6it_int_force";
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <20 1>, <19 1>;
+			interrupt-names = "cmd_irq", "event_irq";
+		};
+		tb10x_mdsc0: tb10x-mdscr@FF300000 {
+			compatible = "abilis,tb100-mdscr";
+			reg = <0xFF300000 0x7000>;
+			tb100-mdscr-manage-tsin;
+		};
+		tb10x_mscr0: tb10x-mdscr@FF307000 {
+			compatible = "abilis,tb100-mdscr";
+			reg = <0xFF307000 0x7000>;
+		};
+		tb10x_scr0: tb10x-mdscr@ff30e000 {
+			compatible = "abilis,tb100-mdscr";
+			reg = <0xFF30e000 0x4000>;
+			tb100-mdscr-manage-tsin;
+		};
+		tb10x_scr1: tb10x-mdscr@ff312000 {
+			compatible = "abilis,tb100-mdscr";
+			reg = <0xFF312000 0x4000>;
+			tb100-mdscr-manage-tsin;
+		};
+		tb10x_wfb: tb10x-wfb@ff319000 {
+			compatible = "abilis,tb100-wfb";
+			reg = <0xff319000 0x1000>;
+			interrupt-parent = <&tb10x_ictl>;
+			interrupts = <16 1>;
+		};
+	};
+};
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
new file mode 100644
index 0000000..ea16d78
--- /dev/null
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "snps,nsimosci";
+	clock-frequency = <80000000>;	/* 80 MHz */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&intc>;
+
+	chosen {
+		bootargs = "console=tty0 consoleblank=0";
+	};
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>;	/* 256M */
+	};
+
+	fpga {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* child and parent address space 1:1 mapped */
+		ranges;
+
+		intc: interrupt-controller {
+			compatible = "snps,arc700-intc";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+
+		uart0: serial@c0000000 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0xc0000000 0x2000>;
+			interrupts = <11>;
+			#clock-frequency = <80000000>;
+			clock-frequency = <3686400>;
+			baud = <115200>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			status = "okay";
+		};
+
+		pgu0: pgu@c9000000 {
+			compatible = "snps,arcpgufb";
+			reg = <0xc9000000 0x400>;
+		};
+
+		ps2: ps2@c9001000 {
+			compatible = "snps,arc_ps2";
+			reg = <0xc9000400 0x14>;
+			interrupts = <13>;
+			interrupt-names = "arc_ps2_irq";
+		};
+
+		eth0: ethernet@c0003000 {
+			compatible = "snps,oscilan";
+			reg = <0xc0003000 0x44>;
+			interrupts = <7>, <8>;
+			interrupt-names = "rx", "tx";
+		};
+	};
+};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
index b869806..95350be6 100644
--- a/arch/arc/configs/fpga_defconfig
+++ b/arch/arc/configs/fpga_defconfig
@@ -9,7 +9,7 @@
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 # CONFIG_SLUB_DEBUG is not set
@@ -24,6 +24,7 @@
 CONFIG_ARC_BOARD_ML509=y
 # CONFIG_ARC_HAS_RTSC is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NET=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
new file mode 100644
index 0000000..446c96c
--- /dev/null
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -0,0 +1,75 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_IDE is not set
+# CONFIG_ARCTANGENT_EMAC is not set
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+# CONFIG_MOUSE_PS2_SYNAPTICS is not set
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
new file mode 100644
index 0000000..4fa5cd9
--- /dev/null
+++ b/arch/arc/configs/tb10x_defconfig
@@ -0,0 +1,117 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="tb10x"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio"
+CONFIG_INITRAMFS_ROOT_UID=2100
+CONFIG_INITRAMFS_ROOT_GID=501
+# CONFIG_RD_GZIP is not set
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_ARC_PLAT_TB10X=y
+CONFIG_ARC_CACHE_LINE_SHIFT=5
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_STACK_NONEXEC=y
+CONFIG_HZ=250
+CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_DEBUG_FS=y
+CONFIG_STMMAC_DA=y
+CONFIG_STMMAC_CHAINED=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_NET_DMA=y
+CONFIG_ASYNC_TX_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 48af742..d8dd660 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -32,7 +32,6 @@
 generic-y += scatterlist.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 6632273..d5555fe 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -55,9 +55,6 @@
 	: "r"(data), "r"(ptr));		\
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
 /*
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 97ee96f..9f841af 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -19,13 +19,24 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
+
+/*
+ * Semantically we need this because the icache doesn't snoop the dcache/DMA.
+ * However, an ARC cache flush requires paddr as well as vaddr, and the latter
+ * is not available in the flush_icache_page() API. So we no-op it but do the
+ * equivalent work in update_mmu_cache().
+ */
+#define flush_icache_page(vma, page)
 
 void flush_cache_all(void);
 
 void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
-				     int len);
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v)	\
+		___flush_dcache_page((unsigned long)p, (unsigned long)v)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
@@ -42,23 +53,60 @@
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm)			/* called on fork */
+#define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define flush_cache_mm(mm)			/* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
 
+#else	/* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+	unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long u_vaddr);
+
+#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+
+/*
+ * Checks if two addresses (after page aligning) index into the same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)				\
+	(cache_is_vipt_aliasing() ?					\
+		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0)
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
 do {									\
 	memcpy(dst, src, len);						\
 	if (vma->vm_flags & VM_EXEC)					\
-		flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+		__sync_icache_dcache((unsigned long)(dst), vaddr, len);	\
 } while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
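
To make the congruence test concrete, here is a small standalone
illustration of the CACHE_COLOR arithmetic (the 8K page size is an
assumption for the example; the kernel uses whatever PAGE_SHIFT the
configuration selects):

	/* Illustrative only: with 4 colors taken from the two address bits just
	 * above PAGE_SHIFT, addresses one page apart get different colors, while
	 * addresses four pages apart are cache-congruent again. */
	#include <stdio.h>

	#define PAGE_SHIFT	13	/* assumed 8K pages */
	#define CACHE_COLOR(a)	(((unsigned long)(a) >> PAGE_SHIFT) & 3)

	int main(void)
	{
		unsigned long a = 0x80000000UL;
		unsigned long b = a + (1UL << PAGE_SHIFT);	/* next page */
		unsigned long c = a + (4UL << PAGE_SHIFT);	/* four pages away */

		/* prints: color(a)=0 color(b)=1 color(c)=0 */
		printf("color(a)=%lu color(b)=%lu color(c)=%lu\n",
		       CACHE_COLOR(a), CACHE_COLOR(b), CACHE_COLOR(c));
		return 0;
	}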
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 4c588f9..57898a1 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -9,7 +9,8 @@
 #ifndef __ASM_ARC_IRQ_H
 #define __ASM_ARC_IRQ_H
 
-#define NR_IRQS		32
+#define NR_CPU_IRQS	32  /* number of interrupt lines of ARC770 CPU */
+#define NR_IRQS		128 /* allow handling of IRQs external to the CPU */
 
 /* Platform Independent IRQs */
 #define TIMER0_IRQ      3
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index bdf5461..374a355 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -16,13 +16,27 @@
 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)	free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)
 
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define clear_user_page(addr, vaddr, pg)	clear_page(addr)
 #define copy_user_page(vto, vfrom, vaddr, pg)	copy_page(vto, vfrom)
 
+#else	/* VIPT aliasing dcache */
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index b7e3668..1cc4720 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -395,6 +395,9 @@
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
index 4dff5a1..602b097 100644
--- a/arch/arc/include/asm/serial.h
+++ b/arch/arc/include/asm/serial.h
@@ -22,4 +22,14 @@
 
 #define BASE_BAUD	(arc_get_core_freq() / 16)
 
+/*
+ * This is definitely going to break early 8250 consoles on multi-platform
+ * images but hey, it won't add any code complexity for a debug feature of
+ * one broken driver.
+ */
+#ifdef CONFIG_ARC_PLAT_TB10X
+#undef BASE_BAUD
+#define BASE_BAUD	(arc_get_core_freq() / 16 / 3)
+#endif
+
 #endif /* _ASM_ARC_SERIAL_H */
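
A quick sanity check on the divisor (illustrative arithmetic only; at
runtime the core clock comes from arc_get_core_freq(), and the 500 MHz
figure here is an assumption):

	/* With an assumed 500 MHz core clock, the generic BASE_BAUD would be
	 * 500000000 / 16 = 31250000; the TB10x quirk above yields
	 * 500000000 / 16 / 3 = 10416666. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long core_freq = 500000000UL;	/* assumed */

		printf("generic: %lu, tb10x: %lu\n",
		       core_freq / 16, core_freq / 16 / 3);
		return 0;
	}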
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644
index 0000000..fffeecc
--- /dev/null
+++ b/arch/arc/include/asm/shmparam.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define	SHMLBA	(2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
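
A brief usage note (a userspace sketch under assumptions, not part of this
header): since __ARCH_FORCE_SHMLBA makes shmat() enforce SHMLBA alignment,
callers that pass an explicit attach address typically let the kernel round
it down with SHM_RND:

	/* Hypothetical sketch: attach a SysV shared memory segment at an
	 * SHMLBA-rounded address so the new mapping lands in the expected
	 * cache bin. shmid is assumed to come from an earlier shmget(). */
	#include <stdio.h>
	#include <sys/shm.h>

	void *attach_rounded(int shmid, void *hint)
	{
		void *p = shmat(shmid, hint, SHM_RND);	/* kernel rounds hint to SHMLBA */

		if (p == (void *)-1)
			perror("shmat");
		return p;
	}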
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index 3eb2ce0..85b6df8 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -21,20 +21,35 @@
 
 #ifndef __ASSEMBLY__
 
-#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+#define tlb_flush(tlb)				\
+do {						\
+	if (tlb->fullmm)			\
+		flush_tlb_mm((tlb)->mm);	\
+} while (0)
 
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
- * 2) tlb-flush part - implemted via tlb_end_vma( ) can be NOP as well-
- *    albiet for difft reasons - its better handled by moving to new ASID
+ * 1) cache-flush part - implemented via tlb_start_vma( ) for VIPT aliasing D$
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) which flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
-#define tlb_end_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)						\
+do {									\
+	if (!tlb->fullmm)						\
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
+} while (0)
+#endif
+
+#define tlb_end_vma(tlb, vma)						\
+do {									\
+	if (!tlb->fullmm)						\
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end);	\
+} while (0)
 
 #define __tlb_remove_tlb_entry(tlb, ptep, address)
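
For context, a sketch of the core-mm call ordering that drives these hooks; this paraphrases the generic mmu_gather flow for illustration and is not ARC code:

/*
 *   tlb_gather_mmu(&tlb, mm, fullmm);
 *   for each vma being torn down:
 *           tlb_start_vma(&tlb, vma);    // cache flush (VIPT aliasing only)
 *           ... unmap ptes ...
 *           tlb_end_vma(&tlb, vma);      // TLB range flush
 *   tlb_finish_mmu(&tlb, start, end);    // ends up in tlb_flush()
 *
 * On fullmm (process exit) the per-vma work is skipped: the whole address
 * space is going away, so the single flush_tlb_mm() in tlb_flush() suffices.
 */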
 
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index 0dc148e..7dcda70 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -11,9 +11,9 @@
 #include <linux/interrupt.h>
 #include <linux/thread_info.h>
 #include <linux/kbuild.h>
+#include <linux/ptrace.h>
 #include <asm/hardirq.h>
 #include <asm/page.h>
-#include <asm/ptrace.h>
 
 int main(void)
 {
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
index 66ce0dc..10c7b0b 100644
--- a/arch/arc/kernel/clk.c
+++ b/arch/arc/kernel/clk.c
@@ -8,7 +8,7 @@
 
 #include <asm/clk.h>
 
-unsigned long core_freq = 800000000;
+unsigned long core_freq = 80000000;
 
 /*
  * As of now we default to device-tree provided clock
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index d14764a..b8a549c 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -12,8 +12,8 @@
 #include <linux/types.h>
 #include <linux/kprobes.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 #include <asm/disasm.h>
-#include <asm/uaccess.h>
 
 #if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
 	defined(CONFIG_KPROBES)
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 91eeab8..0c6d664 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -393,12 +393,14 @@
 #ifdef  CONFIG_ARC_MISALIGN_ACCESS
 	SAVE_CALLEE_SAVED_USER
 	mov r3, sp              ; callee_regs
-#endif
 
 	bl  do_misaligned_access
 
-#ifdef  CONFIG_ARC_MISALIGN_ACCESS
-	DISCARD_CALLEE_SAVED_USER
+	; TBD: optimize - do this only if a callee reg was involved
+	; either a dst of emulated LD/ST or src with address-writeback
+	RESTORE_CALLEE_SAVED_USER
+#else
+	bl  do_misaligned_error
 #endif
 
 	b   ret_from_exception
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 551c10d..8115fa5 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -11,6 +11,8 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include "../../drivers/irqchip/irqchip.h"
 #include <asm/sections.h>
 #include <asm/irq.h>
 #include <asm/mach_desc.h>
@@ -26,7 +28,7 @@
  * -Disable all IRQs (on CPU side)
  * -Optionally, setup the High priority Interrupts as Level 2 IRQs
  */
-void __init arc_init_IRQ(void)
+void __cpuinit arc_init_IRQ(void)
 {
 	int level_mask = 0;
 
@@ -97,15 +99,13 @@
 
 static struct irq_domain *root_domain;
 
-void __init init_onchip_IRQ(void)
+static int __init
+init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
 {
-	struct device_node *intc = NULL;
+	if (parent)
+		panic("DeviceTree incore intc not a root irq controller\n");
 
-	intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
-	if(!intc)
-		panic("DeviceTree Missing incore intc\n");
-
-	root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+	root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
 					    &arc_intc_domain_ops, NULL);
 
 	if (!root_domain)
@@ -113,8 +113,12 @@
 
 	/* with this we don't need to export root_domain */
 	irq_set_default_host(root_domain);
+
+	return 0;
 }
 
+IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
+
 /*
  * Late Interrupt system init called from start_kernel for Boot CPU only
  *
@@ -123,12 +127,13 @@
  */
 void __init init_IRQ(void)
 {
-	init_onchip_IRQ();
-
 	/* Any external intc can be setup here */
 	if (machine_desc->init_irq)
 		machine_desc->init_irq();
 
+	/* process the entire interrupt tree in one go */
+	irqchip_init();
+
 #ifdef CONFIG_SMP
 	/* Master CPU can initialize its side of IPI */
 	if (machine_desc->init_smp)
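
For readers new to the flow above: IRQCHIP_DECLARE() drops a match entry into a dedicated linker section, and irqchip_init() later feeds that table to of_irq_init(), which finds the matching DT nodes and invokes the registered callbacks parent-first. A hedged sketch of the machinery, paraphrased from drivers/irqchip of this era (not part of this patch):

/* Roughly what IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ)
 * expands to: a match-table entry placed in the __irqchip_of_table section */
static const struct of_device_id irqchip_of_match_arc_intc
	__used __section(__irqchip_of_table) = {
	.compatible	= "snps,arc700-intc",
	.data		= init_onchip_IRQ,
};

/* irqchip_init() then hands the linker-assembled table to the DT core */
void __init irqchip_init(void)
{
	of_irq_init(__irqchip_begin);	/* start of __irqchip_of_table */
}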
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index 3bfeacb..5a7b80e 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -10,7 +10,6 @@
 #include <linux/kprobes.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index cdd3593..376e046 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -47,7 +47,7 @@
 		}
 	}
 #endif
-    return 0;
+	return 0;
 }
 
 void module_arch_cleanup(struct module *mod)
@@ -141,5 +141,5 @@
 		mod->arch.unw_info = unw;
 	}
 #endif
-    return 0;
+	return 0;
 }
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2d95ac0..b2b3731 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -14,14 +14,13 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/of_fdt.h>
+#include <linux/cache.h>
 #include <asm/sections.h>
 #include <asm/arcregs.h>
 #include <asm/tlb.h>
-#include <asm/cache.h>
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/irq.h>
-#include <asm/arcregs.h>
 #include <asm/prom.h>
 #include <asm/unwind.h>
 #include <asm/clk.h>
@@ -32,14 +31,14 @@
 int running_on_hw = 1;	/* vs. on ISS */
 
 char __initdata command_line[COMMAND_LINE_SIZE];
-struct machine_desc *machine_desc __initdata;
+struct machine_desc *machine_desc __cpuinitdata;
 
 struct task_struct *_current_task[NR_CPUS];	/* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
 
-void __init read_arc_build_cfg_regs(void)
+void __cpuinit read_arc_build_cfg_regs(void)
 {
 	struct bcr_perip uncached_space;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -238,7 +237,7 @@
 	return buf;
 }
 
-void __init arc_chk_ccms(void)
+void __cpuinit arc_chk_ccms(void)
 {
 #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
@@ -273,7 +272,7 @@
  * hardware has dedicated regs which need to be saved/restored on ctx-sw
  * (Single Precision uses core regs), thus kernel is kind of oblivious to it
  */
-void __init arc_chk_fpu(void)
+void __cpuinit arc_chk_fpu(void)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 
@@ -294,7 +293,7 @@
  *    such as only for boot CPU etc
  */
 
-void __init setup_processor(void)
+void __cpuinit setup_processor(void)
 {
 	char str[512];
 	int cpu_id = smp_processor_id();
@@ -319,24 +318,21 @@
 
 void __init setup_arch(char **cmdline_p)
 {
-#ifdef CONFIG_CMDLINE_UBOOT
-	/* Make sure that a whitespace is inserted before */
-	strlcat(command_line, " ", sizeof(command_line));
-#endif
-	/*
-	 * Append .config cmdline to base command line, which might already
-	 * contain u-boot "bootargs" (handled by head.S, if so configured)
-	 */
-	strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
-
-	/* Save unparsed command line copy for /proc/cmdline */
-	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
-	*cmdline_p = command_line;
-
+	/* This also populates @boot_command_line from DT /chosen/bootargs */
 	machine_desc = setup_machine_fdt(__dtb_start);
 	if (!machine_desc)
 		panic("Embedded DT invalid\n");
 
+	/* Append any u-boot provided cmdline */
+#ifdef CONFIG_CMDLINE_UBOOT
+	/* Add a whitespace separator between the 2 cmdlines */
+	strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+	strlcat(boot_command_line, command_line, COMMAND_LINE_SIZE);
+#endif
+
+	/* Save unparsed command line copy for /proc/cmdline */
+	*cmdline_p = boot_command_line;
+
 	/* To force early parsing of things like mem=xxx */
 	parse_early_param();
 
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index f13f728..09f4309 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -33,7 +33,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/timex.h>
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 7496995..0471d9c 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -16,11 +16,12 @@
 #include <linux/sched.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
-#include <asm/ptrace.h>
+#include <linux/ptrace.h>
+#include <linux/kprobes.h>
+#include <linux/kgdb.h>
 #include <asm/setup.h>
-#include <asm/kprobes.h>
 #include <asm/unaligned.h>
-#include <asm/kgdb.h>
+#include <asm/kprobes.h>
 
 void __init trap_init(void)
 {
@@ -83,6 +84,7 @@
 DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
 DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
 DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
 
 #ifdef CONFIG_ARC_MISALIGN_ACCESS
 /*
@@ -91,21 +93,11 @@
 int do_misaligned_access(unsigned long cause, unsigned long address,
 			 struct pt_regs *regs, struct callee_regs *cregs)
 {
-	if (misaligned_fixup(address, regs, cause, cregs) != 0) {
-		siginfo_t info;
+	if (misaligned_fixup(address, regs, cause, cregs) != 0)
+		return do_misaligned_error(cause, address, regs);
 
-		info.si_signo = SIGBUS;
-		info.si_errno = 0;
-		info.si_code = BUS_ADRALN;
-		info.si_addr = (void __user *)address;
-		return handle_exception(cause, "Misaligned Access", regs,
-					  &info);
-	}
 	return 0;
 }
-
-#else
-DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
 #endif
 
 /*
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 0aec0198..11c301b 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -26,7 +26,6 @@
 	char buf[512];
 	int n = 0, len = sizeof(buf);
 
-	/* weird loop because pt_regs regs rev r12..r0, r25..r13 */
 	for (i = start_num; i < start_num + 13; i++) {
 		n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
 			       i, (unsigned long)*reg_rev);
@@ -34,13 +33,18 @@
 		if (((i + 1) % 3) == 0)
 			n += scnprintf(buf + n, len - n, "\n");
 
+		/* because pt_regs has regs reversed: r12..r0, r25..r13 */
 		reg_rev--;
 	}
 
 	if (start_num != 0)
 		n += scnprintf(buf + n, len - n, "\n\n");
 
-	pr_info("%s", buf);
+	/* To continue printing callee regs on same line as scratch regs */
+	if (start_num == 0)
+		pr_info("%s", buf);
+	else
+		pr_cont("%s\n", buf);
 }
 
 static void show_callee_regs(struct callee_regs *cregs)
@@ -83,6 +87,10 @@
 	dev_t dev = 0;
 	char *nm = buf;
 
+	/* can't use print_vma_addr() yet as it doesn't check for
+	 * non-inclusive vma
+	 */
+
 	vma = find_vma(current->active_mm, address);
 
 	/* check against the find_vma( ) behaviour which returns the next VMA
@@ -98,10 +106,13 @@
 			ino = inode->i_ino;
 		}
 		pr_info("    @off 0x%lx in [%s]\n"
-			"    VMA: 0x%08lx to 0x%08lx\n\n",
-		       address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
-	} else
+			"    VMA: 0x%08lx to 0x%08lx\n",
+			vma->vm_start < TASK_UNMAPPED_BASE ?
+				address : address - vma->vm_start,
+			nm, vma->vm_start, vma->vm_end);
+	} else {
 		pr_info("    @No matching VMA found\n");
+	}
 }
 
 static void show_ecr_verbose(struct pt_regs *regs)
@@ -110,7 +121,7 @@
 	unsigned long address;
 
 	cause_reg = current->thread.cause_code;
-	pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+	pr_info("\n[ECR   ]: 0x%08x => ", cause_reg);
 
 	/* For Data fault, this is data address not instruction addr */
 	address = current->thread.fault_address;
@@ -120,7 +131,7 @@
 
 	/* For DTLB Miss or ProtV, display the memory involved too */
 	if (vec == ECR_V_DTLB_MISS) {
-		pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+		pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n",
 		       (cause_code == 0x01) ? "Read From" :
 		       ((cause_code == 0x02) ? "Write to" : "EX"),
 		       address, regs->ret);
@@ -168,20 +179,23 @@
 	if (current->thread.cause_code)
 		show_ecr_verbose(regs);
 
-	pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
-	pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+	pr_info("[EFA   ]: 0x%08lx\n[BLINK ]: %pS\n[ERET  ]: %pS\n",
+		current->thread.fault_address,
+		(void *)regs->blink, (void *)regs->ret);
 
-	show_faulting_vma(regs->ret, buf);	/* faulting code, not data */
+	if (user_mode(regs))
+		show_faulting_vma(regs->ret, buf); /* faulting code, not data */
 
-	/* can't use print_vma_addr() yet as it doesn't check for
-	 * non-inclusive vma
-	 */
+	pr_info("[STAT32]: 0x%08lx", regs->status32);
 
-	/* print special regs */
-	pr_info("status32: 0x%08lx\n", regs->status32);
-	pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
-	pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
-		regs->bta, regs->blink);
+#define STS_BIT(r, bit)	r->status32 & STATUS_##bit##_MASK ? #bit : ""
+	if (!user_mode(regs))
+		pr_cont(" : %2s %2s %2s %2s %2s\n",
+			STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1),
+			STS_BIT(regs, E2), STS_BIT(regs, E1));
+
+	pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
+		regs->bta, regs->sp, regs->fp);
 	pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
 	       regs->lp_start, regs->lp_end, regs->lp_count);
 
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
index 168dc14..ac95cc2 100644
--- a/arch/arc/mm/Makefile
+++ b/arch/arc/mm/Makefile
@@ -7,4 +7,4 @@
 #
 
 obj-y	:= extable.o ioremap.o dma.o fault.o init.o
-obj-y	+= tlb.o tlbex.o cache_arc700.o
+obj-y	+= tlb.o tlbex.o cache_arc700.o mmap.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 88d617d..2f12bca 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -68,20 +68,11 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
 
-
-#ifdef CONFIG_ARC_HAS_ICACHE
-static void __ic_line_inv_no_alias(unsigned long, int);
-static void __ic_line_inv_2_alias(unsigned long, int);
-static void __ic_line_inv_4_alias(unsigned long, int);
-
-/* Holds the ptr to flush routine, dependign on size due to aliasing issues */
-static void (*___flush_icache_rtn) (unsigned long, int);
-#endif
-
 char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	int n = 0;
@@ -109,7 +100,7 @@
  * the cpuinfo structure for later use.
  * No Validation done here, simply read/convert the BCRs
  */
-void __init read_decode_cache_bcr(void)
+void __cpuinit read_decode_cache_bcr(void)
 {
 	struct bcr_cache ibcr, dbcr;
 	struct cpuinfo_arc_cache *p_ic, *p_dc;
@@ -141,13 +132,14 @@
  * 3. Enable the Caches, setup default flush mode for D-Cache
  * 3. Calculate the SHMLBA used by user space
  */
-void __init arc_cache_init(void)
+void __cpuinit arc_cache_init(void)
 {
 	unsigned int temp;
 	unsigned int cpu = smp_processor_id();
 	struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 	struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
 	int way_pg_ratio = way_pg_ratio;
+	int dcache_does_alias;
 	char str[256];
 
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -171,30 +163,6 @@
 
 	}
 #endif
-
-	/*
-	 * if Cache way size is <= page size then no aliasing exhibited
-	 * otherwise ratio determines num of aliases.
-	 * e.g. 32K I$, 2 way set assoc, 8k pg size
-	 *       way-sz = 32k/2 = 16k
-	 *       way-pg-ratio = 16k/8k = 2, so 2 aliases possible
-	 *       (meaning 1 line could be in 2 possible locations).
-	 */
-	way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
-	switch (way_pg_ratio) {
-	case 0:
-	case 1:
-		___flush_icache_rtn = __ic_line_inv_no_alias;
-		break;
-	case 2:
-		___flush_icache_rtn = __ic_line_inv_2_alias;
-		break;
-	case 4:
-		___flush_icache_rtn = __ic_line_inv_4_alias;
-		break;
-	default:
-		panic("Unsupported I-Cache Sz\n");
-	}
 #endif
 
 	/* Enable/disable I-Cache */
@@ -218,9 +186,13 @@
 		panic("Cache H/W doesn't match kernel Config");
 	}
 
+	dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
 	/* check for D-Cache aliasing */
-	if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-		panic("D$ aliasing not handled right now\n");
+	if (dcache_does_alias && !cache_is_vipt_aliasing())
+		panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+	else if (!dcache_does_alias && cache_is_vipt_aliasing())
+		panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
 	/* Set the default Invalidate Mode to "simply discard dirty lines"
@@ -303,47 +275,57 @@
  * Per Line Operation on D-Cache
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
  * Its sole purpose is to help gcc generate ZOL
+ * (aliasing VIPT dcache flushing needs both vaddr and paddr)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-					  int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
+				  unsigned long sz, const int aux_reg)
 {
-	int num_lines, slack;
+	int num_lines;
 
 	/* Ensure we properly floor/ceil the non-line aligned/sized requests
-	 * and have @start - aligned to cache line and integral @num_lines.
+	 * and have @paddr - aligned to cache line and integral @num_lines.
 	 * This however can be avoided for page sized since:
-	 *  -@start will be cache-line aligned already (being page aligned)
+	 *  -@paddr will be cache-line aligned already (being page aligned)
 	 *  -@sz will be integral multiple of line size (being page sized).
 	 */
 	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		slack = start & ~DCACHE_LINE_MASK;
-		sz += slack;
-		start -= slack;
+		sz += paddr & ~DCACHE_LINE_MASK;
+		paddr &= DCACHE_LINE_MASK;
+		vaddr &= DCACHE_LINE_MASK;
 	}
 
 	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
 
+#if (CONFIG_ARC_MMU_VER <= 2)
+	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
 	while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
 		/*
 		 * Just as for I$, in MMU v3, D$ ops also require
 		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-		 * But we pass phy addr for both. This works since Linux
-		 * doesn't support aliasing configs for D$, yet.
-		 * Thus paddr is enough to provide both tag and index.
 		 */
-		write_aux_reg(ARC_REG_DC_PTAG, start);
+		write_aux_reg(ARC_REG_DC_PTAG, paddr);
+
+		write_aux_reg(aux_reg, vaddr);
+		vaddr += ARC_DCACHE_LINE_LEN;
+#else
+		/* paddr contains stuffed vaddr bits */
+		write_aux_reg(aux_reg, paddr);
 #endif
-		write_aux_reg(aux_reg, start);
-		start += ARC_DCACHE_LINE_LEN;
+		paddr += ARC_DCACHE_LINE_LEN;
 	}
 }
 
+/* For cache operations on kernel mappings: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
+
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
-					const int cacheop)
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+				unsigned long sz, const int cacheop)
 {
 	unsigned long flags, tmp = tmp;
 	int aux;
@@ -366,7 +348,7 @@
 	else
 		aux = ARC_REG_DC_FLDL;
 
-	__dc_line_loop(start, sz, aux);
+	__dc_line_loop(paddr, vaddr, sz, aux);
 
 	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
 		wait_for_flush();
@@ -381,7 +363,8 @@
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, vaddr, sz, cacheop)
+#define __dc_line_op_k(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -391,75 +374,38 @@
 /*
  *		I-Cache Aliasing in ARC700 VIPT caches
  *
- * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
- * to "index" into SET of cache-line and paddr from MMU to match the TAG
- * in the WAYS of SET.
+ * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
+ * The orig Cache Management Module "CDU" only required paddr to invalidate a
+ * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
+ * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
+ * the exact same line.
  *
- * However the CDU iterface (to flush/inv) lines from software, only takes
- * paddr (to have simpler hardware interface). For simpler cases, using paddr
- * alone suffices.
- * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
- *      way_sz = cache_sz / num_ways = 16k/2 = 8k
- *      num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
- *   Ignoring the bottom 5 bits corresp to the off within a 32b cacheline,
- *   bits req for calc set-index = bits 12:5 (0 based). Since this range fits
- *   inside the bottom 13 bits of paddr, which are same for vaddr and paddr
- *   (with 8k pg sz), paddr alone can be safely used by CDU to unambigously
- *   locate a cache-line.
- *
- * However for a difft sized cache, say 32k I$, above math yields need
- * for 14 bits of vaddr to locate a cache line, which can't be provided by
- * paddr, since the bit 13 (0 based) might differ between the two.
- *
- * This lack of extra bits needed for correct line addressing, defines the
- * classical problem of Cache aliasing with VIPT architectures
- * num_aliases = 1 << extra_bits
- * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
- *      2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
- *      2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
+ * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
+ * paddr alone cannot be used to correctly index the cache.
  *
  * ------------------
  * MMU v1/v2 (Fixed Page Size 8k)
  * ------------------
  * The solution was to provide CDU with these additional vaddr bits. These
- * would be bits [x:13], x would depend on cache-geom.
+ * would be bits [x:13], x would depend on cache-geometry, 13 comes from
+ * standard page size of 8k.
  * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits
  * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
  * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
  * represent the offset within cache-line. The adv of using this "clumsy"
- * interface for additional info was no new reg was needed in CDU.
+ * interface for additional info was that no new reg was needed in the CDU
+ * programming model.
  *
  * 17:13 represented the max num of bits passable, actual bits needed were
  * fewer, based on the num-of-aliases possible.
  * -for 2 alias possibility, only bit 13 needed (32K cache)
  * -for 4 alias possibility, bits 14:13 needed (64K cache)
  *
- * Since vaddr was not available for all instances of I$ flush req by core
- * kernel, the only safe way (non-optimal though) was to kill all possible
- * lines which could represent an alias (even if they didnt represent one
- * in execution).
- * e.g. for 64K I$, 4 aliases possible, so we did
- *      flush start
- *      flush start | 0x01
- *      flush start | 0x2
- *      flush start | 0x3
- *
- * The penalty was invoking the operation itself, since tag match is anyways
- * paddr based, a line which didn't represent an alias would not match the
- * paddr, hence wont be killed
- *
- * Note that aliasing concerns are independent of line-sz for a given cache
- * geometry (size + set_assoc) because the extra bits required by line-sz are
- * reduced from the set calc.
- * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
- *  32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
- *  64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
- *
  * ------------------
  * MMU v3
  * ------------------
- * This ver of MMU supports var page sizes (1k-16k) - Linux will support
- * 8k (default), 16k and 4k.
+ * This ver of MMU supports variable page sizes (1k-16k), though Linux will
+ * only support 8k (default), 16k and 4k.
  * However from the hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
@@ -468,144 +414,53 @@
  */
 
 /***********************************************************
- * Machine specific helpers for per line I-Cache invalidate.
- * 3 routines to accpunt for 1, 2, 4 aliases possible
+ * Machine specific helper for per line I-Cache invalidate.
  */
-
-static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
-{
-	while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-		write_aux_reg(ARC_REG_IC_PTAG, start);
-#endif
-		write_aux_reg(ARC_REG_IC_IVIL, start);
-		start += ARC_ICACHE_LINE_LEN;
-	}
-}
-
-static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
-{
-	while (num_lines-- > 0) {
-
-#if (CONFIG_ARC_MMU_VER > 2)
-		/*
-		 *  MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
-		 * reg to pass the "tag" bits and existing IVIL reg only looks
-		 * at bits relevant for "index" (details above)
-		 * Programming Notes:
-		 * -when writing tag to PTAG reg, bit chopping can be avoided,
-		 *  CDU ignores non-tag bits.
-		 * -Ideally "index" must be computed from vaddr, but it is not
-		 *  avail in these rtns. So to be safe, we kill the lines in all
-		 *  possible indexes corresp to num of aliases possible for
-		 *  given cache config.
-		 */
-		write_aux_reg(ARC_REG_IC_PTAG, start);
-		write_aux_reg(ARC_REG_IC_IVIL,
-				  start & ~(0x1 << PAGE_SHIFT));
-		write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
-#else
-		write_aux_reg(ARC_REG_IC_IVIL, start);
-		write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
-#endif
-		start += ARC_ICACHE_LINE_LEN;
-	}
-}
-
-static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
-{
-	while (num_lines-- > 0) {
-
-#if (CONFIG_ARC_MMU_VER > 2)
-		write_aux_reg(ARC_REG_IC_PTAG, start);
-
-		write_aux_reg(ARC_REG_IC_IVIL,
-				  start & ~(0x3 << PAGE_SHIFT));
-		write_aux_reg(ARC_REG_IC_IVIL,
-				  start & ~(0x2 << PAGE_SHIFT));
-		write_aux_reg(ARC_REG_IC_IVIL,
-				  start & ~(0x1 << PAGE_SHIFT));
-		write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
-#else
-		write_aux_reg(ARC_REG_IC_IVIL, start);
-		write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
-		write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
-		write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
-#endif
-		start += ARC_ICACHE_LINE_LEN;
-	}
-}
-
-static void __ic_line_inv(unsigned long start, unsigned long sz)
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+				unsigned long sz)
 {
 	unsigned long flags;
-	int num_lines, slack;
+	int num_lines;
 
 	/*
-	 * Ensure we properly floor/ceil the non-line aligned/sized requests
-	 * and have @start - aligned to cache line, and integral @num_lines
+	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
 	 * However page sized flushes can be compile time optimised.
-	 *  -@start will be cache-line aligned already (being page aligned)
+	 *  -@paddr will be cache-line aligned already (being page aligned)
 	 *  -@sz will be integral multiple of line size (being page sized).
 	 */
 	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		slack = start & ~ICACHE_LINE_MASK;
-		sz += slack;
-		start -= slack;
+		sz += paddr & ~ICACHE_LINE_MASK;
+		paddr &= ICACHE_LINE_MASK;
+		vaddr &= ICACHE_LINE_MASK;
 	}
 
 	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-	local_irq_save(flags);
-	(*___flush_icache_rtn) (start, num_lines);
-	local_irq_restore(flags);
-}
-
-/* Unlike routines above, having vaddr for flush op (along with paddr),
- * prevents the need to speculatively kill the lines in multiple sets
- * based on ratio of way_sz : pg_sz
- */
-static void __ic_line_inv_vaddr(unsigned long phy_start,
-					 unsigned long vaddr, unsigned long sz)
-{
-	unsigned long flags;
-	int num_lines, slack;
-	unsigned int addr;
-
-	slack = phy_start & ~ICACHE_LINE_MASK;
-	sz += slack;
-	phy_start -= slack;
-	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER > 2)
-	vaddr &= ~ICACHE_LINE_MASK;
-	addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
 	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
-	addr = phy_start | ((vaddr >> 13) & 0x1F);
+	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
 	local_irq_save(flags);
 	while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
 		/* tag comes from phy addr */
-		write_aux_reg(ARC_REG_IC_PTAG, addr);
+		write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
 		/* index bits come from vaddr */
 		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
 		vaddr += ARC_ICACHE_LINE_LEN;
 #else
-		/* this paddr contains vaddrs bits as needed */
-		write_aux_reg(ARC_REG_IC_IVIL, addr);
+		/* paddr contains stuffed vaddr bits */
+		write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-		addr += ARC_ICACHE_LINE_LEN;
+		paddr += ARC_ICACHE_LINE_LEN;
 	}
 	local_irq_restore(flags);
 }
 
 #else
 
-#define __ic_line_inv(start, sz)
 #define __ic_line_inv_vaddr(pstart, vstart, sz)
 
 #endif /* CONFIG_ARC_HAS_ICACHE */
@@ -615,35 +470,72 @@
  * Exported APIs
  */
 
-/* TBD: use pg_arch_1 to optimize this */
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-	__dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+	struct address_space *mapping;
+
+	if (!cache_is_vipt_aliasing()) {
+		set_bit(PG_arch_1, &page->flags);
+		return;
+	}
+
+	/* don't handle anon pages here */
+	mapping = page_mapping(page);
+	if (!mapping)
+		return;
+
+	/*
+	 * pagecache page, file not yet mapped to userspace
+	 * Make a note that K-mapping is dirty
+	 */
+	if (!mapping_mapped(mapping)) {
+		set_bit(PG_arch_1, &page->flags);
+	} else if (page_mapped(page)) {
+
+		/* kernel reading from page with U-mapping */
+		void *paddr = page_address(page);
+		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+		if (addr_not_cache_congruent(paddr, vaddr))
+			__flush_dcache_page(paddr, vaddr);
+	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
 
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
-	__dc_line_op(start, sz, OP_FLUSH_N_INV);
+	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-	__dc_line_op(start, sz, OP_INV);
+	__dc_line_op_k(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-	__dc_line_op(start, sz, OP_FLUSH);
+	__dc_line_op_k(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
 /*
- * This is API for making I/D Caches consistent when modifying code
- * (loadable modules, kprobes,  etc)
+ * This is API for making I/D Caches consistent when modifying
+ * kernel code (loadable modules, kprobes, kgdb...)
  * This is called on insmod, with kernel virtual address for CODE of
  * the module. ARC cache maintenance ops require PHY address thus we
  * need to convert vmalloc addr to PHY addr
@@ -652,7 +544,6 @@
 {
 	unsigned int tot_sz, off, sz;
 	unsigned long phy, pfn;
-	unsigned long flags;
 
 	/* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */
 
@@ -673,8 +564,13 @@
 
 	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
 	if (likely(kstart > PAGE_OFFSET)) {
-		__ic_line_inv(kstart, kend - kstart);
-		__dc_line_op(kstart, kend - kstart, OP_FLUSH);
+		/*
+		 * The 2nd arg, despite being paddr, will be used to index icache.
+		 * This is OK since no alternate virtual mappings will exist
+		 * given the callers for this case: kprobe/kgdb in built-in
+		 * kernel code only.
+		 */
+		__sync_icache_dcache(kstart, kstart, kend - kstart);
 		return;
 	}
 
@@ -692,42 +588,45 @@
 		pfn = vmalloc_to_pfn((void *)kstart);
 		phy = (pfn << PAGE_SHIFT) + off;
 		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
-		local_irq_save(flags);
-		__dc_line_op(phy, sz, OP_FLUSH);
-		__ic_line_inv(phy, sz);
-		local_irq_restore(flags);
+		__sync_icache_dcache(phy, kstart, sz);
 		kstart += sz;
 		tot_sz -= sz;
 	}
 }
 
 /*
- * Optimised ver of flush_icache_range() with spec callers: ptrace/signals
- * where vaddr is also available. This allows passing both vaddr and paddr
- * bits to CDU for cache flush, short-circuting the current pessimistic algo
- * which kills all possible aliases.
- * An added adv of knowing that vaddr is user-vaddr avoids various checks
- * and handling for k-vaddr, k-paddr as done in orig ver above
+ * General purpose helper to make I and D cache lines consistent.
+ * @paddr is phy addr of region
+ * @vaddr is typically user or kernel vaddr (vmalloc)
+ *    However in one instance, flush_icache_range() by kprobe (for a breakpt in
+ *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
+ *    use a paddr to index the cache (despite VIPT). This is fine since a
+ *    built-in kernel page will not have any virtual mappings (not even kernel).
+ *    A kprobe on a loadable module is different as it will have a kvaddr.
  */
-void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
-			      int len)
+void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
-	__ic_line_inv_vaddr(paddr, u_vaddr, len);
-	__dc_line_op(paddr, len, OP_FLUSH);
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__ic_line_inv_vaddr(paddr, vaddr, len);
+	__dc_line_op(paddr, vaddr, len, OP_FLUSH);
+	local_irq_restore(flags);
+}
+
+/* wrapper allowing compile-time elimination of alignment checks in flush loop */
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
+{
+	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
 
 /*
- * XXX: This also needs to be optim using pg_arch_1
- * This is called when a page-cache page is about to be mapped into a
- * user process' address space.  It offers an opportunity for a
- * port to ensure d-cache/i-cache coherency if necessary.
+ * wrapper to clear out kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
  */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
 {
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-
-	__ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 void flush_icache_all(void)
@@ -756,6 +655,87 @@
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+		      unsigned long pfn)
+{
+	unsigned int paddr = pfn << PAGE_SHIFT;
+
+	__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+	void *kfrom = page_address(from);
+	void *kto = page_address(to);
+	int clean_src_k_mappings = 0;
+
+	/*
+	 * If SRC page was already mapped in userspace AND its U-mapping is
+	 * not congruent with K-mapping, sync the former to the physical page
+	 * so that the K-mapping in the memcpy below sees the right data
+	 *
+	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+	 * equally valid for SRC page as well
+	 */
+	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+		__flush_dcache_page(kfrom, u_vaddr);
+		clean_src_k_mappings = 1;
+	}
+
+	copy_page(kto, kfrom);
+
+	/*
+	 * Mark DST page K-mapping as dirty for a later finalization by
+	 * update_mmu_cache(). The finalization could have been done here as
+	 * well (given that both vaddr/paddr are available), but
+	 * update_mmu_cache() already has code to do that for other non-copied
+	 * user pages (e.g. read faults which wire in a pagecache page
+	 * directly).
+	 */
+	set_bit(PG_arch_1, &to->flags);
+
+	/*
+	 * if SRC was already usermapped and non-congruent to kernel mapping
+	 * sync the kernel mapping back to physical page
+	 */
+	if (clean_src_k_mappings) {
+		__flush_dcache_page(kfrom, kfrom);
+	} else {
+		set_bit(PG_arch_1, &from->flags);
+	}
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+	clear_page(to);
+	set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+		     unsigned long u_vaddr)
+{
+	/* TBD: do we really need to clear the kernel mapping */
+	__flush_dcache_page(page_address(page), u_vaddr);
+	__flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
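
To make the MMU v1/v2 "stuffing" described above concrete: with the fixed 8k page size (PAGE_SHIFT == 13), vaddr bits [17:13] are folded into paddr bits [4:0], which the CDU otherwise ignores as offset-within-line. A minimal worked sketch mirroring the expressions in __dc_line_loop() and __ic_line_inv_vaddr(); the addresses are made up for illustration:

/* Example: paddr = 0x80123440, vaddr = 0x20006440, PAGE_SHIFT = 13
 *   (vaddr >> 13) & 0x1F = 0x10003 & 0x1F   = 0x03
 *   cdu_addr             = 0x80123440 | 0x03 = 0x80123443
 * The low 5 bits now carry the cache colour; tag match still uses paddr. */
static unsigned long cdu_line_addr(unsigned long paddr, unsigned long vaddr)
{
	return paddr | ((vaddr >> PAGE_SHIFT) & 0x1F);
}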
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
index 014172b..aa652e2 100644
--- a/arch/arc/mm/extable.c
+++ b/arch/arc/mm/extable.c
@@ -27,7 +27,7 @@
 
 #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 
-long arc_copy_from_user_noinline(void *to, const void __user * from,
+long arc_copy_from_user_noinline(void *to, const void __user *from,
 		unsigned long n)
 {
 	return __arc_copy_from_user(to, from, n);
@@ -48,7 +48,7 @@
 }
 EXPORT_SYMBOL(arc_clear_user_noinline);
 
-long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
 		long count)
 {
 	return __arc_strncpy_from_user(dst, src, count);
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af55aab..689ffd8 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -12,7 +12,6 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
-#include <linux/version.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
 #include <asm/pgalloc.h>
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 727d479..4a17736 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -10,9 +10,6 @@
 #include <linux/mm.h>
 #include <linux/bootmem.h>
 #include <linux/memblock.h>
-#ifdef CONFIG_BLOCK_DEV_RAM
-#include <linux/blk.h>
-#endif
 #include <linux/swap.h>
 #include <linux/module.h>
 #include <asm/page.h>
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 3e5c92c..739e65f 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -12,7 +12,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <asm/cache.h>
+#include <linux/cache.h>
 
 void __iomem *ioremap(unsigned long paddr, unsigned long size)
 {
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 0000000..2e06d56
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,78 @@
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff)			\
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to avoid aliasing issues
+ * with VIPT caches: we need to ensure that a specific page of an object is
+ * always mapped at a multiple of SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int do_align = 0;
+	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
+
+	/*
+	 * We only need to do colour alignment if D cache aliases.
+	 */
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);
+
+	/*
+	 * We enforce the MAP_FIXED case.
+	 */
+	if (flags & MAP_FIXED) {
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
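
A quick worked example of COLOUR_ALIGN() under this port's assumptions (8k pages, so SHMLBA = 0x4000):

/*
 * COLOUR_ALIGN(0x5000, 1):
 *   round up:      (0x5000 + 0x3fff) & ~0x3fff = 0x8000
 *   colour offset: (1 << 13) & 0x3fff          = 0x2000
 *   result:        0x8000 + 0x2000             = 0xa000
 * i.e. page 1 of the object always lands in the "odd" cache bin,
 * whatever SHMLBA-aligned base the area ends up at.
 */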
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 9b9ce23..066145b 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -418,23 +418,52 @@
 	local_irq_restore(flags);
 }
 
-/* arch hook called by core VM at the end of handle_mm_fault( ),
- * when a new PTE is entered in Page Tables or an existing one
- * is modified. We aggresively pre-install a TLB entry
+/*
+ * Called at the end of pagefault, for a userspace mapped page
+ *  -pre-install the corresponding TLB entry into MMU
+ *  -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *  	flush_dcache_page(), copy_user_page()
+ *
+ * Note that the flush (when done) involves both WBACK (so the physical page
+ * is in sync) and INV (so no non-congruent aliases remain)
  */
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
+	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
-	create_tlb(vma, vaddress, ptep);
+	create_tlb(vma, vaddr, ptep);
+
+	/*
+	 * Exec page : Independent of aliasing/page-color considerations,
+	 *	       since icache doesn't snoop dcache on ARC, any dirty
+	 *	       K-mapping of a code page needs to be wback+inv so that
+	 *	       icache fetch by userspace sees code correctly.
+	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+	 *	       so userspace sees the right data.
+	 *  (Avoids the flush for Non-exec + congruent mapping case)
+	 */
+	if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
+		struct page *page = pfn_to_page(pte_pfn(*ptep));
+
+		int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
+		if (dirty) {
+			/* wback + inv dcache lines */
+			__flush_dcache_page(paddr, paddr);
+
+			/* invalidate any existing icache lines */
+			if (vma->vm_flags & VM_EXEC)
+				__inv_icache_page(paddr, vaddr);
+		}
+	}
 }
 
 /* Read the MMU Build Configuration Registers, Decode them and save into
  * the cpuinfo structure for later use.
  * No Validation is done here, simply read/convert the BCRs
  */
-void __init read_decode_mmu_bcr(void)
+void __cpuinit read_decode_mmu_bcr(void)
 {
 	unsigned int tmp;
 	struct bcr_mmu_1_2 *mmu2;	/* encoded MMU2 attr */
@@ -466,7 +495,7 @@
 char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	int n = 0;
-	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
 
 	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
 		       p_mmu->ver, TO_KB(p_mmu->pg_sz));
@@ -480,7 +509,7 @@
 	return buf;
 }
 
-void __init arc_mmu_init(void)
+void __cpuinit arc_mmu_init(void)
 {
 	char str[256];
 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
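
Putting both halves together, the deferred-flush handshake between flush_dcache_page() (in cache_arc700.c above) and update_mmu_cache() can be summarised as follows; this is a pseudo-C distillation for illustration, not literal kernel code:

/*
 * kernel writes a page with no U-mapping yet:
 *     flush_dcache_page(page)
 *         set_bit(PG_arch_1, &page->flags);       // note: K-mapping dirty
 *
 * page fault later wires in the U-mapping:
 *     update_mmu_cache(vma, vaddr, ptep)
 *         create_tlb(...);                        // preload TLB entry
 *         if (VM_EXEC || !congruent(paddr, vaddr))
 *                 if (test_and_clear_bit(PG_arch_1, &page->flags)) {
 *                         __flush_dcache_page(paddr, paddr);   // wback+inv
 *                         if (VM_EXEC)
 *                                 __inv_icache_page(paddr, vaddr);
 *                 }
 */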
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
index 4e20a1a..b3700c0 100644
--- a/arch/arc/plat-arcfpga/platform.c
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -224,3 +224,15 @@
 	.init_smp	= iss_model_init_smp,
 #endif
 MACHINE_END
+
+static const char *nsimosci_compat[] __initdata = {
+	"snps,nsimosci",
+	NULL,
+};
+
+MACHINE_START(NSIMOSCI, "nsimosci")
+	.dt_compat	= nsimosci_compat,
+	.init_early	= NULL,
+	.init_machine	= plat_fpga_populate_dev,
+	.init_irq	= NULL,
+MACHINE_END
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
new file mode 100644
index 0000000..1d34521
--- /dev/null
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -0,0 +1,29 @@
+# Abilis Systems TB10x platform kernel configuration file
+#
+# Author: Christian Ruppert <christian.ruppert@abilis.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+
+
+menuconfig ARC_PLAT_TB10X
+	bool "Abilis TB10x"
+	select COMMON_CLK
+	select PINCTRL
+	select PINMUX
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  Support for platforms based on the TB10x home media gateway SoC by
+	  Abilis Systems. TB10x is based on the ARC700 CPU architecture.
+	  Say Y if you are building a kernel for one of the SoCs in this
+	  series (e.g. TB100 or TB101). If in doubt say N.
diff --git a/arch/arc/plat-tb10x/Makefile b/arch/arc/plat-tb10x/Makefile
new file mode 100644
index 0000000..89611d2
--- /dev/null
+++ b/arch/arc/plat-tb10x/Makefile
@@ -0,0 +1,21 @@
+# Abilis Systems TB10x platform Makefile
+#
+# Author: Christian Ruppert <christian.ruppert@abilis.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+
+
+KBUILD_CFLAGS   += -Iarch/arc/plat-tb10x/include
+
+obj-y += tb10x.o
diff --git a/arch/arc/plat-tb10x/tb10x.c b/arch/arc/plat-tb10x/tb10x.c
new file mode 100644
index 0000000..d356769
--- /dev/null
+++ b/arch/arc/plat-tb10x/tb10x.c
@@ -0,0 +1,71 @@
+/*
+ * Abilis Systems TB10x platform initialisation
+ *
+ * Copyright (C) Abilis Systems 2012
+ *
+ * Author: Christian Ruppert <christian.ruppert@abilis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/clk-provider.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <asm/mach_desc.h>
+
+
+static void __init tb10x_platform_init(void)
+{
+	of_clk_init(NULL);
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static void __init tb10x_platform_late_init(void)
+{
+	struct device_node *dn;
+
+	/*
+	 * Pinctrl documentation recommends setting up the iomux here for
+	 * all modules which don't require control over the pins themselves.
+	 * Modules which need this kind of assistance are compatible with
+	 * "abilis,simple-pinctrl", i.e. we can easily iterate over them.
+	 * TODO: Does this recommended method work cleanly with pins required
+	 * by modules?
+	 */
+	for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") {
+		struct platform_device *pd = of_find_device_by_node(dn);
+		struct pinctrl *pctl;
+
+		pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default");
+		if (IS_ERR(pctl)) {
+			int ret = PTR_ERR(pctl);
+			dev_err(&pd->dev, "Could not set up pinctrl: %d\n",
+				ret);
+		}
+	}
+}
+
+static const char *tb10x_compat[] __initdata = {
+	"abilis,arc-tb10x",
+	NULL,
+};
+
+MACHINE_START(TB10x, "tb10x")
+	.dt_compat	= tb10x_compat,
+	.init_machine	= tb10x_platform_init,
+	.init_late	= tb10x_platform_late_init,
+MACHINE_END
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 34ef016..d423d58 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -109,9 +109,6 @@
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
-config GENERIC_GPIO
-	bool
-
 config HAVE_TCM
 	bool
 	select GENERIC_ALLOCATOR
@@ -900,7 +897,6 @@
 	bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
 	default y
 	select ARCH_MULTI_V6_V7
-	select ARCH_VEXPRESS
 	select CPU_V7
 
 config ARCH_MULTI_V6_V7
@@ -1794,6 +1790,7 @@
 	depends on ARM && AEABI && OF
 	depends on CPU_V7 && !CPU_V6
 	depends on !GENERIC_ATOMIC64
+	select ARM_PSCI
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 8562af4..b9f7121 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -200,8 +200,8 @@
 dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
 	vexpress-v2p-ca9.dtb \
 	vexpress-v2p-ca15-tc1.dtb \
-	vexpress-v2p-ca15_a7.dtb \
-	xenvm-4.2.dtb
+	vexpress-v2p-ca15_a7.dtb
+dtb-$(CONFIG_ARCH_VIRT) += xenvm-4.2.dtb
 dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
 	wm8505-ref.dtb \
 	wm8650-mid.dtb \
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index d110110..1460d9b 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -403,5 +403,17 @@
 			       0x44d80000 0x2000>;	/* M3 DMEM */
 			ti,hwmods = "wkup_m3";
 		};
+
+		gpmc: gpmc@50000000 {
+			compatible = "ti,am3352-gpmc";
+			ti,hwmods = "gpmc";
+			reg = <0x50000000 0x2000>;
+			interrupts = <100>;
+			num-cs = <7>;
+			num-waitpins = <2>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi
index 0a61bbb..3f0239e 100644
--- a/arch/arm/boot/dts/cros5250-common.dtsi
+++ b/arch/arm/boot/dts/cros5250-common.dtsi
@@ -175,6 +175,14 @@
 	i2c@12C70000 {
 		samsung,i2c-sda-delay = <100>;
 		samsung,i2c-max-bus-freq = <378000>;
+
+		trackpad {
+			reg = <0x67>;
+			compatible = "cypress,cyapa";
+			interrupts = <2 0>;
+			interrupt-parent = <&gpx1>;
+			wakeup-source;
+		};
 	};
 
 	i2c@12C80000 {
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 26d856b..3e0c792 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -214,7 +214,7 @@
 	};
 
 	usb@12110000 {
-		samsung,vbus-gpio = <&gpx2 6 1 3 3>;
+		samsung,vbus-gpio = <&gpx2 6 0>;
 	};
 
 	dp-controller {
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index bf4744b..d449feb 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -183,7 +183,7 @@
 	};
 
 	usb@12110000 {
-		samsung,vbus-gpio = <&gpx1 1 1 3 3>;
+		samsung,vbus-gpio = <&gpx1 1 0>;
 	};
 
 	fixed-rate-clocks {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 5a31964a..3046d1f 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -122,6 +122,7 @@
 
 &usb_otg_hs {
 	interface-type = <0>;
+	usb-phy = <&usb2_phy>;
 	mode = <3>;
 	power = <50>;
 };
diff --git a/arch/arm/boot/dts/omap3-evm.dts b/arch/arm/boot/dts/omap3-evm.dts
index 05f51e1..96d1c20 100644
--- a/arch/arm/boot/dts/omap3-evm.dts
+++ b/arch/arm/boot/dts/omap3-evm.dts
@@ -68,6 +68,7 @@
 
 &usb_otg_hs {
 	interface-type = <0>;
+	usb-phy = <&usb2_phy>;
 	mode = <3>;
 	power = <50>;
 };
diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi
index d4a7280d..a626c50 100644
--- a/arch/arm/boot/dts/omap3-overo.dtsi
+++ b/arch/arm/boot/dts/omap3-overo.dtsi
@@ -73,6 +73,7 @@
 
 &usb_otg_hs {
 	interface-type = <0>;
+	usb-phy = <&usb2_phy>;
 	mode = <3>;
 	power = <50>;
 };
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 4ad03d9..82a404d 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -519,7 +519,6 @@
 			interrupts = <0 92 0x4>, <0 93 0x4>;
 			interrupt-names = "mc", "dma";
 			ti,hwmods = "usb_otg_hs";
-			usb-phy = <&usb2_phy>;
 			multipoint = <1>;
 			num-eps = <16>;
 			ram-bits = <12>;
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index b89233e..f3447bc 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -20,9 +20,9 @@
 		cpu@0 {
 			operating-points = <
 				/* kHz    uV */
-				300000   975000
-				600000  1075000
-				800000  1200000
+				300000  1012500
+				600000  1200000
+				800000  1325000
 			>;
 			clock-latency = <300000>; /* From legacy driver */
 		};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index c387bdc..a35d9cd 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -223,6 +223,15 @@
 		>;
 	};
 
+	mcspi1_pins: pinmux_mcspi1_pins {
+		pinctrl-single,pins = <
+			0xf2 0x100	/*  mcspi1_clk.mcspi1_clk INPUT | MODE0 */
+			0xf4 0x100	/*  mcspi1_somi.mcspi1_somi INPUT | MODE0 */
+			0xf6 0x100	/*  mcspi1_simo.mcspi1_simo INPUT | MODE0 */
+			0xf8 0x100	/*  mcspi1_cs0.mcspi1_cs0 INPUT | MODE0*/
+		>;
+	};
+
 	dss_hdmi_pins: pinmux_dss_hdmi_pins {
 		pinctrl-single,pins = <
 			0x5a 0x118	/* hdmi_cec.hdmi_cec INPUT PULLUP | MODE 0 */
@@ -358,12 +367,15 @@
 };
 
 &mcspi1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mcspi1_pins>;
+
 	eth@0 {
 		compatible = "ks8851";
 		spi-max-frequency = <24000000>;
 		reg = <0>;
 		interrupt-parent = <&gpio2>;
-		interrupts = <2>; /* gpio line 34 */
+		interrupts = <2 8>; /* gpio line 34, low triggered */
 		vdd-supply = <&vdd_eth>;
 	};
 };
diff --git a/arch/arm/boot/dts/omap4-var-som.dts b/arch/arm/boot/dts/omap4-var-som.dts
index 222a413..7e04103 100644
--- a/arch/arm/boot/dts/omap4-var-som.dts
+++ b/arch/arm/boot/dts/omap4-var-som.dts
@@ -68,7 +68,7 @@
 		spi-max-frequency = <24000000>;
 		reg = <0>;
 		interrupt-parent = <&gpio6>;
-		interrupts = <11>; /* gpio line 171 */
+		interrupts = <11 8>; /* gpio line 171, low triggered */
 		vdd-supply = <&vdd_eth>;
 	};
 };
diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi
index 7c2c23c..2cf227c 100644
--- a/arch/arm/boot/dts/omap4460.dtsi
+++ b/arch/arm/boot/dts/omap4460.dtsi
@@ -15,9 +15,9 @@
 		cpu@0 {
 			operating-points = <
 				/* kHz    uV */
-				350000   975000
-				700000  1075000
-				920000  1200000
+				350000  1025000
+				700000  1200000
+				920000  1313000
 			>;
 			clock-latency = <300000>; /* From legacy driver */
 		};
diff --git a/arch/arm/boot/dts/xenvm-4.2.dts b/arch/arm/boot/dts/xenvm-4.2.dts
index ec3f952..3369151 100644
--- a/arch/arm/boot/dts/xenvm-4.2.dts
+++ b/arch/arm/boot/dts/xenvm-4.2.dts
@@ -29,6 +29,19 @@
 			compatible = "arm,cortex-a15";
 			reg = <0>;
 		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a15";
+			reg = <1>;
+		};
+	};
+
+	psci {
+		compatible      = "arm,psci";
+		method          = "hvc";
+		cpu_off         = <1>;
+		cpu_on          = <2>;
 	};
 
 	memory@80000000 {
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 33903ca..c1ef64b 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -137,6 +137,8 @@
 CONFIG_SERIAL_8250_RSA=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_SPI=y
@@ -153,6 +155,7 @@
 CONFIG_TWL4030_WATCHDOG=y
 CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65910=y
+CONFIG_TWL6040_CORE=y
 CONFIG_REGULATOR_TWL4030=y
 CONFIG_REGULATOR_TPS65023=y
 CONFIG_REGULATOR_TPS6507X=y
@@ -195,6 +198,7 @@
 CONFIG_SND_SOC=m
 CONFIG_SND_OMAP_SOC=m
 CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
+CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
 CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
 CONFIG_USB=y
 CONFIG_USB_DEBUG=y
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 8a82325..799f42e 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -46,6 +46,7 @@
 unsigned long HYPERVISOR_hvm_op(int op, void *arg);
 int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
 int HYPERVISOR_physdev_op(int cmd, void *arg);
+int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
 
 static inline void
 MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 70f1bde..5af04f6 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -180,6 +180,13 @@
 	unsigned long dt_root;
 	const char *model;
 
+#ifdef CONFIG_ARCH_MULTIPLATFORM
+	DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
+	MACHINE_END
+
+	mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT;
+#endif
+
 	if (!dt_phys)
 		return NULL;
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 728007c..1522c7a 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/screen_info.h>
+#include <linux/of_platform.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
 #include <linux/of_fdt.h>
@@ -659,9 +660,19 @@
 
 static int __init customize_machine(void)
 {
-	/* customizes platform devices, or adds new ones */
+	/*
+	 * Customizes platform devices, or adds new ones.
+	 * On DT-based machines, fall back to populating the machine
+	 * from the device tree if no callback is provided; otherwise
+	 * every DT machine would need an init_machine callback.
+	 */
 	if (machine_desc->init_machine)
 		machine_desc->init_machine();
+#ifdef CONFIG_OF
+	else
+		of_platform_populate(NULL, of_default_bus_match_table,
+					NULL, NULL);
+#endif
 	return 0;
 }
 arch_initcall(customize_machine);
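
Note: with this fallback in place, a DT-only platform can omit init_machine
entirely. A minimal sketch of such a board file (the compatible string and
machine name are made up for illustration):

	#include <asm/mach/arch.h>

	static const char *const foo_dt_match[] __initconst = {
		"vendor,foo-board",	/* assumed compatible string */
		NULL,
	};

	DT_MACHINE_START(FOO, "Foo board (Flattened Device Tree)")
		.dt_compat	= foo_dt_match,
	MACHINE_END
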
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index 3f30aa1..57344b7 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -344,6 +344,7 @@
 #define EXYNOS5_FSYS_ARM_OPTION					S5P_PMUREG(0x2208)
 #define EXYNOS5_ISP_ARM_OPTION					S5P_PMUREG(0x2288)
 #define EXYNOS5_ARM_COMMON_OPTION				S5P_PMUREG(0x2408)
+#define EXYNOS5_ARM_L2_OPTION					S5P_PMUREG(0x2608)
 #define EXYNOS5_TOP_PWR_OPTION					S5P_PMUREG(0x2C48)
 #define EXYNOS5_TOP_PWR_SYSMEM_OPTION				S5P_PMUREG(0x2CC8)
 #define EXYNOS5_JPEG_MEM_OPTION					S5P_PMUREG(0x2F48)
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index daebc1a..97d6885 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -228,6 +228,7 @@
 	{ EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,	{ 0x0, 0x0, 0x0} },
 	{ EXYNOS5_ARM_COMMON_SYS_PWR_REG,		{ 0x0, 0x0, 0x2} },
 	{ EXYNOS5_ARM_L2_SYS_PWR_REG,			{ 0x3, 0x3, 0x3} },
+	{ EXYNOS5_ARM_L2_OPTION,			{ 0x10, 0x10, 0x0 } },
 	{ EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,		{ 0x1, 0x0, 0x1} },
 	{ EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,		{ 0x1, 0x0, 0x1} },
 	{ EXYNOS5_CMU_RESET_SYS_PWR_REG,		{ 0x1, 0x1, 0x0} },
@@ -353,11 +354,9 @@
 
 	/*
 	 * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
-	 * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
 	 */
 	tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
-	tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
-		EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
+	tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
 	__raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
 
 	/*
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 78f795d..ba44328 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -5,6 +5,7 @@
 	select AUTO_ZRELADDR if !ZBOOT_ROM
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
+	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IRQ_CHIP
 	select MULTI_IRQ_HANDLER
@@ -61,10 +62,6 @@
 config ARCH_HAS_RNGA
 	bool
 
-config IRAM_ALLOC
-	bool
-	select GENERIC_ALLOCATOR
-
 config HAVE_IMX_ANATOP
 	bool
 
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 9309589..70ae7c4 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -23,7 +23,6 @@
 obj-$(CONFIG_MXC_TZIC) += tzic.o
 obj-$(CONFIG_MXC_AVIC) += avic.o
 
-obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o
 obj-$(CONFIG_MXC_ULPI) += ulpi.o
 obj-$(CONFIG_MXC_USE_EPIT) += epit.o
 obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 4cba7db..c08ae3f 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -12,6 +12,7 @@
 #define __ASM_ARCH_MXC_COMMON_H__
 
 struct platform_device;
+struct pt_regs;
 struct clk;
 enum mxc_cpu_pwr_mode;
 
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index a58c8b0..67b9c48 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -24,7 +24,7 @@
 ENDPROC(v7_secondary_startup)
 #endif
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_ARM_CPU_SUSPEND
 /*
  * The following code must assume it is running from physical address
  * where absolute virtual addresses to the data section have to be
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 5e91112..3daf1ed 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -11,7 +11,9 @@
  */
 
 #include <linux/errno.h>
+#include <linux/jiffies.h>
 #include <asm/cp15.h>
+#include <asm/proc-fns.h>
 
 #include "common.h"
 
diff --git a/arch/arm/mach-imx/iram_alloc.c b/arch/arm/mach-imx/iram_alloc.c
deleted file mode 100644
index e05cf40..0000000
--- a/arch/arm/mach-imx/iram_alloc.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/genalloc.h>
-#include "linux/platform_data/imx-iram.h"
-
-static unsigned long iram_phys_base;
-static void __iomem *iram_virt_base;
-static struct gen_pool *iram_pool;
-
-static inline void __iomem *iram_phys_to_virt(unsigned long p)
-{
-	return iram_virt_base + (p - iram_phys_base);
-}
-
-void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr)
-{
-	if (!iram_pool)
-		return NULL;
-
-	*dma_addr = gen_pool_alloc(iram_pool, size);
-	pr_debug("iram alloc - %dB@0x%lX\n", size, *dma_addr);
-	if (!*dma_addr)
-		return NULL;
-	return iram_phys_to_virt(*dma_addr);
-}
-EXPORT_SYMBOL(iram_alloc);
-
-void iram_free(unsigned long addr, unsigned int size)
-{
-	if (!iram_pool)
-		return;
-
-	gen_pool_free(iram_pool, addr, size);
-}
-EXPORT_SYMBOL(iram_free);
-
-int __init iram_init(unsigned long base, unsigned long size)
-{
-	iram_phys_base = base;
-
-	iram_pool = gen_pool_create(PAGE_SHIFT, -1);
-	if (!iram_pool)
-		return -ENOMEM;
-
-	gen_pool_add(iram_pool, base, size, -1);
-	iram_virt_base = ioremap(iram_phys_base, size);
-	if (!iram_virt_base)
-		return -EIO;
-
-	pr_debug("i.MX IRAM pool: %ld KB@0x%p\n", size / 1024, iram_virt_base);
-	return 0;
-}
diff --git a/arch/arm/mach-msm/last_radio_log.c b/arch/arm/mach-msm/last_radio_log.c
index 7777767..9c392a2 100644
--- a/arch/arm/mach-msm/last_radio_log.c
+++ b/arch/arm/mach-msm/last_radio_log.c
@@ -66,6 +66,6 @@
 	pr_err("%s: last radio log is %d bytes long\n", __func__,
 		radio_log_size);
 	last_radio_log_fops.owner = owner;
-	entry->size = radio_log_size;
+	proc_set_size(entry, radio_log_size);
 }
 EXPORT_SYMBOL(msm_init_last_radio_log);
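
Note: this is part of the 3.10 procfs cleanup that makes struct proc_dir_entry
opaque outside fs/proc, so sizes are now set through an accessor. A minimal
sketch of the new style (the file name, fops, and size variable are
placeholders):

	#include <linux/proc_fs.h>

	entry = proc_create("example", S_IRUGO, NULL, &example_fops);
	if (entry)
		proc_set_size(entry, buf_size);	/* was: entry->size = buf_size; */
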
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index 1a4e887..68ab858 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -301,7 +301,7 @@
 	if (ret) {
 		dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
 			__func__, pdev->name, pdev->id);
-		goto exit_device_put;
+		goto exit_iounmap;
 	}
 
 	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
@@ -309,7 +309,7 @@
 		dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
 			__func__, pdev->name);
 		ret = -ENOMEM;
-		goto exit_device_del;
+		goto exit_iounmap;
 	}
 
 	d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
@@ -402,8 +402,8 @@
 	kfree(d);
 exit_release_p:
 	kfree(p);
-exit_device_del:
-	platform_device_del(pdev);
+exit_iounmap:
+	iounmap(dma_base);
 exit_device_put:
 	platform_device_put(pdev);
 
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 857b1f0..f49cd51 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -37,8 +37,6 @@
 	select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
 	select PM_RUNTIME
 	select REGULATOR
-	select SERIAL_OMAP
-	select SERIAL_OMAP_CONSOLE
 	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
 	select VFP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 62bb352..55a9d67 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -32,12 +32,12 @@
 
 # SMP support ONLY available for OMAP4
 
-obj-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
-obj-$(CONFIG_HOTPLUG_CPU)		+= omap-hotplug.o
+smp-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
+smp-$(CONFIG_HOTPLUG_CPU)		+= omap-hotplug.o
 omap-4-5-common				=  omap4-common.o omap-wakeupgen.o \
 					   sleep44xx.o
-obj-$(CONFIG_ARCH_OMAP4)		+= $(omap-4-5-common)
-obj-$(CONFIG_SOC_OMAP5)			+= $(omap-4-5-common)
+obj-$(CONFIG_ARCH_OMAP4)		+= $(omap-4-5-common) $(smp-y)
+obj-$(CONFIG_SOC_OMAP5)			+= $(omap-4-5-common) $(smp-y)
 
 plus_sec := $(call as-instr,.arch_extension sec,+sec)
 AFLAGS_omap-headsmp.o			:=-Wa,-march=armv7-a$(plus_sec)
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 6de7860..04c1165 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -112,13 +112,13 @@
  */
 static struct {
 	int mmc1_gpio_wp;
-	int usb_pwr_level;
+	bool usb_pwr_level;	/* 0 - Active Low, 1 - Active High */
 	int dvi_pd_gpio;
 	int usr_button_gpio;
 	int mmc_caps;
 } beagle_config = {
 	.mmc1_gpio_wp = -EINVAL,
-	.usb_pwr_level = GPIOF_OUT_INIT_LOW,
+	.usb_pwr_level = 0,
 	.dvi_pd_gpio = -EINVAL,
 	.usr_button_gpio = 4,
 	.mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
@@ -178,7 +178,7 @@
 	case 0:
 		printk(KERN_INFO "OMAP3 Beagle Rev: xM Ax/Bx\n");
 		omap3_beagle_version = OMAP3BEAGLE_BOARD_XM;
-		beagle_config.usb_pwr_level = GPIOF_OUT_INIT_HIGH;
+		beagle_config.usb_pwr_level = 1;
 		beagle_config.mmc_caps &= ~MMC_CAP_8_BIT_DATA;
 		break;
 	case 2:
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 1a88467..18ca61e 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -73,11 +73,11 @@
 #define LIS302_IRQ1_GPIO 181
 #define LIS302_IRQ2_GPIO 180  /* Not yet in use */
 
-/* list all spi devices here */
+/* List all SPI devices here. Note that the list/probe order seems to matter! */
 enum {
 	RX51_SPI_WL1251,
-	RX51_SPI_MIPID,		/* LCD panel */
 	RX51_SPI_TSC2005,	/* Touch Controller */
+	RX51_SPI_MIPID,		/* LCD panel */
 };
 
 static struct wl12xx_platform_data wl1251_pdata;
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index dab9fc0..49fd0d5 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/of.h>
 #include <linux/omap-dma.h>
 
 #include "soc.h"
@@ -304,6 +305,9 @@
 	if (res)
 		return res;
 
+	if (of_have_populated_dt())
+		return res;
+
 	pdev = platform_device_register_full(&omap_dma_dev_info);
 	if (IS_ERR(pdev))
 		return PTR_ERR(pdev);
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index ed946df..6c4da12 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1520,36 +1520,22 @@
 		return ret;
 	}
 
-	for_each_node_by_name(child, "nand") {
-		ret = gpmc_probe_nand_child(pdev, child);
-		if (ret < 0) {
-			of_node_put(child);
-			return ret;
-		}
-	}
+	for_each_child_of_node(pdev->dev.of_node, child) {
 
-	for_each_node_by_name(child, "onenand") {
-		ret = gpmc_probe_onenand_child(pdev, child);
-		if (ret < 0) {
-			of_node_put(child);
-			return ret;
-		}
-	}
+		if (!child->name)
+			continue;
 
-	for_each_node_by_name(child, "nor") {
-		ret = gpmc_probe_generic_child(pdev, child);
-		if (ret < 0) {
-			of_node_put(child);
-			return ret;
-		}
-	}
+		if (of_node_cmp(child->name, "nand") == 0)
+			ret = gpmc_probe_nand_child(pdev, child);
+		else if (of_node_cmp(child->name, "onenand") == 0)
+			ret = gpmc_probe_onenand_child(pdev, child);
+		else if (of_node_cmp(child->name, "ethernet") == 0 ||
+			 of_node_cmp(child->name, "nor") == 0)
+			ret = gpmc_probe_generic_child(pdev, child);
 
-	for_each_node_by_name(child, "ethernet") {
-		ret = gpmc_probe_generic_child(pdev, child);
-		if (ret < 0) {
+		if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
+			 __func__, child->full_name))
 			of_node_put(child);
-			return ret;
-		}
 	}
 
 	return 0;
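
Note: a sketch of the single-pass dispatch pattern the new loop uses, with the
iterator's reference counting spelled out (the probe helper is hypothetical):

	#include <linux/of.h>

	struct device_node *child;
	int ret;

	for_each_child_of_node(pdev->dev.of_node, child) {
		if (of_node_cmp(child->name, "nand") != 0)
			continue;
		ret = probe_nand_child(pdev, child);	/* hypothetical helper */
		if (ret < 0) {
			/* the iterator holds a reference on "child";
			 * drop it when leaving the loop early */
			of_node_put(child);
			break;
		}
	}
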
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 0f4c18e..1272c41 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -419,11 +419,15 @@
 			cpu_rev = "1.0";
 			break;
 		case 1:
-		/* FALLTHROUGH */
-		default:
 			omap_revision = AM335X_REV_ES2_0;
 			cpu_rev = "2.0";
 			break;
+		case 2:
+		/* FALLTHROUGH */
+		default:
+			omap_revision = AM335X_REV_ES2_1;
+			cpu_rev = "2.1";
+			break;
 		}
 		break;
 	case 0xb8f2:
@@ -644,13 +648,12 @@
 	soc_dev_attr->revision = soc_rev;
 
 	soc_dev = soc_device_register(soc_dev_attr);
-	if (IS_ERR_OR_NULL(soc_dev)) {
+	if (IS_ERR(soc_dev)) {
 		kfree(soc_dev_attr);
 		return;
 	}
 
 	parent = soc_device_to_device(soc_dev);
-	if (!IS_ERR_OR_NULL(parent))
-		device_create_file(parent, &omap_soc_attr);
+	device_create_file(parent, &omap_soc_attr);
 }
 #endif /* CONFIG_SOC_BUS */
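
Note: soc_device_register() returns either a valid pointer or an
ERR_PTR()-encoded error, never NULL, so plain IS_ERR() is the correct check
and IS_ERR_OR_NULL() only obscured that contract. The idiom, sketched:

	soc_dev = soc_device_register(soc_dev_attr);
	if (IS_ERR(soc_dev)) {
		kfree(soc_dev_attr);
		return;	/* or PTR_ERR(soc_dev) in a value-returning context */
	}
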
diff --git a/arch/arm/mach-omap2/mux34xx.h b/arch/arm/mach-omap2/mux34xx.h
index 6543ebf..3f26d29 100644
--- a/arch/arm/mach-omap2/mux34xx.h
+++ b/arch/arm/mach-omap2/mux34xx.h
@@ -393,6 +393,10 @@
 #define OMAP3_CONTROL_PADCONF_SAD2D_SWAKEUP_OFFSET		0xa1c
 #define OMAP3_CONTROL_PADCONF_JTAG_RTCK_OFFSET			0xa1e
 #define OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET			0xa20
+#define OMAP3_CONTROL_PADCONF_GPIO_127				0xa24
+#define OMAP3_CONTROL_PADCONF_GPIO_126				0xa26
+#define OMAP3_CONTROL_PADCONF_GPIO_128				0xa28
+#define OMAP3_CONTROL_PADCONF_GPIO_129				0xa2a
 
 #define OMAP3_CONTROL_PADCONF_MUX_SIZE				\
-		(OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET + 0x2)
+		(OMAP3_CONTROL_PADCONF_GPIO_129 + 0x2)
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index eeea4fa..e6d2307 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -876,4 +876,4 @@
 	bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
 	return 0;
 }
-omap_late_initcall(omap_device_late_init);
+omap_late_initcall_sync(omap_device_late_init);
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index 18fdeeb..197cc16 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -396,6 +396,7 @@
 #define AM335X_CLASS		0x33500033
 #define AM335X_REV_ES1_0	AM335X_CLASS
 #define AM335X_REV_ES2_0	(AM335X_CLASS | (0x1 << 8))
+#define AM335X_REV_ES2_1	(AM335X_CLASS | (0x2 << 8))
 
 #define OMAP443X_CLASS		0x44300044
 #define OMAP4430_REV_ES1_0	(OMAP443X_CLASS | (0x10 << 8))
@@ -496,6 +497,7 @@
 #define omap_subsys_initcall(fn)	omap_initcall(subsys_initcall, fn)
 #define omap_device_initcall(fn)	omap_initcall(device_initcall, fn)
 #define omap_late_initcall(fn)		omap_initcall(late_initcall, fn)
+#define omap_late_initcall_sync(fn)	omap_initcall(late_initcall_sync, fn)
 
 #endif	/* __ASSEMBLY__ */
 
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 0548149..f8b23b8 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -553,6 +553,8 @@
 			       clksrc_nr, clksrc_src, clksrc_prop)	\
 void __init omap##name##_gptimer_timer_init(void)			\
 {									\
+	if (omap_clk_init)						\
+		omap_clk_init();					\
 	omap_dmtimer_init();						\
 	omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);	\
 	omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src,		\
@@ -563,6 +565,8 @@
 				clksrc_nr, clksrc_src, clksrc_prop)	\
 void __init omap##name##_sync32k_timer_init(void)		\
 {									\
+	if (omap_clk_init)						\
+		omap_clk_init();					\
 	omap_dmtimer_init();						\
 	omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);	\
 	/* Enable the use of clocksource="gp_timer" kernel parameter */	\
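
Note: omap_clk_init here is assumed to be a function pointer that each SoC's
early init aims at its own clock-tree setup, so guarding the call keeps SoCs
that never set it working. Roughly:

	/* sketch; the pointer and its assignment site are assumptions */
	void (*omap_clk_init)(void);

	static void __init omap_timer_common_init(void)	/* hypothetical */
	{
		if (omap_clk_init)
			omap_clk_init();	/* clock tree up before timers */
		/* clockevent/clocksource setup follows */
	}
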
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 80ca974..6988b11 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -38,7 +38,7 @@
 	select CPU_V7
 	select HAVE_ARM_SCU if SMP
 	select HAVE_SMP
-	select SMP_ON_UP
+	select SMP_ON_UP if SMP
 	help
           Support for CSR SiRFSoC ARM Cortex A9 Platform
 
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 9075461..96100db 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -162,7 +162,6 @@
 	select MTD
 	select MTD_CFI
 	select MTD_CFI_INTELEXT
-	select MTD_CHAR
 	select MTD_PHYSMAP
 	select PXA25x
 	select SMC91X
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index 3621599..7aa6e8c 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -35,6 +35,8 @@
 	 * write alloc and 'Full line of zero' options
 	 *
 	 */
+	if (!IS_ENABLED(CONFIG_CACHE_L2X0))
+		return;
 
 	writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
 
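
Note: IS_ENABLED(CONFIG_FOO) expands to a compile-time constant that is true
for both =y and =m, so an early return like the one added here lets the
compiler discard the dead register writes while still type-checking them:

	if (!IS_ENABLED(CONFIG_CACHE_L2X0))
		return;	/* code below compiles but is eliminated when L2X0 is off */
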
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 20c3b37..84d72fc 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -63,6 +63,7 @@
 	select ARM_ARCH_TIMER
 	select ARM_GIC
 	select ARM_L1_CACHE_SHIFT_6
+	select CPU_FREQ_TABLE if CPU_FREQ
 	select CPU_V7
 	select PINCTRL
 	select PINCTRL_TEGRA114
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index f66d7de..6a4387e 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -19,6 +19,8 @@
 config UX500_SOC_COMMON
 	bool
 	default y
+	select ABX500_CORE
+	select AB8500_CORE
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_764369 if SMP
 	select ARM_GIC
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index a15dd6b..3cd555a 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -403,8 +403,8 @@
 			"no regulator\n");
 		return PTR_ERR(prox_regulator);
 	}
-	regulator_enable(prox_regulator);
-	return 0;
+
+	return regulator_enable(prox_regulator);
 }
 
 static void mop500_prox_deactivate(struct device *dev)
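
Note: regulator_enable() is marked __must_check, so its result should be
propagated rather than dropped on the floor. The general shape (the supply
name is an assumption):

	struct regulator *reg = regulator_get(dev, "vcc");

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	return regulator_enable(reg);
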
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 995928b..e90b5ab 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -191,7 +191,7 @@
 	/* Throw these device-specific numbers into the entropy pool */
 	add_device_randomness(uid, 0x14);
 	return kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
-			 readl((u32 *)uid+1),
+			 readl((u32 *)uid+0),
 			 readl((u32 *)uid+1), readl((u32 *)uid+2),
 			 readl((u32 *)uid+3), readl((u32 *)uid+4));
 }
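
Note: the old code read word 1 twice and never word 0 (a copy/paste slip), so
the reported UID was wrong; indexing each of the five 32-bit words exactly
once recovers the full 160-bit unique ID:

	kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
		  readl((u32 *)uid + 0), readl((u32 *)uid + 1),
		  readl((u32 *)uid + 2), readl((u32 *)uid + 3),
		  readl((u32 *)uid + 4));
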
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index b6083bb..8802030 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -450,7 +450,6 @@
 
 static const char * const v2m_dt_match[] __initconst = {
 	"arm,vexpress",
-	"xen,xenvm",
 	NULL,
 };
 
diff --git a/arch/arm/mach-virt/virt.c b/arch/arm/mach-virt/virt.c
index adc0945..061f283 100644
--- a/arch/arm/mach-virt/virt.c
+++ b/arch/arm/mach-virt/virt.c
@@ -32,6 +32,7 @@
 
 static const char *virt_dt_match[] = {
 	"linux,dummy-virt",
+	"xen,xenvm",
 	NULL
 };
 
diff --git a/arch/arm/plat-orion/Makefile b/arch/arm/plat-orion/Makefile
index 2eca54b..9433605 100644
--- a/arch/arm/plat-orion/Makefile
+++ b/arch/arm/plat-orion/Makefile
@@ -3,6 +3,6 @@
 #
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
 
-orion-gpio-$(CONFIG_GENERIC_GPIO) += gpio.o
+orion-gpio-$(CONFIG_GPIOLIB)      += gpio.o
 obj-$(CONFIG_PLAT_ORION_LEGACY)   += irq.o pcie.o time.o common.o mpp.o
 obj-$(CONFIG_PLAT_ORION_LEGACY)   += $(orion-gpio-y)
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index e39c2ba..249fe63 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -150,7 +150,7 @@
 }
 
 /*
- * GENERIC_GPIO primitives.
+ * GPIO primitives.
  */
 static int orion_gpio_request(struct gpio_chip *chip, unsigned pin)
 {
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8dc0605..d30042e 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -2,6 +2,7 @@
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include <xen/hvm.h>
+#include <xen/interface/vcpu.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 #include <xen/interface/hvm/params.h>
@@ -9,9 +10,11 @@
 #include <xen/platform_pci.h>
 #include <xen/xenbus.h>
 #include <xen/page.h>
+#include <xen/interface/sched.h>
 #include <xen/xen-ops.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
+#include <asm/system_misc.h>
 #include <linux/interrupt.h>
 #include <linux/irqreturn.h>
 #include <linux/module.h>
@@ -32,6 +35,7 @@
 struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+static struct vcpu_info __percpu *xen_vcpu_info;
 
 /* These are unused until we support booting "pre-ballooned" */
 unsigned long xen_released_pages;
@@ -148,6 +152,47 @@
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
+static int __init xen_secondary_init(unsigned int cpu)
+{
+	struct vcpu_register_vcpu_info info;
+	struct vcpu_info *vcpup;
+	int err;
+
+	pr_info("Xen: initializing cpu%d\n", cpu);
+	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
+
+	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
+	info.offset = offset_in_page(vcpup);
+
+	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+	if (err) {
+		pr_debug("register_vcpu_info failed: err=%d\n", err);
+	} else {
+		/* This cpu is using the registered vcpu info, even if
+		   later ones fail to. */
+		per_cpu(xen_vcpu, cpu) = vcpup;
+	}
+	return 0;
+}
+
+static void xen_restart(char str, const char *cmd)
+{
+	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
+	int rc;
+	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+	if (rc)
+		BUG();
+}
+
+static void xen_power_off(void)
+{
+	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
+	int rc;
+	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+	if (rc)
+		BUG();
+}
+
 /*
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
@@ -163,6 +208,7 @@
 	const char *version = NULL;
 	const char *xen_prefix = "xen,xen-";
 	struct resource res;
+	int i;
 
 	node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!node) {
@@ -209,18 +255,26 @@
 
 	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
 	 * page, we use it in the event channel upcall and in some pvclock
-	 * related functions. We don't need the vcpu_info placement
-	 * optimizations because we don't use any pv_mmu or pv_irq op on
-	 * HVM.
+	 * related functions.
 	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
 	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
-	 * for secondary CPUs as they are brought up. */
-	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+	 * for secondary CPUs as they are brought up.
+	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+	 */
+	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+			                       sizeof(struct vcpu_info));
+	if (xen_vcpu_info == NULL)
+		return -ENOMEM;
+	for_each_online_cpu(i)
+		xen_secondary_init(i);
 
 	gnttab_init();
 	if (!xen_initial_domain())
 		xenbus_probe(NULL);
 
+	pm_power_off = xen_power_off;
+	arm_pm_restart = xen_restart;
+
 	return 0;
 }
 core_initcall(xen_guest_init);
@@ -231,6 +285,11 @@
 	return IRQ_HANDLED;
 }
 
+static __init void xen_percpu_enable_events(void *unused)
+{
+	enable_percpu_irq(xen_events_irq, 0);
+}
+
 static int __init xen_init_events(void)
 {
 	if (!xen_domain() || xen_events_irq < 0)
@@ -239,12 +298,12 @@
 	xen_init_IRQ();
 
 	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
-			"events", xen_vcpu)) {
+			"events", &xen_vcpu)) {
 		pr_err("Error requesting IRQ %d\n", xen_events_irq);
 		return -EINVAL;
 	}
 
-	enable_percpu_irq(xen_events_irq, 0);
+	on_each_cpu(xen_percpu_enable_events, NULL, 0);
 
 	return 0;
 }
@@ -259,4 +318,5 @@
 EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
 EXPORT_SYMBOL_GPL(privcmd_call);
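
Note: percpu IRQs are enabled per core, not globally; after a single
request_percpu_irq(), each CPU that should receive events has to call
enable_percpu_irq() on itself, which is what the on_each_cpu() cross-call
above arranges. The pattern, reduced to its bones (names assumed):

	static int events_irq;	/* assumed: looked up from the DT */

	static void enable_events_on_cpu(void *unused)
	{
		enable_percpu_irq(events_irq, 0);
	}

	/* run on every online CPU; third argument 0 = don't wait */
	on_each_cpu(enable_events_on_cpu, NULL, 0);
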
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index 71f7239..199cb2d 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -87,6 +87,7 @@
 HYPERCALL2(hvm_op);
 HYPERCALL2(memory_op);
 HYPERCALL2(physdev_op);
+HYPERCALL3(vcpu_op);
 
 ENTRY(privcmd_call)
 	stmdb sp!, {r4}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 73b6e76..48347dc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -6,6 +6,7 @@
 	select ARCH_WANT_FRAME_POINTERS
 	select ARM_AMBA
 	select ARM_ARCH_TIMER
+	select ARM_GIC
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
@@ -31,6 +32,8 @@
 	select OF
 	select OF_EARLY_FLATTREE
 	select PERF_USE_VMALLOC
+	select POWER_RESET
+	select POWER_SUPPLY
 	select RTC_LIB
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
@@ -92,9 +95,6 @@
 config IOMMU_HELPER
 	def_bool SWIOTLB
 
-config GENERIC_GPIO
-	bool
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -105,6 +105,7 @@
 	bool "ARMv8 software model (Versatile Express)"
 	select ARCH_REQUIRE_GPIOLIB
 	select COMMON_CLK_VERSATILE
+	select POWER_RESET_VEXPRESS
 	select VEXPRESS_CONFIG
 	help
 	  This enables support for the ARMv8 software model (Versatile
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 198682b..84fcc50 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -23,7 +23,7 @@
 	};
 
 	cpus {
-		#address-cells = <1>;
+		#address-cells = <2>;
 		#size-cells = <0>;
 
 		cpu@0 {
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 95e4072..a6e1750 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -41,7 +41,7 @@
 extern void __show_regs(struct pt_regs *);
 
 void soft_restart(unsigned long);
-extern void (*pm_restart)(const char *cmd);
+extern void (*arm_pm_restart)(char str, const char *cmd);
 
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 12f2249..58125bf 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -389,7 +389,7 @@
 __SYSCALL(365, compat_sys_recvmmsg)
 __SYSCALL(366, sys_accept4)
 __SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark_wrapper)
+__SYSCALL(368, compat_sys_fanotify_mark)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
 __SYSCALL(371, compat_sys_open_by_handle_at)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f491972..46f02c3 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,8 +81,8 @@
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);
 
-void (*pm_restart)(const char *cmd);
-EXPORT_SYMBOL_GPL(pm_restart);
+void (*arm_pm_restart)(char str, const char *cmd);
+EXPORT_SYMBOL_GPL(arm_pm_restart);
 
 void arch_cpu_idle_prepare(void)
 {
@@ -131,8 +131,8 @@
 	local_fiq_disable();
 
 	/* Now call the architecture specific reboot code. */
-	if (pm_restart)
-		pm_restart(cmd);
+	if (arm_pm_restart)
+		arm_pm_restart('h', cmd);
 
 	/*
 	 * Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index db01aa9..a1b19ed 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -104,13 +104,6 @@
 	b	sys_fallocate
 ENDPROC(compat_sys_fallocate_wrapper)
 
-compat_sys_fanotify_mark_wrapper:
-	orr	x2, x2, x3, lsl #32
-	mov	w3, w4
-	mov	w4, w5
-	b	sys_fanotify_mark
-ENDPROC(compat_sys_fanotify_mark_wrapper)
-
 #undef __SYSCALL
 #define __SYSCALL(x, y)		.quad	y	// x
 
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 36216d3..e5db797 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -21,13 +21,13 @@
 
 /*
  * x0: bits 5:0  bit offset
- *     bits 63:6 word offset
+ *     bits 31:6 word offset
  * x1: address
  */
 	.macro	bitop, name, instr
 ENTRY(	\name	)
-	and	x3, x0, #63		// Get bit offset
-	eor	x0, x0, x3		// Clear low bits
+	and	w3, w0, #63		// Get bit offset
+	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
 	lsl	x3, x2, x3		// Create mask
@@ -41,8 +41,8 @@
 
 	.macro	testop, name, instr
 ENTRY(	\name	)
-	and	x3, x0, #63		// Get bit offset
-	eor	x0, x0, x3		// Clear low bits
+	and	w3, w0, #63		// Get bit offset
+	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
 	lsl	x4, x2, x3		// Create mask
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5263817..98af6e7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -148,6 +148,7 @@
 #define VM_FAULT_BADACCESS	0x020000
 
 #define ESR_WRITE		(1 << 6)
+#define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
 /*
@@ -206,7 +207,7 @@
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	int write = esr & ESR_WRITE;
+	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
 		(write ? FAULT_FLAG_WRITE : 0);
 
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 22c4030..bdc3558 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -26,9 +26,6 @@
 	  There is an AVR32 Linux project with a web page at
 	  http://avr32linux.org/.
 
-config GENERIC_GPIO
-	def_bool y
-
 config STACKTRACE_SUPPORT
 	def_bool y
 
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 453ebe4..a117652 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -27,7 +27,7 @@
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select ARCH_HAVE_CUSTOM_GPIO_H
-	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_UID16
 	select HAVE_UNDERSCORE_SYMBOL_PREFIX
 	select VIRT_TO_BUS
@@ -52,9 +52,6 @@
 config ZONE_DMA
 	def_bool y
 
-config GENERIC_GPIO
-	def_bool y
-
 config FORCE_MAX_ZONEORDER
 	int
 	default "14"
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 66cf000..1fce086 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -141,11 +141,11 @@
 
 INSTALL_PATH ?= /tftpboot
 boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 PHONY += $(BOOT_TARGETS) install
-KBUILD_IMAGE := $(boot)/vmImage
+KBUILD_IMAGE := $(boot)/uImage
 
-all: vmImage
+all: uImage
 
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index f7d27d5..3efaa09 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -6,7 +6,7 @@
 # for more details.
 #
 
-targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
 
 ifeq ($(CONFIG_RAMKERNEL),y)
@@ -39,22 +39,22 @@
 $(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,mk_bin_xip)
 
-$(obj)/vmImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin
 	$(call if_changed,uimage,none)
 
-$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
 	$(call if_changed,uimage,bzip2)
 
-$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
 	$(call if_changed,uimage,gzip)
 
-$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
 	$(call if_changed,uimage,lzma)
 
-$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
 	$(call if_changed,uimage,lzo)
 
-$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip
+$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip
 	$(call if_changed,uimage,none)
 
 suffix-y                      := bin
@@ -64,7 +64,7 @@
 suffix-$(CONFIG_KERNEL_LZO)   := lzo
 suffix-$(CONFIG_ROMKERNEL)    := xip
 
-$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@ln -sf $(notdir $<) $@
 
 install:
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index c8db653..a107a98 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -11,7 +11,9 @@
 
 #ifdef CONFIG_SMP
 
+#include <asm/barrier.h>
 #include <linux/linkage.h>
+#include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
diff --git a/arch/blackfin/include/asm/bfin_sdh.h b/arch/blackfin/include/asm/bfin_sdh.h
index 6a4cfe2..a99957e 100644
--- a/arch/blackfin/include/asm/bfin_sdh.h
+++ b/arch/blackfin/include/asm/bfin_sdh.h
@@ -24,18 +24,27 @@
 #define CMD_INT_E          (1 << 8)    /* Command Interrupt */
 #define CMD_PEND_E         (1 << 9)    /* Command Pending */
 #define CMD_E              (1 << 10)   /* Command Enable */
+#ifdef RSI_BLKSZ
+#define CMD_CRC_CHECK_D    (1 << 11)   /* CRC Check is disabled */
+#define CMD_DATA0_BUSY     (1 << 12)   /* Check for Busy State on the DATA0 pin */
+#endif
 
 /* SDH_PWR_CTL bitmasks */
+#ifndef RSI_BLKSZ
 #define PWR_ON             0x3         /* Power On */
 #define SD_CMD_OD          (1 << 6)    /* Open Drain Output */
 #define ROD_CTL            (1 << 7)    /* Rod Control */
+#endif
 
 /* SDH_CLK_CTL bitmasks */
 #define CLKDIV             0xff        /* MC_CLK Divisor */
 #define CLK_E              (1 << 8)    /* MC_CLK Bus Clock Enable */
 #define PWR_SV_E           (1 << 9)    /* Power Save Enable */
 #define CLKDIV_BYPASS      (1 << 10)   /* Bypass Divisor */
-#define WIDE_BUS           (1 << 11)   /* Wide Bus Mode Enable */
+#define BUS_MODE_MASK      0x1800      /* Bus Mode Mask */
+#define STD_BUS_1          0x000       /* Standard Bus 1 bit mode */
+#define WIDE_BUS_4         0x800       /* Wide Bus 4 bit mode */
+#define BYTE_BUS_8         0x1000      /* Byte Bus 8 bit mode */
 
 /* SDH_RESP_CMD bitmasks */
 #define RESP_CMD           0x3f        /* Response Command */
@@ -45,7 +54,13 @@
 #define DTX_DIR            (1 << 1)    /* Data Transfer Direction */
 #define DTX_MODE           (1 << 2)    /* Data Transfer Mode */
 #define DTX_DMA_E          (1 << 3)    /* Data Transfer DMA Enable */
+#ifndef RSI_BLKSZ
 #define DTX_BLK_LGTH       (0xf << 4)  /* Data Transfer Block Length */
+#else
+
+/* Bit masks for SDH_BLK_SIZE */
+#define DTX_BLK_LGTH       0x1fff      /* Data Transfer Block Length */
+#endif
 
 /* SDH_STATUS bitmasks */
 #define CMD_CRC_FAIL       (1 << 0)    /* CMD CRC Fail */
@@ -114,10 +129,14 @@
 /* SDH_E_STATUS bitmasks */
 #define SDIO_INT_DET       (1 << 1)    /* SDIO Int Detected */
 #define SD_CARD_DET        (1 << 4)    /* SD Card Detect */
+#define SD_CARD_BUSYMODE   (1 << 31)   /* Card is in Busy mode */
+#define SD_CARD_SLPMODE    (1 << 30)   /* Card in Sleep Mode */
+#define SD_CARD_READY      (1 << 17)   /* Card Ready */
 
 /* SDH_E_MASK bitmasks */
 #define SDIO_MSK           (1 << 1)    /* Mask SDIO Int Detected */
-#define SCD_MSK            (1 << 6)    /* Mask Card Detect */
+#define SCD_MSK            (1 << 4)    /* Mask Card Detect */
+#define CARD_READY_MSK     (1 << 16)   /* Mask Card Ready */
 
 /* SDH_CFG bitmasks */
 #define CLKS_EN            (1 << 0)    /* Clocks Enable */
@@ -126,7 +145,15 @@
 #define SD_RST             (1 << 4)    /* SDMMC Reset */
 #define PUP_SDDAT          (1 << 5)    /* Pull-up SD_DAT */
 #define PUP_SDDAT3         (1 << 6)    /* Pull-up SD_DAT3 */
+#ifndef RSI_BLKSZ
 #define PD_SDDAT3          (1 << 7)    /* Pull-down SD_DAT3 */
+#else
+#define PWR_ON             0x600       /* Power On */
+#define SD_CMD_OD          (1 << 11)   /* Open Drain Output */
+#define BOOT_EN            (1 << 12)   /* Boot Enable */
+#define BOOT_MODE          (1 << 13)   /* Alternate Boot Mode */
+#define BOOT_ACK_EN        (1 << 14)   /* Boot ACK is expected */
+#endif
 
 /* SDH_RD_WAIT_EN bitmasks */
 #define RWR                (1 << 0)    /* Read Wait Request */
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 8a0fed1..0ca40dd 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -41,6 +41,7 @@
 #include <asm-generic/bitops/non-atomic.h>
 #else
 
+#include <asm/barrier.h>
 #include <asm/byteorder.h>	/* swab32 */
 #include <linux/linkage.h>
 
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index fe0ca03..ca67145 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -622,10 +622,12 @@
 #define PAGE_SIZE_4KB      0x00010000	/* 4 KB page size */
 #define PAGE_SIZE_1MB      0x00020000	/* 1 MB page size */
 #define PAGE_SIZE_4MB      0x00030000	/* 4 MB page size */
+#ifdef CONFIG_BF60x
 #define PAGE_SIZE_16KB     0x00040000	/* 16 KB page size */
 #define PAGE_SIZE_64KB     0x00050000	/* 64 KB page size */
 #define PAGE_SIZE_16MB     0x00060000	/* 16 MB page size */
 #define PAGE_SIZE_64MB     0x00070000	/* 64 MB page size */
+#endif
 #define CPLB_L1SRAM        0x00000020	/* 1=SRAM mapped in L1, 0=SRAM not
 					 * mapped to L1
 					 */
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 9b33e72..c865b33 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -335,6 +335,7 @@
 struct ddr_config {
 	u32 ddr_clk;
 	u32 dmc_ddrctl;
+	u32 dmc_effctl;
 	u32 dmc_ddrcfg;
 	u32 dmc_ddrtr0;
 	u32 dmc_ddrtr1;
@@ -348,6 +349,7 @@
 	[0] = {
 		.ddr_clk    = 125,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20705212,
 		.dmc_ddrtr1 = 0x201003CF,
@@ -358,6 +360,7 @@
 	[1] = {
 		.ddr_clk    = 133,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20806313,
 		.dmc_ddrtr1 = 0x2013040D,
@@ -368,6 +371,7 @@
 	[2] = {
 		.ddr_clk    = 150,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20A07323,
 		.dmc_ddrtr1 = 0x20160492,
@@ -378,6 +382,7 @@
 	[3] = {
 		.ddr_clk    = 166,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20A07323,
 		.dmc_ddrtr1 = 0x2016050E,
@@ -388,6 +393,7 @@
 	[4] = {
 		.ddr_clk    = 200,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20a07323,
 		.dmc_ddrtr1 = 0x2016050f,
@@ -398,6 +404,7 @@
 	[5] = {
 		.ddr_clk    = 225,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20E0A424,
 		.dmc_ddrtr1 = 0x302006DB,
@@ -408,6 +415,7 @@
 	[6] = {
 		.ddr_clk    = 250,
 		.dmc_ddrctl = 0x00000904,
+		.dmc_effctl = 0x004400C0,
 		.dmc_ddrcfg = 0x00000422,
 		.dmc_ddrtr0 = 0x20E0A424,
 		.dmc_ddrtr1 = 0x3020079E,
@@ -469,6 +477,7 @@
 			bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2);
 			bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr);
 			bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1);
+			bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl);
 			bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl);
 			break;
 		}
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 34e96ce..b49a53b 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -30,6 +30,7 @@
 {
 	int i_d, i_i;
 	unsigned long addr;
+	unsigned long cplb_pageflags, cplb_pagesize;
 
 	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
 	struct cplb_entry *i_tbl = icplb_tbl[cpu];
@@ -49,11 +50,20 @@
 	/* Cover kernel memory with 4M pages.  */
 	addr = 0;
 
-	for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+#ifdef PAGE_SIZE_16MB
+	cplb_pageflags = PAGE_SIZE_16MB;
+	cplb_pagesize = SIZE_16M;
+#else
+	cplb_pageflags = PAGE_SIZE_4MB;
+	cplb_pagesize = SIZE_4M;
+#endif
+
+
+	for (; addr < memory_start; addr += cplb_pagesize) {
 		d_tbl[i_d].addr = addr;
-		d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+		d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
 		i_tbl[i_i].addr = addr;
-		i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+		i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
 	}
 
 #ifdef CONFIG_ROMKERNEL
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index e854f90..79cc0f6 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -145,7 +145,7 @@
 	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
 	int status = bfin_read_DCPLB_STATUS();
 	int idx;
-	unsigned long d_data, base, addr1, eaddr;
+	unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags;
 
 	nr_dcplb_miss[cpu]++;
 	if (unlikely(status & FAULT_USERSUPV))
@@ -167,18 +167,37 @@
 	if (unlikely(d_data == 0))
 		return CPLB_NO_ADDR_MATCH;
 
-	addr1 = addr & ~(SIZE_4M - 1);
 	addr &= ~(SIZE_1M - 1);
 	d_data |= PAGE_SIZE_1MB;
-	if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+
+	/* BF60x supports CPLB page sizes larger than 4M */
+#ifdef PAGE_SIZE_16MB
+	cplb_pageflags = PAGE_SIZE_16MB;
+	cplb_pagesize = SIZE_16M;
+#else
+	cplb_pageflags = PAGE_SIZE_4MB;
+	cplb_pagesize = SIZE_4M;
+#endif
+
+find_pagesize:
+	addr1 = addr & ~(cplb_pagesize - 1);
+	if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) {
 		/*
 		 * This works because
 		 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
 		 */
-		d_data |= PAGE_SIZE_4MB;
+		d_data |= cplb_pageflags;
 		addr = addr1;
+		goto found_pagesize;
+	} else {
+		if (cplb_pagesize > SIZE_4M) {
+			cplb_pageflags = PAGE_SIZE_4MB;
+			cplb_pagesize = SIZE_4M;
+			goto find_pagesize;
+		}
 	}
 
+found_pagesize:
 #ifdef CONFIG_BF60x
 	if ((addr >= ASYNC_BANK0_BASE)
 		&& (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
index 404045d..5b80d59 100644
--- a/arch/blackfin/kernel/cplbinfo.c
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -17,8 +17,13 @@
 #include <asm/cplbinit.h>
 #include <asm/blackfin.h>
 
-static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
-#define page(flags)    (((flags) & 0x30000) >> 16)
+static char const page_strtbl[][4] = {
+	"1K", "4K", "1M", "4M",
+#ifdef CONFIG_BF60x
+	"16K", "64K", "16M", "64M",
+#endif
+};
+#define page(flags)    (((flags) & 0x70000) >> 16)
 #define strpage(flags) page_strtbl[page(flags)]
 
 struct cplbinfo_data {
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index fb96e60..107b306 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1314,7 +1314,7 @@
 			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
 	}
 
-	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+	seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
 		cclk/1000000, cclk%1000000,
 		sclk/1000000, sclk%1000000);
 	seq_printf(m, "bogomips\t: %lu.%02lu\n"
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 95114ed..6a3a14b 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -455,6 +455,7 @@
 static void bfin_plat_nand_init(void)
 {
 	gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
+	gpio_direction_input(BFIN_NAND_PLAT_READY);
 }
 #else
 static void bfin_plat_nand_init(void) {}
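
Note: with gpiolib, a requested GPIO has no defined direction until one is
set, so the NAND ready/busy pin must be configured as an input before the
driver polls it. The minimal sequence (error handling sketched):

	int err = gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
	if (!err)
		err = gpio_direction_input(BFIN_NAND_PLAT_READY);
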
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index a4fce03..755f0dc 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -764,7 +764,6 @@
 	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
 	.resource = bfin_twi1_resource,
 };
-#endif	/* CONFIG_BF542 */
 #endif	/* CONFIG_I2C_BLACKFIN_TWI */
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
diff --git a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
index 4954cf3..102ee40 100644
--- a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
+++ b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
@@ -312,6 +312,8 @@
 #define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val)
 #define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL)
 #define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val)
+#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL)
+#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val)
 #define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT)
 #define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val)
 #define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL)
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 06dd026..8769a90 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -264,7 +264,6 @@
 	select MTD_CFI
 	select MTD_CFI_AMDSTD
 	select MTD_JEDECPROBE if ETRAX_ARCH_V32
-	select MTD_CHAR
 	select MTD_BLOCK
 	select MTD_COMPLEX_MAPPINGS
 	help
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index af4a486..c55971a 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -404,7 +404,6 @@
 	select MTD_CFI
 	select MTD_CFI_AMDSTD
 	select MTD_JEDECPROBE
-	select MTD_CHAR
 	select MTD_BLOCK
 	select MTD_COMPLEX_MAPPINGS
 	help
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c
index b82e086..cd9f15b 100644
--- a/arch/cris/kernel/profile.c
+++ b/arch/cris/kernel/profile.c
@@ -76,7 +76,7 @@
 	entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL,
 			    &cris_proc_profile_operations);
 	if (entry) {
-		entry->size = SAMPLE_BUFFER_SIZE;
+		proc_set_size(entry, SAMPLE_BUFFER_SIZE);
 	}
 	prof_running = 1;
 
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 04dff5b..33a9792 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -30,8 +30,6 @@
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
-	select GENERIC_KERNEL_THREAD
-	select GENERIC_KERNEL_EXECVE
 	---help---
 	  Qualcomm Hexagon is a processor architecture designed for high
 	  performance and low power across a wide variety of applications.
@@ -157,9 +155,6 @@
 
 source "kernel/Kconfig.hz"
 
-config GENERIC_GPIO
-	def_bool n
-
 endmenu
 
 source "init/Kconfig"
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index e308618..67c6ccc 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -291,12 +291,12 @@
 	/*  "Nested control path" -- if the previous mode was kernel  */
 	{
 		R0 = memw(R29 + #_PT_ER_VMEST);
-		R16.L = #LO(do_work_pending);
+		R26.L = #LO(do_work_pending);
 	}
 	{
 		P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
 		if (!P0.new) jump:nt restore_all;
-		R16.H = #HI(do_work_pending);
+		R26.H = #HI(do_work_pending);
 		R0 = #VM_INT_DISABLE;
 	}
 
@@ -304,7 +304,7 @@
 	 * Check also the return from fork/system call, normally coming back from
 	 * user mode
 	 *
-	 * R16 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
+	 * R26 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
 	 */
 
 check_work_pending:
@@ -313,7 +313,7 @@
 	{
 		R0 = R29;  /*  regs should still be at top of stack  */
 		R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
-		callr R16;
+		callr R26;
 	}
 
 	{
@@ -375,11 +375,11 @@
 ret_from_fork:
 	{
 		call schedule_tail
-		R16.H = #HI(do_work_pending);
+		R26.H = #HI(do_work_pending);
 	}
 	{
 		P0 = cmp.eq(R24, #0);
-		R16.L = #LO(do_work_pending);
+		R26.L = #LO(do_work_pending);
 		R0 = #VM_INT_DISABLE;
 	}
 	if P0 jump check_work_pending
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index d393f84..1a2b774 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -101,9 +101,6 @@
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
-config GENERIC_GPIO
-	bool
-
 config DMI
 	bool
 	default y
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6de8133..821170e 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -35,9 +35,6 @@
 config ARCH_HAS_ILOG2_U64
 	bool
 
-config GENERIC_GPIO
-	bool
-
 config GENERIC_HWEIGHT
 	bool
 	default y
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index b1cfff8..33013df 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -22,8 +22,7 @@
 
 config COLDFIRE
 	bool "Coldfire CPU family support"
-	select GENERIC_GPIO
-	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARCH_REQUIRE_GPIOLIB
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select CPU_HAS_NO_BITFIELDS
 	select CPU_HAS_NO_MULDIV64
@@ -224,13 +223,25 @@
 	help
 	  Motorola ColdFire 5307 processor support.
 
+config M53xx
+	bool
+
 config M532x
 	bool "MCF532x"
 	depends on !MMU
+	select M53xx
 	select HAVE_CACHE_CB
 	help
 	  Freescale (Motorola) ColdFire 532x processor support.
 
+config M537x
+	bool "MCF537x"
+	depends on !MMU
+	select M53xx
+	select HAVE_CACHE_CB
+	help
+	  Freescale ColdFire 537x processor support.
+
 config M5407
 	bool "MCF5407"
 	depends on !MMU
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 7240584..b9ab0a6 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -358,6 +358,13 @@
 	help
 	  Support for the senTec COBRA5329 board.
 
+config M5373EVB
+	bool "Freescale M5373EVB board support"
+	depends on M537x
+	select FREESCALE
+	help
+	  Support for the Freescale M5373EVB board.
+
 config M5407C3
 	bool "Motorola M5407C3 board support"
 	depends on M5407
@@ -539,15 +546,6 @@
 	  68000 type variants the vectors are at the base of the boot device
 	  on system startup.
 
-config ROMVECSIZE
-	hex "Size of ROM vector region (in bytes)"
-	default "0x400"
-	depends on ROM
-	help
-	  Define the size of the vector region in ROM. For most 68000
-	  variants this would be 0x400 bytes in size. Set to 0 if you do
-	  not want a vector region at the start of the ROM.
-
 config ROMSTART
 	hex "Address of the base of system image in ROM"
 	default "0x400"
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 2f02acf..7f7830f 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -45,6 +45,7 @@
 cpuflags-$(CONFIG_M54xx)	:= $(call cc-option,-mcpu=5475,-m5200)
 cpuflags-$(CONFIG_M5407)	:= $(call cc-option,-mcpu=5407,-m5200)
 cpuflags-$(CONFIG_M532x)	:= $(call cc-option,-mcpu=532x,-m5307)
+cpuflags-$(CONFIG_M537x)	:= $(call cc-option,-mcpu=537x,-m5307)
 cpuflags-$(CONFIG_M5307)	:= $(call cc-option,-mcpu=5307,-m5200)
 cpuflags-$(CONFIG_M528x)	:= $(call cc-option,-mcpu=528x,-m5307)
 cpuflags-$(CONFIG_M5275)	:= $(call cc-option,-mcpu=5275,-m5307)
diff --git a/arch/m68k/include/asm/commproc.h b/arch/m68k/include/asm/commproc.h
index a739985..66a36bd 100644
--- a/arch/m68k/include/asm/commproc.h
+++ b/arch/m68k/include/asm/commproc.h
@@ -480,23 +480,6 @@
 #define SICR_ENET_CLKRT	((uint)0x0000003d)
 #endif
 
-#ifdef CONFIG_RPXLITE
-/* This ENET stuff is for the MPC850 with ethernet on SCC2.  Some of
- * this may be unique to the RPX-Lite configuration.
- * Note TENA is on Port B.
- */
-#define PA_ENET_RXD	((ushort)0x0004)
-#define PA_ENET_TXD	((ushort)0x0008)
-#define PA_ENET_TCLK	((ushort)0x0200)
-#define PA_ENET_RCLK	((ushort)0x0800)
-#define PB_ENET_TENA	((uint)0x00002000)
-#define PC_ENET_CLSN	((ushort)0x0040)
-#define PC_ENET_RENA	((ushort)0x0080)
-
-#define SICR_ENET_MASK	((uint)0x0000ff00)
-#define SICR_ENET_CLKRT	((uint)0x00003d00)
-#endif
-
 #ifdef CONFIG_BSEIP
 /* This ENET stuff is for the MPC823 with ethernet on SCC2.
  * This is unique to the BSE ip-Engine board.
diff --git a/arch/m68k/include/asm/dbg.h b/arch/m68k/include/asm/dbg.h
deleted file mode 100644
index 27af327..0000000
--- a/arch/m68k/include/asm/dbg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#define DEBUG 1
-#ifdef CONFIG_COLDFIRE
-#define	BREAK asm volatile ("halt")
-#else
-#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0
-#endif
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index 0ff3fc6..429fe26 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -39,7 +39,7 @@
 #define MAX_M68K_DMA_CHANNELS 4
 #elif defined(CONFIG_M5272)
 #define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M532x)
+#elif defined(CONFIG_M53xx)
 #define MAX_M68K_DMA_CHANNELS 0
 #else
 #define MAX_M68K_DMA_CHANNELS 2
diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
index cd952b0..3177ce8 100644
--- a/arch/m68k/include/asm/m53xxacr.h
+++ b/arch/m68k/include/asm/m53xxacr.h
@@ -55,8 +55,8 @@
 #define	CACHE_SIZE	0x2000		/* 8k of unified cache */
 #define	ICACHE_SIZE	CACHE_SIZE
 #define	DCACHE_SIZE	CACHE_SIZE
-#elif defined(CONFIG_M532x)
-#define	CACHE_SIZE	0x4000		/* 32k of unified cache */
+#elif defined(CONFIG_M53xx)
+#define	CACHE_SIZE	0x4000		/* 16k of unified cache */
 #define	ICACHE_SIZE	CACHE_SIZE
 #define	DCACHE_SIZE	CACHE_SIZE
 #endif
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m53xxsim.h
similarity index 99%
rename from arch/m68k/include/asm/m532xsim.h
rename to arch/m68k/include/asm/m53xxsim.h
index 8668e47..faa1a21 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m53xxsim.h
@@ -1,15 +1,15 @@
 /****************************************************************************/
 
 /*
- *	m532xsim.h -- ColdFire 5329 registers
+ *	m53xxsim.h -- ColdFire 5329 registers
  */
 
 /****************************************************************************/
-#ifndef	m532xsim_h
-#define	m532xsim_h
+#ifndef	m53xxsim_h
+#define	m53xxsim_h
 /****************************************************************************/
 
-#define	CPU_NAME		"COLDFIRE(m532x)"
+#define	CPU_NAME		"COLDFIRE(m53xx)"
 #define	CPU_INSTR_PER_JIFFY	3
 #define	MCF_BUSCLK		(MCF_CLK / 3)
 
@@ -107,7 +107,7 @@
 /*
  *  QSPI module.
  */
-#define	MCFQSPI_BASE		0xFC058000	/* Base address of QSPI */
+#define	MCFQSPI_BASE		0xFC05C000	/* Base address of QSPI */
 #define	MCFQSPI_SIZE		0x40		/* Size of QSPI region */
 
 #define	MCFQSPI_CS0		84
@@ -1238,4 +1238,4 @@
 #define MCFEPORT_EPFR                 (0xFC094006)
 
 /********************************************************************/
-#endif	/* m532xsim_h */
+#endif	/* m53xxsim_h */
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 192bbfe..6d13cae 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -96,8 +96,13 @@
  */
 #define ACR0_MODE	(ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
 			 ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#if defined(CONFIG_CACHE_COPYBACK)
 #define ACR1_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
-			 ACR_ENABLE+ACR_SUPER+ACR_SP)
+			 ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP)
+#else
+#define ACR1_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+			 ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT)
+#endif
 #define ACR2_MODE	0
 #define ACR3_MODE	(ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
 			 ACR_ENABLE+ACR_SUPER+ACR_SP)
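For readers unfamiliar with ColdFire ACRs: the hunk above picks the cache mode for the RAM mapping at build time. A symbolic expansion of the copyback case (illustrative only; the macro values live in the same header):

/* Illustrative expansion, copyback case:
 *   ACR1_MODE = ACR_BA(CONFIG_RAMBASE)    -- region base address
 *             + ACR_ADMSK(CONFIG_RAMSIZE) -- address mask (region size)
 *             + ACR_ENABLE + ACR_SUPER + ACR_SP
 *             + ACR_CM_CP                 -- cache mode: copyback
 * With CONFIG_CACHE_COPYBACK unset, ACR_CM_WT (writethrough) is used
 * instead, so stores go straight to RAM rather than sitting dirty in
 * the cache.
 */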
diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h
index fa1059f..c41ebf4 100644
--- a/arch/m68k/include/asm/mcfgpio.h
+++ b/arch/m68k/include/asm/mcfgpio.h
@@ -104,7 +104,7 @@
 #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
     defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \
+    defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \
     defined(CONFIG_M5441x)
 
 /* These parts have GPIO organized by 8 bit ports */
@@ -139,7 +139,7 @@
 
 #if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+    defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 /*
  * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
  * read-modify-write to change an output and a GPIO module which has separate
@@ -195,7 +195,7 @@
 		return MCFSIM2_GPIO1READ;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
 	if (gpio < 8)
 		return MCFEPORT_EPPDR;
@@ -237,7 +237,7 @@
 		return MCFSIM2_GPIO1WRITE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
 	if (gpio < 8)
 		return MCFEPORT_EPDR;
@@ -279,7 +279,7 @@
 		return MCFSIM2_GPIO1ENABLE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
 	if (gpio < 8)
 		return MCFEPORT_EPDDR;
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index a04fd9b..bc867de 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -36,8 +36,8 @@
 #elif defined(CONFIG_M5307)
 #include <asm/m5307sim.h>
 #include <asm/mcfintc.h>
-#elif defined(CONFIG_M532x)
-#include <asm/m532xsim.h>
+#elif defined(CONFIG_M53xx)
+#include <asm/m53xxsim.h>
 #elif defined(CONFIG_M5407)
 #include <asm/m5407sim.h>
 #include <asm/mcfintc.h>
diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h
index da2fa43..089f0f1 100644
--- a/arch/m68k/include/asm/mcftimer.h
+++ b/arch/m68k/include/asm/mcftimer.h
@@ -19,7 +19,7 @@
 #define	MCFTIMER_TRR		0x04		/* Timer Reference (r/w) */
 #define	MCFTIMER_TCR		0x08		/* Timer Capture reg (r/w) */
 #define	MCFTIMER_TCN		0x0C		/* Timer Counter reg (r/w) */
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define	MCFTIMER_TER		0x03		/* Timer Event reg (r/w) */
 #else
 #define	MCFTIMER_TER		0x11		/* Timer Event reg (r/w) */
diff --git a/arch/m68k/platform/coldfire/Makefile b/arch/m68k/platform/coldfire/Makefile
index 02591a10..68f0fac 100644
--- a/arch/m68k/platform/coldfire/Makefile
+++ b/arch/m68k/platform/coldfire/Makefile
@@ -25,7 +25,7 @@
 obj-$(CONFIG_M5272)	+= m5272.o intc-5272.o timers.o
 obj-$(CONFIG_M528x)	+= m528x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5307)	+= m5307.o timers.o intc.o reset.o
-obj-$(CONFIG_M532x)	+= m532x.o timers.o intc-simr.o reset.o
+obj-$(CONFIG_M53xx)	+= m53xx.o timers.o intc-simr.o reset.o
 obj-$(CONFIG_M5407)	+= m5407.o timers.o intc.o reset.o
 obj-$(CONFIG_M54xx)	+= m54xx.o sltimers.o intc-2.o
 obj-$(CONFIG_M5441x)	+= m5441x.o pit.o intc-simr.o reset.o
diff --git a/arch/m68k/platform/coldfire/m532x.c b/arch/m68k/platform/coldfire/m53xx.c
similarity index 97%
rename from arch/m68k/platform/coldfire/m532x.c
rename to arch/m68k/platform/coldfire/m53xx.c
index 7951d1d..5286f98 100644
--- a/arch/m68k/platform/coldfire/m532x.c
+++ b/arch/m68k/platform/coldfire/m53xx.c
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *	linux/arch/m68knommu/platform/532x/config.c
+ *	m53xx.c -- platform support for ColdFire 53xx based boards
  *
  *	Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
  *	Copyright (C) 2000, Lineo (www.lineo.com)
@@ -118,7 +118,8 @@
 	&__clk_0_24,	/* mcfuart.0 */
 	&__clk_0_25,	/* mcfuart.1 */
 	&__clk_0_26,	/* mcfuart.2 */
-
+	&__clk_0_28,	/* mcftmr.0 */
+	&__clk_0_29,	/* mcftmr.1 */
 	&__clk_0_32,	/* mcfpit.0 */
 	&__clk_0_33,	/* mcfpit.1 */
 	&__clk_0_37,	/* mcfeport.0 */
@@ -134,8 +135,6 @@
 	&__clk_0_17,	/* edma */
 	&__clk_0_22,	/* mcfi2c.0 */
 	&__clk_0_23,	/* mcfqspi.0 */
-	&__clk_0_28,	/* mcftmr.0 */
-	&__clk_0_29,	/* mcftmr.1 */
 	&__clk_0_30,	/* mcftmr.2 */
 	&__clk_0_31,	/* mcftmr.3 */
 	&__clk_0_34,	/* mcfpit.2 */
@@ -153,7 +152,7 @@
 };
 
 
-static void __init m532x_clk_init(void)
+static void __init m53xx_clk_init(void)
 {
 	unsigned i;
 
@@ -169,7 +168,7 @@
 
 #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
-static void __init m532x_qspi_init(void)
+static void __init m53xx_qspi_init(void)
 {
 	/* setup QSPI pins for QSPI with gpio CS control */
 	writew(0x01f0, MCFGPIO_PAR_QSPI);
@@ -179,7 +178,7 @@
 
 /***************************************************************************/
 
-static void __init m532x_uarts_init(void)
+static void __init m53xx_uarts_init(void)
 {
 	/* UART GPIO initialization */
 	writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
@@ -187,7 +186,7 @@
 
 /***************************************************************************/
 
-static void __init m532x_fec_init(void)
+static void __init m53xx_fec_init(void)
 {
 	u8 v;
 
@@ -217,11 +216,11 @@
 	}
 #endif
 	mach_sched_init = hw_timer_init;
-	m532x_clk_init();
-	m532x_uarts_init();
-	m532x_fec_init();
+	m53xx_clk_init();
+	m53xx_uarts_init();
+	m53xx_fec_init();
 #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
-	m532x_qspi_init();
+	m53xx_qspi_init();
 #endif
 
 #ifdef CONFIG_BDM_DISABLE
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c
index 51f6d2a..d06068e 100644
--- a/arch/m68k/platform/coldfire/timers.c
+++ b/arch/m68k/platform/coldfire/timers.c
@@ -36,7 +36,7 @@
  */
 void coldfire_profile_init(void);
 
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define	__raw_readtrr	__raw_readl
 #define	__raw_writetrr	__raw_writel
 #else
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 6f16c14..dcd9440 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -52,9 +52,6 @@
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
-config GENERIC_GPIO
-	def_bool n
-
 config NO_IOPORT
 	def_bool y
 
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 54237af..d22a4ec 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -54,9 +54,6 @@
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
-config GENERIC_GPIO
-	bool
-
 config GENERIC_CSUM
 	def_bool y
 
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index d2b097a..3649a8b 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -17,7 +17,6 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
-CONFIG_OPT_LIB_ASM=y
 CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 41cc841..d52abb6 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -153,7 +153,5 @@
 static inline void __init xilinx_pci_init(void) { return; }
 #endif
 
-#include <asm-generic/pci-dma-compat.h>
-
 #endif	/* __KERNEL__ */
 #endif /* __ASM_MICROBLAZE_PCI_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index a1ab5f0..efe59d8 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -90,17 +90,25 @@
 
 #else
 
-/*
- * Address is valid if:
- *  - "addr", "addr + size" and "size" are all below the limit
- */
-#define access_ok(type, addr, size) \
-	(get_fs().seg >= (((unsigned long)(addr)) | \
-		(size) | ((unsigned long)(addr) + (size))))
+static inline int access_ok(int type, const void __user *addr,
+							unsigned long size)
+{
+	if (!size)
+		goto ok;
 
-/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
- type?"WRITE":"READ",addr,size,get_fs().seg)) */
-
+	if ((get_fs().seg < ((unsigned long)addr)) ||
+			(get_fs().seg < ((unsigned long)addr + size - 1))) {
+		pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+			type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+			(u32)get_fs().seg);
+		return 0;
+	}
+ok:
+	pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+			type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+			(u32)get_fs().seg);
+	return 1;
+}
 #endif
 
 #ifdef CONFIG_MMU
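The macro above is replaced by a checked inline that accepts empty ranges and verifies both ends of the span against the segment limit. A minimal caller sketch, assuming the usual VERIFY_READ flag and a copy_from_user() follow-up (the function and buffer names are hypothetical):

static long example_read_from_user(const char __user *buf, unsigned long len)
{
	char kbuf[64];

	if (len > sizeof(kbuf))
		return -EINVAL;
	/* Reject the copy unless [buf, buf + len) lies entirely below
	 * get_fs().seg; otherwise copy_from_user() could be pointed at
	 * kernel addresses.
	 */
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;
	return copy_from_user(kbuf, buf, len) ? -EFAULT : 0;
}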
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 0b2299b..410398f 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -37,6 +37,8 @@
 	{"8.20.a", 0x15},
 	{"8.20.b", 0x16},
 	{"8.30.a", 0x17},
+	{"8.40.a", 0x18},
+	{"8.40.b", 0x19},
 	{NULL, 0},
 };
 
@@ -57,6 +59,9 @@
 	{"virtex6", 0xe},
 	/* FIXME There is no key code defined for spartan2 */
 	{"spartan2", 0xf0},
+	{"kintex7", 0x10},
+	{"artix7", 0x11},
+	{"zynq7000", 0x12},
 	{NULL, 0},
 };
 
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index eef84de..fcc797f 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -112,16 +112,16 @@
  * copy command line directly to cmd_line placed in data section.
  */
 	beqid	r5, skip	/* Skip if NULL pointer */
-	or	r6, r0, r0		/* incremment */
+	or	r11, r0, r0		/* increment */
 	ori	r4, r0, cmd_line	/* load address of command line */
 	tophys(r4,r4)			/* convert to phys address */
 	ori	r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
 	/* r2 = r5 + r11 - r5 contains pointer to command line */
-	lbu		r2, r5, r6
+	lbu	r2, r5, r11
 	beqid	r2, skip		/* Skip if no data */
-	sb		r2, r4, r6		/* addr[r4+r6]= r2*/
-	addik	r6, r6, 1		/* increment counting */
+	sb	r2, r4, r11		/* addr[r4+r11] = r2 */
+	addik	r11, r11, 1		/* increment counting */
 	bgtid	r3, _copy_command_line	/* loop for all entries       */
 	addik	r3, r3, -1		/* decrement loop */
 	addik	r5, r4, 0		/* add new space for command line */
@@ -131,13 +131,13 @@
 
 #ifdef NOT_COMPILE
 /* save bram context */
-	or	r6, r0, r0				/* incremment */
+	or	r11, r0, r0				/* increment */
 	ori	r4, r0, TOPHYS(_bram_load_start)	/* save bram context */
 	ori	r3, r0, (LMB_SIZE - 4)
 _copy_bram:
-	lw	r7, r0, r6		/* r7 = r0 + r6 */
-	sw	r7, r4, r6		/* addr[r4 + r6] = r7*/
-	addik	r6, r6, 4		/* increment counting */
+	lw	r7, r0, r11		/* r7 = r0 + r11 */
+	sw	r7, r4, r11		/* addr[r4 + r11] = r7 */
+	addik	r11, r11, 4		/* increment counting */
 	bgtid	r3, _copy_bram		/* loop for all entries */
 	addik	r3, r3, -4		/* decrement loop */
 #endif
@@ -303,8 +303,8 @@
 	 * the exception vectors, using a 4k real==virtual mapping.
 	 */
 	/* Use temporary TLB_ID for LMB - clear this temporary mapping later */
-	ori	r6, r0, MICROBLAZE_LMB_TLB_ID
-	mts     rtlbx,r6
+	ori	r11, r0, MICROBLAZE_LMB_TLB_ID
+	mts     rtlbx,r11
 
 	ori	r4,r0,(TLB_WR | TLB_EX)
 	ori	r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 8778adf..d85fa3a 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -172,4 +172,6 @@
 	 * and commits this patch.  ~~gcl */
 	root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
 							(void *)intr_mask);
+
+	irq_set_default_host(root_domain);
 }
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index a558938..7d1a9c8 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -160,3 +160,8 @@
 	return 0; /* MicroBlaze has no separate FPU registers */
 }
 #endif /* CONFIG_MMU */
+
+void arch_cpu_idle(void)
+{
+       local_irq_enable();
+}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 4ec137d..b38ae3a 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -404,10 +404,11 @@
 
 #if defined(CONFIG_BLK_DEV_INITRD)
 	/* Remove the init RAM disk from the available memory. */
-/*	if (initrd_start) {
-		mem_pieces_remove(&phys_avail, __pa(initrd_start),
-				  initrd_end - initrd_start, 1);
-	}*/
+	if (initrd_start) {
+		unsigned long size;
+		size = initrd_end - initrd_start;
+		memblock_reserve(virt_to_phys(initrd_start), size);
+	}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 	/* Initialize the MMU hardware */
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 9ea521e..bdb8ea1 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -30,7 +30,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
-#include <linux/pci.h>
 #include <linux/export.h>
 
 #include <asm/processor.h>
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 7dd65cf..d2cfe45 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -17,3 +17,7 @@
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e5f3794..7a58ab9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -61,8 +61,7 @@
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_GPIO
-	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARCH_REQUIRE_GPIOLIB
 	select SYS_SUPPORTS_ZBOOT
 	select USB_ARCH_HAS_OHCI
 	select USB_ARCH_HAS_EHCI
@@ -225,7 +224,6 @@
 	select SYS_SUPPORTS_ZBOOT_UART16550
 	select DMA_NONCOHERENT
 	select IRQ_CPU
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
 	select HAVE_PWM
@@ -306,7 +304,6 @@
 	select HW_HAS_PCI
 	select I8253
 	select I8259
-	select MIPS_BOARDS_GEN
 	select MIPS_BONITO64
 	select MIPS_CPU_SCACHE
 	select PCI_GT64XXX_PCI0
@@ -337,12 +334,12 @@
 	select BOOT_RAW
 	select CEVT_R4K
 	select CSRC_R4K
+	select CSRC_GIC
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select DMA_NONCOHERENT
 	select IRQ_CPU
 	select IRQ_GIC
-	select MIPS_BOARDS_GEN
 	select MIPS_CPU_SCACHE
 	select MIPS_MSC
 	select SYS_HAS_CPU_MIPS32_R1
@@ -354,6 +351,7 @@
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_SMARTMIPS
+	select SYS_SUPPORTS_MICROMIPS
 	select USB_ARCH_HAS_EHCI
 	select USB_EHCI_BIG_ENDIAN_DESC
 	select USB_EHCI_BIG_ENDIAN_MMIO
@@ -912,6 +910,9 @@
 config CEVT_R4K
 	bool
 
+config CEVT_GIC
+	bool
+
 config CEVT_SB1250
 	bool
 
@@ -937,7 +938,6 @@
 	bool
 
 config GPIO_TXX9
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	bool
 
@@ -985,9 +985,6 @@
 config MIPS_NILE4
 	bool
 
-config MIPS_DISABLE_OBSOLETE_IDE
-	bool
-
 config SYNC_R4K
 	bool
 
@@ -1009,9 +1006,6 @@
 config ISA_DMA_API
 	bool
 
-config GENERIC_GPIO
-	bool
-
 config HOLES_IN_ZONE
 	bool
 
@@ -1081,9 +1075,6 @@
 config IRQ_GIC
 	bool
 
-config MIPS_BOARDS_GEN
-	bool
-
 config PCI_GT64XXX_PCI0
 	bool
 
@@ -1112,7 +1103,6 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_BIG_ENDIAN
-	select GENERIC_GPIO
 	select CPU_MIPSR2_IRQ_VI
 
 config SOC_PNX8335
@@ -1154,7 +1144,7 @@
 
 config MIPS_L1_CACHE_SHIFT
 	int
-	default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+	default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
 	default "6" if MIPS_CPU_SCACHE
 	default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
 	default "5"
@@ -1203,7 +1193,6 @@
 	bool "Loongson 2F"
 	depends on SYS_HAS_CPU_LOONGSON2F
 	select CPU_LOONGSON2
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  The Loongson 2F processor implements the MIPS III instruction set
@@ -1244,6 +1233,7 @@
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select HAVE_KVM
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture.  Most modern embedded systems with a 32-bit
@@ -1744,6 +1734,20 @@
 
 endchoice
 
+config KVM_GUEST
+	bool "KVM Guest Kernel"
+	help
+	  Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+	int "KVM Host Processor Frequency (MHz)"
+	depends on KVM_GUEST
+	default 500
+	help
+	  Select this option if building a guest kernel for KVM to skip
+	  RTC emulation when determining the guest CPU frequency. Instead, the
+	  guest processor frequency is derived automatically from the host
+	  frequency.
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
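KVM_HOST_FREQ exists because a trap-and-emulate guest has no cheap RTC to calibrate against. A sketch of the derivation, assuming the CP0 count register ticks at half the CPU clock (a common MIPS32 arrangement; the divisor is an assumption, not taken from this patch):

/* Illustrative only: derive the guest timer frequency from the
 * configured host frequency instead of calibrating via emulated RTC.
 */
static unsigned int guest_count_freq(void)
{
	/* CONFIG_KVM_HOST_FREQ is in MHz, e.g. 500. */
	unsigned int cpu_hz = CONFIG_KVM_HOST_FREQ * 1000000;

	return cpu_hz / 2;	/* assumed count/clock ratio */
}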
@@ -1819,6 +1823,15 @@
 	  The page size is not necessarily 4KB.  Keep this in mind
 	  when choosing a value for this option.
 
+config CEVT_GIC
+	bool "Use GIC global counter for clock events"
+	depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+	help
+	  Use the GIC global counter for the clock events. The R4K clock
+	  event driver is always present, so if the platform ends up not
+	  detecting a GIC, it will fall back to the R4K timer for the
+	  generation of clock events.
+
 config BOARD_SCACHE
 	bool
 
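The fallback described in the CEVT_GIC help text relies on the clockevent core choosing the highest-rated registered device, so nothing needs to be torn down when no GIC is found. A skeletal sketch of that arrangement (the ratings are illustrative, not taken from the driver):

#include <linux/clockchips.h>

/* Both devices can be registered; the core prefers the higher rating,
 * so a platform without a GIC simply keeps using the R4K timer.
 */
static struct clock_event_device r4k_clockevent = {
	.name	= "r4k-timer",
	.rating	= 300,
};

static struct clock_event_device gic_clockevent = {
	.name	= "gic-timer",
	.rating	= 350,		/* wins when a GIC was detected */
};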
@@ -2024,6 +2037,7 @@
 	depends on CPU_SB1 && CPU_SB1_PASS_2
 	default y
 
+
 config 64BIT_PHYS_ADDR
 	bool
 
@@ -2042,6 +2056,13 @@
 	  you don't know you probably don't have SmartMIPS and should say N
 	  here.
 
+config CPU_MICROMIPS
+	depends on SYS_SUPPORTS_MICROMIPS
+	bool "Build kernel using microMIPS ISA"
+	help
+	  When this option is enabled the kernel will be built using the
+	  microMIPS ISA.
+
 config CPU_HAS_WB
 	bool
 
@@ -2104,6 +2125,9 @@
 config SYS_SUPPORTS_SMARTMIPS
 	bool
 
+config SYS_SUPPORTS_MICROMIPS
+	bool
+
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 	depends on !NUMA && !CPU_LOONGSON2
@@ -2564,3 +2588,5 @@
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 6f7978f..dd58a04 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -114,6 +114,7 @@
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,-msmartmips)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
 				   -fno-omit-frame-pointer
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index c8862bd..7032ac7 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -31,7 +31,6 @@
 	select ALCHEMY_GPIOINT_AU1000
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
-	select MIPS_DISABLE_OBSOLETE_IDE
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@
 	select ARCH_REQUIRE_GPIOLIB
 	select HW_HAS_PCI
 	select DMA_COHERENT
-	select MIPS_DISABLE_OBSOLETE_IDE
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 
@@ -57,7 +55,6 @@
 	select ALCHEMY_GPIOINT_AU1000
 	select HW_HAS_PCI
 	select DMA_NONCOHERENT
-	select MIPS_DISABLE_OBSOLETE_IDE
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index fa1bdd1..b3afcdd 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -5,32 +5,14 @@
 
 
 #
-# AMD Alchemy Pb1100 eval board
-#
-platform-$(CONFIG_MIPS_PB1100)	+= alchemy/devboards/
-load-$(CONFIG_MIPS_PB1100)	+= 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1500 eval board
-#
-platform-$(CONFIG_MIPS_PB1500)	+= alchemy/devboards/
-load-$(CONFIG_MIPS_PB1500)	+= 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1550 eval board
-#
-platform-$(CONFIG_MIPS_PB1550)	+= alchemy/devboards/
-load-$(CONFIG_MIPS_PB1550)	+= 0xffffffff80100000
-
-#
-# AMD Alchemy Db1000/Db1500/Db1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
 #
 platform-$(CONFIG_MIPS_DB1000)	+= alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1000)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1000)	+= 0xffffffff80100000
 
 #
-# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards
+# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
 #
 platform-$(CONFIG_MIPS_DB1235)	+= alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1235)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
index 28abfee..92dfa48 100644
--- a/arch/mips/ar7/memory.c
+++ b/arch/mips/ar7/memory.c
@@ -30,7 +30,6 @@
 #include <asm/sections.h>
 
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mips-boards/prom.h>
 
 static int __init memsize(void)
 {
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index d5b3c90..a0233a2 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -51,20 +51,6 @@
 		cpu_wait();
 }
 
-static void __init ath79_detect_mem_size(void)
-{
-	unsigned long size;
-
-	for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
-	     size <<= 1) {
-		if (!memcmp(ath79_detect_mem_size,
-			    ath79_detect_mem_size + size, 1024))
-			break;
-	}
-
-	add_memory_region(0, size, BOOT_MEM_RAM);
-}
-
 static void __init ath79_detect_sys_type(void)
 {
 	char *chip = "????";
@@ -212,7 +198,7 @@
 					 AR71XX_DDR_CTRL_SIZE);
 
 	ath79_detect_sys_type();
-	ath79_detect_mem_size();
+	detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
 	ath79_clocks_init();
 
 	_machine_restart = ath79_restart;
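detect_memory_region() is a generic MIPS helper that keeps the doubling probe the removed ath79 function used. A hedged reconstruction from the code above (the helper's actual body may differ in detail):

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min,
				 phys_addr_t sz_max)
{
	void *base = (void *)KSEG1ADDR(start);	/* uncached view */
	phys_addr_t size;

	/* Keep doubling the candidate size; when base + size reads back
	 * identical to base, the address bus has wrapped and the current
	 * size is the amount of RAM actually fitted.
	 */
	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(base, base + size, 1024))
			break;
	}

	add_memory_region(start, size, BOOT_MEM_RAM);
}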
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
index d03e879..5639662 100644
--- a/arch/mips/bcm63xx/Kconfig
+++ b/arch/mips/bcm63xx/Kconfig
@@ -25,6 +25,10 @@
 	bool "support 6358 CPU"
 	select HW_HAS_PCI
 
+config BCM63XX_CPU_6362
+	bool "support 6362 CPU"
+	select HW_HAS_PCI
+
 config BCM63XX_CPU_6368
 	bool "support 6368 CPU"
 	select HW_HAS_PCI
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 9aa7d44..a9505c4 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -726,11 +726,11 @@
 	u32 val;
 
 	/* read base address of boot chip select (0)
-	 * 6328 does not have MPI but boots from a fixed address
+	 * 6328/6362 do not have MPI but boot from a fixed address
 	 */
-	if (BCMCPU_IS_6328())
+	if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
 		val = 0x18000000;
-	else {
+	} else {
 		val = bcm_mpi_readl(MPI_CSBASE_REG(0));
 		val &= MPI_CSBASE_BASE_MASK;
 	}
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index b9e948d..c726a97 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -15,7 +15,13 @@
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_reset.h>
-#include <bcm63xx_clk.h>
+
+struct clk {
+	void		(*set)(struct clk *, int);
+	unsigned int	rate;
+	unsigned int	usage;
+	int		id;
+};
 
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -119,11 +125,18 @@
  */
 static void enetsw_set(struct clk *clk, int enable)
 {
-	if (!BCMCPU_IS_6368())
+	if (BCMCPU_IS_6328())
+		bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+	else if (BCMCPU_IS_6368())
+		bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
+				CKCTL_6368_SWPKT_USB_EN |
+				CKCTL_6368_SWPKT_SAR_EN,
+				enable);
+	else
 		return;
-	bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
-			CKCTL_6368_SWPKT_USB_EN |
-			CKCTL_6368_SWPKT_SAR_EN, enable);
+
 	if (enable) {
 		/* reset switch core after clock change */
 		bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@
 		bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
 	else if (BCMCPU_IS_6348())
 		bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
 	else if (BCMCPU_IS_6368())
 		bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
 }
@@ -175,6 +190,8 @@
 {
 	if (BCMCPU_IS_6328())
 		bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
 	else if (BCMCPU_IS_6368())
 		bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
 }
@@ -196,6 +213,8 @@
 		mask = CKCTL_6348_SPI_EN;
 	else if (BCMCPU_IS_6358())
 		mask = CKCTL_6358_SPI_EN;
+	else if (BCMCPU_IS_6362())
+		mask = CKCTL_6362_SPI_EN;
 	else
 		/* BCMCPU_IS_6368 */
 		mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@
  */
 static void ipsec_set(struct clk *clk, int enable)
 {
-	bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+	if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+	else if (BCMCPU_IS_6368())
+		bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
 }
 
 static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@
 
 static void pcie_set(struct clk *clk, int enable)
 {
-	bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+	if (BCMCPU_IS_6328())
+		bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+	else if (BCMCPU_IS_6362())
+		bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
 }
 
 static struct clk clk_pcie = {
@@ -315,9 +340,9 @@
 		return &clk_periph;
 	if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
 		return &clk_pcm;
-	if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+	if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
 		return &clk_ipsec;
-	if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+	if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
 		return &clk_pcie;
 	return ERR_PTR(-ENOENT);
 }
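Consumers are unaffected by the per-chip rework: the name-based lookup and the set() handlers hide which CKCTL mask applies. A minimal consumer using the standard clk API (illustrative; error handling trimmed):

/* Enable the USB host clock; on a 6362 this lands in usbh_set(),
 * which now selects CKCTL_6362_USBH_EN.
 */
struct clk *clk = clk_get(NULL, "usbh");

if (!IS_ERR(clk))
	clk_enable(clk);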
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index a7afb28..79fe32d 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -25,7 +25,7 @@
 EXPORT_SYMBOL(bcm63xx_irqs);
 
 static u16 bcm63xx_cpu_id;
-static u16 bcm63xx_cpu_rev;
+static u8 bcm63xx_cpu_rev;
 static unsigned int bcm63xx_cpu_freq;
 static unsigned int bcm63xx_memory_size;
 
@@ -71,6 +71,15 @@
 
 };
 
+static const unsigned long bcm6362_regs_base[] = {
+	__GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+	__GEN_CPU_IRQ_TABLE(6362)
+
+};
+
 static const unsigned long bcm6368_regs_base[] = {
 	__GEN_CPU_REGS_TABLE(6368)
 };
@@ -87,7 +96,7 @@
 
 EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
 
-u16 bcm63xx_get_cpu_rev(void)
+u8 bcm63xx_get_cpu_rev(void)
 {
 	return bcm63xx_cpu_rev;
 }
@@ -169,6 +178,42 @@
 		return (16 * 1000000 * n1 * n2) / m1;
 	}
 
+	case BCM6362_CPU_ID:
+	{
+		unsigned int tmp, mips_pll_fcvo;
+
+		tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+		mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+				>> STRAPBUS_6362_FCVO_SHIFT;
+		switch (mips_pll_fcvo) {
+		case 0x03:
+		case 0x0b:
+		case 0x13:
+		case 0x1b:
+			return 240000000;
+		case 0x04:
+		case 0x0c:
+		case 0x14:
+		case 0x1c:
+			return 160000000;
+		case 0x05:
+		case 0x0e:
+		case 0x16:
+		case 0x1e:
+		case 0x1f:
+			return 400000000;
+		case 0x06:
+			return 440000000;
+		case 0x07:
+		case 0x17:
+			return 384000000;
+		case 0x15:
+		case 0x1d:
+			return 200000000;
+		default:
+			return 320000000;
+		}
+	}
 	case BCM6368_CPU_ID:
 	{
 		unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@
 	unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
 	u32 val;
 
-	if (BCMCPU_IS_6328())
+	if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
 		return bcm_ddr_readl(DDR_CSEND_REG) << 24;
 
 	if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@
 
 void __init bcm63xx_cpu_init(void)
 {
-	unsigned int tmp, expected_cpu_id;
+	unsigned int tmp;
 	struct cpuinfo_mips *c = &current_cpu_data;
 	unsigned int cpu = smp_processor_id();
+	u32 chipid_reg;
 
 	/* soc registers location depends on cpu type */
-	expected_cpu_id = 0;
+	chipid_reg = 0;
 
 	switch (c->cputype) {
 	case CPU_BMIPS3300:
-		if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
-			expected_cpu_id = BCM6348_CPU_ID;
-			bcm63xx_regs_base = bcm6348_regs_base;
-			bcm63xx_irqs = bcm6348_irqs;
-		} else {
+		if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
 			__cpu_name[cpu] = "Broadcom BCM6338";
-			expected_cpu_id = BCM6338_CPU_ID;
-			bcm63xx_regs_base = bcm6338_regs_base;
-			bcm63xx_irqs = bcm6338_irqs;
-		}
-		break;
+		/* fall-through */
 	case CPU_BMIPS32:
-		expected_cpu_id = BCM6345_CPU_ID;
-		bcm63xx_regs_base = bcm6345_regs_base;
-		bcm63xx_irqs = bcm6345_irqs;
+		chipid_reg = BCM_6345_PERF_BASE;
 		break;
 	case CPU_BMIPS4350:
-		if ((read_c0_prid() & 0xf0) == 0x10) {
-			expected_cpu_id = BCM6358_CPU_ID;
-			bcm63xx_regs_base = bcm6358_regs_base;
-			bcm63xx_irqs = bcm6358_irqs;
-		} else {
-			/* all newer chips have the same chip id location */
-			u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
-
-			switch (chip_id) {
-			case BCM6328_CPU_ID:
-				expected_cpu_id = BCM6328_CPU_ID;
-				bcm63xx_regs_base = bcm6328_regs_base;
-				bcm63xx_irqs = bcm6328_irqs;
-				break;
-			case BCM6368_CPU_ID:
-				expected_cpu_id = BCM6368_CPU_ID;
-				bcm63xx_regs_base = bcm6368_regs_base;
-				bcm63xx_irqs = bcm6368_irqs;
-				break;
-			}
-		}
+		if ((read_c0_prid() & 0xf0) == 0x10)
+			chipid_reg = BCM_6345_PERF_BASE;
+		else
+			chipid_reg = BCM_6368_PERF_BASE;
 		break;
 	}
 
@@ -294,20 +313,47 @@
 	 * really early to panic, but delaying panic would not help since we
 	 * will never get any working console
 	 */
-	if (!expected_cpu_id)
+	if (!chipid_reg)
 		panic("unsupported Broadcom CPU");
 
-	/*
-	 * bcm63xx_regs_base is set, we can access soc registers
-	 */
-
-	/* double check CPU type */
-	tmp = bcm_perf_readl(PERF_REV_REG);
+	/* read out CPU type */
+	tmp = bcm_readl(chipid_reg);
 	bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
 	bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
 
-	if (bcm63xx_cpu_id != expected_cpu_id)
-		panic("bcm63xx CPU id mismatch");
+	switch (bcm63xx_cpu_id) {
+	case BCM6328_CPU_ID:
+		bcm63xx_regs_base = bcm6328_regs_base;
+		bcm63xx_irqs = bcm6328_irqs;
+		break;
+	case BCM6338_CPU_ID:
+		bcm63xx_regs_base = bcm6338_regs_base;
+		bcm63xx_irqs = bcm6338_irqs;
+		break;
+	case BCM6345_CPU_ID:
+		bcm63xx_regs_base = bcm6345_regs_base;
+		bcm63xx_irqs = bcm6345_irqs;
+		break;
+	case BCM6348_CPU_ID:
+		bcm63xx_regs_base = bcm6348_regs_base;
+		bcm63xx_irqs = bcm6348_irqs;
+		break;
+	case BCM6358_CPU_ID:
+		bcm63xx_regs_base = bcm6358_regs_base;
+		bcm63xx_irqs = bcm6358_irqs;
+		break;
+	case BCM6362_CPU_ID:
+		bcm63xx_regs_base = bcm6362_regs_base;
+		bcm63xx_irqs = bcm6362_irqs;
+		break;
+	case BCM6368_CPU_ID:
+		bcm63xx_regs_base = bcm6368_regs_base;
+		bcm63xx_irqs = bcm6368_irqs;
+		break;
+	default:
+		panic("unsupported Broadcom CPU %x", bcm63xx_cpu_id);
+		break;
+	}
 
 	bcm63xx_cpu_freq = detect_cpu_clock();
 	bcm63xx_memory_size = detect_memory_size();
diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c
index 58371c7..588d1ec 100644
--- a/arch/mips/bcm63xx/dev-flash.c
+++ b/arch/mips/bcm63xx/dev-flash.c
@@ -77,6 +77,12 @@
 			return BCM63XX_FLASH_TYPE_PARALLEL;
 		else
 			return BCM63XX_FLASH_TYPE_SERIAL;
+	case BCM6362_CPU_ID:
+		val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+		if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+			return BCM63XX_FLASH_TYPE_SERIAL;
+		else
+			return BCM63XX_FLASH_TYPE_NAND;
 	case BCM6368_CPU_ID:
 		val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
 		switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e97fd60..3065bb6 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -22,10 +22,6 @@
 /*
  * register offsets
  */
-static const unsigned long bcm6338_regs_spi[] = {
-	__GEN_SPI_REGS_TABLE(6338)
-};
-
 static const unsigned long bcm6348_regs_spi[] = {
 	__GEN_SPI_REGS_TABLE(6348)
 };
@@ -34,23 +30,15 @@
 	__GEN_SPI_REGS_TABLE(6358)
 };
 
-static const unsigned long bcm6368_regs_spi[] = {
-	__GEN_SPI_REGS_TABLE(6368)
-};
-
 const unsigned long *bcm63xx_regs_spi;
 EXPORT_SYMBOL(bcm63xx_regs_spi);
 
 static __init void bcm63xx_spi_regs_init(void)
 {
-	if (BCMCPU_IS_6338())
-		bcm63xx_regs_spi = bcm6338_regs_spi;
-	if (BCMCPU_IS_6348())
+	if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
 		bcm63xx_regs_spi = bcm6348_regs_spi;
-	if (BCMCPU_IS_6358())
+	if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
 		bcm63xx_regs_spi = bcm6358_regs_spi;
-	if (BCMCPU_IS_6368())
-		bcm63xx_regs_spi = bcm6368_regs_spi;
 }
 #else
 static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@
 	spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
 
 	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
-		spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
-		spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
-		spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
-		spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+		spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+		spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+		spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+		spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
 	}
 
-	if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+	if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
 		spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
 		spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
 		spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index da24c2b..c0ab388 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -82,6 +82,17 @@
 #define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6358
 #define ext_irq_cfg_reg2	0
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+#define irq_stat_reg		PERF_IRQSTAT_6362_REG
+#define irq_mask_reg		PERF_IRQMASK_6362_REG
+#define irq_bits		64
+#define is_ext_irq_cascaded	1
+#define ext_irq_start		(BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end		(BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count		4
+#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6362
+#define ext_irq_cfg_reg2	0
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
 #define irq_stat_reg		PERF_IRQSTAT_6368_REG
 #define irq_mask_reg		PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@
 		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
 		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
 		break;
+	case BCM6362_CPU_ID:
+		irq_stat_addr += PERF_IRQSTAT_6362_REG;
+		irq_mask_addr += PERF_IRQMASK_6362_REG;
+		irq_bits = 64;
+		ext_irq_count = 4;
+		is_ext_irq_cascaded = 1;
+		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+		break;
 	case BCM6368_CPU_ID:
 		irq_stat_addr += PERF_IRQSTAT_6368_REG;
 		irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@
 	case BCM6338_CPU_ID:
 	case BCM6345_CPU_ID:
 	case BCM6358_CPU_ID:
+	case BCM6362_CPU_ID:
 	case BCM6368_CPU_ID:
 		if (levelsense)
 			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 10eaff4..fd69808 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -36,6 +36,8 @@
 		mask = CKCTL_6348_ALL_SAFE_EN;
 	else if (BCMCPU_IS_6358())
 		mask = CKCTL_6358_ALL_SAFE_EN;
+	else if (BCMCPU_IS_6362())
+		mask = CKCTL_6362_ALL_SAFE_EN;
 	else if (BCMCPU_IS_6368())
 		mask = CKCTL_6368_ALL_SAFE_EN;
 	else
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
index 68a31bb..317931c 100644
--- a/arch/mips/bcm63xx/reset.c
+++ b/arch/mips/bcm63xx/reset.c
@@ -85,6 +85,20 @@
 #define BCM6358_RESET_PCIE	0
 #define BCM6358_RESET_PCIE_EXT	0
 
+#define BCM6362_RESET_SPI	SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET	0
+#define BCM6362_RESET_USBH	SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD	SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL	0
+#define BCM6362_RESET_SAR	SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY	SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW	SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM	SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI	0
+#define BCM6362_RESET_PCIE      (SOFTRESET_6362_PCIE_MASK | \
+				 SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT	SOFTRESET_6362_PCIE_EXT_MASK
+
 #define BCM6368_RESET_SPI	SOFTRESET_6368_SPI_MASK
 #define BCM6368_RESET_ENET	0
 #define BCM6368_RESET_USBH	SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@
 	__GEN_RESET_BITS_TABLE(6358)
 };
 
+static const u32 bcm6362_reset_bits[] = {
+	__GEN_RESET_BITS_TABLE(6362)
+};
+
 static const u32 bcm6368_reset_bits[] = {
 	__GEN_RESET_BITS_TABLE(6368)
 };
@@ -140,6 +158,9 @@
 	} else if (BCMCPU_IS_6358()) {
 		reset_reg = PERF_SOFTRESET_6358_REG;
 		bcm63xx_reset_bits = bcm6358_reset_bits;
+	} else if (BCMCPU_IS_6362()) {
+		reset_reg = PERF_SOFTRESET_6362_REG;
+		bcm63xx_reset_bits = bcm6362_reset_bits;
 	} else if (BCMCPU_IS_6368()) {
 		reset_reg = PERF_SOFTRESET_6368_REG;
 		bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@
 #define reset_reg PERF_SOFTRESET_6358_REG
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+static const u32 bcm63xx_reset_bits[] = {
+	__GEN_RESET_BITS_TABLE(6362)
+};
+#define reset_reg PERF_SOFTRESET_6362_REG
+#endif
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 static const u32 bcm63xx_reset_bits[] = {
 	__GEN_RESET_BITS_TABLE(6368)
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 35e18e9..24a2444 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -83,6 +83,9 @@
 	case BCM6358_CPU_ID:
 		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
 		break;
+	case BCM6362_CPU_ID:
+		perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+		break;
 	}
 
 	for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@
 const char *get_system_type(void)
 {
 	static char buf[128];
-	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
+	snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
 		 board_get_name(),
 		 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
 	return buf;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 156aa61..a22f06a 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1032,9 +1032,8 @@
 	if (!octeon_irq_virq_in_range(virq))
 		return -EINVAL;
 
-	hw += gpiod->base_hwirq;
-	line = hw >> 6;
-	bit = hw & 63;
+	line = (hw + gpiod->base_hwirq) >> 6;
+	bit = (hw + gpiod->base_hwirq) & 63;
 	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
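The rewritten mapping leaves hw itself untouched and folds the GPIO block's base hwirq into both computations. Worked through with illustrative numbers:

/* Example values only: base_hwirq = 64, hw = 3.
 *   line = (3 + 64) >> 6 = 1;
 *   bit  = (3 + 64) & 63 = 3;
 * so the interrupt maps to CIU line 1, bit 3, while hw keeps its
 * original value for later use.
 */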
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index cd732e5..ce1d3ee 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -2,30 +2,21 @@
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
-CONFIG_PM=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@
 CONFIG_IPV6_PIMSM_V2=y
 CONFIG_NETWORK_SECMARK=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@
 CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
 CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@
 CONFIG_BROADCOM_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_TC35815=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
 CONFIG_PRISM54=m
@@ -352,15 +324,7 @@
 CONFIG_HOSTAP_PCI=m
 CONFIG_IPW2100=m
 CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
 CONFIG_LIBERTAS=m
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_HID=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_UIO=m
@@ -398,7 +356,6 @@
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -425,7 +382,6 @@
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 0000000..341bb47
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 0000000..2b8558b
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 0000000..93057a7
--- /dev/null
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -0,0 +1,195 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="aprp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644
index 0000000..4e54b75
--- /dev/null
+++ b/arch/mips/configs/maltasmtc_defconfig
@@ -0,0 +1,196 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMTC=y
+# CONFIG_MIPS_MT_FPAFF is not set
+CONFIG_NR_CPUS=9
+CONFIG_HZ_48=y
+CONFIG_LOCALVERSION="smtc"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 0000000..8a66602
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -0,0 +1,199 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 0000000..9868fc9
--- /dev/null
+++ b/arch/mips/configs/maltaup_defconfig
@@ -0,0 +1,194 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="up"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig
index e3eec68..0abe681 100644
--- a/arch/mips/configs/sead3_defconfig
+++ b/arch/mips/configs/sead3_defconfig
@@ -2,7 +2,6 @@
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@
 CONFIG_NLS_ISO8859_15=y
 CONFIG_NLS_UTF8=y
 # CONFIG_FTRACE is not set
-CONFIG_CRYPTO=y
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_ARC4=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644
index 0000000..2a0da5b
--- /dev/null
+++ b/arch/mips/configs/sead3micro_defconfig
@@ -0,0 +1,122 @@
+CONFIG_MIPS_SEAD3=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_SENSORS_ADT7475=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_DEBUG=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/lib/Makefile b/arch/mips/fw/lib/Makefile
index 84befc9..5291505 100644
--- a/arch/mips/fw/lib/Makefile
+++ b/arch/mips/fw/lib/Makefile
@@ -2,4 +2,6 @@
 # Makefile for generic prom monitor library routines under Linux.
 #
 
+lib-y			+= cmdline.o
+
 lib-$(CONFIG_64BIT)	+= call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 0000000..ffd0345
--- /dev/null
+++ b/arch/mips/fw/lib/cmdline.c
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+int fw_argc;
+int *_fw_argv;
+int *_fw_envp;
+
+void __init fw_init_cmdline(void)
+{
+	int i;
+
+	/* Validate command line parameters. */
+	if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
+		fw_argc = 0;
+		_fw_argv = NULL;
+	} else {
+		fw_argc = (fw_arg0 & 0x0000ffff);
+		_fw_argv = (int *)fw_arg1;
+	}
+
+	/* Validate environment pointer. */
+	if (fw_arg2 < CKSEG0)
+		_fw_envp = NULL;
+	else
+		_fw_envp = (int *)fw_arg2;
+
+	for (i = 1; i < fw_argc; i++) {
+		strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
+		if (i < (fw_argc - 1))
+			strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+	}
+}
+
+char * __init fw_getcmdline(void)
+{
+	return &(arcs_cmdline[0]);
+}
+
+char *fw_getenv(char *envname)
+{
+	char *result = NULL;
+
+	if (_fw_envp != NULL) {
+		/*
+		 * Return a pointer to the given environment variable.
+		 * YAMON uses "name", "value" pairs, while U-Boot uses
+		 * "name=value".
+		 */
+		int i, yamon, index = 0;
+
+		yamon = (strchr(fw_envp(index), '=') == NULL);
+		i = strlen(envname);
+
+		while (fw_envp(index)) {
+			if (strncmp(envname, fw_envp(index), i) == 0) {
+				if (yamon) {
+					result = fw_envp(index + 1);
+					break;
+				} else if (fw_envp(index)[i] == '=') {
+					result = fw_envp(index) + i + 1;
+					break;
+				}
+			}
+
+			/* Increment array index. */
+			if (yamon)
+				index += 2;
+			else
+				index += 1;
+		}
+	}
+
+	return result;
+}
+
+unsigned long fw_getenvl(char *envname)
+{
+	unsigned long envl = 0UL;
+	char *str;
+	long val;
+	int tmp;
+
+	str = fw_getenv(envname);
+	if (str) {
+		tmp = kstrtol(str, 0, &val);
+		envl = tmp ? 0UL : (unsigned long)val;
+	}
+
+	return envl;
+}
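
The YAMON/U-Boot split above is the subtle part of fw_getenv(), so here is
the same lookup as a standalone user-space sketch (illustrative only: the
environment contents are made up, and the kernel reads the firmware-provided
array via fw_envp() instead):

	#include <stdio.h>
	#include <string.h>

	/* YAMON passes alternating "name", "value" strings... */
	static const char *yamon_env[] =
		{ "memsize", "0x08000000", "modetty0", "38400n8r", NULL };
	/* ...while U-Boot passes combined "name=value" strings. */
	static const char *uboot_env[] =
		{ "memsize=0x08000000", "baudrate=38400", NULL };

	static const char *lookup(const char **envp, const char *name)
	{
		int i = strlen(name);
		int yamon = (strchr(envp[0], '=') == NULL);
		int idx = 0;

		while (envp[idx]) {
			if (strncmp(name, envp[idx], i) == 0) {
				if (yamon)
					return envp[idx + 1];	/* value is the next entry */
				if (envp[idx][i] == '=')
					return envp[idx] + i + 1;	/* value follows '=' */
			}
			idx += yamon ? 2 : 1;	/* pairs vs. single strings */
		}
		return NULL;
	}

	int main(void)
	{
		printf("YAMON:  %s\n", lookup(yamon_env, "memsize"));
		printf("U-Boot: %s\n", lookup(uboot_env, "memsize"));
		return 0;
	}
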
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 164a21e..879691d 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -296,6 +296,7 @@
 #define LONG_SUBU	subu
 #define LONG_L		lw
 #define LONG_S		sw
+#define LONG_SP		swp
 #define LONG_SLL	sll
 #define LONG_SLLV	sllv
 #define LONG_SRL	srl
@@ -318,6 +319,7 @@
 #define LONG_SUBU	dsubu
 #define LONG_L		ld
 #define LONG_S		sd
+#define LONG_SP		sdp
 #define LONG_SLL	dsll
 #define LONG_SLLV	dsllv
 #define LONG_SRL	dsrl
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index b71dd5b..4d2cdea 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -104,6 +104,7 @@
 extern struct boot_mem_map boot_mem_map;
 
 extern void add_memory_region(phys_t start, phys_t size, long type);
+extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max);
 
 extern void prom_init(void);
 extern void prom_free_prom_memory(void);
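
detect_memory_region() gives boards without a trustworthy firmware memory map
a way to size RAM by probing between two bounds and then registering the
result. A hypothetical caller, with made-up bounds (plat_mem_setup() is the
usual platform hook, but nothing below is taken from this patch):

	#include <linux/init.h>
	#include <asm/bootinfo.h>

	#define BOARD_MEM_MIN	0x00400000	/*   4 MB, assumed minimum */
	#define BOARD_MEM_MAX	0x10000000	/* 256 MB, assumed maximum */

	void __init plat_mem_setup(void)
	{
		/* Probe from physical address 0 and register what is found. */
		detect_memory_region(0, BOARD_MEM_MIN, BOARD_MEM_MAX);
	}
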
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index 888766a..e28a3e0 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -11,6 +11,14 @@
 #include <asm/ptrace.h>
 #include <asm/inst.h>
 
+extern int __isa_exception_epc(struct pt_regs *regs);
+extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+					 union mips_instruction insn);
+extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
+extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+
+
 static inline int delay_slot(struct pt_regs *regs)
 {
 	return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@
 
 static inline unsigned long exception_epc(struct pt_regs *regs)
 {
-	if (!delay_slot(regs))
+	if (likely(!delay_slot(regs)))
 		return regs->cp0_epc;
 
+	if (get_isa16_mode(regs->cp0_epc))
+		return __isa_exception_epc(regs);
+
 	return regs->cp0_epc + 4;
 }
 
 #define BRANCH_LIKELY_TAKEN 0x0001
 
-extern int __compute_return_epc(struct pt_regs *regs);
-extern int __compute_return_epc_for_insn(struct pt_regs *regs,
-					 union mips_instruction insn);
-
 static inline int compute_return_epc(struct pt_regs *regs)
 {
+	if (get_isa16_mode(regs->cp0_epc)) {
+		if (cpu_has_mmips)
+			return __microMIPS_compute_return_epc(regs);
+		if (cpu_has_mips16)
+			return __MIPS16e_compute_return_epc(regs);
+		return regs->cp0_epc;
+	}
+
 	if (!delay_slot(regs)) {
 		regs->cp0_epc += 4;
 		return 0;
@@ -40,4 +55,19 @@
 	return __compute_return_epc(regs);
 }
 
+static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
+					     union mips16e_instruction *inst)
+{
+	if (likely(!delay_slot(regs))) {
+		if (inst->ri.opcode == MIPS16e_extend_op) {
+			regs->cp0_epc += 4;
+			return 0;
+		}
+		regs->cp0_epc += 2;
+		return 0;
+	}
+
+	return __MIPS16e_compute_return_epc(regs);
+}
+
 #endif /* _ASM_BRANCH_H */
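
The new dispatch in compute_return_epc() leans on the convention that bit 0
of the EPC encodes the ISA mode on microMIPS/MIPS16e-capable cores: real
instruction addresses are at least halfword aligned, so the bit is free to
carry the flag. A minimal illustration, assuming get_isa16_mode(x) reduces
to testing that bit:

	/* Returns 1 for the 16-bit ISAs (microMIPS/MIPS16e), 0 for classic MIPS. */
	static inline int isa16_mode(unsigned long epc)
	{
		return epc & 0x1;
	}
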
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1a57e8b..e5ec8fc 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -113,6 +113,9 @@
 #ifndef cpu_has_pindexed_dcache
 #define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
 #endif
+#ifndef cpu_has_local_ebase
+#define cpu_has_local_ebase	1
+#endif
 
 /*
  * I-Cache snoops remote store.	 This only matters on SMP.  Some multiprocessors
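
Like the other cpu_has_* defaults, the #ifndef guard lets a platform pin the
value at build time instead of paying for a runtime test. A board whose CPUs
cannot relocate the exception base per-CPU could add, in a hypothetical
override header:

	/* arch/mips/include/asm/mach-myboard/cpu-feature-overrides.h (hypothetical) */
	#define cpu_has_local_ebase	0
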
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644
index 0000000..242cbb3
--- /dev/null
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+#ifndef __ASM_DMA_COHERENCE_H
+#define __ASM_DMA_COHERENCE_H
+
+extern int coherentio;
+extern int hw_coherentio;
+
+#endif
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index f8fc74b..84238c5 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -2,6 +2,7 @@
 #define _ASM_DMA_MAPPING_H
 
 #include <asm/scatterlist.h>
+#include <asm/dma-coherence.h>
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3b40927..2abb587 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -54,6 +54,12 @@
 extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
 	unsigned long cpc);
 extern int do_dsemulret(struct pt_regs *xcp);
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+				    struct mips_fpu_struct *ctx, int has_fpu,
+				    void *__user *fault_addr);
+int process_fpemu_return(int sig, void __user *fault_addr);
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+		     unsigned long *contpc);
 
 /*
  * Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644
index 0000000..d6c50a7
--- /dev/null
+++ b/arch/mips/include/asm/fw/fw.h
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_FW_H_
+#define __ASM_FW_H_
+
+#include <asm/bootinfo.h>	/* For cleaner code... */
+
+enum fw_memtypes {
+	fw_dontuse,
+	fw_code,
+	fw_free,
+};
+
+typedef struct {
+	unsigned long base;	/* Within KSEG0 */
+	unsigned int size;	/* bytes */
+	enum fw_memtypes type;	/* fw_memtypes */
+} fw_memblock_t;
+
+/* Maximum number of memory block descriptors. */
+#define FW_MAX_MEMBLOCKS	32
+
+extern int fw_argc;
+extern int *_fw_argv;
+extern int *_fw_envp;
+
+/*
+ * Most firmware like YAMON, PMON, etc. pass arguments and environment
+ * variables as 32-bit pointers. These take care of sign extension.
+ */
+#define fw_argv(index)		((char *)(long)_fw_argv[(index)])
+#define fw_envp(index)		((char *)(long)_fw_envp[(index)])
+
+extern void fw_init_cmdline(void);
+extern char *fw_getcmdline(void);
+extern fw_memblock_t *fw_getmdesc(void);
+extern void fw_meminit(void);
+extern char *fw_getenv(char *name);
+extern unsigned long fw_getenvl(char *name);
+extern void fw_init_early_console(char port);
+
+#endif /* __ASM_FW_H_ */
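
The double cast in fw_argv()/fw_envp() is what performs the sign extension
the comment promises: going through long first widens the 32-bit value
arithmetically, so a KSEG0 pointer such as 0x80xxxxxx stays canonical on a
64-bit kernel. A standalone user-space sketch of the difference (LP64
assumed):

	#include <stdio.h>

	int main(void)
	{
		int fwptr = (int)0x80002000;	/* KSEG0 address kept as a 32-bit int */

		/* Zero extension produces a bogus 0x0000000080002000... */
		void *bad  = (void *)(unsigned long)(unsigned int)fwptr;
		/* ...sign extension via long produces 0xffffffff80002000. */
		void *good = (void *)(long)fwptr;

		printf("bad=%p good=%p\n", bad, good);
		return 0;
	}
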
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index bdc9786..7153b32 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -202,7 +202,7 @@
 #define GIC_VPE_WD_COUNT0_OFS		0x0094
 #define GIC_VPE_WD_INITIAL0_OFS		0x0098
 #define GIC_VPE_COMPARE_LO_OFS		0x00a0
-#define GIC_VPE_COMPARE_HI		0x00a4
+#define GIC_VPE_COMPARE_HI_OFS		0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE	0x0100
 #define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@
 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
 #define GIC_PIN_TO_VEC_OFFSET	(1)
 
-extern int gic_present;
+#include <linux/clocksource.h>
+#include <linux/irq.h>
+
+extern unsigned int gic_present;
+extern unsigned int gic_frequency;
 extern unsigned long _gic_base;
 extern unsigned int gic_irq_base;
 extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@
 extern void gic_init(unsigned long gic_base_addr,
 	unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
 	unsigned int intrmap_size, unsigned int irqbase);
-
 extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_get_int(void);
+extern unsigned int gic_compare_int(void);
+extern cycle_t gic_read_count(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
 extern void gic_bind_eic_interrupt(int irq, int set);
 extern unsigned int gic_get_timer_pending(void);
+extern unsigned int gic_get_int(void);
 extern void gic_enable_interrupt(int irq_vec);
 extern void gic_disable_interrupt(int irq_vec);
 extern void gic_irq_ack(struct irq_data *d);
 extern void gic_finish_irq(struct irq_data *d);
 extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-
 #endif /* _ASM_GICREGS_H */
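
With gic_read_count() exported, a clocksource can be layered directly on the
GIC shared counter. Roughly (a sketch following the stock clocksource
pattern; the real csrc-gic driver may differ in names and rating):

	static cycle_t gic_hpt_read(struct clocksource *cs)
	{
		return gic_read_count();
	}

	static struct clocksource gic_clocksource = {
		.name	= "GIC",
		.read	= gic_hpt_read,
		.mask	= CLOCKSOURCE_MASK(64),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};
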
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 44d6a5b..e3ee92d 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -10,34 +10,13 @@
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
+#include <linux/stringify.h>
 
-#include <asm/cpu-features.h>
+#define ___ssnop							\
+	sll	$0, $0, 1
 
-#define ASMMACRO(name, code...)						\
-__asm__(".macro " #name "; " #code "; .endm");				\
-									\
-static inline void name(void)						\
-{									\
-	__asm__ __volatile__ (#name);					\
-}
-
-/*
- * MIPS R2 instruction hazard barrier.	 Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
-
-ASMMACRO(_ssnop,
-	 sll	$0, $0, 1
-	)
-
-ASMMACRO(_ehb,
-	 sll	$0, $0, 3
-	)
+#define ___ehb								\
+	sll	$0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ehb
-	)
+#define __mtc0_tlbw_hazard						\
+	___ehb
+
+#define __tlbw_use_hazard						\
+	___ehb
+
+#define __tlb_probe_hazard						\
+	___ehb
+
+#define __irq_enable_hazard						\
+	___ehb
+
+#define __irq_disable_hazard						\
+	___ehb
+
+#define __back_to_back_c0_hazard					\
+	___ehb
+
 /*
 * gcc has a tradition of miscompiling the previous construct using the
  * address of a label as argument to inline assembler.	Gas otoh has the
@@ -94,24 +73,42 @@
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	_ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
+
+#define __mtc0_tlbw_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __tlbw_use_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __tlb_probe_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __irq_enable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __irq_disable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __back_to_back_c0_hazard					\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
 /*
 * gcc has a tradition of miscompiling the previous construct using the
  * address of a label as argument to inline assembler.	Gas otoh has the
@@ -147,18 +144,18 @@
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@
 * hazard so this is a nice trick to have optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	nop; nop
-	)
-ASMMACRO(tlbw_use_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(tlb_probe_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
-ASMMACRO(irq_disable_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
+#define __mtc0_tlbw_hazard						\
+	nop;								\
+	nop
+
+#define __tlbw_use_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __tlb_probe_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __irq_enable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
+#define __irq_disable_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __back_to_back_c0_hazard					\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-	 .set	push;
-	 .set	mips64;
-	 .set	noreorder;
-	 _ssnop;
-	 bnezl	$0, .+4;
-	 _ssnop;
-	 .set	pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard						\
+	.set	push;							\
+	.set	mips64;							\
+	.set	noreorder;						\
+	___ssnop;							\
+	bnezl	$0, .+4;						\
+	___ssnop;							\
+	.set	pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-	 _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard						\
+	___ehb
+
+#define __disable_fpu_hazard						\
+	___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-	 nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard						\
+	nop;								\
+	nop;								\
+	nop;								\
+	nop
+
+#define __disable_fpu_hazard						\
+	___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define	_ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop()							\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(___ssnop)						\
+	);								\
+} while (0)
+
+#define	_ehb()								\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(___ehb)						\
+	);								\
+} while (0)
+
+
+#define mtc0_tlbw_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__mtc0_tlbw_hazard)					\
+	);								\
+} while (0)
+
+
+#define tlbw_use_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__tlbw_use_hazard)					\
+	);								\
+} while (0)
+
+
+#define tlb_probe_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__tlb_probe_hazard)					\
+	);								\
+} while (0)
+
+
+#define irq_enable_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__irq_enable_hazard)				\
+	);								\
+} while (0)
+
+
+#define irq_disable_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__irq_disable_hazard)				\
+	);								\
+} while (0)
+
+
+#define back_to_back_c0_hazard() 					\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__back_to_back_c0_hazard)				\
+	);								\
+} while (0)
+
+
+#define enable_fpu_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__enable_fpu_hazard)				\
+	);								\
+} while (0)
+
+
+#define disable_fpu_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__disable_fpu_hazard)				\
+	);								\
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier.   Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__  */
+
 #endif /* _ASM_HAZARDS_H */
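
The payoff of dropping ASMMACRO() is that a single token list now serves both
worlds: assembly sources paste the tokens directly, while C code turns them
into an inline-asm body with __stringify(). The trick in isolation, using the
same two-level stringify that linux/stringify.h provides (standalone,
compilable with any C compiler):

	#include <stdio.h>

	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)

	#define __my_hazard		\
		nop;			\
		nop

	int main(void)
	{
		/*
		 * In kernel C this string would be the asm body:
		 *	__asm__ __volatile__(__stringify(__my_hazard));
		 */
		printf("%s\n", __stringify(__my_hazard));	/* prints "nop; nop" */
		return 0;
	}
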
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index f1eadf7..22912f7 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -73,4 +73,16 @@
 
 typedef unsigned int mips_instruction;
 
+/* microMIPS instruction decode structure. Do NOT export!!! */
+struct mm_decoded_insn {
+	mips_instruction insn;
+	mips_instruction next_insn;
+	int pc_inc;
+	int next_pc_inc;
+	int micro_mips_mode;
+};
+
+/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
+extern const int reg16to32[];
+
 #endif /* _ASM_INST_H */
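
For context: the 3-bit register fields of the 16-bit ISAs do not name GPRs
0-7 directly, and reg16to32[] undoes that encoding. Its conventional layout
is shown here as an assumption (the table is defined elsewhere in the kernel,
not in this hunk):

	/* Assumed microMIPS/MIPS16e mapping: 3-bit encodings 0-7 -> GPR numbers. */
	const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
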
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 9f3384c..45c0095 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -14,53 +14,48 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
-	"	.set	push						\n"
-	"	.set	noat						\n"
-	"	di							\n"
-	"	irq_disable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm							\n");
-
 static inline void arch_local_irq_disable(void)
 {
 	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
-}
-
-
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
 	"	.set	push						\n"
-	"	.set	reorder						\n"
 	"	.set	noat						\n"
-	"	di	\\result					\n"
-	"	andi	\\result, 1					\n"
-	"	irq_disable_hazard					\n"
+	"	di							\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
+}
 
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
+
+	__asm__ __volatile__(
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+	"	.set	noat						\n"
+	"	di	%[flags]					\n"
+	"	andi	%[flags], 1					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
+
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
@@ -69,7 +64,7 @@
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
-	"	beqz	\\flags, 1f					\n"
+	"	beqz	%[flags], 1f					\n"
 	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
@@ -78,33 +73,44 @@
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
-	"	ins	$1, \\flags, 0, 1				\n"
+	"	ins	$1, %[flags], 0, 1				\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we're having since days 1.
+	 */
+	"	beqz	%[flags], 1f					\n"
+	"	di							\n"
+	"	ei							\n"
+	"1:								\n"
+#else
+	/*
+	 * Fast, dangerous.  Life is fun, life is good.
+	 */
+	"	mfc0	$1, $12						\n"
+	"	ins	$1, %[flags], 0, 1				\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
@@ -133,45 +149,28 @@
 	"	xori	$1,0x1e						\n"
 	"	mtc0	$1,$12						\n"
 #endif
-	"	irq_enable_hazard					\n"
+	"	" __stringify(__irq_enable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
-
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
-	"	.set	push						\n"
-	"	.set	reorder						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
-#else
-	"	mfc0	\\flags, $12					\n"
-#endif
-	"	.set	pop						\n"
-	"	.endm							\n");
-
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+
+	asm __volatile__(
+	"	.set	push						\n"
+	"	.set	reorder						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	%[flags], $2, 1					\n"
+#else
+	"	mfc0	%[flags], $12					\n"
+#endif
+	"	.set	pop						\n"
+	: [flags] "=r" (flags));
+
 	return flags;
 }
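
With the asm macros folded into the inline functions, callers are unchanged;
the usual critical-section pattern still reads:

	unsigned long flags;

	flags = arch_local_irq_save();	/* di + hazard barrier; bit 0 = old IE */
	/* ... code that must run with interrupts masked ... */
	arch_local_irq_restore(flags);	/* re-enables only if IE was set */
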
 
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644
index 0000000..85789ea
--- /dev/null
+++ b/arch/mips/include/asm/kvm.h
@@ -0,0 +1,55 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+#define __KVM_MIPS
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL   	8
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+	__u32 gprs[32];
+	__u32 hi;
+	__u32 lo;
+	__u32 pc;
+
+	__u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_mips_interrupt {
+	/* in */
+	__u32 cpu;
+	__u32 irq;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
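
struct kvm_mips_interrupt is the payload for the KVM_INTERRUPT vcpu ioctl on
MIPS. A hypothetical userspace sketch (vcpu_fd handling elided; the irq
number is an assumption):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	struct kvm_mips_interrupt irq = {
		.cpu = 0,	/* target vcpu */
		.irq = 2,	/* assumed: interrupt line to raise */
	};
	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
		perror("KVM_INTERRUPT");
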
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644
index 0000000..e68781e
--- /dev/null
+++ b/arch/mips/include/asm/kvm_host.h
@@ -0,0 +1,667 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS		1
+#define KVM_USER_MEM_SLOTS	8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 	0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x)	0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR     0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG             0x00000000UL
+#define KVM_GUEST_KSEG0             0x40000000UL
+#define KVM_GUEST_KSEG23            0x60000000UL
+#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE            0xdeadbeef
+#define KVM_INVALID_INST            0xdeadbeef
+#define KVM_INVALID_ADDR            0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) ((x) * 1E6L)
+
+#define CAUSEB_DC       27
+#define CAUSEF_DC       (_ULCAST_(1)   << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+
+struct kvm_vm_stat {
+	u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+	u32 wait_exits;
+	u32 cache_exits;
+	u32 signal_exits;
+	u32 int_exits;
+	u32 cop_unusable_exits;
+	u32 tlbmod_exits;
+	u32 tlbmiss_ld_exits;
+	u32 tlbmiss_st_exits;
+	u32 addrerr_st_exits;
+	u32 addrerr_ld_exits;
+	u32 syscall_exits;
+	u32 resvd_inst_exits;
+	u32 break_inst_exits;
+	u32 flush_dcache_exits;
+	u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+	WAIT_EXITS,
+	CACHE_EXITS,
+	SIGNAL_EXITS,
+	INT_EXITS,
+	COP_UNUSABLE_EXITS,
+	TLBMOD_EXITS,
+	TLBMISS_LD_EXITS,
+	TLBMISS_ST_EXITS,
+	ADDRERR_ST_EXITS,
+	ADDRERR_LD_EXITS,
+	SYSCALL_EXITS,
+	RESVD_INST_EXITS,
+	BREAK_INST_EXITS,
+	FLUSH_DCACHE_EXITS,
+	MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+	/* Guest GVA->HPA page table */
+	unsigned long *guest_pmap;
+	unsigned long guest_pmap_npages;
+
+	/* Wired host TLB used for the commpage */
+	int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL   	8
+
+struct mips_coproc {
+	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define	MIPS_CP0_TLB_INDEX	    0
+#define	MIPS_CP0_TLB_RANDOM	    1
+#define	MIPS_CP0_TLB_LOW	    2
+#define	MIPS_CP0_TLB_LO0	    2
+#define	MIPS_CP0_TLB_LO1	    3
+#define	MIPS_CP0_TLB_CONTEXT	4
+#define	MIPS_CP0_TLB_PG_MASK	5
+#define	MIPS_CP0_TLB_WIRED	    6
+#define	MIPS_CP0_HWRENA 	    7
+#define	MIPS_CP0_BAD_VADDR	    8
+#define	MIPS_CP0_COUNT	        9
+#define	MIPS_CP0_TLB_HI	        10
+#define	MIPS_CP0_COMPARE	    11
+#define	MIPS_CP0_STATUS	        12
+#define	MIPS_CP0_CAUSE	        13
+#define	MIPS_CP0_EXC_PC	        14
+#define	MIPS_CP0_PRID		    15
+#define	MIPS_CP0_CONFIG	        16
+#define	MIPS_CP0_LLADDR	        17
+#define	MIPS_CP0_WATCH_LO	    18
+#define	MIPS_CP0_WATCH_HI	    19
+#define	MIPS_CP0_TLB_XCONTEXT   20
+#define	MIPS_CP0_ECC		    26
+#define	MIPS_CP0_CACHE_ERR	    27
+#define	MIPS_CP0_TAG_LO	        28
+#define	MIPS_CP0_TAG_HI	        29
+#define	MIPS_CP0_ERROR_PC	    30
+#define	MIPS_CP0_DEBUG	        23
+#define	MIPS_CP0_DEPC		    24
+#define	MIPS_CP0_PERFCNT	    25
+#define	MIPS_CP0_ERRCTL         26
+#define	MIPS_CP0_DATA_LO	    28
+#define	MIPS_CP0_DATA_HI	    29
+#define	MIPS_CP0_DESAVE	        31
+
+#define MIPS_CP0_CONFIG_SEL	    0
+#define MIPS_CP0_CONFIG1_SEL    1
+#define MIPS_CP0_CONFIG2_SEL    2
+#define MIPS_CP0_CONFIG3_SEL    3
+
+/* Config0 register bits */
+#define CP0C0_M    31
+#define CP0C0_K23  28
+#define CP0C0_KU   25
+#define CP0C0_MDU  20
+#define CP0C0_MM   17
+#define CP0C0_BM   16
+#define CP0C0_BE   15
+#define CP0C0_AT   13
+#define CP0C0_AR   10
+#define CP0C0_MT   7
+#define CP0C0_VI   3
+#define CP0C0_K0   0
+
+/* Config1 register bits */
+#define CP0C1_M    31
+#define CP0C1_MMU  25
+#define CP0C1_IS   22
+#define CP0C1_IL   19
+#define CP0C1_IA   16
+#define CP0C1_DS   13
+#define CP0C1_DL   10
+#define CP0C1_DA   7
+#define CP0C1_C2   6
+#define CP0C1_MD   5
+#define CP0C1_PC   4
+#define CP0C1_WR   3
+#define CP0C1_CA   2
+#define CP0C1_EP   1
+#define CP0C1_FP   0
+
+/* Config2 Register bits */
+#define CP0C2_M    31
+#define CP0C2_TU   28
+#define CP0C2_TS   24
+#define CP0C2_TL   20
+#define CP0C2_TA   16
+#define CP0C2_SU   12
+#define CP0C2_SS   8
+#define CP0C2_SL   4
+#define CP0C2_SA   0
+
+/* Config3 Register bits */
+#define CP0C3_M    31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI  13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA  7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP   4
+#define CP0C3_MT   2
+#define CP0C3_SM   1
+#define CP0C3_TL   0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate */
+#define MIPS_CONFIG0                                              \
+  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+   no performance counters, no watch registers,
+   no code compression, EJTAG present, no FPU */
+#define MIPS_CONFIG1                                              \
+((1 << CP0C1_M) |                                                 \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2                                              \
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+   no external interrupt controller, no vectored interrupts,
+   no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3                                              \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+   CP0C0_MT field.  */
+enum mips_mmu_types {
+	MMU_TYPE_NONE,
+	MMU_TYPE_R4000,
+	MMU_TYPE_RESERVED,
+	MMU_TYPE_FMT,
+	MMU_TYPE_R3000,
+	MMU_TYPE_R6000,
+	MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT           0	/* Interrupt pending */
+#define T_TLB_MOD       1	/* TLB modified fault */
+#define T_TLB_LD_MISS       2	/* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS       3	/* TLB miss on a store */
+#define T_ADDR_ERR_LD       4	/* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST       5	/* Address error on a store */
+#define T_BUS_ERR_IFETCH    6	/* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST     7	/* Bus error on a load or store */
+#define T_SYSCALL       8	/* System call */
+#define T_BREAK         9	/* Breakpoint */
+#define T_RES_INST      10	/* Reserved instruction exception */
+#define T_COP_UNUSABLE      11	/* Coprocessor unusable */
+#define T_OVFLOW        12	/* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP          13	/* Trap instruction */
+#define T_VCEI          14	/* Virtual coherency exception */
+#define T_FPE           15	/* Floating point exception */
+#define T_WATCH         23	/* Watch address reference */
+#define T_VCED          31	/* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR          (1<<0)	/* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1)	/* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_DR         RESUME_FLAG_DR
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+enum emulation_result {
+	EMULATE_DONE,		/* no further processing */
+	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
+	EMULATE_FAIL,		/* can't emulate this instruction */
+	EMULATE_WAIT,		/* WAIT instruction */
+	EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G  0x00000001	/* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V  0x00000002	/* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D  0x00000004	/* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x) \
+    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT      6
+#define MIPS3_PG_FRAME      0x3fffffc0
+
+#define VPN2_MASK           0xffffe000
+#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+	long tlb_mask;
+	long tlb_hi;
+	long tlb_lo0;
+	long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE     64
+struct kvm_vcpu_arch {
+	void *host_ebase, *guest_ebase;
+	unsigned long host_stack;
+	unsigned long host_gp;
+
+	/* Host CP0 registers used when handling exits from guest */
+	unsigned long host_cp0_badvaddr;
+	unsigned long host_cp0_cause;
+	unsigned long host_cp0_epc;
+	unsigned long host_cp0_entryhi;
+	uint32_t guest_inst;
+
+	/* GPRS */
+	unsigned long gprs[32];
+	unsigned long hi;
+	unsigned long lo;
+	unsigned long pc;
+
+	/* FPU State */
+	struct mips_fpu_struct fpu;
+
+	/* COP0 State */
+	struct mips_coproc *cop0;
+
+	/* Host KSEG0 address of the EI/DI offset */
+	void *kseg0_commpage;
+
+	u32 io_gpr;		/* GPR used as IO source/target */
+
+	/* Used to calibrate the virtual count register for the guest */
+	int32_t host_cp0_count;
+
+	/* Bitmask of exceptions that are pending */
+	unsigned long pending_exceptions;
+
+	/* Bitmask of pending exceptions to be cleared */
+	unsigned long pending_exceptions_clr;
+
+	unsigned long pending_load_cause;
+
+	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
+	unsigned long preempt_entryhi;
+
+	/* S/W Based TLB for guest */
+	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+	/* Cached guest kernel/user ASIDs */
+	uint32_t guest_user_asid[NR_CPUS];
+	uint32_t guest_kernel_asid[NR_CPUS];
+	struct mm_struct guest_kernel_mm, guest_user_mm;
+
+	struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+	struct hrtimer comparecount_timer;
+
+	int last_sched_cpu;
+
+	/* WAIT executed */
+	int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_cause(cop0, change);           \
+    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+}
+#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_ebase(cop0, change);           \
+    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+}
+
+
+struct kvm_mips_callbacks {
+	int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
+	int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
+	int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
+	int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
+	int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
+	int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
+	int (*handle_syscall) (struct kvm_vcpu *vcpu);
+	int (*handle_res_inst) (struct kvm_vcpu *vcpu);
+	int (*handle_break) (struct kvm_vcpu *vcpu);
+	int (*vm_init) (struct kvm *kvm);
+	int (*vcpu_init) (struct kvm_vcpu *vcpu);
+	int (*vcpu_setup) (struct kvm_vcpu *vcpu);
+	gpa_t (*gva_to_gpa) (gva_t gva);
+	void (*queue_timer_int) (struct kvm_vcpu *vcpu);
+	void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
+	void (*queue_io_int) (struct kvm_vcpu *vcpu,
+			      struct kvm_mips_interrupt *irq);
+	void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
+				struct kvm_mips_interrupt *irq);
+	int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
+			    uint32_t cause);
+	int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
+			  uint32_t cause);
+	int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
+				    struct kvm_regs *regs);
+	int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
+				    struct kvm_regs *regs);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid(struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+					   struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+					      struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+						struct kvm_mips_tlb *tlb,
+						unsigned long *hpa0,
+						unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+						    uint32_t *opc,
+						    struct kvm_run *run,
+						    struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+				     unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+						   unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+				    struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+						   uint32_t *opc,
+						   struct kvm_run *run,
+						   struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+						      uint32_t *opc,
+						      struct kvm_run *run,
+						      struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+							 uint32_t *opc,
+							 struct kvm_run *run,
+							 struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+							uint32_t *opc,
+							struct kvm_run *run,
+							struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+							 uint32_t *opc,
+							 struct kvm_run *run,
+							 struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+							uint32_t *opc,
+							struct kvm_run *run,
+							struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+						      uint32_t *opc,
+						      struct kvm_run *run,
+						      struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+						uint32_t *opc,
+						struct kvm_run *run,
+						struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+							 struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+					     uint32_t *opc,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+					   uint32_t *opc,
+					   uint32_t cause,
+					   struct kvm_run *run,
+					   struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+					    uint32_t cause,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+				      struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+				   struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
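
The kvm_{read,write,set,clear}_c0_guest_*() accessors above index the shadow
cop0->reg[][] array, so guest CP0 state is manipulated without touching the
real coprocessor. A minimal sketch, assuming CAUSEF_TI from <asm/mipsregs.h>:

	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* advance the guest Count register; raise a timer interrupt
	 * in the guest Cause register when it matches Compare */
	kvm_write_c0_guest_count(cop0, kvm_read_c0_guest_count(cop0) + 1);
	if (kvm_read_c0_guest_count(cop0) == kvm_read_c0_guest_compare(cop0))
		kvm_set_c0_guest_cause(cop0, CAUSEF_TI);
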
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644
index 8fcf8df..0000000
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef BCM63XX_CLK_H_
-#define BCM63XX_CLK_H_
-
-struct clk {
-	void		(*set)(struct clk *, int);
-	unsigned int	rate;
-	unsigned int	usage;
-	int		id;
-};
-
-#endif /* ! BCM63XX_CLK_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index cb922b9..3362289 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -14,11 +14,12 @@
 #define BCM6345_CPU_ID		0x6345
 #define BCM6348_CPU_ID		0x6348
 #define BCM6358_CPU_ID		0x6358
+#define BCM6362_CPU_ID		0x6362
 #define BCM6368_CPU_ID		0x6368
 
 void __init bcm63xx_cpu_init(void);
 u16 __bcm63xx_get_cpu_id(void);
-u16 bcm63xx_get_cpu_rev(void);
+u8 bcm63xx_get_cpu_rev(void);
 unsigned int bcm63xx_get_cpu_freq(void);
 
 #ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@
 # define BCMCPU_IS_6358()	(0)
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+# ifdef bcm63xx_get_cpu_id
+#  undef bcm63xx_get_cpu_id
+#  define bcm63xx_get_cpu_id()	__bcm63xx_get_cpu_id()
+#  define BCMCPU_RUNTIME_DETECT
+# else
+#  define bcm63xx_get_cpu_id()	BCM6362_CPU_ID
+# endif
+# define BCMCPU_IS_6362()	(bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
+#else
+# define BCMCPU_IS_6362()	(0)
+#endif
+
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 # ifdef bcm63xx_get_cpu_id
 #  undef bcm63xx_get_cpu_id
@@ -406,6 +421,62 @@
 
 
 /*
+ * 6362 register sets base address
+ */
+#define BCM_6362_DSL_LMEM_BASE		(0xdeadbeef)
+#define BCM_6362_PERF_BASE		(0xb0000000)
+#define BCM_6362_TIMER_BASE		(0xb0000040)
+#define BCM_6362_WDT_BASE		(0xb000005c)
+#define BCM_6362_UART0_BASE             (0xb0000100)
+#define BCM_6362_UART1_BASE		(0xb0000120)
+#define BCM_6362_GPIO_BASE		(0xb0000080)
+#define BCM_6362_SPI_BASE		(0xb0000800)
+#define BCM_6362_HSSPI_BASE		(0xb0001000)
+#define BCM_6362_UDC0_BASE		(0xdeadbeef)
+#define BCM_6362_USBDMA_BASE		(0xb000c000)
+#define BCM_6362_OHCI0_BASE		(0xb0002600)
+#define BCM_6362_OHCI_PRIV_BASE		(0xdeadbeef)
+#define BCM_6362_USBH_PRIV_BASE		(0xb0002700)
+#define BCM_6362_USBD_BASE		(0xb0002400)
+#define BCM_6362_MPI_BASE		(0xdeadbeef)
+#define BCM_6362_PCMCIA_BASE		(0xdeadbeef)
+#define BCM_6362_PCIE_BASE		(0xb0e40000)
+#define BCM_6362_SDRAM_REGS_BASE	(0xdeadbeef)
+#define BCM_6362_DSL_BASE		(0xdeadbeef)
+#define BCM_6362_UBUS_BASE		(0xdeadbeef)
+#define BCM_6362_ENET0_BASE		(0xdeadbeef)
+#define BCM_6362_ENET1_BASE		(0xdeadbeef)
+#define BCM_6362_ENETDMA_BASE		(0xb000d800)
+#define BCM_6362_ENETDMAC_BASE		(0xb000da00)
+#define BCM_6362_ENETDMAS_BASE		(0xb000dc00)
+#define BCM_6362_ENETSW_BASE		(0xb0e00000)
+#define BCM_6362_EHCI0_BASE		(0xb0002500)
+#define BCM_6362_SDRAM_BASE		(0xdeadbeef)
+#define BCM_6362_MEMC_BASE		(0xdeadbeef)
+#define BCM_6362_DDR_BASE		(0xb0003000)
+#define BCM_6362_M2M_BASE		(0xdeadbeef)
+#define BCM_6362_ATM_BASE		(0xdeadbeef)
+#define BCM_6362_XTM_BASE		(0xb0007800)
+#define BCM_6362_XTMDMA_BASE		(0xb000b800)
+#define BCM_6362_XTMDMAC_BASE		(0xdeadbeef)
+#define BCM_6362_XTMDMAS_BASE		(0xdeadbeef)
+#define BCM_6362_PCM_BASE		(0xb000a800)
+#define BCM_6362_PCMDMA_BASE		(0xdeadbeef)
+#define BCM_6362_PCMDMAC_BASE		(0xdeadbeef)
+#define BCM_6362_PCMDMAS_BASE		(0xdeadbeef)
+#define BCM_6362_RNG_BASE		(0xdeadbeef)
+#define BCM_6362_MISC_BASE		(0xb0001800)
+
+#define BCM_6362_NAND_REG_BASE		(0xb0000200)
+#define BCM_6362_NAND_CACHE_BASE	(0xb0000600)
+#define BCM_6362_LED_BASE		(0xb0001900)
+#define BCM_6362_IPSEC_BASE		(0xb0002800)
+#define BCM_6362_IPSEC_DMA_BASE		(0xb000d000)
+#define BCM_6362_WLAN_CHIPCOMMON_BASE	(0xb0004000)
+#define BCM_6362_WLAN_D11_BASE		(0xb0005000)
+#define BCM_6362_WLAN_SHIM_BASE		(0xb0007000)
+
+/*
  * 6368 register sets base address
  */
 #define BCM_6368_DSL_LMEM_BASE		(0xdeadbeef)
@@ -564,6 +635,9 @@
 #ifdef CONFIG_BCM63XX_CPU_6358
 	__GEN_RSET(6358)
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+	__GEN_RSET(6362)
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
 	__GEN_RSET(6368)
 #endif
@@ -820,6 +894,71 @@
 #define BCM_6358_EXT_IRQ3		(IRQ_INTERNAL_BASE + 28)
 
 /*
+ * 6362 irqs
+ */
+#define BCM_6362_HIGH_IRQ_BASE		(IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6362_TIMER_IRQ		(IRQ_INTERNAL_BASE + 0)
+#define BCM_6362_SPI_IRQ		(IRQ_INTERNAL_BASE + 2)
+#define BCM_6362_UART0_IRQ		(IRQ_INTERNAL_BASE + 3)
+#define BCM_6362_UART1_IRQ		(IRQ_INTERNAL_BASE + 4)
+#define BCM_6362_DSL_IRQ		(IRQ_INTERNAL_BASE + 28)
+#define BCM_6362_UDC0_IRQ		0
+#define BCM_6362_ENET0_IRQ		0
+#define BCM_6362_ENET1_IRQ		0
+#define BCM_6362_ENET_PHY_IRQ		(IRQ_INTERNAL_BASE + 14)
+#define BCM_6362_HSSPI_IRQ		(IRQ_INTERNAL_BASE + 5)
+#define BCM_6362_OHCI0_IRQ		(IRQ_INTERNAL_BASE + 9)
+#define BCM_6362_EHCI0_IRQ		(IRQ_INTERNAL_BASE + 10)
+#define BCM_6362_USBD_IRQ		(IRQ_INTERNAL_BASE + 11)
+#define BCM_6362_USBD_RXDMA0_IRQ	(IRQ_INTERNAL_BASE + 20)
+#define BCM_6362_USBD_TXDMA0_IRQ	(IRQ_INTERNAL_BASE + 21)
+#define BCM_6362_USBD_RXDMA1_IRQ	(IRQ_INTERNAL_BASE + 22)
+#define BCM_6362_USBD_TXDMA1_IRQ	(IRQ_INTERNAL_BASE + 23)
+#define BCM_6362_USBD_RXDMA2_IRQ	(IRQ_INTERNAL_BASE + 24)
+#define BCM_6362_USBD_TXDMA2_IRQ	(IRQ_INTERNAL_BASE + 25)
+#define BCM_6362_PCMCIA_IRQ		0
+#define BCM_6362_ENET0_RXDMA_IRQ	0
+#define BCM_6362_ENET0_TXDMA_IRQ	0
+#define BCM_6362_ENET1_RXDMA_IRQ	0
+#define BCM_6362_ENET1_TXDMA_IRQ	0
+#define BCM_6362_PCI_IRQ		(IRQ_INTERNAL_BASE + 30)
+#define BCM_6362_ATM_IRQ		0
+#define BCM_6362_ENETSW_RXDMA0_IRQ	(BCM_6362_HIGH_IRQ_BASE + 0)
+#define BCM_6362_ENETSW_RXDMA1_IRQ	(BCM_6362_HIGH_IRQ_BASE + 1)
+#define BCM_6362_ENETSW_RXDMA2_IRQ	(BCM_6362_HIGH_IRQ_BASE + 2)
+#define BCM_6362_ENETSW_RXDMA3_IRQ	(BCM_6362_HIGH_IRQ_BASE + 3)
+#define BCM_6362_ENETSW_TXDMA0_IRQ	0
+#define BCM_6362_ENETSW_TXDMA1_IRQ	0
+#define BCM_6362_ENETSW_TXDMA2_IRQ	0
+#define BCM_6362_ENETSW_TXDMA3_IRQ	0
+#define BCM_6362_XTM_IRQ		0
+#define BCM_6362_XTM_DMA0_IRQ		(BCM_6362_HIGH_IRQ_BASE + 12)
+
+#define BCM_6362_RING_OSC_IRQ		(IRQ_INTERNAL_BASE + 1)
+#define BCM_6362_WLAN_GPIO_IRQ		(IRQ_INTERNAL_BASE + 6)
+#define BCM_6362_WLAN_IRQ		(IRQ_INTERNAL_BASE + 7)
+#define BCM_6362_IPSEC_IRQ		(IRQ_INTERNAL_BASE + 8)
+#define BCM_6362_NAND_IRQ		(IRQ_INTERNAL_BASE + 12)
+#define BCM_6362_PCM_IRQ		(IRQ_INTERNAL_BASE + 13)
+#define BCM_6362_DG_IRQ			(IRQ_INTERNAL_BASE + 15)
+#define BCM_6362_EPHY_ENERGY0_IRQ	(IRQ_INTERNAL_BASE + 16)
+#define BCM_6362_EPHY_ENERGY1_IRQ	(IRQ_INTERNAL_BASE + 17)
+#define BCM_6362_EPHY_ENERGY2_IRQ	(IRQ_INTERNAL_BASE + 18)
+#define BCM_6362_EPHY_ENERGY3_IRQ	(IRQ_INTERNAL_BASE + 19)
+#define BCM_6362_IPSEC_DMA0_IRQ		(IRQ_INTERNAL_BASE + 26)
+#define BCM_6362_IPSEC_DMA1_IRQ		(IRQ_INTERNAL_BASE + 27)
+#define BCM_6362_FAP0_IRQ		(IRQ_INTERNAL_BASE + 29)
+#define BCM_6362_PCM_DMA0_IRQ		(BCM_6362_HIGH_IRQ_BASE + 4)
+#define BCM_6362_PCM_DMA1_IRQ		(BCM_6362_HIGH_IRQ_BASE + 5)
+#define BCM_6362_DECT0_IRQ		(BCM_6362_HIGH_IRQ_BASE + 6)
+#define BCM_6362_DECT1_IRQ		(BCM_6362_HIGH_IRQ_BASE + 7)
+#define BCM_6362_EXT_IRQ0		(BCM_6362_HIGH_IRQ_BASE + 8)
+#define BCM_6362_EXT_IRQ1		(BCM_6362_HIGH_IRQ_BASE + 9)
+#define BCM_6362_EXT_IRQ2		(BCM_6362_HIGH_IRQ_BASE + 10)
+#define BCM_6362_EXT_IRQ3		(BCM_6362_HIGH_IRQ_BASE + 11)
+
+/*
  * 6368 irqs
  */
 #define BCM_6368_HIGH_IRQ_BASE		(IRQ_INTERNAL_BASE + 32)
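
When only one CPU type is configured, BCMCPU_IS_6362() collapses to a
compile-time constant; with several types enabled, BCMCPU_RUNTIME_DETECT
turns it into a runtime compare against __bcm63xx_get_cpu_id(). A hedged
sketch of a consumer (the surrounding probe code is an assumption):

	/* pick the per-SoC IRQ mask register offset at probe time */
	if (BCMCPU_IS_6362())
		irq_mask_reg = PERF_IRQMASK_6362_REG;
	else if (BCMCPU_IS_6368())
		irq_mask_reg = PERF_IRQMASK_6368_REG;
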
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index b0184cf..c426cab 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -71,18 +71,13 @@
 
 	return bcm63xx_regs_spi[reg];
 #else
-#ifdef CONFIG_BCM63XX_CPU_6338
-	__GEN_SPI_RSET(6338)
-#endif
-#ifdef CONFIG_BCM63XX_CPU_6348
+#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
 	__GEN_SPI_RSET(6348)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6358
+#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
+	defined(CONFIG_BCM63XX_CPU_6368)
 	__GEN_SPI_RSET(6358)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6368
-	__GEN_SPI_RSET(6368)
-#endif
 #endif
 	return 0;
 }
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 0a9891f..35baa1a 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -17,6 +17,8 @@
 		return 8;
 	case BCM6345_CPU_ID:
 		return 16;
+	case BCM6362_CPU_ID:
+		return 48;
 	case BCM6368_CPU_ID:
 		return 38;
 	case BCM6348_CPU_ID:
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 81b4702..3203fe4 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -10,7 +10,7 @@
 #define REV_CHIPID_SHIFT		16
 #define REV_CHIPID_MASK			(0xffff << REV_CHIPID_SHIFT)
 #define REV_REVID_SHIFT			0
-#define REV_REVID_MASK			(0xffff << REV_REVID_SHIFT)
+#define REV_REVID_MASK			(0xff << REV_REVID_SHIFT)
 
 /* Clock Control register */
 #define PERF_CKCTL_REG			0x4
@@ -112,6 +112,39 @@
 					CKCTL_6358_USBSU_EN |		\
 					CKCTL_6358_EPHY_EN)
 
+#define CKCTL_6362_ADSL_QPROC_EN	(1 << 1)
+#define CKCTL_6362_ADSL_AFE_EN		(1 << 2)
+#define CKCTL_6362_ADSL_EN		(1 << 3)
+#define CKCTL_6362_MIPS_EN		(1 << 4)
+#define CKCTL_6362_WLAN_OCP_EN		(1 << 5)
+#define CKCTL_6362_SWPKT_USB_EN		(1 << 7)
+#define CKCTL_6362_SWPKT_SAR_EN		(1 << 8)
+#define CKCTL_6362_SAR_EN		(1 << 9)
+#define CKCTL_6362_ROBOSW_EN		(1 << 10)
+#define CKCTL_6362_PCM_EN		(1 << 11)
+#define CKCTL_6362_USBD_EN		(1 << 12)
+#define CKCTL_6362_USBH_EN		(1 << 13)
+#define CKCTL_6362_IPSEC_EN		(1 << 14)
+#define CKCTL_6362_SPI_EN		(1 << 15)
+#define CKCTL_6362_HSSPI_EN		(1 << 16)
+#define CKCTL_6362_PCIE_EN		(1 << 17)
+#define CKCTL_6362_FAP_EN		(1 << 18)
+#define CKCTL_6362_PHYMIPS_EN		(1 << 19)
+#define CKCTL_6362_NAND_EN		(1 << 20)
+
+#define CKCTL_6362_ALL_SAFE_EN		(CKCTL_6362_PHYMIPS_EN |	\
+					CKCTL_6362_ADSL_QPROC_EN |	\
+					CKCTL_6362_ADSL_AFE_EN |	\
+					CKCTL_6362_ADSL_EN |		\
+					CKCTL_6362_SAR_EN  |		\
+					CKCTL_6362_PCM_EN  |		\
+					CKCTL_6362_IPSEC_EN |		\
+					CKCTL_6362_USBD_EN |		\
+					CKCTL_6362_USBH_EN |		\
+					CKCTL_6362_ROBOSW_EN |		\
+					CKCTL_6362_PCIE_EN)
+
+
 #define CKCTL_6368_VDSL_QPROC_EN	(1 << 2)
 #define CKCTL_6368_VDSL_AFE_EN		(1 << 3)
 #define CKCTL_6368_VDSL_BONDING_EN	(1 << 4)
@@ -153,6 +186,7 @@
 #define PERF_IRQMASK_6345_REG		0xc
 #define PERF_IRQMASK_6348_REG		0xc
 #define PERF_IRQMASK_6358_REG		0xc
+#define PERF_IRQMASK_6362_REG		0x20
 #define PERF_IRQMASK_6368_REG		0x20
 
 /* Interrupt Status register */
@@ -161,6 +195,7 @@
 #define PERF_IRQSTAT_6345_REG		0x10
 #define PERF_IRQSTAT_6348_REG		0x10
 #define PERF_IRQSTAT_6358_REG		0x10
+#define PERF_IRQSTAT_6362_REG		0x28
 #define PERF_IRQSTAT_6368_REG		0x28
 
 /* External Interrupt Configuration register */
@@ -169,6 +204,7 @@
 #define PERF_EXTIRQ_CFG_REG_6345	0x14
 #define PERF_EXTIRQ_CFG_REG_6348	0x14
 #define PERF_EXTIRQ_CFG_REG_6358	0x14
+#define PERF_EXTIRQ_CFG_REG_6362	0x18
 #define PERF_EXTIRQ_CFG_REG_6368	0x18
 
 #define PERF_EXTIRQ_CFG_REG2_6368	0x1c
@@ -197,6 +233,7 @@
 #define PERF_SOFTRESET_REG		0x28
 #define PERF_SOFTRESET_6328_REG		0x10
 #define PERF_SOFTRESET_6358_REG		0x34
+#define PERF_SOFTRESET_6362_REG		0x10
 #define PERF_SOFTRESET_6368_REG		0x10
 
 #define SOFTRESET_6328_SPI_MASK		(1 << 0)
@@ -259,6 +296,22 @@
 #define SOFTRESET_6358_PCM_MASK		(1 << 13)
 #define SOFTRESET_6358_ADSL_MASK	(1 << 14)
 
+#define SOFTRESET_6362_SPI_MASK		(1 << 0)
+#define SOFTRESET_6362_IPSEC_MASK	(1 << 1)
+#define SOFTRESET_6362_EPHY_MASK	(1 << 2)
+#define SOFTRESET_6362_SAR_MASK		(1 << 3)
+#define SOFTRESET_6362_ENETSW_MASK	(1 << 4)
+#define SOFTRESET_6362_USBS_MASK	(1 << 5)
+#define SOFTRESET_6362_USBH_MASK	(1 << 6)
+#define SOFTRESET_6362_PCM_MASK		(1 << 7)
+#define SOFTRESET_6362_PCIE_CORE_MASK	(1 << 8)
+#define SOFTRESET_6362_PCIE_MASK	(1 << 9)
+#define SOFTRESET_6362_PCIE_EXT_MASK	(1 << 10)
+#define SOFTRESET_6362_WLAN_SHIM_MASK	(1 << 11)
+#define SOFTRESET_6362_DDR_PHY_MASK	(1 << 12)
+#define SOFTRESET_6362_FAP_MASK		(1 << 13)
+#define SOFTRESET_6362_WLAN_UBUS_MASK	(1 << 14)
+
 #define SOFTRESET_6368_SPI_MASK		(1 << 0)
 #define SOFTRESET_6368_MPI_MASK		(1 << 3)
 #define SOFTRESET_6368_EPHY_MASK	(1 << 6)
@@ -1223,24 +1276,7 @@
  * _REG relative to RSET_SPI
  *************************************************************************/
 
-/* BCM 6338 SPI core */
-#define SPI_6338_CMD			0x00	/* 16-bits register */
-#define SPI_6338_INT_STATUS		0x02
-#define SPI_6338_INT_MASK_ST		0x03
-#define SPI_6338_INT_MASK		0x04
-#define SPI_6338_ST			0x05
-#define SPI_6338_CLK_CFG		0x06
-#define SPI_6338_FILL_BYTE		0x07
-#define SPI_6338_MSG_TAIL		0x09
-#define SPI_6338_RX_TAIL		0x0b
-#define SPI_6338_MSG_CTL		0x40	/* 8-bits register */
-#define SPI_6338_MSG_CTL_WIDTH		8
-#define SPI_6338_MSG_DATA		0x41
-#define SPI_6338_MSG_DATA_SIZE		0x3f
-#define SPI_6338_RX_DATA		0x80
-#define SPI_6338_RX_DATA_SIZE		0x3f
-
-/* BCM 6348 SPI core */
+/* BCM 6338/6348 SPI core */
 #define SPI_6348_CMD			0x00	/* 16-bits register */
 #define SPI_6348_INT_STATUS		0x02
 #define SPI_6348_INT_MASK_ST		0x03
@@ -1257,7 +1293,7 @@
 #define SPI_6348_RX_DATA		0x80
 #define SPI_6348_RX_DATA_SIZE		0x3f
 
-/* BCM 6358 SPI core */
+/* BCM 6358/6362/6368 SPI core */
 #define SPI_6358_MSG_CTL		0x00	/* 16-bits register */
 #define SPI_6358_MSG_CTL_WIDTH		16
 #define SPI_6358_MSG_DATA		0x02
@@ -1274,23 +1310,6 @@
 #define SPI_6358_MSG_TAIL		0x709
 #define SPI_6358_RX_TAIL		0x70B
 
-/* BCM 6358 SPI core */
-#define SPI_6368_MSG_CTL		0x00	/* 16-bits register */
-#define SPI_6368_MSG_CTL_WIDTH		16
-#define SPI_6368_MSG_DATA		0x02
-#define SPI_6368_MSG_DATA_SIZE		0x21e
-#define SPI_6368_RX_DATA		0x400
-#define SPI_6368_RX_DATA_SIZE		0x220
-#define SPI_6368_CMD			0x700	/* 16-bits register */
-#define SPI_6368_INT_STATUS		0x702
-#define SPI_6368_INT_MASK_ST		0x703
-#define SPI_6368_INT_MASK		0x704
-#define SPI_6368_ST			0x705
-#define SPI_6368_CLK_CFG		0x706
-#define SPI_6368_FILL_BYTE		0x707
-#define SPI_6368_MSG_TAIL		0x709
-#define SPI_6368_RX_TAIL		0x70B
-
 /* Shared SPI definitions */
 
 /* Message configuration */
@@ -1298,10 +1317,8 @@
 #define SPI_HD_W			0x01
 #define SPI_HD_R			0x02
 #define SPI_BYTE_CNT_SHIFT		0
-#define SPI_6338_MSG_TYPE_SHIFT		6
 #define SPI_6348_MSG_TYPE_SHIFT		6
 #define SPI_6358_MSG_TYPE_SHIFT		14
-#define SPI_6368_MSG_TYPE_SHIFT		14
 
 /* Command */
 #define SPI_CMD_NOOP			0x00
@@ -1348,10 +1365,18 @@
 /*************************************************************************
  * _REG relative to RSET_MISC
  *************************************************************************/
-#define MISC_SERDES_CTRL_REG		0x0
+#define MISC_SERDES_CTRL_6328_REG	0x0
+#define MISC_SERDES_CTRL_6362_REG	0x4
 #define SERDES_PCIE_EN			(1 << 0)
 #define SERDES_PCIE_EXD_EN		(1 << 15)
 
+#define MISC_STRAPBUS_6362_REG		0x14
+#define STRAPBUS_6362_FCVO_SHIFT	1
+#define STRAPBUS_6362_HSSPI_CLK_FAST	(1 << 13)
+#define STRAPBUS_6362_FCVO_MASK		(0x1f << STRAPBUS_6362_FCVO_SHIFT)
+#define STRAPBUS_6362_BOOT_SEL_SERIAL	(1 << 15)
+#define STRAPBUS_6362_BOOT_SEL_NAND	(0 << 15)
+
 #define MISC_STRAPBUS_6328_REG		0x240
 #define STRAPBUS_6328_FCVO_SHIFT	7
 #define STRAPBUS_6328_FCVO_MASK		(0x1f << STRAPBUS_6328_FCVO_SHIFT)
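
The 6362 strap register packs a 5-bit clock field and a boot-select bit. A
hedged decode sketch (bcm_misc_readl() is assumed from the bcm63xx register
I/O helpers, not part of this diff):

	u32 strap = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
	unsigned int fcvo = (strap & STRAPBUS_6362_FCVO_MASK)
				>> STRAPBUS_6362_FCVO_SHIFT;
	bool nand_boot = !(strap & STRAPBUS_6362_BOOT_SEL_SERIAL);
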
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index 30931c4..94e3011 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -19,6 +19,7 @@
 			return 1;
 		break;
 	case BCM6328_CPU_ID:
+	case BCM6362_CPU_ID:
 	case BCM6368_CPU_ID:
 		if (offset >= 0xb0000000 && offset < 0xb1000000)
 			return 1;
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 9c95177..fe23034 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -61,9 +61,8 @@
 {
 #ifdef CONFIG_DMA_COHERENT
 	return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
-	return 0;
+#else
+	return coherentio;
 #endif
 }
 
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 73d717a..5b2f2e6 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -20,14 +20,21 @@
 #endif
 
 #ifdef CONFIG_32BIT
-
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE		_AC(0x40000000, UL)
+#else
 #define CAC_BASE		_AC(0x80000000, UL)
+#endif
 #define IO_BASE			_AC(0xa0000000, UL)
 #define UNCAC_BASE		_AC(0xa0000000, UL)
 
 #ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE		_AC(0x60000000, UL)
+#else
 #define MAP_BASE		_AC(0xc0000000, UL)
 #endif
+#endif
 
 /*
  * Memory above this physical address will be considered highmem.
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 75fd8c0..c0f3ef4 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -57,5 +57,6 @@
 #define cpu_has_vint		0
 #define cpu_has_vtag_icache	0
 #define cpu_has_watch		1
+#define cpu_has_local_ebase	0
 
 #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644
index 0000000..9809972
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _MT7620_REGS_H_
+#define _MT7620_REGS_H_
+
+#define MT7620_SYSC_BASE		0x10000000
+
+#define SYSC_REG_CHIP_NAME0		0x00
+#define SYSC_REG_CHIP_NAME1		0x04
+#define SYSC_REG_CHIP_REV		0x0c
+#define SYSC_REG_SYSTEM_CONFIG0		0x10
+#define SYSC_REG_SYSTEM_CONFIG1		0x14
+#define SYSC_REG_CPLL_CONFIG0		0x54
+#define SYSC_REG_CPLL_CONFIG1		0x58
+
+#define MT7620N_CHIP_NAME0		0x33365452
+#define MT7620N_CHIP_NAME1		0x20203235
+
+#define MT7620A_CHIP_NAME0		0x3637544d
+#define MT7620A_CHIP_NAME1		0x20203032
+
+#define CHIP_REV_PKG_MASK		0x1
+#define CHIP_REV_PKG_SHIFT		16
+#define CHIP_REV_VER_MASK		0xf
+#define CHIP_REV_VER_SHIFT		8
+#define CHIP_REV_ECO_MASK		0xf
+
+#define CPLL_SW_CONFIG_SHIFT		31
+#define CPLL_SW_CONFIG_MASK		0x1
+#define CPLL_CPU_CLK_SHIFT		24
+#define CPLL_CPU_CLK_MASK		0x1
+#define CPLL_MULT_RATIO_SHIFT           16
+#define CPLL_MULT_RATIO                 0x7
+#define CPLL_DIV_RATIO_SHIFT            10
+#define CPLL_DIV_RATIO                  0x3
+
+#define SYSCFG0_DRAM_TYPE_MASK		0x3
+#define SYSCFG0_DRAM_TYPE_SHIFT		4
+#define SYSCFG0_DRAM_TYPE_SDRAM		0
+#define SYSCFG0_DRAM_TYPE_DDR1		1
+#define SYSCFG0_DRAM_TYPE_DDR2		2
+
+#define MT7620_DRAM_BASE		0x0
+#define MT7620_SDRAM_SIZE_MIN		2
+#define MT7620_SDRAM_SIZE_MAX		64
+#define MT7620_DDR1_SIZE_MIN		32
+#define MT7620_DDR1_SIZE_MAX		128
+#define MT7620_DDR2_SIZE_MIN		32
+#define MT7620_DDR2_SIZE_MAX		256
+
+#define MT7620_GPIO_MODE_I2C		BIT(0)
+#define MT7620_GPIO_MODE_UART0_SHIFT	2
+#define MT7620_GPIO_MODE_UART0_MASK	0x7
+#define MT7620_GPIO_MODE_UART0(x)	((x) << MT7620_GPIO_MODE_UART0_SHIFT)
+#define MT7620_GPIO_MODE_UARTF		0x0
+#define MT7620_GPIO_MODE_PCM_UARTF	0x1
+#define MT7620_GPIO_MODE_PCM_I2S	0x2
+#define MT7620_GPIO_MODE_I2S_UARTF	0x3
+#define MT7620_GPIO_MODE_PCM_GPIO	0x4
+#define MT7620_GPIO_MODE_GPIO_UARTF	0x5
+#define MT7620_GPIO_MODE_GPIO_I2S	0x6
+#define MT7620_GPIO_MODE_GPIO		0x7
+#define MT7620_GPIO_MODE_UART1		BIT(5)
+#define MT7620_GPIO_MODE_MDIO		BIT(8)
+#define MT7620_GPIO_MODE_RGMII1		BIT(9)
+#define MT7620_GPIO_MODE_RGMII2		BIT(10)
+#define MT7620_GPIO_MODE_SPI		BIT(11)
+#define MT7620_GPIO_MODE_SPI_REF_CLK	BIT(12)
+#define MT7620_GPIO_MODE_WLED		BIT(13)
+#define MT7620_GPIO_MODE_JTAG		BIT(15)
+#define MT7620_GPIO_MODE_EPHY		BIT(15)
+#define MT7620_GPIO_MODE_WDT		BIT(22)
+
+#endif
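
The CHIP_REV fields are packed mask/shift style, so a probe routine splits
them as below; rt_sysc_r32() is assumed from the common Ralink register
helpers:

	u32 rev = rt_sysc_r32(SYSC_REG_CHIP_REV);
	int pkg = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK;
	int ver = (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK;
	int eco = rev & CHIP_REV_ECO_MASK;
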
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644
index 0000000..03ad716
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT288X_REGS_H_
+#define _RT288X_REGS_H_
+
+#define RT2880_SYSC_BASE		0x00300000
+
+#define SYSC_REG_CHIP_NAME0		0x00
+#define SYSC_REG_CHIP_NAME1		0x04
+#define SYSC_REG_CHIP_ID		0x0c
+#define SYSC_REG_SYSTEM_CONFIG		0x10
+#define SYSC_REG_CLKCFG			0x30
+
+#define RT2880_CHIP_NAME0		0x38325452
+#define RT2880_CHIP_NAME1		0x20203038
+
+#define CHIP_ID_ID_MASK			0xff
+#define CHIP_ID_ID_SHIFT		8
+#define CHIP_ID_REV_MASK		0xff
+
+#define SYSTEM_CONFIG_CPUCLK_SHIFT	20
+#define SYSTEM_CONFIG_CPUCLK_MASK	0x3
+#define SYSTEM_CONFIG_CPUCLK_250	0x0
+#define SYSTEM_CONFIG_CPUCLK_266	0x1
+#define SYSTEM_CONFIG_CPUCLK_280	0x2
+#define SYSTEM_CONFIG_CPUCLK_300	0x3
+
+#define RT2880_GPIO_MODE_I2C		BIT(0)
+#define RT2880_GPIO_MODE_UART0		BIT(1)
+#define RT2880_GPIO_MODE_SPI		BIT(2)
+#define RT2880_GPIO_MODE_UART1		BIT(3)
+#define RT2880_GPIO_MODE_JTAG		BIT(4)
+#define RT2880_GPIO_MODE_MDIO		BIT(5)
+#define RT2880_GPIO_MODE_SDRAM		BIT(6)
+#define RT2880_GPIO_MODE_PCI		BIT(7)
+
+#define CLKCFG_SRAM_CS_N_WDT		BIT(9)
+
+#define RT2880_SDRAM_BASE		0x08000000
+#define RT2880_MEM_SIZE_MIN		2
+#define RT2880_MEM_SIZE_MAX		128
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644
index 0000000..72fc106
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT288x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *	Copyright (C) 2003, 2004 Ralf Baechle
+ *	Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
+#define _RT288X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb		1
+#define cpu_has_4kex		1
+#define cpu_has_3k_cache	0
+#define cpu_has_4k_cache	1
+#define cpu_has_tx39_cache	0
+#define cpu_has_sb1_cache	0
+#define cpu_has_fpu		0
+#define cpu_has_32fpr		0
+#define cpu_has_counter		1
+#define cpu_has_watch		1
+#define cpu_has_divec		1
+
+#define cpu_has_prefetch	1
+#define cpu_has_ejtag		1
+#define cpu_has_llsc		1
+
+#define cpu_has_mips16		1
+#define cpu_has_mdmx		0
+#define cpu_has_mips3d		0
+#define cpu_has_smartmips	0
+
+#define cpu_has_mips32r1	1
+#define cpu_has_mips32r2	1
+#define cpu_has_mips64r1	0
+#define cpu_has_mips64r2	0
+
+#define cpu_has_dsp		0
+#define cpu_has_mipsmt		0
+
+#define cpu_has_64bits		0
+#define cpu_has_64bit_zero_reg	0
+#define cpu_has_64bit_gp_regs	0
+#define cpu_has_64bit_addresses	0
+
+#define cpu_dcache_line_size()	16
+#define cpu_icache_line_size()	16
+
+#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 7d344f2..069bf37 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -97,6 +97,14 @@
 #define RT5350_SYSCFG0_CPUCLK_320	0x2
 #define RT5350_SYSCFG0_CPUCLK_300	0x3
 
+#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT  12
+#define RT5350_SYSCFG0_DRAM_SIZE_MASK   7
+#define RT5350_SYSCFG0_DRAM_SIZE_2M     0
+#define RT5350_SYSCFG0_DRAM_SIZE_8M     1
+#define RT5350_SYSCFG0_DRAM_SIZE_16M    2
+#define RT5350_SYSCFG0_DRAM_SIZE_32M    3
+#define RT5350_SYSCFG0_DRAM_SIZE_64M    4
+
 /* multi function gpio pins */
 #define RT305X_GPIO_I2C_SD		1
 #define RT305X_GPIO_I2C_SCLK		2
@@ -136,4 +144,23 @@
 #define RT305X_GPIO_MODE_SDRAM		BIT(8)
 #define RT305X_GPIO_MODE_RGMII		BIT(9)
 
+#define RT3352_SYSC_REG_SYSCFG0		0x010
+#define RT3352_SYSC_REG_SYSCFG1         0x014
+#define RT3352_SYSC_REG_CLKCFG1         0x030
+#define RT3352_SYSC_REG_RSTCTRL         0x034
+#define RT3352_SYSC_REG_USB_PS          0x05c
+
+#define RT3352_CLKCFG0_XTAL_SEL		BIT(20)
+#define RT3352_CLKCFG1_UPHY0_CLK_EN	BIT(18)
+#define RT3352_CLKCFG1_UPHY1_CLK_EN	BIT(20)
+#define RT3352_RSTCTRL_UHST		BIT(22)
+#define RT3352_RSTCTRL_UDEV		BIT(25)
+#define RT3352_SYSCFG1_USB0_HOST_MODE	BIT(10)
+
+#define RT305X_SDRAM_BASE		0x00000000
+#define RT305X_MEM_SIZE_MIN		2
+#define RT305X_MEM_SIZE_MAX		64
+#define RT3352_MEM_SIZE_MIN		2
+#define RT3352_MEM_SIZE_MAX		256
+
 #endif
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644
index 0000000..917c286
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT305x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *	Copyright (C) 2003, 2004 Ralf Baechle
+ *	Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
+#define _RT305X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb		1
+#define cpu_has_4kex		1
+#define cpu_has_3k_cache	0
+#define cpu_has_4k_cache	1
+#define cpu_has_tx39_cache	0
+#define cpu_has_sb1_cache	0
+#define cpu_has_fpu		0
+#define cpu_has_32fpr		0
+#define cpu_has_counter		1
+#define cpu_has_watch		1
+#define cpu_has_divec		1
+
+#define cpu_has_prefetch	1
+#define cpu_has_ejtag		1
+#define cpu_has_llsc		1
+
+#define cpu_has_mips16		1
+#define cpu_has_mdmx		0
+#define cpu_has_mips3d		0
+#define cpu_has_smartmips	0
+
+#define cpu_has_mips32r1	1
+#define cpu_has_mips32r2	1
+#define cpu_has_mips64r1	0
+#define cpu_has_mips64r2	0
+
+#define cpu_has_dsp		1
+#define cpu_has_mipsmt		0
+
+#define cpu_has_64bits		0
+#define cpu_has_64bit_zero_reg	0
+#define cpu_has_64bit_gp_regs	0
+#define cpu_has_64bit_addresses	0
+
+#define cpu_dcache_line_size()	32
+#define cpu_icache_line_size()	32
+
+#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644
index 0000000..058382f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -0,0 +1,252 @@
+/*
+ * Ralink RT3662/RT3883 SoC register definitions
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RT3883_REGS_H_
+#define _RT3883_REGS_H_
+
+#include <linux/bitops.h>
+
+#define RT3883_SDRAM_BASE	0x00000000
+#define RT3883_SYSC_BASE	0x10000000
+#define RT3883_TIMER_BASE	0x10000100
+#define RT3883_INTC_BASE	0x10000200
+#define RT3883_MEMC_BASE	0x10000300
+#define RT3883_UART0_BASE	0x10000500
+#define RT3883_PIO_BASE		0x10000600
+#define RT3883_FSCC_BASE	0x10000700
+#define RT3883_NANDC_BASE	0x10000810
+#define RT3883_I2C_BASE		0x10000900
+#define RT3883_I2S_BASE		0x10000a00
+#define RT3883_SPI_BASE		0x10000b00
+#define RT3883_UART1_BASE	0x10000c00
+#define RT3883_PCM_BASE		0x10002000
+#define RT3883_GDMA_BASE	0x10002800
+#define RT3883_CODEC1_BASE	0x10003000
+#define RT3883_CODEC2_BASE	0x10003800
+#define RT3883_FE_BASE		0x10100000
+#define RT3883_ROM_BASE		0x10118000
+#define RT3883_USBDEV_BASE	0x10112000
+#define RT3883_PCI_BASE		0x10140000
+#define RT3883_WLAN_BASE	0x10180000
+#define RT3883_USBHOST_BASE	0x101c0000
+#define RT3883_BOOT_BASE	0x1c000000
+#define RT3883_SRAM_BASE	0x1e000000
+#define RT3883_PCIMEM_BASE	0x20000000
+
+#define RT3883_EHCI_BASE	(RT3883_USBHOST_BASE)
+#define RT3883_OHCI_BASE	(RT3883_USBHOST_BASE + 0x1000)
+
+#define RT3883_SYSC_SIZE	0x100
+#define RT3883_TIMER_SIZE	0x100
+#define RT3883_INTC_SIZE	0x100
+#define RT3883_MEMC_SIZE	0x100
+#define RT3883_UART0_SIZE	0x100
+#define RT3883_UART1_SIZE	0x100
+#define RT3883_PIO_SIZE		0x100
+#define RT3883_FSCC_SIZE	0x100
+#define RT3883_NANDC_SIZE	0x0f0
+#define RT3883_I2C_SIZE		0x100
+#define RT3883_I2S_SIZE		0x100
+#define RT3883_SPI_SIZE		0x100
+#define RT3883_PCM_SIZE		0x800
+#define RT3883_GDMA_SIZE	0x800
+#define RT3883_CODEC1_SIZE	0x800
+#define RT3883_CODEC2_SIZE	0x800
+#define RT3883_FE_SIZE		0x10000
+#define RT3883_ROM_SIZE		0x4000
+#define RT3883_USBDEV_SIZE	0x4000
+#define RT3883_PCI_SIZE		0x40000
+#define RT3883_WLAN_SIZE	0x40000
+#define RT3883_USBHOST_SIZE	0x40000
+#define RT3883_BOOT_SIZE	(32 * 1024 * 1024)
+#define RT3883_SRAM_SIZE	(32 * 1024 * 1024)
+
+/* SYSC registers */
+#define RT3883_SYSC_REG_CHIPID0_3	0x00	/* Chip ID 0 */
+#define RT3883_SYSC_REG_CHIPID4_7	0x04	/* Chip ID 1 */
+#define RT3883_SYSC_REG_REVID		0x0c	/* Chip Revision Identification */
+#define RT3883_SYSC_REG_SYSCFG0		0x10	/* System Configuration 0 */
+#define RT3883_SYSC_REG_SYSCFG1		0x14	/* System Configuration 1 */
+#define RT3883_SYSC_REG_CLKCFG0		0x2c	/* Clock Configuration 0 */
+#define RT3883_SYSC_REG_CLKCFG1		0x30	/* Clock Configuration 1 */
+#define RT3883_SYSC_REG_RSTCTRL		0x34	/* Reset Control */
+#define RT3883_SYSC_REG_RSTSTAT		0x38	/* Reset Status */
+#define RT3883_SYSC_REG_USB_PS		0x5c	/* USB Power saving control */
+#define RT3883_SYSC_REG_GPIO_MODE	0x60	/* GPIO Purpose Select */
+#define RT3883_SYSC_REG_PCIE_CLK_GEN0	0x7c
+#define RT3883_SYSC_REG_PCIE_CLK_GEN1	0x80
+#define RT3883_SYSC_REG_PCIE_CLK_GEN2	0x84
+#define RT3883_SYSC_REG_PMU		0x88
+#define RT3883_SYSC_REG_PMU1		0x8c
+
+#define RT3883_CHIP_NAME0		0x38335452
+#define RT3883_CHIP_NAME1		0x20203338
+
+#define RT3883_REVID_VER_ID_MASK	0x0f
+#define RT3883_REVID_VER_ID_SHIFT	8
+#define RT3883_REVID_ECO_ID_MASK	0x0f
+
+#define RT3883_SYSCFG0_DRAM_TYPE_DDR2	BIT(17)
+#define RT3883_SYSCFG0_CPUCLK_SHIFT	8
+#define RT3883_SYSCFG0_CPUCLK_MASK	0x3
+#define RT3883_SYSCFG0_CPUCLK_250	0x0
+#define RT3883_SYSCFG0_CPUCLK_384	0x1
+#define RT3883_SYSCFG0_CPUCLK_480	0x2
+#define RT3883_SYSCFG0_CPUCLK_500	0x3
+
+#define RT3883_SYSCFG1_USB0_HOST_MODE	BIT(10)
+#define RT3883_SYSCFG1_PCIE_RC_MODE	BIT(8)
+#define RT3883_SYSCFG1_PCI_HOST_MODE	BIT(7)
+#define RT3883_SYSCFG1_PCI_66M_MODE	BIT(6)
+#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT	BIT(2)
+
+#define RT3883_CLKCFG1_PCIE_CLK_EN	BIT(21)
+#define RT3883_CLKCFG1_UPHY1_CLK_EN	BIT(20)
+#define RT3883_CLKCFG1_PCI_CLK_EN	BIT(19)
+#define RT3883_CLKCFG1_UPHY0_CLK_EN	BIT(18)
+
+#define RT3883_GPIO_MODE_I2C		BIT(0)
+#define RT3883_GPIO_MODE_SPI		BIT(1)
+#define RT3883_GPIO_MODE_UART0_SHIFT	2
+#define RT3883_GPIO_MODE_UART0_MASK	0x7
+#define RT3883_GPIO_MODE_UART0(x)	((x) << RT3883_GPIO_MODE_UART0_SHIFT)
+#define RT3883_GPIO_MODE_UARTF		0x0
+#define RT3883_GPIO_MODE_PCM_UARTF	0x1
+#define RT3883_GPIO_MODE_PCM_I2S	0x2
+#define RT3883_GPIO_MODE_I2S_UARTF	0x3
+#define RT3883_GPIO_MODE_PCM_GPIO	0x4
+#define RT3883_GPIO_MODE_GPIO_UARTF	0x5
+#define RT3883_GPIO_MODE_GPIO_I2S	0x6
+#define RT3883_GPIO_MODE_GPIO		0x7
+#define RT3883_GPIO_MODE_UART1		BIT(5)
+#define RT3883_GPIO_MODE_JTAG		BIT(6)
+#define RT3883_GPIO_MODE_MDIO		BIT(7)
+#define RT3883_GPIO_MODE_GE1		BIT(9)
+#define RT3883_GPIO_MODE_GE2		BIT(10)
+#define RT3883_GPIO_MODE_PCI_SHIFT	11
+#define RT3883_GPIO_MODE_PCI_MASK	0x7
+#define RT3883_GPIO_MODE_PCI		(RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_SHIFT	16
+#define RT3883_GPIO_MODE_LNA_A_MASK	0x3
+#define _RT3883_GPIO_MODE_LNA_A(_x)	((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_GPIO	0x3
+#define RT3883_GPIO_MODE_LNA_A		_RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
+#define RT3883_GPIO_MODE_LNA_G_SHIFT	18
+#define RT3883_GPIO_MODE_LNA_G_MASK	0x3
+#define _RT3883_GPIO_MODE_LNA_G(_x)	((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
+#define RT3883_GPIO_MODE_LNA_G_GPIO	0x3
+#define RT3883_GPIO_MODE_LNA_G		_RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+#define RT3883_GPIO_I2C_SD		1
+#define RT3883_GPIO_I2C_SCLK		2
+#define RT3883_GPIO_SPI_CS0		3
+#define RT3883_GPIO_SPI_CLK		4
+#define RT3883_GPIO_SPI_MOSI		5
+#define RT3883_GPIO_SPI_MISO		6
+#define RT3883_GPIO_7			7
+#define RT3883_GPIO_10			10
+#define RT3883_GPIO_11			11
+#define RT3883_GPIO_14			14
+#define RT3883_GPIO_UART1_TXD		15
+#define RT3883_GPIO_UART1_RXD		16
+#define RT3883_GPIO_JTAG_TDO		17
+#define RT3883_GPIO_JTAG_TDI		18
+#define RT3883_GPIO_JTAG_TMS		19
+#define RT3883_GPIO_JTAG_TCLK		20
+#define RT3883_GPIO_JTAG_TRST_N		21
+#define RT3883_GPIO_MDIO_MDC		22
+#define RT3883_GPIO_MDIO_MDIO		23
+#define RT3883_GPIO_LNA_PE_A0		32
+#define RT3883_GPIO_LNA_PE_A1		33
+#define RT3883_GPIO_LNA_PE_A2		34
+#define RT3883_GPIO_LNA_PE_G0		35
+#define RT3883_GPIO_LNA_PE_G1		36
+#define RT3883_GPIO_LNA_PE_G2		37
+#define RT3883_GPIO_PCI_AD0		40
+#define RT3883_GPIO_PCI_AD31		71
+#define RT3883_GPIO_GE2_TXD0		72
+#define RT3883_GPIO_GE2_TXD1		73
+#define RT3883_GPIO_GE2_TXD2		74
+#define RT3883_GPIO_GE2_TXD3		75
+#define RT3883_GPIO_GE2_TXEN		76
+#define RT3883_GPIO_GE2_TXCLK		77
+#define RT3883_GPIO_GE2_RXD0		78
+#define RT3883_GPIO_GE2_RXD1		79
+#define RT3883_GPIO_GE2_RXD2		80
+#define RT3883_GPIO_GE2_RXD3		81
+#define RT3883_GPIO_GE2_RXDV		82
+#define RT3883_GPIO_GE2_RXCLK		83
+#define RT3883_GPIO_GE1_TXD0		84
+#define RT3883_GPIO_GE1_TXD1		85
+#define RT3883_GPIO_GE1_TXD2		86
+#define RT3883_GPIO_GE1_TXD3		87
+#define RT3883_GPIO_GE1_TXEN		88
+#define RT3883_GPIO_GE1_TXCLK		89
+#define RT3883_GPIO_GE1_RXD0		90
+#define RT3883_GPIO_GE1_RXD1		91
+#define RT3883_GPIO_GE1_RXD2		92
+#define RT3883_GPIO_GE1_RXD3		93
+#define RT3883_GPIO_GE1_RXDV		94
+#define RT3883_GPIO_GE1_RXCLK		95
+
+#define RT3883_RSTCTRL_PCIE_PCI_PDM	BIT(27)
+#define RT3883_RSTCTRL_FLASH		BIT(26)
+#define RT3883_RSTCTRL_UDEV		BIT(25)
+#define RT3883_RSTCTRL_PCI		BIT(24)
+#define RT3883_RSTCTRL_PCIE		BIT(23)
+#define RT3883_RSTCTRL_UHST		BIT(22)
+#define RT3883_RSTCTRL_FE		BIT(21)
+#define RT3883_RSTCTRL_WLAN		BIT(20)
+#define RT3883_RSTCTRL_UART1		BIT(29)
+#define RT3883_RSTCTRL_SPI		BIT(18)
+#define RT3883_RSTCTRL_I2S		BIT(17)
+#define RT3883_RSTCTRL_I2C		BIT(16)
+#define RT3883_RSTCTRL_NAND		BIT(15)
+#define RT3883_RSTCTRL_DMA		BIT(14)
+#define RT3883_RSTCTRL_PIO		BIT(13)
+#define RT3883_RSTCTRL_UART		BIT(12)
+#define RT3883_RSTCTRL_PCM		BIT(11)
+#define RT3883_RSTCTRL_MC		BIT(10)
+#define RT3883_RSTCTRL_INTC		BIT(9)
+#define RT3883_RSTCTRL_TIMER		BIT(8)
+#define RT3883_RSTCTRL_SYS		BIT(0)
+
+#define RT3883_INTC_INT_SYSCTL	BIT(0)
+#define RT3883_INTC_INT_TIMER0	BIT(1)
+#define RT3883_INTC_INT_TIMER1	BIT(2)
+#define RT3883_INTC_INT_IA	BIT(3)
+#define RT3883_INTC_INT_PCM	BIT(4)
+#define RT3883_INTC_INT_UART0	BIT(5)
+#define RT3883_INTC_INT_PIO	BIT(6)
+#define RT3883_INTC_INT_DMA	BIT(7)
+#define RT3883_INTC_INT_NAND	BIT(8)
+#define RT3883_INTC_INT_PERFC	BIT(9)
+#define RT3883_INTC_INT_I2S	BIT(10)
+#define RT3883_INTC_INT_UART1	BIT(12)
+#define RT3883_INTC_INT_UHST	BIT(18)
+#define RT3883_INTC_INT_UDEV	BIT(19)
+
+/* FLASH/SRAM/Codec Controller registers */
+#define RT3883_FSCC_REG_FLASH_CFG0	0x00
+#define RT3883_FSCC_REG_FLASH_CFG1	0x04
+#define RT3883_FSCC_REG_CODEC_CFG0	0x40
+#define RT3883_FSCC_REG_CODEC_CFG1	0x44
+
+#define RT3883_FLASH_CFG_WIDTH_SHIFT	26
+#define RT3883_FLASH_CFG_WIDTH_MASK	0x3
+#define RT3883_FLASH_CFG_WIDTH_8BIT	0x0
+#define RT3883_FLASH_CFG_WIDTH_16BIT	0x1
+#define RT3883_FLASH_CFG_WIDTH_32BIT	0x2
+
+#define RT3883_SDRAM_BASE		0x00000000
+#define RT3883_MEM_SIZE_MIN		2
+#define RT3883_MEM_SIZE_MAX		256
+
+#endif /* _RT3883_REGS_H_ */
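The GPIO_MODE shift/mask macros above compose pin-multiplexer settings; for instance, parking the shared UARTF pins as plain GPIOs means rewriting the 3-bit UART0 field. A sketch assuming hypothetical rt_sysc_r32()/rt_sysc_w32() accessors (not part of this patch):

	static void rt3883_uartf_pins_to_gpio(void)
	{
		u32 mode = rt_sysc_r32(RT3883_SYSC_REG_GPIO_MODE);

		mode &= ~RT3883_GPIO_MODE_UART0(RT3883_GPIO_MODE_UART0_MASK);
		mode |= RT3883_GPIO_MODE_UART0(RT3883_GPIO_MODE_GPIO);
		rt_sysc_w32(mode, RT3883_SYSC_REG_GPIO_MODE);
	}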
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644
index 0000000..181fbf4
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
@@ -0,0 +1,55 @@
+/*
+ * Ralink RT3662/RT3883 specific CPU feature overrides
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *	Copyright (C) 2003, 2004 Ralf Baechle
+ *	Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
+#define _RT3883_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb		1
+#define cpu_has_4kex		1
+#define cpu_has_3k_cache	0
+#define cpu_has_4k_cache	1
+#define cpu_has_tx39_cache	0
+#define cpu_has_sb1_cache	0
+#define cpu_has_fpu		0
+#define cpu_has_32fpr		0
+#define cpu_has_counter		1
+#define cpu_has_watch		1
+#define cpu_has_divec		1
+
+#define cpu_has_prefetch	1
+#define cpu_has_ejtag		1
+#define cpu_has_llsc		1
+
+#define cpu_has_mips16		1
+#define cpu_has_mdmx		0
+#define cpu_has_mips3d		0
+#define cpu_has_smartmips	0
+
+#define cpu_has_mips32r1	1
+#define cpu_has_mips32r2	1
+#define cpu_has_mips64r1	0
+#define cpu_has_mips64r2	0
+
+#define cpu_has_dsp		1
+#define cpu_has_mipsmt		0
+
+#define cpu_has_64bits		0
+#define cpu_has_64bit_zero_reg	0
+#define cpu_has_64bit_gp_regs	0
+#define cpu_has_64bit_addresses	0
+
+#define cpu_dcache_line_size()	32
+#define cpu_icache_line_size()	32
+
+#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 193c091..bfbd703 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,7 +28,11 @@
 /* #define cpu_has_prefetch	? */
 #define cpu_has_mcheck		1
 /* #define cpu_has_ejtag	? */
+#ifdef CONFIG_CPU_MICROMIPS
+#define cpu_has_llsc		0
+#else
 #define cpu_has_llsc		1
+#endif
 /* #define cpu_has_vtag_icache	? */
 /* #define cpu_has_dc_aliases	? */
 /* #define cpu_has_ic_fills_f_dc ? */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 44a09a6..bd9746f 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -83,4 +83,7 @@
 #define mips_pcibios_init() do { } while (0)
 #endif
 
+extern void mips_scroll_message(void);
+extern void mips_display_message(const char *str);
+
 #endif	/* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644
index e7aed3e..0000000
--- a/arch/mips/include/asm/mips-boards/prom.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * MIPS boards bootprom interface for the Linux kernel.
- *
- */
-
-#ifndef _MIPS_PROM_H
-#define _MIPS_PROM_H
-
-extern char *prom_getcmdline(void);
-extern char *prom_getenv(char *name);
-extern void prom_init_cmdline(void);
-extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
-extern void mips_display_message(const char *str);
-extern void mips_display_word(unsigned int num);
-extern void mips_scroll_message(void);
-extern int get_ethernet_addr(char *ethernet_addr);
-
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS    32
-struct prom_pmemblock {
-	unsigned long base; /* Within KSEG0. */
-	unsigned int size;  /* In bytes. */
-	unsigned int type;  /* free or prom memory */
-};
-
-#endif /* !(_MIPS_PROM_H) */
diff --git a/arch/mips/include/asm/mips_machine.h b/arch/mips/include/asm/mips_machine.h
index 363bb35..9d00aeb 100644
--- a/arch/mips/include/asm/mips_machine.h
+++ b/arch/mips/include/asm/mips_machine.h
@@ -42,13 +42,9 @@
 #ifdef CONFIG_MIPS_MACHINE
 int  mips_machtype_setup(char *id) __init;
 void mips_machine_setup(void) __init;
-void mips_set_machine_name(const char *name) __init;
-char *mips_get_machine_name(void);
 #else
 static inline int mips_machtype_setup(char *id) { return 1; }
 static inline void mips_machine_setup(void) { }
-static inline void mips_set_machine_name(const char *name) { }
-static inline char *mips_get_machine_name(void) { return NULL; }
 #endif /* CONFIG_MIPS_MACHINE */
 
 #endif /* __ASM_MIPS_MACHINE_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0da44d4..87e6207 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -596,6 +596,7 @@
 #define MIPS_CONF3_RXI		(_ULCAST_(1) << 12)
 #define MIPS_CONF3_ULRI		(_ULCAST_(1) << 13)
 #define MIPS_CONF3_ISA		(_ULCAST_(3) << 14)
+#define MIPS_CONF3_ISA_OE	(_ULCAST_(3) << 16)
 #define MIPS_CONF3_VZ		(_ULCAST_(1) << 23)
 
 #define MIPS_CONF4_MMUSIZEEXT	(_ULCAST_(255) << 0)
@@ -623,6 +624,24 @@
 #ifndef __ASSEMBLY__
 
 /*
+ * Macros for handling the ISA mode bit for microMIPS.
+ */
+#define get_isa16_mode(x)		((x) & 0x1)
+#define msk_isa16_mode(x)		((x) & ~0x1)
+#define set_isa16_mode(x)		do { (x) |= 0x1; } while (0)
+
+/*
+ * microMIPS instructions can be 16-bit or 32-bit in length. This
+ * returns 1 if the instruction is 16-bit and 0 if it is 32-bit.
+ */
+static inline int mm_insn_16bit(u16 insn)
+{
+	u16 opcode = (insn >> 10) & 0x7;
+
+	return (opcode >= 1 && opcode <= 3) ? 1 : 0;
+}
+
+/*
  * Functions to access the R10000 performance counters.	 These are basically
  * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
  * performance counter number encoded into bits 1 ... 5 of the instruction.
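On microMIPS, bit 0 of a code address selects the ISA mode rather than addressing a byte, so addresses taken from function pointers must be masked before dereferencing, and mm_insn_16bit() classifies the first halfword (the 16-bit major opcodes are exactly those whose low three bits are 1, 2 or 3). A sketch of a size-accurate instruction walk under those assumptions:

	static void mm_walk_text(unsigned long addr, unsigned long end)
	{
		u16 *ip = (u16 *)msk_isa16_mode(addr);	/* strip the ISA bit */

		while ((unsigned long)ip < end)
			ip += mm_insn_16bit(*ip) ? 1 : 2;	/* 1 or 2 halfwords */
	}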
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e81d719..1554721 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -26,10 +26,15 @@
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd)				\
-	tlbmiss_handler_setup_pgd((unsigned long)(pgd))
-
-extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
+#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
+do {									\
+	void (*tlbmiss_handler_setup_pgd)(unsigned long);		\
+	extern u32 tlbmiss_handler_setup_pgd_array[16];			\
+									\
+	tlbmiss_handler_setup_pgd =					\
+		(__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
+	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
+} while (0)
 
 #define TLBMISS_HANDLER_SETUP()						\
 	do {								\
@@ -62,59 +67,88 @@
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC	0x40
-#define ASID_MASK	0xfc0
+#define ASID_INC(asid)						\
+({								\
+	unsigned long __asid = asid;				\
+	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
+	".section\t__asid_inc,\"a\"\n\t"			\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid)						\
+	:"0" (__asid));						\
+	__asid;							\
+})
+#define ASID_MASK(asid)						\
+({								\
+	unsigned long __asid = asid;				\
+	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
+	".section\t__asid_mask,\"a\"\n\t"			\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid)						\
+	:"r" (__asid));						\
+	__asid;							\
+})
+#define ASID_VERSION_MASK					\
+({								\
+	unsigned long __asid;					\
+	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
+	".section\t__asid_version_mask,\"a\"\n\t"		\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid));					\
+	__asid;							\
+})
+#define ASID_FIRST_VERSION					\
+({								\
+	unsigned long __asid;					\
+	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
+	".section\t__asid_first_version,\"a\"\n\t"		\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid));					\
+	__asid;							\
+})
 
-#elif defined(CONFIG_CPU_R8000)
+#define ASID_FIRST_VERSION_R3000	0x1000
+#define ASID_FIRST_VERSION_R4000	0x100
+#define ASID_FIRST_VERSION_R8000	0x1000
+#define ASID_FIRST_VERSION_RM9000	0x1000
 
-#define ASID_INC	0x10
-#define ASID_MASK	0xff0
-
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC	0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK	(smtc_asid_mask)
-#define HW_ASID_MASK	0xff
-/* End SMTC/34K debug hack */
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC	0x1
-#define ASID_MASK	0xff
-
+#ifdef CONFIG_MIPS_MT_SMTC
+#define SMTC_HW_ASID_MASK		0xff
+extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
+#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- *  All unused by hardware upper bits will be considered
- *  as a software asid extension.
- */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
-	if (! ((asid += ASID_INC) & ASID_MASK) ) {
+	if (!ASID_MASK((asid = ASID_INC(asid)))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+		kvm_local_flush_tlb_all();      /* start new asid cycle */
+#else
 		local_flush_tlb_all();	/* start new asid cycle */
+#endif
 		if (!asid)		/* fix version if needed */
 			asid = ASID_FIRST_VERSION;
 	}
+
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
@@ -133,7 +167,7 @@
 {
 	int i;
 
-	for_each_online_cpu(i)
+	for_each_possible_cpu(i)
 		cpu_context(i, mm) = 0;
 
 	return 0;
@@ -166,7 +200,7 @@
 	 * free up the ASID value for use and flush any old
 	 * instances of it from the TLB.
 	 */
-	oldasid = (read_c0_entryhi() & ASID_MASK);
+	oldasid = ASID_MASK(read_c0_entryhi());
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +211,7 @@
 	 * having ASID_MASK smaller than the hardware maximum,
 	 * make sure no "soft" bits become "hard"...
 	 */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
 			 cpu_asid(cpu, next));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(mtflags);
@@ -230,15 +264,15 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	mtflags = dvpe();
-	oldasid = read_c0_entryhi() & ASID_MASK;
+	oldasid = ASID_MASK(read_c0_entryhi());
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			 smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-			 cpu_asid(cpu, next));
+	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+	                 cpu_asid(cpu, next));
 	ehb(); /* Make sure it propagates to TCStatus */
 	evpe(mtflags);
 #else
@@ -275,14 +309,14 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 		/* See comments for similar code above */
 		prevvpe = dvpe();
-		oldasid = (read_c0_entryhi() & ASID_MASK);
+		oldasid = ASID_MASK(read_c0_entryhi());
 		if (smtc_live_asid[mytlb][oldasid]) {
 			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 			if(smtc_live_asid[mytlb][oldasid] == 0)
 				smtc_flush_tlb_asid(oldasid);
 		}
 		/* See comments for similar code above */
-		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+		write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
 				| cpu_asid(cpu, mm));
 		ehb(); /* Make sure it propagates to TCStatus */
 		evpe(prevvpe);
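The ASID constants are now patched into the instruction stream at boot instead of being fixed at compile time; the allocation semantics are unchanged. A worked example with R4000-class values, as patched:

	/*
	 * ASID_INC == 1, hardware ASID field == bits 7:0,
	 * ASID_FIRST_VERSION == 0x100.
	 *
	 * asid_cache == 0x2ff: ASID_INC gives 0x300, and ASID_MASK(0x300)
	 * == 0, so version 2's hardware ASIDs are exhausted. The TLB is
	 * flushed and 0x300 becomes the new cache value; the version bits
	 * above bit 7 guarantee that a context stamped 0x2xx can never
	 * compare equal to a current cpu_asid() again.
	 */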
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
index 419d8ae..79c7ccc 100644
--- a/arch/mips/include/asm/netlogic/haldefs.h
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -35,42 +35,13 @@
 #ifndef __NLM_HAL_HALDEFS_H__
 #define __NLM_HAL_HALDEFS_H__
 
+#include <linux/irqflags.h>	/* for local_irq_disable */
+
 /*
  * This file contains platform specific memory mapped IO implementation
  * and will provide a way to read 32/64 bit memory mapped registers in
  * all ABIs
  */
-#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
-#error "o32 compile not supported on XLP yet"
-#endif
-/*
- * For o32 compilation, we have to disable interrupts and enable KX bit to
- * access 64 bit addresses or data.
- *
- * We need to disable interrupts because we save just the lower 32 bits of
- * registers in	 interrupt handling. So if we get hit by an interrupt while
- * using the upper 32 bits of a register, we lose.
- */
-static inline uint32_t nlm_save_flags_kx(void)
-{
-	return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
-}
-
-static inline uint32_t nlm_save_flags_cop2(void)
-{
-	return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
-}
-
-static inline void nlm_restore_flags(uint32_t sr)
-{
-	write_c0_status(sr);
-}
-
-/*
- * The n64 implementations are simple, the o32 implementations when they
- * are added, will have to disable interrupts and enable KX before doing
- * 64 bit ops.
- */
 static inline uint32_t
 nlm_read_reg(uint64_t base, uint32_t reg)
 {
@@ -87,13 +58,40 @@
 	*addr = val;
 }
 
+/*
+ * For o32 compilation, we have to disable interrupts to access 64 bit
+ * registers.
+ *
+ * We need to disable interrupts because we save just the lower 32 bits of
+ * registers in interrupt handling. So if we get hit by an interrupt while
+ * using the upper 32 bits of a register, we lose.
+ */
+
 static inline uint64_t
 nlm_read_reg64(uint64_t base, uint32_t reg)
 {
 	uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
 	volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
+	uint64_t val;
 
-	return *ptr;
+	if (sizeof(unsigned long) == 4) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__asm__ __volatile__(
+			".set	push"			"\n\t"
+			".set	mips64"			"\n\t"
+			"ld	%L0, %1"		"\n\t"
+			"dsra32	%M0, %L0, 0"		"\n\t"
+			"sll	%L0, %L0, 0"		"\n\t"
+			".set	pop"			"\n"
+			: "=r" (val)
+			: "m" (*ptr));
+		local_irq_restore(flags);
+	} else
+		val = *ptr;
+
+	return val;
 }
 
 static inline void
@@ -102,7 +100,25 @@
 	uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
 	volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
 
-	*ptr = val;
+	if (sizeof(unsigned long) == 4) {
+		unsigned long flags;
+		uint64_t tmp;
+
+		local_irq_save(flags);
+		__asm__ __volatile__(
+			".set	push"			"\n\t"
+			".set	mips64"			"\n\t"
+			"dsll32	%L0, %L0, 0"		"\n\t"
+			"dsrl32	%L0, %L0, 0"		"\n\t"
+			"dsll32	%M0, %M0, 0"		"\n\t"
+			"or	%L0, %L0, %M0"		"\n\t"
+			"sd	%L0, %2"		"\n\t"
+			".set	pop"			"\n"
+			: "=r" (tmp)
+			: "0" (val), "m" (*ptr));
+		local_irq_restore(flags);
+	} else
+		*ptr = val;
 }
 
 /*
@@ -143,14 +159,6 @@
 	return nlm_io_base + devoffset;
 }
 
-static inline uint64_t
-nlm_xkphys_map_pcibar0(uint64_t pcibase)
-{
-	uint64_t paddr;
-
-	paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
-	return (uint64_t)0x9000000000000000 | paddr;
-}
 #elif defined(CONFIG_CPU_XLR)
 
 static inline uint64_t
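The sizeof(unsigned long) == 4 paths exist because o32 interrupt entry saves only the lower 32 bits of each GPR, so the 64-bit ld/sd must run with interrupts off; %L0/%M0 in the asm name the low/high halves of the register pair holding the 64-bit value. Callers stay ABI-agnostic. A hedged usage sketch, where 'base' is a device register base from nlm_pcicfg_base() and register index 6 is purely illustrative:

	uint64_t v = nlm_read_reg64(base, 6);	/* even 32-bit reg index */
	nlm_write_reg64(base, 6, v | 0x1ULL);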
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 8ad2e0f..f299d31 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -38,21 +38,16 @@
 /*
  * XLR and XLP interrupt request and interrupt mask registers
  */
-#define read_c0_eirr()		__read_64bit_c0_register($9, 6)
-#define read_c0_eimr()		__read_64bit_c0_register($9, 7)
-#define write_c0_eirr(val)	__write_64bit_c0_register($9, 6, val)
-
 /*
- * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
- * EIMR is shadowed in the status register, so we cannot save and
- * restore status register for split read.
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the saved flags contain the part of EIMR that is
+ * shadowed in the STATUS register. Restoring flags would overwrite the
+ * lower 8 bits of EIMR.
+ *
+ * Call with interrupts disabled.
  */
 #define write_c0_eimr(val)						\
 do {									\
 	if (sizeof(unsigned long) == 4) {				\
-		unsigned long __flags;					\
-									\
-		local_irq_save(__flags);				\
 		__asm__ __volatile__(					\
 			".set\tmips64\n\t"				\
 			"dsll\t%L0, %L0, 32\n\t"			\
@@ -62,8 +57,6 @@
 			"dmtc0\t%L0, $9, 7\n\t"				\
 			".set\tmips0"					\
 			: : "r" (val));					\
-		__flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
-		local_irq_restore(__flags);				\
 	} else								\
 		__write_64bit_c0_register($9, 7, (val));		\
 } while (0)
@@ -128,7 +121,7 @@
 	uint64_t val;
 
 #ifdef CONFIG_64BIT
-	val = read_c0_eimr() & read_c0_eirr();
+	val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
 #else
 	__asm__ __volatile__(
 		".set	push\n\t"
@@ -143,7 +136,6 @@
 		".set	pop"
 		: "=r" (val));
 #endif
-
 	return val;
 }
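A sketch of the resulting calling convention: EIMR is updated only while interrupts are hard-disabled (e.g. from an irq_chip handler), and the full mask is recomputed rather than saved and restored. The cached-mask variable is hypothetical, not from this patch:

	static uint64_t nlm_eimr_shadow;	/* hypothetical cached mask */

	static void nlm_soc_unmask_irq(int hwirq)	/* runs with IRQs off */
	{
		nlm_eimr_shadow |= 1ULL << hwirq;
		write_c0_eimr(nlm_eimr_shadow);
	}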
 
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index 3df5301..a981f46 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -191,59 +191,6 @@
 #define PIC_IRT_PCIE_LINK_2_INDEX	80
 #define PIC_IRT_PCIE_LINK_3_INDEX	81
 #define PIC_IRT_PCIE_LINK_INDEX(num)	((num) + PIC_IRT_PCIE_LINK_0_INDEX)
-/* 78 to 81 */
-#define PIC_NUM_NA_IRTS			32
-/* 82 to 113 */
-#define PIC_IRT_NA_0_INDEX		82
-#define PIC_IRT_NA_INDEX(num)		((num) + PIC_IRT_NA_0_INDEX)
-#define PIC_IRT_POE_INDEX		114
-
-#define PIC_NUM_USB_IRTS		6
-#define PIC_IRT_USB_0_INDEX		115
-#define PIC_IRT_EHCI_0_INDEX		115
-#define PIC_IRT_OHCI_0_INDEX		116
-#define PIC_IRT_OHCI_1_INDEX		117
-#define PIC_IRT_EHCI_1_INDEX		118
-#define PIC_IRT_OHCI_2_INDEX		119
-#define PIC_IRT_OHCI_3_INDEX		120
-#define PIC_IRT_USB_INDEX(num)		((num) + PIC_IRT_USB_0_INDEX)
-/* 115 to 120 */
-#define PIC_IRT_GDX_INDEX		121
-#define PIC_IRT_SEC_INDEX		122
-#define PIC_IRT_RSA_INDEX		123
-
-#define PIC_NUM_COMP_IRTS		4
-#define PIC_IRT_COMP_0_INDEX		124
-#define PIC_IRT_COMP_INDEX(num)		((num) + PIC_IRT_COMP_0_INDEX)
-/* 124 to 127 */
-#define PIC_IRT_GBU_INDEX		128
-#define PIC_IRT_ICC_0_INDEX		129 /* ICC - Inter Chip Coherency */
-#define PIC_IRT_ICC_1_INDEX		130
-#define PIC_IRT_ICC_2_INDEX		131
-#define PIC_IRT_CAM_INDEX		132
-#define PIC_IRT_UART_0_INDEX		133
-#define PIC_IRT_UART_1_INDEX		134
-#define PIC_IRT_I2C_0_INDEX		135
-#define PIC_IRT_I2C_1_INDEX		136
-#define PIC_IRT_SYS_0_INDEX		137
-#define PIC_IRT_SYS_1_INDEX		138
-#define PIC_IRT_JTAG_INDEX		139
-#define PIC_IRT_PIC_INDEX		140
-#define PIC_IRT_NBU_INDEX		141
-#define PIC_IRT_TCU_INDEX		142
-#define PIC_IRT_GCU_INDEX		143 /* GBC - Global Coherency */
-#define PIC_IRT_DMC_0_INDEX		144
-#define PIC_IRT_DMC_1_INDEX		145
-
-#define PIC_NUM_GPIO_IRTS		4
-#define PIC_IRT_GPIO_0_INDEX		146
-#define PIC_IRT_GPIO_INDEX(num)		((num) + PIC_IRT_GPIO_0_INDEX)
-
-/* 146 to 149 */
-#define PIC_IRT_NOR_INDEX		150
-#define PIC_IRT_NAND_INDEX		151
-#define PIC_IRT_SPI_INDEX		152
-#define PIC_IRT_MMC_INDEX		153
 
 #define PIC_CLOCK_TIMER			7
 #define PIC_IRQ_BASE			8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644
index a9cd350..0000000
--- a/arch/mips/include/asm/netlogic/xlp-hal/usb.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __NLM_HAL_USB_H__
-#define __NLM_HAL_USB_H__
-
-#define USB_CTL_0			0x01
-#define USB_PHY_0			0x0A
-#define USB_PHY_RESET			0x01
-#define USB_PHY_PORT_RESET_0		0x10
-#define USB_PHY_PORT_RESET_1		0x20
-#define USB_CONTROLLER_RESET		0x01
-#define USB_INT_STATUS			0x0E
-#define USB_INT_EN			0x0F
-#define USB_PHY_INTERRUPT_EN		0x01
-#define USB_OHCI_INTERRUPT_EN		0x02
-#define USB_OHCI_INTERRUPT1_EN		0x04
-#define USB_OHCI_INTERRUPT2_EN		0x08
-#define USB_CTRL_INTERRUPT_EN		0x10
-
-#ifndef __ASSEMBLY__
-
-#define nlm_read_usb_reg(b, r)			nlm_read_reg(b, r)
-#define nlm_write_usb_reg(b, r, v)		nlm_write_reg(b, r, v)
-#define nlm_get_usb_pcibase(node, inst)		\
-	nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
-#define nlm_get_usb_hcd_base(node, inst)	\
-	nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
-#define nlm_get_usb_regbase(node, inst)		\
-	(nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
-
-#endif
-#endif /* __NLM_HAL_USB_H__ */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index fdc62fb..8b8f6b3 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -8,6 +8,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #ifdef CONFIG_32BIT
 #include <asm/pgtable-32.h>
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 2a5fa7a..71686c8 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -44,11 +44,16 @@
 #define SPECIAL_PAGES_SIZE PAGE_SIZE
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE	0x3fff8000UL
+#else
 /*
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
 #define TASK_SIZE	0x7fff8000UL
+#endif
 
 #ifdef __KERNEL__
 #define STACK_TOP_MAX	TASK_SIZE
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 8808bf5..1e7e096 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -48,4 +48,7 @@
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
 
+extern char *mips_get_machine_name(void);
+extern void mips_set_machine_name(const char *name);
+
 #endif /* __ASM_PROM_H */
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
index 1a2c302..fdfae43 100644
--- a/arch/mips/include/asm/sn/sn_private.h
+++ b/arch/mips/include/asm/sn/sn_private.h
@@ -14,6 +14,6 @@
 extern void install_ipi(void);
 extern void setup_replication_mask(void);
 extern void replicate_kernel_text(void);
-extern pfn_t node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(cnodeid_t);
 
 #endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index c4813d6..6d24d4e 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -19,7 +19,6 @@
 typedef signed short	moduleid_t;	/* user-visible module number type */
 typedef signed short	cmoduleid_t;	/* kernel compact module id type */
 typedef unsigned char	clusterid_t;	/* Clusterid of the cell */
-typedef unsigned long	pfn_t;
 
 typedef dev_t		vertex_hdl_t;	/* hardware graph vertex handle */
 
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5130c88..78d201f 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -71,7 +71,6 @@
 		"	 nop						\n"
 		"	srl	%[my_ticket], %[ticket], 16		\n"
 		"	andi	%[ticket], %[ticket], 0xffff		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	bne	%[ticket], %[my_ticket], 4f		\n"
 		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
 		"2:							\n"
@@ -105,7 +104,6 @@
 		"	beqz	%[my_ticket], 1b			\n"
 		"	 srl	%[my_ticket], %[ticket], 16		\n"
 		"	andi	%[ticket], %[ticket], 0xffff		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	bne	%[ticket], %[my_ticket], 4f		\n"
 		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
 		"2:							\n"
@@ -153,7 +151,6 @@
 		"							\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
 		"	srl	%[my_ticket], %[ticket], 16		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	andi	%[now_serving], %[ticket], 0xffff	\n"
 		"	bne	%[my_ticket], %[now_serving], 3f	\n"
 		"	 addu	%[ticket], %[ticket], %[inc]		\n"
@@ -178,7 +175,6 @@
 		"							\n"
 		"1:	ll	%[ticket], %[ticket_ptr]		\n"
 		"	srl	%[my_ticket], %[ticket], 16		\n"
-		"	andi	%[my_ticket], %[my_ticket], 0xffff	\n"
 		"	andi	%[now_serving], %[ticket], 0xffff	\n"
 		"	bne	%[my_ticket], %[now_serving], 3f	\n"
 		"	 addu	%[ticket], %[ticket], %[inc]		\n"
@@ -242,25 +238,16 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_read_lock	\n"
-		"1:	ll	%1, %2					\n"
-		"	bltz	%1, 3f					\n"
-		"	 addu	%1, 1					\n"
-		"2:	sc	%1, %0					\n"
-		"	beqz	%1, 1b					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"3:	ll	%1, %2					\n"
-		"	bltz	%1, 3b					\n"
-		"	 addu	%1, 1					\n"
-		"	b	2b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_read_lock	\n"
+			"	bltz	%1, 1b				\n"
+			"	 addu	%1, 1				\n"
+			"2:	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 
 	smp_llsc_mb();
@@ -285,21 +272,15 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_read_unlock	\n"
-		"1:	ll	%1, %2					\n"
-		"	sub	%1, 1					\n"
-		"	sc	%1, %0					\n"
-		"	beqz	%1, 2f					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"2:	b	1b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_read_unlock	\n"
+			"	sub	%1, 1				\n"
+			"	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 }
 
@@ -321,25 +302,16 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_write_lock	\n"
-		"1:	ll	%1, %2					\n"
-		"	bnez	%1, 3f					\n"
-		"	 lui	%1, 0x8000				\n"
-		"2:	sc	%1, %0					\n"
-		"	beqz	%1, 3f					\n"
-		"	 nop						\n"
-		"	.subsection 2					\n"
-		"3:	ll	%1, %2					\n"
-		"	bnez	%1, 3b					\n"
-		"	 lui	%1, 0x8000				\n"
-		"	b	2b					\n"
-		"	 nop						\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"1:	ll	%1, %2	# arch_write_lock	\n"
+			"	bnez	%1, 1b				\n"
+			"	 lui	%1, 0x8000			\n"
+			"2:	sc	%1, %0				\n"
+			: "=m" (rw->lock), "=&r" (tmp)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
 	}
 
 	smp_llsc_mb();
@@ -424,25 +396,21 @@
 		: "m" (rw->lock)
 		: "memory");
 	} else {
-		__asm__ __volatile__(
-		"	.set	noreorder	# arch_write_trylock	\n"
-		"	li	%2, 0					\n"
-		"1:	ll	%1, %3					\n"
-		"	bnez	%1, 2f					\n"
-		"	lui	%1, 0x8000				\n"
-		"	sc	%1, %0					\n"
-		"	beqz	%1, 3f					\n"
-		"	 li	%2, 1					\n"
-		"2:							\n"
-		__WEAK_LLSC_MB
-		"	.subsection 2					\n"
-		"3:	b	1b					\n"
-		"	 li	%2, 0					\n"
-		"	.previous					\n"
-		"	.set	reorder					\n"
-		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: "m" (rw->lock)
-		: "memory");
+		do {
+			__asm__ __volatile__(
+			"	ll	%1, %3	# arch_write_trylock	\n"
+			"	li	%2, 0				\n"
+			"	bnez	%1, 2f				\n"
+			"	lui	%1, 0x8000			\n"
+			"	sc	%1, %0				\n"
+			"	li	%2, 1				\n"
+			"2:						\n"
+			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+			: "m" (rw->lock)
+			: "memory");
+		} while (unlikely(!tmp));
+
+		smp_llsc_mb();
 	}
 
 	return ret;
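All of the rewritten lock paths share one shape: each asm block now makes a single ll/sc attempt and the retry moves to C, relying on sc rewriting its source register with 1 on success and 0 on failure. A schematic of the pattern, where one_llsc_attempt() merely stands in for the ll/modify/sc sequence and is not a real helper:

	do {
		tmp = one_llsc_attempt(&rw->lock);	/* 0 => lost the race */
	} while (unlikely(!tmp));
	smp_llsc_mb();		/* barrier after acquiring, as in the lock paths */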
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index c993840..a89d1b1 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -139,7 +139,7 @@
 1:		move	ra, k0
 		li	k0, 3
 		mtc0	k0, $22
-#endif /* CONFIG_CPU_LOONGSON2F */
+#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 		lui	k1, %hi(kernelsp)
 #else
@@ -189,6 +189,7 @@
 		LONG_S	$0, PT_R0(sp)
 		mfc0	v1, CP0_STATUS
 		LONG_S	$2, PT_R2(sp)
+		LONG_S	v1, PT_STATUS(sp)
 #ifdef CONFIG_MIPS_MT_SMTC
 		/*
 		 * Ideally, these instructions would be shuffled in
@@ -200,21 +201,20 @@
 		LONG_S	k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
 		LONG_S	$4, PT_R4(sp)
-		LONG_S	$5, PT_R5(sp)
-		LONG_S	v1, PT_STATUS(sp)
 		mfc0	v1, CP0_CAUSE
-		LONG_S	$6, PT_R6(sp)
-		LONG_S	$7, PT_R7(sp)
+		LONG_S	$5, PT_R5(sp)
 		LONG_S	v1, PT_CAUSE(sp)
+		LONG_S	$6, PT_R6(sp)
 		MFC0	v1, CP0_EPC
+		LONG_S	$7, PT_R7(sp)
 #ifdef CONFIG_64BIT
 		LONG_S	$8, PT_R8(sp)
 		LONG_S	$9, PT_R9(sp)
 #endif
+		LONG_S	v1, PT_EPC(sp)
 		LONG_S	$25, PT_R25(sp)
 		LONG_S	$28, PT_R28(sp)
 		LONG_S	$31, PT_R31(sp)
-		LONG_S	v1, PT_EPC(sp)
 		ori	$28, sp, _THREAD_MASK
 		xori	$28, _THREAD_MASK
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 178f792..895320e 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -58,8 +58,12 @@
 #define init_stack		(init_thread_union.stack)
 
 /* How to get the thread information struct from C.  */
-register struct thread_info *__current_thread_info __asm__("$28");
-#define current_thread_info()  __current_thread_info
+static inline struct thread_info *current_thread_info(void)
+{
+	register struct thread_info *__current_thread_info __asm__("$28");
+
+	return __current_thread_info;
+}
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index debc800..2d7b9df 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,13 +52,15 @@
  */
 extern unsigned int __weak get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
+extern int smtc_clockevent_init(void);
+extern int gic_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-	extern int smtc_clockevent_init(void);
-
 	return smtc_clockevent_init();
+#elif defined(CONFIG_CEVT_GIC)
+	return (gic_clockevent_init() | r4k_clockevent_init());
 #elif defined(CONFIG_CEVT_R4K)
 	return r4k_clockevent_init();
 #else
@@ -69,9 +71,7 @@
 /*
  * Initialize the count register as a clocksource
  */
-#ifdef CONFIG_CSRC_R4K
 extern int init_r4k_clocksource(void);
-#endif
 
 static inline int init_mips_clocksource(void)
 {
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bd87e36..f3fa375 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -23,7 +23,11 @@
  */
 #ifdef CONFIG_32BIT
 
-#define __UA_LIMIT	0x80000000UL
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
 
 #define __UA_ADDR	".word"
 #define __UA_LA		"la"
@@ -55,8 +59,13 @@
  * address in this range it's the process's problem, not ours :-)
  */
 
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
+#define USER_DS		((mm_segment_t) { 0xC0000000UL })
+#else
 #define KERNEL_DS	((mm_segment_t) { 0UL })
 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
+#endif
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
@@ -261,6 +270,7 @@
 	__asm__ __volatile__(						\
 	"1:	" insn "	%1, %3				\n"	\
 	"2:							\n"	\
+	"	.insn						\n"	\
 	"	.section .fixup,\"ax\"				\n"	\
 	"3:	li	%0, %4					\n"	\
 	"	j	2b					\n"	\
@@ -287,7 +297,9 @@
 	__asm__ __volatile__(						\
 	"1:	lw	%1, (%3)				\n"	\
 	"2:	lw	%D1, 4(%3)				\n"	\
-	"3:	.section	.fixup,\"ax\"			\n"	\
+	"3:							\n"	\
+	"	.insn						\n"	\
+	"	.section	.fixup,\"ax\"			\n"	\
 	"4:	li	%0, %4					\n"	\
 	"	move	%1, $0					\n"	\
 	"	move	%D1, $0					\n"	\
@@ -355,6 +367,7 @@
 	__asm__ __volatile__(						\
 	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
 	"2:							\n"	\
+	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
 	"3:	li	%0, %4					\n"	\
 	"	j	2b					\n"	\
@@ -373,6 +386,7 @@
 	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
 	"2:	sw	%D2, 4(%3)				\n"	\
 	"3:							\n"	\
+	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
 	"4:	li	%0, %4					\n"	\
 	"	j	3b					\n"	\
@@ -524,6 +538,7 @@
 	__asm__ __volatile__(						\
 	"1:	" insn "	%1, %3				\n"	\
 	"2:							\n"	\
+	"	.insn						\n"	\
 	"	.section .fixup,\"ax\"				\n"	\
 	"3:	li	%0, %4					\n"	\
 	"	j	2b					\n"	\
@@ -549,7 +564,9 @@
 	"1:	ulw	%1, (%3)				\n"	\
 	"2:	ulw	%D1, 4(%3)				\n"	\
 	"	move	%0, $0					\n"	\
-	"3:	.section	.fixup,\"ax\"			\n"	\
+	"3:							\n"	\
+	"	.insn						\n"	\
+	"	.section	.fixup,\"ax\"			\n"	\
 	"4:	li	%0, %4					\n"	\
 	"	move	%1, $0					\n"	\
 	"	move	%D1, $0					\n"	\
@@ -616,6 +633,7 @@
 	__asm__ __volatile__(						\
 	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
 	"2:							\n"	\
+	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
 	"3:	li	%0, %4					\n"	\
 	"	j	2b					\n"	\
@@ -634,6 +652,7 @@
 	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
 	"2:	sw	%D2, 4(%3)				\n"	\
 	"3:							\n"	\
+	"	.insn						\n"	\
 	"	.section	.fixup,\"ax\"			\n"	\
 	"4:	li	%0, %4					\n"	\
 	"	j	3b					\n"	\
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 058e941..370d967 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -6,7 +6,7 @@
  * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
 #include <linux/types.h>
@@ -22,44 +22,75 @@
 #define UASM_EXPORT_SYMBOL(sym)
 #endif
 
+#define _UASM_ISA_CLASSIC	0
+#define _UASM_ISA_MICROMIPS	1
+
+#ifndef UASM_ISA
+#ifdef CONFIG_CPU_MICROMIPS
+#define UASM_ISA	_UASM_ISA_MICROMIPS
+#else
+#define UASM_ISA	_UASM_ISA_CLASSIC
+#endif
+#endif
+
+#if (UASM_ISA == _UASM_ISA_CLASSIC)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)	CL_uasm_i##op
+#define ISAFUNC(x)	CL_##x
+#else
+#define ISAOPC(op)	uasm_i##op
+#define ISAFUNC(x)	x
+#endif
+#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)	uasm_i##op
+#define ISAFUNC(x)	x
+#else
+#define ISAOPC(op)	MM_uasm_i##op
+#define ISAFUNC(x)	MM_##x
+#endif
+#else
+#error Unsupported micro-assembler ISA!!!
+#endif
+
 #define Ip_u1u2u3(op)							\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u2u1u3(op)							\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u3u1u2(op)							\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u1u2s3(op)							\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2s3u1(op)							\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
 #define Ip_u2u1s3(op)							\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2u1msbu3(op)						\
 void __uasminit								\
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c,	\
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c,	\
 	   unsigned int d)
 
 #define Ip_u1u2(op)							\
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
 
 #define Ip_u1s2(op)							\
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
 
-#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
 
-#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf)
+#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
 
 Ip_u2u1s3(_addiu);
 Ip_u3u1u2(_addu);
@@ -132,19 +163,20 @@
 	int lab;
 };
 
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+			int lid);
 #ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
+int ISAFUNC(uasm_in_compat_space_p)(long addr);
 #endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int ISAFUNC(uasm_rel_hi)(long val);
+int ISAFUNC(uasm_rel_lo)(long val);
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
 
 #define UASM_L_LA(lb)							\
-static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
 {									\
-	uasm_build_label(lab, addr, label##lb);				\
+	ISAFUNC(uasm_build_label)(lab, addr, label##lb);		\
 }
 
 /* convenience macros for instructions */
@@ -196,27 +228,27 @@
 				     unsigned int a2, unsigned int a3)
 {
 	if (a3 < 32)
-		uasm_i_drotr(p, a1, a2, a3);
+		ISAOPC(_drotr)(p, a1, a2, a3);
 	else
-		uasm_i_drotr32(p, a1, a2, a3 - 32);
+		ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
 				    unsigned int a2, unsigned int a3)
 {
 	if (a3 < 32)
-		uasm_i_dsll(p, a1, a2, a3);
+		ISAOPC(_dsll)(p, a1, a2, a3);
 	else
-		uasm_i_dsll32(p, a1, a2, a3 - 32);
+		ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
 				    unsigned int a2, unsigned int a3)
 {
 	if (a3 < 32)
-		uasm_i_dsrl(p, a1, a2, a3);
+		ISAOPC(_dsrl)(p, a1, a2, a3);
 	else
-		uasm_i_dsrl32(p, a1, a2, a3 - 32);
+		ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
 }
 
 /* Handle relocations. */
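The ISAOPC()/ISAFUNC() wrappers let the uasm opcode emitters be compiled twice, once per ISA, without renaming every call site; the build whose ISA matches the kernel configuration keeps the plain names. The resulting name mapping, shown for _addiu:

	kernel config          UASM_ISA              ISAOPC(_addiu) expands to
	---------------------  --------------------  -------------------------
	!CONFIG_CPU_MICROMIPS  _UASM_ISA_CLASSIC     uasm_i_addiu
	 CONFIG_CPU_MICROMIPS  _UASM_ISA_MICROMIPS   uasm_i_addiu
	 CONFIG_CPU_MICROMIPS  _UASM_ISA_CLASSIC     CL_uasm_i_addiu
	!CONFIG_CPU_MICROMIPS  _UASM_ISA_MICROMIPS   MM_uasm_i_addiu

So callers keep writing uasm_i_addiu(&p, ...), and a file that needs the non-default ISA defines UASM_ISA itself and reaches the other build under its CL_/MM_ prefix.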
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4d07881..0f4aec2 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1996, 2000 by Ralf Baechle
  * Copyright (C) 2006 by Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #ifndef _UAPI_ASM_INST_H
 #define _UAPI_ASM_INST_H
@@ -193,6 +194,282 @@
 };
 
 /*
+ * (microMIPS) Major opcodes.
+ */
+enum mm_major_op {
+	mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
+	mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
+	mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
+	mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
+	mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
+	mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+	mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
+	mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
+	mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
+	mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
+	mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
+	mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
+	mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
+	mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
+	mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
+	mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
+};
+
+/*
+ * (microMIPS) POOL32I minor opcodes.
+ */
+enum mm_32i_minor_op {
+	mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
+	mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
+	mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
+	mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
+	mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
+	mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
+	mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
+	mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
+	mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
+};
+
+/*
+ * (microMIPS) POOL32A minor opcodes.
+ */
+enum mm_32a_minor_op {
+	mm_sll32_op = 0x000,
+	mm_ins_op = 0x00c,
+	mm_ext_op = 0x02c,
+	mm_pool32axf_op = 0x03c,
+	mm_srl32_op = 0x040,
+	mm_sra_op = 0x080,
+	mm_rotr_op = 0x0c0,
+	mm_lwxs_op = 0x118,
+	mm_addu32_op = 0x150,
+	mm_subu32_op = 0x1d0,
+	mm_and_op = 0x250,
+	mm_or32_op = 0x290,
+	mm_xor32_op = 0x310,
+};
+
+/*
+ * (microMIPS) POOL32B functions.
+ */
+enum mm_32b_func {
+	mm_lwc2_func = 0x0,
+	mm_lwp_func = 0x1,
+	mm_ldc2_func = 0x2,
+	mm_ldp_func = 0x4,
+	mm_lwm32_func = 0x5,
+	mm_cache_func = 0x6,
+	mm_ldm_func = 0x7,
+	mm_swc2_func = 0x8,
+	mm_swp_func = 0x9,
+	mm_sdc2_func = 0xa,
+	mm_sdp_func = 0xc,
+	mm_swm32_func = 0xd,
+	mm_sdm_func = 0xf,
+};
+
+/*
+ * (microMIPS) POOL32C functions.
+ */
+enum mm_32c_func {
+	mm_pref_func = 0x2,
+	mm_ll_func = 0x3,
+	mm_swr_func = 0x9,
+	mm_sc_func = 0xb,
+	mm_lwu_func = 0xe,
+};
+
+/*
+ * (microMIPS) POOL32AXF minor opcodes.
+ */
+enum mm_32axf_minor_op {
+	mm_mfc0_op = 0x003,
+	mm_mtc0_op = 0x00b,
+	mm_tlbp_op = 0x00d,
+	mm_jalr_op = 0x03c,
+	mm_tlbr_op = 0x04d,
+	mm_jalrhb_op = 0x07c,
+	mm_tlbwi_op = 0x08d,
+	mm_tlbwr_op = 0x0cd,
+	mm_jalrs_op = 0x13c,
+	mm_jalrshb_op = 0x17c,
+	mm_syscall_op = 0x22d,
+	mm_eret_op = 0x3cd,
+};
+
+/*
+ * (microMIPS) POOL32F minor opcodes.
+ */
+enum mm_32f_minor_op {
+	mm_32f_00_op = 0x00,
+	mm_32f_01_op = 0x01,
+	mm_32f_02_op = 0x02,
+	mm_32f_10_op = 0x08,
+	mm_32f_11_op = 0x09,
+	mm_32f_12_op = 0x0a,
+	mm_32f_20_op = 0x10,
+	mm_32f_30_op = 0x18,
+	mm_32f_40_op = 0x20,
+	mm_32f_41_op = 0x21,
+	mm_32f_42_op = 0x22,
+	mm_32f_50_op = 0x28,
+	mm_32f_51_op = 0x29,
+	mm_32f_52_op = 0x2a,
+	mm_32f_60_op = 0x30,
+	mm_32f_70_op = 0x38,
+	mm_32f_73_op = 0x3b,
+	mm_32f_74_op = 0x3c,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_10_minor_op {
+	mm_lwxc1_op = 0x1,
+	mm_swxc1_op,
+	mm_ldxc1_op,
+	mm_sdxc1_op,
+	mm_luxc1_op,
+	mm_suxc1_op,
+};
+
+enum mm_32f_func {
+	mm_lwxc1_func = 0x048,
+	mm_swxc1_func = 0x088,
+	mm_ldxc1_func = 0x0c8,
+	mm_sdxc1_func = 0x108,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_40_minor_op {
+	mm_fmovf_op,
+	mm_fmovt_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_60_minor_op {
+	mm_fadd_op,
+	mm_fsub_op,
+	mm_fmul_op,
+	mm_fdiv_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_70_minor_op {
+	mm_fmovn_op,
+	mm_fmovz_op,
+};
+
+/*
+ * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
+ */
+enum mm_32f_73_minor_op {
+	mm_fmov0_op = 0x01,
+	mm_fcvtl_op = 0x04,
+	mm_movf0_op = 0x05,
+	mm_frsqrt_op = 0x08,
+	mm_ffloorl_op = 0x0c,
+	mm_fabs0_op = 0x0d,
+	mm_fcvtw_op = 0x24,
+	mm_movt0_op = 0x25,
+	mm_fsqrt_op = 0x28,
+	mm_ffloorw_op = 0x2c,
+	mm_fneg0_op = 0x2d,
+	mm_cfc1_op = 0x40,
+	mm_frecip_op = 0x48,
+	mm_fceill_op = 0x4c,
+	mm_fcvtd0_op = 0x4d,
+	mm_ctc1_op = 0x60,
+	mm_fceilw_op = 0x6c,
+	mm_fcvts0_op = 0x6d,
+	mm_mfc1_op = 0x80,
+	mm_fmov1_op = 0x81,
+	mm_movf1_op = 0x85,
+	mm_ftruncl_op = 0x8c,
+	mm_fabs1_op = 0x8d,
+	mm_mtc1_op = 0xa0,
+	mm_movt1_op = 0xa5,
+	mm_ftruncw_op = 0xac,
+	mm_fneg1_op = 0xad,
+	mm_froundl_op = 0xcc,
+	mm_fcvtd1_op = 0xcd,
+	mm_froundw_op = 0xec,
+	mm_fcvts1_op = 0xed,
+};
+
+/*
+ * (microMIPS) POOL16C minor opcodes.
+ */
+enum mm_16c_minor_op {
+	mm_lwm16_op = 0x04,
+	mm_swm16_op = 0x05,
+	mm_jr16_op = 0x18,
+	mm_jrc_op = 0x1a,
+	mm_jalr16_op = 0x1c,
+	mm_jalrs16_op = 0x1e,
+};
+
+/*
+ * (microMIPS) POOL16D minor opcodes.
+ */
+enum mm_16d_minor_op {
+	mm_addius5_func,
+	mm_addiusp_func,
+};
+
+/*
+ * (MIPS16e) opcodes.
+ */
+enum MIPS16e_ops {
+	MIPS16e_jal_op = 003,
+	MIPS16e_ld_op = 007,
+	MIPS16e_i8_op = 014,
+	MIPS16e_sd_op = 017,
+	MIPS16e_lb_op = 020,
+	MIPS16e_lh_op = 021,
+	MIPS16e_lwsp_op = 022,
+	MIPS16e_lw_op = 023,
+	MIPS16e_lbu_op = 024,
+	MIPS16e_lhu_op = 025,
+	MIPS16e_lwpc_op = 026,
+	MIPS16e_lwu_op = 027,
+	MIPS16e_sb_op = 030,
+	MIPS16e_sh_op = 031,
+	MIPS16e_swsp_op = 032,
+	MIPS16e_sw_op = 033,
+	MIPS16e_rr_op = 035,
+	MIPS16e_extend_op = 036,
+	MIPS16e_i64_op = 037,
+};
+
+enum MIPS16e_i64_func {
+	MIPS16e_ldsp_func,
+	MIPS16e_sdsp_func,
+	MIPS16e_sdrasp_func,
+	MIPS16e_dadjsp_func,
+	MIPS16e_ldpc_func,
+};
+
+enum MIPS16e_rr_func {
+	MIPS16e_jr_func,
+};
+
+enum MIPS16e_i8_func {
+	MIPS16e_swrasp_func = 02,
+};
+
+/*
+ * (microMIPS & MIPS16e) NOP instruction.
+ */
+#define MM_NOP16	0x0c00
+
+/*
  * Damn ...  bitfields depend from byteorder :-(
  */
 #ifdef __MIPSEB__
@@ -311,6 +588,262 @@
 	;)))))))
 };
 
+/*
+ * microMIPS instruction formats (32-bit length)
+ *
+ * NOTE:
+ *	Parentheses denote whether the format is a microMIPS instruction or
+ *	a MIPS32 instruction re-encoded for use in the microMIPS ASE.
+ */
+struct fb_format {		/* FPU branch format (MIPS32) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int bc : 5,
+	BITFIELD_FIELD(unsigned int cc : 3,
+	BITFIELD_FIELD(unsigned int flag : 2,
+	BITFIELD_FIELD(signed int simmediate : 16,
+	;)))))
+};
+
+struct fp0_format {		/* FPU multiply and add format (MIPS32) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int fmt : 5,
+	BITFIELD_FIELD(unsigned int ft : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct mm_fp0_format {		/* FPU multiply and add format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int ft : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int fmt : 3,
+	BITFIELD_FIELD(unsigned int op : 2,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;)))))))
+};
+
+struct fp1_format {		/* FPU mfc1 and cfc1 format (MIPS32) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int op : 5,
+	BITFIELD_FIELD(unsigned int rt : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct mm_fp1_format {		/* FPU mfc1 and cfc1 format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fmt : 2,
+	BITFIELD_FIELD(unsigned int op : 8,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct mm_fp2_format {		/* FPU movt and movf format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int cc : 3,
+	BITFIELD_FIELD(unsigned int zero : 2,
+	BITFIELD_FIELD(unsigned int fmt : 2,
+	BITFIELD_FIELD(unsigned int op : 3,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))))
+};
+
+struct mm_fp3_format {		/* FPU abs and neg format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fmt : 3,
+	BITFIELD_FIELD(unsigned int op : 7,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct mm_fp4_format {		/* FPU c.cond format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int cc : 3,
+	BITFIELD_FIELD(unsigned int fmt : 3,
+	BITFIELD_FIELD(unsigned int cond : 4,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;)))))))
+};
+
+struct mm_fp5_format {		/* FPU lwxc1 and swxc1 format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int index : 5,
+	BITFIELD_FIELD(unsigned int base : 5,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int op : 5,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct fp6_format {		/* FPU madd and msub format (MIPS IV) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int fr : 5,
+	BITFIELD_FIELD(unsigned int ft : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct mm_fp6_format {		/* FPU madd and msub format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int ft : 5,
+	BITFIELD_FIELD(unsigned int fs : 5,
+	BITFIELD_FIELD(unsigned int fd : 5,
+	BITFIELD_FIELD(unsigned int fr : 5,
+	BITFIELD_FIELD(unsigned int func : 6,
+	;))))))
+};
+
+struct mm_i_format {		/* Immediate format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 5,
+	BITFIELD_FIELD(unsigned int rs : 5,
+	BITFIELD_FIELD(signed int simmediate : 16,
+	;))))
+};
+
+struct mm_m_format {		/* Multi-word load/store format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rd : 5,
+	BITFIELD_FIELD(unsigned int base : 5,
+	BITFIELD_FIELD(unsigned int func : 4,
+	BITFIELD_FIELD(signed int simmediate : 12,
+	;)))))
+};
+
+struct mm_x_format {		/* Scaled indexed load format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int index : 5,
+	BITFIELD_FIELD(unsigned int base : 5,
+	BITFIELD_FIELD(unsigned int rd : 5,
+	BITFIELD_FIELD(unsigned int func : 11,
+	;)))))
+};
+
+/*
+ * microMIPS instruction formats (16-bit length)
+ */
+struct mm_b0_format {		/* Unconditional branch format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(signed int simmediate : 10,
+	BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+	;)))
+};
+
+struct mm_b1_format {		/* Conditional branch format (microMIPS) */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rs : 3,
+	BITFIELD_FIELD(signed int simmediate : 7,
+	BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+	;))))
+};
+
+struct mm16_m_format {		/* Multi-word load/store format */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int func : 4,
+	BITFIELD_FIELD(unsigned int rlist : 2,
+	BITFIELD_FIELD(unsigned int imm : 4,
+	BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+	;)))))
+};
+
+struct mm16_rb_format {		/* Signed immediate format */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 3,
+	BITFIELD_FIELD(unsigned int base : 3,
+	BITFIELD_FIELD(signed int simmediate : 4,
+	BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+	;)))))
+};
+
+struct mm16_r3_format {		/* Load from global pointer format */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 3,
+	BITFIELD_FIELD(signed int simmediate : 7,
+	BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+	;))))
+};
+
+struct mm16_r5_format {		/* Load/store from stack pointer format */
+	BITFIELD_FIELD(unsigned int opcode : 6,
+	BITFIELD_FIELD(unsigned int rt : 5,
+	BITFIELD_FIELD(signed int simmediate : 5,
+	BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+	;))))
+};
+
+/*
+ * MIPS16e instruction formats (16-bit length)
+ */
+struct m16e_rr {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int rx : 3,
+	BITFIELD_FIELD(unsigned int nd : 1,
+	BITFIELD_FIELD(unsigned int l : 1,
+	BITFIELD_FIELD(unsigned int ra : 1,
+	BITFIELD_FIELD(unsigned int func : 5,
+	;))))))
+};
+
+struct m16e_jal {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int x : 1,
+	BITFIELD_FIELD(unsigned int imm20_16 : 5,
+	BITFIELD_FIELD(signed int imm25_21 : 5,
+	;))))
+};
+
+struct m16e_i64 {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int func : 3,
+	BITFIELD_FIELD(unsigned int imm : 8,
+	;)))
+};
+
+struct m16e_ri64 {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int func : 3,
+	BITFIELD_FIELD(unsigned int ry : 3,
+	BITFIELD_FIELD(unsigned int imm : 5,
+	;))))
+};
+
+struct m16e_ri {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int rx : 3,
+	BITFIELD_FIELD(unsigned int imm : 8,
+	;)))
+};
+
+struct m16e_rri {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int rx : 3,
+	BITFIELD_FIELD(unsigned int ry : 3,
+	BITFIELD_FIELD(unsigned int imm : 5,
+	;))))
+};
+
+struct m16e_i8 {
+	BITFIELD_FIELD(unsigned int opcode : 5,
+	BITFIELD_FIELD(unsigned int func : 3,
+	BITFIELD_FIELD(unsigned int imm : 8,
+	;)))
+};
+
 union mips_instruction {
 	unsigned int word;
 	unsigned short halfword[2];
@@ -326,6 +859,37 @@
 	struct b_format b_format;
 	struct ps_format ps_format;
 	struct v_format v_format;
+	struct fb_format fb_format;
+	struct fp0_format fp0_format;
+	struct mm_fp0_format mm_fp0_format;
+	struct fp1_format fp1_format;
+	struct mm_fp1_format mm_fp1_format;
+	struct mm_fp2_format mm_fp2_format;
+	struct mm_fp3_format mm_fp3_format;
+	struct mm_fp4_format mm_fp4_format;
+	struct mm_fp5_format mm_fp5_format;
+	struct fp6_format fp6_format;
+	struct mm_fp6_format mm_fp6_format;
+	struct mm_i_format mm_i_format;
+	struct mm_m_format mm_m_format;
+	struct mm_x_format mm_x_format;
+	struct mm_b0_format mm_b0_format;
+	struct mm_b1_format mm_b1_format;
+	struct mm16_m_format mm16_m_format;
+	struct mm16_rb_format mm16_rb_format;
+	struct mm16_r3_format mm16_r3_format;
+	struct mm16_r5_format mm16_r5_format;
+};
+
+union mips16e_instruction {
+	unsigned int full : 16;
+	struct m16e_rr rr;
+	struct m16e_jal jal;
+	struct m16e_i64 i64;
+	struct m16e_ri64 ri64;
+	struct m16e_ri ri;
+	struct m16e_rri rri;
+	struct m16e_i8 i8;
 };
 
 #endif /* _UAPI_ASM_INST_H */
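
A quick way to sanity-check these layouts is to decode a word with plain
shifts and masks, which sidesteps the byte-order issue BITFIELD_FIELD exists
to solve. A standalone sketch following the mm_i_format field order
(opcode:6, rt:5, rs:5, simmediate:16); the encoding is made up for
illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t word = 0xfc5d0010;	/* hypothetical microMIPS word */
		unsigned opcode = (word >> 26) & 0x3f;	/* 6-bit major opcode */
		unsigned rt = (word >> 21) & 0x1f;	/* 5-bit target register */
		unsigned rs = (word >> 16) & 0x1f;	/* 5-bit source register */
		int16_t simm = (int16_t)(word & 0xffff); /* signed 16-bit immediate */

		printf("opcode=%#x rt=%u rs=%u simm=%d\n", opcode, rt, rs, simm);
		return 0;
	}
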
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 520a908..6ad9e04 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y		:= head.o vmlinux.lds
 
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-		   ptrace.o reset.o setup.o signal.o syscall.o \
+		   prom.o ptrace.o reset.o setup.o signal.o syscall.o \
 		   time.o topology.o traps.o unaligned.o watch.o vdso.o
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC)		+= cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC)		+= csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)	+= csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
-obj-$(CONFIG_CSRC_GIC)		+= csrc-gic.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
@@ -86,8 +87,6 @@
 obj-$(CONFIG_SPINLOCK_TEST)	+= spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)	+= mips_machine.o
 
-obj-$(CONFIG_OF)		+= prom.o
-
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 50285b2..0845091 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
+#include <linux/kvm_host.h>
+
 void output_ptreg_defines(void)
 {
 	COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@
 	BLANK();
 }
 #endif
+
+void output_kvm_defines(void)
+{
+	COMMENT(" KVM/MIPS Specific offsets. ");
+	DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+	OFFSET(VCPU_RUN, kvm_vcpu, run);
+	OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+	OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+	OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+	OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+	OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+	OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+	OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+	OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+	OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+	OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+	OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+	OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+	OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+	OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+	OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+	OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+	OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+	OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+	OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+	OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+	OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+	OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+	OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+	OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+	OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+	OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+	OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+	OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+	OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+	OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+	OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+	OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+	OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+	OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+	OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+	OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+	OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+	OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+	OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+	OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+	OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+	OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+	OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+	OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+	OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+	OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+	OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+	OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+	OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+	OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+	BLANK();
+}
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 556a435..97c5a16 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -48,7 +48,11 @@
 	__res;								\
 })
 
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE		0x3fff8000UL
+#else
 #define TASK32_SIZE		0x7fff8000UL
+#endif
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE		(TASK32_SIZE / 3 * 2)
 
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 83ffe95..46c2ad0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -14,10 +14,186 @@
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 #include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
+/*
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+	unsigned short inst;
+	long epc = regs->cp0_epc;
+
+	/* Calculate exception PC in branch delay slot. */
+	if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+		/* This should never happen because delay slot was checked. */
+		force_sig(SIGSEGV, current);
+		return epc;
+	}
+	if (cpu_has_mips16) {
+		if (((union mips16e_instruction)inst).ri.opcode
+				== MIPS16e_jal_op)
+			epc += 4;
+		else
+			epc += 2;
+	} else if (mm_insn_16bit(inst))
+		epc += 2;
+	else
+		epc += 4;
+
+	return epc;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+	u16 __user *pc16;
+	u16 halfword;
+	unsigned int word;
+	unsigned long contpc;
+	struct mm_decoded_insn mminsn = { 0 };
+
+	mminsn.micro_mips_mode = 1;
+
+	/* This load never faults. */
+	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+	__get_user(halfword, pc16);
+	pc16++;
+	contpc = regs->cp0_epc + 2;
+	word = ((unsigned int)halfword << 16);
+	mminsn.pc_inc = 2;
+
+	if (!mm_insn_16bit(halfword)) {
+		__get_user(halfword, pc16);
+		pc16++;
+		contpc = regs->cp0_epc + 4;
+		mminsn.pc_inc = 4;
+		word |= halfword;
+	}
+	mminsn.insn = word;
+
+	if (get_user(halfword, pc16))
+		goto sigsegv;
+	mminsn.next_pc_inc = 2;
+	word = ((unsigned int)halfword << 16);
+
+	if (!mm_insn_16bit(halfword)) {
+		pc16++;
+		if (get_user(halfword, pc16))
+			goto sigsegv;
+		mminsn.next_pc_inc = 4;
+		word |= halfword;
+	}
+	mminsn.next_insn = word;
+
+	mm_isBranchInstr(regs, mminsn, &contpc);
+
+	regs->cp0_epc = contpc;
+
+	return 0;
+
+sigsegv:
+	force_sig(SIGSEGV, current);
+	return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+	u16 __user *addr;
+	union mips16e_instruction inst;
+	u16 inst2;
+	u32 fullinst;
+	long epc;
+
+	epc = regs->cp0_epc;
+
+	/* Read the instruction. */
+	addr = (u16 __user *)msk_isa16_mode(epc);
+	if (__get_user(inst.full, addr)) {
+		force_sig(SIGSEGV, current);
+		return -EFAULT;
+	}
+
+	switch (inst.ri.opcode) {
+	case MIPS16e_extend_op:
+		regs->cp0_epc += 4;
+		return 0;
+
+		/*
+		 *  JAL and JALX in MIPS16e mode
+		 */
+	case MIPS16e_jal_op:
+		addr += 1;
+		if (__get_user(inst2, addr)) {
+			force_sig(SIGSEGV, current);
+			return -EFAULT;
+		}
+		fullinst = ((unsigned)inst.full << 16) | inst2;
+		regs->regs[31] = epc + 6;
+		epc += 4;
+		epc >>= 28;
+		epc <<= 28;
+		/*
+		 * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+		 *
+		 * ......TARGET[15:0].................TARGET[20:16]...........
+		 * ......TARGET[25:21]
+		 */
+		epc |=
+		    ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+		    ((fullinst & 0x1f0000) << 7);
+		if (!inst.jal.x)
+			set_isa16_mode(epc);	/* Set ISA mode bit. */
+		regs->cp0_epc = epc;
+		return 0;
+
+		/*
+		 *  J(AL)R(C)
+		 */
+	case MIPS16e_rr_op:
+		if (inst.rr.func == MIPS16e_jr_func) {
+
+			if (inst.rr.ra)
+				regs->cp0_epc = regs->regs[31];
+			else
+				regs->cp0_epc =
+				    regs->regs[reg16to32[inst.rr.rx]];
+
+			if (inst.rr.l) {
+				if (inst.rr.nd)
+					regs->regs[31] = epc + 2;
+				else
+					regs->regs[31] = epc + 4;
+			}
+			return 0;
+		}
+		break;
+	}
+
+	/*
+	 * All other cases have no branch delay slot and are 16-bits.
+	 * Branches do not cause an exception.
+	 */
+	regs->cp0_epc += 2;
+
+	return 0;
+}
+
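
The three shifts in the JAL(X) case are easy to mis-read. In the fused
word, bits [25:21] hold TARGET[20:16] and bits [20:16] hold TARGET[25:21],
so each mask lands its field at the right address bits. A standalone sketch
of the same reassembly, with a made-up instruction pair:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mips16e_jal_target(uint32_t fullinst)
	{
		return ((fullinst & 0x0000ffff) << 2) |	/* TARGET[15:0]  -> addr[17:2]  */
		       ((fullinst & 0x03e00000) >> 3) |	/* TARGET[20:16] -> addr[22:18] */
		       ((fullinst & 0x001f0000) << 7);	/* TARGET[25:21] -> addr[27:23] */
	}

	int main(void)
	{
		uint32_t fullinst = (0x1800u << 16) | 0x1234u; /* hypothetical JAL pair */
		printf("low 28 address bits: %#x\n", mips16e_jal_target(fullinst));
		return 0;
	}
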
 /**
  * __compute_return_epc_for_insn - Computes the return address and do emulate
  *				    branch simulation, if required.
@@ -129,6 +305,8 @@
 		epc <<= 28;
 		epc |= (insn.j_format.target << 2);
 		regs->cp0_epc = epc;
+		if (insn.i_format.opcode == jalx_op)
+			set_isa16_mode(regs->cp0_epc);
 		break;
 
 	/*
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 0000000..730eaf9
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013  Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+	u64 cnt;
+	int res;
+
+	cnt = gic_read_count();
+	cnt += (u64)delta;
+	gic_write_compare(cnt);
+	res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+	return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	/* Nothing to do ...  */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *cd;
+	int cpu = smp_processor_id();
+
+	gic_write_compare(gic_read_compare());
+	cd = &per_cpu(gic_clockevent_device, cpu);
+	cd->event_handler(cd);
+	return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+	.handler = gic_compare_interrupt,
+	.flags = IRQF_PERCPU | IRQF_TIMER,
+	.name = "timer",
+};
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int __cpuinit gic_clockevent_init(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq;
+
+	if (!cpu_has_counter || !gic_frequency)
+		return -ENXIO;
+
+	irq = MIPS_GIC_IRQ_BASE;
+
+	cd = &per_cpu(gic_clockevent_device, cpu);
+
+	cd->name		= "MIPS GIC";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	clockevent_set_clock(cd, gic_frequency);
+
+	/* Calculate the min / max delta */
+	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= irq;
+	cd->cpumask		= cpumask_of(cpu);
+	cd->set_next_event	= gic_next_event;
+	cd->set_mode		= gic_set_clock_mode;
+	cd->event_handler	= gic_event_handler;
+
+	clockevents_register_device(cd);
+
+	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+	if (gic_timer_irq_installed)
+		return 0;
+
+	gic_timer_irq_installed = 1;
+
+	setup_irq(irq, &gic_compare_irqaction);
+	irq_set_handler(irq, handle_percpu_irq);
+	return 0;
+}
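
The -ETIME dance in gic_next_event() is the standard clockevents contract:
program compare = now + delta, then re-read the counter; if the counter
already passed the deadline, the interrupt may never fire and the core must
retry. A minimal userspace model of the pattern, with fake counter hooks
standing in for the GIC accessors:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t fake_count;			/* stand-in for the GIC counter */
	static uint64_t read_counter(void) { return fake_count; }
	static void write_compare(uint64_t cnt) { (void)cnt; }

	static int next_event(unsigned long delta)
	{
		uint64_t cnt = read_counter() + delta;

		write_compare(cnt);
		fake_count += delta + 1;		/* simulate a missed deadline */
		/* the real code returns -ETIME here */
		return ((int64_t)(read_counter() - cnt) >= 0) ? -1 : 0;
	}

	int main(void)
	{
		printf("next_event: %d\n", next_event(100));	/* prints -1 */
		return 0;
	}
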
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 07b847d..02033ea 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -23,7 +23,6 @@
  */
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
@@ -49,7 +48,6 @@
 int cp0_timer_irq_installed;
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@
 		/* Clear Count/Compare Interrupt */
 		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
+#ifdef CONFIG_CEVT_GIC
+		if (!gic_present)
+#endif
 		cd->event_handler(cd);
 	}
 
@@ -118,6 +119,10 @@
 	unsigned int delta;
 	unsigned int cnt;
 
+#ifdef CONFIG_KVM_GUEST
+	return 1;
+#endif
+
 	/*
 	 * IP7 already pending?	 Try to clear it by acking the timer.
 	 */
@@ -166,7 +171,6 @@
 }
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 int __cpuinit r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@
 	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
+#ifdef CONFIG_CEVT_GIC
+	if (!gic_present)
+#endif
 	clockevents_register_device(cd);
 
 	if (cp0_timer_irq_installed)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5fe66a0..4bbffdb 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -470,6 +470,9 @@
 		c->options |= MIPS_CPU_ULRI;
 	if (config3 & MIPS_CONF3_ISA)
 		c->options |= MIPS_CPU_MICROMIPS;
+#ifdef CONFIG_CPU_MICROMIPS
+	write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
+#endif
 	if (config3 & MIPS_CONF3_VZ)
 		c->ases |= MIPS_ASE_VZ;
 
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
index 5dca24b..e026209 100644
--- a/arch/mips/kernel/csrc-gic.c
+++ b/arch/mips/kernel/csrc-gic.c
@@ -5,23 +5,14 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/clocksource.h>
 #include <linux/init.h>
+#include <linux/time.h>
 
-#include <asm/time.h>
 #include <asm/gic.h>
 
 static cycle_t gic_hpt_read(struct clocksource *cs)
 {
-	unsigned int hi, hi2, lo;
-
-	do {
-		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
-		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
-		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
-	} while (hi2 != hi);
-
-	return (((cycle_t) hi) << 32) + lo;
+	return gic_read_count();
 }
 
 static struct clocksource gic_clocksource = {
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ecb347c..5c2ba9f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2007  Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/init.h>
 
@@ -21,8 +21,10 @@
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
 #define PANIC_PIC(msg)					\
-		.set push;				\
+		.set	push;				\
+		.set	nomicromips;			\
 		.set	reorder;			\
 		PTR_LA	a0,8f;				\
 		.set	noat;				\
@@ -31,17 +33,10 @@
 9:		b	9b;				\
 		.set	pop;				\
 		TEXT(msg)
+#endif
 
 	__INIT
 
-NESTED(except_vec0_generic, 0, sp)
-	PANIC_PIC("Exception vector 0 called")
-	END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
-	PANIC_PIC("Exception vector 1 called")
-	END(except_vec1_generic)
-
 /*
  * General exception vector for all other CPUs.
  *
@@ -138,12 +133,19 @@
 	 nop
 	nop
 	nop
+#ifdef CONFIG_CPU_MICROMIPS
+	nop
+	nop
+	nop
+	nop
+#endif
 	.set	mips3
 	wait
 	/* end of rollback region (the region size must be power of two) */
-	.set	pop
 1:
 	jr	ra
+	nop
+	.set	pop
 	END(r4k_wait)
 
 	.macro	BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@
 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
 	PTR_LA	ra, ret_from_irq
-	j	plat_irq_dispatch
+	PTR_LA  v0, plat_irq_dispatch
+	jr	v0
+#ifdef CONFIG_CPU_MICROMIPS
+	nop
+#endif
 	END(handle_int)
 
 	__INIT
@@ -222,11 +228,14 @@
 /*
  * EJTAG debug exception handler.
  * The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
  * unconditional jump to this vector.
  */
 NESTED(except_vec_ejtag_debug, 0, sp)
 	j	ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+	 nop
+#endif
 	END(except_vec_ejtag_debug)
 
 	__FINIT
@@ -251,9 +260,10 @@
 FEXPORT(except_vec_vi_mori)
 	ori	a0, $0, 0
 #endif /* CONFIG_MIPS_MT_SMTC */
+	PTR_LA	v1, except_vec_vi_handler
 FEXPORT(except_vec_vi_lui)
 	lui	v0, 0		/* Patched */
-	j	except_vec_vi_handler
+	jr	v1
 FEXPORT(except_vec_vi_ori)
 	 ori	v0, 0		/* Patched */
 	.set	pop
@@ -354,6 +364,9 @@
  */
 NESTED(except_vec_nmi, 0, sp)
 	j	nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+	 nop
+#endif
 	END(except_vec_nmi)
 
 	__FINIT
@@ -480,7 +493,7 @@
 	.set	noreorder
 	/* check if TLB contains a entry for EPC */
 	MFC0	k1, CP0_ENTRYHI
-	andi	k1, 0xff	/* ASID_MASK */
+	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
 	MFC0	k0, CP0_EPC
 	PTR_SRL k0, _PAGE_SHIFT + 1
 	PTR_SLL k0, _PAGE_SHIFT + 1
@@ -500,13 +513,35 @@
 	.set	push
 	.set	noat
 	.set	noreorder
-	/* 0x7c03e83b: rdhwr v1,$29 */
+	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
+	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
 	MFC0	k1, CP0_EPC
-	lui	k0, 0x7c03
-	lw	k1, (k1)
-	ori	k0, 0xe83b
-	.set	reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+	and     k0, k1, 1
+	beqz    k0, 1f
+	xor     k1, k0
+	lhu     k0, (k1)
+	lhu     k1, 2(k1)
+	ins     k1, k0, 16, 16
+	lui     k0, 0x007d
+	b       docheck
+	ori     k0, 0x6b3c
+1:
+	lui     k0, 0x7c03
+	lw      k1, (k1)
+	ori     k0, 0xe83b
+#else
+	andi    k0, k1, 1
+	bnez    k0, handle_ri
+	lui     k0, 0x7c03
+	lw      k1, (k1)
+	ori     k0, 0xe83b
+#endif
+	.set    reorder
+docheck:
 	bne	k0, k1, handle_ri	/* if not ours */
+
+isrdhwr:
 	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
 	get_saved_sp	/* k1 := current_thread_info */
 	.set	noreorder
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 485e6a9..c01b307 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/clocksource.h>
 
 #include <asm/io.h>
 #include <asm/gic.h>
@@ -19,6 +20,8 @@
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
 
+unsigned int gic_frequency;
+unsigned int gic_present;
 unsigned long _gic_base;
 unsigned int gic_irq_base;
 unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@
 static struct gic_pending_regs pending_regs[NR_CPUS];
 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
 
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+	unsigned int hi, hi2, lo;
+
+	do {
+		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+	} while (hi2 != hi);
+
+	return (((cycle_t) hi) << 32) + lo;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+				(int)(cnt >> 32));
+	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+				(int)(cnt & 0xffffffff));
+}
+
+cycle_t gic_read_compare(void)
+{
+	unsigned int hi, lo;
+
+	GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+	GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+	return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
 unsigned int gic_get_timer_pending(void)
 {
 	unsigned int vpe_pending;
@@ -116,6 +152,17 @@
 	}
 }
 
+unsigned int gic_compare_int(void)
+{
+	unsigned int pending;
+
+	GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+	if (pending & GIC_VPE_PEND_CMP_MSK)
+		return 1;
+	else
+		return 0;
+}
+
 unsigned int gic_get_int(void)
 {
 	unsigned int i;
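
gic_read_count() reads the high word twice because the low word can carry
into it between the two 32-bit accesses; if that happens, the halves would
belong to different 64-bit values. A standalone sketch of the same idiom,
with a simulated counter in place of the MMIO reads:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t hw_counter = 0xffffffffull;	/* about to carry */

	static uint32_t read_hi(void) { return hw_counter >> 32; }
	static uint32_t read_lo(void) { hw_counter++; return (uint32_t)hw_counter; }

	static uint64_t read_count64(void)
	{
		uint32_t hi, hi2, lo;

		do {				/* retry until hi is stable */
			hi = read_hi();
			lo = read_lo();
			hi2 = read_hi();
		} while (hi2 != hi);

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		/* first pass sees the carry and retries */
		printf("count = %#llx\n", (unsigned long long)read_count64());
		return 0;
	}
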
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index d1d576b..0b29646 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -165,10 +165,3 @@
 	return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
 			     merge_64(len_a4, len_a5));
 }
-
-SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
-		u64, a3, u64, a4, int, dfd, const char	__user *, pathname)
-{
-	return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
-				 dfd, pathname);
-}
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
index 411a058..8760975 100644
--- a/arch/mips/kernel/mips_machine.c
+++ b/arch/mips/kernel/mips_machine.c
@@ -11,9 +11,9 @@
 #include <linux/slab.h>
 
 #include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 static struct mips_machine *mips_machine __initdata;
-static char *mips_machine_name = "Unknown";
 
 #define for_each_machine(mach) \
 	for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@
 	     (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
 	     (mach)++)
 
-__init void mips_set_machine_name(const char *name)
-{
-	char *p;
-
-	if (name == NULL)
-		return;
-
-	p = kstrdup(name, GFP_KERNEL);
-	if (!p)
-		pr_err("MIPS: no memory for machine_name\n");
-
-	mips_machine_name = p;
-}
-
-char *mips_get_machine_name(void)
-{
-	return mips_machine_name;
-}
-
 __init int mips_machtype_setup(char *id)
 {
 	struct mips_machine *mach;
@@ -79,7 +60,6 @@
 		return;
 
 	mips_set_machine_name(mips_machine->mach_name);
-	pr_info("MIPS: machine is %s\n", mips_machine_name);
 
 	if (mips_machine->mach_setup)
 		mips_machine->mach_setup();
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 7a54f74..a3e4614 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -12,7 +12,7 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
-#include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 unsigned int vced_count, vcei_count;
 
@@ -99,6 +99,10 @@
 	if (cpu_has_vz)		seq_printf(m, "%s", " vz");
 	seq_printf(m, "\n");
 
+	if (cpu_has_mmips) {
+		seq_printf(m, "micromips kernel\t: %s\n",
+		      (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+	}
 	seq_printf(m, "shadow register sets\t: %d\n",
 		      cpu_data[n].srsets);
 	seq_printf(m, "kscratch registers\t: %d\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index cfc742d..eb902c1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,6 +7,7 @@
  * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013  Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -225,34 +226,115 @@
 
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+	union mips_instruction mmi;
+
+	/*
+	 * swsp ra,offset
+	 * swm16 reglist,offset(sp)
+	 * swm32 reglist,offset(sp)
+	 * sw32 ra,offset(sp)
+	 * jraddiusp - NOT SUPPORTED
+	 *
+	 * microMIPS is way more fun...
+	 */
+	if (mm_insn_16bit(ip->halfword[0])) {
+		mmi.word = (ip->halfword[0] << 16);
+		return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+			 mmi.mm16_r5_format.rt == 31) ||
+			(mmi.mm16_m_format.opcode == mm_pool16c_op &&
+			 mmi.mm16_m_format.func == mm_swm16_op));
+	}
+	else {
+		mmi.halfword[0] = ip->halfword[1];
+		mmi.halfword[1] = ip->halfword[0];
+		return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+			 mmi.mm_m_format.rd > 9 &&
+			 mmi.mm_m_format.base == 29 &&
+			 mmi.mm_m_format.func == mm_swm32_func) ||
+			(mmi.i_format.opcode == mm_sw32_op &&
+			 mmi.i_format.rs == 29 &&
+			 mmi.i_format.rt == 31));
+	}
+#else
 	/* sw / sd $ra, offset($sp) */
 	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
 		ip->i_format.rs == 29 &&
 		ip->i_format.rt == 31;
+#endif
 }
 
 static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+	/*
+	 * jr16,jrc,jalr16,jalrs16
+	 * jal
+	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+	 * jraddiusp - NOT SUPPORTED
+	 *
+	 * microMIPS is kind of more fun...
+	 */
+	union mips_instruction mmi;
+
+	mmi.word = (ip->halfword[0] << 16);
+
+	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+	    ip->j_format.opcode == mm_jal32_op)
+		return 1;
+	if (ip->r_format.opcode != mm_pool32a_op ||
+			ip->r_format.func != mm_pool32axf_op)
+		return 0;
+	return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+#else
 	if (ip->j_format.opcode == jal_op)
 		return 1;
 	if (ip->r_format.opcode != spec_op)
 		return 0;
 	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
 }
 
 static inline int is_sp_move_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+	/*
+	 * addiusp -imm
+	 * addius5 sp,-imm
+	 * addiu32 sp,sp,-imm
+	 * jraddiusp - NOT SUPPORTED
+	 *
+	 * microMIPS is not more fun...
+	 */
+	if (mm_insn_16bit(ip->halfword[0])) {
+		union mips_instruction mmi;
+
+		mmi.word = (ip->halfword[0] << 16);
+		return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+			 mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
+			(mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+			 mmi.mm16_r5_format.rt == 29));
+	}
+	return (ip->mm_i_format.opcode == mm_addiu32_op &&
+		 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+#else
 	/* addiu/daddiu sp,sp,-imm */
 	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
 		return 0;
 	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
 		return 1;
+#endif
 	return 0;
 }
 
 static int get_frame_info(struct mips_frame_info *info)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+#else
 	union mips_instruction *ip = info->func;
+#endif
 	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
 	unsigned i;
 
@@ -272,7 +354,26 @@
 			break;
 		if (!info->frame_size) {
 			if (is_sp_move_ins(ip))
+			{
+#ifdef CONFIG_CPU_MICROMIPS
+				if (mm_insn_16bit(ip->halfword[0]))
+				{
+					unsigned short tmp;
+
+					if (ip->halfword[0] & mm_addiusp_func)
+					{
+						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+					} else {
+						tmp = (ip->halfword[0] >> 1);
+						info->frame_size = -(signed short)(tmp & 0xf);
+					}
+					ip = (void *) &ip->halfword[1];
+					ip--;
+				} else
+#endif
 				info->frame_size = - ip->i_format.simmediate;
+			}
 			continue;
 		}
 		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
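
The ADDIUSP decode above packs a 9-bit immediate scaled by 4 and then
sign-extends it by hand. A standalone rerun of the same arithmetic; the
halfword value is hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t halfword = 0x4ff9;	/* hypothetical addiusp encoding */
		unsigned short tmp = ((halfword >> 1) & 0x1ff) << 2;
		int frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));

		printf("frame_size = %d bytes\n", frame_size);	/* prints 16 */
		return 0;
	}
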
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 028f6f8..5712bb5 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -23,6 +23,23 @@
 #include <asm/page.h>
 #include <asm/prom.h>
 
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+	if (name == NULL)
+		return;
+
+	strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
+	pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+	return mips_machine_name;
+}
+
+#ifdef CONFIG_OF
 int __init early_init_dt_scan_memory_arch(unsigned long node,
 					  const char *uname, int depth,
 					  void *data)
@@ -50,6 +67,18 @@
 }
 #endif
 
+int __init early_init_dt_scan_model(unsigned long node, const char *uname,
+				    int depth, void *data)
+{
+	if (!depth) {
+		char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+		if (model)
+			mips_set_machine_name(model);
+	}
+	return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
 	/* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@
 	/* Scan memory nodes */
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+
+	/* try to load the mips machine name */
+	of_scan_flat_dt(early_init_dt_scan_model, NULL);
 }
 
 void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@
 
 	early_init_devtree(initial_boot_params);
 }
+#endif
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9ea2964..9b36424 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -138,9 +138,18 @@
 5:	jr	t1
 	 sw	t5, 16(sp)		# argument #5 to ksp
 
+#ifdef CONFIG_CPU_MICROMIPS
+	sw	t8, 28(sp)		# argument #8 to ksp
+	nop
+	sw	t7, 24(sp)		# argument #7 to ksp
+	nop
+	sw	t6, 20(sp)		# argument #6 to ksp
+	nop
+#else
 	sw	t8, 28(sp)		# argument #8 to ksp
 	sw	t7, 24(sp)		# argument #7 to ksp
 	sw	t6, 20(sp)		# argument #6 to ksp
+#endif
 6:	j	stack_done		# go back
 	 nop
 	.set	pop
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 103bfe5..74f485d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -529,7 +529,7 @@
 	PTR	sys_accept4
 	PTR	compat_sys_recvmmsg		/* 4335 */
 	PTR	sys_fanotify_init
-	PTR	sys_32_fanotify_mark
+	PTR	compat_sys_fanotify_mark
 	PTR	sys_prlimit64
 	PTR	sys_name_to_handle_at
 	PTR	compat_sys_open_by_handle_at	/* 4340 */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4c774d5..c7f9051 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -23,6 +23,7 @@
 #include <linux/pfn.h>
 #include <linux/debugfs.h>
 #include <linux/kexec.h>
+#include <linux/sizes.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -77,6 +78,8 @@
 static struct resource code_resource = { .name = "Kernel code", };
 static struct resource data_resource = { .name = "Kernel data", };
 
+static void *detect_magic __initdata = detect_memory_region;
+
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
 	int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@
 	boot_mem_map.nr_map++;
 }
 
+void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+{
+	void *dm = &detect_magic;
+	phys_t size;
+
+	for (size = sz_min; size < sz_max; size <<= 1) {
+		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+			break;
+	}
+
+	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+		((unsigned long long) size) / SZ_1M,
+		(unsigned long long) start,
+		((unsigned long long) sz_min) / SZ_1M,
+		((unsigned long long) sz_max) / SZ_1M);
+
+	add_memory_region(start, size, BOOT_MEM_RAM);
+}
+
 static void __init print_memory_map(void)
 {
 	int i;
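
detect_memory_region() relies on the fact that on many SoCs RAM aliases
(wraps around) above its real size: it doubles a candidate size until the
magic value planted at the base reappears at base + size. A userspace model
of the probe, with a 64-byte "real" memory aliased inside a larger window:

	#include <stdio.h>
	#include <string.h>

	#define REAL_SIZE 64
	static char ram[256];

	int main(void)
	{
		size_t size;

		for (size_t i = 0; i < sizeof(ram); i++)
			ram[i] = (char)(i % REAL_SIZE);	/* simulate aliasing */

		for (size = 8; size < sizeof(ram); size <<= 1)
			if (!memcmp(ram, ram + size, 8))
				break;			/* magic reappeared */

		printf("detected %zu bytes\n", size);	/* prints 64 */
		return 0;
	}
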
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b5e88fd..fd3ef2c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
+#include <asm/inst.h>
 
 #include "signal-common.h"
 
@@ -480,7 +481,15 @@
 	sigset_t *oldset = sigmask_to_save();
 	int ret;
 	struct mips_abi *abi = current->thread.abi;
+#ifdef CONFIG_CPU_MICROMIPS
+	void *vdso;
+	unsigned int tmp = (unsigned int)current->mm->context.vdso;
+
+	set_isa16_mode(tmp);
+	vdso = (void *)tmp;
+#else
 	void *vdso = current->mm->context.vdso;
+#endif
 
 	if (regs->regs[0]) {
 		switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index bfede06..3e5164c 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -34,6 +34,7 @@
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
+#include <asm/gic.h>
 
 static void __init smvp_copy_vpe_config(void)
 {
@@ -151,8 +152,6 @@
 static void __cpuinit vsmp_init_secondary(void)
 {
 #ifdef CONFIG_IRQ_GIC
-	extern int gic_present;
-
 	/* This is Malta specific: IPI,performance and timer interrupts */
 	if (gic_present)
 		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index aee04af..c17619f 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -83,6 +83,7 @@
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
 __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
 {
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 76016ac..2866863 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -49,6 +49,9 @@
 	.text
 	.align 5
 FEXPORT(__smtc_ipi_vector)
+#ifdef CONFIG_CPU_MICROMIPS
+	nop
+#endif
 	.set	noat
 	/* Disable thread scheduling to make Status update atomic */
 	DMT	27					# dmt	k1
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 7186222..31d22f3 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned long smtc_asid_mask = 0xff;
+unsigned int smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -1395,7 +1395,7 @@
 	asid = asid_cache(cpu);
 
 	do {
-		if (!((asid += ASID_INC) & ASID_MASK) ) {
+		if (!ASID_MASK(ASID_INC(asid))) {
 			if (cpu_has_vtag_icache)
 				flush_icache_all();
 			/* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@
 						mips_ihb();
 					}
 					tcstat = read_tc_c0_tcstatus();
-					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+					smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
 					if (!prevhalt)
 						write_tc_c0_tchalt(0);
 				}
@@ -1423,7 +1423,7 @@
 				asid = ASID_FIRST_VERSION;
 			local_flush_tlb_all();	/* start new asid cycle */
 		}
-	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+	} while (smtc_live_asid[tlb][ASID_MASK(asid)]);
 
 	/*
 	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@
 		tlb_read();
 		ehb();
 		ehi = read_c0_entryhi();
-		if ((ehi & ASID_MASK) == asid) {
+		if (ASID_MASK(ehi) == asid) {
 		    /*
 		     * Invalidate only entries with specified ASID,
 		     * making sure all entries differ.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2522551..77cff1f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -60,9 +60,9 @@
 extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-				    struct mips_fpu_struct *ctx, int has_fpu,
-				    void *__user *fault_addr);
-
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
+/*  microMIPS definitions   */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR        0x00006b3c
+#define MM_RS           0x001f0000
+#define MM_RT           0x03e00000
+
 /*
  * The ll_bit is cleared by r*_switch.S
  */
@@ -596,42 +598,62 @@
  * Simulate trapping 'rdhwr' instructions to provide user accessible
  * registers not implemented in hardware.
  */
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 {
 	struct thread_info *ti = task_thread_info(current);
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+			1, regs, 0);
+	switch (rd) {
+	case 0:		/* CPU number */
+		regs->regs[rt] = smp_processor_id();
+		return 0;
+	case 1:		/* SYNCI length */
+		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+				     current_cpu_data.icache.linesz);
+		return 0;
+	case 2:		/* Read count register */
+		regs->regs[rt] = read_c0_count();
+		return 0;
+	case 3:		/* Count register resolution */
+		switch (current_cpu_data.cputype) {
+		case CPU_20KC:
+		case CPU_25KF:
+			regs->regs[rt] = 1;
+			break;
+		default:
+			regs->regs[rt] = 2;
+		}
+		return 0;
+	case 29:
+		regs->regs[rt] = ti->tp_value;
+		return 0;
+	default:
+		return -1;
+	}
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 		int rd = (opcode & RD) >> 11;
 		int rt = (opcode & RT) >> 16;
-		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-				1, regs, 0);
-		switch (rd) {
-		case 0:		/* CPU number */
-			regs->regs[rt] = smp_processor_id();
-			return 0;
-		case 1:		/* SYNCI length */
-			regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-					     current_cpu_data.icache.linesz);
-			return 0;
-		case 2:		/* Read count register */
-			regs->regs[rt] = read_c0_count();
-			return 0;
-		case 3:		/* Count register resolution */
-			switch (current_cpu_data.cputype) {
-			case CPU_20KC:
-			case CPU_25KF:
-				regs->regs[rt] = 1;
-				break;
-			default:
-				regs->regs[rt] = 2;
-			}
-			return 0;
-		case 29:
-			regs->regs[rt] = ti->tp_value;
-			return 0;
-		default:
-			return -1;
-		}
+
+		return simulate_rdhwr(regs, rd, rt);
+	}
+
+	/* Not ours.  */
+	return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
+{
+	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+		int rd = (opcode & MM_RS) >> 16;
+		int rt = (opcode & MM_RT) >> 21;
+		return simulate_rdhwr(regs, rd, rt);
 	}
 
 	/* Not ours.  */
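
What this emulation backs up is visible from userspace: hardware register
29 is the TLS pointer, and on cores where RDHWR is unimplemented (or masked
in HWREna) the instruction traps and lands in the paths above. A sketch of
the userspace side; it only builds with a MIPS toolchain:

	#include <stdio.h>

	int main(void)
	{
		unsigned long tp;

		__asm__ volatile(
			".set	push\n\t"
			".set	mips32r2\n\t"
			"rdhwr	%0, $29\n\t"	/* traps + gets emulated if needed */
			".set	pop"
			: "=r" (tp));
		printf("TLS pointer: %#lx\n", tp);
		return 0;
	}
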
@@ -662,7 +684,7 @@
 	force_sig_info(SIGFPE, &info, current);
 }
 
-static int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr)
 {
 	if (sig == SIGSEGV || sig == SIGBUS) {
 		struct siginfo si = {0};
@@ -813,9 +835,29 @@
 asmlinkage void do_bp(struct pt_regs *regs)
 {
 	unsigned int opcode, bcode;
+	unsigned long epc;
+	u16 instr[2];
 
-	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-		goto out_sigsegv;
+	if (get_isa16_mode(regs->cp0_epc)) {
+		/* Calculate EPC. */
+		epc = exception_epc(regs);
+		if (cpu_has_mmips) {
+			if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+			    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+				goto out_sigsegv;
+			opcode = (instr[0] << 16) | instr[1];
+		} else {
+			/* MIPS16e mode */
+			if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+				goto out_sigsegv;
+			bcode = (instr[0] >> 6) & 0x3f;
+			do_trap_or_bp(regs, bcode, "Break");
+			return;
+		}
+	} else {
+		if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+			goto out_sigsegv;
+	}
 
 	/*
 	 * There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@
 asmlinkage void do_tr(struct pt_regs *regs)
 {
 	unsigned int opcode, tcode = 0;
+	u16 instr[2];
+	unsigned long epc = exception_epc(regs);
 
-	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-		goto out_sigsegv;
+	if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+		(__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+			goto out_sigsegv;
+	opcode = (instr[0] << 16) | instr[1];
 
 	/* Immediate versions don't provide a code.  */
-	if (!(opcode & OPCODE))
-		tcode = ((opcode >> 6) & ((1 << 10) - 1));
+	if (!(opcode & OPCODE)) {
+		if (get_isa16_mode(regs->cp0_epc))
+			/* microMIPS */
+			tcode = (opcode >> 12) & 0x1f;
+		else
+			tcode = ((opcode >> 6) & ((1 << 10) - 1));
+	}
 
 	do_trap_or_bp(regs, tcode, "Trap");
 	return;
@@ -875,6 +926,7 @@
 {
 	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
 	unsigned long old_epc = regs->cp0_epc;
+	unsigned long old31 = regs->regs[31];
 	unsigned int opcode = 0;
 	int status = -1;
 
@@ -887,23 +939,37 @@
 	if (unlikely(compute_return_epc(regs) < 0))
 		return;
 
-	if (unlikely(get_user(opcode, epc) < 0))
-		status = SIGSEGV;
+	if (get_isa16_mode(regs->cp0_epc)) {
+		unsigned short mmop[2] = { 0 };
 
-	if (!cpu_has_llsc && status < 0)
-		status = simulate_llsc(regs, opcode);
+		if (unlikely(get_user(mmop[0], epc) < 0))
+			status = SIGSEGV;
+		if (unlikely(get_user(mmop[1], epc) < 0))
+			status = SIGSEGV;
+		opcode = (mmop[0] << 16) | mmop[1];
 
-	if (status < 0)
-		status = simulate_rdhwr(regs, opcode);
+		if (status < 0)
+			status = simulate_rdhwr_mm(regs, opcode);
+	} else {
+		if (unlikely(get_user(opcode, epc) < 0))
+			status = SIGSEGV;
 
-	if (status < 0)
-		status = simulate_sync(regs, opcode);
+		if (!cpu_has_llsc && status < 0)
+			status = simulate_llsc(regs, opcode);
+
+		if (status < 0)
+			status = simulate_rdhwr_normal(regs, opcode);
+
+		if (status < 0)
+			status = simulate_sync(regs, opcode);
+	}
 
 	if (status < 0)
 		status = SIGILL;
 
 	if (unlikely(status > 0)) {
 		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
+		regs->regs[31] = old31;
 		force_sig(status, current);
 	}
 }
@@ -973,7 +1039,7 @@
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int __user *epc;
-	unsigned long old_epc;
+	unsigned long old_epc, old31;
 	unsigned int opcode;
 	unsigned int cpid;
 	int status;
@@ -987,26 +1053,41 @@
 	case 0:
 		epc = (unsigned int __user *)exception_epc(regs);
 		old_epc = regs->cp0_epc;
+		old31 = regs->regs[31];
 		opcode = 0;
 		status = -1;
 
 		if (unlikely(compute_return_epc(regs) < 0))
 			return;
 
-		if (unlikely(get_user(opcode, epc) < 0))
-			status = SIGSEGV;
+		if (get_isa16_mode(regs->cp0_epc)) {
+			unsigned short mmop[2] = { 0 };
 
-		if (!cpu_has_llsc && status < 0)
-			status = simulate_llsc(regs, opcode);
+			if (unlikely(get_user(mmop[0], epc) < 0))
+				status = SIGSEGV;
+			if (unlikely(get_user(mmop[1], epc) < 0))
+				status = SIGSEGV;
+			opcode = (mmop[0] << 16) | mmop[1];
 
-		if (status < 0)
-			status = simulate_rdhwr(regs, opcode);
+			if (status < 0)
+				status = simulate_rdhwr_mm(regs, opcode);
+		} else {
+			if (unlikely(get_user(opcode, epc) < 0))
+				status = SIGSEGV;
+
+			if (!cpu_has_llsc && status < 0)
+				status = simulate_llsc(regs, opcode);
+
+			if (status < 0)
+				status = simulate_rdhwr_normal(regs, opcode);
+		}
 
 		if (status < 0)
 			status = SIGILL;
 
 		if (unlikely(status > 0)) {
 			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
+			regs->regs[31] = old31;
 			force_sig(status, current);
 		}
 
@@ -1320,7 +1401,7 @@
 void ejtag_exception_handler(struct pt_regs *regs)
 {
 	const int field = 2 * sizeof(unsigned long);
-	unsigned long depc, old_epc;
+	unsigned long depc, old_epc, old_ra;
 	unsigned int debug;
 
 	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@
 		 * calculation.
 		 */
 		old_epc = regs->cp0_epc;
+		old_ra = regs->regs[31];
 		regs->cp0_epc = depc;
-		__compute_return_epc(regs);
+		compute_return_epc(regs);
 		depc = regs->cp0_epc;
 		regs->cp0_epc = old_epc;
+		regs->regs[31] = old_ra;
 	} else
 		depc += 4;
 	write_c0_depc(depc);
@@ -1377,11 +1460,27 @@
 void __init *set_except_vector(int n, void *addr)
 {
 	unsigned long handler = (unsigned long) addr;
-	unsigned long old_handler = exception_handlers[n];
+	unsigned long old_handler;
 
-	exception_handlers[n] = handler;
+#ifdef CONFIG_CPU_MICROMIPS
+	/*
+	 * Only the TLB handlers are cache aligned with an even
+	 * address. All other handlers are on an odd address and
+	 * require no modification. Otherwise, MIPS32 mode will
+	 * be entered when handling any TLB exceptions. That
+	 * would be bad...since we must stay in microMIPS mode.
+	 */
+	if (!(handler & 0x1))
+		handler |= 1;
+#endif
+	old_handler = xchg(&exception_handlers[n], handler);
+
 	if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+		unsigned long jump_mask = ~((1 << 27) - 1);
+#else
 		unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
 		u32 *buf = (u32 *)(ebase + 0x200);
 		unsigned int k0 = 26;
 		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
@@ -1397,7 +1496,7 @@
 	return (void *)old_handler;
 }
 
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
 {
 	show_regs(get_irq_regs());
 	panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
 	int srssets = current_cpu_data.srsets;
-	u32 *w;
+	u16 *h;
 	unsigned char *b;
 
 	BUG_ON(!cpu_has_veic && !cpu_has_vint);
+	BUG_ON((n < 0) || (n > 9));
 
 	if (addr == NULL) {
 		handler = (unsigned long) do_default_vi;
 		srs = 0;
 	} else
 		handler = (unsigned long) addr;
-	vi_handlers[n] = (unsigned long) addr;
+	vi_handlers[n] = handler;
 
 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
@@ -1437,9 +1537,8 @@
 	if (srs == 0) {
 		/*
 		 * If no shadow set is selected then use the default handler
-		 * that does normal register saving and a standard interrupt exit
+		 * that does normal register saving and standard interrupt exit
 		 */
-
 		extern char except_vec_vi, except_vec_vi_lui;
 		extern char except_vec_vi_ori, except_vec_vi_end;
 		extern char rollback_except_vec_vi;
@@ -1452,11 +1551,20 @@
 		 * Status.IM bit to be masked before going there.
 		 */
 		extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+		const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
 		const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
 #endif /* CONFIG_MIPS_MT_SMTC */
-		const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
 		const int lui_offset = &except_vec_vi_lui - vec_start;
 		const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+		const int handler_len = &except_vec_vi_end - vec_start;
 
 		if (handler_len > VECTORSPACING) {
 			/*
@@ -1466,30 +1574,44 @@
 			panic("VECTORSPACING too small");
 		}
 
-		memcpy(b, vec_start, handler_len);
+		set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+				(handler_len - 1));
+#else
+				handler_len);
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
 		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */
 
-		w = (u32 *)(b + mori_offset);
-		*w = (*w & 0xffff0000) | (0x100 << n);
+		h = (u16 *)(b + mori_offset);
+		*h = (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
-		w = (u32 *)(b + lui_offset);
-		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-		w = (u32 *)(b + ori_offset);
-		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+		h = (u16 *)(b + lui_offset);
+		*h = (handler >> 16) & 0xffff;
+		h = (u16 *)(b + ori_offset);
+		*h = (handler & 0xffff);
 		local_flush_icache_range((unsigned long)b,
 					 (unsigned long)(b+handler_len));
 	}
 	else {
 		/*
-		 * In other cases jump directly to the interrupt handler
-		 *
-		 * It is the handlers responsibility to save registers if required
-		 * (eg hi/lo) and return from the exception using "eret"
+		 * In other cases jump directly to the interrupt handler. It
+		 * is the handler's responsibility to save registers if required
+		 * (eg hi/lo) and return from the exception using "eret".
 		 */
-		w = (u32 *)b;
-		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-		*w = 0;
+		u32 insn;
+
+		h = (u16 *)b;
+		/* j handler */
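+		/*
+		 * A classic MIPS j encodes (target >> 2) in the low 26 bits
+		 * of 0x08000000; the 32-bit microMIPS j encodes (target >> 1)
+		 * in the low 26 bits of 0xd4000000.
+		 */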
+#ifdef CONFIG_CPU_MICROMIPS
+		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+		h[0] = (insn >> 16) & 0xffff;
+		h[1] = insn & 0xffff;
+		h[2] = 0;
+		h[3] = 0;
 		local_flush_icache_range((unsigned long)b,
 					 (unsigned long)(b+8));
 	}
@@ -1534,6 +1656,7 @@
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
 	unsigned int hwrena = cpu_hwrena_impl_bits;
+	unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1617,8 +1740,9 @@
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+	asid = ASID_FIRST_VERSION;
+	cpu_data[cpu].asid_cache = asid;
+	TLBMISS_HANDLER_SETUP();
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
@@ -1648,7 +1772,11 @@
 /* Install CPU exception handler */
 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
+#ifdef CONFIG_CPU_MICROMIPS
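+	/*
+	 * microMIPS handler addresses carry the ISA-mode bit in bit 0;
+	 * strip it to recover the byte address of the code to copy.
+	 */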
+	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
 	memcpy((void *)(ebase + offset), addr, size);
+#endif
 	local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
@@ -1682,8 +1810,9 @@
 
 void __init trap_init(void)
 {
-	extern char except_vec3_generic, except_vec3_r4000;
+	extern char except_vec3_generic;
 	extern char except_vec4;
+	extern char except_vec3_r4000;
 	unsigned long i;
 	int rollback;
 
@@ -1700,7 +1829,12 @@
 		ebase = (unsigned long)
 			__alloc_bootmem(size, 1 << fls(size), 0);
 	} else {
-		ebase = CKSEG0;
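+		/*
+		 * A KVM guest kernel runs in the user segment; its unmapped
+		 * kernel region starts at 0x40000000 (see
+		 * arch/mips/kvm/00README.txt), so place the exception base
+		 * there instead of in CKSEG0.
+		 */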
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0     0x40000000
+		ebase = KVM_GUEST_KSEG0;
+#else
+		ebase = CKSEG0;
+#endif
 		if (cpu_has_mips_r2)
 			ebase += (read_c0_ebase() & 0x3ffff000);
 	}
@@ -1816,11 +1950,11 @@
 
 	if (cpu_has_vce)
 		/* Special exception: R4[04]00 uses also the divec space. */
-		memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+		set_handler(0x180, &except_vec3_r4000, 0x100);
 	else if (cpu_has_4kex)
-		memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+		set_handler(0x180, &except_vec3_generic, 0x80);
 	else
-		memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+		set_handler(0x080, &except_vec3_generic, 0x80);
 
 	local_flush_icache_range(ebase, ebase + 0x400);
 	flush_tlb_handlers();
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 6087a54..203d885 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -83,8 +83,12 @@
 #include <asm/branch.h>
 #include <asm/byteorder.h>
 #include <asm/cop2.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/uaccess.h>
 
 #define STR(x)	__STR(x)
 #define __STR(x)  #x
@@ -102,12 +106,332 @@
 #endif
 extern void show_registers(struct pt_regs *regs);
 
+#ifdef __BIG_ENDIAN
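+/*
+ * Each access macro below assembles an unaligned access from byte or
+ * partial left/right word accesses.  On success `res` is set to 0; if
+ * any constituent access faults, the __ex_table fixup stores -EFAULT
+ * in `res` instead of oopsing.
+ */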
+#define     LoadHW(addr, value, res)  \
+		__asm__ __volatile__ (".set\tnoat\n"        \
+			"1:\tlb\t%0, 0(%2)\n"               \
+			"2:\tlbu\t$1, 1(%2)\n\t"            \
+			"sll\t%0, 0x8\n\t"                  \
+			"or\t%0, $1\n\t"                    \
+			"li\t%1, 0\n"                       \
+			"3:\t.set\tat\n\t"                  \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+		__asm__ __volatile__ (                      \
+			"1:\tlwl\t%0, (%2)\n"               \
+			"2:\tlwr\t%0, 3(%2)\n\t"            \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tnoat\n"                      \
+			"1:\tlbu\t%0, 0(%2)\n"              \
+			"2:\tlbu\t$1, 1(%2)\n\t"            \
+			"sll\t%0, 0x8\n\t"                  \
+			"or\t%0, $1\n\t"                    \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".set\tat\n\t"                      \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			"1:\tlwl\t%0, (%2)\n"               \
+			"2:\tlwr\t%0, 3(%2)\n\t"            \
+			"dsll\t%0, %0, 32\n\t"              \
+			"dsrl\t%0, %0, 32\n\t"              \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			"\t.section\t.fixup,\"ax\"\n\t"     \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			"1:\tldl\t%0, (%2)\n"               \
+			"2:\tldr\t%0, 7(%2)\n\t"            \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			"\t.section\t.fixup,\"ax\"\n\t"     \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tnoat\n"                      \
+			"1:\tsb\t%1, 1(%2)\n\t"             \
+			"srl\t$1, %1, 0x8\n"                \
+			"2:\tsb\t$1, 0(%2)\n\t"             \
+			".set\tat\n\t"                      \
+			"li\t%0, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%0, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=r" (res)                        \
+			: "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			"1:\tswl\t%1,(%2)\n"                \
+			"2:\tswr\t%1, 3(%2)\n\t"            \
+			"li\t%0, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%0, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+		: "=r" (res)                                \
+		: "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			"1:\tsdl\t%1,(%2)\n"                \
+			"2:\tsdr\t%1, 7(%2)\n\t"            \
+			"li\t%0, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%0, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+		: "=r" (res)                                \
+		: "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define     LoadHW(addr, value, res)  \
+		__asm__ __volatile__ (".set\tnoat\n"        \
+			"1:\tlb\t%0, 1(%2)\n"               \
+			"2:\tlbu\t$1, 0(%2)\n\t"            \
+			"sll\t%0, 0x8\n\t"                  \
+			"or\t%0, $1\n\t"                    \
+			"li\t%1, 0\n"                       \
+			"3:\t.set\tat\n\t"                  \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+		__asm__ __volatile__ (                      \
+			"1:\tlwl\t%0, 3(%2)\n"              \
+			"2:\tlwr\t%0, (%2)\n\t"             \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tnoat\n"                      \
+			"1:\tlbu\t%0, 1(%2)\n"              \
+			"2:\tlbu\t$1, 0(%2)\n\t"            \
+			"sll\t%0, 0x8\n\t"                  \
+			"or\t%0, $1\n\t"                    \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".set\tat\n\t"                      \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			"1:\tlwl\t%0, 3(%2)\n"              \
+			"2:\tlwr\t%0, (%2)\n\t"             \
+			"dsll\t%0, %0, 32\n\t"              \
+			"dsrl\t%0, %0, 32\n\t"              \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			"\t.section\t.fixup,\"ax\"\n\t"     \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			"1:\tldl\t%0, 7(%2)\n"              \
+			"2:\tldr\t%0, (%2)\n\t"             \
+			"li\t%1, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			"\t.section\t.fixup,\"ax\"\n\t"     \
+			"4:\tli\t%1, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=&r" (value), "=r" (res)         \
+			: "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			".set\tnoat\n"                      \
+			"1:\tsb\t%1, 0(%2)\n\t"             \
+			"srl\t$1,%1, 0x8\n"                 \
+			"2:\tsb\t$1, 1(%2)\n\t"             \
+			".set\tat\n\t"                      \
+			"li\t%0, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%0, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+			: "=r" (res)                        \
+			: "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+		__asm__ __volatile__ (                      \
+			"1:\tswl\t%1, 3(%2)\n"              \
+			"2:\tswr\t%1, (%2)\n\t"             \
+			"li\t%0, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%0, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+		: "=r" (res)                                \
+		: "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+		__asm__ __volatile__ (                      \
+			"1:\tsdl\t%1, 7(%2)\n"              \
+			"2:\tsdr\t%1, (%2)\n\t"             \
+			"li\t%0, 0\n"                       \
+			"3:\n\t"                            \
+			".insn\n\t"                         \
+			".section\t.fixup,\"ax\"\n\t"       \
+			"4:\tli\t%0, %3\n\t"                \
+			"j\t3b\n\t"                         \
+			".previous\n\t"                     \
+			".section\t__ex_table,\"a\"\n\t"    \
+			STR(PTR)"\t1b, 4b\n\t"              \
+			STR(PTR)"\t2b, 4b\n\t"              \
+			".previous"                         \
+		: "=r" (res)                                \
+		: "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
 static void emulate_load_store_insn(struct pt_regs *regs,
 	void __user *addr, unsigned int __user *pc)
 {
 	union mips_instruction insn;
 	unsigned long value;
 	unsigned int res;
+	unsigned long origpc;
+	unsigned long orig31;
+	void __user *fault_addr = NULL;
+
+	origpc = (unsigned long)pc;
+	orig31 = regs->regs[31];
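+	/*
+	 * Remember the PC and $31 so that a faulting access can roll back
+	 * the branch emulation done by compute_return_epc().
+	 */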
 
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
@@ -117,22 +441,22 @@
 	__get_user(insn.word, pc);
 
 	switch (insn.i_format.opcode) {
-	/*
-	 * These are instructions that a compiler doesn't generate.  We
-	 * can assume therefore that the code is MIPS-aware and
-	 * really buggy.  Emulating these instructions would break the
-	 * semantics anyway.
-	 */
+		/*
+		 * These are instructions that a compiler doesn't generate.  We
+		 * can assume therefore that the code is MIPS-aware and
+		 * really buggy.  Emulating these instructions would break the
+		 * semantics anyway.
+		 */
 	case ll_op:
 	case lld_op:
 	case sc_op:
 	case scd_op:
 
-	/*
-	 * For these instructions the only way to create an address
-	 * error is an attempted access to kernel/supervisor address
-	 * space.
-	 */
+		/*
+		 * For these instructions the only way to create an address
+		 * error is an attempted access to kernel/supervisor address
+		 * space.
+		 */
 	case ldl_op:
 	case ldr_op:
 	case lwl_op:
@@ -146,36 +470,15 @@
 	case sb_op:
 		goto sigbus;
 
-	/*
-	 * The remaining opcodes are the ones that are really of interest.
-	 */
+		/*
+		 * The remaining opcodes are the ones that are really of
+		 * interest.
+		 */
 	case lh_op:
 		if (!access_ok(VERIFY_READ, addr, 2))
 			goto sigbus;
 
-		__asm__ __volatile__ (".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-			"1:\tlb\t%0, 0(%2)\n"
-			"2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tlb\t%0, 1(%2)\n"
-			"2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-			"sll\t%0, 0x8\n\t"
-			"or\t%0, $1\n\t"
-			"li\t%1, 0\n"
-			"3:\t.set\tat\n\t"
-			".section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%1, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-			: "=&r" (value), "=r" (res)
-			: "r" (addr), "i" (-EFAULT));
+		LoadHW(addr, value, res);
 		if (res)
 			goto fault;
 		compute_return_epc(regs);
@@ -186,26 +489,7 @@
 		if (!access_ok(VERIFY_READ, addr, 4))
 			goto sigbus;
 
-		__asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-			"1:\tlwl\t%0, (%2)\n"
-			"2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tlwl\t%0, 3(%2)\n"
-			"2:\tlwr\t%0, (%2)\n\t"
-#endif
-			"li\t%1, 0\n"
-			"3:\t.section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%1, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-			: "=&r" (value), "=r" (res)
-			: "r" (addr), "i" (-EFAULT));
+		LoadW(addr, value, res);
 		if (res)
 			goto fault;
 		compute_return_epc(regs);
@@ -216,30 +500,7 @@
 		if (!access_ok(VERIFY_READ, addr, 2))
 			goto sigbus;
 
-		__asm__ __volatile__ (
-			".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-			"1:\tlbu\t%0, 0(%2)\n"
-			"2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tlbu\t%0, 1(%2)\n"
-			"2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-			"sll\t%0, 0x8\n\t"
-			"or\t%0, $1\n\t"
-			"li\t%1, 0\n"
-			"3:\t.set\tat\n\t"
-			".section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%1, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-			: "=&r" (value), "=r" (res)
-			: "r" (addr), "i" (-EFAULT));
+		LoadHWU(addr, value, res);
 		if (res)
 			goto fault;
 		compute_return_epc(regs);
@@ -258,28 +519,7 @@
 		if (!access_ok(VERIFY_READ, addr, 4))
 			goto sigbus;
 
-		__asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-			"1:\tlwl\t%0, (%2)\n"
-			"2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tlwl\t%0, 3(%2)\n"
-			"2:\tlwr\t%0, (%2)\n\t"
-#endif
-			"dsll\t%0, %0, 32\n\t"
-			"dsrl\t%0, %0, 32\n\t"
-			"li\t%1, 0\n"
-			"3:\t.section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%1, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-			: "=&r" (value), "=r" (res)
-			: "r" (addr), "i" (-EFAULT));
+		LoadWU(addr, value, res);
 		if (res)
 			goto fault;
 		compute_return_epc(regs);
@@ -302,26 +542,7 @@
 		if (!access_ok(VERIFY_READ, addr, 8))
 			goto sigbus;
 
-		__asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-			"1:\tldl\t%0, (%2)\n"
-			"2:\tldr\t%0, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tldl\t%0, 7(%2)\n"
-			"2:\tldr\t%0, (%2)\n\t"
-#endif
-			"li\t%1, 0\n"
-			"3:\t.section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%1, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-			: "=&r" (value), "=r" (res)
-			: "r" (addr), "i" (-EFAULT));
+		LoadDW(addr, value, res);
 		if (res)
 			goto fault;
 		compute_return_epc(regs);
@@ -336,68 +557,22 @@
 		if (!access_ok(VERIFY_WRITE, addr, 2))
 			goto sigbus;
 
+		compute_return_epc(regs);
 		value = regs->regs[insn.i_format.rt];
-		__asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-			".set\tnoat\n"
-			"1:\tsb\t%1, 1(%2)\n\t"
-			"srl\t$1, %1, 0x8\n"
-			"2:\tsb\t$1, 0(%2)\n\t"
-			".set\tat\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			".set\tnoat\n"
-			"1:\tsb\t%1, 0(%2)\n\t"
-			"srl\t$1,%1, 0x8\n"
-			"2:\tsb\t$1, 1(%2)\n\t"
-			".set\tat\n\t"
-#endif
-			"li\t%0, 0\n"
-			"3:\n\t"
-			".section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%0, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-			: "=r" (res)
-			: "r" (value), "r" (addr), "i" (-EFAULT));
+		StoreHW(addr, value, res);
 		if (res)
 			goto fault;
-		compute_return_epc(regs);
 		break;
 
 	case sw_op:
 		if (!access_ok(VERIFY_WRITE, addr, 4))
 			goto sigbus;
 
+		compute_return_epc(regs);
 		value = regs->regs[insn.i_format.rt];
-		__asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-			"1:\tswl\t%1,(%2)\n"
-			"2:\tswr\t%1, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tswl\t%1, 3(%2)\n"
-			"2:\tswr\t%1, (%2)\n\t"
-#endif
-			"li\t%0, 0\n"
-			"3:\n\t"
-			".section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%0, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-		: "=r" (res)
-		: "r" (value), "r" (addr), "i" (-EFAULT));
+		StoreW(addr, value, res);
 		if (res)
 			goto fault;
-		compute_return_epc(regs);
 		break;
 
 	case sd_op:
@@ -412,31 +587,11 @@
 		if (!access_ok(VERIFY_WRITE, addr, 8))
 			goto sigbus;
 
+		compute_return_epc(regs);
 		value = regs->regs[insn.i_format.rt];
-		__asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-			"1:\tsdl\t%1,(%2)\n"
-			"2:\tsdr\t%1, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-			"1:\tsdl\t%1, 7(%2)\n"
-			"2:\tsdr\t%1, (%2)\n\t"
-#endif
-			"li\t%0, 0\n"
-			"3:\n\t"
-			".section\t.fixup,\"ax\"\n\t"
-			"4:\tli\t%0, %3\n\t"
-			"j\t3b\n\t"
-			".previous\n\t"
-			".section\t__ex_table,\"a\"\n\t"
-			STR(PTR)"\t1b, 4b\n\t"
-			STR(PTR)"\t2b, 4b\n\t"
-			".previous"
-		: "=r" (res)
-		: "r" (value), "r" (addr), "i" (-EFAULT));
+		StoreDW(addr, value, res);
 		if (res)
 			goto fault;
-		compute_return_epc(regs);
 		break;
 #endif /* CONFIG_64BIT */
 
@@ -447,10 +602,21 @@
 	case ldc1_op:
 	case swc1_op:
 	case sdc1_op:
-		/*
-		 * I herewith declare: this does not happen.  So send SIGBUS.
-		 */
-		goto sigbus;
+		die_if_kernel("Unaligned FP access in kernel code", regs);
+		BUG_ON(!used_math());
+		BUG_ON(!is_fpu_owner());
+
+		lose_fpu(1);	/* Save FPU state for the emulator. */
+		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+					       &fault_addr);
+		own_fpu(1);	/* Restore FPU state. */
+
+		/* Signal if something went wrong. */
+		process_fpemu_return(res, fault_addr);
+
+		if (res == 0)
+			break;
+		return;
 
 	/*
 	 * COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@
 	return;
 
 fault:
+	/* roll back jump/branch */
+	regs->cp0_epc = origpc;
+	regs->regs[31] = orig31;
 	/* Did we have an exception handler installed? */
 	if (fixup_exception(regs))
 		return;
@@ -504,10 +673,881 @@
 	return;
 
 sigill:
-	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+		      regs);
 	force_sig(SIGILL, current);
 }
 
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+static const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+void emulate_load_store_microMIPS(struct pt_regs *regs, void __user *addr)
+{
+	unsigned long value;
+	unsigned int res;
+	int i;
+	unsigned int reg = 0, rvar;
+	unsigned long orig31;
+	u16 __user *pc16;
+	u16 halfword;
+	unsigned int word;
+	unsigned long origpc, contpc;
+	union mips_instruction insn;
+	struct mm_decoded_insn mminsn;
+	void __user *fault_addr = NULL;
+
+	origpc = regs->cp0_epc;
+	orig31 = regs->regs[31];
+
+	mminsn.micro_mips_mode = 1;
+
+	/*
+	 * This load never faults.
+	 */
+	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+	__get_user(halfword, pc16);
+	pc16++;
+	contpc = regs->cp0_epc + 2;
+	word = ((unsigned int)halfword << 16);
+	mminsn.pc_inc = 2;
+
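+	/*
+	 * The first halfword tells whether this is a 16-bit or a 32-bit
+	 * microMIPS instruction; fetch the second halfword for the latter.
+	 */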
+	if (!mm_insn_16bit(halfword)) {
+		__get_user(halfword, pc16);
+		pc16++;
+		contpc = regs->cp0_epc + 4;
+		mminsn.pc_inc = 4;
+		word |= halfword;
+	}
+	mminsn.insn = word;
+
+	if (get_user(halfword, pc16))
+		goto fault;
+	mminsn.next_pc_inc = 2;
+	word = ((unsigned int)halfword << 16);
+
+	if (!mm_insn_16bit(halfword)) {
+		pc16++;
+		if (get_user(halfword, pc16))
+			goto fault;
+		mminsn.next_pc_inc = 4;
+		word |= halfword;
+	}
+	mminsn.next_insn = word;
+
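+	/*
+	 * If the faulting instruction is a branch, the instruction to
+	 * emulate is the one in its delay slot, and mm_isBranchInstr()
+	 * redirects contpc to the branch target.
+	 */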
+	insn = (union mips_instruction)(mminsn.insn);
+	if (mm_isBranchInstr(regs, mminsn, &contpc))
+		insn = (union mips_instruction)(mminsn.next_insn);
+
+	/*  Parse instruction to find what to do */
+
+	switch (insn.mm_i_format.opcode) {
+
+	case mm_pool32a_op:
+		switch (insn.mm_x_format.func) {
+		case mm_lwxs_op:
+			reg = insn.mm_x_format.rd;
+			goto loadW;
+		}
+
+		goto sigbus;
+
+	case mm_pool32b_op:
+		switch (insn.mm_m_format.func) {
+		case mm_lwp_func:
+			reg = insn.mm_m_format.rd;
+			if (reg == 31)
+				goto sigbus;
+
+			if (!access_ok(VERIFY_READ, addr, 8))
+				goto sigbus;
+
+			LoadW(addr, value, res);
+			if (res)
+				goto fault;
+			regs->regs[reg] = value;
+			addr += 4;
+			LoadW(addr, value, res);
+			if (res)
+				goto fault;
+			regs->regs[reg + 1] = value;
+			goto success;
+
+		case mm_swp_func:
+			reg = insn.mm_m_format.rd;
+			if (reg == 31)
+				goto sigbus;
+
+			if (!access_ok(VERIFY_WRITE, addr, 8))
+				goto sigbus;
+
+			value = regs->regs[reg];
+			StoreW(addr, value, res);
+			if (res)
+				goto fault;
+			addr += 4;
+			value = regs->regs[reg + 1];
+			StoreW(addr, value, res);
+			if (res)
+				goto fault;
+			goto success;
+
+		case mm_ldp_func:
+#ifdef CONFIG_64BIT
+			reg = insn.mm_m_format.rd;
+			if (reg == 31)
+				goto sigbus;
+
+			if (!access_ok(VERIFY_READ, addr, 16))
+				goto sigbus;
+
+			LoadDW(addr, value, res);
+			if (res)
+				goto fault;
+			regs->regs[reg] = value;
+			addr += 8;
+			LoadDW(addr, value, res);
+			if (res)
+				goto fault;
+			regs->regs[reg + 1] = value;
+			goto success;
+#endif /* CONFIG_64BIT */
+
+			goto sigill;
+
+		case mm_sdp_func:
+#ifdef CONFIG_64BIT
+			reg = insn.mm_m_format.rd;
+			if (reg == 31)
+				goto sigbus;
+
+			if (!access_ok(VERIFY_WRITE, addr, 16))
+				goto sigbus;
+
+			value = regs->regs[reg];
+			StoreDW(addr, value, res);
+			if (res)
+				goto fault;
+			addr += 8;
+			value = regs->regs[reg + 1];
+			StoreDW(addr, value, res);
+			if (res)
+				goto fault;
+			goto success;
+#endif /* CONFIG_64BIT */
+
+			goto sigill;
+
+		case mm_lwm32_func:
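+			/*
+			 * The rd field encodes the register list: the low
+			 * four bits give the number of registers loaded
+			 * starting at $16 (a value of 9 also includes $30),
+			 * and bit 4 selects $31 as well.
+			 */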
+			reg = insn.mm_m_format.rd;
+			rvar = reg & 0xf;
+			if ((rvar > 9) || !reg)
+				goto sigill;
+			if (reg & 0x10) {
+				if (!access_ok
+				    (VERIFY_READ, addr, 4 * (rvar + 1)))
+					goto sigbus;
+			} else {
+				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+					goto sigbus;
+			}
+			if (rvar == 9)
+				rvar = 8;
+			for (i = 16; rvar; rvar--, i++) {
+				LoadW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 4;
+				regs->regs[i] = value;
+			}
+			if ((reg & 0xf) == 9) {
+				LoadW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 4;
+				regs->regs[30] = value;
+			}
+			if (reg & 0x10) {
+				LoadW(addr, value, res);
+				if (res)
+					goto fault;
+				regs->regs[31] = value;
+			}
+			goto success;
+
+		case mm_swm32_func:
+			reg = insn.mm_m_format.rd;
+			rvar = reg & 0xf;
+			if ((rvar > 9) || !reg)
+				goto sigill;
+			if (reg & 0x10) {
+				if (!access_ok
+				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
+					goto sigbus;
+			} else {
+				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+					goto sigbus;
+			}
+			if (rvar == 9)
+				rvar = 8;
+			for (i = 16; rvar; rvar--, i++) {
+				value = regs->regs[i];
+				StoreW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 4;
+			}
+			if ((reg & 0xf) == 9) {
+				value = regs->regs[30];
+				StoreW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 4;
+			}
+			if (reg & 0x10) {
+				value = regs->regs[31];
+				StoreW(addr, value, res);
+				if (res)
+					goto fault;
+			}
+			goto success;
+
+		case mm_ldm_func:
+#ifdef CONFIG_64BIT
+			reg = insn.mm_m_format.rd;
+			rvar = reg & 0xf;
+			if ((rvar > 9) || !reg)
+				goto sigill;
+			if (reg & 0x10) {
+				if (!access_ok
+				    (VERIFY_READ, addr, 8 * (rvar + 1)))
+					goto sigbus;
+			} else {
+				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
+					goto sigbus;
+			}
+			if (rvar == 9)
+				rvar = 8;
+
+			for (i = 16; rvar; rvar--, i++) {
+				LoadDW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 8;
+				regs->regs[i] = value;
+			}
+			if ((reg & 0xf) == 9) {
+				LoadDW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 8;
+				regs->regs[30] = value;
+			}
+			if (reg & 0x10) {
+				LoadDW(addr, value, res);
+				if (res)
+					goto fault;
+				regs->regs[31] = value;
+			}
+			goto success;
+#endif /* CONFIG_64BIT */
+
+			goto sigill;
+
+		case mm_sdm_func:
+#ifdef CONFIG_64BIT
+			reg = insn.mm_m_format.rd;
+			rvar = reg & 0xf;
+			if ((rvar > 9) || !reg)
+				goto sigill;
+			if (reg & 0x10) {
+				if (!access_ok
+				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
+					goto sigbus;
+			} else {
+				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
+					goto sigbus;
+			}
+			if (rvar == 9)
+				rvar = 8;
+
+			for (i = 16; rvar; rvar--, i++) {
+				value = regs->regs[i];
+				StoreDW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 8;
+			}
+			if ((reg & 0xf) == 9) {
+				value = regs->regs[30];
+				StoreDW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 8;
+			}
+			if (reg & 0x10) {
+				value = regs->regs[31];
+				StoreDW(addr, value, res);
+				if (res)
+					goto fault;
+			}
+			goto success;
+#endif /* CONFIG_64BIT */
+
+			goto sigill;
+
+			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
+		}
+
+		goto sigbus;
+
+	case mm_pool32c_op:
+		switch (insn.mm_m_format.func) {
+		case mm_lwu_func:
+			reg = insn.mm_m_format.rd;
+			goto loadWU;
+		}
+
+		/*  LL,SC,LLD,SCD are not serviced */
+		goto sigbus;
+
+	case mm_pool32f_op:
+		switch (insn.mm_x_format.func) {
+		case mm_lwxc1_func:
+		case mm_swxc1_func:
+		case mm_ldxc1_func:
+		case mm_sdxc1_func:
+			goto fpu_emul;
+		}
+
+		goto sigbus;
+
+	case mm_ldc132_op:
+	case mm_sdc132_op:
+	case mm_lwc132_op:
+	case mm_swc132_op:
+fpu_emul:
+		/* roll back jump/branch */
+		regs->cp0_epc = origpc;
+		regs->regs[31] = orig31;
+
+		die_if_kernel("Unaligned FP access in kernel code", regs);
+		BUG_ON(!used_math());
+		BUG_ON(!is_fpu_owner());
+
+		lose_fpu(1);	/* save the FPU state for the emulator */
+		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+					       &fault_addr);
+		own_fpu(1);	/* restore FPU state */
+
+		/* If something went wrong, signal */
+		process_fpemu_return(res, fault_addr);
+
+		if (res == 0)
+			goto success;
+		return;
+
+	case mm_lh32_op:
+		reg = insn.mm_i_format.rt;
+		goto loadHW;
+
+	case mm_lhu32_op:
+		reg = insn.mm_i_format.rt;
+		goto loadHWU;
+
+	case mm_lw32_op:
+		reg = insn.mm_i_format.rt;
+		goto loadW;
+
+	case mm_sh32_op:
+		reg = insn.mm_i_format.rt;
+		goto storeHW;
+
+	case mm_sw32_op:
+		reg = insn.mm_i_format.rt;
+		goto storeW;
+
+	case mm_ld32_op:
+		reg = insn.mm_i_format.rt;
+		goto loadDW;
+
+	case mm_sd32_op:
+		reg = insn.mm_i_format.rt;
+		goto storeDW;
+
+	case mm_pool16c_op:
+		switch (insn.mm16_m_format.func) {
+		case mm_lwm16_op:
+			reg = insn.mm16_m_format.rlist;
+			rvar = reg + 1;
+			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+				goto sigbus;
+
+			for (i = 16; rvar; rvar--, i++) {
+				LoadW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 4;
+				regs->regs[i] = value;
+			}
+			LoadW(addr, value, res);
+			if (res)
+				goto fault;
+			regs->regs[31] = value;
+
+			goto success;
+
+		case mm_swm16_op:
+			reg = insn.mm16_m_format.rlist;
+			rvar = reg + 1;
+			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+				goto sigbus;
+
+			for (i = 16; rvar; rvar--, i++) {
+				value = regs->regs[i];
+				StoreW(addr, value, res);
+				if (res)
+					goto fault;
+				addr += 4;
+			}
+			value = regs->regs[31];
+			StoreW(addr, value, res);
+			if (res)
+				goto fault;
+
+			goto success;
+
+		}
+
+		goto sigbus;
+
+	case mm_lhu16_op:
+		reg = reg16to32[insn.mm16_rb_format.rt];
+		goto loadHWU;
+
+	case mm_lw16_op:
+		reg = reg16to32[insn.mm16_rb_format.rt];
+		goto loadW;
+
+	case mm_sh16_op:
+		reg = reg16to32st[insn.mm16_rb_format.rt];
+		goto storeHW;
+
+	case mm_sw16_op:
+		reg = reg16to32st[insn.mm16_rb_format.rt];
+		goto storeW;
+
+	case mm_lwsp16_op:
+		reg = insn.mm16_r5_format.rt;
+		goto loadW;
+
+	case mm_swsp16_op:
+		reg = insn.mm16_r5_format.rt;
+		goto storeW;
+
+	case mm_lwgp16_op:
+		reg = reg16to32[insn.mm16_r3_format.rt];
+		goto loadW;
+
+	default:
+		goto sigill;
+	}
+
+loadHW:
+	if (!access_ok(VERIFY_READ, addr, 2))
+		goto sigbus;
+
+	LoadHW(addr, value, res);
+	if (res)
+		goto fault;
+	regs->regs[reg] = value;
+	goto success;
+
+loadHWU:
+	if (!access_ok(VERIFY_READ, addr, 2))
+		goto sigbus;
+
+	LoadHWU(addr, value, res);
+	if (res)
+		goto fault;
+	regs->regs[reg] = value;
+	goto success;
+
+loadW:
+	if (!access_ok(VERIFY_READ, addr, 4))
+		goto sigbus;
+
+	LoadW(addr, value, res);
+	if (res)
+		goto fault;
+	regs->regs[reg] = value;
+	goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+	/*
+	 * A 32-bit kernel might be running on a 64-bit processor.  But
+	 * if we're on a 32-bit processor and an i-cache incoherency
+	 * or race makes us see a 64-bit instruction here the sdl/sdr
+	 * would blow up, so for now we don't handle unaligned 64-bit
+	 * instructions on 32-bit kernels.
+	 */
+	if (!access_ok(VERIFY_READ, addr, 4))
+		goto sigbus;
+
+	LoadWU(addr, value, res);
+	if (res)
+		goto fault;
+	regs->regs[reg] = value;
+	goto success;
+#endif /* CONFIG_64BIT */
+
+	/* Cannot handle 64-bit instructions in 32-bit kernel */
+	goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+	/*
+	 * A 32-bit kernel might be running on a 64-bit processor.  But
+	 * if we're on a 32-bit processor and an i-cache incoherency
+	 * or race makes us see a 64-bit instruction here the sdl/sdr
+	 * would blow up, so for now we don't handle unaligned 64-bit
+	 * instructions on 32-bit kernels.
+	 */
+	if (!access_ok(VERIFY_READ, addr, 8))
+		goto sigbus;
+
+	LoadDW(addr, value, res);
+	if (res)
+		goto fault;
+	regs->regs[reg] = value;
+	goto success;
+#endif /* CONFIG_64BIT */
+
+	/* Cannot handle 64-bit instructions in 32-bit kernel */
+	goto sigill;
+
+storeHW:
+	if (!access_ok(VERIFY_WRITE, addr, 2))
+		goto sigbus;
+
+	value = regs->regs[reg];
+	StoreHW(addr, value, res);
+	if (res)
+		goto fault;
+	goto success;
+
+storeW:
+	if (!access_ok(VERIFY_WRITE, addr, 4))
+		goto sigbus;
+
+	value = regs->regs[reg];
+	StoreW(addr, value, res);
+	if (res)
+		goto fault;
+	goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+	/*
+	 * A 32-bit kernel might be running on a 64-bit processor.  But
+	 * if we're on a 32-bit processor and an i-cache incoherency
+	 * or race makes us see a 64-bit instruction here the sdl/sdr
+	 * would blow up, so for now we don't handle unaligned 64-bit
+	 * instructions on 32-bit kernels.
+	 */
+	if (!access_ok(VERIFY_WRITE, addr, 8))
+		goto sigbus;
+
+	value = regs->regs[reg];
+	StoreDW(addr, value, res);
+	if (res)
+		goto fault;
+	goto success;
+#endif /* CONFIG_64BIT */
+
+	/* Cannot handle 64-bit instructions in 32-bit kernel */
+	goto sigill;
+
+success:
+	regs->cp0_epc = contpc;	/* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+	unaligned_instructions++;
+#endif
+	return;
+
+fault:
+	/* roll back jump/branch */
+	regs->cp0_epc = origpc;
+	regs->regs[31] = orig31;
+	/* Did we have an exception handler installed? */
+	if (fixup_exception(regs))
+		return;
+
+	die_if_kernel("Unhandled kernel unaligned access", regs);
+	force_sig(SIGSEGV, current);
+
+	return;
+
+sigbus:
+	die_if_kernel("Unhandled kernel unaligned access", regs);
+	force_sig(SIGBUS, current);
+
+	return;
+
+sigill:
+	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+		      regs);
+	force_sig(SIGILL, current);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
+{
+	unsigned long value;
+	unsigned int res;
+	int reg;
+	unsigned long orig31;
+	u16 __user *pc16;
+	unsigned long origpc;
+	union mips16e_instruction mips16inst, oldinst;
+
+	origpc = regs->cp0_epc;
+	orig31 = regs->regs[31];
+	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+	/*
+	 * This load never faults.
+	 */
+	__get_user(mips16inst.full, pc16);
+	oldinst = mips16inst;
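+	/*
+	 * Keep the first halfword: MIPS16e_compute_return_epc() derives
+	 * the EPC advance from it even when we decode past an EXTEND
+	 * prefix or a jump's delay slot below.
+	 */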
+
+	/* skip EXTEND instruction */
+	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+		pc16++;
+		__get_user(mips16inst.full, pc16);
+	} else if (delay_slot(regs)) {
+		/*  skip jump instructions */
+		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
+		if (mips16inst.ri.opcode == MIPS16e_jal_op)
+			pc16++;
+		pc16++;
+		if (get_user(mips16inst.full, pc16))
+			goto sigbus;
+	}
+
+	switch (mips16inst.ri.opcode) {
+	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
+		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
+		case MIPS16e_ldpc_func:
+		case MIPS16e_ldsp_func:
+			reg = reg16to32[mips16inst.ri64.ry];
+			goto loadDW;
+
+		case MIPS16e_sdsp_func:
+			reg = reg16to32[mips16inst.ri64.ry];
+			goto writeDW;
+
+		case MIPS16e_sdrasp_func:
+			reg = 29;	/* GPRSP */
+			goto writeDW;
+		}
+
+		goto sigbus;
+
+	case MIPS16e_swsp_op:
+	case MIPS16e_lwpc_op:
+	case MIPS16e_lwsp_op:
+		reg = reg16to32[mips16inst.ri.rx];
+		break;
+
+	case MIPS16e_i8_op:
+		if (mips16inst.i8.func != MIPS16e_swrasp_func)
+			goto sigbus;
+		reg = 29;	/* GPRSP */
+		break;
+
+	default:
+		reg = reg16to32[mips16inst.rri.ry];
+		break;
+	}
+
+	switch (mips16inst.ri.opcode) {
+
+	case MIPS16e_lb_op:
+	case MIPS16e_lbu_op:
+	case MIPS16e_sb_op:
+		goto sigbus;
+
+	case MIPS16e_lh_op:
+		if (!access_ok(VERIFY_READ, addr, 2))
+			goto sigbus;
+
+		LoadHW(addr, value, res);
+		if (res)
+			goto fault;
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		regs->regs[reg] = value;
+		break;
+
+	case MIPS16e_lhu_op:
+		if (!access_ok(VERIFY_READ, addr, 2))
+			goto sigbus;
+
+		LoadHWU(addr, value, res);
+		if (res)
+			goto fault;
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		regs->regs[reg] = value;
+		break;
+
+	case MIPS16e_lw_op:
+	case MIPS16e_lwpc_op:
+	case MIPS16e_lwsp_op:
+		if (!access_ok(VERIFY_READ, addr, 4))
+			goto sigbus;
+
+		LoadW(addr, value, res);
+		if (res)
+			goto fault;
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		regs->regs[reg] = value;
+		break;
+
+	case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+		/*
+		 * A 32-bit kernel might be running on a 64-bit processor.  But
+		 * if we're on a 32-bit processor and an i-cache incoherency
+		 * or race makes us see a 64-bit instruction here the sdl/sdr
+		 * would blow up, so for now we don't handle unaligned 64-bit
+		 * instructions on 32-bit kernels.
+		 */
+		if (!access_ok(VERIFY_READ, addr, 4))
+			goto sigbus;
+
+		LoadWU(addr, value, res);
+		if (res)
+			goto fault;
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		regs->regs[reg] = value;
+		break;
+#endif /* CONFIG_64BIT */
+
+		/* Cannot handle 64-bit instructions in 32-bit kernel */
+		goto sigill;
+
+	case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+		/*
+		 * A 32-bit kernel might be running on a 64-bit processor.  But
+		 * if we're on a 32-bit processor and an i-cache incoherency
+		 * or race makes us see a 64-bit instruction here the sdl/sdr
+		 * would blow up, so for now we don't handle unaligned 64-bit
+		 * instructions on 32-bit kernels.
+		 */
+		if (!access_ok(VERIFY_READ, addr, 8))
+			goto sigbus;
+
+		LoadDW(addr, value, res);
+		if (res)
+			goto fault;
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		regs->regs[reg] = value;
+		break;
+#endif /* CONFIG_64BIT */
+
+		/* Cannot handle 64-bit instructions in 32-bit kernel */
+		goto sigill;
+
+	case MIPS16e_sh_op:
+		if (!access_ok(VERIFY_WRITE, addr, 2))
+			goto sigbus;
+
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		value = regs->regs[reg];
+		StoreHW(addr, value, res);
+		if (res)
+			goto fault;
+		break;
+
+	case MIPS16e_sw_op:
+	case MIPS16e_swsp_op:
+	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
+		if (!access_ok(VERIFY_WRITE, addr, 4))
+			goto sigbus;
+
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		value = regs->regs[reg];
+		StoreW(addr, value, res);
+		if (res)
+			goto fault;
+		break;
+
+	case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+		/*
+		 * A 32-bit kernel might be running on a 64-bit processor.  But
+		 * if we're on a 32-bit processor and an i-cache incoherency
+		 * or race makes us see a 64-bit instruction here the sdl/sdr
+		 * would blow up, so for now we don't handle unaligned 64-bit
+		 * instructions on 32-bit kernels.
+		 */
+		if (!access_ok(VERIFY_WRITE, addr, 8))
+			goto sigbus;
+
+		MIPS16e_compute_return_epc(regs, &oldinst);
+		value = regs->regs[reg];
+		StoreDW(addr, value, res);
+		if (res)
+			goto fault;
+		break;
+#endif /* CONFIG_64BIT */
+
+		/* Cannot handle 64-bit instructions in 32-bit kernel */
+		goto sigill;
+
+	default:
+		/*
+		 * Pheeee...  We encountered a yet unknown instruction or
+		 * cache coherence problem.  Die sucker, die ...
+		 */
+		goto sigill;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	unaligned_instructions++;
+#endif
+
+	return;
+
+fault:
+	/* roll back jump/branch */
+	regs->cp0_epc = origpc;
+	regs->regs[31] = orig31;
+	/* Did we have an exception handler installed? */
+	if (fixup_exception(regs))
+		return;
+
+	die_if_kernel("Unhandled kernel unaligned access", regs);
+	force_sig(SIGSEGV, current);
+
+	return;
+
+sigbus:
+	die_if_kernel("Unhandled kernel unaligned access", regs);
+	force_sig(SIGBUS, current);
+
+	return;
+
+sigill:
+	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+		      regs);
+	force_sig(SIGILL, current);
+}
+
 asmlinkage void do_ade(struct pt_regs *regs)
 {
 	unsigned int __user *pc;
@@ -517,23 +1557,62 @@
 			1, regs, regs->cp0_badvaddr);
 	/*
 	 * Did we catch a fault trying to load an instruction?
-	 * Or are we running in MIPS16 mode?
 	 */
-	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
+	if (regs->cp0_badvaddr == regs->cp0_epc)
 		goto sigbus;
 
-	pc = (unsigned int __user *) exception_epc(regs);
 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
 		goto sigbus;
 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
 		goto sigbus;
-	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
-		show_registers(regs);
 
 	/*
 	 * Do branch emulation only if we didn't forward the exception.
 	 * This is all so but ugly ...
 	 */
+
+	/*
+	 * Are we running in microMIPS mode?
+	 */
+	if (get_isa16_mode(regs->cp0_epc)) {
+		/*
+		 * Did we catch a fault trying to load an instruction in
+		 * 16-bit mode?
+		 */
+		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+			goto sigbus;
+		if (unaligned_action == UNALIGNED_ACTION_SHOW)
+			show_registers(regs);
+
+		if (cpu_has_mmips) {
+			seg = get_fs();
+			if (!user_mode(regs))
+				set_fs(KERNEL_DS);
+			emulate_load_store_microMIPS(regs,
+				(void __user *)regs->cp0_badvaddr);
+			set_fs(seg);
+
+			return;
+		}
+
+		if (cpu_has_mips16) {
+			seg = get_fs();
+			if (!user_mode(regs))
+				set_fs(KERNEL_DS);
+			emulate_load_store_MIPS16e(regs,
+				(void __user *)regs->cp0_badvaddr);
+			set_fs(seg);
+
+			return;
+		}
+
+		goto sigbus;
+	}
+
+	if (unaligned_action == UNALIGNED_ACTION_SHOW)
+		show_registers(regs);
+	pc = (unsigned int __user *)exception_epc(regs);
+
 	seg = get_fs();
 	if (!user_mode(regs))
 		set_fs(KERNEL_DS);
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 0000000..51617e4
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+    Malta Board with FPGA based 34K
+    Sigma Designs TangoX board with a 24K based 8654 SoC.
+    Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+    Guest User address space:   0x00000000 -> 0x40000000
+    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
+    Guest Kernel Mapped:        0x60000000 -> 0x80000000
+
+    Guest Usermode virtual memory is limited to 1GB.
+
+(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
+    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+    Both the host kernel and Guest kernel should have the page size set to 16K.
+    This will be implemented in a future release.
+
+(5) KVM/MIPS does not have support for SMP Guests
+    A Linux-3.7-rc2 based SMP guest hangs due to the following code sequence
+    in the generated TLB handlers: LL/TLBP/SC.  Since the TLBP instruction
+    causes a trap, the reservation gets cleared when we ERET back to the
+    guest, so the guest hangs in an infinite loop.  The sequence is sketched
+    below.  This will be fixed in a future release.
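+    An illustrative (not verbatim) fragment of the problem sequence:
+
+        ll    t0, (a0)      # load-linked sets the reservation
+        tlbp                # probing traps to the host, clearing it
+        sc    t0, (a0)      # the store-conditional therefore always fails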
+
+(6) Use Host FPU
+    Currently KVM/MIPS emulates a 24K CPU without an FPU.
+    This will be fixed in a future release.
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 0000000..2c15590
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
+	depends on HAVE_KVM
+	---help---
+	  Say Y here to get to see options for using your Linux host to run
+	  other operating systems inside virtual machines (guests).
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+	tristate "Kernel-based Virtual Machine (KVM) support"
+	depends on HAVE_KVM
+	select PREEMPT_NOTIFIERS
+	select ANON_INODES
+	select KVM_MMIO
+	---help---
+	  Support for hosting Guest kernels.
+	  Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+	depends on KVM
+	---help---
+	  When running in Trap & Emulate mode patch privileged
+	  instructions to reduce the number of traps.
+
+	  If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+	bool "Maintain counters for COP0 accesses"
+	depends on KVM
+	---help---
+	  Maintain statistics for Guest COP0 accesses.
+	  A histogram of COP0 accesses is printed when the VM is
+	  shutdown.
+
+	  If unsure, say N.
+
+source "drivers/vhost/Kconfig"
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 0000000..78d87bb
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+	    kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+	    kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM)	+= kvm.o
+obj-y			+= kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 0000000..313c2e3
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 0000000..dca2aa6
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+
+FEXPORT(__kvm_mips_vcpu_run)
+    .set    push
+    .set    noreorder
+    .set    noat
+
+    /* k0/k1 not being used in host kernel context */
+	addiu  		k1,sp, -PT_SIZE
+    LONG_S	    $0, PT_R0(k1)
+    LONG_S     	$1, PT_R1(k1)
+    LONG_S     	$2, PT_R2(k1)
+    LONG_S     	$3, PT_R3(k1)
+
+    LONG_S     	$4, PT_R4(k1)
+    LONG_S     	$5, PT_R5(k1)
+    LONG_S     	$6, PT_R6(k1)
+    LONG_S     	$7, PT_R7(k1)
+
+    LONG_S     	$8,  PT_R8(k1)
+    LONG_S     	$9,  PT_R9(k1)
+    LONG_S     	$10, PT_R10(k1)
+    LONG_S     	$11, PT_R11(k1)
+    LONG_S     	$12, PT_R12(k1)
+    LONG_S     	$13, PT_R13(k1)
+    LONG_S     	$14, PT_R14(k1)
+    LONG_S     	$15, PT_R15(k1)
+    LONG_S     	$16, PT_R16(k1)
+    LONG_S     	$17, PT_R17(k1)
+
+    LONG_S     	$18, PT_R18(k1)
+    LONG_S     	$19, PT_R19(k1)
+    LONG_S     	$20, PT_R20(k1)
+    LONG_S     	$21, PT_R21(k1)
+    LONG_S     	$22, PT_R22(k1)
+    LONG_S     	$23, PT_R23(k1)
+    LONG_S     	$24, PT_R24(k1)
+    LONG_S     	$25, PT_R25(k1)
+
+	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+    LONG_S     	$28, PT_R28(k1)
+    LONG_S     	$29, PT_R29(k1)
+    LONG_S     	$30, PT_R30(k1)
+    LONG_S     	$31, PT_R31(k1)
+
+    /* Save hi/lo */
+	mflo		v0
+	LONG_S		v0, PT_LO(k1)
+	mfhi   		v1
+	LONG_S		v1, PT_HI(k1)
+
+	/* Save host status */
+	mfc0		v0, CP0_STATUS
+	LONG_S		v0, PT_STATUS(k1)
+
+	/* Save host ASID, shove it into the BVADDR location */
+	mfc0 		v1,CP0_ENTRYHI
+	andi		v1, 0xff
+	LONG_S		v1, PT_HOST_ASID(k1)
+
+    /* Save DDATA_LO, will be used to store pointer to vcpu */
+    mfc0        v1, CP0_DDATA_LO
+    LONG_S      v1, PT_HOST_USERLOCAL(k1)
+
+    /* DDATA_LO has pointer to vcpu */
+    mtc0        a1,CP0_DDATA_LO
+
+    /* Offset into vcpu->arch */
+	addiu		k1, a1, VCPU_HOST_ARCH
+
+    /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+    LONG_S      sp, VCPU_HOST_STACK(k1)
+
+    /* Save the kernel gp as well */
+    LONG_S      gp, VCPU_HOST_GP(k1)
+
+	/* Setup status register for running the guest in UM, interrupts are disabled */
+	li			k0, (ST0_EXL | KSU_USER | ST0_BEV)
+	mtc0		k0,CP0_STATUS
+    ehb
+
+    /* load up the new EBASE */
+    LONG_L      k0, VCPU_GUEST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+    /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+     * but make sure that timer interrupts are enabled
+     */
+    li          k0,(ST0_EXL | KSU_USER | ST0_IE)
+    andi        v0, v0, ST0_IM
+    or          k0, k0, v0
+    mtc0        k0,CP0_STATUS
+    ehb
+
+	/* Set Guest EPC */
+	LONG_L		t0, VCPU_PC(k1)
+	mtc0		t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+    /* Set the ASID for the Guest Kernel */
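+    /*
+     * Shifting the PC left by one moves bit 30 into the sign bit, so
+     * guest kernel addresses (0x40000000 and above) test negative below.
+     */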
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+	addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+	mtc0		k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* Now load up the Guest Context from VCPU */
+    LONG_L     	$1, VCPU_R1(k1)
+    LONG_L     	$2, VCPU_R2(k1)
+    LONG_L     	$3, VCPU_R3(k1)
+
+    LONG_L     	$4, VCPU_R4(k1)
+    LONG_L     	$5, VCPU_R5(k1)
+    LONG_L     	$6, VCPU_R6(k1)
+    LONG_L     	$7, VCPU_R7(k1)
+
+    LONG_L     	$8,  VCPU_R8(k1)
+    LONG_L     	$9,  VCPU_R9(k1)
+    LONG_L     	$10, VCPU_R10(k1)
+    LONG_L     	$11, VCPU_R11(k1)
+    LONG_L     	$12, VCPU_R12(k1)
+    LONG_L     	$13, VCPU_R13(k1)
+    LONG_L     	$14, VCPU_R14(k1)
+    LONG_L     	$15, VCPU_R15(k1)
+    LONG_L     	$16, VCPU_R16(k1)
+    LONG_L     	$17, VCPU_R17(k1)
+    LONG_L     	$18, VCPU_R18(k1)
+    LONG_L     	$19, VCPU_R19(k1)
+    LONG_L     	$20, VCPU_R20(k1)
+    LONG_L     	$21, VCPU_R21(k1)
+    LONG_L     	$22, VCPU_R22(k1)
+    LONG_L     	$23, VCPU_R23(k1)
+    LONG_L     	$24, VCPU_R24(k1)
+    LONG_L     	$25, VCPU_R25(k1)
+
+    /* k0/k1 loaded up later */
+
+    LONG_L     	$28, VCPU_R28(k1)
+    LONG_L     	$29, VCPU_R29(k1)
+    LONG_L     	$30, VCPU_R30(k1)
+    LONG_L     	$31, VCPU_R31(k1)
+
+    /* Restore hi/lo */
+	LONG_L		k0, VCPU_LO(k1)
+	mtlo		k0
+
+	LONG_L		k0, VCPU_HI(k1)
+	mthi   		k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+	/* Restore the guest's k0/k1 registers */
+    LONG_L     	k0, VCPU_R26(k1)
+    LONG_L     	k1, VCPU_R27(k1)
+
+    /* Jump to guest */
+	eret
+	.set	pop
+
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+    .set    push
+	.set	noat
+    .set    noreorder
+    mtc0    k0, CP0_ERROREPC    #01: Save guest k0
+    ehb                         #02:
+
+    mfc0    k0, CP0_EBASE       #03: Get EBASE
+    srl     k0, k0, 10          #04: Get rid of CPUNum
+    sll     k0, k0, 10          #05
+    LONG_S  k1, 0x3000(k0)      #06: Save k1 @ offset 0x3000
+    addiu   k0, k0, 0x2000      #07: Exception handler is installed @ offset 0x2000
+    j       k0                  #08: jump to the function
+    nop                         #09: branch delay slot
+    .set    pop
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+    .set    push
+    .set    noat
+    .set    noreorder
+
+    /* Get the VCPU pointer from DDATA_LO */
+    mfc0        k1, CP0_DDATA_LO
+	addiu		k1, k1, VCPU_HOST_ARCH
+
+    /* Start saving Guest context to VCPU */
+    LONG_S  $0, VCPU_R0(k1)
+    LONG_S  $1, VCPU_R1(k1)
+    LONG_S  $2, VCPU_R2(k1)
+    LONG_S  $3, VCPU_R3(k1)
+    LONG_S  $4, VCPU_R4(k1)
+    LONG_S  $5, VCPU_R5(k1)
+    LONG_S  $6, VCPU_R6(k1)
+    LONG_S  $7, VCPU_R7(k1)
+    LONG_S  $8, VCPU_R8(k1)
+    LONG_S  $9, VCPU_R9(k1)
+    LONG_S  $10, VCPU_R10(k1)
+    LONG_S  $11, VCPU_R11(k1)
+    LONG_S  $12, VCPU_R12(k1)
+    LONG_S  $13, VCPU_R13(k1)
+    LONG_S  $14, VCPU_R14(k1)
+    LONG_S  $15, VCPU_R15(k1)
+    LONG_S  $16, VCPU_R16(k1)
+    LONG_S  $17, VCPU_R17(k1)
+    LONG_S  $18, VCPU_R18(k1)
+    LONG_S  $19, VCPU_R19(k1)
+    LONG_S  $20, VCPU_R20(k1)
+    LONG_S  $21, VCPU_R21(k1)
+    LONG_S  $22, VCPU_R22(k1)
+    LONG_S  $23, VCPU_R23(k1)
+    LONG_S  $24, VCPU_R24(k1)
+    LONG_S  $25, VCPU_R25(k1)
+
+    /* Guest k0/k1 saved later */
+
+    LONG_S  $28, VCPU_R28(k1)
+    LONG_S  $29, VCPU_R29(k1)
+    LONG_S  $30, VCPU_R30(k1)
+    LONG_S  $31, VCPU_R31(k1)
+
+    /* We need to save hi/lo and restore them on
+     * the way out
+     */
+    mfhi    t0
+    LONG_S  t0, VCPU_HI(k1)
+
+    mflo    t0
+    LONG_S  t0, VCPU_LO(k1)
+
+    /* Finally save guest k0/k1 to VCPU */
+    mfc0    t0, CP0_ERROREPC
+    LONG_S  t0, VCPU_R26(k1)
+
+    /* Get GUEST k1 and save it in VCPU */
+    la      t1, ~0x2ff
+    mfc0    t0, CP0_EBASE
+    and     t0, t0, t1
+    LONG_L  t0, 0x3000(t0)
+    LONG_S  t0, VCPU_R27(k1)
+
+    /* Now that context has been saved, we can use other registers */
+
+    /* Restore vcpu */
+    mfc0        a1, CP0_DDATA_LO
+    move        s1, a1
+
+    /* Restore run (vcpu->run) */
+    LONG_L      a0, VCPU_RUN(a1)
+    /* Save pointer to run in s0, will be saved by the compiler */
+    move        s0, a0
+
+
+    /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+    mfc0    k0,CP0_EPC
+    LONG_S  k0, VCPU_PC(k1)
+
+    mfc0    k0, CP0_BADVADDR
+    LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+    mfc0    k0, CP0_CAUSE
+    LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+    mfc0    k0, CP0_ENTRYHI
+    LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+    /* Now restore the host state just enough to run the handlers */
+
+    /* Switch EBASE to the one used by Linux */
+    /* Load up the host EBASE */
+    mfc0        v0, CP0_STATUS
+
+    .set at
+	or          k0, v0, ST0_BEV
+    .set noat
+
+    mtc0        k0, CP0_STATUS
+    ehb
+
+    LONG_L      k0, VCPU_HOST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+
+    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+    .set at
+	and         v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+    or          v0, v0, ST0_CU0
+    .set noat
+    mtc0        v0, CP0_STATUS
+    ehb
+
+    /* Load up host GP */
+    LONG_L  gp, VCPU_HOST_GP(k1)
+
+    /* Need a stack before we can jump to "C" */
+    LONG_L  sp, VCPU_HOST_STACK(k1)
+
+    /* Saved host state */
+    addiu   sp,sp, -PT_SIZE
+
+    /* XXXKYMA do we need to load the host ASID, maybe not because the
+     * kernel entries are marked GLOBAL, need to verify
+     */
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(sp)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore RDHWR access: HWRs 0-3 plus bit 29 (UserLocal) */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+    /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+    /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+    /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+    la          t9,kvm_mips_handle_exit
+    jalr.hb     t9
+    addiu       sp,sp, -CALLFRAME_SIZ           /* BD Slot */
+
+    /* Return from handler Make sure interrupts are disabled */
+    di
+    ehb
+
+    /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+     * while we were handling the exception from the guest, reload k1
+     */
+    move        k1, s1
+	addiu		k1, k1, VCPU_HOST_ARCH
+
+    /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+     * or resuming the guest
+     */
+    andi        t0, v0, RESUME_HOST
+    bnez        t0, __kvm_mips_return_to_host
+    nop
+
+__kvm_mips_return_to_guest:
+    /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+    mtc0        s1, CP0_DDATA_LO
+
+    /* Load up the Guest EBASE to minimize the window where BEV is set */
+    LONG_L      t0, VCPU_GUEST_EBASE(k1)
+
+    /* Switch EBASE back to the one used by KVM */
+    mfc0        v1, CP0_STATUS
+    .set at
+	or          k0, v1, ST0_BEV
+    .set noat
+    mtc0        k0, CP0_STATUS
+    ehb
+    mtc0        t0,CP0_EBASE
+
+    /* Setup status register for running guest in UM */
+    .set at
+    or     v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+    and     v1, v1, ~ST0_CU0
+    .set noat
+    mtc0    v1, CP0_STATUS
+    ehb
+
+
+	/* Set Guest EPC */
+	LONG_L		t0, VCPU_PC(k1)
+	mtc0		t0, CP0_EPC
+
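+    /* Select the guest kernel or user ASID array for t0 (the guest EPC),
+     * using the same shift/branch-delay-slot trick as in
+     * __kvm_mips_load_asid above.
+     */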
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+	addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+	mtc0		k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* load the guest context from VCPU and return */
+    LONG_L  $0, VCPU_R0(k1)
+    LONG_L  $1, VCPU_R1(k1)
+    LONG_L  $2, VCPU_R2(k1)
+    LONG_L  $3, VCPU_R3(k1)
+    LONG_L  $4, VCPU_R4(k1)
+    LONG_L  $5, VCPU_R5(k1)
+    LONG_L  $6, VCPU_R6(k1)
+    LONG_L  $7, VCPU_R7(k1)
+    LONG_L  $8, VCPU_R8(k1)
+    LONG_L  $9, VCPU_R9(k1)
+    LONG_L  $10, VCPU_R10(k1)
+    LONG_L  $11, VCPU_R11(k1)
+    LONG_L  $12, VCPU_R12(k1)
+    LONG_L  $13, VCPU_R13(k1)
+    LONG_L  $14, VCPU_R14(k1)
+    LONG_L  $15, VCPU_R15(k1)
+    LONG_L  $16, VCPU_R16(k1)
+    LONG_L  $17, VCPU_R17(k1)
+    LONG_L  $18, VCPU_R18(k1)
+    LONG_L  $19, VCPU_R19(k1)
+    LONG_L  $20, VCPU_R20(k1)
+    LONG_L  $21, VCPU_R21(k1)
+    LONG_L  $22, VCPU_R22(k1)
+    LONG_L  $23, VCPU_R23(k1)
+    LONG_L  $24, VCPU_R24(k1)
+    LONG_L  $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded later */
+    LONG_L  $28, VCPU_R28(k1)
+    LONG_L  $29, VCPU_R29(k1)
+    LONG_L  $30, VCPU_R30(k1)
+    LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+    LONG_L  k0, VCPU_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, VCPU_LO(k1)
+    mtlo    k0
+
+    LONG_L  k0, VCPU_R26(k1)
+    LONG_L  k1, VCPU_R27(k1)
+
+    eret
+
+__kvm_mips_return_to_host:
+    /* EBASE is already pointing to Linux */
+    LONG_L  k1, VCPU_HOST_STACK(k1)
+	addiu  	k1,k1, -PT_SIZE
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(k1)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore host ASID */
+    LONG_L      k0, PT_HOST_ASID(sp)
+    andi        k0, 0xff
+    mtc0        k0,CP0_ENTRYHI
+    ehb
+
+    /* Load context saved on the host stack */
+    LONG_L  $0, PT_R0(k1)
+    LONG_L  $1, PT_R1(k1)
+
+    /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code  */
+    sra     k0, v0, 2
+    move    $2, k0
+
+    LONG_L  $3, PT_R3(k1)
+    LONG_L  $4, PT_R4(k1)
+    LONG_L  $5, PT_R5(k1)
+    LONG_L  $6, PT_R6(k1)
+    LONG_L  $7, PT_R7(k1)
+    LONG_L  $8, PT_R8(k1)
+    LONG_L  $9, PT_R9(k1)
+    LONG_L  $10, PT_R10(k1)
+    LONG_L  $11, PT_R11(k1)
+    LONG_L  $12, PT_R12(k1)
+    LONG_L  $13, PT_R13(k1)
+    LONG_L  $14, PT_R14(k1)
+    LONG_L  $15, PT_R15(k1)
+    LONG_L  $16, PT_R16(k1)
+    LONG_L  $17, PT_R17(k1)
+    LONG_L  $18, PT_R18(k1)
+    LONG_L  $19, PT_R19(k1)
+    LONG_L  $20, PT_R20(k1)
+    LONG_L  $21, PT_R21(k1)
+    LONG_L  $22, PT_R22(k1)
+    LONG_L  $23, PT_R23(k1)
+    LONG_L  $24, PT_R24(k1)
+    LONG_L  $25, PT_R25(k1)
+
+    /* Host k0/k1 were not saved */
+
+    LONG_L  $28, PT_R28(k1)
+    LONG_L  $29, PT_R29(k1)
+    LONG_L  $30, PT_R30(k1)
+
+    LONG_L  k0, PT_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, PT_LO(k1)
+    mtlo    k0
+
+    /* Restore RDHWR access: HWRs 0-3 plus bit 29 (UserLocal) */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+
+    /* Restore RA, which is the address we will return to */
+    LONG_L  ra, PT_R31(k1)
+    j       ra
+    nop
+
+    .set    pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+	####
+	##### The exception handlers.
+	#####
+	.word _C_LABEL(MIPSX(GuestException))	#  0
+	.word _C_LABEL(MIPSX(GuestException))	#  1
+	.word _C_LABEL(MIPSX(GuestException))	#  2
+	.word _C_LABEL(MIPSX(GuestException))	#  3
+	.word _C_LABEL(MIPSX(GuestException))	#  4
+	.word _C_LABEL(MIPSX(GuestException))	#  5
+	.word _C_LABEL(MIPSX(GuestException))	#  6
+	.word _C_LABEL(MIPSX(GuestException))	#  7
+	.word _C_LABEL(MIPSX(GuestException))	#  8
+	.word _C_LABEL(MIPSX(GuestException))	#  9
+	.word _C_LABEL(MIPSX(GuestException))	# 10
+	.word _C_LABEL(MIPSX(GuestException))	# 11
+	.word _C_LABEL(MIPSX(GuestException))	# 12
+	.word _C_LABEL(MIPSX(GuestException))	# 13
+	.word _C_LABEL(MIPSX(GuestException))	# 14
+	.word _C_LABEL(MIPSX(GuestException))	# 15
+	.word _C_LABEL(MIPSX(GuestException))	# 16
+	.word _C_LABEL(MIPSX(GuestException))	# 17
+	.word _C_LABEL(MIPSX(GuestException))	# 18
+	.word _C_LABEL(MIPSX(GuestException))	# 19
+	.word _C_LABEL(MIPSX(GuestException))	# 20
+	.word _C_LABEL(MIPSX(GuestException))	# 21
+	.word _C_LABEL(MIPSX(GuestException))	# 22
+	.word _C_LABEL(MIPSX(GuestException))	# 23
+	.word _C_LABEL(MIPSX(GuestException))	# 24
+	.word _C_LABEL(MIPSX(GuestException))	# 25
+	.word _C_LABEL(MIPSX(GuestException))	# 26
+	.word _C_LABEL(MIPSX(GuestException))	# 27
+	.word _C_LABEL(MIPSX(GuestException))	# 28
+	.word _C_LABEL(MIPSX(GuestException))	# 29
+	.word _C_LABEL(MIPSX(GuestException))	# 30
+	.word _C_LABEL(MIPSX(GuestException))	# 31
+
+
+/* This routine makes the changes to the instruction stream effective for the
+ * hardware. It should be called after the instruction stream is written; on
+ * return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
+
+#define HW_SYNCI_Step       $1
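+/* RDHWR register 1 is SYNCI_Step: the address stride between SYNCI
+ * operations. A step of zero means no caches need synchronization, in
+ * which case the routine below exits early.
+ */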
+LEAF(MIPSX(SyncICache))
+    .set    push
+	.set	mips32r2
+    beq     a1, zero, 20f
+    nop
+    addu    a1, a0, a1
+    rdhwr   v0, HW_SYNCI_Step
+    beq     v0, zero, 20f
+    nop
+
+10:
+    synci   0(a0)
+    addu    a0, a0, v0
+    sltu    v1, a0, a1
+    bne     v1, zero, 10b
+    nop
+    sync
+20:
+    jr.hb   ra
+    nop
+    .set pop
+END(MIPSX(SyncICache))
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 0000000..e0dad02
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100	/* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+	{ "wait", VCPU_STAT(wait_exits) },
+	{ "cache", VCPU_STAT(cache_exits) },
+	{ "signal", VCPU_STAT(signal_exits) },
+	{ "interrupt", VCPU_STAT(int_exits) },
+	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
+	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
+	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+	{ "syscall", VCPU_STAT(syscall_exits) },
+	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+	{ "break_inst", VCPU_STAT(break_inst_exits) },
+	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+	{NULL}
+};
+
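+/*
+ * Clear the per-cpu cached guest kernel/user ASIDs so fresh ones are
+ * allocated on the next entry; used to treat KVM_NMI as a CPU reset
+ * (see kvm_arch_vcpu_ioctl below).
+ */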
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+	int i;
+	for_each_possible_cpu(i) {
+		vcpu->arch.guest_kernel_asid[i] = 0;
+		vcpu->arch.guest_user_asid[i] = 0;
+	}
+	return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn;
+}
+
+/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+	return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+	return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+	return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+	int *r = (int *)rtn;
+	*r = 0;
+	return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+	unsigned long wired;
+
+	/* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
+	wired = read_c0_wired();
+	write_c0_wired(wired + 1);
+	mtc0_tlbw_hazard();
+	kvm->arch.commpage_tlb = wired;
+
+	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+		  kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+	struct kvm *kvm = (struct kvm *)arg;
+
+	kvm_mips_init_tlbs(kvm);
+	kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+	if (atomic_inc_return(&kvm_mips_instance) == 1) {
+		kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+			 __func__);
+		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+	}
+
+
+	return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+	unsigned int i;
+	struct kvm_vcpu *vcpu;
+
+	/* Put the pages we reserved for the guest pmap */
+	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+	}
+
+	if (kvm->arch.guest_pmap)
+		kfree(kvm->arch.guest_pmap);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_arch_vcpu_free(vcpu);
+	}
+
+	mutex_lock(&kvm->lock);
+
+	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+		kvm->vcpus[i] = NULL;
+
+	atomic_set(&kvm->online_vcpus, 0);
+
+	mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+	/* Restore wired count */
+	write_c0_wired(0);
+	mtc0_tlbw_hazard();
+	/* Clear out all the TLBs */
+	kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+	kvm_mips_free_vcpus(kvm);
+
+	/* If this is the last instance, restore wired count */
+	if (atomic_dec_return(&kvm_mips_instance) == 0) {
+		kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+			 __func__);
+		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+	}
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	return -EINVAL;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+	return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot,
+                                struct kvm_userspace_memory_region *mem,
+                                enum kvm_mr_change change)
+{
+	return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                struct kvm_userspace_memory_region *mem,
+                                const struct kvm_memory_slot *old,
+                                enum kvm_mr_change change)
+{
+	unsigned long npages = 0;
+	int i, err = 0;
+
+	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+		  __func__, kvm, mem->slot, mem->guest_phys_addr,
+		  mem->memory_size, mem->userspace_addr);
+
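+	/*
+	 * guest_pmap caches one host page-frame number per guest physical
+	 * page of memslot 0. Entries start out as KVM_INVALID_PAGE and are
+	 * presumably filled in on demand by the fault-handling paths (not
+	 * part of this function); kvm_mips_free_vcpus() releases them.
+	 */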
+	/* Setup Guest PMAP table */
+	if (!kvm->arch.guest_pmap) {
+		if (mem->slot == 0)
+			npages = mem->memory_size >> PAGE_SHIFT;
+
+		if (npages) {
+			kvm->arch.guest_pmap_npages = npages;
+			kvm->arch.guest_pmap =
+			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+			if (!kvm->arch.guest_pmap) {
+				kvm_err("Failed to allocate guest PMAP");
+				err = -ENOMEM;
+				goto out;
+			}
+
+			kvm_info
+			    ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+			     npages, kvm->arch.guest_pmap);
+
+			/* Now setup the page table */
+			for (i = 0; i < npages; i++) {
+				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+			}
+		}
+	}
+out:
+	return;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				   struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	extern char mips32_exception[], mips32_exceptionEnd[];
+	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+	int err, size, offset;
+	void *gebase;
+	int i;
+
+	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+	if (!vcpu) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = kvm_vcpu_init(vcpu, kvm, id);
+
+	if (err)
+		goto out_free_cpu;
+
+	kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
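+	/*
+	 * Exception base (gebase) layout, as set up below and used by the
+	 * exception vector code (mips32_exception / mips32_GuestException):
+	 *   0x0000  TLB refill handler (EXL = 0)
+	 *   0x0180  general exception entry
+	 *   0x0200+ vectored interrupt entries (VECTORSPACING apart)
+	 *   0x2000  guest exit handler (mips32_GuestException)
+	 *   0x3000  scratch slot where the vector saves guest k1
+	 */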
+	/* Allocate space for host mode exception handlers that handle
+	 * guest mode exits
+	 */
+	if (cpu_has_veic || cpu_has_vint) {
+		size = 0x200 + VECTORSPACING * 64;
+	} else {
+		size = 0x200;
+	}
+
+	/* Save Linux EBASE */
+	vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+	if (!gebase) {
+		err = -ENOMEM;
+		goto out_free_cpu;
+	}
+	kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+		 ALIGN(size, PAGE_SIZE), gebase);
+
+	/* Save new ebase */
+	vcpu->arch.guest_ebase = gebase;
+
+	/* Copy L1 Guest Exception handler to correct offset */
+
+	/* TLB Refill, EXL = 0 */
+	memcpy(gebase, mips32_exception,
+	       mips32_exceptionEnd - mips32_exception);
+
+	/* General Exception Entry point */
+	memcpy(gebase + 0x180, mips32_exception,
+	       mips32_exceptionEnd - mips32_exception);
+
+	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
+	for (i = 0; i < 8; i++) {
+		kvm_debug("L1 Vectored handler @ %p\n",
+			  gebase + 0x200 + (i * VECTORSPACING));
+		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+		       mips32_exceptionEnd - mips32_exception);
+	}
+
+	/* General handler, relocate to unmapped space for sanity's sake */
+	offset = 0x2000;
+	kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+		 gebase + offset,
+		 mips32_GuestExceptionEnd - mips32_GuestException);
+
+	memcpy(gebase + offset, mips32_GuestException,
+	       mips32_GuestExceptionEnd - mips32_GuestException);
+
+	/* Invalidate the icache for these ranges */
+	mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+	if (!vcpu->arch.kseg0_commpage) {
+		err = -ENOMEM;
+		goto out_free_gebase;
+	}
+
+	kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+	kvm_mips_commpage_init(vcpu);
+
+	/* Init */
+	vcpu->arch.last_sched_cpu = -1;
+
+	/* Start off the timer */
+	kvm_mips_emulate_count(vcpu);
+
+	return vcpu;
+
+out_free_gebase:
+	kfree(gebase);
+
+out_free_cpu:
+	kfree(vcpu);
+
+out:
+	return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+	kvm_vcpu_uninit(vcpu);
+
+	kvm_mips_dump_stats(vcpu);
+
+	if (vcpu->arch.guest_ebase)
+		kfree(vcpu->arch.guest_ebase);
+
+	if (vcpu->arch.kseg0_commpage)
+		kfree(vcpu->arch.kseg0_commpage);
+
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+				    struct kvm_guest_debug *dbg)
+{
+	return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	int r = 0;
+	sigset_t sigsaved;
+
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+	if (vcpu->mmio_needed) {
+		if (!vcpu->mmio_is_write)
+			kvm_mips_complete_mmio_load(vcpu, run);
+		vcpu->mmio_needed = 0;
+	}
+
+	/* Check if we have any exceptions/interrupts pending */
+	kvm_mips_deliver_interrupts(vcpu,
+				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+	local_irq_disable();
+	kvm_guest_enter();
+
+	r = __kvm_mips_vcpu_run(run, vcpu);
+
+	kvm_guest_exit();
+	local_irq_enable();
+
+	if (vcpu->sigset_active)
+		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+	return r;
+}
+
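+/*
+ * Queue (irq 2..4) or dequeue (irq -2..-4) an I/O interrupt for the
+ * target vcpu; cpu == -1 targets the calling vcpu. The target is woken
+ * up if it was waiting.
+ */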
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+	struct kvm_vcpu *dvcpu = NULL;
+
+	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+			  (int)intr);
+
+	if (irq->cpu == -1)
+		dvcpu = vcpu;
+	else
+		dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+	if (intr == 2 || intr == 3 || intr == 4) {
+		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+	} else if (intr == -2 || intr == -3 || intr == -4) {
+		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+	} else {
+		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+			irq->cpu, irq->irq);
+		return -EINVAL;
+	}
+
+	dvcpu->arch.wait = 0;
+
+	if (waitqueue_active(&dvcpu->wq)) {
+		wake_up_interruptible(&dvcpu->wq);
+	}
+
+	return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+				struct kvm_mp_state *mp_state)
+{
+	return -EINVAL;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+				struct kvm_mp_state *mp_state)
+{
+	return -EINVAL;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long r;
+	int intr;
+
+	switch (ioctl) {
+	case KVM_NMI:
+		/* Treat the NMI as a CPU reset */
+		r = kvm_mips_reset_vcpu(vcpu);
+		break;
+	case KVM_INTERRUPT:
+		{
+			struct kvm_mips_interrupt irq;
+			r = -EFAULT;
+			if (copy_from_user(&irq, argp, sizeof(irq)))
+				goto out;
+
+			intr = (int)irq.irq;
+
+			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+				  irq.irq);
+
+			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+			break;
+		}
+	default:
+		r = -EINVAL;
+	}
+
+out:
+	return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	struct kvm_memory_slot *memslot;
+	unsigned long ga, ga_end;
+	int is_dirty = 0;
+	int r;
+	unsigned long n;
+
+	mutex_lock(&kvm->slots_lock);
+
+	r = kvm_get_dirty_log(kvm, log, &is_dirty);
+	if (r)
+		goto out;
+
+	/* If nothing is dirty, don't bother messing with page tables. */
+	if (is_dirty) {
+		memslot = &kvm->memslots->memslots[log->slot];
+
+		ga = memslot->base_gfn << PAGE_SHIFT;
+		ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+		       ga_end);
+
+		n = kvm_dirty_bitmap_bytes(memslot);
+		memset(memslot->dirty_bitmap, 0, n);
+	}
+
+	r = 0;
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	long r;
+
+	switch (ioctl) {
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+	int ret;
+
+	if (kvm_mips_callbacks) {
+		kvm_err("kvm: module already exists\n");
+		return -EEXIST;
+	}
+
+	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+	return ret;
+}
+
+void kvm_arch_exit(void)
+{
+	kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+	return -ENOTSUPP;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+	return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_COALESCED_MMIO:
+		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+		break;
+	default:
+		r = 0;
+		break;
+	}
+	return r;
+
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct mips_coproc *cop0;
+
+	if (!vcpu)
+		return -1;
+
+	printk("VCPU Register Dump:\n");
+	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
+	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+	for (i = 0; i < 32; i += 4) {
+		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+		       vcpu->arch.gprs[i],
+		       vcpu->arch.gprs[i + 1],
+		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+	}
+	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+	cop0 = vcpu->arch.cop0;
+	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+	return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < 32; i++)
+		vcpu->arch.gprs[i] = regs->gprs[i];
+
+	vcpu->arch.hi = regs->hi;
+	vcpu->arch.lo = regs->lo;
+	vcpu->arch.pc = regs->pc;
+
+	return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	int i;
+
+	for (i = 0; i < 32; i++)
+		regs->gprs[i] = vcpu->arch.gprs[i];
+
+	regs->hi = vcpu->arch.hi;
+	regs->lo = vcpu->arch.lo;
+	regs->pc = vcpu->arch.pc;
+
+	return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+	kvm_mips_callbacks->queue_timer_int(vcpu);
+
+	vcpu->arch.wait = 0;
+	if (waitqueue_active(&vcpu->wq)) {
+		wake_up_interruptible(&vcpu->wq);
+	}
+}
+
+/*
+ * Low level hrtimer wake routine: queue the timer interrupt for the
+ * guest and rearm the timer for the next 10 ms tick.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+	kvm_mips_comparecount_func((unsigned long) vcpu);
+	hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+			    ktime_set(0, MS_TO_NS(10)));
+	return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	kvm_mips_callbacks->vcpu_init(vcpu);
+	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+	kvm_mips_init_shadow_tlb(vcpu);
+	return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+	return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static void kvm_mips_set_c0_status(void)
+{
+	uint32_t status = read_c0_status();
+
+	if (cpu_has_fpu)
+		status |= (ST0_CU1);
+
+	if (cpu_has_dsp)
+		status |= (ST0_MX);
+
+	write_c0_status(status);
+	ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
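+/*
+ * For example, a pending signal produces ret = (-EINTR << 2) | RESUME_HOST;
+ * the assembly return path (__kvm_mips_return_to_host) recovers the error
+ * code with an arithmetic shift right by 2 before placing it in v0.
+ */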
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	uint32_t cause = vcpu->arch.host_cp0_cause;
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	/* Set a default exit reason */
+	run->exit_reason = KVM_EXIT_UNKNOWN;
+	run->ready_for_interrupt_injection = 1;
+
+	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+	kvm_mips_set_c0_status();
+
+	local_irq_enable();
+
+	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+			cause, opc, run, vcpu);
+
+	/* Do a privilege check; if in UM, most of these exit conditions end up
+	 * causing an exception to be delivered to the Guest Kernel
+	 */
+	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+	if (er == EMULATE_PRIV_FAIL) {
+		goto skip_emul;
+	} else if (er == EMULATE_FAIL) {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		goto skip_emul;
+	}
+
+	switch (exccode) {
+	case T_INT:
+		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+		++vcpu->stat.int_exits;
+		trace_kvm_exit(vcpu, INT_EXITS);
+
+		if (need_resched()) {
+			cond_resched();
+		}
+
+		ret = RESUME_GUEST;
+		break;
+
+	case T_COP_UNUSABLE:
+		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+		++vcpu->stat.cop_unusable_exits;
+		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+		/* XXXKYMA: Might need to return to user space */
+		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+			ret = RESUME_HOST;
+		}
+		break;
+
+	case T_TLB_MOD:
+		++vcpu->stat.tlbmod_exits;
+		trace_kvm_exit(vcpu, TLBMOD_EXITS);
+		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+		break;
+
+	case T_TLB_ST_MISS:
+		kvm_debug
+		    ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+		     badvaddr);
+
+		++vcpu->stat.tlbmiss_st_exits;
+		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+		break;
+
+	case T_TLB_LD_MISS:
+		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+			  cause, opc, badvaddr);
+
+		++vcpu->stat.tlbmiss_ld_exits;
+		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+		break;
+
+	case T_ADDR_ERR_ST:
+		++vcpu->stat.addrerr_st_exits;
+		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+		break;
+
+	case T_ADDR_ERR_LD:
+		++vcpu->stat.addrerr_ld_exits;
+		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+		break;
+
+	case T_SYSCALL:
+		++vcpu->stat.syscall_exits;
+		trace_kvm_exit(vcpu, SYSCALL_EXITS);
+		ret = kvm_mips_callbacks->handle_syscall(vcpu);
+		break;
+
+	case T_RES_INST:
+		++vcpu->stat.resvd_inst_exits;
+		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+		break;
+
+	case T_BREAK:
+		++vcpu->stat.break_inst_exits;
+		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+		ret = kvm_mips_callbacks->handle_break(vcpu);
+		break;
+
+	default:
+		kvm_err
+		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+		     kvm_read_c0_guest_status(vcpu->arch.cop0));
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		break;
+
+	}
+
+skip_emul:
+	local_irq_disable();
+
+	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+		kvm_mips_deliver_interrupts(vcpu, cause);
+
+	if (!(ret & RESUME_HOST)) {
+		/* Only check for signals if not already exiting to userspace  */
+		if (signal_pending(current)) {
+			run->exit_reason = KVM_EXIT_INTR;
+			ret = (-EINTR << 2) | RESUME_HOST;
+			++vcpu->stat.signal_exits;
+			trace_kvm_exit(vcpu, SIGNAL_EXITS);
+		}
+	}
+
+	return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+	int ret;
+
+	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+	if (ret)
+		return ret;
+
+	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
+	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
+	 * to avoid the possibility of double faulting. The issue is that the TLB code
+	 * references routines that are part of the KVM module,
+	 * which are only available once the module is loaded.
+	 */
+	kvm_mips_gfn_to_pfn = gfn_to_pfn;
+	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+	kvm_mips_is_error_pfn = is_error_pfn;
+
+	pr_info("KVM/MIPS Initialized\n");
+	return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+	kvm_exit();
+
+	kvm_mips_gfn_to_pfn = NULL;
+	kvm_mips_release_pfn_clean = NULL;
+	kvm_mips_is_error_pfn = NULL;
+
+	pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 0000000..a4a8c85
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage, mapped into guest kernel space
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+	struct mips_coproc cop0;	/* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 0000000..3873b1e
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+	memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+	/* Specific init values for fields */
+	vcpu->arch.cop0 = &page->cop0;
+	memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+	return;
+}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 0000000..96528e2
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+			   struct kvm_vcpu *vcpu)
+{
+	int result = 0;
+	unsigned long kseg0_opc;
+	uint32_t synci_inst = 0x0;
+
+	/* Replace the CACHE instruction with a NOP */
+	kseg0_opc =
+	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+		       (vcpu, (unsigned long) opc));
+	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+	mips32_SyncICache(kseg0_opc, 32);
+
+	return result;
+}
+
+/*
+ * Address-based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but it avoids an expensive trap
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+			struct kvm_vcpu *vcpu)
+{
+	int result = 0;
+	unsigned long kseg0_opc;
+	uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+	base = (inst >> 21) & 0x1f;
+	offset = inst & 0xffff;
+	synci_inst |= (base << 21);
+	synci_inst |= offset;
+
+	kseg0_opc =
+	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+		       (vcpu, (unsigned long) opc));
+	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+	mips32_SyncICache(kseg0_opc, 32);
+
+	return result;
+}
+
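+/*
+ * Rewrite a trapping mfc0 in guest memory so subsequent executions avoid
+ * the trap: reads of ERRCTL become the CLEAR_TEMPLATE instruction, and
+ * everything else becomes a load (lw) of the register's commpage copy.
+ */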
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+	int32_t rt, rd, sel;
+	uint32_t mfc0_inst;
+	unsigned long kseg0_opc, flags;
+
+	rt = (inst >> 16) & 0x1f;
+	rd = (inst >> 11) & 0x1f;
+	sel = inst & 0x7;
+
+	if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+		mfc0_inst = CLEAR_TEMPLATE;
+		mfc0_inst |= ((rt & 0x1f) << 16);
+	} else {
+		mfc0_inst = LW_TEMPLATE;
+		mfc0_inst |= ((rt & 0x1f) << 16);
+		mfc0_inst |=
+		    offsetof(struct mips_coproc,
+			     reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+						      cop0);
+	}
+
+	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+		kseg0_opc =
+		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+			       (vcpu, (unsigned long) opc));
+		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+		mips32_SyncICache(kseg0_opc, 32);
+	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+		mips32_SyncICache((unsigned long) opc, 32);
+		local_irq_restore(flags);
+	} else {
+		kvm_err("%s: Invalid address: %p\n", __func__, opc);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
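+/*
+ * Rewrite a trapping mtc0 into a store (sw) to the commpage copy of the
+ * COP0 register.
+ */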
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+	int32_t rt, rd, sel;
+	uint32_t mtc0_inst = SW_TEMPLATE;
+	unsigned long kseg0_opc, flags;
+
+	rt = (inst >> 16) & 0x1f;
+	rd = (inst >> 11) & 0x1f;
+	sel = inst & 0x7;
+
+	mtc0_inst |= ((rt & 0x1f) << 16);
+	mtc0_inst |=
+	    offsetof(struct mips_coproc,
+		     reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+		kseg0_opc =
+		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+			       (vcpu, (unsigned long) opc));
+		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+		mips32_SyncICache(kseg0_opc, 32);
+	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+		mips32_SyncICache((unsigned long) opc, 32);
+		local_irq_restore(flags);
+	} else {
+		kvm_err("%s: Invalid address: %p\n", __func__, opc);
+		return -EFAULT;
+	}
+
+	return 0;
+}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 0000000..2b2bac9
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1826 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
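+/*
+ * Presumably a workaround: hide CONFIG_MIPS_MT while pulling in
+ * r4kcache.h so that the plain (non-MT) cache op variants are used.
+ */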
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address, emulating the branch if required. This
+ * function should only be called for an instruction sitting in a branch
+ * delay slot (i.e. when cause.BD is set).
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+	unsigned long instpc)
+{
+	unsigned int dspcontrol;
+	union mips_instruction insn;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	long epc = instpc;
+	long nextpc = KVM_INVALID_INST;
+
+	if (epc & 3)
+		goto unaligned;
+
+	/*
+	 * Read the instruction
+	 */
+	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+	if (insn.word == KVM_INVALID_INST)
+		return KVM_INVALID_INST;
+
+	switch (insn.i_format.opcode) {
+		/*
+		 * jr and jalr are in r_format format.
+		 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+			arch->gprs[insn.r_format.rd] = epc + 8;
+			/* Fall through */
+		case jr_op:
+			nextpc = arch->gprs[insn.r_format.rs];
+			break;
+		}
+		break;
+
+		/*
+		 * This group contains:
+		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+		 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+			if ((long)arch->gprs[insn.i_format.rs] < 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bgez_op:
+		case bgezl_op:
+			if ((long)arch->gprs[insn.i_format.rs] >= 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bltzal_op:
+		case bltzall_op:
+			arch->gprs[31] = epc + 8;
+			if ((long)arch->gprs[insn.i_format.rs] < 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bgezal_op:
+		case bgezall_op:
+			arch->gprs[31] = epc + 8;
+			if ((long)arch->gprs[insn.i_format.rs] >= 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+		case bposge32_op:
+			if (!cpu_has_dsp)
+				goto sigill;
+
+			dspcontrol = rddsp(0x01);
+
+			if (dspcontrol >= 32) {
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			} else
+				epc += 8;
+			nextpc = epc;
+			break;
+		}
+		break;
+
+		/*
+		 * These are unconditional and in j_format.
+		 */
+	case jal_op:
+		arch->gprs[31] = instpc + 8;
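+		/* Fall through */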
+	case j_op:
+		epc += 4;
+		epc >>= 28;
+		epc <<= 28;
+		epc |= (insn.j_format.target << 2);
+		nextpc = epc;
+		break;
+
+		/*
+		 * These are conditional and in i_format.
+		 */
+	case beq_op:
+	case beql_op:
+		if (arch->gprs[insn.i_format.rs] ==
+		    arch->gprs[insn.i_format.rt])
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case bne_op:
+	case bnel_op:
+		if (arch->gprs[insn.i_format.rs] !=
+		    arch->gprs[insn.i_format.rt])
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case blez_op:		/* not really i_format */
+	case blezl_op:
+		/* rt field assumed to be zero */
+		if ((long)arch->gprs[insn.i_format.rs] <= 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case bgtz_op:
+	case bgtzl_op:
+		/* rt field assumed to be zero */
+		if ((long)arch->gprs[insn.i_format.rs] > 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+		/*
+		 * And now the FPA/cp1 branch instructions.
+		 */
+	case cop1_op:
+		printk("%s: unsupported cop1_op\n", __func__);
+		break;
+	}
+
+	return nextpc;
+
+unaligned:
+	printk("%s: unaligned epc\n", __func__);
+	return nextpc;
+
+sigill:
+	printk("%s: DSP branch but not DSP ASE\n", __func__);
+	return nextpc;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+	unsigned long branch_pc;
+	enum emulation_result er = EMULATE_DONE;
+
+	if (cause & CAUSEF_BD) {
+		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+		if (branch_pc == KVM_INVALID_INST) {
+			er = EMULATE_FAIL;
+		} else {
+			vcpu->arch.pc = branch_pc;
+			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+		}
+	} else
+		vcpu->arch.pc += 4;
+
+	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+	return er;
+}
+
+/* Every time the compare register is written to, we need to decide when to
+ * fire the timer that represents timer ticks to the GUEST.
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+
+	/* If COUNT is enabled */
+	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+		hrtimer_start(&vcpu->arch.comparecount_timer,
+			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+	} else {
+		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+
+	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+			  kvm_read_c0_guest_epc(cop0));
+		kvm_clear_c0_guest_status(cop0, ST0_EXL);
+		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else {
+		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+		       vcpu->arch.pc);
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+
+	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+		  vcpu->arch.pending_exceptions);
+
+	++vcpu->stat.wait_exits;
+	trace_kvm_exit(vcpu, WAIT_EXITS);
+	if (!vcpu->arch.pending_exceptions) {
+		vcpu->arch.wait = 1;
+		kvm_vcpu_block(vcpu);
+
+		/* If we are runnable, then definitely go off to user space to check if any
+		 * I/O interrupts are pending.
+		 */
+		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+		}
+	}
+
+	return er;
+}
+
+/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
+ * this, if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_FAIL;
+	uint32_t pc = vcpu->arch.pc;
+
+	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+	return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int index = kvm_read_c0_guest_index(cop0);
+	enum emulation_result er = EMULATE_DONE;
+	struct kvm_mips_tlb *tlb = NULL;
+	uint32_t pc = vcpu->arch.pc;
+
+	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+		printk("%s: illegal index: %d\n", __func__, index);
+		printk
+		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+		     pc, index, kvm_read_c0_guest_entryhi(cop0),
+		     kvm_read_c0_guest_entrylo0(cop0),
+		     kvm_read_c0_guest_entrylo1(cop0),
+		     kvm_read_c0_guest_pagemask(cop0));
+		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+	}
+
+	tlb = &vcpu->arch.guest_tlb[index];
+#if 1
+	/* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+	kvm_debug
+	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+	     pc, index, kvm_read_c0_guest_entryhi(cop0),
+	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+	     kvm_read_c0_guest_pagemask(cop0));
+
+	return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+	struct kvm_mips_tlb *tlb = NULL;
+	uint32_t pc = vcpu->arch.pc;
+	int index;
+
+#if 1
+	get_random_bytes(&index, sizeof(index));
+	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+#else
+	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+		printk("%s: illegal index: %d\n", __func__, index);
+		return EMULATE_FAIL;
+	}
+
+	tlb = &vcpu->arch.guest_tlb[index];
+
+#if 1
+	/* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+	kvm_debug
+	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+	     pc, index, kvm_read_c0_guest_entryhi(cop0),
+	     kvm_read_c0_guest_entrylo0(cop0),
+	     kvm_read_c0_guest_entrylo1(cop0));
+
+	return er;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	long entryhi = kvm_read_c0_guest_entryhi(cop0);
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t pc = vcpu->arch.pc;
+	int index = -1;
+
+	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+	kvm_write_c0_guest_index(cop0, index);
+
+	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+		  index);
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	enum emulation_result er = EMULATE_DONE;
+	int32_t rt, rd, copz, sel, co_bit, op;
+	uint32_t pc = vcpu->arch.pc;
+	unsigned long curr_pc;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL) {
+		return er;
+	}
+
+	copz = (inst >> 21) & 0x1f;
+	rt = (inst >> 16) & 0x1f;
+	rd = (inst >> 11) & 0x1f;
+	sel = inst & 0x7;
+	co_bit = (inst >> 25) & 1;
+
+	/* Verify that the register is valid */
+	if (rd > MIPS_CP0_DESAVE) {
+		printk("Invalid rd: %d\n", rd);
+		er = EMULATE_FAIL;
+		goto done;
+	}
+
+	if (co_bit) {
+		op = (inst) & 0xff;
+
+		switch (op) {
+		case tlbr_op:	/*  Read indexed TLB entry  */
+			er = kvm_mips_emul_tlbr(vcpu);
+			break;
+		case tlbwi_op:	/*  Write indexed  */
+			er = kvm_mips_emul_tlbwi(vcpu);
+			break;
+		case tlbwr_op:	/*  Write random  */
+			er = kvm_mips_emul_tlbwr(vcpu);
+			break;
+		case tlbp_op:	/* TLB Probe */
+			er = kvm_mips_emul_tlbp(vcpu);
+			break;
+		case rfe_op:
+			printk("!!!COP0_RFE!!!\n");
+			break;
+		case eret_op:
+			er = kvm_mips_emul_eret(vcpu);
+			goto dont_update_pc;
+			break;
+		case wait_op:
+			er = kvm_mips_emul_wait(vcpu);
+			break;
+		}
+	} else {
+		switch (copz) {
+		case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+			cop0->stat[rd][sel]++;
+#endif
+			/* Get reg */
+			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+				/* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+				vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+				vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+			} else {
+				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+			}
+
+			kvm_debug
+			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+			break;
+
+		case dmfc_op:
+			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+			break;
+
+		case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+			cop0->stat[rd][sel]++;
+#endif
+			if ((rd == MIPS_CP0_TLB_INDEX)
+			    && (vcpu->arch.gprs[rt] >=
+				KVM_MIPS_GUEST_TLB_SIZE)) {
+				printk("Invalid TLB Index: %ld",
+				       vcpu->arch.gprs[rt]);
+				er = EMULATE_FAIL;
+				break;
+			}
+#define C0_EBASE_CORE_MASK 0xff
+			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+				/* Preserve CORE number */
+				kvm_change_c0_guest_ebase(cop0,
+							  ~(C0_EBASE_CORE_MASK),
+							  vcpu->arch.gprs[rt]);
+				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+				       kvm_read_c0_guest_ebase(cop0));
+			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+				uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+				    &&
+				    (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
+				      != nasid)) {
+
+					kvm_debug
+					    ("MTCz, change ASID from %#lx to %#lx\n",
+					     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
+					     ASID_MASK(vcpu->arch.gprs[rt]));
+
+					/* Blow away the shadow host TLBs */
+					kvm_mips_flush_host_tlb(1);
+				}
+				kvm_write_c0_guest_entryhi(cop0,
+							   vcpu->arch.gprs[rt]);
+			}
+			/* Are we writing to COUNT */
+			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+				/* Linux doesn't seem to write into COUNT; silently
+				 * ignore the write for now (error reporting is disabled)
+				 */
+				/*er = EMULATE_FAIL; */
+				goto done;
+			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+					  pc, kvm_read_c0_guest_compare(cop0),
+					  vcpu->arch.gprs[rt]);
+
+				/* If we are writing to COMPARE */
+				/* Clear pending timer interrupt, if any */
+				kvm_mips_callbacks->dequeue_timer_int(vcpu);
+				kvm_write_c0_guest_compare(cop0,
+							   vcpu->arch.gprs[rt]);
+			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+				kvm_write_c0_guest_status(cop0,
+							  vcpu->arch.gprs[rt]);
+				/* Make sure that CU1 and NMI bits are never set */
+				kvm_clear_c0_guest_status(cop0,
+							  (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+			} else {
+				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+				kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+			}
+
+			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+				  rd, sel, cop0->reg[rd][sel]);
+			break;
+
+		case dmtc_op:
+			printk
+			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+			     vcpu->arch.pc, rt, rd, sel);
+			er = EMULATE_FAIL;
+			break;
+
+		case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+			cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+			if (rt != 0) {
+				vcpu->arch.gprs[rt] =
+				    kvm_read_c0_guest_status(cop0);
+			}
+			/* EI */
+			if (inst & 0x20) {
+				kvm_debug("[%#lx] mfmcz_op: EI\n",
+					  vcpu->arch.pc);
+				kvm_set_c0_guest_status(cop0, ST0_IE);
+			} else {
+				kvm_debug("[%#lx] mfmcz_op: DI\n",
+					  vcpu->arch.pc);
+				kvm_clear_c0_guest_status(cop0, ST0_IE);
+			}
+
+			break;
+
+		case wrpgpr_op:
+			{
+				uint32_t css =
+				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+				uint32_t pss =
+				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+				if (css || pss) {
+					er = EMULATE_FAIL;
+					break;
+				}
+				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+					  vcpu->arch.gprs[rt]);
+				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+			}
+			break;
+		default:
+			printk
+			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+			     vcpu->arch.pc, copz);
+			er = EMULATE_FAIL;
+			break;
+		}
+	}
+
+done:
+	/*
+	 * Rollback PC only if emulation was unsuccessful
+	 */
+	if (er == EMULATE_FAIL) {
+		vcpu->arch.pc = curr_pc;
+	}
+
+dont_update_pc:
+	/*
+	 * This is for special instructions whose emulation
+	 * updates the PC, so do not overwrite the PC under
+	 * any circumstances
+	 */
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DO_MMIO;
+	int32_t op, base, rt, offset;
+	uint32_t bytes;
+	void *data = run->mmio.data;
+	unsigned long curr_pc;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
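+	/*
+	 * Decode the I-type store: rt is the source register and base/offset
+	 * the address operands, though the faulting address itself is taken
+	 * from host CP0 BadVAddr below.
+	 */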
+	rt = (inst >> 16) & 0x1f;
+	base = (inst >> 21) & 0x1f;
+	offset = inst & 0xffff;
+	op = (inst >> 26) & 0x3f;
+
+	switch (op) {
+	case sb_op:
+		bytes = 1;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.len = bytes;
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+		*(u8 *) data = vcpu->arch.gprs[rt];
+		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+			  *(uint8_t *) data);
+
+		break;
+
+	case sw_op:
+		bytes = 4;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+		*(uint32_t *) data = vcpu->arch.gprs[rt];
+
+		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+			  vcpu->arch.gprs[rt], *(uint32_t *) data);
+		break;
+
+	case sh_op:
+		bytes = 2;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 1;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 1;
+		*(uint16_t *) data = vcpu->arch.gprs[rt];
+
+		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+			  vcpu->arch.gprs[rt], *(uint16_t *) data);
+		break;
+
+	default:
+		printk("Store not yet supported (inst=%#x)\n", inst);
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	/*
+	 * Rollback PC if emulation was unsuccessful
+	 */
+	if (er == EMULATE_FAIL) {
+		vcpu->arch.pc = curr_pc;
+	}
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DO_MMIO;
+	int32_t op, base, rt, offset;
+	uint32_t bytes;
+
+	rt = (inst >> 16) & 0x1f;
+	base = (inst >> 21) & 0x1f;
+	offset = inst & 0xffff;
+	op = (inst >> 26) & 0x3f;
+
+	vcpu->arch.pending_load_cause = cause;
+	vcpu->arch.io_gpr = rt;
+
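+	/*
+	 * The PC is not advanced here, unlike the store path; it is updated
+	 * via pending_load_cause in kvm_mips_complete_mmio_load() once the
+	 * MMIO read has completed and io_gpr can be filled in.
+	 */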
+	switch (op) {
+	case lw_op:
+		bytes = 4;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 0;
+		vcpu->mmio_needed = 1;
+		vcpu->mmio_is_write = 0;
+		break;
+
+	case lh_op:
+	case lhu_op:
+		bytes = 2;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 0;
+		vcpu->mmio_is_write = 0;
+
+		/* 2 requests sign extension on completion, 1 zero extension */
+		if (op == lh_op)
+			vcpu->mmio_needed = 2;
+		else
+			vcpu->mmio_needed = 1;
+
+		break;
+
+	case lbu_op:
+	case lb_op:
+		bytes = 1;
+		if (bytes > sizeof(run->mmio.data)) {
+			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
+			er = EMULATE_FAIL;
+			break;
+		}
+		run->mmio.phys_addr =
+		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+						   host_cp0_badvaddr);
+		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+			er = EMULATE_FAIL;
+			break;
+		}
+
+		run->mmio.len = bytes;
+		run->mmio.is_write = 0;
+		vcpu->mmio_is_write = 0;
+
+		if (op == lb_op)
+			vcpu->mmio_needed = 2;
+		else
+			vcpu->mmio_needed = 1;
+
+		break;
+
+	default:
+		printk("Load not yet supported (inst=%#x)\n", inst);
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+	unsigned long offset = (va & ~PAGE_MASK);
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pa;
+	gfn_t gfn;
+	pfn_t pfn;
+
+	gfn = va >> PAGE_SHIFT;
+
+	if (gfn >= kvm->arch.guest_pmap_npages) {
+		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		return -1;
+	}
+	pfn = kvm->arch.guest_pmap[gfn];
+	pa = (pfn << PAGE_SHIFT) | offset;
+
+	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+	mips32_SyncICache(CKSEG0ADDR(pa), 32);
+	return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
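+
+/* In the CACHE opcode, bits [17:16] select the cache to operate on and
+ * bits [20:18] the operation, matching the decode in
+ * kvm_mips_emulate_cache() below.
+ */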
+
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	extern void (*r4k_blast_dcache) (void);
+	extern void (*r4k_blast_icache) (void);
+	enum emulation_result er = EMULATE_DONE;
+	int32_t offset, cache, op_inst, op, base;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	unsigned long va;
+	unsigned long curr_pc;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	base = (inst >> 21) & 0x1f;
+	op_inst = (inst >> 16) & 0x1f;
+	offset = inst & 0xffff;
+	cache = (inst >> 16) & 0x3;
+	op = (inst >> 18) & 0x7;
+
+	va = arch->gprs[base] + offset;
+
+	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+		  cache, op, base, arch->gprs[base], offset);
+
+	/* Linux (and others) invalidate the caches at boot by stepping through
+	 * every way/index with INDEX_INV; emulate the whole sequence by
+	 * blasting the corresponding host cache in one go.
+	 */
+	if (op == MIPS_CACHE_OP_INDEX_INV) {
+		kvm_debug
+		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+		     arch->gprs[base], offset);
+
+		if (cache == MIPS_CACHE_DCACHE)
+			r4k_blast_dcache();
+		else if (cache == MIPS_CACHE_ICACHE)
+			r4k_blast_icache();
+		else {
+			printk("%s: unsupported CACHE INDEX operation\n",
+			       __func__);
+			return EMULATE_FAIL;
+		}
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+		goto done;
+	}
+
+	preempt_disable();
+	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+		}
+	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+		int index;
+
+		/* If an entry already exists then skip */
+		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+			goto skip_fault;
+		}
+
+		/* If address not in the guest TLB, then give the guest a fault, the
+		 * resulting handler will do the right thing
+		 */
+		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+
+		if (index < 0) {
+			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+			vcpu->arch.host_cp0_badvaddr = va;
+			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+							 vcpu);
+			preempt_enable();
+			goto dont_update_pc;
+		} else {
+			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+			/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+			if (!TLB_IS_VALID(*tlb, va)) {
+				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+								run, vcpu);
+				preempt_enable();
+				goto dont_update_pc;
+			} else {
+				/* We fault an entry from the guest tlb to the shadow host TLB */
+				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+								     NULL,
+								     NULL);
+			}
+		}
+	} else {
+		printk
+		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+		     cache, op, base, arch->gprs[base], offset);
+		er = EMULATE_FAIL;
+		preempt_enable();
+		goto dont_update_pc;
+
+	}
+
+skip_fault:
+	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+	if (cache == MIPS_CACHE_DCACHE
+	    && (op == MIPS_CACHE_OP_FILL_WB_INV
+		|| op == MIPS_CACHE_OP_HIT_INV)) {
+		flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		/* Replace the CACHE instruction with a SYNCI; not identical, but avoids a trap */
+		kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+		flush_dcache_line(va);
+		flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		/* Replace the CACHE instruction, with a SYNCI */
+		kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+	} else {
+		printk
+		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+		     cache, op, base, arch->gprs[base], offset);
+		er = EMULATE_FAIL;
+		preempt_enable();
+		goto dont_update_pc;
+	}
+
+	preempt_enable();
+
+dont_update_pc:
+	/*
+	 * Rollback PC so the CACHE op is re-executed once the injected
+	 * fault has been handled by the guest
+	 */
+	vcpu->arch.pc = curr_pc;
+done:
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+		      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t inst;
+
+	/*
+	 *  Fetch the instruction.
+	 */
+	if (cause & CAUSEF_BD) {
+		opc += 1;
+	}
+
+	inst = kvm_get_inst(opc, vcpu);
+
+	switch (((union mips_instruction)inst).r_format.opcode) {
+	case cop0_op:
+		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+		break;
+	case sb_op:
+	case sh_op:
+	case sw_op:
+		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+		break;
+	case lb_op:
+	case lbu_op:
+	case lhu_op:
+	case lh_op:
+	case lw_op:
+		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+		break;
+
+	case cache_op:
+		++vcpu->stat.cache_exits;
+		trace_kvm_exit(vcpu, CACHE_EXITS);
+		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+		break;
+
+	default:
+		printk("Instruction emulation not supported (%p/%#x)\n", opc,
+		       inst);
+		kvm_arch_vcpu_dump_regs(vcpu);
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_SYSCALL << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		printk("Trying to deliver SYSCALL when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+			    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		/* set pc to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+	} else {
+		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+			   struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long entryhi =
+		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+			  arch->pc);
+
+		/* set pc to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+			    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x0;
+	} else {
+		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+			   struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	} else {
+		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+		       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+
+#ifdef DEBUG
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+	int index;
+
+	/*
+	 * If the address is not in the guest TLB, then we are in trouble
+	 */
+	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+	if (index < 0) {
+		/* XXXKYMA Invalidate and retry */
+		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+		     __func__, entryhi);
+		kvm_mips_dump_guest_tlbs(vcpu);
+		kvm_mips_dump_host_tlbs();
+		return EMULATE_FAIL;
+	}
+#endif
+
+	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+			struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+			  arch->pc);
+
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	} else {
+		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+			  arch->pc);
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+	}
+
+	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+	/* setup badvaddr, context and entryhi registers for the guest */
+	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+	/* XXXKYMA: is the context register used by linux??? */
+	kvm_write_c0_guest_entryhi(cop0, entryhi);
+	/* Blow away the shadow host TLBs */
+	kvm_mips_flush_host_tlb(1);
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+	}
+
+	arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	kvm_change_c0_guest_cause(cop0, (0xff),
+				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+			struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_RES_INST << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver RI when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+			struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_BREAK << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		printk("Trying to deliver BP when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
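+
+/* These masks decode the RDHWR instruction (opcode SPEC3, function RDHWR),
+ * e.g. rdhwr $3, $29 (0x7c03e83b): rd (bits 15:11) selects the hardware
+ * register being read, rt (bits 20:16) the destination GPR.
+ */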
+
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+		   struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long curr_pc;
+	uint32_t inst;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	/*
+	 *  Fetch the instruction.
+	 */
+	if (cause & CAUSEF_BD)
+		opc += 1;
+
+	inst = kvm_get_inst(opc, vcpu);
+
+	if (inst == KVM_INVALID_INST) {
+		printk("%s: Cannot get inst @ %p\n", __func__, opc);
+		return EMULATE_FAIL;
+	}
+
+	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+		int rd = (inst & RD) >> 11;
+		int rt = (inst & RT) >> 16;
+		switch (rd) {
+		case 0:	/* CPU number */
+			arch->gprs[rt] = 0;
+			break;
+		case 1:	/* SYNCI length */
+			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+					     current_cpu_data.icache.linesz);
+			break;
+		case 2:	/* Read count register */
+			printk("RDHWR: Count register\n");
+			arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+			break;
+		case 3:	/* Count register resolution */
+			switch (current_cpu_data.cputype) {
+			case CPU_20KC:
+			case CPU_25KF:
+				arch->gprs[rt] = 1;
+				break;
+			default:
+				arch->gprs[rt] = 2;
+			}
+			break;
+		case 29:	/* UserLocal */
+#if 1
+			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+			/* UserLocal not implemented */
+			er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+			break;
+
+		default:
+			printk("RDHWR not supported\n");
+			er = EMULATE_FAIL;
+			break;
+		}
+	} else {
+		printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+		er = EMULATE_FAIL;
+	}
+
+	/*
+	 * Rollback PC only if emulation was unsuccessful
+	 */
+	if (er == EMULATE_FAIL) {
+		vcpu->arch.pc = curr_pc;
+	}
+	return er;
+}
+
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+	enum emulation_result er = EMULATE_DONE;
+	unsigned long curr_pc;
+
+	if (run->mmio.len > sizeof(*gpr)) {
+		printk("Bad MMIO length: %d\n", run->mmio.len);
+		er = EMULATE_FAIL;
+		goto done;
+	}
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
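+	/*
+	 * vcpu->mmio_needed was set at decode time: 2 requests sign
+	 * extension on completion (lb/lh), 1 zero extension (lbu/lhu).
+	 */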
+	switch (run->mmio.len) {
+	case 4:
+		*gpr = *(int32_t *) run->mmio.data;
+		break;
+
+	case 2:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(int16_t *) run->mmio.data;
+		else
+			*gpr = *(uint16_t *) run->mmio.data;
+
+		break;
+	case 1:
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(int8_t *) run->mmio.data;
+		else
+			*gpr = *(u8 *) run->mmio.data;
+		break;
+	}
+
+	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+		kvm_debug
+		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+		     vcpu->mmio_needed);
+
+done:
+	return er;
+}
+
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+		     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (exccode << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+			  exccode, kvm_read_c0_guest_epc(cop0),
+			  kvm_read_c0_guest_badvaddr(cop0));
+	} else {
+		printk("Trying to deliver EXC when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+			 struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+	if (usermode) {
+		switch (exccode) {
+		case T_INT:
+		case T_SYSCALL:
+		case T_BREAK:
+		case T_RES_INST:
+			break;
+
+		case T_COP_UNUSABLE:
+			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+				er = EMULATE_PRIV_FAIL;
+			break;
+
+		case T_TLB_MOD:
+			break;
+
+		case T_TLB_LD_MISS:
+			/* If we are accessing Guest kernel space, then send an address error exception to the guest */
+			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+				printk("%s: LD MISS @ %#lx\n", __func__,
+				       badvaddr);
+				cause &= ~0xff;
+				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+				er = EMULATE_PRIV_FAIL;
+			}
+			break;
+
+		case T_TLB_ST_MISS:
+			/* If we are accessing Guest kernel space, then send an address error exception to the guest */
+			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+				printk("%s: ST MISS @ %#lx\n", __func__,
+				       badvaddr);
+				cause &= ~0xff;
+				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+				er = EMULATE_PRIV_FAIL;
+			}
+			break;
+
+		case T_ADDR_ERR_ST:
+			printk("%s: address error ST @ %#lx\n", __func__,
+			       badvaddr);
+			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+				cause &= ~0xff;
+				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+			}
+			er = EMULATE_PRIV_FAIL;
+			break;
+		case T_ADDR_ERR_LD:
+			printk("%s: address error LD @ %#lx\n", __func__,
+			       badvaddr);
+			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+				cause &= ~0xff;
+				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+			}
+			er = EMULATE_PRIV_FAIL;
+			break;
+		default:
+			er = EMULATE_PRIV_FAIL;
+			break;
+		}
+	}
+
+	if (er == EMULATE_PRIV_FAIL) {
+		kvm_mips_emulate_exc(cause, opc, run, vcpu);
+	}
+	return er;
+}
+
+/* User Address (UA) fault, this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+			struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	enum emulation_result er = EMULATE_DONE;
+	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+	unsigned long va = vcpu->arch.host_cp0_badvaddr;
+	int index;
+
+	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+	/* KVM would not have got the exception if this entry was valid in the
+	 * shadow host TLB. Check the Guest TLB: if the entry is not there then
+	 * send the guest an exception; the guest exception handler should then
+	 * inject an entry into the guest TLB.
+	 */
+	index = kvm_mips_guest_tlb_lookup(vcpu,
+					  (va & VPN2_MASK) |
+					  ASID_MASK(kvm_read_c0_guest_entryhi
+					   (vcpu->arch.cop0)));
+	if (index < 0) {
+		if (exccode == T_TLB_LD_MISS) {
+			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+		} else if (exccode == T_TLB_ST_MISS) {
+			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+		} else {
+			printk("%s: invalid exc code: %d\n", __func__, exccode);
+			er = EMULATE_FAIL;
+		}
+	} else {
+		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+		/* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+		if (!TLB_IS_VALID(*tlb, va)) {
+			if (exccode == T_TLB_LD_MISS) {
+				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+								vcpu);
+			} else if (exccode == T_TLB_ST_MISS) {
+				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+								vcpu);
+			} else {
+				printk("%s: invalid exc code: %d\n", __func__,
+				       exccode);
+				er = EMULATE_FAIL;
+			}
+		} else {
+#ifdef DEBUG
+			kvm_debug
+			    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+			/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+							     NULL);
+		}
+	}
+
+	return er;
+}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 0000000..1e5de16
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupt delivery
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+	set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+	clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+	/* Set the Cause bits to reflect the pending timer interrupt;
+	 * the EXC code will be set when the interrupt is actually
+	 * delivered:
+	 */
+	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+	/* Queue up an INT exception for the core */
+	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+
+	/* Set the Cause bits to reflect the pending IO interrupt;
+	 * the EXC code will be set when the interrupt is actually
+	 * delivered:
+	 */
+	switch (intr) {
+	case 2:
+		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+		/* Queue up an INT exception for the core */
+		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+		break;
+
+	case 3:
+		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+		break;
+
+	case 4:
+		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+			   struct kvm_mips_interrupt *irq)
+{
+	int intr = (int)irq->irq;
+	switch (intr) {
+	case -2:
+		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+		break;
+
+	case -3:
+		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+		break;
+
+	case -4:
+		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			uint32_t cause)
+{
+	int allowed = 0;
+	uint32_t exccode;
+
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	switch (priority) {
+	case MIPS_EXC_INT_TIMER:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	case MIPS_EXC_INT_IO:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	case MIPS_EXC_INT_IPI_1:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	case MIPS_EXC_INT_IPI_2:
+		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+			allowed = 1;
+			exccode = T_INT;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	/* Are we allowed to deliver the interrupt ??? */
+	if (allowed) {
+
+		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+			/* save old pc */
+			kvm_write_c0_guest_epc(cop0, arch->pc);
+			kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+			if (cause & CAUSEF_BD)
+				kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+			else
+				kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+			kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+		} else
+			kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+		kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+					  (exccode << CAUSEB_EXCCODE));
+
+		/* XXXSL Set PC to the interrupt exception entry point */
+		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+			arch->pc = KVM_GUEST_KSEG0 + 0x200;
+		else
+			arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+		clear_bit(priority, &vcpu->arch.pending_exceptions);
+	}
+
+	return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+		      uint32_t cause)
+{
+	return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+	unsigned long *pending = &vcpu->arch.pending_exceptions;
+	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+	unsigned int priority;
+
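+	/* Lower bit numbers are higher priority (MIPS_EXC_RESET == 0), so
+	 * scanning upwards from the first set bit walks the pending bitmaps
+	 * from highest to lowest priority.
+	 */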
+	if (!(*pending) && !(*pending_clr))
+		return;
+
+	/* find_first_bit() is well defined on an empty bitmap, unlike __ffs() */
+	priority = find_first_bit(pending_clr,
+				  BITS_PER_BYTE * sizeof(*pending_clr));
+	while (priority <= MIPS_EXC_MAX) {
+		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+			if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+				break;
+		}
+
+		priority = find_next_bit(pending_clr,
+					 BITS_PER_BYTE * sizeof(*pending_clr),
+					 priority + 1);
+	}
+
+	priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
+	while (priority <= MIPS_EXC_MAX) {
+		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+			if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+				break;
+		}
+
+		priority = find_next_bit(pending,
+					 BITS_PER_BYTE * sizeof(*pending),
+					 priority + 1);
+	}
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 0000000..20da7d2
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupts
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/* MIPS exception priorities: exceptions (including interrupts) are queued
+ * up for the guest in the order given by their priorities below; lower
+ * numbers are delivered first.
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
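+
+/* When these are 0, kvm_mips_deliver_interrupts() stops after the first
+ * exception it successfully delivers/clears per invocation instead of
+ * draining the whole pending bitmap.
+ */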
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+			      struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+				struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			    uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+			  uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 0000000..86d3b4c
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define     mfmcz_op         0x0b	/*  01011  */
+#define     wrpgpr_op        0x0e	/*  01110  */
+
+/*  COP0 opcodes (only if COP0 and CO=1):  */
+#define     wait_op               0x20	/*  100000  */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 0000000..075904b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: COP0 access histogram
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+	"WAIT",
+	"CACHE",
+	"Signal",
+	"Interrupt",
+	"COP0/1 Unusable",
+	"TLB Mod",
+	"TLB Miss (LD)",
+	"TLB Miss (ST)",
+	"Address Err (ST)",
+	"Address Error (LD)",
+	"System Call",
+	"Reserved Inst",
+	"Break Inst",
+	"D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+	"Index",
+	"Random",
+	"EntryLo0",
+	"EntryLo1",
+	"Context",
+	"PG Mask",
+	"Wired",
+	"HWREna",
+	"BadVAddr",
+	"Count",
+	"EntryHI",
+	"Compare",
+	"Status",
+	"Cause",
+	"EXC PC",
+	"PRID",
+	"Config",
+	"LLAddr",
+	"Watch Lo",
+	"Watch Hi",
+	"X Context",
+	"Reserved",
+	"Impl Dep",
+	"Debug",
+	"DEPC",
+	"PerfCnt",
+	"ErrCtl",
+	"CacheErr",
+	"TagLo",
+	"TagHi",
+	"ErrorEPC",
+	"DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+	int i, j;
+
+	printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+			if (vcpu->arch.cop0->stat[i][j])
+				printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+				       vcpu->arch.cop0->stat[i][j]);
+		}
+	}
+#endif
+
+	return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 0000000..89511a9
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,928 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+ * TLB handlers run from KSEG0
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
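+/* r4kcache.h is included with CONFIG_MIPS_MT hidden so that the non-MT
+ * variants of the cache blast helpers are used below.
+ */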
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+/* Invalidate a TLB entry by pointing EntryHi at a unique KSEG0 address for
+ * each index, so the entry can never match a real access.
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
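+/* The guest's kernel and user address spaces run under separate host ASIDs,
+ * allocated per physical CPU; these helpers return the masked ASID for the
+ * current CPU.
+ */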
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+}
+
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+	unsigned long old_entryhi;
+	unsigned long old_pagemask;
+	struct kvm_mips_tlb tlb;
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+
+	printk("HOST TLBs:\n");
+	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+
+	for (i = 0; i < current_cpu_data.tlbsize; i++) {
+		write_c0_index(i);
+		mtc0_tlbw_hazard();
+
+		tlb_read();
+		tlbw_use_hazard();
+
+		tlb.tlb_hi = read_c0_entryhi();
+		tlb.tlb_lo0 = read_c0_entrylo0();
+		tlb.tlb_lo1 = read_c0_entrylo1();
+		tlb.tlb_mask = read_c0_pagemask();
+
+		printk("TLB%c%3d Hi 0x%08lx ",
+		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+		       i, tlb.tlb_hi);
+		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+		       (tlb.tlb_lo0 >> 3) & 7);
+		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+	}
+	write_c0_entryhi(old_entryhi);
+	write_c0_pagemask(old_pagemask);
+	mtc0_tlbw_hazard();
+	local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_mips_tlb tlb;
+	int i;
+
+	printk("Guest TLBs:\n");
+	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+		tlb = vcpu->arch.guest_tlb[i];
+		printk("TLB%c%3d Hi 0x%08lx ",
+		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+		       i, tlb.tlb_hi);
+		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+		       (tlb.tlb_lo0 >> 3) & 7);
+		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+	}
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+	int i;
+	volatile struct kvm_mips_tlb tlb;
+
+	printk("Shadow TLBs:\n");
+	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+		tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+		printk("TLB%c%3d Hi 0x%08lx ",
+		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+		       i, tlb.tlb_hi);
+		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+		       (tlb.tlb_lo0 >> 3) & 7);
+		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+	}
+}
+
+static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+	pfn_t pfn;
+
+	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+		return;
+
+	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+	if (kvm_mips_is_error_pfn(pfn)) {
+		panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+	}
+
+	kvm->arch.guest_pmap[gfn] = pfn;
+	return;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+	unsigned long gva)
+{
+	gfn_t gfn;
+	uint32_t offset = gva & ~PAGE_MASK;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+			__builtin_return_address(0), gva);
+		return KVM_INVALID_PAGE;
+	}
+
+	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+	if (gfn >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+			gva);
+		return KVM_INVALID_PAGE;
+	}
+	kvm_mips_map_page(vcpu->kvm, gfn);
+	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+	unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+	unsigned long flags;
+	unsigned long old_entryhi;
+	volatile int idx;
+
+	local_irq_save(flags);
+
+
+	old_entryhi = read_c0_entryhi();
+	write_c0_entryhi(entryhi);
+	mtc0_tlbw_hazard();
+
+	tlb_probe();
+	tlb_probe_hazard();
+	idx = read_c0_index();
+
+	if (idx >= current_cpu_data.tlbsize) {
+		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+		kvm_mips_dump_host_tlbs();
+		local_irq_restore(flags);
+		return -1;
+	}
+
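+	/* On a probe miss, pick a (pseudo-)random victim entry to overwrite;
+	 * otherwise the matching entry is replaced in place.
+	 */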
+	if (idx < 0) {
+		idx = read_c0_random() % current_cpu_data.tlbsize;
+		write_c0_index(idx);
+		mtc0_tlbw_hazard();
+	}
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
+	mtc0_tlbw_hazard();
+
+	tlb_write_indexed();
+	tlbw_use_hazard();
+
+#ifdef DEBUG
+	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+		  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+		  vcpu->arch.pc, idx, read_c0_entryhi(),
+		  read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+	/* Flush D-cache */
+	if (flush_dcache_mask) {
+		if (entrylo0 & MIPS3_PG_V) {
+			++vcpu->stat.flush_dcache_exits;
+			flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+		}
+		if (entrylo1 & MIPS3_PG_V) {
+			++vcpu->stat.flush_dcache_exits;
+			flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+				(0x1 << PAGE_SHIFT));
+		}
+	}
+
+	/* Restore old ASID */
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+	local_irq_restore(flags);
+	return 0;
+}
+
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+	struct kvm_vcpu *vcpu)
+{
+	gfn_t gfn;
+	pfn_t pfn0, pfn1;
+	unsigned long vaddr = 0;
+	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+	int even;
+	struct kvm *kvm = vcpu->kvm;
+	const int flush_dcache_mask = 0;
+
+
+	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		return -1;
+	}
+
+	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+	if (gfn >= kvm->arch.guest_pmap_npages) {
+		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+			gfn, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		return -1;
+	}
+	even = !(gfn & 0x1);
+	vaddr = badvaddr & (PAGE_MASK << 1);
+
+	kvm_mips_map_page(vcpu->kvm, gfn);
+	kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+
+	if (even) {
+		pfn0 = kvm->arch.guest_pmap[gfn];
+		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+	} else {
+		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+		pfn1 = kvm->arch.guest_pmap[gfn];
+	}
+
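+	/* Each host TLB entry maps an even/odd pair of guest pages; build
+	 * both EntryLo values as cacheable (C=3), dirty and valid.
+	 */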
+	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+			(0x1 << 1);
+	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+			(0x1 << 1);
+
+	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				       flush_dcache_mask);
+}
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+	struct kvm_vcpu *vcpu)
+{
+	pfn_t pfn0, pfn1;
+	unsigned long flags, old_entryhi = 0, vaddr = 0;
+	unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+	pfn1 = 0;
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+			(0x1 << 1);
+	entrylo1 = 0;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	vaddr = badvaddr & (PAGE_MASK << 1);
+	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+	mtc0_tlbw_hazard();
+	write_c0_entrylo0(entrylo0);
+	mtc0_tlbw_hazard();
+	write_c0_entrylo1(entrylo1);
+	mtc0_tlbw_hazard();
+	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+	mtc0_tlbw_hazard();
+	tlb_write_indexed();
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+#ifdef DEBUG
+	kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+	     vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+	     read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+	/* Restore old ASID */
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+	struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+	struct kvm *kvm = vcpu->kvm;
+	pfn_t pfn0, pfn1;
+
+
+	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+		pfn0 = 0;
+		pfn1 = 0;
+	} else {
+		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
+		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+
+		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+	}
+
+	if (hpa0)
+		*hpa0 = pfn0 << PAGE_SHIFT;
+
+	if (hpa1)
+		*hpa1 = pfn1 << PAGE_SHIFT;
+
+	/* Get attributes from the Guest TLB */
+	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+			kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+			(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+			(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+		  tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+				       tlb->tlb_mask);
+}
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+	int i;
+	int index = -1;
+	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
+		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+		    (TLB_IS_GLOBAL(tlb[i]) ||
+		     (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+			index = i;
+			break;
+		}
+	}
+
+#ifdef DEBUG
+	if (index >= 0)
+		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+			  __func__, entryhi, index,
+			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
+#endif
+
+	return index;
+}
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+	unsigned long old_entryhi, flags;
+	volatile int idx;
+
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+
+	if (KVM_GUEST_KERNEL_MODE(vcpu))
+		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+	else {
+		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+	}
+
+	mtc0_tlbw_hazard();
+
+	tlb_probe();
+	tlb_probe_hazard();
+	idx = read_c0_index();
+
+	/* Restore old ASID */
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+
+#ifdef DEBUG
+	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+	return idx;
+}
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+	int idx;
+	unsigned long flags, old_entryhi;
+
+	local_irq_save(flags);
+
+
+	old_entryhi = read_c0_entryhi();
+
+	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+	mtc0_tlbw_hazard();
+
+	tlb_probe();
+	tlb_probe_hazard();
+	idx = read_c0_index();
+
+	BUG_ON(idx >= current_cpu_data.tlbsize);
+
+	if (idx >= 0) {
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+		mtc0_tlbw_hazard();
+
+		write_c0_entrylo0(0);
+		mtc0_tlbw_hazard();
+
+		write_c0_entrylo1(0);
+		mtc0_tlbw_hazard();
+
+		tlb_write_indexed();
+		mtc0_tlbw_hazard();
+	}
+
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+
+#ifdef DEBUG
+	if (idx >= 0) {
+		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
+	}
+#endif
+
+	return 0;
+}
+
+/* XXXKYMA: Fix this -- guest USER and KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+	unsigned long flags, old_entryhi;
+
+	BUG_ON(index >= current_cpu_data.tlbsize);
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+
+	write_c0_entryhi(UNIQUE_ENTRYHI(index));
+	mtc0_tlbw_hazard();
+
+	write_c0_index(index);
+	mtc0_tlbw_hazard();
+
+	write_c0_entrylo0(0);
+	mtc0_tlbw_hazard();
+
+	write_c0_entrylo1(0);
+	mtc0_tlbw_hazard();
+
+	tlb_write_indexed();
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	write_c0_entryhi(old_entryhi);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+
+	return 0;
+}
+
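+/*
+ * Invalidate all host TLB entries. If @skip_kseg0 is set, each entry is
+ * read back first and entries mapping guest KSEG0 are preserved, so
+ * guest kernel translations survive the flush.
+ */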
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+	unsigned long flags;
+	unsigned long old_entryhi, entryhi;
+	unsigned long old_pagemask;
+	int entry = 0;
+	int maxentry = current_cpu_data.tlbsize;
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+
+	/* Blast 'em all away. */
+	for (entry = 0; entry < maxentry; entry++) {
+
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+
+		if (skip_kseg0) {
+			tlb_read();
+			tlbw_use_hazard();
+
+			entryhi = read_c0_entryhi();
+
+			/* Don't blow away guest kernel entries */
+			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+				continue;
+			}
+		}
+
+		/* Make sure all entries differ. */
+		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+		mtc0_tlbw_hazard();
+		write_c0_entrylo0(0);
+		mtc0_tlbw_hazard();
+		write_c0_entrylo1(0);
+		mtc0_tlbw_hazard();
+
+		tlb_write_indexed();
+		mtc0_tlbw_hazard();
+	}
+
+	tlbw_use_hazard();
+
+	write_c0_entryhi(old_entryhi);
+	write_c0_pagemask(old_pagemask);
+	mtc0_tlbw_hazard();
+	tlbw_use_hazard();
+
+	local_irq_restore(flags);
+}
+
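+/*
+ * Hand out the next ASID for @mm on @cpu, flushing the TLB (and a
+ * virtually tagged icache, if present) whenever the ASID space wraps
+ * and a new ASID cycle begins.
+ */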
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+			struct kvm_vcpu *vcpu)
+{
+	unsigned long asid = asid_cache(cpu);
+
+	asid = ASID_INC(asid);
+	if (!ASID_MASK(asid)) {
+		if (cpu_has_vtag_icache)
+			flush_icache_all();
+
+		kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+		if (!asid)      /* fix version if needed */
+			asid = ASID_FIRST_VERSION;
+	}
+
+	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
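+/* Save the current CPU's host TLB into this VCPU's shadow copy. */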
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags;
+	unsigned long old_entryhi;
+	unsigned long old_pagemask;
+	int entry = 0;
+	int cpu = smp_processor_id();
+
+	local_irq_save(flags);
+
+	old_entryhi = read_c0_entryhi();
+	old_pagemask = read_c0_pagemask();
+
+	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+		tlb_read();
+		tlbw_use_hazard();
+
+		vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+		vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+		vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+	}
+
+	write_c0_entryhi(old_entryhi);
+	write_c0_pagemask(old_pagemask);
+	mtc0_tlbw_hazard();
+
+	local_irq_restore(flags);
+}
+
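+/* Reload the current CPU's host TLB from this VCPU's shadow copy. */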
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags;
+	unsigned long old_ctx;
+	int entry;
+	int cpu = smp_processor_id();
+
+	local_irq_save(flags);
+
+	old_ctx = read_c0_entryhi();
+
+	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+		write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+		mtc0_tlbw_hazard();
+		write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+		write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+
+		tlb_write_indexed();
+		tlbw_use_hazard();
+	}
+
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	mtc0_tlbw_hazard();
+	local_irq_restore(flags);
+}
+
+
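+/*
+ * Flush the entire host TLB by writing a unique invalid EntryHi and a
+ * zeroed EntryLo pair into every slot.
+ */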
+void kvm_local_flush_tlb_all(void)
+{
+	unsigned long flags;
+	unsigned long old_ctx;
+	int entry = 0;
+
+	local_irq_save(flags);
+	/* Save old context and create impossible VPN2 value */
+	old_ctx = read_c0_entryhi();
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+
+	/* Blast 'em all away. */
+	while (entry < current_cpu_data.tlbsize) {
+		/* Make sure all entries differ. */
+		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+		write_c0_index(entry);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+		entry++;
+	}
+	tlbw_use_hazard();
+	write_c0_entryhi(old_ctx);
+	mtc0_tlbw_hazard();
+
+	local_irq_restore(flags);
+}
+
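+/*
+ * Seed every CPU's shadow TLB for this VCPU with unique invalid entries
+ * so that nothing stale can be loaded before the first save.
+ */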
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+	int cpu, entry;
+
+	for_each_possible_cpu(cpu) {
+		for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+			vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+			    UNIQUE_ENTRYHI(entry);
+			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+			vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+			vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+			    read_c0_pagemask();
+#ifdef DEBUG
+			kvm_debug
+			    ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+			     cpu, entry,
+			     vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+			     vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+		}
+	}
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	unsigned long flags;
+	int newasid = 0;
+
+#ifdef DEBUG
+	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+	/* Allocate new kernel and user ASIDs if needed */
+
+	local_irq_save(flags);
+
+	if (((vcpu->arch.
+	      guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+		vcpu->arch.guest_kernel_asid[cpu] =
+		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
+		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+		vcpu->arch.guest_user_asid[cpu] =
+		    vcpu->arch.guest_user_mm.context.asid[cpu];
+		newasid++;
+
+		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+			 cpu_context(cpu, current->mm));
+		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
+		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+			 vcpu->arch.guest_user_asid[cpu]);
+	}
+
+	if (vcpu->arch.last_sched_cpu != cpu) {
+		kvm_info("[%d->%d] KVM VCPU[%d] switch\n",
+			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+	}
+
+	/* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+	if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+		kvm_mips_flush_host_tlb(0);
+		kvm_shadow_tlb_load(vcpu);
+	}
+#endif
+
+	if (!newasid) {
+		/* If we preempted while the guest was executing, then reload
+		 * the pre-empted ASID.
+		 */
+		if (current->flags & PF_VCPU) {
+			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+			ehb();
+		}
+	} else {
+		/* New ASIDs were allocated for the VM */
+
+		/* Were we in guest context? If so, the pre-empted ASID is no
+		 * longer valid; we need to set it to what it should be based
+		 * on the mode of the guest (kernel/user).
+		 */
+		if (current->flags & PF_VCPU) {
+			if (KVM_GUEST_KERNEL_MODE(vcpu))
+				write_c0_entryhi(ASID_MASK(vcpu->arch.
+						 guest_kernel_asid[cpu]));
+			else
+				write_c0_entryhi(ASID_MASK(vcpu->arch.
+						 guest_user_asid[cpu]));
+			ehb();
+		}
+	}
+
+	local_irq_restore(flags);
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags;
+	uint32_t cpu;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+
+	vcpu->arch.preempt_entryhi = read_c0_entryhi();
+	vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+	if ((atomic_read(&kvm_mips_instance) > 1)) {
+		kvm_shadow_tlb_put(vcpu);
+	}
+#endif
+
+	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+	     ASID_VERSION_MASK)) {
+		kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+			  cpu_context(cpu, current->mm));
+		drop_mmu_context(current->mm, cpu);
+	}
+	write_c0_entryhi(cpu_asid(cpu, current->mm));
+	ehb();
+
+	local_irq_restore(flags);
+}
+
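+/*
+ * Fetch the guest instruction at @opc. TLB-mapped guest addresses may
+ * require probing the host TLB and, on a miss, faulting the mapping in
+ * from the guest TLB first; guest KSEG0 addresses are translated
+ * directly to a host physical address. Returns KVM_INVALID_INST on
+ * failure.
+ */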
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	unsigned long paddr, flags;
+	uint32_t inst;
+	int index;
+
+	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+		local_irq_save(flags);
+		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+		if (index >= 0) {
+			inst = *(opc);
+		} else {
+			index = kvm_mips_guest_tlb_lookup(vcpu,
+					((unsigned long) opc & VPN2_MASK) |
+					ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+			if (index < 0) {
+				kvm_err("%s: guest TLB lookup failed for %p, vcpu: %p, ASID: %#lx\n",
+					__func__, opc, vcpu, read_c0_entryhi());
+				kvm_mips_dump_host_tlbs();
+				local_irq_restore(flags);
+				return KVM_INVALID_INST;
+			}
+			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+							     &vcpu->arch.
+							     guest_tlb[index],
+							     NULL, NULL);
+			inst = *(opc);
+		}
+		local_irq_restore(flags);
+	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
+		paddr =
+		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+							 (unsigned long) opc);
+		inst = *(uint32_t *) CKSEG0ADDR(paddr);
+	} else {
+		kvm_err("%s: illegal address: %p\n", __func__, opc);
+		return KVM_INVALID_INST;
+	}
+
+	return inst;
+}
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 0000000..466aeef
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
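+/*
+ * Trap-and-emulate GVA->GPA translation: only unmapped KSEG0/KSEG1
+ * guest virtual addresses can be translated directly; anything else is
+ * reported and returned as KVM_INVALID_ADDR.
+ */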
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+	gpa_t gpa;
+	uint32_t kseg = KSEGX(gva);
+
+	if ((kseg == CKSEG0) || (kseg == CKSEG1))
+		gpa = CPHYSADDR(gva);
+	else {
+		printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+		kvm_mips_dump_host_tlbs();
+		gpa = KVM_INVALID_ADDR;
+	}
+
+#ifdef DEBUG
+	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+	return gpa;
+}
+
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
+		er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+	else
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+	switch (er) {
+	case EMULATE_DONE:
+		ret = RESUME_GUEST;
+		break;
+
+	case EMULATE_FAIL:
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		break;
+
+	case EMULATE_WAIT:
+		run->exit_reason = KVM_EXIT_INTR;
+		ret = RESUME_HOST;
+		break;
+
+	default:
+		BUG();
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+		kvm_debug
+		    ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+#endif
+		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+		if (er == EMULATE_DONE)
+			ret = RESUME_GUEST;
+		else {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+		/* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+		 * using HIGHMEM. Need to address this in a HIGHMEM kernel
+		 */
+		printk
+		    ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	} else {
+		printk
+		    ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
+		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+		kvm_debug
+		    ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+#endif
+		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+		if (er == EMULATE_DONE)
+			ret = RESUME_GUEST;
+		else {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+		/* All KSEG0 faults are handled by KVM, as the guest kernel does not
+		 * expect to ever get them
+		 */
+		if (kvm_mips_handle_kseg0_tlb_fault
+		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else {
+		kvm_err
+		    ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
+		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+		kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
+			  vcpu->arch.pc, badvaddr);
+#endif
+
+		/* User Address (UA) fault; this could happen if
+		 * (1) the TLB entry is not present/valid in both the guest and
+		 *     the shadow host TLBs; in this case we pass the fault on
+		 *     to the guest kernel and let it handle it.
+		 * (2) the TLB entry is present in the guest TLB but not in the
+		 *     shadow; in this case we inject the entry from the guest
+		 *     TLB into the shadow host TLB.
+		 */
+
+		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+		if (er == EMULATE_DONE)
+			ret = RESUME_GUEST;
+		else {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+		if (kvm_mips_handle_kseg0_tlb_fault
+		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		}
+	} else {
+		printk
+		    ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+		kvm_mips_dump_host_tlbs();
+		kvm_arch_vcpu_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (KVM_GUEST_KERNEL_MODE(vcpu)
+	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+		kvm_debug("Emulate Store to MMIO space\n");
+#endif
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			printk("Emulate Store to MMIO space failed\n");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
+	} else {
+		printk
+		    ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			printk("Emulate Load from MMIO space failed\n");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
+	} else {
+		printk
+		    ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+		     cause, opc, badvaddr);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+		er = EMULATE_FAIL;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE)
+		ret = RESUME_GUEST;
+	else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE)
+		ret = RESUME_GUEST;
+	else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE)
+		ret = RESUME_GUEST;
+	else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
+static int
+kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
+	kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
+	kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
+	kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
+	kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
+
+	kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
+	kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
+	kvm_write_c0_guest_pagemask(cop0,
+				    regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
+	kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
+	kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
+
+	return 0;
+}
+
+static int
+kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
+	regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
+	regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
+	regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
+	regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
+
+	regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
+	regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
+	regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
+	    kvm_read_c0_guest_pagemask(cop0);
+	regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
+	regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
+
+	regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
+	regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
+	regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
+	regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
+	regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
+
+	return 0;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+	return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	uint32_t config1;
+	int vcpu_id = vcpu->vcpu_id;
+
+	/* Arch specific stuff: set up the config registers properly so that
+	 * the guest will come up as expected; for now we simulate a
+	 * MIPS 24kc.
+	 */
+	kvm_write_c0_guest_prid(cop0, 0x00019300);
+	kvm_write_c0_guest_config(cop0,
+				  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+				  (MMU_TYPE_R4000 << CP0C0_MT));
+
+	/* Read the cache characteristics from the host Config1 Register */
+	config1 = (read_c0_config1() & ~0x7f);
+
+	/* Set up MMU size */
+	config1 &= ~(0x3f << 25);
+	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+	/* We unset some bits that we aren't emulating */
+	config1 &=
+	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
+	kvm_write_c0_guest_config1(cop0, config1);
+
+	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+	kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
+					 (1 << CP0C3_ULRI));
+
+	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+	/* Set up IntCtl defaults: compatibility mode for timer interrupts (HW5) */
+	kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+	/* Put the VCPU ID in the CPUNum field of Ebase to handle SMP guests */
+	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+	return 0;
+}
+
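+/*
+ * The trap-and-emulate backend: the exit handlers plus the setup,
+ * register access and interrupt callbacks invoked by the generic
+ * KVM/MIPS core.
+ */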
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+	/* exit handlers */
+	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+	.handle_syscall = kvm_trap_emul_handle_syscall,
+	.handle_res_inst = kvm_trap_emul_handle_res_inst,
+	.handle_break = kvm_trap_emul_handle_break,
+
+	.vm_init = kvm_trap_emul_vm_init,
+	.vcpu_init = kvm_trap_emul_vcpu_init,
+	.vcpu_setup = kvm_trap_emul_vcpu_setup,
+	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+	.queue_timer_int = kvm_mips_queue_timer_int_cb,
+	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+	.queue_io_int = kvm_mips_queue_io_int_cb,
+	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+	.irq_deliver = kvm_mips_irq_deliver_cb,
+	.irq_clear = kvm_mips_irq_clear_cb,
+	.vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
+	.vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+	*install_callbacks = &kvm_trap_emul_callbacks;
+	return 0;
+}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 0000000..bc9e0f4
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
+TRACE_EVENT(kvm_exit,
+	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+	    TP_ARGS(vcpu, reason),
+	    TP_STRUCT__entry(
+			__field(struct kvm_vcpu *, vcpu)
+			__field(unsigned int, reason)
+	    ),
+
+	    TP_fast_assign(
+			__entry->vcpu = vcpu;
+			__entry->reason = reason;
+	    ),
+
+	    TP_printk("[%s]PC: 0x%08lx",
+		      kvm_mips_exit_types_str[__entry->reason],
+		      __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index a64daee..3b2a1e7 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -19,7 +19,7 @@
  */
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
@@ -41,7 +41,7 @@
  */
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
@@ -63,7 +63,7 @@
  */
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
@@ -86,7 +86,7 @@
 int __mips_test_and_set_bit(unsigned long nr,
 			    volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
@@ -112,7 +112,7 @@
 int __mips_test_and_set_bit_lock(unsigned long nr,
 				 volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
@@ -137,7 +137,7 @@
  */
 int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
@@ -162,7 +162,7 @@
  */
 int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-	volatile unsigned long *a = addr;
+	unsigned long *a = (unsigned long *)addr;
 	unsigned bit = nr & SZLONG_MASK;
 	unsigned long mask;
 	unsigned long flags;
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 32b9f21..8a12d00 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
+#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -55,7 +56,7 @@
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = s_entryhi & 0xff;
+	asid = ASID_MASK(s_entryhi);
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i);
@@ -85,7 +86,7 @@
 
 			printk("va=%0*lx asid=%02lx\n",
 			       width, (entryhi & ~0x1fffUL),
-			       entryhi & 0xff);
+			       ASID_MASK(entryhi));
 			printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
 			       width,
 			       (entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 053d3b0..0580194 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -5,7 +5,8 @@
  *
  * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -19,6 +20,20 @@
 #define LONG_S_R sdr
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
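+/* Stores are done in pairs ('swp'-style), so the store unit is two longs. */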
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef  LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
 #define EX(insn,reg,addr,handler)			\
 9:	insn	reg, addr;				\
 	.section __ex_table,"a";			\
@@ -26,23 +41,25 @@
 	.previous
 
 	.macro	f_fill64 dst, offset, val, fixup
-	EX(LONG_S, \val, (\offset +  0 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  1 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  2 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  3 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  4 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  5 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  6 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  7 * LONGSIZE)(\dst), \fixup)
-#if LONGSIZE == 4
-	EX(LONG_S, \val, (\offset +  8 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset +  9 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
-	EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+	EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+	EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+	EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
 #endif
 	.endm
 
@@ -71,16 +88,20 @@
 1:
 
 FEXPORT(__bzero)
-	sltiu		t0, a2, LONGSIZE	/* very small region? */
+	sltiu		t0, a2, STORSIZE	/* very small region? */
 	bnez		t0, .Lsmall_memset
-	 andi		t0, a0, LONGMASK	/* aligned? */
+	 andi		t0, a0, STORMASK	/* aligned? */
 
+#ifdef CONFIG_CPU_MICROMIPS
+	move		t8, a1			/* used by 'swp' instruction */
+	move		t9, a1
+#endif
 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
 	beqz		t0, 1f
-	 PTR_SUBU	t0, LONGSIZE		/* alignment in bytes */
+	 PTR_SUBU	t0, STORSIZE		/* alignment in bytes */
 #else
 	.set		noat
-	li		AT, LONGSIZE
+	li		AT, STORSIZE
 	beqz		t0, 1f
 	 PTR_SUBU	t0, AT			/* alignment in bytes */
 	.set		at
@@ -99,24 +120,27 @@
 1:	ori		t1, a2, 0x3f		/* # of full blocks */
 	xori		t1, 0x3f
 	beqz		t1, .Lmemset_partial	/* no block to fill */
-	 andi		t0, a2, 0x40-LONGSIZE
+	 andi		t0, a2, 0x40-STORSIZE
 
 	PTR_ADDU	t1, a0			/* end address */
 	.set		reorder
 1:	PTR_ADDIU	a0, 64
 	R10KCBARRIER(0(ra))
-	f_fill64 a0, -64, a1, .Lfwd_fixup
+	f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
 	bne		t1, a0, 1b
 	.set		noreorder
 
 .Lmemset_partial:
 	R10KCBARRIER(0(ra))
 	PTR_LA		t1, 2f			/* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+	LONG_SRL	t7, t0, 1
+#endif
 #if LONGSIZE == 4
-	PTR_SUBU	t1, t0
+	PTR_SUBU	t1, FILLPTRG
 #else
 	.set		noat
-	LONG_SRL		AT, t0, 1
+	LONG_SRL	AT, FILLPTRG, 1
 	PTR_SUBU	t1, AT
 	.set		at
 #endif
@@ -126,9 +150,9 @@
 	.set		push
 	.set		noreorder
 	.set		nomacro
-	f_fill64 a0, -64, a1, .Lpartial_fixup	/* ... but first do longs ... */
+	f_fill64 a0, -64, FILL64RG, .Lpartial_fixup	/* ... but first do longs ... */
 2:	.set		pop
-	andi		a2, LONGMASK		/* At most one long to go */
+	andi		a2, STORMASK		/* At most one long to go */
 
 	beqz		a2, 1f
 	 PTR_ADDU	a0, a2			/* What's left */
@@ -169,7 +193,7 @@
 
 .Lpartial_fixup:
 	PTR_L		t0, TI_TASK($28)
-	andi		a2, LONGMASK
+	andi		a2, STORMASK
 	LONG_L		t0, THREAD_BUADDR(t0)
 	LONG_ADDU	a2, t1
 	jr		ra
@@ -177,4 +201,4 @@
 
 .Llast_fixup:
 	jr		ra
-	 andi		v1, a2, LONGMASK
+	 andi		v1, a2, STORMASK
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index cd160be..6807f71 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
@@ -34,8 +35,11 @@
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,89 +56,54 @@
 	"	.set	noreorder					\n"
 	"	mtc0	$1,$12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-	preempt_disable();
-	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
+	"	mfc0	%[flags], $2, 1				\n"
+	"	ori	$1, %[flags], 0x400				\n"
 	"	.set	noreorder					\n"
 	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
+	"	andi	%[flags], %[flags], 0x400			\n"
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
+	"	mfc0	%[flags], $12					\n"
+	"	ori	$1, %[flags], 0x1f				\n"
 	"	xori	$1, 0x1f					\n"
 	"	.set	noreorder					\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	preempt_disable();
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	preempt_enable();
+
 	return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
-
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
-	"	.set	push						\n"
-	"	.set	noreorder					\n"
-	"	.set	noat						\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
-	/* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
-	/* see irqflags.h for inline function */
-#else
-	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
-	"	ori	$1, 0x1f					\n"
-	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
-#endif
-	"	irq_disable_hazard					\n"
-	"	.set	pop						\n"
-	"	.endm							\n");
-
 notrace void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
@@ -149,11 +118,36 @@
 		smtc_ipi_replay();
 #endif
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	andi	%[flags], 0x400					\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@
 	unsigned long __tmp1;
 
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	andi	%[flags], 0x400					\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2..8327698 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@
 	unsigned int asid;
 	unsigned long entryhi, entrylo0;
 
-	asid = read_c0_entryhi() & 0xfc0;
+	asid = ASID_MASK(read_c0_entryhi());
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i<<8);
@@ -35,7 +36,7 @@
 
 		/* Unused entries have a virtual address of KSEG0.  */
 		if ((entryhi & 0xffffe000) != 0x80000000
-		    && (entryhi & 0xfc0) == asid) {
+		    && (ASID_MASK(entryhi) == asid)) {
 			/*
 			 * Only print entries in use
 			 */
@@ -44,7 +45,7 @@
 			printk("va=%08lx asid=%08lx"
 			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
 			       (entryhi & 0xffffe000),
-			       entryhi & 0xfc0,
+			       ASID_MASK(entryhi),
 			       entrylo0 & PAGE_MASK,
 			       (entrylo0 & (1 << 11)) ? 1 : 0,
 			       (entrylo0 & (1 << 10)) ? 1 : 0,
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index fdbb970..e362dcd 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -3,8 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
- * Copyright (c) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@
 
 FEXPORT(__strlen_user_nocheck_asm)
 	move		v0, a0
-1:	EX(lb, t0, (v0), .Lfault)
+1:	EX(lbu, v1, (v0), .Lfault)
 	PTR_ADDIU	v0, 1
-	bnez		t0, 1b
+	bnez		v1, 1b
 	PTR_SUBU	v0, a0
 	jr		ra
 	END(__strlen_user_asm)
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index bad5394..92870b6 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -3,7 +3,8 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -33,26 +34,27 @@
 	bnez		v0, .Lfault
 
 FEXPORT(__strncpy_from_user_nocheck_asm)
-	move		v0, zero
-	move		v1, a1
 	.set		noreorder
-1:	EX(lbu, t0, (v1), .Lfault)
+	move		t0, zero
+	move		v1, a1
+1:	EX(lbu, v0, (v1), .Lfault)
 	PTR_ADDIU	v1, 1
 	R10KCBARRIER(0(ra))
-	beqz		t0, 2f
-	 sb		t0, (a0)
-	PTR_ADDIU	v0, 1
-	.set		reorder
-	PTR_ADDIU	a0, 1
-	bne		v0, a2, 1b
-2:	PTR_ADDU	t0, a1, v0
-	xor		t0, a1
-	bltz		t0, .Lfault
+	beqz		v0, 2f
+	 sb		v0, (a0)
+	PTR_ADDIU	t0, 1
+	bne		t0, a2, 1b
+	 PTR_ADDIU	a0, 1
+2:	PTR_ADDU	v0, a1, t0
+	xor		v0, a1
+	bltz		v0, .Lfault
+	 nop
 	jr		ra			# return n
+	 move		v0, t0
 	END(__strncpy_from_user_asm)
 
-.Lfault:	li		v0, -EFAULT
-	jr		ra
+.Lfault: jr		ra
+	  li		v0, -EFAULT
 
 	.section	__ex_table,"a"
 	PTR		1b, .Lfault
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index beea03c..fcacea5 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -35,7 +35,7 @@
 	PTR_ADDU	a1, a0			# stop pointer
 1:	beq		v0, a1, 1f		# limit reached?
 	EX(lb, t0, (v0), .Lfault)
-	PTR_ADDU	v0, 1
+	PTR_ADDIU	v0, 1
 	bnez		t0, 1b
 1:	PTR_SUBU	v0, a0
 	jr		ra
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index e526488..4c57b3e 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -4,7 +4,7 @@
 
 obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
     pci.o bonito-irq.o mem.o machtype.o platform.o
-obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_GPIOLIB) += gpio.o
 
 #
 # Serial port support
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index afb5a0b..f03771900 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -45,6 +45,7 @@
 #include <asm/signal.h>
 #include <asm/mipsregs.h>
 #include <asm/fpu_emulator.h>
+#include <asm/fpu.h>
 #include <asm/uaccess.h>
 #include <asm/branch.h>
 
@@ -81,6 +82,11 @@
 /* Determine rounding mode from the RM bits of the FCSR */
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
+/* microMIPS bitfields */
+#define MM_POOL32A_MINOR_MASK	0x3f
+#define MM_POOL32A_MINOR_SHIFT	0x6
+#define MM_MIPS32_COND_FC	0x30
+
 /* Convert Mips rounding mode (0..3) to IEEE library modes. */
 static const unsigned char ieee_rm[4] = {
 	[FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@
 };
 #endif
 
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
+static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
+static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
+static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
+static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
+
+/*
+ * This function translates a 32-bit microMIPS instruction
+ * into a 32-bit MIPS32 instruction. Returns 0 on success
+ * and SIGILL otherwise.
+ */
+static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
+{
+	union mips_instruction insn = *insn_ptr;
+	union mips_instruction mips32_insn = insn;
+	int func, fmt, op;
+
+	switch (insn.mm_i_format.opcode) {
+	case mm_ldc132_op:
+		mips32_insn.mm_i_format.opcode = ldc1_op;
+		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+		break;
+	case mm_lwc132_op:
+		mips32_insn.mm_i_format.opcode = lwc1_op;
+		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+		break;
+	case mm_sdc132_op:
+		mips32_insn.mm_i_format.opcode = sdc1_op;
+		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+		break;
+	case mm_swc132_op:
+		mips32_insn.mm_i_format.opcode = swc1_op;
+		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+		break;
+	case mm_pool32i_op:
+		/* NOTE: the branch offset is shifted left by 1 in microMIPS mode. */
+		if ((insn.mm_i_format.rt == mm_bc1f_op) ||
+		    (insn.mm_i_format.rt == mm_bc1t_op)) {
+			mips32_insn.fb_format.opcode = cop1_op;
+			mips32_insn.fb_format.bc = bc_op;
+			mips32_insn.fb_format.flag =
+				(insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
+		} else
+			return SIGILL;
+		break;
+	case mm_pool32f_op:
+		switch (insn.mm_fp0_format.func) {
+		case mm_32f_01_op:
+		case mm_32f_11_op:
+		case mm_32f_02_op:
+		case mm_32f_12_op:
+		case mm_32f_41_op:
+		case mm_32f_51_op:
+		case mm_32f_42_op:
+		case mm_32f_52_op:
+			op = insn.mm_fp0_format.func;
+			if (op == mm_32f_01_op)
+				func = madd_s_op;
+			else if (op == mm_32f_11_op)
+				func = madd_d_op;
+			else if (op == mm_32f_02_op)
+				func = nmadd_s_op;
+			else if (op == mm_32f_12_op)
+				func = nmadd_d_op;
+			else if (op == mm_32f_41_op)
+				func = msub_s_op;
+			else if (op == mm_32f_51_op)
+				func = msub_d_op;
+			else if (op == mm_32f_42_op)
+				func = nmsub_s_op;
+			else
+				func = nmsub_d_op;
+			mips32_insn.fp6_format.opcode = cop1x_op;
+			mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
+			mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
+			mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
+			mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
+			mips32_insn.fp6_format.func = func;
+			break;
+		case mm_32f_10_op:
+			func = -1;	/* Invalid */
+			op = insn.mm_fp5_format.op & 0x7;
+			if (op == mm_ldxc1_op)
+				func = ldxc1_op;
+			else if (op == mm_sdxc1_op)
+				func = sdxc1_op;
+			else if (op == mm_lwxc1_op)
+				func = lwxc1_op;
+			else if (op == mm_swxc1_op)
+				func = swxc1_op;
+
+			if (func != -1) {
+				mips32_insn.r_format.opcode = cop1x_op;
+				mips32_insn.r_format.rs =
+					insn.mm_fp5_format.base;
+				mips32_insn.r_format.rt =
+					insn.mm_fp5_format.index;
+				mips32_insn.r_format.rd = 0;
+				mips32_insn.r_format.re = insn.mm_fp5_format.fd;
+				mips32_insn.r_format.func = func;
+			} else
+				return SIGILL;
+			break;
+		case mm_32f_40_op:
+			op = -1;	/* Invalid */
+			if (insn.mm_fp2_format.op == mm_fmovt_op)
+				op = 1;
+			else if (insn.mm_fp2_format.op == mm_fmovf_op)
+				op = 0;
+			if (op != -1) {
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt =
+					sdps_format[insn.mm_fp2_format.fmt];
+				mips32_insn.fp0_format.ft =
+					(insn.mm_fp2_format.cc<<2) + op;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp2_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp2_format.fd;
+				mips32_insn.fp0_format.func = fmovc_op;
+			} else
+				return SIGILL;
+			break;
+		case mm_32f_60_op:
+			func = -1;	/* Invalid */
+			if (insn.mm_fp0_format.op == mm_fadd_op)
+				func = fadd_op;
+			else if (insn.mm_fp0_format.op == mm_fsub_op)
+				func = fsub_op;
+			else if (insn.mm_fp0_format.op == mm_fmul_op)
+				func = fmul_op;
+			else if (insn.mm_fp0_format.op == mm_fdiv_op)
+				func = fdiv_op;
+			if (func != -1) {
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt =
+					sdps_format[insn.mm_fp0_format.fmt];
+				mips32_insn.fp0_format.ft =
+					insn.mm_fp0_format.ft;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp0_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp0_format.fd;
+				mips32_insn.fp0_format.func = func;
+			} else
+				return SIGILL;
+			break;
+		case mm_32f_70_op:
+			func = -1;	/* Invalid */
+			if (insn.mm_fp0_format.op == mm_fmovn_op)
+				func = fmovn_op;
+			else if (insn.mm_fp0_format.op == mm_fmovz_op)
+				func = fmovz_op;
+			if (func != -1) {
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt =
+					sdps_format[insn.mm_fp0_format.fmt];
+				mips32_insn.fp0_format.ft =
+					insn.mm_fp0_format.ft;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp0_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp0_format.fd;
+				mips32_insn.fp0_format.func = func;
+			} else
+				return SIGILL;
+			break;
+		case mm_32f_73_op:    /* POOL32FXF */
+			switch (insn.mm_fp1_format.op) {
+			case mm_movf0_op:
+			case mm_movf1_op:
+			case mm_movt0_op:
+			case mm_movt1_op:
+				if ((insn.mm_fp1_format.op & 0x7f) ==
+				    mm_movf0_op)
+					op = 0;
+				else
+					op = 1;
+				mips32_insn.r_format.opcode = spec_op;
+				mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
+				mips32_insn.r_format.rt =
+					(insn.mm_fp4_format.cc << 2) + op;
+				mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
+				mips32_insn.r_format.re = 0;
+				mips32_insn.r_format.func = movc_op;
+				break;
+			case mm_fcvtd0_op:
+			case mm_fcvtd1_op:
+			case mm_fcvts0_op:
+			case mm_fcvts1_op:
+				if ((insn.mm_fp1_format.op & 0x7f) ==
+				    mm_fcvtd0_op) {
+					func = fcvtd_op;
+					fmt = swl_format[insn.mm_fp3_format.fmt];
+				} else {
+					func = fcvts_op;
+					fmt = dwl_format[insn.mm_fp3_format.fmt];
+				}
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt = fmt;
+				mips32_insn.fp0_format.ft = 0;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp3_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp3_format.rt;
+				mips32_insn.fp0_format.func = func;
+				break;
+			case mm_fmov0_op:
+			case mm_fmov1_op:
+			case mm_fabs0_op:
+			case mm_fabs1_op:
+			case mm_fneg0_op:
+			case mm_fneg1_op:
+				if ((insn.mm_fp1_format.op & 0x7f) ==
+				    mm_fmov0_op)
+					func = fmov_op;
+				else if ((insn.mm_fp1_format.op & 0x7f) ==
+					 mm_fabs0_op)
+					func = fabs_op;
+				else
+					func = fneg_op;
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt =
+					sdps_format[insn.mm_fp3_format.fmt];
+				mips32_insn.fp0_format.ft = 0;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp3_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp3_format.rt;
+				mips32_insn.fp0_format.func = func;
+				break;
+			case mm_ffloorl_op:
+			case mm_ffloorw_op:
+			case mm_fceill_op:
+			case mm_fceilw_op:
+			case mm_ftruncl_op:
+			case mm_ftruncw_op:
+			case mm_froundl_op:
+			case mm_froundw_op:
+			case mm_fcvtl_op:
+			case mm_fcvtw_op:
+				if (insn.mm_fp1_format.op == mm_ffloorl_op)
+					func = ffloorl_op;
+				else if (insn.mm_fp1_format.op == mm_ffloorw_op)
+					func = ffloor_op;
+				else if (insn.mm_fp1_format.op == mm_fceill_op)
+					func = fceill_op;
+				else if (insn.mm_fp1_format.op == mm_fceilw_op)
+					func = fceil_op;
+				else if (insn.mm_fp1_format.op == mm_ftruncl_op)
+					func = ftruncl_op;
+				else if (insn.mm_fp1_format.op == mm_ftruncw_op)
+					func = ftrunc_op;
+				else if (insn.mm_fp1_format.op == mm_froundl_op)
+					func = froundl_op;
+				else if (insn.mm_fp1_format.op == mm_froundw_op)
+					func = fround_op;
+				else if (insn.mm_fp1_format.op == mm_fcvtl_op)
+					func = fcvtl_op;
+				else
+					func = fcvtw_op;
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt =
+					sd_format[insn.mm_fp1_format.fmt];
+				mips32_insn.fp0_format.ft = 0;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp1_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp1_format.rt;
+				mips32_insn.fp0_format.func = func;
+				break;
+			case mm_frsqrt_op:
+			case mm_fsqrt_op:
+			case mm_frecip_op:
+				if (insn.mm_fp1_format.op == mm_frsqrt_op)
+					func = frsqrt_op;
+				else if (insn.mm_fp1_format.op == mm_fsqrt_op)
+					func = fsqrt_op;
+				else
+					func = frecip_op;
+				mips32_insn.fp0_format.opcode = cop1_op;
+				mips32_insn.fp0_format.fmt =
+					sdps_format[insn.mm_fp1_format.fmt];
+				mips32_insn.fp0_format.ft = 0;
+				mips32_insn.fp0_format.fs =
+					insn.mm_fp1_format.fs;
+				mips32_insn.fp0_format.fd =
+					insn.mm_fp1_format.rt;
+				mips32_insn.fp0_format.func = func;
+				break;
+			case mm_mfc1_op:
+			case mm_mtc1_op:
+			case mm_cfc1_op:
+			case mm_ctc1_op:
+				if (insn.mm_fp1_format.op == mm_mfc1_op)
+					op = mfc_op;
+				else if (insn.mm_fp1_format.op == mm_mtc1_op)
+					op = mtc_op;
+				else if (insn.mm_fp1_format.op == mm_cfc1_op)
+					op = cfc_op;
+				else
+					op = ctc_op;
+				mips32_insn.fp1_format.opcode = cop1_op;
+				mips32_insn.fp1_format.op = op;
+				mips32_insn.fp1_format.rt =
+					insn.mm_fp1_format.rt;
+				mips32_insn.fp1_format.fs =
+					insn.mm_fp1_format.fs;
+				mips32_insn.fp1_format.fd = 0;
+				mips32_insn.fp1_format.func = 0;
+				break;
+			default:
+				return SIGILL;
+				break;
+			}
+			break;
+		case mm_32f_74_op:	/* c.cond.fmt */
+			mips32_insn.fp0_format.opcode = cop1_op;
+			mips32_insn.fp0_format.fmt =
+				sdps_format[insn.mm_fp4_format.fmt];
+			mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
+			mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
+			mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
+			mips32_insn.fp0_format.func =
+				insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
+			break;
+		default:
+			return SIGILL;
+			break;
+		}
+		break;
+	default:
+		return SIGILL;
+		break;
+	}
+
+	*insn_ptr = mips32_insn;
+	return 0;
+}
+
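+/*
+ * Decode the microMIPS instruction in @dec_insn and, when it is a
+ * branch or jump, compute the continuation PC (branch target or
+ * fall-through) into *contpc and return 1.
+ */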
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+		     unsigned long *contpc)
+{
+	union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+	int bc_false = 0;
+	unsigned int fcr31;
+	unsigned int bit;
+
+	switch (insn.mm_i_format.opcode) {
+	case mm_pool32a_op:
+		if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+		    mm_pool32axf_op) {
+			switch (insn.mm_i_format.simmediate >>
+				MM_POOL32A_MINOR_SHIFT) {
+			case mm_jalr_op:
+			case mm_jalrhb_op:
+			case mm_jalrs_op:
+			case mm_jalrshb_op:
+				if (insn.mm_i_format.rt != 0)	/* Not mm_jr */
+					regs->regs[insn.mm_i_format.rt] =
+						regs->cp0_epc +
+						dec_insn.pc_inc +
+						dec_insn.next_pc_inc;
+				*contpc = regs->regs[insn.mm_i_format.rs];
+				return 1;
+				break;
+			}
+		}
+		break;
+	case mm_pool32i_op:
+		switch (insn.mm_i_format.rt) {
+		case mm_bltzals_op:
+		case mm_bltzal_op:
+			regs->regs[31] = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+			/* Fall through */
+		case mm_bltz_op:
+			if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.mm_i_format.simmediate << 1);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			return 1;
+			break;
+		case mm_bgezals_op:
+		case mm_bgezal_op:
+			regs->regs[31] = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			/* Fall through */
+		case mm_bgez_op:
+			if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.mm_i_format.simmediate << 1);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			return 1;
+			break;
+		case mm_blez_op:
+			if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.mm_i_format.simmediate << 1);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			return 1;
+			break;
+		case mm_bgtz_op:
+			if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.mm_i_format.simmediate << 1);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			return 1;
+			break;
+		case mm_bc2f_op:
+		case mm_bc1f_op:
+			bc_false = 1;
+			/* Fall through */
+		case mm_bc2t_op:
+		case mm_bc1t_op:
+			preempt_disable();
+			if (is_fpu_owner())
+				asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+			else
+				fcr31 = current->thread.fpu.fcr31;
+			preempt_enable();
+
+			if (bc_false)
+				fcr31 = ~fcr31;
+
+			bit = (insn.mm_i_format.rs >> 2);
+			bit += (bit != 0);
+			bit += 23;
+			if (fcr31 & (1 << bit))
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.mm_i_format.simmediate << 1);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc + dec_insn.next_pc_inc;
+			return 1;
+			break;
+		}
+		break;
+	case mm_pool16c_op:
+		switch (insn.mm_i_format.rt) {
+		case mm_jalr16_op:
+		case mm_jalrs16_op:
+			regs->regs[31] = regs->cp0_epc +
+				dec_insn.pc_inc + dec_insn.next_pc_inc;
+			/* Fall through */
+		case mm_jr16_op:
+			*contpc = regs->regs[insn.mm_i_format.rs];
+			return 1;
+			break;
+		}
+		break;
+	case mm_beqz16_op:
+		if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.mm_b1_format.simmediate << 1);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc + dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case mm_bnez16_op:
+		if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.mm_b1_format.simmediate << 1);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc + dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case mm_b16_op:
+		*contpc = regs->cp0_epc + dec_insn.pc_inc +
+			 (insn.mm_b0_format.simmediate << 1);
+		return 1;
+		break;
+	case mm_beq32_op:
+		if (regs->regs[insn.mm_i_format.rs] ==
+		    regs->regs[insn.mm_i_format.rt])
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.mm_i_format.simmediate << 1);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case mm_bne32_op:
+		if (regs->regs[insn.mm_i_format.rs] !=
+		    regs->regs[insn.mm_i_format.rt])
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.mm_i_format.simmediate << 1);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc + dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case mm_jalx32_op:
+		regs->regs[31] = regs->cp0_epc +
+			dec_insn.pc_inc + dec_insn.next_pc_inc;
+		*contpc = regs->cp0_epc + dec_insn.pc_inc;
+		*contpc >>= 28;
+		*contpc <<= 28;
+		*contpc |= (insn.j_format.target << 2);
+		return 1;
+		break;
+	case mm_jals32_op:
+	case mm_jal32_op:
+		regs->regs[31] = regs->cp0_epc +
+			dec_insn.pc_inc + dec_insn.next_pc_inc;
+		/* Fall through */
+	case mm_j32_op:
+		*contpc = regs->cp0_epc + dec_insn.pc_inc;
+		*contpc >>= 27;
+		*contpc <<= 27;
+		*contpc |= (insn.j_format.target << 1);
+		set_isa16_mode(*contpc);
+		return 1;
+		break;
+	}
+	return 0;
+}
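The target arithmetic repeated throughout mm_isBranchInstr() (and isBranchInstr() below) reduces to one expression; a minimal sketch, assuming a sign-extended 16-bit immediate:

	#include <stdint.h>

	/* Branch targets are relative to the delay-slot PC (epc + pc_inc);
	 * microMIPS immediates are halfword-scaled, classic MIPS32
	 * immediates are word-scaled. */
	static unsigned long branch_target(unsigned long epc, int pc_inc,
					   int16_t simm, int micromips)
	{
		return epc + pc_inc + ((long)simm << (micromips ? 1 : 2));
	}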
 
 /*
  * Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@
  * a single subroutine should be used across both
  * modules.
  */
-static int isBranchInstr(mips_instruction * i)
+static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+			 unsigned long *contpc)
 {
-	switch (MIPSInst_OPCODE(*i)) {
+	union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+	unsigned int fcr31;
+	unsigned int bit = 0;
+
+	switch (insn.i_format.opcode) {
 	case spec_op:
-		switch (MIPSInst_FUNC(*i)) {
+		switch (insn.r_format.func) {
 		case jalr_op:
+			regs->regs[insn.r_format.rd] =
+				regs->cp0_epc + dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+			/* Fall through */
 		case jr_op:
+			*contpc = regs->regs[insn.r_format.rs];
 			return 1;
+			break;
 		}
 		break;
-
 	case bcond_op:
-		switch (MIPSInst_RT(*i)) {
-		case bltz_op:
-		case bgez_op:
-		case bltzl_op:
-		case bgezl_op:
+		switch (insn.i_format.rt) {
 		case bltzal_op:
-		case bgezal_op:
 		case bltzall_op:
-		case bgezall_op:
+			regs->regs[31] = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+			/* Fall through */
+		case bltz_op:
+		case bltzl_op:
+			if ((long)regs->regs[insn.i_format.rs] < 0)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.i_format.simmediate << 2);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
 			return 1;
+			break;
+		case bgezal_op:
+		case bgezall_op:
+			regs->regs[31] = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+			/* Fall through */
+		case bgez_op:
+		case bgezl_op:
+			if ((long)regs->regs[insn.i_format.rs] >= 0)
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					(insn.i_format.simmediate << 2);
+			else
+				*contpc = regs->cp0_epc +
+					dec_insn.pc_inc +
+					dec_insn.next_pc_inc;
+			return 1;
+			break;
 		}
 		break;
-
-	case j_op:
-	case jal_op:
 	case jalx_op:
-	case beq_op:
-	case bne_op:
-	case blez_op:
-	case bgtz_op:
-	case beql_op:
-	case bnel_op:
-	case blezl_op:
-	case bgtzl_op:
+		set_isa16_mode(bit);
+		/* Fall through */
+	case jal_op:
+		regs->regs[31] = regs->cp0_epc +
+			dec_insn.pc_inc +
+			dec_insn.next_pc_inc;
+		/* Fall through */
+	case j_op:
+		*contpc = regs->cp0_epc + dec_insn.pc_inc;
+		*contpc >>= 28;
+		*contpc <<= 28;
+		*contpc |= (insn.j_format.target << 2);
+		/* Set microMIPS mode bit: XOR for jalx. */
+		*contpc ^= bit;
 		return 1;
-
+		break;
+	case beq_op:
+	case beql_op:
+		if (regs->regs[insn.i_format.rs] ==
+		    regs->regs[insn.i_format.rt])
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case bne_op:
+	case bnel_op:
+		if (regs->regs[insn.i_format.rs] !=
+		    regs->regs[insn.i_format.rt])
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case blez_op:
+	case blezl_op:
+		if ((long)regs->regs[insn.i_format.rs] <= 0)
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+		return 1;
+		break;
+	case bgtz_op:
+	case bgtzl_op:
+		if ((long)regs->regs[insn.i_format.rs] > 0)
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				(insn.i_format.simmediate << 2);
+		else
+			*contpc = regs->cp0_epc +
+				dec_insn.pc_inc +
+				dec_insn.next_pc_inc;
+		return 1;
+		break;
 	case cop0_op:
 	case cop1_op:
 	case cop2_op:
 	case cop1x_op:
-		if (MIPSInst_RS(*i) == bc_op)
-			return 1;
+		if (insn.i_format.rs == bc_op) {
+			preempt_disable();
+			if (is_fpu_owner())
+				asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+			else
+				fcr31 = current->thread.fpu.fcr31;
+			preempt_enable();
+
+			bit = (insn.i_format.rt >> 2);
+			bit += (bit != 0);
+			bit += 23;
+			switch (insn.i_format.rt & 3) {
+			case 0:	/* bc1f */
+			case 2:	/* bc1fl */
+				if (~fcr31 & (1 << bit))
+					*contpc = regs->cp0_epc +
+						dec_insn.pc_inc +
+						(insn.i_format.simmediate << 2);
+				else
+					*contpc = regs->cp0_epc +
+						dec_insn.pc_inc +
+						dec_insn.next_pc_inc;
+				return 1;
+				break;
+			case 1:	/* bc1t */
+			case 3:	/* bc1tl */
+				if (fcr31 & (1 << bit))
+					*contpc = regs->cp0_epc +
+						dec_insn.pc_inc +
+						(insn.i_format.simmediate << 2);
+				else
+					*contpc = regs->cp0_epc +
+						dec_insn.pc_inc +
+						dec_insn.next_pc_inc;
+				return 1;
+				break;
+			}
+		}
 		break;
 	}
-
 	return 0;
 }
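The "bit = rt >> 2; bit += (bit != 0); bit += 23" sequence in both branch decoders maps an FP condition code to its FCSR bit: FCC0 lives in FCSR bit 23, FCC1..FCC7 in bits 25..31 (bit 24 is the FS flag). A one-line equivalent, for illustration:

	static unsigned int fcc_to_fcsr_bit(unsigned int cc)	/* cc = 0..7 */
	{
		return cc ? cc + 24 : 23;
	}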
 
@@ -210,26 +890,23 @@
  */
 
 static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
-		       void *__user *fault_addr)
+		struct mm_decoded_insn dec_insn, void *__user *fault_addr)
 {
 	mips_instruction ir;
-	unsigned long emulpc, contpc;
+	unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
 	unsigned int cond;
-
-	if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-		MIPS_FPU_EMU_INC_STATS(errors);
-		*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-		return SIGBUS;
-	}
-	if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
-		MIPS_FPU_EMU_INC_STATS(errors);
-		*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-		return SIGSEGV;
-	}
+	int pc_inc;
 
 	/* XXX NEC Vr54xx bug workaround */
-	if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
-		xcp->cp0_cause &= ~CAUSEF_BD;
+	if (xcp->cp0_cause & CAUSEF_BD) {
+		if (dec_insn.micro_mips_mode) {
+			if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
+				xcp->cp0_cause &= ~CAUSEF_BD;
+		} else {
+			if (!isBranchInstr(xcp, dec_insn, &contpc))
+				xcp->cp0_cause &= ~CAUSEF_BD;
+		}
+	}
 
 	if (xcp->cp0_cause & CAUSEF_BD) {
 		/*
@@ -244,32 +921,33 @@
 		 * Linux MIPS branch emulator operates on context, updating the
 		 * cp0_epc.
 		 */
-		emulpc = xcp->cp0_epc + 4;	/* Snapshot emulation target */
-
-		if (__compute_return_epc(xcp) < 0) {
-#ifdef CP1DBG
-			printk("failed to emulate branch at %p\n",
-				(void *) (xcp->cp0_epc));
-#endif
-			return SIGILL;
-		}
-		if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
-			MIPS_FPU_EMU_INC_STATS(errors);
-			*fault_addr = (mips_instruction __user *)emulpc;
-			return SIGBUS;
-		}
-		if (__get_user(ir, (mips_instruction __user *) emulpc)) {
-			MIPS_FPU_EMU_INC_STATS(errors);
-			*fault_addr = (mips_instruction __user *)emulpc;
-			return SIGSEGV;
-		}
-		/* __compute_return_epc() will have updated cp0_epc */
-		contpc = xcp->cp0_epc;
-		/* In order not to confuse ptrace() et al, tweak context */
-		xcp->cp0_epc = emulpc - 4;
+		ir = dec_insn.next_insn;  /* process delay slot instr */
+		pc_inc = dec_insn.next_pc_inc;
 	} else {
-		emulpc = xcp->cp0_epc;
-		contpc = xcp->cp0_epc + 4;
+		ir = dec_insn.insn;       /* process current instr */
+		pc_inc = dec_insn.pc_inc;
+	}
+
+	/*
+	 * Since microMIPS FPU instructions are a subset of MIPS32 FPU
+	 * instructions, we want to convert microMIPS FPU instructions
+	 * into MIPS32 instructions so that we can reuse all of the
+	 * FPU emulation code.
+	 *
+	 * NOTE: We cannot do this for branch instructions since they
+	 *       are not a subset. Example: Cannot emulate a 16-bit
+	 *       aligned target address with a MIPS32 instruction.
+	 */
+	if (dec_insn.micro_mips_mode) {
+		/*
+		 * If the next instruction is a 16-bit instruction, it
+		 * cannot be an FPU instruction. This can happen since
+		 * we can be called for non-FPU instructions.
+		 */
+		if ((pc_inc == 2) ||
+			(microMIPS32_to_MIPS32((union mips_instruction *)&ir)
+			 == SIGILL))
+			return SIGILL;
 	}
 
       emul:
@@ -474,22 +1152,35 @@
 				/* branch taken: emulate dslot
 				 * instruction
 				 */
-				xcp->cp0_epc += 4;
-				contpc = (xcp->cp0_epc +
-					(MIPSInst_SIMM(ir) << 2));
+				xcp->cp0_epc += dec_insn.pc_inc;
 
-				if (!access_ok(VERIFY_READ, xcp->cp0_epc,
-					       sizeof(mips_instruction))) {
-					MIPS_FPU_EMU_INC_STATS(errors);
-					*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-					return SIGBUS;
-				}
-				if (__get_user(ir,
-				    (mips_instruction __user *) xcp->cp0_epc)) {
-					MIPS_FPU_EMU_INC_STATS(errors);
-					*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-					return SIGSEGV;
-				}
+				contpc = MIPSInst_SIMM(ir);
+				ir = dec_insn.next_insn;
+				if (dec_insn.micro_mips_mode) {
+					contpc = (xcp->cp0_epc + (contpc << 1));
+
+					/* If 16-bit instruction, not FPU. */
+					if ((dec_insn.next_pc_inc == 2) ||
+						(microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
+
+						/*
+						 * Since this instruction is
+						 * stored on the stack as a
+						 * 32-bit word, pad a 16-bit
+						 * instruction with a NOP16
+						 * in the second halfword.
+						 */
+						if (dec_insn.next_pc_inc == 2)
+							ir = (ir & (~0xffff)) | MM_NOP16;
+
+						/*
+						 * Single step the non-CP1
+						 * instruction in the dslot.
+						 */
+						return mips_dsemul(xcp, ir, contpc);
+					}
+				} else
+					contpc = (xcp->cp0_epc + (contpc << 2));
 
 				switch (MIPSInst_OPCODE(ir)) {
 				case lwc1_op:
@@ -525,8 +1216,8 @@
 					 * branch likely nullifies
 					 * dslot if not taken
 					 */
-					xcp->cp0_epc += 4;
-					contpc += 4;
+					xcp->cp0_epc += dec_insn.pc_inc;
+					contpc += dec_insn.pc_inc;
 					/*
 					 * else continue & execute
 					 * dslot as normal insn
@@ -1313,25 +2004,75 @@
 	int has_fpu, void *__user *fault_addr)
 {
 	unsigned long oldepc, prevepc;
-	mips_instruction insn;
+	struct mm_decoded_insn dec_insn;
+	u16 instr[4];
+	u16 *instr_ptr;
 	int sig = 0;
 
 	oldepc = xcp->cp0_epc;
 	do {
 		prevepc = xcp->cp0_epc;
 
-		if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-			MIPS_FPU_EMU_INC_STATS(errors);
-			*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-			return SIGBUS;
+		if (get_isa16_mode(prevepc) && cpu_has_mmips) {
+			/*
+			 * Get next 2 microMIPS instructions and convert them
+			 * into 32-bit instructions.
+			 */
+			if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
+			    (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
+			    (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
+			    (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
+				MIPS_FPU_EMU_INC_STATS(errors);
+				return SIGBUS;
+			}
+			instr_ptr = instr;
+
+			/* Get first instruction. */
+			if (mm_insn_16bit(*instr_ptr)) {
+				/* Duplicate the half-word. */
+				dec_insn.insn = (*instr_ptr << 16) |
+					(*instr_ptr);
+				/* 16-bit instruction. */
+				dec_insn.pc_inc = 2;
+				instr_ptr += 1;
+			} else {
+				dec_insn.insn = (*instr_ptr << 16) |
+					*(instr_ptr+1);
+				/* 32-bit instruction. */
+				dec_insn.pc_inc = 4;
+				instr_ptr += 2;
+			}
+			/* Get second instruction. */
+			if (mm_insn_16bit(*instr_ptr)) {
+				/* Duplicate the half-word. */
+				dec_insn.next_insn = (*instr_ptr << 16) |
+					(*instr_ptr);
+				/* 16-bit instruction. */
+				dec_insn.next_pc_inc = 2;
+			} else {
+				dec_insn.next_insn = (*instr_ptr << 16) |
+					*(instr_ptr+1);
+				/* 32-bit instruction. */
+				dec_insn.next_pc_inc = 4;
+			}
+			dec_insn.micro_mips_mode = 1;
+		} else {
+			if ((get_user(dec_insn.insn,
+			    (mips_instruction __user *) xcp->cp0_epc)) ||
+			    (get_user(dec_insn.next_insn,
+			    (mips_instruction __user *)(xcp->cp0_epc+4)))) {
+				MIPS_FPU_EMU_INC_STATS(errors);
+				return SIGBUS;
+			}
+			dec_insn.pc_inc = 4;
+			dec_insn.next_pc_inc = 4;
+			dec_insn.micro_mips_mode = 0;
 		}
-		if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
-			MIPS_FPU_EMU_INC_STATS(errors);
-			*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-			return SIGSEGV;
-		}
-		if (insn == 0)
-			xcp->cp0_epc += 4;	/* skip nops */
+
+		if ((dec_insn.insn == 0) ||
+		   ((dec_insn.pc_inc == 2) &&
+		   ((dec_insn.insn & 0xffff) == MM_NOP16)))
+			xcp->cp0_epc += dec_insn.pc_inc;	/* Skip NOPs */
 		else {
 			/*
 			 * The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@
 			 */
 			/* convert to ieee library modes */
 			ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
-			sig = cop1Emulate(xcp, ctx, fault_addr);
+			sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
 			/* revert to mips rounding mode */
 			ieee754_csr.rm = mips_rm[ieee754_csr.rm];
 		}
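The halfword-folding logic above can be summarised as follows (a sketch; is_16bit stands in for mm_insn_16bit()): a 16-bit instruction is duplicated into both halves of the 32-bit value so later code can treat either size uniformly.

	#include <stdint.h>

	static uint32_t fold_mm_insn(const uint16_t *hw, unsigned int *pc_inc,
				     int (*is_16bit)(uint16_t))
	{
		if (is_16bit(hw[0])) {
			*pc_inc = 2;
			return ((uint32_t)hw[0] << 16) | hw[0];
		}
		*pc_inc = 4;
		return ((uint32_t)hw[0] << 16) | hw[1];
	}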
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 384a3b0..7ea622a 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -55,7 +55,9 @@
 	struct emuframe __user *fr;
 	int err;
 
-	if (ir == 0) {		/* a nop is easy */
+	if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
+		(ir == 0)) {
+		/* NOP is easy */
 		regs->cp0_epc = cpc;
 		regs->cp0_cause &= ~CAUSEF_BD;
 		return 0;
@@ -91,8 +93,16 @@
 	if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
 		return SIGBUS;
 
-	err = __put_user(ir, &fr->emul);
-	err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+	if (get_isa16_mode(regs->cp0_epc)) {
+		err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
+		err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
+		err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
+		err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+	} else {
+		err = __put_user(ir, &fr->emul);
+		err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+	}
+
 	err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
 	err |= __put_user(cpc, &fr->epc);
 
@@ -101,7 +111,8 @@
 		return SIGBUS;
 	}
 
-	regs->cp0_epc = (unsigned long) &fr->emul;
+	regs->cp0_epc = ((unsigned long) &fr->emul) |
+		get_isa16_mode(regs->cp0_epc);
 
 	flush_cache_sigtramp((unsigned long)&fr->badinst);
 
@@ -114,9 +125,10 @@
 	unsigned long epc;
 	u32 insn, cookie;
 	int err = 0;
+	u16 instr[2];
 
 	fr = (struct emuframe __user *)
-		(xcp->cp0_epc - sizeof(mips_instruction));
+		(msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
 
 	/*
 	 * If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@
 	 *  - Is the instruction pointed to by the EPC a BREAK_MATH?
 	 *  - Is the following memory word the BD_COOKIE?
 	 */
-	err = __get_user(insn, &fr->badinst);
+	if (get_isa16_mode(xcp->cp0_epc)) {
+		err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
+		err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
+		insn = (instr[0] << 16) | instr[1];
+	} else {
+		err = __get_user(insn, &fr->badinst);
+	}
 	err |= __get_user(cookie, &fr->cookie);
 
 	if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
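The paired __put_user()/__get_user() halfword accesses above implement the microMIPS memory order, which is always most-significant halfword first regardless of endianness; reduced to a sketch:

	#include <stdint.h>

	static void store_mm_insn(uint16_t *dst, uint32_t ir)
	{
		dst[0] = ir >> 16;	/* major-opcode halfword first */
		dst[1] = ir & 0xffff;
	}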
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 1dcec30..e87aae1 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,7 @@
 
 obj-y				+= cache.o dma-default.o extable.o fault.o \
 				   gup.o init.o mmap.o page.o page-funcs.o \
-				   tlbex.o tlbex-fault.o uasm.o
+				   tlbex.o tlbex-fault.o uasm-mips.o
 
 obj-$(CONFIG_32BIT)		+= ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)		+= pgtable-64.o
@@ -22,3 +22,5 @@
 obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)	+= sc-mips.o
+
+obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2078915..21813be 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -33,6 +33,7 @@
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
+#include <asm/dma-coherence.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@
 		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
 
 static void __cpuinit r4k_blast_dcache_setup(void)
 {
@@ -264,7 +266,8 @@
 		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 }
 
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
 
 static void __cpuinit r4k_blast_icache_setup(void)
 {
@@ -1377,20 +1380,6 @@
 	}
 }
 
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
-	coherentio = 1;
-
-	return 0;
-}
-
-early_param("coherentio", setcoherentio);
-#endif
-
 static void __cpuinit r4k_cache_error_setup(void)
 {
 	extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@
 
 	build_clear_page();
 	build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
+
+	/*
+	 * We want to run CMP kernels on cores with and without coherent
+	 * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+	 * or not to flush caches.
+	 */
 	local_r4k___flush_cache_all(NULL);
-#endif
+
 	coherency_setup();
 	board_cache_error_setup = r4k_cache_error_setup;
 }
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec44..5aeb3eb 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@
 
 EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f9ef838..caf92ec 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -22,6 +22,26 @@
 
 #include <dma-coherence.h>
 
+int coherentio = 0;	/* User-defined DMA coherency from command line. */
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0;	/* Actual hardware-supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+	coherentio = 1;
+	pr_info("Hardware DMA cache coherency (command line)\n");
+	return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+	coherentio = 0;
+	pr_info("Software DMA cache coherency (command line)\n");
+	return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+
 static inline struct page *dma_addr_to_page(struct device *dev,
 	dma_addr_t dma_addr)
 {
@@ -115,7 +135,8 @@
 
 		if (!plat_device_is_coherent(dev)) {
 			dma_cache_wback_inv((unsigned long) ret, size);
-			ret = UNCAC_ADDR(ret);
+			if (!hw_coherentio)
+				ret = UNCAC_ADDR(ret);
 		}
 	}
 
@@ -142,7 +163,7 @@
 
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
-	if (!plat_device_is_coherent(dev))
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
 		addr = CAC_ADDR(addr);
 
 	free_pages(addr, get_order(size));
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index a29fba5..4eb8dcf 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -247,6 +247,11 @@
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 	int i;
+	static atomic_t run_once = ATOMIC_INIT(0);
+
+	if (atomic_xchg(&run_once, 1))
+		return;
 
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 	int i;
+	static atomic_t run_once = ATOMIC_INIT(0);
+
+	if (atomic_xchg(&run_once, 1))
+		return;
 
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
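The atomic_xchg() guard added in both hunks is the standard run-once idiom: the first caller swaps 0 for 1 and proceeds, every later (or concurrent) caller sees 1 and returns. In portable C11 atomics, for illustration:

	#include <stdatomic.h>

	static atomic_int run_once;

	void build_once(void)
	{
		if (atomic_exchange(&run_once, 1))
			return;
		/* ... one-time code generation ... */
	}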
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed..4a13c15 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@
 #endif
 
 	local_irq_save(flags);
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = ASID_MASK(read_c0_entryhi());
 	write_c0_entrylo0(0);
 	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
 	for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@
 
 #ifdef DEBUG_TLB
 		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-			cpu_context(cpu, mm) & ASID_MASK, start, end);
+			ASID_MASK(cpu_context(cpu, mm)), start, end);
 #endif
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size <= current_cpu_data.tlbsize) {
-			int oldpid = read_c0_entryhi() & ASID_MASK;
-			int newpid = cpu_context(cpu, mm) & ASID_MASK;
+			int oldpid = ASID_MASK(read_c0_entryhi());
+			int newpid = ASID_MASK(cpu_context(cpu, mm));
 
 			start &= PAGE_MASK;
 			end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@
 #ifdef DEBUG_TLB
 		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+		newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
 		page &= PAGE_MASK;
 		local_irq_save(flags);
-		oldpid = read_c0_entryhi() & ASID_MASK;
+		oldpid = ASID_MASK(read_c0_entryhi());
 		write_c0_entryhi(page | newpid);
 		BARRIER;
 		tlb_probe();
@@ -197,10 +197,10 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = ASID_MASK(read_c0_entryhi());
 
 #ifdef DEBUG_TLB
-	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+	if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
 		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
 		       (cpu_context(cpu, vma->vm_mm)), pid);
 	}
@@ -241,7 +241,7 @@
 
 		local_irq_save(flags);
 		/* Save old context and create impossible VPN2 value */
-		old_ctx = read_c0_entryhi() & ASID_MASK;
+		old_ctx = ASID_MASK(read_c0_entryhi());
 		old_pagemask = read_c0_pagemask();
 		w = read_c0_wired();
 		write_c0_wired(w + 1);
@@ -264,7 +264,7 @@
 #endif
 
 		local_irq_save(flags);
-		old_ctx = read_c0_entryhi() & ASID_MASK;
+		old_ctx = ASID_MASK(read_c0_entryhi());
 		write_c0_entrylo0(entrylo0);
 		write_c0_entryhi(entryhi);
 		write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 493131c..09653b2 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -94,6 +95,7 @@
 	FLUSH_ITLB;
 	EXIT_CRITICAL(flags);
 }
+EXPORT_SYMBOL(local_flush_tlb_all);
 
 /* All entries common to a mm share an asid.  To effectively flush
    these entries, we just bump the asid. */
@@ -285,7 +287,7 @@
 
 	ENTER_CRITICAL(flags);
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = ASID_MASK(read_c0_entryhi());
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499..122f920 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = ASID_MASK(read_c0_entryhi());
 
 	local_irq_save(flags);
 	address &= PAGE_MASK;
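These hunks convert the build-time "x & ASID_MASK" expression into a function-style ASID_MASK(x) macro so the mask can be chosen at boot (see setup_asid() in tlbex.c below). A simplified equivalent, not the kernel's instruction-patching implementation, with a hypothetical boot_asid_mask variable:

	extern unsigned long boot_asid_mask;	/* hypothetical, set at boot */
	#define ASID_MASK(x)	((x) & boot_asid_mask)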
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 820e661..4d46d37 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
+#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -305,6 +306,78 @@
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
+static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
+					unsigned int i_const)
+{
+	unsigned int **p;
+
+	for (p = start; p < stop; p++) {
+#ifndef CONFIG_CPU_MICROMIPS
+		unsigned int *ip;
+
+		ip = *p;
+		*ip = (*ip & 0xffff0000) | i_const;
+#else
+		unsigned short *ip;
+
+		ip = ((unsigned short *)((unsigned int)*p - 1));
+		if ((*ip & 0xf000) == 0x4000) {
+			*ip &= 0xfff1;
+			*ip |= (i_const << 1);
+		} else if ((*ip & 0xf000) == 0x6000) {
+			*ip &= 0xfff1;
+			*ip |= ((i_const >> 2) << 1);
+		} else {
+			ip++;
+			*ip = i_const;
+		}
+#endif
+		local_flush_icache_range((unsigned long)ip,
+					 (unsigned long)ip + sizeof(*ip));
+			if ((long)regs->regs[insn.mm_i_format.rs] > 0)
+}
+
+#define asid_insn_fixup(section, const)					\
+do {									\
+	extern unsigned int *__start_ ## section;			\
+	extern unsigned int *__stop_ ## section;			\
+	insn_fixup(&__start_ ## section, &__stop_ ## section, const);	\
+} while (0)
+
+/*
+ * Caller is assumed to flush the caches before the first context switch.
+ */
+static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
+				 unsigned int version_mask,
+				 unsigned int first_version)
+{
+	extern asmlinkage void handle_ri_rdhwr_vivt(void);
+	unsigned long *vivt_exc;
+
+#ifdef CONFIG_CPU_MICROMIPS
+	/*
+	 * The worst-case optimised microMIPS addiu instruction supports
+	 * only a 3-bit immediate value.
+	 */
+	if (inc > 7)
+		panic("Invalid ASID increment value!");
+#endif
+	asid_insn_fixup(__asid_inc, inc);
+	asid_insn_fixup(__asid_mask, mask);
+	asid_insn_fixup(__asid_version_mask, version_mask);
+	asid_insn_fixup(__asid_first_version, first_version);
+
+	/* Patch up the 'handle_ri_rdhwr_vivt' handler. */
+	vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
+#ifdef CONFIG_CPU_MICROMIPS
+	vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
+#endif
+	vivt_exc++;
+	*vivt_exc = (*vivt_exc & ~mask) | mask;
+
+	current_cpu_data.asid_cache = first_version;
+}
+
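For the classic-MIPS branch of insn_fixup() above, each pointer collected in a __asid_* section names an instruction whose 16-bit immediate field gets rewritten with a boot-time constant; that branch reduces to this sketch:

	#include <stdint.h>

	static void patch_imm16(uint32_t *insn, uint16_t val)
	{
		*insn = (*insn & 0xffff0000u) | val;
		/* the real code then flushes the icache for *insn */
	}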
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
 
 static void __cpuinit build_r4000_setup_pgd(void)
 {
 	const int a0 = 4;
 	const int a1 = 5;
-	u32 *p = tlbmiss_handler_setup_pgd;
+	u32 *p = tlbmiss_handler_setup_pgd_array;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 
-	memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+	memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
@@ -1496,15 +1569,15 @@
 		uasm_i_jr(&p, 31);
 		UASM_i_MTC0(&p, a0, 31, pgd_reg);
 	}
-	if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
-		panic("tlbmiss_handler_setup_pgd space exceeded");
+	if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+		panic("tlbmiss_handler_setup_pgd_array space exceeded");
 	uasm_resolve_relocs(relocs, labels);
-	pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
-		 (unsigned int)(p - tlbmiss_handler_setup_pgd));
+	pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+		 (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
 
 	dump_handler("tlbmiss_handler",
-		     tlbmiss_handler_setup_pgd,
-		     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+		     tlbmiss_handler_setup_pgd_array,
+		     ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
 }
 #endif
 
@@ -2030,6 +2103,13 @@
 
 	uasm_l_nopage_tlbl(&l, p);
 	build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+	if ((unsigned long)tlb_do_page_fault_0 & 1) {
+		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+		uasm_i_jr(&p, K0);
+	} else
+#endif
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
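The #ifdef CONFIG_CPU_MICROMIPS blocks added to these three fault paths handle a handler address with bit 0 set (the microMIPS ISA bit), which cannot be encoded in a plain j instruction; uasm instead emits lui/addiu/jr, and jr preserves bit 0, switching the CPU to microMIPS mode at the target. The %hi/%lo split used by those calls, as a sketch (assuming arithmetic right shift, as the kernel does):

	#include <stdint.h>

	/* lo is the sign-extended low halfword; hi compensates so that
	 * (rel_hi(v) << 16) + rel_lo(v) == v. */
	static int32_t rel_lo(int64_t v)
	{
		return (int16_t)v;
	}

	static int32_t rel_hi(int64_t v)
	{
		return (int32_t)((v - rel_lo(v)) >> 16);
	}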
 
@@ -2077,6 +2157,13 @@
 
 	uasm_l_nopage_tlbs(&l, p);
 	build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+	if ((unsigned long)tlb_do_page_fault_1 & 1) {
+		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+		uasm_i_jr(&p, K0);
+	} else
+#endif
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -2125,6 +2212,13 @@
 
 	uasm_l_nopage_tlbm(&l, p);
 	build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+	if ((unsigned long)tlb_do_page_fault_1 & 1) {
+		uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+		uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+		uasm_i_jr(&p, K0);
+	} else
+#endif
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -2162,8 +2256,12 @@
 	case CPU_TX3922:
 	case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-		build_r3000_tlb_refill_handler();
+		setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
+		if (cpu_has_local_ebase)
+			build_r3000_tlb_refill_handler();
 		if (!run_once) {
+			if (!cpu_has_local_ebase)
+				build_r3000_tlb_refill_handler();
 			build_r3000_tlb_load_handler();
 			build_r3000_tlb_store_handler();
 			build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@
 		break;
 
 	default:
+#ifndef CONFIG_MIPS_MT_SMTC
+		setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
+#else
+		setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
+#endif
 		if (!run_once) {
 			scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@
 			build_r4000_tlb_load_handler();
 			build_r4000_tlb_store_handler();
 			build_r4000_tlb_modify_handler();
+			if (!cpu_has_local_ebase)
+				build_r4000_tlb_refill_handler();
 			run_once++;
 		}
-		build_r4000_tlb_refill_handler();
+		if (cpu_has_local_ebase)
+			build_r4000_tlb_refill_handler();
 	}
 }
 
@@ -2207,7 +2313,7 @@
 	local_flush_icache_range((unsigned long)handle_tlbm,
 			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
-			   (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+			   (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
 #endif
 }
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644
index 0000000..162ee6d
--- /dev/null
+++ b/arch/mips/mm/uasm-micromips.c
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013   MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA	_UASM_ISA_MICROMIPS
+#include <asm/uasm.h>
+
+#define RS_MASK		0x1f
+#define RS_SH		16
+#define RT_MASK		0x1f
+#define RT_SH		21
+#define SCIMM_MASK	0x3ff
+#define SCIMM_SH	16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)					\
+	((a) << OP_SH						\
+	 | (b) << RT_SH						\
+	 | (c) << RS_SH						\
+	 | (d) << RD_SH						\
+	 | (e) << RE_SH						\
+	 | (f) << FUNC_SH)
+
+/* Define these when this file's ISA is not the one the kernel is compiled with. */
+#ifndef CONFIG_CPU_MICROMIPS
+#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table_MM[] __uasminitdata = {
+	{ insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
+	{ insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+	{ insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
+	{ insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+	{ insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_beql, 0, 0 },
+	{ insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
+	{ insn_bgezl, 0, 0 },
+	{ insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
+	{ insn_bltzl, 0, 0 },
+	{ insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
+	{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+	{ insn_daddu, 0, 0 },
+	{ insn_daddiu, 0, 0 },
+	{ insn_dmfc0, 0, 0 },
+	{ insn_dmtc0, 0, 0 },
+	{ insn_dsll, 0, 0 },
+	{ insn_dsll32, 0, 0 },
+	{ insn_dsra, 0, 0 },
+	{ insn_dsrl, 0, 0 },
+	{ insn_dsrl32, 0, 0 },
+	{ insn_drotr, 0, 0 },
+	{ insn_drotr32, 0, 0 },
+	{ insn_dsubu, 0, 0 },
+	{ insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
+	{ insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
+	{ insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
+	{ insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
+	{ insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
+	{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+	{ insn_ld, 0, 0 },
+	{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+	{ insn_lld, 0, 0 },
+	{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+	{ insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+	{ insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
+	{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+	{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
+	{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+	{ insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
+	{ insn_rfe, 0, 0 },
+	{ insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
+	{ insn_scd, 0, 0 },
+	{ insn_sd, 0, 0 },
+	{ insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
+	{ insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
+	{ insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
+	{ insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
+	{ insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
+	{ insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+	{ insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
+	{ insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
+	{ insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
+	{ insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
+	{ insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
+	{ insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+	{ insn_dins, 0, 0 },
+	{ insn_dinsm, 0, 0 },
+	{ insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+	{ insn_bbit0, 0, 0 },
+	{ insn_bbit1, 0, 0 },
+	{ insn_lwx, 0, 0 },
+	{ insn_ldx, 0, 0 },
+	{ insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+	WARN(arg > 0xffff || arg < -0x10000,
+	     KERN_WARNING "Micro-assembler field overflow\n");
+
+	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+	WARN(arg & ~((JIMM_MASK << 2) | 1),
+	     KERN_WARNING "Micro-assembler field overflow\n");
+
+	return (arg >> 1) & JIMM_MASK;
+}
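Compared with the classic versions in uasm-mips.c below, the only difference in this file's build_bimm()/build_jimm() is the scale: microMIPS branch and jump offsets are halfword-scaled (>> 1) where classic MIPS32 is word-scaled (>> 2). Side by side, as a sketch:

	#include <stdint.h>

	static uint32_t bimm_mm(int32_t off)		/* microMIPS */
	{
		return ((off < 0) ? (1u << 15) : 0) | ((off >> 1) & 0x7fff);
	}

	static uint32_t bimm_classic(int32_t off)	/* MIPS32 */
	{
		return ((off < 0) ? (1u << 15) : 0) | ((off >> 2) & 0x7fff);
	}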
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+	struct insn *ip = NULL;
+	unsigned int i;
+	va_list ap;
+	u32 op;
+
+	for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
+		if (insn_table_MM[i].opcode == opc) {
+			ip = &insn_table_MM[i];
+			break;
+		}
+
+	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+		panic("Unsupported Micro-assembler instruction %d", opc);
+
+	op = ip->match;
+	va_start(ap, opc);
+	if (ip->fields & RS) {
+		if (opc == insn_mfc0 || opc == insn_mtc0)
+			op |= build_rt(va_arg(ap, u32));
+		else
+			op |= build_rs(va_arg(ap, u32));
+	}
+	if (ip->fields & RT) {
+		if (opc == insn_mfc0 || opc == insn_mtc0)
+			op |= build_rs(va_arg(ap, u32));
+		else
+			op |= build_rt(va_arg(ap, u32));
+	}
+	if (ip->fields & RD)
+		op |= build_rd(va_arg(ap, u32));
+	if (ip->fields & RE)
+		op |= build_re(va_arg(ap, u32));
+	if (ip->fields & SIMM)
+		op |= build_simm(va_arg(ap, s32));
+	if (ip->fields & UIMM)
+		op |= build_uimm(va_arg(ap, u32));
+	if (ip->fields & BIMM)
+		op |= build_bimm(va_arg(ap, s32));
+	if (ip->fields & JIMM)
+		op |= build_jimm(va_arg(ap, u32));
+	if (ip->fields & FUNC)
+		op |= build_func(va_arg(ap, u32));
+	if (ip->fields & SET)
+		op |= build_set(va_arg(ap, u32));
+	if (ip->fields & SCIMM)
+		op |= build_scimm(va_arg(ap, u32));
+	va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	**buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+	**buf = op;
+#endif
+	(*buf)++;
+}
+
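The CONFIG_CPU_LITTLE_ENDIAN swap at the end of build_insn() above follows from the same memory order noted for dsemul.c: microMIPS instructions are fetched as two big-halfword-first u16s, so storing the opcode as a single u32 on a little-endian kernel requires exchanging the halves. As a sketch:

	#include <stdint.h>

	static uint32_t mm_word(uint32_t op, int little_endian)
	{
		return little_endian ? ((op & 0xffff) << 16) | (op >> 16) : op;
	}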
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+	long laddr = (long)lab->addr;
+	long raddr = (long)rel->addr;
+
+	switch (rel->type) {
+	case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+		*rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+		*rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+		break;
+
+	default:
+		panic("Unsupported Micro-assembler relocation %d",
+		      rel->type);
+	}
+}
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644
index 0000000..5fcdd8f
--- /dev/null
+++ b/arch/mips/mm/uasm-mips.c
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA	_UASM_ISA_CLASSIC
+#include <asm/uasm.h>
+
+#define RS_MASK		0x1f
+#define RS_SH		21
+#define RT_MASK		0x1f
+#define RT_SH		16
+#define SCIMM_MASK	0xfffff
+#define SCIMM_SH	6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)					\
+	((a) << OP_SH						\
+	 | (b) << RS_SH						\
+	 | (c) << RT_SH						\
+	 | (d) << RD_SH						\
+	 | (e) << RE_SH						\
+	 | (f) << FUNC_SH)
+
+/* Define these when this file's ISA is not the one the kernel is compiled with. */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table[] __uasminitdata = {
+	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+	{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+	{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+	{ insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
+	{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),	JIMM },
+	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
+	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },
+	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+	{ insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
+	{ insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
+	{ insn_ori,  M(ori_op, 0, 0, 0, 0, 0),	RS | RT | UIMM },
+	{ insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+	{ insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
+	{ insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+	{ insn_scd,  M(scd_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
+	{ insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
+	{ insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
+	{ insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
+	{ insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),	RS | RT | RD },
+	{ insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+	{ insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
+	{ insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
+	{ insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
+	{ insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
+	{ insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
+	{ insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+	WARN(arg > 0x1ffff || arg < -0x20000,
+	     KERN_WARNING "Micro-assembler field overflow\n");
+
+	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+	WARN(arg & ~(JIMM_MASK << 2),
+	     KERN_WARNING "Micro-assembler field overflow\n");
+
+	return (arg >> 2) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+	struct insn *ip = NULL;
+	unsigned int i;
+	va_list ap;
+	u32 op;
+
+	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+		if (insn_table[i].opcode == opc) {
+			ip = &insn_table[i];
+			break;
+		}
+
+	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+		panic("Unsupported Micro-assembler instruction %d", opc);
+
+	op = ip->match;
+	va_start(ap, opc);
+	if (ip->fields & RS)
+		op |= build_rs(va_arg(ap, u32));
+	if (ip->fields & RT)
+		op |= build_rt(va_arg(ap, u32));
+	if (ip->fields & RD)
+		op |= build_rd(va_arg(ap, u32));
+	if (ip->fields & RE)
+		op |= build_re(va_arg(ap, u32));
+	if (ip->fields & SIMM)
+		op |= build_simm(va_arg(ap, s32));
+	if (ip->fields & UIMM)
+		op |= build_uimm(va_arg(ap, u32));
+	if (ip->fields & BIMM)
+		op |= build_bimm(va_arg(ap, s32));
+	if (ip->fields & JIMM)
+		op |= build_jimm(va_arg(ap, u32));
+	if (ip->fields & FUNC)
+		op |= build_func(va_arg(ap, u32));
+	if (ip->fields & SET)
+		op |= build_set(va_arg(ap, u32));
+	if (ip->fields & SCIMM)
+		op |= build_scimm(va_arg(ap, u32));
+	va_end(ap);
+
+	**buf = op;
+	(*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+	long laddr = (long)lab->addr;
+	long raddr = (long)rel->addr;
+
+	switch (rel->type) {
+	case R_MIPS_PC16:
+		*rel->addr |= build_bimm(laddr - (raddr + 4));
+		break;
+
+	default:
+		panic("Unsupported Micro-assembler relocation %d",
+		      rel->type);
+	}
+}
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 942ff6c..7eb5e43 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -10,17 +10,9 @@
  * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
  * Copyright (C) 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
 enum fields {
 	RS = 0x001,
 	RT = 0x002,
@@ -37,10 +29,6 @@
 
 #define OP_MASK		0x3f
 #define OP_SH		26
-#define RS_MASK		0x1f
-#define RS_SH		21
-#define RT_MASK		0x1f
-#define RT_SH		16
 #define RD_MASK		0x1f
 #define RD_SH		11
 #define RE_MASK		0x1f
@@ -53,8 +41,6 @@
 #define FUNC_SH		0
 #define SET_MASK	0x7
 #define SET_SH		0
-#define SCIMM_MASK	0xfffff
-#define SCIMM_SH	6
 
 enum opcode {
 	insn_invalid,
@@ -77,85 +63,6 @@
 	enum fields fields;
 };
 
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f)					\
-	((a) << OP_SH						\
-	 | (b) << RS_SH						\
-	 | (c) << RT_SH						\
-	 | (d) << RD_SH						\
-	 | (e) << RE_SH						\
-	 | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
-	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
-	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
-	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
-	{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-	{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
-	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
-	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
-	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
-	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
-	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
-	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
-	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
-	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
-	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
-	{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
-	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
-	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
-	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
-	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
-	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
-	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
-	{ insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
-	{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
-	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
-	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-	{ insn_jal,  M(jal_op, 0, 0, 0, 0, 0),	JIMM },
-	{ insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-	{ insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
-	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
-	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },
-	{ insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
-	{ insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
-	{ insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
-	{ insn_ori,  M(ori_op, 0, 0, 0, 0, 0),	RS | RT | UIMM },
-	{ insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
-	{ insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
-	{ insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
-	{ insn_scd,  M(scd_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
-	{ insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
-	{ insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
-	{ insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
-	{ insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),	RS | RT | RD },
-	{ insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
-	{ insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
-	{ insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
-	{ insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
-	{ insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
-	{ insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-	{ insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
-	{ insn_invalid, 0, 0 }
-};
-
-#undef M
-
 static inline __uasminit u32 build_rs(u32 arg)
 {
 	WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@
 	return arg & IMM_MASK;
 }
 
-static inline __uasminit u32 build_bimm(s32 arg)
-{
-	WARN(arg > 0x1ffff || arg < -0x20000,
-	     KERN_WARNING "Micro-assembler field overflow\n");
-
-	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
-	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
-static inline __uasminit u32 build_jimm(u32 arg)
-{
-	WARN(arg & ~(JIMM_MASK << 2),
-	     KERN_WARNING "Micro-assembler field overflow\n");
-
-	return (arg >> 2) & JIMM_MASK;
-}
-
 static inline __uasminit u32 build_scimm(u32 arg)
 {
 	WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@
 	return arg & SET_MASK;
 }
 
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
-	struct insn *ip = NULL;
-	unsigned int i;
-	va_list ap;
-	u32 op;
-
-	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
-		if (insn_table[i].opcode == opc) {
-			ip = &insn_table[i];
-			break;
-		}
-
-	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
-		panic("Unsupported Micro-assembler instruction %d", opc);
-
-	op = ip->match;
-	va_start(ap, opc);
-	if (ip->fields & RS)
-		op |= build_rs(va_arg(ap, u32));
-	if (ip->fields & RT)
-		op |= build_rt(va_arg(ap, u32));
-	if (ip->fields & RD)
-		op |= build_rd(va_arg(ap, u32));
-	if (ip->fields & RE)
-		op |= build_re(va_arg(ap, u32));
-	if (ip->fields & SIMM)
-		op |= build_simm(va_arg(ap, s32));
-	if (ip->fields & UIMM)
-		op |= build_uimm(va_arg(ap, u32));
-	if (ip->fields & BIMM)
-		op |= build_bimm(va_arg(ap, s32));
-	if (ip->fields & JIMM)
-		op |= build_jimm(va_arg(ap, u32));
-	if (ip->fields & FUNC)
-		op |= build_func(va_arg(ap, u32));
-	if (ip->fields & SET)
-		op |= build_set(va_arg(ap, u32));
-	if (ip->fields & SCIMM)
-		op |= build_scimm(va_arg(ap, u32));
-	va_end(ap);
-
-	**buf = op;
-	(*buf)++;
-}
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
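
The full build_insn() definition now lives in per-ISA sources and only a
declaration remains here.  As the removed comment noted, the function ORs the
opcode template with each operand field in left-to-right order.  A minimal
sketch of that packing for a classic MIPS32 I-type word (pack_itype() is a
hypothetical helper, not a kernel function):

	/*
	 * Illustrative only: MIPS32 I-type layout is opcode[31:26],
	 * rs[25:21], rt[20:16], imm[15:0].
	 */
	static inline u32 pack_itype(u32 template, u32 rs, u32 rt, s32 simm)
	{
		return template | ((rs & 0x1f) << 21) | ((rt & 0x1f) << 16) |
		       ((u32)simm & 0xffff);
	}
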
 
 #define I_u1u2u3(op)					\
 Ip_u1u2u3(op)						\
@@ -445,7 +286,7 @@
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
 			    unsigned int c)
 {
 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@
 	else
 		build_insn(buf, insn_pref, c, a, b);
 }
-UASM_EXPORT_SYMBOL(uasm_i_pref);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
 #else
 I_u2s3u1(_pref)
 #endif
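
Every exported helper in this file is now wrapped in ISAFUNC() so the same
source can be compiled once per instruction set, with the microMIPS build
getting distinct symbol names.  A sketch of the assumed macro shape (the MM
prefix here is hypothetical; the real definition lives in uasm.h):

	#ifdef CONFIG_CPU_MICROMIPS
	#define ISAFUNC(x)	MM##x	/* hypothetical microMIPS prefix */
	#else
	#define ISAFUNC(x)	x	/* classic build keeps plain names */
	#endif

Because ## pastes only the first token of its argument,
ISAFUNC(uasm_rel_hi)(addr) and ISAFUNC(uasm_rel_hi(addr)) expand identically,
which is why both spellings appear in the hunks below.
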
 
 /* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
 {
 	(*lab)->addr = addr;
 	(*lab)->lab = lid;
 	(*lab)++;
 }
-UASM_EXPORT_SYMBOL(uasm_build_label);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
 
-int __uasminit uasm_in_compat_space_p(long addr)
+int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
 {
 	/* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@
 	return 1;
 #endif
 }
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
 
 static int __uasminit uasm_rel_highest(long val)
 {
@@ -500,77 +341,66 @@
 #endif
 }
 
-int __uasminit uasm_rel_hi(long val)
+int __uasminit ISAFUNC(uasm_rel_hi)(long val)
 {
 	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
 
-int __uasminit uasm_rel_lo(long val)
+int __uasminit ISAFUNC(uasm_rel_lo)(long val)
 {
 	return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
 
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
 {
-	if (!uasm_in_compat_space_p(addr)) {
-		uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+	if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
+		ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
 		if (uasm_rel_higher(addr))
-			uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
-		if (uasm_rel_hi(addr)) {
-			uasm_i_dsll(buf, rs, rs, 16);
-			uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
-			uasm_i_dsll(buf, rs, rs, 16);
+			ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
+		if (ISAFUNC(uasm_rel_hi(addr))) {
+			ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+			ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+					ISAFUNC(uasm_rel_hi)(addr));
+			ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
 		} else
-			uasm_i_dsll32(buf, rs, rs, 0);
+			ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
 	} else
-		uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+		ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
 
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
 {
-	UASM_i_LA_mostly(buf, rs, addr);
-	if (uasm_rel_lo(addr)) {
-		if (!uasm_in_compat_space_p(addr))
-			uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+	ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
+	if (ISAFUNC(uasm_rel_lo(addr))) {
+		if (!ISAFUNC(uasm_in_compat_space_p)(addr))
+			ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+					ISAFUNC(uasm_rel_lo(addr)));
 		else
-			uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+			ISAFUNC(uasm_i_addiu)(buf, rs, rs,
+					ISAFUNC(uasm_rel_lo(addr)));
 	}
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
 
 /* Handle relocations. */
 void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
 {
 	(*rel)->addr = addr;
 	(*rel)->type = R_MIPS_PC16;
 	(*rel)->lab = lid;
 	(*rel)++;
 }
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
 
 static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
-	long laddr = (long)lab->addr;
-	long raddr = (long)rel->addr;
-
-	switch (rel->type) {
-	case R_MIPS_PC16:
-		*rel->addr |= build_bimm(laddr - (raddr + 4));
-		break;
-
-	default:
-		panic("Unsupported Micro-assembler relocation %d",
-		      rel->type);
-	}
-}
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
 
 void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
 {
 	struct uasm_label *l;
 
@@ -579,40 +409,40 @@
 			if (rel->lab == l->lab)
 				__resolve_relocs(rel, l);
 }
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
 
 void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++)
 		if (rel->addr >= first && rel->addr < end)
 			rel->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
 
 void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
 	for (; lab->lab != UASM_LABEL_INVALID; lab++)
 		if (lab->addr >= first && lab->addr < end)
 			lab->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_labels);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
 
 void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
 		  u32 *end, u32 *target)
 {
 	long off = (long)(target - first);
 
 	memcpy(target, first, (end - first) * sizeof(u32));
 
-	uasm_move_relocs(rel, first, end, off);
-	uasm_move_labels(lab, first, end, off);
+	ISAFUNC(uasm_move_relocs(rel, first, end, off));
+	ISAFUNC(uasm_move_labels(lab, first, end, off));
 }
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
 
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
 {
 	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
 		if (rel->addr == addr
@@ -623,88 +453,88 @@
 
 	return 0;
 }
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
 
 /* Convenience functions for labeled branches. */
 void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bltz(p, reg, 0);
+	ISAFUNC(uasm_i_bltz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
 
 void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_b(p, 0);
+	ISAFUNC(uasm_i_b)(p, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_b);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
 void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_beqz(p, reg, 0);
+	ISAFUNC(uasm_i_beqz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
 
 void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_beqzl(p, reg, 0);
+	ISAFUNC(uasm_i_beqzl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
 
 void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
 	unsigned int reg2, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bne(p, reg1, reg2, 0);
+	ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bne);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
 
 void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bnez(p, reg, 0);
+	ISAFUNC(uasm_i_bnez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
 
 void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bgezl(p, reg, 0);
+	ISAFUNC(uasm_i_bgezl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
 
 void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bgez(p, reg, 0);
+	ISAFUNC(uasm_i_bgez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
 
 void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
 	      unsigned int bit, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bbit0(p, reg, bit, 0);
+	ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
 
 void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
 	      unsigned int bit, int lid)
 {
 	uasm_r_mips_pc16(r, *p, lid);
-	uasm_i_bbit1(p, reg, bit, 0);
+	ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 6079ef3..0388fc8 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -5,9 +5,8 @@
 # Copyright (C) 2008 Wind River Systems, Inc.
 #   written by Ralf Baechle <ralf@linux-mips.org>
 #
-obj-y				:= malta-amon.o malta-cmdline.o \
-				   malta-display.o malta-init.o malta-int.o \
-				   malta-memory.o malta-platform.o \
+obj-y				:= malta-amon.o malta-display.o malta-init.o \
+				   malta-int.o malta-memory.o malta-platform.o \
 				   malta-reset.o malta-setup.o malta-time.o
 
 obj-$(CONFIG_EARLY_PRINTK)	+= malta-console.o
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
index 5b548b5..2cc72c9 100644
--- a/arch/mips/mti-malta/Platform
+++ b/arch/mips/mti-malta/Platform
@@ -3,5 +3,9 @@
 #
 platform-$(CONFIG_MIPS_MALTA)	+= mti-malta/
 cflags-$(CONFIG_MIPS_MALTA)	+= -I$(srctree)/arch/mips/include/asm/mach-malta
-load-$(CONFIG_MIPS_MALTA)	+= 0xffffffff80100000
+ifdef CONFIG_KVM_GUEST
+    load-$(CONFIG_MIPS_MALTA)	+= 0x0000000040100000
+else
+    load-$(CONFIG_MIPS_MALTA)	+= 0xffffffff80100000
+endif
 all-$(CONFIG_MIPS_MALTA)	:= $(COMPRESSION_FNAME).bin
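
The split load address reflects how a KVM trap-and-emulate guest runs: the
guest kernel executes from the (virtualized) user segment rather than KSEG0,
so it has to be linked at a mapped useg address.  Both values place the kernel
at the same 1 MiB physical offset (the macro names below are illustrative
only):

	#define MALTA_LOAD_KSEG0     0xffffffff80100000UL /* phys | KSEG0, sign-extended */
	#define MALTA_LOAD_KVM_USEG  0x0000000040100000UL /* mapped useg, per the Platform file */
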
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644
index 5576a30..0000000
--- a/arch/mips/mti-malta/malta-cmdline.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-	return &(arcs_cmdline[0]);
-}
-
-
-void  __init prom_init_cmdline(void)
-{
-	char *cp;
-	int actr;
-
-	actr = 1; /* Always ignore argv[0] */
-
-	cp = &(arcs_cmdline[0]);
-	while(actr < prom_argc) {
-		strcpy(cp, prom_argv(actr));
-		cp += strlen(prom_argv(actr));
-		*cp++ = ' ';
-		actr++;
-	}
-	if (cp != &(arcs_cmdline[0])) {
-		/* get rid of trailing space */
-		--cp;
-		*cp = '\0';
-	}
-}
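
The deleted prom_argv() macro encoded a subtle point: YAMON is a 32-bit PROM,
so argv/envp arrive as arrays of 32-bit values, and a 64-bit kernel must
sign-extend them to recover usable pointers.  A minimal sketch of the idiom
(yamon_ptr() is a hypothetical name):

	#include <stdint.h>

	/* Sign-extend a 32-bit YAMON handle into a 64-bit kernel pointer. */
	static inline char *yamon_ptr(int32_t v)
	{
		/* (long) sign-extends; casting a u32 would zero-extend. */
		return (char *)(long)v;
	}
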
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 9bc58a2..d4f8071 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -1,28 +1,20 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * Display routines for displaying messages on the MIPS boards' ASCII display.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
-
 #include <linux/compiler.h>
 #include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 extern const char display_string[];
 static unsigned int display_count;
@@ -36,11 +28,11 @@
 	if (unlikely(display == NULL))
 		display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
 
-	for (i = 0; i <= 14; i=i+2) {
-		 if (*str)
-			 __raw_writel(*str++, display + i);
-		 else
-			 __raw_writel(' ', display + i);
+	for (i = 0; i <= 14; i += 2) {
+		if (*str)
+			__raw_writel(*str++, display + i);
+		else
+			__raw_writel(' ', display + i);
 	}
 }
 
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index c2cbce9..ff8caff 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -1,54 +1,28 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005	 MIPS Technologies, Inc.
- *	All rights reserved.
- *	Authors: Carsten Langgaard <carstenl@mips.com>
- *		 Maciej W. Rozycki <macro@mips.com>
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library initialisation code.
+ *
+ * Copyright (C) 1999,2000,2004,2005,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Maciej W. Rozycki <macro@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/bootinfo.h>
-#include <asm/gt64120.h>
-#include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/smp-ops.h>
 #include <asm/traps.h>
-
+#include <asm/fw/fw.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/bonito64.h>
-#include <asm/mips-boards/msc01_pci.h>
-
 #include <asm/mips-boards/malta.h>
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension, if running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-int init_debug;
-
 static int mips_revision_corid;
 int mips_revision_sconid;
 
@@ -62,74 +36,6 @@
 /* MIPS System controller register base */
 unsigned long _pcictrl_msc;
 
-char *prom_getenv(char *envname)
-{
-	/*
-	 * Return a pointer to the given environment variable.
-	 * In 64-bit mode: we're using 64-bit pointers, but all pointers
-	 * in the PROM structures are only 32-bit, so we need some
-	 * workarounds, if we are running in 64-bit mode.
-	 */
-	int i, index=0;
-
-	i = strlen(envname);
-
-	while (prom_envp(index)) {
-		if(strncmp(envname, prom_envp(index), i) == 0) {
-			return(prom_envp(index+1));
-		}
-		index += 2;
-	}
-
-	return NULL;
-}
-
-static inline unsigned char str2hexnum(unsigned char c)
-{
-	if (c >= '0' && c <= '9')
-		return c - '0';
-	if (c >= 'a' && c <= 'f')
-		return c - 'a' + 10;
-	return 0; /* foo */
-}
-
-static inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
-	int i;
-
-	for (i = 0; i < 6; i++) {
-		unsigned char num;
-
-		if((*str == '.') || (*str == ':'))
-			str++;
-		num = str2hexnum(*str++) << 4;
-		num |= (str2hexnum(*str++));
-		ea[i] = num;
-	}
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
-	char *ethaddr_str;
-
-	ethaddr_str = prom_getenv("ethaddr");
-	if (!ethaddr_str) {
-		printk("ethaddr not set in boot prom\n");
-		return -1;
-	}
-	str2eaddr(ethernet_addr, ethaddr_str);
-
-	if (init_debug > 1) {
-		int i;
-		printk("get_ethernet_addr: ");
-		for (i=0; i<5; i++)
-			printk("%02x:", (unsigned char)*(ethernet_addr+i));
-		printk("%02x\n", *(ethernet_addr+i));
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 static void __init console_config(void)
 {
@@ -138,17 +44,23 @@
 	char parity = '\0', bits = '\0', flow = '\0';
 	char *s;
 
-	if ((strstr(prom_getcmdline(), "console=")) == NULL) {
-		s = prom_getenv("modetty0");
+	if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+		s = fw_getenv("modetty0");
 		if (s) {
 			while (*s >= '0' && *s <= '9')
 				baud = baud*10 + *s++ - '0';
-			if (*s == ',') s++;
-			if (*s) parity = *s++;
-			if (*s == ',') s++;
-			if (*s) bits = *s++;
-			if (*s == ',') s++;
-			if (*s == 'h') flow = 'r';
+			if (*s == ',')
+				s++;
+			if (*s)
+				parity = *s++;
+			if (*s == ',')
+				s++;
+			if (*s)
+				bits = *s++;
+			if (*s == ',')
+				s++;
+			if (*s == 'h')
+				flow = 'r';
 		}
 		if (baud == 0)
 			baud = 38400;
@@ -158,8 +70,9 @@
 			bits = '8';
 		if (flow == '\0')
 			flow = 'r';
-		sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
-		strcat(prom_getcmdline(), console_string);
+		sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+			parity, bits, flow);
+		strcat(fw_getcmdline(), console_string);
 		pr_info("Config serial console:%s\n", console_string);
 	}
 }
@@ -193,10 +106,6 @@
 
 void __init prom_init(void)
 {
-	prom_argc = fw_arg0;
-	_prom_argv = (int *) fw_arg1;
-	_prom_envp = (int *) fw_arg2;
-
 	mips_display_message("LINUX");
 
 	/*
@@ -306,7 +215,7 @@
 	case MIPS_REVISION_SCON_SOCIT:
 	case MIPS_REVISION_SCON_ROCIT:
 		_pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
-	mips_pci_controller:
+mips_pci_controller:
 		mb();
 		MSC_READ(MSC01_PCI_CFG, data);
 		MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@
 	default:
 		/* Unknown system controller */
 		mips_display_message("SC Error");
-		while (1);   /* We die here... */
+		while (1);	/* We die here... */
 	}
 	board_nmi_handler_setup = mips_nmi_setup;
 	board_ejtag_handler_setup = mips_ejtag_setup;
 
-	prom_init_cmdline();
-	prom_meminit();
+	fw_init_cmdline();
+	fw_meminit();
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 	console_config();
 #endif
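
console_config() above turns YAMON's modetty0 environment variable into a
console= argument.  The same parsing as a standalone userspace sketch (the
modetty0 value is assumed for illustration):

	#include <stdio.h>

	int main(void)
	{
		const char *modetty0 = "38400,n,8,hw"; /* assumed YAMON value */
		int baud;
		char parity, bits, flow;

		if (sscanf(modetty0, "%d,%c,%c,%c",
			   &baud, &parity, &bits, &flow) != 4)
			return 1;
		if (flow == 'h')	/* hardware handshake maps to RTS/CTS */
			flow = 'r';
		printf(" console=ttyS0,%d%c%c%c\n", baud, parity, bits, flow);
		return 0;		/* prints " console=ttyS0,38400n8r" */
	}
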
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index e364af7..0a1339a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -47,7 +47,6 @@
 #include <asm/setup.h>
 
 int gcmp_present = -1;
-int gic_present;
 static unsigned long _msc01_biu_base;
 static unsigned long _gcmp_base;
 static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@
 {
 	int irq;
 
+	if (gic_compare_int())
+		do_IRQ(MIPS_GIC_IRQ_BASE);
+
 	irq = gic_get_int();
 	if (irq < 0)
 		return;	 /* interrupt has already been cleared */
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index f3d43aa..1f73d63 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -1,73 +1,45 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library functions for acquiring/using memory descriptors given to
  * us by YAMON.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
-#include <linux/mm.h>
 #include <linux/bootmem.h>
-#include <linux/pfn.h>
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
-#include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/fw/fw.h>
 
-#include <asm/mips-boards/prom.h>
-
-/*#define DEBUG*/
-
-enum yamon_memtypes {
-	yamon_dontuse,
-	yamon_prom,
-	yamon_free,
-};
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-#ifdef DEBUG
-static char *mtypes[3] = {
-	"Dont use memory",
-	"YAMON PROM memory",
-	"Free memory",
-};
-#endif
+static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
 
 /* determined physical memory size, not overridden by command line args	 */
 unsigned long physical_memsize = 0L;
 
-static struct prom_pmemblock * __init prom_getmdesc(void)
+fw_memblock_t * __init fw_getmdesc(void)
 {
-	char *memsize_str;
+	char *memsize_str, *ptr;
 	unsigned int memsize;
-	char *ptr;
 	static char cmdline[COMMAND_LINE_SIZE] __initdata;
+	long val;
+	int tmp;
 
 	/* otherwise look in the environment */
-	memsize_str = prom_getenv("memsize");
+	memsize_str = fw_getenv("memsize");
 	if (!memsize_str) {
-		printk(KERN_WARNING
-		       "memsize not set in boot prom, set to default (32Mb)\n");
+		pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
 		physical_memsize = 0x02000000;
 	} else {
-#ifdef DEBUG
-		pr_debug("prom_memsize = %s\n", memsize_str);
-#endif
-		physical_memsize = simple_strtol(memsize_str, NULL, 0);
+		tmp = kstrtol(memsize_str, 0, &val);
+		physical_memsize = (unsigned long)val;
 	}
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@
 
 	memset(mdesc, 0, sizeof(mdesc));
 
-	mdesc[0].type = yamon_dontuse;
+	mdesc[0].type = fw_dontuse;
 	mdesc[0].base = 0x00000000;
 	mdesc[0].size = 0x00001000;
 
-	mdesc[1].type = yamon_prom;
+	mdesc[1].type = fw_code;
 	mdesc[1].base = 0x00001000;
 	mdesc[1].size = 0x000ef000;
 
@@ -105,55 +77,45 @@
  * This means that this area can't be used as DMA memory for PCI
 	 * devices.
 	 */
-	mdesc[2].type = yamon_dontuse;
+	mdesc[2].type = fw_dontuse;
 	mdesc[2].base = 0x000f0000;
 	mdesc[2].size = 0x00010000;
 
-	mdesc[3].type = yamon_dontuse;
+	mdesc[3].type = fw_dontuse;
 	mdesc[3].base = 0x00100000;
-	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;
+	mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
+		mdesc[3].base;
 
-	mdesc[4].type = yamon_free;
+	mdesc[4].type = fw_free;
 	mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
 	mdesc[4].size = memsize - mdesc[4].base;
 
 	return &mdesc[0];
 }
 
-static int __init prom_memtype_classify(unsigned int type)
+static int __init fw_memtype_classify(unsigned int type)
 {
 	switch (type) {
-	case yamon_free:
+	case fw_free:
 		return BOOT_MEM_RAM;
-	case yamon_prom:
+	case fw_code:
 		return BOOT_MEM_ROM_DATA;
 	default:
 		return BOOT_MEM_RESERVED;
 	}
 }
 
-void __init prom_meminit(void)
+void __init fw_meminit(void)
 {
-	struct prom_pmemblock *p;
+	fw_memblock_t *p;
 
-#ifdef DEBUG
-	pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
-	p = prom_getmdesc();
-	while (p->size) {
-		int i = 0;
-		pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
-			 i, p, p->base, p->size, mtypes[p->type]);
-		p++;
-		i++;
-	}
-#endif
-	p = prom_getmdesc();
+	p = fw_getmdesc();
 
 	while (p->size) {
 		long type;
 		unsigned long base, size;
 
-		type = prom_memtype_classify(p->type);
+		type = fw_memtype_classify(p->type);
 		base = p->base;
 		size = p->size;
 
@@ -172,7 +134,7 @@
 			continue;
 
 		addr = boot_mem_map.map[i].addr;
-		free_init_pages("prom memory",
+		free_init_pages("YAMON memory",
 				addr, addr + boot_mem_map.map[i].size);
 	}
 }
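
Taken together, fw_getmdesc() above describes this YAMON layout (the "use"
column is informal, inferred from the surrounding comments):

	/*
	 *   base        size        type        use
	 *   0x00000000  0x00001000  fw_dontuse  low memory
	 *   0x00001000  0x000ef000  fw_code     YAMON code and data
	 *   0x000f0000  0x00010000  fw_dontuse  YAMON environment, no PCI DMA
	 *   0x00100000  to _end     fw_dontuse  kernel image
	 *   _end        remainder   fw_free     usable RAM
	 */

fw_memtype_classify() then folds these into the generic boot types: fw_free
becomes BOOT_MEM_RAM, fw_code becomes BOOT_MEM_ROM_DATA, and everything else
is BOOT_MEM_RESERVED.
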
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 200f64d..c72a069 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -25,13 +25,13 @@
 #include <linux/screen_info.h>
 #include <linux/time.h>
 
-#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
 #include <asm/dma.h>
 #include <asm/traps.h>
+#include <asm/gcmpregs.h>
 #ifdef CONFIG_VT
 #include <linux/console.h>
 #endif
@@ -105,6 +105,66 @@
 }
 #endif
 
+static int __init plat_enable_iocoherency(void)
+{
+	int supported = 0;
+	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+		if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+			pr_info("Enabled Bonito CPU coherency\n");
+			supported = 1;
+		}
+		if (strstr(fw_getcmdline(), "iobcuncached")) {
+			BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
+			BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+				~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+				  BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+			pr_info("Disabled Bonito IOBC coherency\n");
+		} else {
+			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
+			BONITO_PCIMEMBASECFG |=
+				(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+				 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+			pr_info("Enabled Bonito IOBC coherency\n");
+		}
+	} else if (gcmp_niocu() != 0) {
+		/* Nothing special needs to be done to enable coherency */
+		pr_info("CMP IOCU detected\n");
+		if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+			return 0;
+		}
+		supported = 1;
+	}
+	hw_coherentio = supported;
+	return supported;
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+#ifdef CONFIG_DMA_NONCOHERENT
+	/*
+	 * Kernel has been configured with software coherency
+	 * but we might choose to turn it off and use hardware
+	 * coherency instead.
+	 */
+	if (plat_enable_iocoherency()) {
+		if (coherentio == 0)
+			pr_info("Hardware DMA cache coherency disabled\n");
+		else
+			pr_info("Hardware DMA cache coherency enabled\n");
+	} else {
+		if (coherentio == 1)
+			pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+		else
+			pr_info("Software DMA cache coherency enabled\n");
+	}
+#else
+	if (!plat_enable_iocoherency())
+		panic("Hardware DMA cache coherency not supported!");
+#endif
+}
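
The interaction between the hardware probe and the user's coherentio override
amounts to a small decision table (a sketch, matching the pr_info/panic
messages above):

	/*
	 *   plat_enable_iocoherency()  coherentio  result
	 *   1                          1           hardware coherency enabled
	 *   1                          0           hardware coherency disabled
	 *   0                          1           software, with a warning
	 *   0                          0           software coherency
	 */
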
+
 #ifdef CONFIG_BLK_DEV_IDE
 static void __init pci_clock_check(void)
 {
@@ -115,16 +175,15 @@
 		33, 20, 25, 30, 12, 16, 37, 10
 	};
 	int pciclock = pciclocks[jmpr];
-	char *argptr = prom_getcmdline();
+	char *argptr = fw_getcmdline();
 
 	if (pciclock != 33 && !strstr(argptr, "idebus=")) {
-		printk(KERN_WARNING "WARNING: PCI clock is %dMHz, "
-				"setting idebus\n", pciclock);
+		pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
+			pciclock);
 		argptr += strlen(argptr);
 		sprintf(argptr, " idebus=%d", pciclock);
 		if (pciclock < 20 || pciclock > 66)
-			printk(KERN_WARNING "WARNING: IDE timing "
-					"calculations will be incorrect\n");
+			pr_warn("WARNING: IDE timing calculations will be incorrect\n");
 	}
 }
 #endif
@@ -153,31 +212,31 @@
 {
 	char *argptr;
 
-	argptr = prom_getcmdline();
+	argptr = fw_getcmdline();
 	if (strstr(argptr, "debug")) {
 		BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
-		printk(KERN_INFO "Enabled Bonito debug mode\n");
+		pr_info("Enabled Bonito debug mode\n");
 	} else
 		BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
 
 #ifdef CONFIG_DMA_COHERENT
 	if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
 		BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
-		printk(KERN_INFO "Enabled Bonito CPU coherency\n");
+		pr_info("Enabled Bonito CPU coherency\n");
 
-		argptr = prom_getcmdline();
+		argptr = fw_getcmdline();
 		if (strstr(argptr, "iobcuncached")) {
 			BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
 			BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
 				~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
 					BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-			printk(KERN_INFO "Disabled Bonito IOBC coherency\n");
+			pr_info("Disabled Bonito IOBC coherency\n");
 		} else {
 			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
 			BONITO_PCIMEMBASECFG |=
 				(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
 					BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-			printk(KERN_INFO "Enabled Bonito IOBC coherency\n");
+			pr_info("Enabled Bonito IOBC coherency\n");
 		}
 	} else
 		panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@
 	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
 		bonito_quirks_setup();
 
+	plat_setup_iocoherency();
+
 #ifdef CONFIG_BLK_DEV_IDE
 	pci_clock_check();
 #endif
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index a144b89..0ad305f 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -39,12 +39,9 @@
 #include <asm/gic.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
 #include <asm/mips-boards/maltaint.h>
 
 unsigned long cpu_khz;
-int gic_frequency;
 
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@
 {
 	unsigned long flags;
 	unsigned int count, start;
+#ifdef CONFIG_IRQ_GIC
 	unsigned int giccount = 0, gicstart = 0;
+#endif
+
+#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_KVM_HOST_FREQ)
+	unsigned int prid = read_c0_prid() & 0xffff00;
+
+	/*
+	 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
+	 */
+	count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
+	if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+		count *= 2;
+
+	mips_hpt_frequency = count;
+	return;
+#endif
 
 	local_irq_save(flags);
 
@@ -84,26 +98,32 @@
 
 	/* Initialize counters. */
 	start = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
 	if (gic_present)
 		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
+#endif
 
 	/* Read counter exactly on falling edge of update flag. */
 	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
 	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 
 	count = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
 	if (gic_present)
 		GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
+#endif
 
 	local_irq_restore(flags);
 
 	count -= start;
-	if (gic_present)
-		giccount -= gicstart;
-
 	mips_hpt_frequency = count;
-	if (gic_present)
+
+#ifdef CONFIG_IRQ_GIC
+	if (gic_present) {
+		giccount -= gicstart;
 		gic_frequency = giccount;
+	}
+#endif
 }
 
 void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@
 	    (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
 		freq *= 2;
 	freq = freqround(freq, 5000);
-	pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+	printk("CPU frequency %d.%02d MHz\n", freq/1000000,
 	       (freq%1000000)*100/1000000);
 	cpu_khz = freq / 1000;
 
-	if (gic_present) {
-		freq = freqround(gic_frequency, 5000);
-		pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
-		       (freq%1000000)*100/1000000);
-		gic_clocksource_init(gic_frequency);
-	} else
-		init_r4k_clocksource();
+	mips_scroll_message();
 
 #ifdef CONFIG_I8253
 	/* Only Malta has a PIT. */
 	setup_pit_timer();
 #endif
 
-	mips_scroll_message();
+#ifdef CONFIG_IRQ_GIC
+	if (gic_present) {
+		freq = freqround(gic_frequency, 5000);
+		printk("GIC frequency %d.%02d MHz\n", freq/1000000,
+		       (freq%1000000)*100/1000000);
+#ifdef CONFIG_CSRC_GIC
+		gic_clocksource_init(gic_frequency);
+#endif
+	}
+#endif
 
 	plat_perf_setup();
 }
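
The calibration idea in estimate_frequencies() is that the CP0 Count (and GIC
counter) delta across exactly one RTC update period, that is one second, is
the counter frequency in Hz.  In miniature (sync_to_rtc_update() is a
hypothetical stand-in for the RTC_UIP polling loops):

	/* Conceptual sketch only, not the kernel's code. */
	sync_to_rtc_update();		/* wait for an RTC update edge */
	start = read_c0_count();
	sync_to_rtc_update();		/* exactly one second later */
	mips_hpt_frequency = read_c0_count() - start;	/* ticks/s == Hz */
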
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 10ec701..be11420 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -8,10 +8,10 @@
 # Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 # Steven J. Hill <sjhill@mips.com>
 #
-obj-y				:= sead3-lcd.o sead3-cmdline.o \
-				   sead3-display.o sead3-init.o sead3-int.o \
-				   sead3-mtd.o sead3-net.o sead3-platform.o \
-				   sead3-reset.o sead3-setup.o sead3-time.o
+obj-y				:= sead3-lcd.o sead3-display.o sead3-init.o \
+				   sead3-int.o sead3-mtd.o sead3-net.o \
+				   sead3-platform.o sead3-reset.o \
+				   sead3-setup.o sead3-time.o
 
 obj-y				+= sead3-i2c-dev.o sead3-i2c.o \
 				   sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index 322148c..0a168c9 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -34,33 +34,15 @@
 static struct led_classdev sead3_pled = {
 	.name		= "sead3::pled",
 	.brightness_set = sead3_pled_set,
+	.flags		= LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev sead3_fled = {
 	.name		= "sead3::fled",
 	.brightness_set = sead3_fled_set,
+	.flags		= LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int sead3_led_suspend(struct platform_device *dev,
-		pm_message_t state)
-{
-	led_classdev_suspend(&sead3_pled);
-	led_classdev_suspend(&sead3_fled);
-	return 0;
-}
-
-static int sead3_led_resume(struct platform_device *dev)
-{
-	led_classdev_resume(&sead3_pled);
-	led_classdev_resume(&sead3_fled);
-	return 0;
-}
-#else
-#define sead3_led_suspend NULL
-#define sead3_led_resume NULL
-#endif
-
 static int sead3_led_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -86,8 +68,6 @@
 static struct platform_driver sead3_led_driver = {
 	.probe		= sead3_led_probe,
 	.remove		= sead3_led_remove,
-	.suspend	= sead3_led_suspend,
-	.resume		= sead3_led_resume,
 	.driver		= {
 		.name		= DRVNAME,
 		.owner		= THIS_MODULE,
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644
index a2e6cec..0000000
--- a/arch/mips/mti-sead3/sead3-cmdline.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-	return &(arcs_cmdline[0]);
-}
-
-void  __init prom_init_cmdline(void)
-{
-	char *cp;
-	int actr;
-
-	actr = 1; /* Always ignore argv[0] */
-
-	cp = &(arcs_cmdline[0]);
-	while (actr < prom_argc) {
-		strcpy(cp, prom_argv(actr));
-		cp += strlen(prom_argv(actr));
-		*cp++ = ' ';
-		actr++;
-	}
-	if (cp != &(arcs_cmdline[0])) {
-		/* get rid of trailing space */
-		--cp;
-		*cp = '\0';
-	}
-}
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
index 2ddef19..031f47d 100644
--- a/arch/mips/mti-sead3/sead3-console.c
+++ b/arch/mips/mti-sead3/sead3-console.c
@@ -26,7 +26,7 @@
 	__raw_writel(value, PORT(base_addr, offset));
 }
 
-void __init prom_init_early_console(char port)
+void __init fw_init_early_console(char port)
 {
 	console_port = port;
 }
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
index e389326..9487599 100644
--- a/arch/mips/mti-sead3/sead3-display.c
+++ b/arch/mips/mti-sead3/sead3-display.c
@@ -8,7 +8,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 static unsigned int display_count;
 static unsigned int max_display_count;
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index f95abaa..bfbd17b 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -12,38 +12,51 @@
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
-extern void prom_init_early_console(char port);
+#include <asm/fw/fw.h>
 
 extern char except_vec_nmi;
 extern char except_vec_ejtag_debug;
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void __init console_config(void)
 {
-	/*
-	 * Return a pointer to the given environment variable.
-	 * In 64-bit mode: we're using 64-bit pointers, but all pointers
-	 * in the PROM structures are only 32-bit, so we need some
-	 * workarounds, if we are running in 64-bit mode.
-	 */
-	int i, index = 0;
+	char console_string[40];
+	int baud = 0;
+	char parity = '\0', bits = '\0', flow = '\0';
+	char *s;
 
-	i = strlen(envname);
-
-	while (prom_envp(index)) {
-		if (strncmp(envname, prom_envp(index), i) == 0)
-			return prom_envp(index+1);
-		index += 2;
+	if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+		s = fw_getenv("modetty0");
+		if (s) {
+			while (*s >= '0' && *s <= '9')
+				baud = baud*10 + *s++ - '0';
+			if (*s == ',')
+				s++;
+			if (*s)
+				parity = *s++;
+			if (*s == ',')
+				s++;
+			if (*s)
+				bits = *s++;
+			if (*s == ',')
+				s++;
+			if (*s == 'h')
+				flow = 'r';
+		}
+		if (baud == 0)
+			baud = 38400;
+		if (parity != 'n' && parity != 'o' && parity != 'e')
+			parity = 'n';
+		if (bits != '7' && bits != '8')
+			bits = '8';
+		if (flow == '\0')
+			flow = 'r';
+		sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+			parity, bits, flow);
+		strcat(fw_getcmdline(), console_string);
 	}
-
-	return NULL;
 }
+#endif
 
 static void __init mips_nmi_setup(void)
 {
@@ -52,7 +65,41 @@
 	base = cpu_has_veic ?
 		(void *)(CAC_BASE + 0xa80) :
 		(void *)(CAC_BASE + 0x380);
+#ifdef CONFIG_CPU_MICROMIPS
+	/*
+	 * Decrement the exception vector address by one for microMIPS.
+	 */
+	memcpy(base, (&except_vec_nmi - 1), 0x80);
+
+	/*
+	 * This is a hack. We do not know if the boot loader was built with
+	 * microMIPS instructions or not. If it was not, the NMI exception
+	 * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
+	 * assembly below forces us into microMIPS mode if we are a pure
+	 * microMIPS kernel. The assembly instructions are:
+	 *
+	 *  3C1A8000   lui       k0,0x8000
+	 *  375A0381   ori       k0,k0,0x381
+	 *  03400008   jr        k0
+	 *  00000000   nop
+	 *
+	 * The mode switch occurs by jumping to the unaligned exception
+	 * vector address at 0x80000381 which would have been 0x80000380
+	 * in MIPS32 mode. The jump to the unaligned address transitions
+	 * us into microMIPS mode.
+	 */
+	if (!cpu_has_veic) {
+		void *base2 = (void *)(CAC_BASE + 0xa80);
+		*((unsigned int *)base2) = 0x3c1a8000;
+		*((unsigned int *)base2 + 1) = 0x375a0381;
+		*((unsigned int *)base2 + 2) = 0x03400008;
+		*((unsigned int *)base2 + 3) = 0x00000000;
+		flush_icache_range((unsigned long)base2,
+			(unsigned long)base2 + 0x10);
+	}
+#else
 	memcpy(base, &except_vec_nmi, 0x80);
+#endif
 	flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
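
Why 0x80000381 and not 0x80000380: on microMIPS-capable cores, bit 0 of a
jump-register target selects the ISA mode, so "jr k0" with the odd address
both enters the vector and switches the core to microMIPS.  In outline:

	/* Illustrative only: how the odd target encodes the ISA switch. */
	unsigned long vec    = 0x80000380;	/* NMI vector, MIPS32 view */
	unsigned long target = vec | 1;		/* bit 0 set: run as microMIPS */
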
 
@@ -63,29 +110,40 @@
 	base = cpu_has_veic ?
 		(void *)(CAC_BASE + 0xa00) :
 		(void *)(CAC_BASE + 0x300);
+#ifdef CONFIG_CPU_MICROMIPS
+	/* Deja vu... */
+	memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
+	if (!cpu_has_veic) {
+		void *base2 = (void *)(CAC_BASE + 0xa00);
+		*((unsigned int *)base2) = 0x3c1a8000;
+		*((unsigned int *)base2 + 1) = 0x375a0301;
+		*((unsigned int *)base2 + 2) = 0x03400008;
+		*((unsigned int *)base2 + 3) = 0x00000000;
+		flush_icache_range((unsigned long)base2,
+			(unsigned long)base2 + 0x10);
+	}
+#else
 	memcpy(base, &except_vec_ejtag_debug, 0x80);
+#endif
 	flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
 void __init prom_init(void)
 {
-	prom_argc = fw_arg0;
-	_prom_argv = (int *) fw_arg1;
-	_prom_envp = (int *) fw_arg2;
-
 	board_nmi_handler_setup = mips_nmi_setup;
 	board_ejtag_handler_setup = mips_ejtag_setup;
 
-	prom_init_cmdline();
+	fw_init_cmdline();
 #ifdef CONFIG_EARLY_PRINTK
-	if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
-		prom_init_early_console(0);
-	else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL)
-		prom_init_early_console(1);
+	if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
+		fw_init_early_console(0);
+	else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
+		fw_init_early_console(1);
 #endif
 #ifdef CONFIG_SERIAL_8250_CONSOLE
-	if ((strstr(prom_getcmdline(), "console=")) == NULL)
-		strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
+	if ((strstr(fw_getcmdline(), "console=")) == NULL)
+		strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
+	console_config();
 #endif
 }
 
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
index e26e082..6a560ac 100644
--- a/arch/mips/mti-sead3/sead3-int.c
+++ b/arch/mips/mti-sead3/sead3-int.c
@@ -20,7 +20,6 @@
 #define SEAD_CONFIG_BASE		0x1b100110
 #define SEAD_CONFIG_SIZE		4
 
-int gic_present;
 static unsigned long sead3_config_reg;
 
 /*
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index f012fd1..b5059dc 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -11,10 +11,6 @@
 #include <linux/bootmem.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/prom.h>
-
-int coherentio;		/* 0 => no DMA cache coherency (may be set by user) */
-int hw_coherentio;	/* 0 => no HW DMA cache coherency (reflects real HW) */
 
 const char *get_system_type(void)
 {
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 239e4e3..96b42eb 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -11,7 +11,6 @@
 #include <asm/time.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 unsigned long cpu_khz;
 
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 3c05bf9..e0873a3 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -2,13 +2,22 @@
 
 if NLM_XLP_BOARD
 config DT_XLP_EVP
-	bool "Built-in device tree for XLP EVP/SVP boards"
+	bool "Built-in device tree for XLP EVP boards"
 	default y
 	help
-	  Add an FDT blob for XLP EVP and SVP boards into the kernel.
+	  Add an FDT blob for XLP EVP boards into the kernel.
 	  This DTB will be used if the firmware does not pass in a DTB
-          pointer to the kernel.  The corresponding DTS file is at
-          arch/mips/netlogic/dts/xlp_evp.dts
+	  pointer to the kernel.  The corresponding DTS file is at
+	  arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+	bool "Built-in device tree for XLP SVP boards"
+	default y
+	help
+	  Add an FDT blob for XLP SVP boards into the kernel.
+	  This DTB will be used if the firmware does not pass in a DTB
+	  pointer to the kernel.  The corresponding DTS file is at
+	  arch/mips/netlogic/dts/xlp_svp.dts
 
 config NLM_MULTINODE
 	bool "Support for multi-chip boards"
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 2bb95dcf..ffba524 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -148,8 +148,7 @@
 int nlm_cpu_ready[NR_CPUS];
 unsigned long nlm_next_gp;
 unsigned long nlm_next_sp;
-
-cpumask_t phys_cpu_present_map;
+static cpumask_t phys_cpu_present_mask;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
@@ -169,11 +168,12 @@
 {
 	unsigned int boot_cpu;
 	int num_cpus, i, ncore;
+	char buf[64];
 
 	boot_cpu = hard_smp_processor_id();
-	cpumask_clear(&phys_cpu_present_map);
+	cpumask_clear(&phys_cpu_present_mask);
 
-	cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
+	cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
 	__cpu_number_map[boot_cpu] = 0;
 	__cpu_logical_map[0] = boot_cpu;
 	set_cpu_possible(0, true);
@@ -185,7 +185,7 @@
 		 * it is only set for ASPs (see smpboot.S)
 		 */
 		if (nlm_cpu_ready[i]) {
-			cpumask_set_cpu(i, &phys_cpu_present_map);
+			cpumask_set_cpu(i, &phys_cpu_present_mask);
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
 			set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@
 		}
 	}
 
+	cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
+	pr_info("Physical CPU mask: %s\n", buf);
+	cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
+	pr_info("Possible CPU mask: %s\n", buf);
+
 	/* check against the cores we have woken up */
 	for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
 		ncore += hweight32(nlm_get_node(i)->coremask);
 
-	pr_info("Phys CPU present map: %lx, possible map %lx\n",
-		(unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
-		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
-
 	pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
 		nlm_threads_per_core, num_cpus);
+
+	/* switch NMI handler to boot CPUs */
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
diff --git a/arch/mips/netlogic/dts/Makefile b/arch/mips/netlogic/dts/Makefile
index d117d46..aecb6fa 100644
--- a/arch/mips/netlogic/dts/Makefile
+++ b/arch/mips/netlogic/dts/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
+obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index 7628b54..e14f423 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -20,7 +20,7 @@
 		#address-cells = <2>;
 		#size-cells = <1>;
 		compatible = "simple-bus";
-		ranges = <0 0  0 0x18000000  0x04000000	  // PCIe CFG
+		ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
 			  1 0  0 0x16000000  0x01000000>; // GBU chipselects
 
 		serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644
index 0000000..8af4bdb
--- /dev/null
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -0,0 +1,124 @@
+/*
+ * XLP3XX Device Tree Source for SVP boards
+ */
+
+/dts-v1/;
+/ {
+	model = "netlogic,XLP-SVP";
+	compatible = "netlogic,xlp";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	memory {
+		device_type = "memory";
+		reg =  <0 0x00100000 0 0x0FF00000	// 255M at 1M
+			0 0x20000000 0 0xa0000000	// 2560M at 512M
+			0 0xe0000000 0 0x40000000>;
+	};
+
+	soc {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+			  1 0  0 0x16000000  0x01000000>; // GBU chipselects
+
+		serial0: serial@30000 {
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <0 0x30100 0xa00>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <133333333>;
+			interrupt-parent = <&pic>;
+			interrupts = <17>;
+		};
+		serial1: serial@31000 {
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <0 0x31100 0xa00>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <133333333>;
+			interrupt-parent = <&pic>;
+			interrupts = <18>;
+		};
+		i2c0: ocores@32000 {
+			compatible = "opencores,i2c-ocores";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x32100 0xa00>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <32000000>;
+			interrupt-parent = <&pic>;
+			interrupts = <30>;
+		};
+		i2c1: ocores@33000 {
+			compatible = "opencores,i2c-ocores";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0 0x33100 0xa00>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <32000000>;
+			interrupt-parent = <&pic>;
+			interrupts = <31>;
+
+			rtc@68 {
+				compatible = "dallas,ds1374";
+				reg = <0x68>;
+			};
+
+			dtt@4c {
+				compatible = "national,lm90";
+				reg = <0x4c>;
+			};
+		};
+		pic: pic@4000 {
+			interrupt-controller;
+			#address-cells = <0>;
+			#interrupt-cells = <1>;
+			reg = <0 0x4000 0x200>;
+		};
+
+		nor_flash@1,0 {
+			compatible = "cfi-flash";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			bank-width = <2>;
+			reg = <1 0 0x1000000>;
+
+			partition@0 {
+				label = "x-loader";
+				reg = <0x0 0x100000>; /* 1M */
+				read-only;
+			};
+
+			partition@100000 {
+				label = "u-boot";
+				reg = <0x100000 0x100000>; /* 1M */
+			};
+
+			partition@200000 {
+				label = "kernel";
+				reg = <0x200000 0x500000>; /* 5M */
+			};
+
+			partition@700000 {
+				label = "rootfs";
+				reg = <0x700000 0x800000>; /* 8M */
+			};
+
+			partition@f00000 {
+				label = "env";
+				reg = <0xf00000 0x100000>; /* 1M */
+				read-only;
+			};
+		};
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+	};
+};
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index c68fd40..87560e4 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -61,43 +61,61 @@
 
 int nlm_irq_to_irt(int irq)
 {
-	if (!PIC_IRQ_IS_IRT(irq))
-		return -1;
+	uint64_t pcibase;
+	int devoff, irt;
 
 	switch (irq) {
 	case PIC_UART_0_IRQ:
-		return PIC_IRT_UART_0_INDEX;
+		devoff = XLP_IO_UART0_OFFSET(0);
+		break;
 	case PIC_UART_1_IRQ:
-		return PIC_IRT_UART_1_INDEX;
-	case PIC_PCIE_LINK_0_IRQ:
-	       return PIC_IRT_PCIE_LINK_0_INDEX;
-	case PIC_PCIE_LINK_1_IRQ:
-	       return PIC_IRT_PCIE_LINK_1_INDEX;
-	case PIC_PCIE_LINK_2_IRQ:
-	       return PIC_IRT_PCIE_LINK_2_INDEX;
-	case PIC_PCIE_LINK_3_IRQ:
-	       return PIC_IRT_PCIE_LINK_3_INDEX;
+		devoff = XLP_IO_UART1_OFFSET(0);
+		break;
 	case PIC_EHCI_0_IRQ:
-	       return PIC_IRT_EHCI_0_INDEX;
+		devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+		break;
 	case PIC_EHCI_1_IRQ:
-	       return PIC_IRT_EHCI_1_INDEX;
+		devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+		break;
 	case PIC_OHCI_0_IRQ:
-	       return PIC_IRT_OHCI_0_INDEX;
+		devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+		break;
 	case PIC_OHCI_1_IRQ:
-	       return PIC_IRT_OHCI_1_INDEX;
+		devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+		break;
 	case PIC_OHCI_2_IRQ:
-	       return PIC_IRT_OHCI_2_INDEX;
+		devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+		break;
 	case PIC_OHCI_3_IRQ:
-	       return PIC_IRT_OHCI_3_INDEX;
+		devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+		break;
 	case PIC_MMC_IRQ:
-	       return PIC_IRT_MMC_INDEX;
+		devoff = XLP_IO_SD_OFFSET(0);
+		break;
 	case PIC_I2C_0_IRQ:
-		return PIC_IRT_I2C_0_INDEX;
+		devoff = XLP_IO_I2C0_OFFSET(0);
+		break;
 	case PIC_I2C_1_IRQ:
-		return PIC_IRT_I2C_1_INDEX;
+		devoff = XLP_IO_I2C1_OFFSET(0);
+		break;
 	default:
-		return -1;
+		devoff = 0;
+		break;
 	}
+
+	if (devoff != 0) {
+		pcibase = nlm_pcicfg_base(devoff);
+		irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
+		/* HW bug, I2C 1 irt entry is off by one */
+		if (irq == PIC_I2C_1_IRQ)
+			irt = irt + 1;
+	} else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
+		/* HW bug, PCI IRT entries are bad on early silicon, fix */
+		irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
+	} else {
+		irt = -1;
+	}
+	return irt;
 }
 
 unsigned int nlm_get_core_frequency(int node, int core)
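
The rewritten nlm_irq_to_irt() stops hardcoding PIC_IRT_*_INDEX values and
instead asks the device: each XLP on-chip block's PCI config space carries its
interrupt-routing-table index.  The core of the lookup, using the same helpers
as the hunk above:

	/* Read a device's IRT index back from its PCI config space. */
	uint64_t pcibase = nlm_pcicfg_base(XLP_IO_UART0_OFFSET(0));
	int irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
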
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 4894d62..af31914 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -56,7 +56,7 @@
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
-extern u32 __dtb_start[];
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
 
 static void nlm_linux_exit(void)
 {
@@ -82,8 +82,24 @@
 	 * 64-bit, so convert pointer.
 	 */
 	fdtp = (void *)(long)fw_arg0;
-	if (!fdtp)
-		fdtp = __dtb_start;
+	if (!fdtp) {
+		switch (current_cpu_data.processor_id & 0xff00) {
+#ifdef CONFIG_DT_XLP_SVP
+		case PRID_IMP_NETLOGIC_XLP3XX:
+			fdtp = __dtb_xlp_svp_begin;
+			break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+		case PRID_IMP_NETLOGIC_XLP8XX:
+			fdtp = __dtb_xlp_evp_begin;
+			break;
+#endif
+		default:
+			/* Pick a built-in if any, and hope for the best */
+			fdtp = __dtb_start;
+			break;
+		}
+	}
 	fdtp = phys_to_virt(__pa(fdtp));
 	early_init_devtree(fdtp);
 }
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 1d0b66c..9c401dd 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -42,7 +42,30 @@
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/xlp-hal/iomap.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
-#include <asm/netlogic/xlp-hal/usb.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0			0x01
+#define USB_PHY_0			0x0A
+#define USB_PHY_RESET			0x01
+#define USB_PHY_PORT_RESET_0		0x10
+#define USB_PHY_PORT_RESET_1		0x20
+#define USB_CONTROLLER_RESET		0x01
+#define USB_INT_STATUS			0x0E
+#define USB_INT_EN			0x0F
+#define USB_PHY_INTERRUPT_EN		0x01
+#define USB_OHCI_INTERRUPT_EN		0x02
+#define USB_OHCI_INTERRUPT1_EN		0x04
+#define USB_OHCI_INTERRUPT2_EN		0x08
+#define USB_CTRL_INTERRUPT_EN		0x10
+
+#define nlm_read_usb_reg(b, r)			nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v)		nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst)		\
+	nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst)		\
+	(nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
 
 static void nlm_usb_intr_en(int node, int port)
 {
@@ -99,23 +122,23 @@
 	dev->dev.coherent_dma_mask	= DMA_BIT_MASK(64);
 	switch (dev->devfn) {
 	case 0x10:
-	       dev->irq = PIC_EHCI_0_IRQ;
-	       break;
+		dev->irq = PIC_EHCI_0_IRQ;
+		break;
 	case 0x11:
-	       dev->irq = PIC_OHCI_0_IRQ;
-	       break;
+		dev->irq = PIC_OHCI_0_IRQ;
+		break;
 	case 0x12:
-	       dev->irq = PIC_OHCI_1_IRQ;
-	       break;
+		dev->irq = PIC_OHCI_1_IRQ;
+		break;
 	case 0x13:
-	       dev->irq = PIC_EHCI_1_IRQ;
-	       break;
+		dev->irq = PIC_EHCI_1_IRQ;
+		break;
 	case 0x14:
-	       dev->irq = PIC_OHCI_2_IRQ;
-	       break;
+		dev->irq = PIC_OHCI_2_IRQ;
+		break;
 	case 0x15:
-	       dev->irq = PIC_OHCI_3_IRQ;
-	       break;
+		dev->irq = PIC_OHCI_3_IRQ;
+		break;
 	}
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
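Only the signature of nlm_usb_intr_en() appears in this hunk, but the glue
registers defined above suggest its shape. A hedged sketch (the body below is
assumed, not shown in the diff):

	static void usb_intr_en_sketch(u64 port_addr)
	{
		u32 val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
			  USB_OHCI_INTERRUPT1_EN | USB_OHCI_INTERRUPT2_EN;

		/* unmask the PHY/OHCI interrupt sources in the glue logic */
		nlm_write_usb_reg(port_addr, USB_INT_EN, val);
	}
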
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1fd3614..e4b1140 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -41,7 +41,7 @@
  * first hardware thread in the core for setup and init.
  * Skip CPUs with non-zero hardware thread id (4 hwt per core)
  */
-#ifdef CONFIG_CPU_XLR
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
 #define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
 #else
 #define oprofile_skip_cpu(c)	0
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 412ec02..18517dd 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -366,9 +366,9 @@
 	if (!res)
 		return -EINVAL;
 
-	apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!apc->cfg_base)
-		return -ENOMEM;
+	apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(apc->cfg_base))
+		return PTR_ERR(apc->cfg_base);
 
 	apc->irq = platform_get_irq(pdev, 0);
 	if (apc->irq < 0)
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 8a0700d..65ec032 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -365,25 +365,25 @@
 	if (!res)
 		return -EINVAL;
 
-	apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (apc->ctrl_base == NULL)
-		return -EBUSY;
+	apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(apc->ctrl_base))
+		return PTR_ERR(apc->ctrl_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
 	if (!res)
 		return -EINVAL;
 
-	apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!apc->devcfg_base)
-		return -EBUSY;
+	apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(apc->devcfg_base))
+		return PTR_ERR(apc->devcfg_base);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
 	if (!res)
 		return -EINVAL;
 
-	apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (apc->crp_base == NULL)
-		return -EBUSY;
+	apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(apc->crp_base))
+		return PTR_ERR(apc->crp_base);
 
 	apc->irq = platform_get_irq(pdev, 0);
 	if (apc->irq < 0)
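Both Atheros PCI controller drivers move from devm_request_and_ioremap(),
which returned NULL on failure, to devm_ioremap_resource(), which returns an
ERR_PTR-encoded error so probe can propagate the precise cause. The idiom in
isolation, as a minimal sketch (the probe function is hypothetical):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* e.g. -EBUSY, -ENOMEM */

		/* ... use 'base' ... */
		return 0;
	}

devm_ioremap_resource() itself rejects a NULL resource with -EINVAL, so the
explicit !res checks kept in the hunks above are belt and braces.
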
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index 88e781c..2eb9542 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -121,11 +121,17 @@
 static void __init bcm63xx_reset_pcie(void)
 {
 	u32 val;
+	u32 reg;
 
 	/* enable SERDES */
-	val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
+	if (BCMCPU_IS_6328())
+		reg = MISC_SERDES_CTRL_6328_REG;
+	else
+		reg = MISC_SERDES_CTRL_6362_REG;
+
+	val = bcm_misc_readl(reg);
 	val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
-	bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
+	bcm_misc_writel(val, reg);
 
 	/* reset the PCIe core */
 	bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@
 
 	switch (bcm63xx_get_cpu_id()) {
 	case BCM6328_CPU_ID:
+	case BCM6362_CPU_ID:
 		return bcm63xx_register_pcie();
 	case BCM6348_CPU_ID:
 	case BCM6358_CPU_ID:
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index 5bd9d8f..a01baff 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -29,10 +29,11 @@
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mach-powertv/asic.h>
 
+#include "init.h"
+
 static int *_prom_envp;
 unsigned long _prom_memsize;
 
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
index b194c34..c1a8bd0 100644
--- a/arch/mips/powertv/init.h
+++ b/arch/mips/powertv/init.h
@@ -23,4 +23,6 @@
 #ifndef _POWERTV_INIT_H
 #define _POWERTV_INIT_H
 extern unsigned long _prom_memsize;
+extern void prom_meminit(void);
+extern char *prom_getenv(char *name);
 #endif
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 6e5f1bd..bc2f3ca 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -29,7 +29,6 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mach-powertv/asic.h>
 #include <asm/mach-powertv/ioremap.h>
 
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
index 820b848..24689bf 100644
--- a/arch/mips/powertv/powertv_setup.c
+++ b/arch/mips/powertv/powertv_setup.c
@@ -31,7 +31,6 @@
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/dma.h>
 #include <asm/asm.h>
 #include <asm/traps.h>
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index a0b0197..026e823 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -6,12 +6,23 @@
 	help
 	  Select Ralink MIPS SoC type.
 
+	config SOC_RT288X
+		bool "RT288x"
+
 	config SOC_RT305X
 		bool "RT305x"
 		select USB_ARCH_HAS_HCD
 		select USB_ARCH_HAS_OHCI
 		select USB_ARCH_HAS_EHCI
 
+	config SOC_RT3883
+		bool "RT3883"
+		select USB_ARCH_HAS_OHCI
+		select USB_ARCH_HAS_EHCI
+
+	config SOC_MT7620
+		bool "MT7620"
+
 endchoice
 
 choice
@@ -23,10 +34,22 @@
 	config DTB_RT_NONE
 		bool "None"
 
+	config DTB_RT2880_EVAL
+		bool "RT2880 eval kit"
+		depends on SOC_RT288X
+
 	config DTB_RT305X_EVAL
 		bool "RT305x eval kit"
 		depends on SOC_RT305X
 
+	config DTB_RT3883_EVAL
+		bool "RT3883 eval kit"
+		depends on SOC_RT3883
+
+	config DTB_MT7620A_EVAL
+		bool "MT7620A eval kit"
+		depends on SOC_MT7620
+
 endchoice
 
 endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 939757f..38cf1a8 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -8,7 +8,10 @@
 
 obj-y := prom.o of.o reset.o clk.o irq.o
 
+obj-$(CONFIG_SOC_RT288X) += rt288x.o
 obj-$(CONFIG_SOC_RT305X) += rt305x.o
+obj-$(CONFIG_SOC_RT3883) += rt3883.o
+obj-$(CONFIG_SOC_MT7620) += mt7620.o
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
index 6babd65..cda4b66 100644
--- a/arch/mips/ralink/Platform
+++ b/arch/mips/ralink/Platform
@@ -5,6 +5,24 @@
 cflags-$(CONFIG_RALINK)		+= -I$(srctree)/arch/mips/include/asm/mach-ralink
 
 #
+# Ralink RT288x
+#
+load-$(CONFIG_SOC_RT288X)	+= 0xffffffff88000000
+cflags-$(CONFIG_SOC_RT288X)	+= -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
+
+#
 # Ralink RT305x
 #
 load-$(CONFIG_SOC_RT305X)	+= 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT305X)	+= -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
+
+#
+# Ralink RT3883
+#
+load-$(CONFIG_SOC_RT3883)	+= 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT3883)	+= -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
+
+#
+# Ralink MT7620
+#
+load-$(CONFIG_SOC_MT7620)	+= 0xffffffff80000000
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 3009903..83144c3 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -22,13 +22,22 @@
 	struct ralink_pinmux_grp *mode;
 	struct ralink_pinmux_grp *uart;
 	int uart_shift;
+	u32 uart_mask;
 	void (*wdt_reset)(void);
+	struct ralink_pinmux_grp *pci;
+	int pci_shift;
+	u32 pci_mask;
 };
-extern struct ralink_pinmux gpio_pinmux;
+extern struct ralink_pinmux rt_gpio_pinmux;
 
 struct ralink_soc_info {
 	unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
 	unsigned char *compatible;
+
+	unsigned long mem_base;
+	unsigned long mem_size;
+	unsigned long mem_size_min;
+	unsigned long mem_size_max;
 };
 extern struct ralink_soc_info soc_info;
 
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
index 1a69fb3..18194fa 100644
--- a/arch/mips/ralink/dts/Makefile
+++ b/arch/mips/ralink/dts/Makefile
@@ -1 +1,4 @@
+obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
 obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
+obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644
index 0000000..08bf24f
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a.dtsi
@@ -0,0 +1,58 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ralink,mt7620a-soc";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips24KEc";
+		};
+	};
+
+	cpuintc: cpuintc@0 {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	palmbus@10000000 {
+		compatible = "palmbus";
+		reg = <0x10000000 0x200000>;
+		ranges = <0x0 0x10000000 0x1FFFFF>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysc@0 {
+			compatible = "ralink,mt7620a-sysc";
+			reg = <0x0 0x100>;
+		};
+
+		intc: intc@200 {
+			compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+			reg = <0x200 0x100>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpuintc>;
+			interrupts = <2>;
+		};
+
+		memc@300 {
+			compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+			reg = <0x300 0x100>;
+		};
+
+		uartlite@c00 {
+			compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+			reg = <0xc00 0x100>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <12>;
+
+			reg-shift = <2>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644
index 0000000..35eb874
--- /dev/null
+++ b/arch/mips/ralink/dts/mt7620a_eval.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+	compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+	model = "Ralink MT7620A evaluation board";
+
+	memory@0 {
+		reg = <0x0 0x2000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,57600";
+	};
+};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644
index 0000000..182afde
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880.dtsi
@@ -0,0 +1,58 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ralink,rt2880-soc";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips4KEc";
+		};
+	};
+
+	cpuintc: cpuintc@0 {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	palmbus@300000 {
+		compatible = "palmbus";
+		reg = <0x300000 0x200000>;
+		ranges = <0x0 0x300000 0x1FFFFF>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysc@0 {
+			compatible = "ralink,rt2880-sysc";
+			reg = <0x0 0x100>;
+		};
+
+		intc: intc@200 {
+			compatible = "ralink,rt2880-intc";
+			reg = <0x200 0x100>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpuintc>;
+			interrupts = <2>;
+		};
+
+		memc@300 {
+			compatible = "ralink,rt2880-memc";
+			reg = <0x300 0x100>;
+		};
+
+		uartlite@c00 {
+			compatible = "ralink,rt2880-uart", "ns16550a";
+			reg = <0xc00 0x100>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <8>;
+
+			reg-shift = <2>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644
index 0000000..322d700
--- /dev/null
+++ b/arch/mips/ralink/dts/rt2880_eval.dts
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+	compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+	model = "Ralink RT2880 evaluation board";
+
+	memory@0 {
+		reg = <0x8000000 0x2000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,57600";
+	};
+
+	cfi@1f000000 {
+		compatible = "cfi-flash";
+		reg = <0x1f000000 0x400000>;
+
+		bank-width = <2>;
+		device-width = <2>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "uboot";
+			reg = <0x0 0x30000>;
+			read-only;
+		};
+		partition@30000 {
+			label = "uboot-env";
+			reg = <0x30000 0x10000>;
+			read-only;
+		};
+		partition@40000 {
+			label = "calibration";
+			reg = <0x40000 0x10000>;
+			read-only;
+		};
+		partition@50000 {
+			label = "linux";
+			reg = <0x50000 0x3b0000>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
index 069d066..ef7da1e 100644
--- a/arch/mips/ralink/dts/rt3050.dtsi
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -1,7 +1,7 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
-	compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+	compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
 
 	cpus {
 		cpu@0 {
@@ -9,10 +9,6 @@
 		};
 	};
 
-	chosen {
-		bootargs = "console=ttyS0,57600 init=/init";
-	};
-
 	cpuintc: cpuintc@0 {
 		#address-cells = <0>;
 		#interrupt-cells = <1>;
@@ -23,7 +19,7 @@
 	palmbus@10000000 {
 		compatible = "palmbus";
 		reg = <0x10000000 0x200000>;
-                ranges = <0x0 0x10000000 0x1FFFFF>;
+		ranges = <0x0 0x10000000 0x1FFFFF>;
 
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -33,11 +29,6 @@
 			reg = <0x0 0x100>;
 		};
 
-		timer@100 {
-			compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
-			reg = <0x100 0x100>;
-		};
-
 		intc: intc@200 {
 			compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
 			reg = <0x200 0x100>;
@@ -54,45 +45,6 @@
 			reg = <0x300 0x100>;
 		};
 
-		gpio0: gpio@600 {
-			compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-			reg = <0x600 0x34>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			ralink,ngpio = <24>;
-			ralink,regs = [ 00 04 08 0c
-					20 24 28 2c
-					30 34 ];
-		};
-
-		gpio1: gpio@638 {
-			compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-			reg = <0x638 0x24>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			ralink,ngpio = <16>;
-			ralink,regs = [ 00 04 08 0c
-					10 14 18 1c
-					20 24 ];
-		};
-
-		gpio2: gpio@660 {
-			compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-			reg = <0x660 0x24>;
-
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			ralink,ngpio = <12>;
-			ralink,regs = [ 00 04 08 0c
-					10 14 18 1c
-					20 24 ];
-		};
-
 		uartlite@c00 {
 			compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
 			reg = <0xc00 0x100>;
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index 148a590..c18c9a8 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -1,10 +1,8 @@
 /dts-v1/;
 
-/include/ "rt3050.dtsi"
+#include "rt3050.dtsi"
 
 / {
-	#address-cells = <1>;
-	#size-cells = <1>;
 	compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
 	model = "Ralink RT3052 evaluation board";
 
@@ -12,12 +10,8 @@
 		reg = <0x0 0x2000000>;
 	};
 
-	palmbus@10000000 {
-		sysc@0 {
-			ralink,pinmmux = "uartlite", "spi";
-			ralink,uartmux = "gpio";
-			ralink,wdtmux = <0>;
-		};
+	chosen {
+		bootargs = "console=ttyS0,57600";
 	};
 
 	cfi@1f000000 {
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644
index 0000000..3b131dd
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883.dtsi
@@ -0,0 +1,58 @@
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "ralink,rt3883-soc";
+
+	cpus {
+		cpu@0 {
+			compatible = "mips,mips74Kc";
+		};
+	};
+
+	cpuintc: cpuintc@0 {
+		#address-cells = <0>;
+		#interrupt-cells = <1>;
+		interrupt-controller;
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	palmbus@10000000 {
+		compatible = "palmbus";
+		reg = <0x10000000 0x200000>;
+		ranges = <0x0 0x10000000 0x1FFFFF>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sysc@0 {
+			compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+			reg = <0x0 0x100>;
+		};
+
+		intc: intc@200 {
+			compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+			reg = <0x200 0x100>;
+
+			interrupt-controller;
+			#interrupt-cells = <1>;
+
+			interrupt-parent = <&cpuintc>;
+			interrupts = <2>;
+		};
+
+		memc@300 {
+			compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+			reg = <0x300 0x100>;
+		};
+
+		uartlite@c00 {
+			compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+			reg = <0xc00 0x100>;
+
+			interrupt-parent = <&intc>;
+			interrupts = <12>;
+
+			reg-shift = <2>;
+		};
+	};
+};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644
index 0000000..2fa6b33
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3883_eval.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+	compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+	model = "Ralink RT3883 evaluation board";
+
+	memory@0 {
+		reg = <0x0 0x2000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,57600";
+	};
+};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index c4ae47e..b46d041 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -11,7 +11,11 @@
 
 #include <asm/addrspace.h>
 
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE         0x300c00
+#else
 #define EARLY_UART_BASE         0x10000c00
+#endif
 
 #define UART_REG_RX             0x00
 #define UART_REG_TX             0x04
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 6d054c5..320b1f1 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -31,6 +31,7 @@
 #define INTC_INT_GLOBAL		BIT(31)
 
 #define RALINK_CPU_IRQ_INTC	(MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI	(MIPS_CPU_IRQ_BASE + 4)
 #define RALINK_CPU_IRQ_FE	(MIPS_CPU_IRQ_BASE + 5)
 #define RALINK_CPU_IRQ_WIFI	(MIPS_CPU_IRQ_BASE + 6)
 #define RALINK_CPU_IRQ_COUNTER	(MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@
 	else if (pending & STATUSF_IP6)
 		do_IRQ(RALINK_CPU_IRQ_WIFI);
 
+	else if (pending & STATUSF_IP4)
+		do_IRQ(RALINK_CPU_IRQ_PCI);
+
 	else if (pending & STATUSF_IP2)
 		do_IRQ(RALINK_CPU_IRQ_INTC);
 
@@ -162,6 +166,7 @@
 	irq_set_chained_handler(irq, ralink_intc_irq_handler);
 	irq_set_handler_data(irq, domain);
 
+	/* tell the kernel which irq is used for performance monitoring */
 	cp0_perfcount_irq = irq_create_mapping(domain, 9);
 
 	return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644
index 0000000..0018b1a
--- /dev/null
+++ b/arch/mips/ralink/mt7620.c
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "common.h"
+
+/* does the board have SDRAM or DDR RAM? */
+static int dram_type;
+
+/* the pll dividers */
+static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
+
+static struct ralink_pinmux_grp mode_mux[] = {
+	{
+		.name = "i2c",
+		.mask = MT7620_GPIO_MODE_I2C,
+		.gpio_first = 1,
+		.gpio_last = 2,
+	}, {
+		.name = "spi",
+		.mask = MT7620_GPIO_MODE_SPI,
+		.gpio_first = 3,
+		.gpio_last = 6,
+	}, {
+		.name = "uartlite",
+		.mask = MT7620_GPIO_MODE_UART1,
+		.gpio_first = 15,
+		.gpio_last = 16,
+	}, {
+		.name = "wdt",
+		.mask = MT7620_GPIO_MODE_WDT,
+		.gpio_first = 17,
+		.gpio_last = 17,
+	}, {
+		.name = "mdio",
+		.mask = MT7620_GPIO_MODE_MDIO,
+		.gpio_first = 22,
+		.gpio_last = 23,
+	}, {
+		.name = "rgmii1",
+		.mask = MT7620_GPIO_MODE_RGMII1,
+		.gpio_first = 24,
+		.gpio_last = 35,
+	}, {
+		.name = "spi refclk",
+		.mask = MT7620_GPIO_MODE_SPI_REF_CLK,
+		.gpio_first = 37,
+		.gpio_last = 39,
+	}, {
+		.name = "jtag",
+		.mask = MT7620_GPIO_MODE_JTAG,
+		.gpio_first = 40,
+		.gpio_last = 44,
+	}, {
+		/* shared lines with jtag */
+		.name = "ephy",
+		.mask = MT7620_GPIO_MODE_EPHY,
+		.gpio_first = 40,
+		.gpio_last = 44,
+	}, {
+		.name = "nand",
+		.mask = MT7620_GPIO_MODE_JTAG,
+		.gpio_first = 45,
+		.gpio_last = 59,
+	}, {
+		.name = "rgmii2",
+		.mask = MT7620_GPIO_MODE_RGMII2,
+		.gpio_first = 60,
+		.gpio_last = 71,
+	}, {
+		.name = "wled",
+		.mask = MT7620_GPIO_MODE_WLED,
+		.gpio_first = 72,
+		.gpio_last = 72,
+	}, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+	{
+		.name = "uartf",
+		.mask = MT7620_GPIO_MODE_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "pcm uartf",
+		.mask = MT7620_GPIO_MODE_PCM_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "pcm i2s",
+		.mask = MT7620_GPIO_MODE_PCM_I2S,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "i2s uartf",
+		.mask = MT7620_GPIO_MODE_I2S_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "pcm gpio",
+		.mask = MT7620_GPIO_MODE_PCM_GPIO,
+		.gpio_first = 11,
+		.gpio_last = 14,
+	}, {
+		.name = "gpio uartf",
+		.mask = MT7620_GPIO_MODE_GPIO_UARTF,
+		.gpio_first = 7,
+		.gpio_last = 10,
+	}, {
+		.name = "gpio i2s",
+		.mask = MT7620_GPIO_MODE_GPIO_I2S,
+		.gpio_first = 7,
+		.gpio_last = 10,
+	}, {
+		.name = "gpio",
+		.mask = MT7620_GPIO_MODE_GPIO,
+	}, {0}
+};
+
+struct ralink_pinmux rt_gpio_pinmux = {
+	.mode = mode_mux,
+	.uart = uart_mux,
+	.uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
+	.uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+	unsigned long cpu_rate, sys_rate;
+	u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+	u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+	u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
+	u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
+
+	if (cpu_clk) {
+		cpu_rate = 480000000;
+	} else if (!swconfig) {
+		cpu_rate = 600000000;
+	} else {
+		u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
+		u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
+
+		cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
+	}
+
+	if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+		sys_rate = cpu_rate / 4;
+	else
+		sys_rate = cpu_rate / 3;
+
+	ralink_clk_add("cpu", cpu_rate);
+	ralink_clk_add("10000100.timer", 40000000);
+	ralink_clk_add("10000500.uart", 40000000);
+	ralink_clk_add("10000c00.uartlite", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+	rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+	rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+	if (!rt_sysc_membase || !rt_memc_membase)
+		panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+	unsigned char *name = NULL;
+	u32 n0;
+	u32 n1;
+	u32 rev;
+	u32 cfg0;
+
+	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+	if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
+		name = "MT7620N";
+		soc_info->compatible = "ralink,mt7620n-soc";
+	} else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
+		name = "MT7620A";
+		soc_info->compatible = "ralink,mt7620a-soc";
+	} else {
+		panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+	}
+
+	rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+		"Ralink %s ver:%u eco:%u",
+		name,
+		(rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+		(rev & CHIP_REV_ECO_MASK));
+
+	cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+	dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
+
+	switch (dram_type) {
+	case SYSCFG0_DRAM_TYPE_SDRAM:
+		soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+		soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+		break;
+
+	case SYSCFG0_DRAM_TYPE_DDR1:
+		soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+		soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+		break;
+
+	case SYSCFG0_DRAM_TYPE_DDR2:
+		soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+		soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+		break;
+	default:
+		BUG();
+	}
+	soc_info->mem_base = MT7620_DRAM_BASE;
+}
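A worked example of the CPLL formula in ralink_clk_init() above, with
hypothetical register field values: for multiplier field m = 6 and divider
index d = 0 (divider 2), the CPU clock is 40 * (6 + 24) / 2 = 600 MHz.

	static const unsigned int mt7620_divider[] = { 2, 3, 4, 8 };

	static unsigned long cpll_rate(unsigned int m, unsigned int d)
	{
		/* 40 MHz reference, multiplied then divided, in Hz */
		return ((40 * (m + 24)) / mt7620_divider[d]) * 1000000UL;
	}

	/* cpll_rate(6, 0) == 600000000 */
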
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 4165e70..fb15695 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/of_fdt.h>
 #include <linux/kernel.h>
 #include <linux/bootmem.h>
@@ -85,6 +86,14 @@
 	 * parsed resulting in our memory appearing
 	 */
 	__dt_setup_arch(&__dtb_start);
+
+	if (soc_info.mem_size)
+		add_memory_region(soc_info.mem_base, soc_info.mem_size,
+				  BOOT_MEM_RAM);
+	else
+		detect_memory_region(soc_info.mem_base,
+				     soc_info.mem_size_min * SZ_1M,
+				     soc_info.mem_size_max * SZ_1M);
 }
 
 static int __init plat_of_setup(void)
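With this change a Ralink SoC reports either an exact memory size or a
min/max window that detect_memory_region() probes at boot. A plausible sketch
of such probing, with the details assumed here since the real helper lives in
the MIPS core, is to find the point where RAM starts aliasing:

	/* Sketch: on these SoCs, reads past the end of populated RAM wrap
	 * around, so the bytes at 'base' reappear at base + size once
	 * 'size' exceeds the installed memory.  Compare a known pattern
	 * at 'base' against each candidate alias. */
	static unsigned long probe_mem_size(void *base, unsigned long min,
					    unsigned long max)
	{
		unsigned long size;

		for (size = min; size < max; size <<= 1)
			if (!memcmp(base, base + size, 32))
				break;	/* alias found */
		return size;
	}
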
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644
index 0000000..f87de1a
--- /dev/null
+++ b/arch/mips/ralink/rt288x.c
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+	{
+		.name = "i2c",
+		.mask = RT2880_GPIO_MODE_I2C,
+		.gpio_first = 1,
+		.gpio_last = 2,
+	}, {
+		.name = "spi",
+		.mask = RT2880_GPIO_MODE_SPI,
+		.gpio_first = 3,
+		.gpio_last = 6,
+	}, {
+		.name = "uartlite",
+		.mask = RT2880_GPIO_MODE_UART0,
+		.gpio_first = 7,
+		.gpio_last = 14,
+	}, {
+		.name = "jtag",
+		.mask = RT2880_GPIO_MODE_JTAG,
+		.gpio_first = 17,
+		.gpio_last = 21,
+	}, {
+		.name = "mdio",
+		.mask = RT2880_GPIO_MODE_MDIO,
+		.gpio_first = 22,
+		.gpio_last = 23,
+	}, {
+		.name = "sdram",
+		.mask = RT2880_GPIO_MODE_SDRAM,
+		.gpio_first = 24,
+		.gpio_last = 39,
+	}, {
+		.name = "pci",
+		.mask = RT2880_GPIO_MODE_PCI,
+		.gpio_first = 40,
+		.gpio_last = 71,
+	}, {0}
+};
+
+static void rt288x_wdt_reset(void)
+{
+	u32 t;
+
+	/* enable WDT reset output on pin SRAM_CS_N */
+	t = rt_sysc_r32(SYSC_REG_CLKCFG);
+	t |= CLKCFG_SRAM_CS_N_WDT;
+	rt_sysc_w32(t, SYSC_REG_CLKCFG);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+	.mode = mode_mux,
+	.wdt_reset = rt288x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+	unsigned long cpu_rate;
+	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+	t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+	switch (t) {
+	case SYSTEM_CONFIG_CPUCLK_250:
+		cpu_rate = 250000000;
+		break;
+	case SYSTEM_CONFIG_CPUCLK_266:
+		cpu_rate = 266666667;
+		break;
+	case SYSTEM_CONFIG_CPUCLK_280:
+		cpu_rate = 280000000;
+		break;
+	case SYSTEM_CONFIG_CPUCLK_300:
+		cpu_rate = 300000000;
+		break;
+	}
+
+	ralink_clk_add("cpu", cpu_rate);
+	ralink_clk_add("300100.timer", cpu_rate / 2);
+	ralink_clk_add("300120.watchdog", cpu_rate / 2);
+	ralink_clk_add("300500.uart", cpu_rate / 2);
+	ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+	ralink_clk_add("400000.ethernet", cpu_rate / 2);
+}
+
+void __init ralink_of_remap(void)
+{
+	rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+	rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+	if (!rt_sysc_membase || !rt_memc_membase)
+		panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+	const char *name;
+	u32 n0;
+	u32 n1;
+	u32 id;
+
+	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+	id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+	if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+		soc_info->compatible = "ralink,rt2880-soc";
+		name = "RT2880";
+	} else {
+		panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+	}
+
+	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+		"Ralink %s id:%u rev:%u",
+		name,
+		(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+		(id & CHIP_ID_REV_MASK));
+
+	soc_info->mem_base = RT2880_SDRAM_BASE;
+	soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+	soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 0a4bbdc..ca7ee3a 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -22,7 +22,7 @@
 
 enum rt305x_soc_type rt305x_soc;
 
-struct ralink_pinmux_grp mode_mux[] = {
+static struct ralink_pinmux_grp mode_mux[] = {
 	{
 		.name = "i2c",
 		.mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@
 	}, {0}
 };
 
-struct ralink_pinmux_grp uart_mux[] = {
+static struct ralink_pinmux_grp uart_mux[] = {
 	{
 		.name = "uartf",
 		.mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@
 		.name = "gpio uartf",
 		.mask = RT305X_GPIO_MODE_GPIO_UARTF,
 		.gpio_first = RT305X_GPIO_7,
-		.gpio_last = RT305X_GPIO_14,
+		.gpio_last = RT305X_GPIO_10,
 	}, {
 		.name = "gpio i2s",
 		.mask = RT305X_GPIO_MODE_GPIO_I2S,
 		.gpio_first = RT305X_GPIO_7,
-		.gpio_last = RT305X_GPIO_14,
+		.gpio_last = RT305X_GPIO_10,
 	}, {
 		.name = "gpio",
 		.mask = RT305X_GPIO_MODE_GPIO,
 	}, {0}
 };
 
-void rt305x_wdt_reset(void)
+static void rt305x_wdt_reset(void)
 {
 	u32 t;
 
@@ -114,16 +114,53 @@
 	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
 }
 
-struct ralink_pinmux gpio_pinmux = {
+struct ralink_pinmux rt_gpio_pinmux = {
 	.mode = mode_mux,
 	.uart = uart_mux,
 	.uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+	.uart_mask = RT305X_GPIO_MODE_UART0_MASK,
 	.wdt_reset = rt305x_wdt_reset,
 };
 
+static unsigned long rt5350_get_mem_size(void)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+	unsigned long ret;
+	u32 t;
+
+	t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+	t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+		RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+	switch (t) {
+	case RT5350_SYSCFG0_DRAM_SIZE_2M:
+		ret = 2;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_8M:
+		ret = 8;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_16M:
+		ret = 16;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_32M:
+		ret = 32;
+		break;
+	case RT5350_SYSCFG0_DRAM_SIZE_64M:
+		ret = 64;
+		break;
+	default:
+		panic("rt5350: invalid DRAM size: %u", t);
+		break;
+	}
+
+	return ret;
+}
+
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+	unsigned long wmac_rate = 40000000;
+
 	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
 
 	if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@
 		BUG();
 	}
 
+	if (soc_is_rt3352() || soc_is_rt5350()) {
+		u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+		if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+			wmac_rate = 20000000;
+	}
+
 	ralink_clk_add("cpu", cpu_rate);
 	ralink_clk_add("10000b00.spi", sys_rate);
 	ralink_clk_add("10000100.timer", wdt_rate);
+	ralink_clk_add("10000120.watchdog", wdt_rate);
 	ralink_clk_add("10000500.uart", uart_rate);
 	ralink_clk_add("10000c00.uartlite", uart_rate);
+	ralink_clk_add("10100000.ethernet", sys_rate);
+	ralink_clk_add("10180000.wmac", wmac_rate);
 }
 
 void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@
 		name,
 		(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
 		(id & CHIP_ID_REV_MASK));
+
+	soc_info->mem_base = RT305X_SDRAM_BASE;
+	if (soc_is_rt5350()) {
+		soc_info->mem_size = rt5350_get_mem_size();
+	} else if (soc_is_rt305x() || soc_is_rt3350()) {
+		soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+		soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+	} else if (soc_is_rt3352()) {
+		soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+		soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+	}
 }
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644
index 0000000..b474ac2
--- /dev/null
+++ b/arch/mips/ralink/rt3883.c
@@ -0,0 +1,246 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+	{
+		.name = "i2c",
+		.mask = RT3883_GPIO_MODE_I2C,
+		.gpio_first = RT3883_GPIO_I2C_SD,
+		.gpio_last = RT3883_GPIO_I2C_SCLK,
+	}, {
+		.name = "spi",
+		.mask = RT3883_GPIO_MODE_SPI,
+		.gpio_first = RT3883_GPIO_SPI_CS0,
+		.gpio_last = RT3883_GPIO_SPI_MISO,
+	}, {
+		.name = "uartlite",
+		.mask = RT3883_GPIO_MODE_UART1,
+		.gpio_first = RT3883_GPIO_UART1_TXD,
+		.gpio_last = RT3883_GPIO_UART1_RXD,
+	}, {
+		.name = "jtag",
+		.mask = RT3883_GPIO_MODE_JTAG,
+		.gpio_first = RT3883_GPIO_JTAG_TDO,
+		.gpio_last = RT3883_GPIO_JTAG_TCLK,
+	}, {
+		.name = "mdio",
+		.mask = RT3883_GPIO_MODE_MDIO,
+		.gpio_first = RT3883_GPIO_MDIO_MDC,
+		.gpio_last = RT3883_GPIO_MDIO_MDIO,
+	}, {
+		.name = "ge1",
+		.mask = RT3883_GPIO_MODE_GE1,
+		.gpio_first = RT3883_GPIO_GE1_TXD0,
+		.gpio_last = RT3883_GPIO_GE1_RXCLK,
+	}, {
+		.name = "ge2",
+		.mask = RT3883_GPIO_MODE_GE2,
+		.gpio_first = RT3883_GPIO_GE2_TXD0,
+		.gpio_last = RT3883_GPIO_GE2_RXCLK,
+	}, {
+		.name = "pci",
+		.mask = RT3883_GPIO_MODE_PCI,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "lna a",
+		.mask = RT3883_GPIO_MODE_LNA_A,
+		.gpio_first = RT3883_GPIO_LNA_PE_A0,
+		.gpio_last = RT3883_GPIO_LNA_PE_A2,
+	}, {
+		.name = "lna g",
+		.mask = RT3883_GPIO_MODE_LNA_G,
+		.gpio_first = RT3883_GPIO_LNA_PE_G0,
+		.gpio_last = RT3883_GPIO_LNA_PE_G2,
+	}, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+	{
+		.name = "uartf",
+		.mask = RT3883_GPIO_MODE_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "pcm uartf",
+		.mask = RT3883_GPIO_MODE_PCM_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "pcm i2s",
+		.mask = RT3883_GPIO_MODE_PCM_I2S,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "i2s uartf",
+		.mask = RT3883_GPIO_MODE_I2S_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "pcm gpio",
+		.mask = RT3883_GPIO_MODE_PCM_GPIO,
+		.gpio_first = RT3883_GPIO_11,
+		.gpio_last = RT3883_GPIO_14,
+	}, {
+		.name = "gpio uartf",
+		.mask = RT3883_GPIO_MODE_GPIO_UARTF,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_10,
+	}, {
+		.name = "gpio i2s",
+		.mask = RT3883_GPIO_MODE_GPIO_I2S,
+		.gpio_first = RT3883_GPIO_7,
+		.gpio_last = RT3883_GPIO_10,
+	}, {
+		.name = "gpio",
+		.mask = RT3883_GPIO_MODE_GPIO,
+	}, {0}
+};
+
+static struct ralink_pinmux_grp pci_mux[] = {
+	{
+		.name = "pci-dev",
+		.mask = 0,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-host2",
+		.mask = 1,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-host1",
+		.mask = 2,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-fnc",
+		.mask = 3,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {
+		.name = "pci-gpio",
+		.mask = 7,
+		.gpio_first = RT3883_GPIO_PCI_AD0,
+		.gpio_last = RT3883_GPIO_PCI_AD31,
+	}, {0}
+};
+
+static void rt3883_wdt_reset(void)
+{
+	u32 t;
+
+	/* enable WDT reset output on GPIO 2 */
+	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+	.mode = mode_mux,
+	.uart = uart_mux,
+	.uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
+	.uart_mask = RT3883_GPIO_MODE_UART0_MASK,
+	.wdt_reset = rt3883_wdt_reset,
+	.pci = pci_mux,
+	.pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
+	.pci_mask = RT3883_GPIO_MODE_PCI_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+	unsigned long cpu_rate, sys_rate;
+	u32 syscfg0;
+	u32 clksel;
+	u32 ddr2;
+
+	syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+	clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+		RT3883_SYSCFG0_CPUCLK_MASK);
+	ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+	switch (clksel) {
+	case RT3883_SYSCFG0_CPUCLK_250:
+		cpu_rate = 250000000;
+		sys_rate = (ddr2) ? 125000000 : 83000000;
+		break;
+	case RT3883_SYSCFG0_CPUCLK_384:
+		cpu_rate = 384000000;
+		sys_rate = (ddr2) ? 128000000 : 96000000;
+		break;
+	case RT3883_SYSCFG0_CPUCLK_480:
+		cpu_rate = 480000000;
+		sys_rate = (ddr2) ? 160000000 : 120000000;
+		break;
+	case RT3883_SYSCFG0_CPUCLK_500:
+		cpu_rate = 500000000;
+		sys_rate = (ddr2) ? 166000000 : 125000000;
+		break;
+	}
+
+	ralink_clk_add("cpu", cpu_rate);
+	ralink_clk_add("10000100.timer", sys_rate);
+	ralink_clk_add("10000120.watchdog", sys_rate);
+	ralink_clk_add("10000500.uart", 40000000);
+	ralink_clk_add("10000b00.spi", sys_rate);
+	ralink_clk_add("10000c00.uartlite", 40000000);
+	ralink_clk_add("10100000.ethernet", sys_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+	rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+	rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+	if (!rt_sysc_membase || !rt_memc_membase)
+		panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+	const char *name;
+	u32 n0;
+	u32 n1;
+	u32 id;
+
+	n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+	n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+	id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+	if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+		soc_info->compatible = "ralink,rt3883-soc";
+		name = "RT3883";
+	} else {
+		panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+	}
+
+	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+		"Ralink %s ver:%u eco:%u",
+		name,
+		(id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+		(id & RT3883_REVID_ECO_ID_MASK));
+
+	soc_info->mem_base = RT3883_SDRAM_BASE;
+	soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+	soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 1d1919a..7a53b1e 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -114,7 +114,7 @@
  * data structures on the first couple of pages of the first slot of each
  * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
  */
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
 {
 	unsigned long loadbase = REP_BASE;
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 5f2bddb..1230f56 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -255,14 +255,14 @@
 	}
 }
 
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
 {
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-	return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+	return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
 {
 	nasid_t nasid;
 	lboard_t *brd;
@@ -353,7 +353,7 @@
 
 static void __init szmem(void)
 {
-	pfn_t slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
+	unsigned long slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
 	int slot;
 	cnodeid_t node;
 
@@ -390,10 +390,10 @@
 
 static void __init node_mem_init(cnodeid_t node)
 {
-	pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-	pfn_t slot_freepfn = node_getfirstfree(node);
+	unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+	unsigned long slot_freepfn = node_getfirstfree(node);
 	unsigned long bootmap_size;
-	pfn_t start_pfn, end_pfn;
+	unsigned long start_pfn, end_pfn;
 
 	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
@@ -467,7 +467,7 @@
 	pagetable_init();
 
 	for_each_online_node(node) {
-		pfn_t start_pfn, end_pfn;
+		unsigned long start_pfn, end_pfn;
 
 		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index fff58ac1..2e21b76 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -69,7 +69,7 @@
 	/* Nothing to do ...  */
 }
 
-int rt_timer_irq;
+unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
 static DEFINE_PER_CPU(char [11], hub_rt_name);
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 5524f2c..5364aab 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -118,7 +118,7 @@
 
 /* GPIO support */
 
-#ifdef CONFIG_GENERIC_GPIO
+#ifdef CONFIG_GPIOLIB
 int gpio_to_irq(unsigned gpio)
 {
 	return -EINVAL;
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 81b9ddb..1072bfd 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -44,9 +44,6 @@
 config NO_IOPORT
 	def_bool y
 
-config GENERIC_GPIO
-	def_bool y
-
 config TRACE_IRQFLAGS_SUPPORT
         def_bool y
 
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 433e75a..cad060f 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -13,6 +13,7 @@
 	select BUG
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
+	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_GENERIC_HARDIRQS
 	select BROKEN_RODATA
 	select GENERIC_IRQ_PROBE
@@ -242,6 +243,14 @@
 
 	  If you don't know what to do here, say N.
 
+config IRQSTACKS
+	bool "Use separate kernel stacks when processing interrupts"
+	default n
+	help
+	  If you say Y here the kernel will use separate kernel stacks
+	  for handling hard and soft interrupts.  This can help avoid
+	  overflowing the process kernel stacks.
+
 config HOTPLUG_CPU
 	bool
 	default y if SMP
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index bc989e5..08a332f 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -13,3 +13,14 @@
          If in doubt, say "N".
 
 endmenu
+
+config DEBUG_STACKOVERFLOW
+	bool "Check for stack overflows"
+	default y
+	depends on DEBUG_KERNEL
+	---help---
+	  Say Y here if you want to check for overflows of the kernel,
+	  IRQ and exception stacks. With this option enabled, detailed
+	  messages are printed when free stack space drops below a
+	  certain limit.
+	  If in doubt, say "N".
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 113e282..2f967cc 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -24,9 +24,7 @@
 LIBGCC		= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 MACHINE		:= $(shell uname -m)
-ifeq ($(MACHINE),parisc*)
-NATIVE		:= 1
-endif
+NATIVE		:= $(if $(filter parisc%,$(MACHINE)),1,0)
 
 ifdef CONFIG_64BIT
 UTS_MACHINE	:= parisc64
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index f38e198..472886c 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -229,6 +229,29 @@
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long c, old, dec;
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 #endif /* !CONFIG_64BIT */
 
 
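A usage sketch for the new helper (the counter and wrapper are hypothetical):
a credit-based limiter that takes a credit only while the count stays
non-negative; when no credit is available the counter is left untouched and
the call reports failure:

	static atomic64_t credits = ATOMIC64_INIT(16);

	static bool take_credit(void)
	{
		/* returns the old value minus one; a negative result means
		 * the counter was already zero and was not decremented */
		return atomic64_dec_if_positive(&credits) >= 0;
	}
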
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 106b395..d0eae5f 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -46,6 +46,9 @@
 
 extern struct hppa_dma_ops *hppa_dma_ops;
 
+#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
+#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
+
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t flag)
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 0d68184..12373c4 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,11 +1,41 @@
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
-#include <asm-generic/hardirq.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	unsigned int kernel_stack_usage;
+#endif
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+#endif
+	unsigned int irq_tlb_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
+
+#define ack_bad_irq(irq) WARN(1, "unexpected IRQ trap at vector %02x\n", irq)
 
 #endif /* _PARISC_HARDIRQ_H */
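With the per-cpu irq_stat above, arch code bumps a counter with
inc_irq_stat(), which expands to this_cpu_inc() on the named member. A sketch
of a counting site (the handler name is hypothetical; the real increments
live in the parisc SMP/IPI paths, which are not part of this hunk):

	static void handle_resched_ipi(void)
	{
		inc_irq_stat(irq_resched_count);
		scheduler_ipi();
	}
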
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 09b54a5..0640155 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -20,8 +20,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define KERNEL_STACK_SIZE 	(4*PAGE_SIZE)
-
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
@@ -61,6 +59,23 @@
 #ifndef __ASSEMBLY__
 
 /*
+ * IRQ STACK - used for irq handler
+ */
+#ifdef __KERNEL__
+
+#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
+
+#endif /* __KERNEL__ */
+
+/*
  * Data detected about CPUs at boot time which is the same for all CPU's.
  * HP boxes are SMP - ie identical processors.
  *
@@ -97,7 +112,6 @@
 	unsigned long txn_addr;     /* MMIO addr of EIR or id_eid */
 #ifdef CONFIG_SMP
 	unsigned long pending_ipi;  /* bitmap of type ipi_message_type */
-	unsigned long ipi_count;    /* number ipi Interrupts */
 #endif
 	unsigned long bh_count;     /* number of times bh was invoked */
 	unsigned long prof_counter; /* per CPU profiling support */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 6182832..540c88f 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -40,7 +40,7 @@
 
 /* thread information allocation */
 
-#define THREAD_SIZE_ORDER            2
+#define THREAD_SIZE_ORDER	2 /* PA-RISC requires at least 16k stack */
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 8f1a810..5273da9 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -22,6 +22,8 @@
 extern void flush_tlb_all(void);
 extern void flush_tlb_all_local(void *);
 
+#define smp_flush_tlb_all()	flush_tlb_all()
+
 /*
  * flush_tlb_mm()
  *
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 83ded26..65fb4cb 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -606,7 +606,7 @@
 	/* Clear using TMPALIAS region.  The page doesn't need to
 	   be flushed but the kernel mapping needs to be purged.  */
 
-	vto = kmap_atomic(page, KM_USER0);
+	vto = kmap_atomic(page);
 
 	/* The PA-RISC 2.0 Architecture book states on page F-6:
 	   "Before a write-capable translation is enabled, *all*
@@ -641,8 +641,8 @@
 	   the `to' page must be flushed in copy_user_page_asm since
 	   it can be used to bring in executable code.  */
 
-	vfrom = kmap_atomic(from, KM_USER0);
-	vto = kmap_atomic(to, KM_USER1);
+	vfrom = kmap_atomic(from);
+	vto = kmap_atomic(to);
 
 	purge_kernel_dcache_page_asm((unsigned long)vto);
 	purge_tlb_start(flags);
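The KM_USER* slot arguments are gone: kmap_atomic() now takes only the page,
nests mappings implicitly, and pairs with kunmap_atomic() in reverse order.
Minimal use of the new form:

	void *addr = kmap_atomic(page);

	memset(addr, 0, PAGE_SIZE);	/* operate on the temporary mapping */
	kunmap_atomic(addr);
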
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index f33201b..4bb96ad 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -400,7 +400,15 @@
 #if PT_NLEVELS == 3
 	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
 #else
+# if defined(CONFIG_64BIT)
+	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+  #else
+  # if PAGE_SIZE > 4096
+	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
+  # else
 	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+  # endif
+# endif
 #endif
 	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
 	copy		%r0,\pte
@@ -615,7 +623,7 @@
 
 	.text
 
-	.align	PAGE_SIZE
+	.align 4096
 
 ENTRY(fault_vector_20)
 	/* First vector is invalid (0) */
@@ -825,11 +833,6 @@
 	STREG   %r19,PT_SR7(%r16)
 
 intr_return:
-	/* NOTE: Need to enable interrupts incase we schedule. */
-	ssm     PSW_SM_I, %r0
-
-intr_check_resched:
-
 	/* check for reschedule */
 	mfctl   %cr30,%r1
 	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
@@ -856,6 +859,11 @@
 	LDREG	PT_IASQ1(%r16), %r20
 	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+	/* NOTE: We need to enable interrupts if we have to deliver
+	 * signals. We used to do this earlier but it caused kernel
+	 * stack overflows. */
+	ssm     PSW_SM_I, %r0
+
 	copy	%r0, %r25			/* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
@@ -907,6 +915,10 @@
 	cmpib,COND(=)	0, %r20, intr_do_preempt
 	nop
 
+	/* NOTE: We need to enable interrupts if we schedule.  We used
+	 * to do this earlier but it caused kernel stack overflows. */
+	ssm     PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
@@ -1694,7 +1706,8 @@
 	ldo	TASK_REGS(%r1),%r1
 	reg_save %r1
 	mfctl	%cr27, %r28
-	b	sys_\name
+	ldil	L%sys_\name, %r31
+	be	R%sys_\name(%sr4,%r31)
 	STREG	%r28, PT_CR27(%r1)
 ENDPROC(sys_\name\()_wrapper)
 	.endm
@@ -1997,6 +2010,47 @@
 ENDPROC(return_to_handler)
 #endif	/* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_IRQSTACKS
+/* void call_on_stack(unsigned long param1, void *func,
+		      unsigned long new_stack) */
+ENTRY(call_on_stack)
+	copy	%sp, %r1
+
+	/* Regarding the HPPA calling conventions for function pointers,
+	   we assume the PIC register is not changed across the call.  For
+	   CONFIG_64BIT, the argument pointer is left to point at the
+	   argument region allocated for the call to call_on_stack. */
+# ifdef CONFIG_64BIT
+	/* Switch to new stack.  We allocate two 128 byte frames.  */
+	ldo	256(%arg2), %sp
+	/* Save previous stack pointer and return pointer in frame marker */
+	STREG	%rp, -144(%sp)
+	/* Calls always use function descriptor */
+	LDREG	16(%arg1), %arg1
+	bve,l	(%arg1), %rp
+	STREG	%r1, -136(%sp)
+	LDREG	-144(%sp), %rp
+	bve	(%rp)
+	LDREG	-136(%sp), %sp
+# else
+	/* Switch to new stack.  We allocate two 64 byte frames.  */
+	ldo	128(%arg2), %sp
+	/* Save previous stack pointer and return pointer in frame marker */
+	STREG	%r1, -68(%sp)
+	STREG	%rp, -84(%sp)
+	/* Calls use function descriptor if PLABEL bit is set */
+	bb,>=,n	%arg1, 30, 1f
+	depwi	0,31,2, %arg1
+	LDREG	0(%arg1), %arg1
+1:
+	be,l	0(%sr4,%arg1), %sr0, %r31
+	copy	%r31, %rp
+	LDREG	-84(%sp), %rp
+	bv	(%rp)
+	LDREG	-68(%sp), %sp
+# endif /* CONFIG_64BIT */
+ENDPROC(call_on_stack)
+#endif /* CONFIG_IRQSTACKS */
 
 get_register:
 	/*
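The assembly above gives call_on_stack() plain C linkage; its contract is
just "run func(param1) with the stack pointer moved to new_stack, then
restore the caller's stack". A C-level sketch of that behaviour (the stack
switch itself cannot be expressed in C):

	void call_on_stack(unsigned long p1, void *func,
			   unsigned long new_stack)
	{
		void (*f)(unsigned long) = func;

		/* ... %sp is moved to new_stack by the assembly ... */
		f(p1);
		/* ... previous %sp and %rp are restored from the frame ... */
	}
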
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 5595a2f..e158b6f 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -55,13 +55,13 @@
 	 * IODC requires 7K byte stack.  That leaves 1K byte for os_hpmc.
 	 */
 
-	.align	PAGE_SIZE
+	.align 4096
 hpmc_stack:
 	.block 16384
 
 #define HPMC_IODC_BUF_SIZE 0x8000
 
-	.align	PAGE_SIZE
+	.align 4096
 hpmc_iodc_buf:
 	.block HPMC_IODC_BUF_SIZE
 
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8094d3e..e255db0 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -152,6 +152,39 @@
 	.irq_retrigger	= NULL,
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x)		(&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	seq_printf(p, "%*s: ", prec, "STK");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+	seq_printf(p, "  Kernel stack usage\n");
+#endif
+#ifdef CONFIG_SMP
+	seq_printf(p, "%*s: ", prec, "RES");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+	seq_printf(p, "  Rescheduling interrupts\n");
+	seq_printf(p, "%*s: ", prec, "CAL");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+	seq_printf(p, "  Function call interrupts\n");
+#endif
+	seq_printf(p, "%*s: ", prec, "TLB");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+	seq_printf(p, "  TLB shootdowns\n");
+	return 0;
+}
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -219,6 +252,9 @@
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
+	if (i == NR_IRQS)
+		arch_show_interrupts(p, 3);
+
 	return 0;
 }
 
@@ -330,6 +366,66 @@
 	return (BITS_PER_LONG - bit) + TIMER_IRQ;
 }
 
+int sysctl_panic_on_stackoverflow = 1;
+
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	#define STACK_MARGIN	(256*6)
+
+	/* Our stack starts directly behind the thread_info struct. */
+	unsigned long stack_start = (unsigned long) current_thread_info();
+	unsigned long sp = regs->gr[30];
+	unsigned long stack_usage;
+	unsigned int *last_usage;
+
+	/* if sr7 != 0, we interrupted a userspace process which we do not want
+	 * to check for stack overflow. We will only check the kernel stack. */
+	if (regs->sr[7])
+		return;
+
+	/* calculate kernel stack usage */
+	stack_usage = sp - stack_start;
+	last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id());
+
+	if (unlikely(stack_usage > *last_usage))
+		*last_usage = stack_usage;
+
+	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
+		return;
+
+	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
+		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+		current->comm, sp, stack_start, stack_start + THREAD_SIZE);
+
+	if (sysctl_panic_on_stackoverflow)
+		panic("low stack detected by irq handler - check messages\n");
+#endif
+}
+
+#ifdef CONFIG_IRQSTACKS
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union);
+
+static void execute_on_irq_stack(void *func, unsigned long param1)
+{
+	unsigned long *irq_stack_start;
+	unsigned long irq_stack;
+	int cpu = smp_processor_id();
+
+	irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0];
+	irq_stack = (unsigned long) irq_stack_start;
+	irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */
+
+	BUG_ON(*irq_stack_start); /* report bug if we were called recursively. */
+	*irq_stack_start = 1;
+
+	/* This is where we switch to the IRQ stack. */
+	call_on_stack(param1, func, irq_stack);
+
+	*irq_stack_start = 0;
+}
+#endif /* CONFIG_IRQSTACKS */
+
 /* ONLY called from entry.S:intr_extint() */
 void do_cpu_irq_mask(struct pt_regs *regs)
 {
@@ -364,7 +460,13 @@
 		goto set_out;
 	}
 #endif
+	stack_overflow_check(regs);
+
+#ifdef CONFIG_IRQSTACKS
+	execute_on_irq_stack(&generic_handle_irq, irq);
+#else
 	generic_handle_irq(irq);
+#endif /* CONFIG_IRQSTACKS */
 
  out:
 	irq_exit();
@@ -420,6 +522,4 @@
 	cpu_eiem = EIEM_MASK(TIMER_IRQ);
 #endif
         set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
-
 }
-
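
A note on the counters printed above: arch_show_interrupts() and the
inc_irq_stat() calls added later in this series (smp.c, mm/init.c) assume
per-cpu fields along the following lines. This is only a sketch of the
expected irq_cpustat_t layout; the real definition belongs in
arch/parisc/include/asm/hardirq.h, which is not part of this excerpt.

	typedef struct {
		unsigned int __softirq_pending;
		unsigned int kernel_stack_usage;
	#ifdef CONFIG_SMP
		unsigned int irq_resched_count;
		unsigned int irq_call_count;
	#endif
		unsigned int irq_tlb_count;
	} ____cacheline_aligned irq_cpustat_t;

	/* inc_irq_stat() is then the usual per-cpu increment: */
	#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)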
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 312b484..5e1de60 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -563,6 +563,15 @@
  *          %r23 physical page (shifted for tlb insert) of "from" translation
  */
 
+        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+        #define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
+        .macro          convert_phys_for_tlb_insert20  phys
+        extrd,u         \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
+#if _PAGE_SIZE_ENCODING_DEFAULT
+        depdi           _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
+#endif
+	.endm
+
 	/*
 	 * We can't do this since copy_user_page is used to bring in
 	 * file data that might have instructions. Since the data would
@@ -589,15 +598,14 @@
 	sub		%r25, %r1, %r23
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
-	/* FIXME for different page sizes != 4k */
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
 #endif
-	extrd,u		%r26,56,32, %r26	/* convert phys addr to tlb insert format */
-	extrd,u		%r23,56,32, %r23	/* convert phys addr to tlb insert format */
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
+	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
 	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,12, %r28		/* Clear any offset bits */
+	depdi		0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
 	copy		%r28, %r29
 	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
 #else
@@ -747,11 +755,10 @@
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
-	/* FIXME: page size dependend */
 #endif
-	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,12, %r28		/* Clear any offset bits */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
@@ -832,11 +839,10 @@
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
-	/* FIXME: page size dependend */
 #endif
-	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,12, %r28		/* Clear any offset bits */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
@@ -909,11 +915,10 @@
 #ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
-	/* FIXME: page size dependend */
 #endif
-	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
-	depdi		0, 63,12, %r28		/* Clear any offset bits */
+	depdi		0, 63,PAGE_SHIFT, %r28  /* Clear any offset bits */
 #else
 	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
 	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
@@ -959,7 +964,7 @@
 	fic,m		%r1(%sr4,%r28)
 	fic,m		%r1(%sr4,%r28)
 	fic,m		%r1(%sr4,%r28)
-	cmpb,COND(<<)		%r28, %r25,1b
+	cmpb,COND(<<)	%r28, %r25,1b
 	fic,m		%r1(%sr4,%r28)
 
 	sync
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index a3328c2..76b63e72 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -129,6 +129,8 @@
 	printk(KERN_INFO "The 32-bit Kernel has started...\n");
 #endif
 
+	printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+
 	pdc_console_init();
 
 #ifdef CONFIG_64BIT
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index fd1bb15..e3614fb 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -127,7 +127,7 @@
 	unsigned long flags;
 
 	/* Count this now; we may make a call that never returns. */
-	p->ipi_count++;
+	inc_irq_stat(irq_call_count);
 
 	mb();	/* Order interrupt and bit testing. */
 
@@ -155,6 +155,7 @@
 				
 			case IPI_RESCHEDULE:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
+				inc_irq_stat(irq_resched_count);
 				scheduler_ipi();
 				break;
 
@@ -263,17 +264,6 @@
 }
 
 /*
- * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
- * as we want to ensure all TLB's flushed before proceeding.
- */
-
-void
-smp_flush_tlb_all(void)
-{
-	on_each_cpu(flush_tlb_all_local, NULL, 1);
-}
-
-/*
  * Called by secondaries to update state and initialize CPU registers.
  */
 static void __init
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index f517e08..a134ff4 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -59,11 +59,3 @@
     	current->comm, current->pid, r20);
     return -ENOSYS;
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
-					 u32 mask_lo, int fd,
-					 const char __user *pathname)
-{
-	return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
-				 fd, pathname);
-}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 5e05524..e767ab7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -1,12 +1,35 @@
 /* 
  * Linux/PA-RISC Project (http://www.parisc-linux.org/)
  * 
- * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * System call entry code / Linux gateway page
+ * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
  * Licensed under the GNU GPL.
  * thanks to Philipp Rumpf, Mike Shaver and various others
  * sorry about the wall, puffin..
  */
 
+/*
+How does the Linux gateway page on PA-RISC work?
+------------------------------------------------
+The Linux gateway page on PA-RISC is "special".
+It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
+terminology it's Execute, promote to PL0) in the page map.  So anything
+executing on this page executes with kernel level privilege (there's more to it
+than that: to have this happen, you also have to use a branch with a ,gate
+completer to activate the privilege promotion).  The upshot is that everything
+that runs on the gateway page runs at kernel privilege but with the current
+user process address space (although you have access to kernel space via %sr2).
+For the 0x100 syscall entry, we redo the space registers to point to the kernel
+address space (preserving the user address space in %sr3), move to wide mode if
+required, save the user registers and branch into the kernel syscall entry
+point.  For all the other functions, we execute at kernel privilege but don't
+flip address spaces. The net result is that these code snippets are
+executed atomically (because the kernel can't be pre-empted) and they may
+perform architecturally forbidden (to PL3) operations (like setting control
+registers).
+*/
+
+
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
@@ -15,6 +38,7 @@
 #include <asm/thread_info.h>
 #include <asm/assembly.h>
 #include <asm/processor.h>
+#include <asm/cache.h>
 
 #include <linux/linkage.h>
 
@@ -643,7 +667,7 @@
 
 	.section .rodata,"a"
 
-	.align PAGE_SIZE
+	.align 8
 	/* Light-weight-syscall table */
 	/* Start of lws table. */
 ENTRY(lws_table)
@@ -652,13 +676,13 @@
 END(lws_table)
 	/* End of lws table */
 
-	.align PAGE_SIZE
+	.align 8
 ENTRY(sys_call_table)
 #include "syscall_table.S"
 END(sys_call_table)
 
 #ifdef CONFIG_64BIT
-	.align PAGE_SIZE
+	.align 8
 ENTRY(sys_call_table64)
 #define SYSCALL_TABLE_64BIT
 #include "syscall_table.S"
@@ -674,7 +698,7 @@
 		with ldcw.
 	*/
 	.section .data
-	.align	PAGE_SIZE
+	.align	L1_CACHE_BYTES
 ENTRY(lws_lock_start)
 	/* lws locks */
 	.rept 16
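
To make the gateway-page description above concrete, here is a hedged
sketch of how userspace enters it for a 0x100 syscall. The branch target
and the %r20 (syscall number) / %r28 (return value) registers follow the
standard PA-RISC Linux convention; the helper name and the exact clobber
list are illustrative, not taken from this patch.

	/* Minimal sketch of a zero-argument syscall via the gateway page.
	 * The branch into the gateway page performs the privilege
	 * promotion described in the comment above, so the target code
	 * runs at PL0 in the caller's address space. */
	static inline long gateway_syscall0(long nr)
	{
		register unsigned long r20 __asm__("r20") = nr;
		register unsigned long r28 __asm__("r28");

		__asm__ volatile(
			"ble	0x100(%%sr2, %%r0)\n\t"
			"nop"				/* branch delay slot */
			: "=r" (r28)
			: "r" (r20)
			: "r1", "r2", "r31", "memory");
		return r28;
	}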
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f702bff..fe41a98 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -522,10 +522,10 @@
 	 */
 	if (((unsigned long)regs->iaoq[0] & 3) &&
 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 
-	  	/* Kill the user process later */
-	  	regs->iaoq[0] = 0 | 3;
+		/* Kill the user process later */
+		regs->iaoq[0] = 0 | 3;
 		regs->iaoq[1] = regs->iaoq[0] + 4;
-	 	regs->iasq[0] = regs->iasq[1] = regs->sr[7];
+		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
 		regs->gr[0] &= ~PSW_B;
 		return;
 	}
@@ -541,8 +541,8 @@
 		
 		/* set up a new led state on systems shipped with a LED State panel */
 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
-		    
-	    	parisc_terminate("High Priority Machine Check (HPMC)",
+
+		parisc_terminate("High Priority Machine Check (HPMC)",
 				regs, code, 0);
 		/* NOT REACHED */
 		
@@ -584,13 +584,13 @@
 		/* Break instruction trap */
 		handle_break(regs);
 		return;
-	
+
 	case 10:
 		/* Privileged operation trap */
 		die_if_kernel("Privileged operation", regs, code);
 		si.si_code = ILL_PRVOPC;
 		goto give_sigill;
-	
+
 	case 11:
 		/* Privileged register trap */
 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
@@ -634,7 +634,7 @@
 		if(user_mode(regs)){
 			si.si_signo = SIGFPE;
 			/* Set to zero, and let the userspace app figure it out from
-		   	   the insn pointed to by si_addr */
+			   the insn pointed to by si_addr */
 			si.si_code = 0;
 			si.si_addr = (void __user *) regs->iaoq[0];
 			force_sig_info(SIGFPE, &si, current);
@@ -648,7 +648,7 @@
 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
 		handle_fpe(regs);
 		return;
-		
+
 	case 15:
 		/* Data TLB miss fault/Data page fault */
 		/* Fall through */
@@ -660,15 +660,15 @@
 	case 17:
 		/* Non-access data TLB miss fault/Non-access data page fault */
 		/* FIXME: 
-		 	 Still need to add slow path emulation code here!
-		         If the insn used a non-shadow register, then the tlb
+			 Still need to add slow path emulation code here!
+			 If the insn used a non-shadow register, then the tlb
 			 handlers could not have their side-effect (e.g. probe
 			 writing to a target register) emulated since rfir would
 			 erase the changes to said register. Instead we have to
 			 setup everything, call this function we are in, and emulate
 			 by hand. Technically we need to emulate:
 			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
-		*/			  
+		*/
 		fault_address = regs->ior;
 		fault_space = regs->isr;
 		break;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 64a9998..4bb095a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -95,7 +95,7 @@
 	NOTES
 
 	/* Data */
-	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
 
 	/* PA-RISC locks requires 16-byte alignment */
 	. = ALIGN(16);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 157b931..ce939ac 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1069,6 +1069,7 @@
 {
 	int do_recycle;
 
+	inc_irq_stat(irq_tlb_count);
 	do_recycle = 0;
 	spin_lock(&sid_lock);
 	if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1089,6 +1090,7 @@
 #else
 void flush_tlb_all(void)
 {
+	inc_irq_stat(irq_tlb_count);
 	spin_lock(&sid_lock);
 	flush_tlb_all_local(NULL);
 	recycle_sids();
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bbbe021..c33e3ad 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -82,11 +82,6 @@
 	bool
 	default y
 
-config GENERIC_GPIO
-	bool
-	help
-	  Generic GPIO API support
-
 config PPC
 	bool
 	default y
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3fe5259..915fbb4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -150,7 +150,7 @@
 	CURRENT_THREAD_INFO(r11, r1)
 	ld	r10,TI_FLAGS(r11)
 	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
-	bne-	syscall_dotrace
+	bne	syscall_dotrace
 .Lsyscall_dotrace_cont:
 	cmpldi	0,r0,NR_syscalls
 	bge-	syscall_enosys
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index cd6e19d..8a28587 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -126,11 +126,3 @@
 
 	return sys_sync_file_range(fd, offset, nbytes, flags);
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
-					 unsigned mask_hi, unsigned mask_lo,
-					 int dfd, const char __user *pathname)
-{
-	u64 mask = ((u64)mask_hi << 32) | mask_lo;
-	return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
-}
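
The wrapper removed here, like its siblings deleted from parisc, s390,
sparc, and x86 elsewhere in this series, gives way to a single generic
compat definition. A sketch of that replacement, assuming it lives in
fs/notify/fanotify/fanotify_user.c (outside this excerpt) and that
COMPAT_SYSCALL_DEFINE6 is available:

	COMPAT_SYSCALL_DEFINE6(fanotify_mark,
			       int, fanotify_fd, unsigned int, flags,
			       __u32, mask0, __u32, mask1, int, dfd,
			       const char __user *, pathname)
	{
		/* Reassemble the 64-bit mask that was split across two
		 * 32-bit syscall arguments; the half ordering depends on
		 * the endianness of the architecture. */
		return sys_fanotify_mark(fanotify_fd, flags,
	#ifdef __BIG_ENDIAN
					 ((__u64)mask0 << 32) | mask1,
	#else
					 ((__u64)mask1 << 32) | mask0,
	#endif
					 dfd, pathname);
	}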
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index bd40bbb..6e287f1 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -138,7 +138,6 @@
 	bool "PPC4xx GPIO support"
 	depends on 40x
 	select ARCH_REQUIRE_GPIOLIB
-	select GENERIC_GPIO
 	help
 	  Enable gpiolib support for ppc40x based boards
 
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7be9336..d6c7506 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -248,7 +248,6 @@
 	bool "PPC4xx GPIO support"
 	depends on 44x
 	select ARCH_REQUIRE_GPIOLIB
-	select GENERIC_GPIO
 	help
 	  Enable gpiolib support for ppc440 based boards
 
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 8f02b05..efdd37c7 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -203,7 +203,6 @@
 	select DEFAULT_UIMAGE
 	select SWIOTLB
 	select MMIO_NVRAM
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	select GE_FPGA
 	help
@@ -328,7 +327,7 @@
 	select PPC_E500MC
 	select PHYS_64BIT
 	select SWIOTLB
-	select GENERIC_GPIO
+	select GPIOLIB
 	select ARCH_REQUIRE_GPIOLIB
 	select HAS_RAPIDIO
 	select PPC_EPAPR_HV_PIC
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 7a6279e..1afd1e4 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -37,7 +37,6 @@
 	bool "GE PPC9A"
 	select DEFAULT_UIMAGE
 	select MMIO_NVRAM
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	select GE_FPGA
 	help
@@ -47,7 +46,6 @@
 	bool "GE SBC310"
 	select DEFAULT_UIMAGE
 	select MMIO_NVRAM
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	select GE_FPGA
 	help
@@ -57,7 +55,6 @@
 	bool "GE SBC610"
 	select DEFAULT_UIMAGE
 	select MMIO_NVRAM
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	select GE_FPGA
 	select HAS_RAPIDIO
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 1fb0b3c..8dec3c0 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -114,7 +114,6 @@
 
 config 8xx_GPIO
 	bool "GPIO API Support"
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Saying Y here will cause the ports on an MPC8xx processor to be used
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 34d224b..a881232 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -302,7 +302,6 @@
 config QE_GPIO
 	bool "QE GPIO support"
 	depends on QUICC_ENGINE
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Say Y here if you're going to use hardware that connects to the
@@ -315,7 +314,6 @@
 	select PPC_LIB_RHEAP
 	select PPC_PCI_CHOICE
 	select ARCH_REQUIRE_GPIOLIB
-	select GENERIC_GPIO
 	help
 	  The CPM2 (Communications Processor Module) is a coprocessor on
 	  embedded CPUs made by Freescale.  Selecting this option means that
@@ -353,7 +351,6 @@
 config SIMPLE_GPIO
 	bool "Support for simple, memory-mapped GPIO controllers"
 	depends on PPC
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Say Y here to support simple, memory-mapped GPIO controllers.
@@ -364,7 +361,6 @@
 config MCU_MPC8349EMITX
 	bool "MPC8349E-mITX MCU driver"
 	depends on I2C=y && PPC_83xx
-	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Say Y here to enable soft power-off functionality on the Freescale
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 5f7d7ba..7a539f4 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
+#include <linux/aio.h>
 #include <asm/ebcdic.h>
 #include "hypfs.h"
 
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 2d72d9e..9cb1b97 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -793,10 +793,6 @@
 	llgtr	%r2,%r2			# long *
 	jg	compat_sys_stime	# branch to system call
 
-ENTRY(sys32_sysctl_wrapper)
-	llgtr	%r2,%r2 		# struct compat_sysctl_args *
-	jg	compat_sys_sysctl
-
 ENTRY(sys32_fstat64_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgtr	%r3,%r3			# struct stat64 *
@@ -1349,15 +1345,6 @@
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_fanotify_init	# branch to system call
 
-ENTRY(sys_fanotify_mark_wrapper)
-	lgfr	%r2,%r2			# int
-	llgfr	%r3,%r3			# unsigned int
-	sllg	%r4,%r4,32		# get high word of 64bit mask
-	lr	%r4,%r5			# get low word of 64bit mask
-	llgfr	%r5,%r6			# unsigned int
-	llgt	%r6,164(%r15)		# char *
-	jg	sys_fanotify_mark	# branch to system call
-
 ENTRY(sys_prlimit64_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	llgfr	%r3,%r3			# unsigned int
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9f214e9..913410b 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -157,7 +157,7 @@
 SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
 SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
 SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
-SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
+SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
 SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)		/* 150 */
 SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
 SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
@@ -341,7 +341,7 @@
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
 SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
 SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
+SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
 SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
 SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
 SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 78d8ace..8c868cf 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -93,9 +93,6 @@
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_GPIO
-	def_bool n
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 
diff --git a/arch/sh/boards/mach-sdk7786/Makefile b/arch/sh/boards/mach-sdk7786/Makefile
index 8ae56e9..45d32e3 100644
--- a/arch/sh/boards/mach-sdk7786/Makefile
+++ b/arch/sh/boards/mach-sdk7786/Makefile
@@ -1,4 +1,4 @@
 obj-y	:= fpga.o irq.o nmi.o setup.o
 
-obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
+obj-$(CONFIG_GPIOLIB)		+= gpio.o
 obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
diff --git a/arch/sh/boards/mach-x3proto/Makefile b/arch/sh/boards/mach-x3proto/Makefile
index 708c21c..0cbe3d0 100644
--- a/arch/sh/boards/mach-x3proto/Makefile
+++ b/arch/sh/boards/mach-x3proto/Makefile
@@ -1,3 +1,3 @@
 obj-y += setup.o ilsel.o
 
-obj-$(CONFIG_GENERIC_GPIO)	+= gpio.o
+obj-$(CONFIG_GPIOLIB)		+= gpio.o
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index 7fdc102..990195d 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -21,4 +21,4 @@
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7264)	:= pinmux-sh7264.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7269)	:= pinmux-sh7269.o
 
-obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
+obj-$(CONFIG_GPIOLIB)			+= $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 6f13f33..d3634ae 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -30,4 +30,4 @@
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7720)	:= pinmux-sh7720.o
 
 obj-y	+= $(clock-y)
-obj-$(CONFIG_GENERIC_GPIO)	+= $(pinmux-y)
+obj-$(CONFIG_GPIOLIB)			+= $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 8fc6ec2..0705df7 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -47,6 +47,6 @@
 
 obj-y					+= $(clock-y)
 obj-$(CONFIG_SMP)			+= $(smp-y)
-obj-$(CONFIG_GENERIC_GPIO)		+= $(pinmux-y)
+obj-$(CONFIG_GPIOLIB)			+= $(pinmux-y)
 obj-$(CONFIG_PERF_EVENTS)		+= perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= ubc.o
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a639c0d..9ac9f16 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -137,11 +137,6 @@
 	bool
 	default y if SPARC32
 
-config GENERIC_GPIO
-	bool
-	help
-	  Generic GPIO API support
-
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y if SPARC64
 
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 9b40c9c..6cfc1b0 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -253,24 +253,15 @@
 
 	/* Free unneeded trap tables */
 	if (!cpu_present(1)) {
-		ClearPageReserved(virt_to_page(&trapbase_cpu1));
-		init_page_count(virt_to_page(&trapbase_cpu1));
-		free_page((unsigned long)&trapbase_cpu1);
-		totalram_pages++;
+		free_reserved_page(virt_to_page(&trapbase_cpu1));
 		num_physpages++;
 	}
 	if (!cpu_present(2)) {
-		ClearPageReserved(virt_to_page(&trapbase_cpu2));
-		init_page_count(virt_to_page(&trapbase_cpu2));
-		free_page((unsigned long)&trapbase_cpu2);
-		totalram_pages++;
+		free_reserved_page(virt_to_page(&trapbase_cpu2));
 		num_physpages++;
 	}
 	if (!cpu_present(3)) {
-		ClearPageReserved(virt_to_page(&trapbase_cpu3));
-		init_page_count(virt_to_page(&trapbase_cpu3));
-		free_page((unsigned long)&trapbase_cpu3);
-		totalram_pages++;
+		free_reserved_page(virt_to_page(&trapbase_cpu3));
 		num_physpages++;
 	}
 	/* Ok, they are spinning and ready to go. */
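
The trapbase conversions above, like the sparc mm/init_32.c and
mm/init_64.c hunks below, switch to the generic page-freeing helpers from
<linux/mm.h>. As a sketch, free_reserved_page() is equivalent to the
open-coded sequence being deleted:

	static inline void free_reserved_page(struct page *page)
	{
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalram_pages++;
	}

free_reserved_area() applies the same per-page steps across a range,
poisoning each page first and printing the familiar "Freeing ... memory:
NNNk freed" line, which is why the hand-rolled loops and printks can go.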
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 2e680b5..f7c72b6 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -239,15 +239,6 @@
 	nop
 	nop
 
-	.globl		sys32_fanotify_mark
-sys32_fanotify_mark:
-	sethi		%hi(sys_fanotify_mark), %g1
-	sllx		%o2, 32, %o2
-	or		%o2, %o3, %o2
-	mov		%o4, %o3
-	jmpl		%g1 + %lo(sys_fanotify_mark), %g0
-	 mov		%o5, %o4
-
 	.section	__ex_table,"a"
 	.align		4
 	.word		1b, __retl_efault, 2b, __retl_efault
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 8fd9320..6d81597 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -84,7 +84,7 @@
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
 	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/	.word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+/*330*/	.word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
 	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module
 
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 4490c39..af472cf 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -366,45 +366,14 @@
 
 void free_initmem (void)
 {
-	unsigned long addr;
-	unsigned long freed;
-
-	addr = (unsigned long)(&__init_begin);
-	freed = (unsigned long)(&__init_end) - addr;
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-		struct page *p;
-
-		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-		p = virt_to_page(addr);
-
-		ClearPageReserved(p);
-		init_page_count(p);
-		__free_page(p);
-		totalram_pages++;
-		num_physpages++;
-	}
-	printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
-		freed >> 10);
+	num_physpages += free_initmem_default(POISON_FREE_INITMEM);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start < end)
-		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
-			(end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		struct page *p;
-
-		memset((void *)start, POISON_FREE_INITMEM, PAGE_SIZE);
-		p = virt_to_page(start);
-
-		ClearPageReserved(p);
-		init_page_count(p);
-		__free_page(p);
-		totalram_pages++;
-		num_physpages++;
-	}
+	num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM,
+					    "initrd");
 }
 #endif
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index cf72a8a..a717199 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2059,8 +2059,7 @@
 	/* We subtract one to account for the mem_map_zero page
 	 * allocated below.
 	 */
-	totalram_pages -= 1;
-	num_physpages = totalram_pages;
+	num_physpages = totalram_pages - 1;
 
 	/*
 	 * Set up the zero page, mark it reserved, so that page count
@@ -2071,7 +2070,7 @@
 		prom_printf("paging_init: Cannot alloc zero page.\n");
 		prom_halt();
 	}
-	SetPageReserved(mem_map_zero);
+	mark_page_reserved(mem_map_zero);
 
 	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
 	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
@@ -2111,37 +2110,22 @@
 	initend = (unsigned long)(__init_end) & PAGE_MASK;
 	for (; addr < initend; addr += PAGE_SIZE) {
 		unsigned long page;
-		struct page *p;
 
 		page = (addr +
 			((unsigned long) __va(kern_base)) -
 			((unsigned long) KERNBASE));
 		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 
-		if (do_free) {
-			p = virt_to_page(page);
-
-			ClearPageReserved(p);
-			init_page_count(p);
-			__free_page(p);
-			totalram_pages++;
-		}
+		if (do_free)
+			free_reserved_page(virt_to_page(page));
 	}
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start < end)
-		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		struct page *p = virt_to_page(start);
-
-		ClearPageReserved(p);
-		init_page_count(p);
-		__free_page(p);
-		totalram_pages++;
-	}
+	num_physpages += free_reserved_area(start, end, POISON_FREE_INITMEM,
+					    "initrd");
 }
 #endif
 
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 5b6a40d..3aa3766 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -355,11 +355,17 @@
 config KERNEL_PL
 	int "Processor protection level for kernel"
 	range 1 2
-	default "1"
+	default 2 if TILEGX
+	default 1 if !TILEGX
 	---help---
-	  This setting determines the processor protection level the
-	  kernel will be built to run at.  Generally you should use
-	  the default value here.
+	  Since MDE 4.2, the Tilera hypervisor runs the kernel
+	  at PL2 by default.  If running under an older hypervisor,
+	  or as a KVM guest, you must run at PL1.  (The current
+	  hypervisor may also be recompiled with "make HV_PL=2" to
+	  allow it to run a kernel at PL1, but clients running at PL1
+	  are not expected to be supported indefinitely.)
+
+	  If you're not sure, don't change the default.
 
 source "arch/tile/gxio/Kconfig"
 
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index ccd847e2..837dca5 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -107,7 +107,22 @@
 #define HV_DISPATCH_ENTRY_SIZE 32
 
 /** Version of the hypervisor interface defined by this file */
-#define _HV_VERSION 11
+#define _HV_VERSION 13
+
+/** Last version of the hypervisor interface with old hv_init() ABI.
+ *
+ * The change from version 12 to version 13 corresponds to launching
+ * the client by default at PL2 instead of PL1 (corresponding to the
+ * hv itself running at PL3 instead of PL2).  To make this explicit,
+ * the hv_init() API was also extended so the client can report its
+ * desired PL, resulting in a more helpful failure diagnostic.  If you
+ * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl
+ * argument, the hypervisor will assume client_pl = 1.
+ *
+ * Note that this is a deprecated solution and we do not expect to
+ * support clients of the Tilera hypervisor running at PL1 indefinitely.
+ */
+#define _HV_VERSION_OLD_HV_INIT 12
 
 /* Index into hypervisor interface dispatch code blocks.
  *
@@ -377,7 +392,11 @@
 #ifndef __ASSEMBLER__
 
 /** Pass HV_VERSION to hv_init to request this version of the interface. */
-typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber;
+typedef enum {
+  HV_VERSION = _HV_VERSION,
+  HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT,
+
+} HV_VersionNumber;
 
 /** Initializes the hypervisor.
  *
@@ -385,9 +404,11 @@
  * that this program expects, typically HV_VERSION.
  * @param chip_num Architecture number of the chip the client was built for.
  * @param chip_rev_num Revision number of the chip the client was built for.
+ * @param client_pl Privilege level the client is built for
+ *   (not required if interface_version_number == HV_VERSION_OLD_HV_INIT).
  */
 void hv_init(HV_VersionNumber interface_version_number,
-             int chip_num, int chip_rev_num);
+             int chip_num, int chip_rev_num, int client_pl);
 
 
 /** Queries we can make for hv_sysconf().
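
A hedged C-level sketch of the resulting boot-time negotiation; the kernel
actually does this from assembly in head_32.S and head_64.S (both shown
below), and the wrapper function here is purely illustrative:

	static void notify_hv_of_api_version(void)
	{
	#if KERNEL_PL == 1 && _HV_VERSION == 13
		/* Ask for the old ABI so a pre-MDE-4.2 hypervisor accepts
		 * us; with this version it assumes client_pl = 1. */
		hv_init(HV_VERSION_OLD_HV_INIT, TILE_CHIP, TILE_CHIP_REV,
			KERNEL_PL);
	#else
		hv_init(HV_VERSION, TILE_CHIP, TILE_CHIP_REV, KERNEL_PL);
	#endif
	}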
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index f71bfee..ac11530 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -38,7 +38,7 @@
 	  movei r2, TILE_CHIP_REV
 	}
 	{
-	  moveli r0, _HV_VERSION
+	  moveli r0, _HV_VERSION_OLD_HV_INIT
 	  jal hv_init
 	}
 	/* Get a reasonable default ASID in r0 */
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index f9a2734..6093964 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -34,13 +34,19 @@
 ENTRY(_start)
 	/* Notify the hypervisor of what version of the API we want */
 	{
+#if KERNEL_PL == 1 && _HV_VERSION == 13
+	  /* Support older hypervisors by asking for API version 12. */
+	  movei r0, _HV_VERSION_OLD_HV_INIT
+#else
+	  movei r0, _HV_VERSION
+#endif
 	  movei r1, TILE_CHIP
-	  movei r2, TILE_CHIP_REV
 	}
 	{
-	  moveli r0, _HV_VERSION
-	  jal hv_init
+	  movei r2, TILE_CHIP_REV
+	  movei r3, KERNEL_PL
 	}
+	jal hv_init
 	/* Get a reasonable default ASID in r0 */
 	{
 	  move r0, zero
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index b16ac49..b34f79a 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -101,7 +101,7 @@
  * preserve the semantic that the same read lock can be acquired in an
  * interrupt context.
  */
-inline int arch_read_trylock(arch_rwlock_t *rwlock)
+int arch_read_trylock(arch_rwlock_t *rwlock)
 {
 	u32 val;
 	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 41bf720..879990c 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -87,7 +87,7 @@
 static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */
 
 static int ubd_open(struct block_device *bdev, fmode_t mode);
-static int ubd_release(struct gendisk *disk, fmode_t mode);
+static void ubd_release(struct gendisk *disk, fmode_t mode);
 static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
 		     unsigned int cmd, unsigned long arg);
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -1138,7 +1138,7 @@
 	return err;
 }
 
-static int ubd_release(struct gendisk *disk, fmode_t mode)
+static void ubd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ubd *ubd_dev = disk->private_data;
 
@@ -1146,7 +1146,6 @@
 	if(--ubd_dev->count == 0)
 		ubd_close_dev(ubd_dev);
 	mutex_unlock(&ubd_mutex);
-	return 0;
 }
 
 static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 2943e3a..41bcc00 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -23,9 +23,6 @@
 	  designs licensed by PKUnity Ltd.
 	  Please see web page at <http://www.pkunity.com/>.
 
-config GENERIC_GPIO
-	def_bool y
-
 config GENERIC_CSUM
 	def_bool y
 
@@ -156,7 +153,7 @@
 
 config LEDS
 	def_bool y
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 
 config ALIGNMENT_TRAP
 	def_bool y
@@ -219,7 +216,6 @@
 config PUV3_GPIO
 	bool
 	depends on !ARCH_FPGA
-	select GENERIC_GPIO
 	select GPIO_SYSFS
 	default y
 
diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c
index cfe79c9..f9e8625 100644
--- a/arch/unicore32/kernel/sys.c
+++ b/arch/unicore32/kernel/sys.c
@@ -28,19 +28,11 @@
 #include <asm/syscalls.h>
 #include <asm/cacheflush.h>
 
-/* Note: used by the compat code even in 64-bit Linux. */
-SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
-		unsigned long, prot, unsigned long, flags,
-		unsigned long, fd, unsigned long, off_4k)
-{
-	return sys_mmap_pgoff(addr, len, prot, flags, fd,
-			      off_4k);
-}
-
 /* Provide the actual syscall number to call mapping. */
 #undef __SYSCALL
 #define __SYSCALL(nr, call)	[nr] = (call),
 
+#define sys_mmap2 sys_mmap_pgoff
 /* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
 void *sys_call_table[__NR_syscalls] = {
 	[0 ... __NR_syscalls-1] = sys_ni_syscall,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5db2117..6a154a9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -174,9 +174,6 @@
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_GPIO
-	bool
-
 config ARCH_MAY_HAVE_PC_FDC
 	def_bool y
 	depends on ISA_DMA_API
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 4e4907c..8e0ceec 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -243,12 +243,3 @@
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 			     ((u64)len_hi << 32) | len_lo);
 }
-
-asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
-				    u32 mask_lo, u32 mask_hi,
-				    int fd, const char  __user *pathname)
-{
-	return sys_fanotify_mark(fanotify_fd, flags,
-				 ((u64)mask_hi << 32) | mask_lo,
-				 fd, pathname);
-}
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 0ef202e..82c34ee 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -50,9 +50,6 @@
 asmlinkage long sys32_sigreturn(void);
 asmlinkage long sys32_rt_sigreturn(void);
 
-asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
-				    const char __user *);
-
 #endif /* CONFIG_COMPAT */
 
 #endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 5f87b35..2917a64 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,8 +37,8 @@
 unsigned long sys_sigreturn(void);
 
 /* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *);
-int sys_vm86(unsigned long, unsigned long);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
 
 #else /* CONFIG_X86_32 */
 
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index b3a4866..2af848d 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -120,6 +120,9 @@
 #define MSR_CORE_C6_RESIDENCY		0x000003fd
 #define MSR_CORE_C7_RESIDENCY		0x000003fe
 #define MSR_PKG_C2_RESIDENCY		0x0000060d
+#define MSR_PKG_C8_RESIDENCY		0x00000630
+#define MSR_PKG_C9_RESIDENCY		0x00000631
+#define MSR_PKG_C10_RESIDENCY		0x00000632
 
 /* Run Time Average Power Limiting (RAPL) Interface */
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 1cf5766..e8edcf5 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -33,6 +33,7 @@
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -48,7 +49,6 @@
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/irq.h>
-#include <asm/syscalls.h>
 
 /*
  * Known problems:
@@ -202,36 +202,32 @@
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-int sys_vm86old(struct vm86_struct __user *v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
 {
 	struct kernel_vm86_struct info; /* declare this _on top_,
 					 * this avoids wasting of stack space.
 					 * This remains on the stack until we
 					 * return to 32 bit user space.
 					 */
-	struct task_struct *tsk;
-	int tmp, ret = -EPERM;
+	struct task_struct *tsk = current;
+	int tmp;
 
-	tsk = current;
 	if (tsk->thread.saved_sp0)
-		goto out;
+		return -EPERM;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, vm86plus) -
 				       sizeof(info.regs));
-	ret = -EFAULT;
 	if (tmp)
-		goto out;
+		return -EFAULT;
 	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
 	info.regs32 = current_pt_regs();
 	tsk->thread.vm86_info = v86;
 	do_sys_vm86(&info, tsk);
-	ret = 0;	/* we never return here */
-out:
-	return ret;
+	return 0;	/* we never return here */
 }
 
 
-int sys_vm86(unsigned long cmd, unsigned long arg)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
 {
 	struct kernel_vm86_struct info; /* declare this _on top_,
 					 * this avoids wasting of stack space.
@@ -239,7 +235,7 @@
 					 * return to 32 bit user space.
 					 */
 	struct task_struct *tsk;
-	int tmp, ret;
+	int tmp;
 	struct vm86plus_struct __user *v86;
 
 	tsk = current;
@@ -248,8 +244,7 @@
 	case VM86_FREE_IRQ:
 	case VM86_GET_IRQ_BITS:
 	case VM86_GET_AND_RESET_IRQ:
-		ret = do_vm86_irq_handling(cmd, (int)arg);
-		goto out;
+		return do_vm86_irq_handling(cmd, (int)arg);
 	case VM86_PLUS_INSTALL_CHECK:
 		/*
 		 * NOTE: on old vm86 stuff this will return the error
@@ -257,28 +252,23 @@
 		 *  interpreted as (invalid) address to vm86_struct.
 		 *  So the installation check works.
 		 */
-		ret = 0;
-		goto out;
+		return 0;
 	}
 
 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
-	ret = -EPERM;
 	if (tsk->thread.saved_sp0)
-		goto out;
+		return -EPERM;
 	v86 = (struct vm86plus_struct __user *)arg;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, regs32) -
 				       sizeof(info.regs));
-	ret = -EFAULT;
 	if (tmp)
-		goto out;
+		return -EFAULT;
 	info.regs32 = current_pt_regs();
 	info.vm86plus.is_vm86pus = 1;
 	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
 	do_sys_vm86(&info, tsk);
-	ret = 0;	/* we never return here */
-out:
-	return ret;
+	return 0;	/* we never return here */
 }
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8e517bb..8db0010 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -60,6 +60,7 @@
 #define OpGS              25ull  /* GS */
 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
+#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
 
 #define OpBits             5  /* Width of operand field */
 #define OpMask             ((1ull << OpBits) - 1)
@@ -99,6 +100,7 @@
 #define SrcImmUByte (OpImmUByte << SrcShift)
 #define SrcImmU     (OpImmU << SrcShift)
 #define SrcSI       (OpSI << SrcShift)
+#define SrcXLat     (OpXLat << SrcShift)
 #define SrcImmFAddr (OpImmFAddr << SrcShift)
 #define SrcMemFAddr (OpMemFAddr << SrcShift)
 #define SrcAcc      (OpAcc << SrcShift)
@@ -533,6 +535,9 @@
 FOP_SETCC(setnle)
 FOP_END;
 
+FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_END;
+
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)			\
 	do {								\
 		unsigned long _tmp;					\
@@ -2996,6 +3001,28 @@
 	return X86EMUL_CONTINUE;
 }
 
+static int em_aam(struct x86_emulate_ctxt *ctxt)
+{
+	u8 al, ah;
+
+	if (ctxt->src.val == 0)
+		return emulate_de(ctxt);
+
+	al = ctxt->dst.val & 0xff;
+	ah = al / ctxt->src.val;
+	al %= ctxt->src.val;
+
+	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
+
+	/* Set PF, ZF, SF */
+	ctxt->src.type = OP_IMM;
+	ctxt->src.val = 0;
+	ctxt->src.bytes = 1;
+	fastop(ctxt, em_or);
+
+	return X86EMUL_CONTINUE;
+}
+
 static int em_aad(struct x86_emulate_ctxt *ctxt)
 {
 	u8 al = ctxt->dst.val & 0xff;
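
For reference, a small self-contained model of the arithmetic em_aam()
emulates; the 0x0A immediate is the classic AAM encoding, and a zero
immediate raises #DE (emulate_de() above):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative model of AAM: divide AL by the immediate, quotient
	 * to AH, remainder to AL; the emulator then sets PF/ZF/SF from
	 * the new AL (via the em_or fastop). */
	static uint16_t aam(uint16_t ax, uint8_t imm)
	{
		uint8_t al = ax & 0xff;
		uint8_t ah = al / imm;

		al %= imm;
		return (uint16_t)((ah << 8) | al);
	}

	int main(void)
	{
		printf("%#06x\n", aam(0x0035, 10));	/* prints 0x0503 */
		return 0;
	}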
@@ -3936,7 +3963,10 @@
 	/* 0xD0 - 0xD7 */
 	G(Src2One | ByteOp, group2), G(Src2One, group2),
 	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
-	N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
+	I(DstAcc | SrcImmUByte | No64, em_aam),
+	I(DstAcc | SrcImmUByte | No64, em_aad),
+	F(DstAcc | ByteOp | No64, em_salc),
+	I(DstAcc | SrcXLat | ByteOp, em_mov),
 	/* 0xD8 - 0xDF */
 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
 	/* 0xE0 - 0xE7 */
@@ -4198,6 +4228,16 @@
 		op->val = 0;
 		op->count = 1;
 		break;
+	case OpXLat:
+		op->type = OP_MEM;
+		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+		op->addr.mem.ea =
+			register_address(ctxt,
+				reg_read(ctxt, VCPU_REGS_RBX) +
+				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
+		op->addr.mem.seg = seg_override(ctxt);
+		op->val = 0;
+		break;
 	case OpImmFAddr:
 		op->type = OP_IMM;
 		op->addr.mem.ea = ctxt->_eip;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25a791e..260a919 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5434,6 +5434,12 @@
 			return 0;
 		}
 
+		if (vcpu->arch.halt_request) {
+			vcpu->arch.halt_request = 0;
+			ret = kvm_emulate_halt(vcpu);
+			goto out;
+		}
+
 		if (signal_pending(current))
 			goto out;
 		if (need_resched())
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05a8b1a..094b5d9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -555,6 +555,25 @@
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+			!vcpu->guest_xcr0_loaded) {
+		/* kvm_set_xcr() also depends on this */
+		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+		vcpu->guest_xcr0_loaded = 1;
+	}
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->guest_xcr0_loaded) {
+		if (vcpu->arch.xcr0 != host_xcr0)
+			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+		vcpu->guest_xcr0_loaded = 0;
+	}
+}
+
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
 	u64 xcr0;
@@ -571,8 +590,8 @@
 		return 1;
 	if (xcr0 & ~host_xcr0)
 		return 1;
+	kvm_put_guest_xcr0(vcpu);
 	vcpu->arch.xcr0 = xcr0;
-	vcpu->guest_xcr0_loaded = 0;
 	return 0;
 }
 
@@ -5614,25 +5633,6 @@
 	}
 }
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
-			!vcpu->guest_xcr0_loaded) {
-		/* kvm_set_xcr() also depends on this */
-		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-		vcpu->guest_xcr0_loaded = 1;
-	}
-}
-
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->guest_xcr0_loaded) {
-		if (vcpu->arch.xcr0 != host_xcr0)
-			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
-		vcpu->guest_xcr0_loaded = 0;
-	}
-}
-
 static void process_nmi(struct kvm_vcpu *vcpu)
 {
 	unsigned limit = 2;
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 6eb18c4..0e0fabf 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -141,6 +141,11 @@
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
+	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+				|| devfn == PCI_DEVFN(0, 0)
+				|| devfn == PCI_DEVFN(3, 0)))
+		return 1;
+
 	/* This is a workaround for A0 LNC bug where PCI status register does
 	 * not have new CAP bit set. can not be written by SW either.
 	 *
@@ -150,10 +155,7 @@
 	 */
 	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
 		return 0;
-	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
-				|| devfn == PCI_DEVFN(0, 0)
-				|| devfn == PCI_DEVFN(3, 0)))
-		return 1;
+
 	return 0; /* langwell on others */
 }
 
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 4a9be6d..48e8461 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -295,11 +295,10 @@
 			int pos;
 			u32 table_offset, bir;
 
-			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
+			pos = dev->msix_cap;
 			pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
 					      &table_offset);
-			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+			bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
 
 			map_irq.table_base = pci_resource_start(dev, bir);
 			map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index d0d59bf..aabfb83 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -345,7 +345,7 @@
 336	i386	perf_event_open		sys_perf_event_open
 337	i386	recvmmsg		sys_recvmmsg			compat_sys_recvmmsg
 338	i386	fanotify_init		sys_fanotify_init
-339	i386	fanotify_mark		sys_fanotify_mark		sys32_fanotify_mark
+339	i386	fanotify_mark		sys_fanotify_mark		compat_sys_fanotify_mark
 340	i386	prlimit64		sys_prlimit64
 341	i386	name_to_handle_at	sys_name_to_handle_at
 342	i386	open_by_handle_at	sys_open_by_handle_at		compat_sys_open_by_handle_at
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 53d4f68..a492be2 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -85,7 +85,29 @@
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
+/*
+ * Pointer to the xen_vcpu_info structure or
+ * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+ * and xen_vcpu_setup for details. By default it points to shared_info->vcpu_info
+ * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
+ * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
+ * acknowledge pending events.
+ * Also, more subtly, it is used by the patched version of irq enable/disable
+ * e.g. xen_irq_enable_direct and xen_iret in PV mode.
+ *
+ * The desire to be able to do those mask/unmask operations as a single
+ * instruction by using the per-cpu offset held in %gs is the real reason
+ * vcpu info is in a per-cpu pointer and the original reason for this
+ * hypercall.
+ *
+ */
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+
+/*
+ * Per-CPU pages used if the hypervisor supports the VCPUOP_register_vcpu_info
+ * hypercall. These can be used both in PV and PVHVM mode. The structure
+ * overrides the default per_cpu(xen_vcpu, cpu) value.
+ */
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
@@ -157,6 +179,21 @@
 
 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
+	/*
+	 * This path is called twice on PVHVM - first during bootup via
+	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
+	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
+	 * As we can only do the VCPUOP_register_vcpu_info hypercall once,
+	 * let's not overwrite its result.
+	 *
+	 * For PV it is called during restore (xen_vcpu_restore) and bootup
+	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
+	 * use this function.
+	 */
+	if (xen_hvm_domain()) {
+		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
+			return;
+	}
 	if (cpu < MAX_VIRT_CPUS)
 		per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
@@ -172,7 +209,12 @@
 
 	/* Check to see if the hypervisor will put the vcpu_info
 	   structure where we want it, which allows direct access via
-	   a percpu-variable. */
+	   a percpu-variable.
+	   N.B. This hypercall can _only_ be called once per CPU. Subsequent
+	   calls will error out with -EINVAL: the hypervisor has no
+	   unregister variant, and this hypercall does not allow
+	   overwriting info.mfn and info.offset.
+	 */
 	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 
 	if (err) {
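
For context, the hypercall above is fed by assignments that sit just
before the quoted lines in xen_vcpu_setup(); a sketch, with names as in
the Xen interface headers:

	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup = &per_cpu(xen_vcpu_info, cpu);

	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

On success the hypervisor copies the shared vcpu_info into this per-cpu
page and per_cpu(xen_vcpu, cpu) is repointed at xen_vcpu_info, enabling
the single-instruction mask/unmask path described in the enlighten.c
comment above.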
@@ -387,6 +429,9 @@
 		cpuid_leaf1_edx_mask &=
 			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
 			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */
+
+	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
+
 	ax = 1;
 	cx = 0;
 	xen_cpuid(&ax, &bx, &cx, &dx);
@@ -1603,6 +1648,9 @@
 	 * online but xen_hvm_init_shared_info is run at resume time too and
 	 * in that case multiple vcpus might be online. */
 	for_each_online_cpu(cpu) {
+		/* Leave it to be NULL. */
+		if (cpu >= MAX_VIRT_CPUS)
+			continue;
 		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 	}
 }
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8b54603..3002ec1 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -364,7 +364,7 @@
 	int irq;
 	const char *name;
 
-	WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
 
 	/*
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index b09de49..0a1b95f 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -1,11 +1,9 @@
-config FRAME_POINTER
-	def_bool n
-
 config ZONE_DMA
 	def_bool y
 
 config XTENSA
 	def_bool y
+	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_IDE
 	select GENERIC_ATOMIC64
 	select HAVE_GENERIC_HARDIRQS
@@ -33,9 +31,6 @@
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_GPIO
-	bool
-
 config ARCH_HAS_ILOG2_U32
 	def_bool n
 
@@ -52,6 +47,15 @@
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
+config LOCKDEP_SUPPORT
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
 config MMU
 	def_bool n
 
@@ -103,6 +107,35 @@
 	help
 	Can we use information of configuration file?
 
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	bool "Initialize Xtensa MMU inside the Linux kernel code"
+	default y
+	help
+	  Earlier versions initialized the MMU in the exception vector
+	  before jumping to _startup in head.S, which had the advantage
+	  that it was possible to place a software breakpoint at 'reset'
+	  and then enter your normal kernel breakpoints once the MMU was
+	  mapped to the kernel mappings (0xC0000000).
+
+	  This unfortunately doesn't work for U-Boot and likely also won't
+	  work for using KEXEC to have a hot kernel ready for doing a
+	  KDUMP.
+
+	  So now the MMU is initialized in head.S, but it's necessary to
+	  use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+	  xt-gdb can't place a software breakpoint in the 0xD region prior
+	  to mapping the MMU, and after mapping, even if the area of low
+	  memory was mapped, gdb wouldn't remove the breakpoint on hitting
+	  it as the PC wouldn't match. Since hardware breakpoints are
+	  recommended for Linux configurations, it seems reasonable to just
+	  assume they exist and leave this older mechanism for unfortunate
+	  souls that choose not to follow Tensilica's recommendation.
+
+	  Selecting this will cause U-Boot to set the KERNEL load and entry
+	  address to 0x00003000 instead of the mapped address of 0xD0003000.
+
+	  If in doubt, say Y.
+
 endmenu
 
 config XTENSA_CALIBRATE_CCOUNT
@@ -252,21 +285,6 @@
 
 menu "Executable file formats"
 
-# only elf supported
-config KCORE_ELF
-	def_bool y
-        depends on PROC_FS
-        help
-          If you enabled support for /proc file system then the file
-          /proc/kcore will contain the kernel core image in ELF format. This
-          can be used in gdb:
-
-          $ cd /usr/src/linux ; gdb vmlinux /proc/kcore
-
-          This is especially useful if you have compiled the kernel with the
-          "-g" option to preserve debugging information. It is mainly used
-	  for examining kernel data structures on the live kernel.
-
 source "fs/Kconfig.binfmt"
 
 endmenu
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index 1fe01b7..89db089 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -12,6 +12,7 @@
 
 export OBJCOPY_ARGS
 export CPPFLAGS_boot.lds += -P -C
+export KBUILD_AFLAGS += -mtext-section-literals
 
 boot-y		:= bootstrap.o
 
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 7b646e0..932b58e 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -1,41 +1,29 @@
-#include <variant/core.h>
+/*
+ *  linux/arch/xtensa/boot/boot-elf/boot.lds.S
+ *
+ *  Copyright (C) 2008 - 2013 by Tensilica Inc.
+ *
+ *  Chris Zankel <chris@zankel.net>
+ *  Marc Gauthier <marc@tensilica.com>
+ *  Pete Delaney <piet@tensilica.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/vectors.h>
 OUTPUT_ARCH(xtensa)
 ENTRY(_ResetVector)
 
 SECTIONS
 {
-	.start 0xD0000000 : { *(.start) }
-
-	.text 0xD0000000:
+	.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
 	{
-		__reloc_start = . ;
-		_text_start = . ;
-		*(.literal .text.literal .text)
-		_text_end = . ;
+		*(.ResetVector.text)
 	}
 
-	.rodata ALIGN(0x04):
-	{
-		*(.rodata)
-		*(.rodata1)
-	}
-
-	.data ALIGN(0x04):
-	{
-		*(.data)
-		*(.data1)
-		*(.sdata)
-		*(.sdata2)
-		*(.got.plt)
-		*(.got)
-		*(.dynamic)
-	}
-
-	__reloc_end = . ;
-
-	. = ALIGN(0x10);
-	__image_load = . ;
-	.image 0xd0001000:
+	.image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
 	{
 		_image_start = .;
 		*(image)
@@ -43,7 +31,6 @@
 		_image_end = .	;
 	}
 
-
 	.bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
 	{
 		__bss_start = .;
@@ -53,14 +40,15 @@
 		*(.bss)
 		__bss_end = .;
 	}
-	_end = .;
-	_param_start = .;
 
-	.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+	/*
+	 * This is a remapped copy of the Reset Vector Code.
+	 * It keeps gdb in sync with the PC after switching
+	 * to the temporary mapping used while setting up
+	 * the V2 MMU mappings for Linux.
+	 */
+	.ResetVector.remapped_text 0x46000000 (INFO):
 	{
-		*(.ResetVector.text)
+		*(.ResetVector.remapped_text)
 	}
-
-
-	PROVIDE (end = .);
 }
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index 464298b..1388a49 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,18 +1,77 @@
+/*
+ * arch/xtensa/boot/boot-elf/bootstrap.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Piet Delaney <piet@tensilica.com>
+ */
 
 #include <asm/bootparam.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <linux/linkage.h>
 
-
-/* ResetVector
- */
-	.section        .ResetVector.text, "ax"
+	.section	.ResetVector.text, "ax"
 	.global         _ResetVector
+	.global         reset
+
 _ResetVector:
-	_j reset
+	_j _SetupMMU
+
+	.begin  no-absolute-literals
+	.literal_position
+
 	.align 4
 RomInitAddr:
-	.word 0xd0001000
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+	XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+	.word 0x00003000
+#else
+	.word 0xd0003000
+#endif
 RomBootParam:
 	.word _bootparam
+_bootparam:
+	.short	BP_TAG_FIRST
+	.short	4
+	.long	BP_VERSION
+	.short	BP_TAG_LAST
+	.short	0
+	.long	0
+
+	.align  4
+_SetupMMU:
+	movi	a0, 0
+	wsr	a0, windowbase
+	rsync
+	movi	a0, 1
+	wsr	a0, windowstart
+	rsync
+	movi	a0, 0x1F
+	wsr	a0, ps
+	rsync
+
+	Offset = _SetupMMU - _ResetVector
+
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#endif
+
+	.end    no-absolute-literals
+
+	rsil    a0, XCHAL_DEBUGLEVEL-1
+	rsync
 reset:
 	l32r    a0, RomInitAddr
 	l32r	a2, RomBootParam
@@ -21,13 +80,25 @@
 	jx      a0
 
 	.align 4
-	.section .bootstrap.data, "aw"
 
-	.globl _bootparam
-_bootparam:
-	.short	BP_TAG_FIRST
-	.short	4
-	.long	BP_VERSION
-	.short	BP_TAG_LAST
-	.short	0
-	.long	0
+	.section	.ResetVector.remapped_text, "x"
+	.global         _RemappedResetVector
+
+	/* Do org before literals */
+	.org 0
+
+_RemappedResetVector:
+	.begin  no-absolute-literals
+	.literal_position
+
+	_j	_RemappedSetupMMU
+
+	/* Position Remapped code at the same location as the original code */
+	. = _RemappedResetVector + Offset
+
+_RemappedSetupMMU:
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#endif
+
+	.end    no-absolute-literals
diff --git a/arch/xtensa/boot/boot-redboot/boot.ld b/arch/xtensa/boot/boot-redboot/boot.ld
index 5bbcaf9..b0b9e95 100644
--- a/arch/xtensa/boot/boot-redboot/boot.ld
+++ b/arch/xtensa/boot/boot-redboot/boot.ld
@@ -33,7 +33,7 @@
 
 	. = ALIGN(0x10);
 	__image_load = . ;
-	.image 0xd0001000: AT(__image_load)
+	.image 0xd0003000: AT(__image_load)
 	{
 		_image_start = .;
 		*(image)
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index bfbf8af..5457598 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,7 +4,11 @@
 # for more details.
 #
 
-UIMAGE_LOADADDR = 0xd0001000
+ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+UIMAGE_LOADADDR = 0x00003000
+else
+UIMAGE_LOADADDR = 0xd0003000
+endif
 UIMAGE_COMPRESSION = gzip
 
 $(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index ddab37b..77c52f8 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -10,7 +10,6 @@
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_GPIO=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_NO_IOPORT=y
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index eaf1b8f..4799c6a 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -10,7 +10,6 @@
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_GPIO=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_NO_IOPORT=y
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 095f0a2..1b98264 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -15,6 +15,7 @@
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kvm_para.h
+generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += percpu.h
diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h
index 40a8c178..36dc7a6 100644
--- a/arch/xtensa/include/asm/ftrace.h
+++ b/arch/xtensa/include/asm/ftrace.h
@@ -1 +1,33 @@
-/* empty */
+/*
+ * arch/xtensa/include/asm/ftrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_FTRACE_H
+#define _XTENSA_FTRACE_H
+
+#include <asm/processor.h>
+
+#define HAVE_ARCH_CALLER_ADDR
+#define CALLER_ADDR0 ({ unsigned long a0, a1; \
+		__asm__ __volatile__ ( \
+			"mov %0, a0\n" \
+			"mov %1, a1\n" \
+			: "=r"(a0), "=r"(a1) : : ); \
+		MAKE_PC_FROM_RA(a0, a1); })
+#ifdef CONFIG_FRAME_POINTER
+extern unsigned long return_address(unsigned level);
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#else
+#define CALLER_ADDR1 (0)
+#define CALLER_ADDR2 (0)
+#define CALLER_ADDR3 (0)
+#endif
+
+#endif /* _XTENSA_FTRACE_H */
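
The CALLER_ADDR0 macro above reads the live a0/a1 registers and folds the windowed return address into a full PC via MAKE_PC_FROM_RA(); CALLER_ADDR1..3 fall back to the frame-pointer walker added in stacktrace.c below. A minimal sketch of the intended use; record_caller() is hypothetical and not part of the patch:

    /* Hypothetical illustration only; not part of the patch. */
    static unsigned long record_caller(void)
    {
            /* The inline asm grabs a0 (windowed return address) and a1
             * (stack pointer); MAKE_PC_FROM_RA() rebuilds the full PC. */
            return CALLER_ADDR0;
    }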
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e1f8ba4..722553f 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,9 @@
 #ifndef _XTENSA_INITIALIZE_MMU_H
 #define _XTENSA_INITIALIZE_MMU_H
 
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
 #ifdef __ASSEMBLY__
 
 #define XTENSA_HWVERSION_RC_2009_0 230000
@@ -48,6 +51,110 @@
 	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
 	 */
 
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+	movi	a1, 0
+	_call0	1f
+	_j	2f
+
+	.align	4
+1:	movi	a2, 0x10000000
+	movi	a3, 0x18000000
+	add	a2, a2, a0
+9:	bgeu	a2, a3, 9b	/* PC is out of the expected range */
+
+	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+	movi	a2, 0x40000006
+	idtlb	a2
+	iitlb	a2
+	isync
+
+	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+	 * and jump to the new mapping.
+	 */
+#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK	(_PAGE_CA_WB     | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
+	srli	a3, a0, 27
+	slli	a3, a3, 27
+	addi	a3, a3, CA_BYPASS
+	addi	a7, a2, -1
+	wdtlb	a3, a7
+	witlb	a3, a7
+	isync
+
+	slli	a4, a0, 5
+	srli	a4, a4, 5
+	addi	a5, a2, -6
+	add	a4, a4, a5
+	jx	a4
+
+	/* Step 3: unmap everything other than current area.
+	 *	   Start at 0x60000000, wrap around, and end with 0x20000000
+	 */
+2:	movi	a4, 0x20000000
+	add	a5, a2, a4
+3:	idtlb	a5
+	iitlb	a5
+	add	a5, a5, a4
+	bne	a5, a2, 3b
+
+	/* Step 4: Setup MMU with the old V2 mappings. */
+	movi	a6, 0x01000000
+	wsr	a6, ITLBCFG
+	wsr	a6, DTLBCFG
+	isync
+
+	movi	a5, 0xd0000005
+	movi	a4, CA_WRITEBACK
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xd8000005
+	movi	a4, CA_BYPASS
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xe0000006
+	movi	a4, 0xf0000000 + CA_WRITEBACK
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	movi	a5, 0xf0000006
+	movi	a4, 0xf0000000 + CA_BYPASS
+	wdtlb	a4, a5
+	witlb	a4, a5
+
+	isync
+
+	/* Jump to self, using MMU v2 mappings. */
+	movi	a4, 1f
+	jx	a4
+
+1:
+	movi    a2, VECBASE_RESET_VADDR
+	wsr	a2, vecbase
+
+	/* Step 5: remove temporary mapping. */
+	idtlb	a7
+	iitlb	a7
+	isync
+
+	movi	a0, 0
+	wsr	a0, ptevaddr
+	rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+	  XCHAL_HAVE_SPANNING_WAY */
+
 	.endm
 
 #endif /*__ASSEMBLY__*/
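
A hedged decoding of the wdtlb/witlb operands used in the steps above, inferred from the instruction sequence itself rather than from an ISA reference: the address operand packs a 512MB-aligned vaddr with the TLB way number in its low bits, and the data operand packs the paddr with the cache-attribute/permission bits:

    /* Illustrative reconstruction only (values taken from the code above):
     *   0x40000006 = vaddr 0x40000000 in way 6 (temporary identity map)
     *   0xd0000005 = vaddr 0xD0000000 in way 5 (KSEG, once ITLBCFG/DTLBCFG
     *                select 128MB pages for that way)
     * Data side, e.g. step 2: (paddr & 0xf8000000) + CA_BYPASS.
     */
    #define TLB_ENTRY(vaddr, way)   (((vaddr) & 0xf8000000) | (way))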
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index f865b1c..ea36674 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -47,7 +47,10 @@
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	return (flags & 0xf) != 0;
+#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL
+#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL"
+#endif
+	return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL;
 }
 
 static inline bool arch_irqs_disabled(void)
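
The rewritten check treats interrupts as disabled once PS.INTLEVEL reaches LOCKLEVEL or PS.EXCM is set, and the #error guarantees at build time that the EXCM bit alone clears the LOCKLEVEL threshold. A worked example with illustrative, configuration-dependent values:

    /* Assuming LOCKLEVEL == 1 and PS_EXCM_BIT == 4, for illustration:
     *   INTLEVEL 0, EXCM clear: 0x00 >= 1 -> false (IRQs enabled)
     *   INTLEVEL 1, EXCM clear: 0x01 >= 1 -> true  (IRQs disabled)
     *   INTLEVEL 0, EXCM set:   0x10 >= 1 -> true  (IRQs disabled)
     * The old test, (flags & 0xf) != 0, only looked at the INTLEVEL
     * field and so missed the EXCM-only case.
     */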
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
deleted file mode 100644
index bf2128a..0000000
--- a/arch/xtensa/include/asm/linkage.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/linkage.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_LINKAGE_H
-#define _XTENSA_LINKAGE_H
-
-/* Nothing to do here ... */
-
-#endif	/* _XTENSA_LINKAGE_H */
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
new file mode 100644
index 0000000..6a05fcb
--- /dev/null
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -0,0 +1,36 @@
+/*
+ * arch/xtensa/include/asm/stacktrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#ifndef _XTENSA_STACKTRACE_H
+#define _XTENSA_STACKTRACE_H
+
+#include <linux/sched.h>
+
+struct stackframe {
+	unsigned long pc;
+	unsigned long sp;
+};
+
+static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+{
+	unsigned long *sp;
+
+	if (!task || task == current)
+		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
+	else
+		sp = (unsigned long *)task->thread.sp;
+
+	return sp;
+}
+
+void walk_stackframe(unsigned long *sp,
+		int (*fn)(struct stackframe *frame, void *data),
+		void *data);
+
+#endif /* _XTENSA_STACKTRACE_H */
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 9e85ce8..3d35e5d 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -19,13 +19,16 @@
 #define _INTLEVEL(x)	XCHAL_INT ## x ## _LEVEL
 #define INTLEVEL(x)	_INTLEVEL(x)
 
-#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+#if XCHAL_NUM_TIMERS > 0 && \
+	INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     0
 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+#elif XCHAL_NUM_TIMERS > 1 && \
+	INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     1
 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
-#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+#elif XCHAL_NUM_TIMERS > 2 && \
+	INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     2
 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
 #else
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index b5464ef..917488a 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -22,10 +22,9 @@
 
 static inline void spill_registers(void)
 {
-	unsigned int a0, ps;
 
 	__asm__ __volatile__ (
-		"movi	a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t"
+		"movi	a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
 		"mov	a12, a0\n\t"
 		"rsr	a13, sar\n\t"
 		"xsr	a14, ps\n\t"
@@ -35,7 +34,7 @@
 		"mov	a0, a12\n\t"
 		"wsr	a13, sar\n\t"
 		"wsr	a14, ps\n\t"
-		: : "a" (&a0), "a" (&ps)
+		: :
 #if defined(CONFIG_FRAME_POINTER)
 		: "a2", "a3", "a4",       "a11", "a12", "a13", "a14", "a15",
 #else
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 0000000..c52b656
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,125 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+
+#if defined(CONFIG_MMU)
+
+/* Will Become VECBASE */
+#define VIRTUAL_MEMORY_ADDRESS		0xD0000000
+
+/* Image Virtual Start Address */
+#define KERNELOFFSET			0xD0003000
+
+#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+  /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
+  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
+  #define LOAD_MEMORY_ADDRESS		0x00003000
+#else
+  /* MMU v2 - XCHAL_HAVE_PTP_MMU == 0 */
+  #define PHYSICAL_MEMORY_ADDRESS	0xD0000000
+  #define LOAD_MEMORY_ADDRESS		0xD0003000
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+  /* MMU Not being used - Virtual == Physical */
+
+  /* VECBASE */
+  #define VIRTUAL_MEMORY_ADDRESS	0x00002000
+
+  /* Location of the start of the kernel text, _start */
+  #define KERNELOFFSET			0x00003000
+  #define PHYSICAL_MEMORY_ADDRESS	0x00000000
+
+  /* Loaded just above possibly live vectors */
+  #define LOAD_MEMORY_ADDRESS		0x00003000
+
+#endif /* CONFIG_MMU */
+
+#define XC_VADDR(offset)		(VIRTUAL_MEMORY_ADDRESS  + offset)
+#define XC_PADDR(offset)		(PHYSICAL_MEMORY_ADDRESS + offset)
+
+/* Used to set VECBASE register */
+#define VECBASE_RESET_VADDR		VIRTUAL_MEMORY_ADDRESS
+
+#define RESET_VECTOR_VECOFS		(XCHAL_RESET_VECTOR_VADDR - \
+						VECBASE_RESET_VADDR)
+#define RESET_VECTOR_VADDR		XC_VADDR(RESET_VECTOR_VECOFS)
+
+#define RESET_VECTOR1_VECOFS		(XCHAL_RESET_VECTOR1_VADDR - \
+						VECBASE_RESET_VADDR)
+#define RESET_VECTOR1_VADDR		XC_VADDR(RESET_VECTOR1_VECOFS)
+
+#if XCHAL_HAVE_VECBASE
+
+#define USER_VECTOR_VADDR		XC_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR		XC_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR		XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR		XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR		XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
+
+#define DEBUG_VECTOR_VADDR		XC_VADDR(XCHAL_DEBUG_VECOFS)
+
+#undef  XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_NMI_VECTOR_VADDR		XC_VADDR(XCHAL_NMI_VECOFS)
+
+#undef  XCHAL_INTLEVEL7_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_VADDR	XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef  XCHAL_VECBASE_RESET_VADDR
+#undef  XCHAL_RESET_VECTOR0_VADDR
+#undef  XCHAL_USER_VECTOR_VADDR
+#undef  XCHAL_KERNEL_VECTOR_VADDR
+#undef  XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef  XCHAL_WINDOW_VECTORS_VADDR
+#undef  XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef  XCHAL_DEBUG_VECTOR_VADDR
+#undef  XCHAL_NMI_VECTOR_VADDR
+#undef  XCHAL_INTLEVEL7_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR		XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR		XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR		XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR		XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR		XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR		XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR		XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR		XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR		XCHAL_INTLEVEL6_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR		XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index c3a59d9..1e7fc87 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -4,14 +4,16 @@
 
 extra-y := head.o vmlinux.lds
 
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
-	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o  \
-	 pci-dma.o
+obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+	 ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
+	 vectors.o
 
 obj-$(CONFIG_KGDB) += xtensa-stub.o
 obj-$(CONFIG_PCI) += pci.o
 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 
+AFLAGS_head.o += -mtext-section-literals
+
 # In the Xtensa architecture, assembly generates literals which must always
 # precede the L32R instruction with a relative offset less than 256 kB.
 # Therefore, the .text and .literal section must be combined in parenthesis
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 63845f9..5082507 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -354,16 +354,16 @@
 	 * so we can allow exceptions and interrupts (*) again.
 	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
 	 *
-	 * (*) We only allow interrupts of higher priority than current IRQ
+	 * (*) We only allow interrupts if they were previously enabled and
+	 *     we're not handling an IRQ
 	 */
 
 	rsr	a3, ps
-	addi	a0, a0, -4
-	movi	a2, 1
+	addi	a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
+	movi	a2, LOCKLEVEL
 	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
 					# a3 = PS.INTLEVEL
-	movnez	a2, a3, a3		# a2 = 1: level-1, > 1: high priority
-	moveqz	a3, a2, a0		# a3 = IRQ level iff interrupt
+	moveqz	a3, a2, a0		# a3 = LOCKLEVEL iff interrupt
 	movi	a2, 1 << PS_WOE_BIT
 	or	a3, a3, a2
 	rsr	a0, exccause
@@ -389,6 +389,22 @@
 
 	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
 	
+#ifdef CONFIG_TRACE_IRQFLAGS
+	l32i	a4, a1, PT_DEPC
+	/* Double exception means we came here with an exception
+	 * while PS.EXCM was set, i.e. interrupts disabled.
+	 */
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+	l32i	a4, a1, PT_EXCCAUSE
+	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* Coming here with an interrupt means interrupts were enabled
+	 * and we've just disabled them.
+	 */
+	movi	a4, trace_hardirqs_off
+	callx4	a4
+1:
+#endif
+
 	/* Go to second-level dispatcher. Set up parameters to pass to the
 	 * exception handler and call the exception handler.
 	 */
@@ -407,11 +423,29 @@
 	.global common_exception_return
 common_exception_return:
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	l32i	a4, a1, PT_DEPC
+	/* Double exception means we came here with an exception
+	 * while PS.EXCM was set, i.e. interrupts disabled.
+	 */
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+	l32i	a4, a1, PT_EXCCAUSE
+	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* Coming here with an interrupt means interrupts were enabled
+	 * and we'll reenable them on return.
+	 */
+	movi	a4, trace_hardirqs_on
+	callx4	a4
+1:
+#endif
+
 	/* Jump if we are returning from kernel exceptions. */
 
 1:	l32i	a3, a1, PT_PS
 	_bbci.l	a3, PS_UM_BIT, 4f
 
+	rsil	a2, 0
+
 	/* Specific to a user exception exit:
 	 * We need to check some flags for signal handling and rescheduling,
 	 * and have to restore WB and WS, extra states, and all registers
@@ -652,51 +686,19 @@
 
 	l32i	a0, a1, PT_DEPC
 	l32i	a3, a1, PT_AREG3
-	_bltui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-
-	wsr	a0, depc
 	l32i	a2, a1, PT_AREG2
-	l32i	a0, a1, PT_AREG0
-	l32i	a1, a1, PT_AREG1
-	rfde
+	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
-1:
 	/* Restore a0...a3 and return */
 
-	rsr	a0, ps
-	extui	a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
-	movi	a0, 2f
-	slli	a2, a2, 4
-	add	a0, a2, a0
-	l32i	a2, a1, PT_AREG2
-	jx	a0
-
-	.macro	irq_exit_level level
-	.align	16
-	.if	XCHAL_EXCM_LEVEL >= \level
-	l32i	a0, a1, PT_PC
-	wsr	a0, epc\level
-	l32i	a0, a1, PT_AREG0
-	l32i	a1, a1, PT_AREG1
-	rfi	\level
-	.endif
-	.endm
-
-	.align	16
-2:
 	l32i	a0, a1, PT_AREG0
 	l32i	a1, a1, PT_AREG1
 	rfe
 
-	.align	16
-	/* no rfi for level-1 irq, handled by rfe above*/
-	nop
-
-	irq_exit_level 2
-	irq_exit_level 3
-	irq_exit_level 4
-	irq_exit_level 5
-	irq_exit_level 6
+1:	wsr	a0, depc
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+	rfde
 
 ENDPROC(kernel_exception)
 
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index df88f98..ef12c0e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -48,17 +48,36 @@
 	 */
 
 	__HEAD
+	.begin	no-absolute-literals
+
 ENTRY(_start)
 
-	_j	2f
+	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+	wsr     a2, excsave1
+	_j	_SetupMMU
+
 	.align	4
-1:	.word	_startup
-2:	l32r	a0, 1b
+	.literal_position
+.Lstartup:
+	.word	_startup
+
+	.align	4
+	.global _SetupMMU
+_SetupMMU:
+	Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+	initialize_mmu
+#endif
+	.end	no-absolute-literals
+
+	l32r	a0, .Lstartup
 	jx	a0
 
 ENDPROC(_start)
 
-	.section .init.text, "ax"
+	__INIT
+	.literal_position
 
 ENTRY(_startup)
 
@@ -67,10 +86,6 @@
 	movi	a0, LOCKLEVEL
 	wsr	a0, ps
 
-	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
-
-	wsr	a2, excsave1
-
 	/* Start with a fresh windowbase and windowstart.  */
 
 	movi	a1, 1
@@ -86,7 +101,9 @@
 	/* Clear debugging registers. */
 
 #if XCHAL_HAVE_DEBUG
+#if XCHAL_NUM_IBREAK > 0
 	wsr	a0, ibreakenable
+#endif
 	wsr	a0, icount
 	movi	a1, 15
 	wsr	a0, icountlevel
@@ -156,8 +173,6 @@
 
 	isync
 
-	initialize_mmu
-
 	/* Unpack data sections
 	 *
 	 * The linker script used to build the Linux kernel image
@@ -205,6 +220,10 @@
 
 	___flush_dcache_all a2 a3
 #endif
+	memw
+	isync
+	___invalidate_icache_all a2 a3
+	isync
 
 	/* Setup stack and enable window exceptions (keep irqs disabled) */
 
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 44bf21c..2bd6c35 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -36,6 +36,7 @@
 _F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
 _F(void, heartbeat, (void), { });
 _F(int,  pcibios_fixup, (void), { return 0; });
+_F(void, pcibios_init, (void), { });
 
 #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
 _F(void, calibrate_ccount, (void),
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
new file mode 100644
index 0000000..7d2c317
--- /dev/null
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -0,0 +1,120 @@
+/*
+ * arch/xtensa/kernel/stacktrace.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
+ */
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/traps.h>
+
+void walk_stackframe(unsigned long *sp,
+		int (*fn)(struct stackframe *frame, void *data),
+		void *data)
+{
+	unsigned long a0, a1;
+	unsigned long sp_end;
+
+	a1 = (unsigned long)sp;
+	sp_end = ALIGN(a1, THREAD_SIZE);
+
+	spill_registers();
+
+	while (a1 < sp_end) {
+		struct stackframe frame;
+
+		sp = (unsigned long *)a1;
+
+		a0 = *(sp - 4);
+		a1 = *(sp - 3);
+
+		if (a1 <= (unsigned long)sp)
+			break;
+
+		frame.pc = MAKE_PC_FROM_RA(a0, a1);
+		frame.sp = a1;
+
+		if (fn(&frame, data))
+			return;
+	}
+}
+
+#ifdef CONFIG_STACKTRACE
+
+struct stack_trace_data {
+	struct stack_trace *trace;
+	unsigned skip;
+};
+
+static int stack_trace_cb(struct stackframe *frame, void *data)
+{
+	struct stack_trace_data *trace_data = data;
+	struct stack_trace *trace = trace_data->trace;
+
+	if (trace_data->skip) {
+		--trace_data->skip;
+		return 0;
+	}
+	if (!kernel_text_address(frame->pc))
+		return 0;
+
+	trace->entries[trace->nr_entries++] = frame->pc;
+	return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+	struct stack_trace_data trace_data = {
+		.trace = trace,
+		.skip = trace->skip,
+	};
+	walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct return_addr_data {
+	unsigned long addr;
+	unsigned skip;
+};
+
+static int return_address_cb(struct stackframe *frame, void *data)
+{
+	struct return_addr_data *r = data;
+
+	if (r->skip) {
+		--r->skip;
+		return 0;
+	}
+	if (!kernel_text_address(frame->pc))
+		return 0;
+	r->addr = frame->pc;
+	return 1;
+}
+
+unsigned long return_address(unsigned level)
+{
+	struct return_addr_data r = {
+		.skip = level + 1,
+	};
+	walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+	return r.addr;
+}
+EXPORT_SYMBOL(return_address);
+
+#endif
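
walk_stackframe() above relies on the windowed ABI's base save area: after spill_registers(), the four words just below each frame's stack pointer hold the caller's a0..a3, which is why the return address and the previous stack pointer are read at sp[-4] and sp[-3]. Sketched layout, inferred from the walker itself rather than from an ABI document:

    /* Base save area below a frame's SP, as the walker reads it:
     *   sp[-4]  caller's a0 (return address -> frame.pc)
     *   sp[-3]  caller's a1 (caller's SP    -> next frame)
     *   sp[-2]  caller's a2
     *   sp[-1]  caller's a3
     */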
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 458186d..3e8a05c 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -11,7 +11,7 @@
  *
  * Essentially rewritten for the Xtensa architecture port.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2013 Tensilica Inc.
  *
  * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
  * Chris Zankel	<chris@zankel.net>
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/hardirq.h>
 
+#include <asm/stacktrace.h>
 #include <asm/ptrace.h>
 #include <asm/timex.h>
 #include <asm/uaccess.h>
@@ -195,7 +196,6 @@
 
 /*
  * IRQ handler.
- * PS.INTLEVEL is the current IRQ priority level.
  */
 
 extern void do_IRQ(int, struct pt_regs *);
@@ -212,18 +212,21 @@
 		XCHAL_INTLEVEL6_MASK,
 		XCHAL_INTLEVEL7_MASK,
 	};
-	unsigned level = get_sr(ps) & PS_INTLEVEL_MASK;
-
-	if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask)))
-		return;
 
 	for (;;) {
 		unsigned intread = get_sr(interrupt);
 		unsigned intenable = get_sr(intenable);
-		unsigned int_at_level = intread & intenable &
-			int_level_mask[level];
+		unsigned int_at_level = intread & intenable;
+		unsigned level;
 
-		if (!int_at_level)
+		for (level = LOCKLEVEL; level > 0; --level) {
+			if (int_at_level & int_level_mask[level]) {
+				int_at_level &= int_level_mask[level];
+				break;
+			}
+		}
+
+		if (level == 0)
 			return;
 
 		/*
@@ -404,53 +407,25 @@
 		       regs->syscall);
 }
 
-static __always_inline unsigned long *stack_pointer(struct task_struct *task)
+static int show_trace_cb(struct stackframe *frame, void *data)
 {
-	unsigned long *sp;
-
-	if (!task || task == current)
-		__asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));
-	else
-		sp = (unsigned long *)task->thread.sp;
-
-	return sp;
+	if (kernel_text_address(frame->pc)) {
+		printk(" [<%08lx>] ", frame->pc);
+		print_symbol("%s\n", frame->pc);
+	}
+	return 0;
 }
 
 void show_trace(struct task_struct *task, unsigned long *sp)
 {
-	unsigned long a0, a1, pc;
-	unsigned long sp_start, sp_end;
-
-	if (sp)
-		a1 = (unsigned long)sp;
-	else
-		a1 = (unsigned long)stack_pointer(task);
-
-	sp_start = a1 & ~(THREAD_SIZE-1);
-	sp_end = sp_start + THREAD_SIZE;
+	if (!sp)
+		sp = stack_pointer(task);
 
 	printk("Call Trace:");
 #ifdef CONFIG_KALLSYMS
 	printk("\n");
 #endif
-	spill_registers();
-
-	while (a1 > sp_start && a1 < sp_end) {
-		sp = (unsigned long*)a1;
-
-		a0 = *(sp - 4);
-		a1 = *(sp - 3);
-
-		if (a1 <= (unsigned long) sp)
-			break;
-
-		pc = MAKE_PC_FROM_RA(a0, a1);
-
-		if (kernel_text_address(pc)) {
-			printk(" [<%08lx>] ", pc);
-			print_symbol("%s\n", pc);
-		}
-	}
+	walk_stackframe(sp, show_trace_cb, NULL);
 	printk("\n");
 }
 
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 82109b42..f9e1753 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -50,6 +50,7 @@
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
+#include <asm/vectors.h>
 
 #define WINDOW_VECTORS_SIZE   0x180
 
@@ -220,7 +221,7 @@
 
 	xsr	a0, depc		# get DEPC, save a0
 
-	movi	a3, XCHAL_WINDOW_VECTORS_VADDR
+	movi	a3, WINDOW_VECTORS_VADDR
 	_bltu	a0, a3, .Lfixup
 	addi	a3, a3, WINDOW_VECTORS_SIZE
 	_bgeu	a0, a3, .Lfixup
@@ -385,9 +386,12 @@
 	.if	XCHAL_EXCM_LEVEL >= \level
 	.section .Level\level\()InterruptVector.text, "ax"
 ENTRY(_Level\level\()InterruptVector)
-	wsr	a0, epc1
+	wsr	a0, excsave2
 	rsr	a0, epc\level
-	xsr	a0, epc1
+	wsr	a0, epc1
+	movi	a0, EXCCAUSE_LEVEL1_INTERRUPT
+	wsr	a0, exccause
+	rsr	a0, eps\level
 					# branch to user or kernel vector
 	j	_SimulateUserKernelVectorException
 	.endif
@@ -439,10 +443,8 @@
 	 */
 	.align 4
 _SimulateUserKernelVectorException:
-	wsr	a0, excsave2
-	movi	a0, 4			# LEVEL1_INTERRUPT cause
-	wsr	a0, exccause
-	rsr	a0, ps
+	addi	a0, a0, (1 << PS_EXCM_BIT)
+	wsr	a0, ps
 	bbsi.l	a0, PS_UM_BIT, 1f	# branch if user mode
 	rsr	a0, excsave2		# restore a0
 	j	_KernelExceptionVector	# simulate kernel vector exception
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 1469524..21acd11 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/thread_info.h>
 
+#include <asm/vectors.h>
 #include <variant/core.h>
 #include <platform/hardware.h>
 OUTPUT_ARCH(xtensa)
@@ -30,7 +31,7 @@
 #endif
 
 #ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0001000
+#define KERNELOFFSET 0xd0003000
 #endif
 
 /* Note: In the following macros, it would be nice to specify only the
@@ -185,16 +186,16 @@
 
   SECTION_VECTOR (_WindowVectors_text,
 		  .WindowVectors.text,
-		  XCHAL_WINDOW_VECTORS_VADDR, 4,
+		  WINDOW_VECTORS_VADDR, 4,
 		  .dummy)
   SECTION_VECTOR (_DebugInterruptVector_literal,
 		  .DebugInterruptVector.literal,
-		  XCHAL_DEBUG_VECTOR_VADDR - 4,
+		  DEBUG_VECTOR_VADDR - 4,
 		  SIZEOF(.WindowVectors.text),
 		  .WindowVectors.text)
   SECTION_VECTOR (_DebugInterruptVector_text,
 		  .DebugInterruptVector.text,
-		  XCHAL_DEBUG_VECTOR_VADDR,
+		  DEBUG_VECTOR_VADDR,
 		  4,
 		  .DebugInterruptVector.literal)
 #undef LAST
@@ -202,7 +203,7 @@
 #if XCHAL_EXCM_LEVEL >= 2
   SECTION_VECTOR (_Level2InterruptVector_text,
 		  .Level2InterruptVector.text,
-		  XCHAL_INTLEVEL2_VECTOR_VADDR,
+		  INTLEVEL2_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level2InterruptVector.text
@@ -210,7 +211,7 @@
 #if XCHAL_EXCM_LEVEL >= 3
   SECTION_VECTOR (_Level3InterruptVector_text,
 		  .Level3InterruptVector.text,
-		  XCHAL_INTLEVEL3_VECTOR_VADDR,
+		  INTLEVEL3_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level3InterruptVector.text
@@ -218,7 +219,7 @@
 #if XCHAL_EXCM_LEVEL >= 4
   SECTION_VECTOR (_Level4InterruptVector_text,
 		  .Level4InterruptVector.text,
-		  XCHAL_INTLEVEL4_VECTOR_VADDR,
+		  INTLEVEL4_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level4InterruptVector.text
@@ -226,7 +227,7 @@
 #if XCHAL_EXCM_LEVEL >= 5
   SECTION_VECTOR (_Level5InterruptVector_text,
 		  .Level5InterruptVector.text,
-		  XCHAL_INTLEVEL5_VECTOR_VADDR,
+		  INTLEVEL5_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level5InterruptVector.text
@@ -234,39 +235,39 @@
 #if XCHAL_EXCM_LEVEL >= 6
   SECTION_VECTOR (_Level6InterruptVector_text,
 		  .Level6InterruptVector.text,
-		  XCHAL_INTLEVEL6_VECTOR_VADDR,
+		  INTLEVEL6_VECTOR_VADDR,
 		  SIZEOF(LAST), LAST)
 # undef LAST
 # define LAST	.Level6InterruptVector.text
 #endif
   SECTION_VECTOR (_KernelExceptionVector_literal,
 		  .KernelExceptionVector.literal,
-		  XCHAL_KERNEL_VECTOR_VADDR - 4,
+		  KERNEL_VECTOR_VADDR - 4,
 		  SIZEOF(LAST), LAST)
 #undef LAST
   SECTION_VECTOR (_KernelExceptionVector_text,
 		  .KernelExceptionVector.text,
-		  XCHAL_KERNEL_VECTOR_VADDR,
+		  KERNEL_VECTOR_VADDR,
 		  4,
 		  .KernelExceptionVector.literal)
   SECTION_VECTOR (_UserExceptionVector_literal,
 		  .UserExceptionVector.literal,
-		  XCHAL_USER_VECTOR_VADDR - 4,
+		  USER_VECTOR_VADDR - 4,
 		  SIZEOF(.KernelExceptionVector.text),
 		  .KernelExceptionVector.text)
   SECTION_VECTOR (_UserExceptionVector_text,
 		  .UserExceptionVector.text,
-		  XCHAL_USER_VECTOR_VADDR,
+		  USER_VECTOR_VADDR,
 		  4,
 		  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+		  DOUBLEEXC_VECTOR_VADDR - 16,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
-		  XCHAL_DOUBLEEXC_VECTOR_VADDR,
+		  DOUBLEEXC_VECTOR_VADDR,
 		  32,
 		  .DoubleExceptionVector.literal)
 
@@ -284,11 +285,26 @@
   . = ALIGN(0x10);
   .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
 
-  .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+  .ResetVector.text RESET_VECTOR_VADDR :
   {
     *(.ResetVector.text)
   }
 
+
+  /*
+   * This is a remapped copy of the Secondary Reset Vector Code.
+   * It keeps gdb in sync with the PC after switching
+   * to the temporary mapping used while setting up
+   * the V2 MMU mappings for Linux.
+   *
+   * Only debug information about this section is put in the kernel image.
+   */
+  .SecondaryResetVector.remapped_text 0x46000000 (INFO):
+  {
+	*(.SecondaryResetVector.remapped_text)
+  }
+
+
   .xt.lit : { *(.xt.lit) }
   .xt.prop : { *(.xt.prop) }
 
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index afe058b..42c53c87 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -119,3 +119,8 @@
 EXPORT_SYMBOL(insb);
 EXPORT_SYMBOL(insw);
 EXPORT_SYMBOL(insl);
+
+extern long common_exception_return;
+extern long _spill_registers;
+EXPORT_SYMBOL(common_exception_return);
+EXPORT_SYMBOL(_spill_registers);
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 0f77f9d..a107757 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -24,15 +24,19 @@
  */
 void __init init_mmu(void)
 {
-	/* Writing zeros to the <t>TLBCFG special registers ensure
-	 * that valid values exist in the register.  For existing
-	 * PGSZID<w> fields, zero selects the first element of the
-	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
-	 * the best value to write.  Also, when changing PGSZID<w>
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+	/*
+	 * Writing zeros to the instruction and data TLBCFG special
+	 * registers ensure that valid values exist in the register.
+	 *
+	 * For existing PGSZID<w> fields, zero selects the first element
+	 * of the page-size array.  For nonexistent PGSZID<w> fields,
+	 * zero is the best value to write.  Also, when changing PGSZID<w>
 	 * fields, the corresponding TLB must be flushed.
 	 */
 	set_itlbcfg_register(0);
 	set_dtlbcfg_register(0);
+#endif
 	flush_tlb_all();
 
 	/* Set rasid register to a known value. */
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
index 66f32ee..5f03a59 100644
--- a/arch/xtensa/oprofile/backtrace.c
+++ b/arch/xtensa/oprofile/backtrace.c
@@ -132,9 +132,7 @@
 		pc = MAKE_PC_FROM_RA(a0, pc);
 
 		/* Add the PC to the trace. */
-		if (kernel_text_address(pc))
-			oprofile_add_trace(pc);
-
+		oprofile_add_trace(pc);
 		if (pc == (unsigned long) &common_exception_return) {
 			regs = (struct pt_regs *)a1;
 			if (user_mode(regs)) {
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index da9866f..70cb408 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -56,13 +56,13 @@
 static int rs_open(struct tty_struct *tty, struct file * filp)
 {
 	tty->port = &serial_port;
-	spin_lock(&timer_lock);
+	spin_lock_bh(&timer_lock);
 	if (tty->count == 1) {
 		setup_timer(&serial_timer, rs_poll,
 				(unsigned long)&serial_port);
 		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
 	}
-	spin_unlock(&timer_lock);
+	spin_unlock_bh(&timer_lock);
 
 	return 0;
 }
@@ -99,14 +99,13 @@
 static void rs_poll(unsigned long priv)
 {
 	struct tty_port *port = (struct tty_port *)priv;
-	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
 	int i = 0;
 	unsigned char c;
 
 	spin_lock(&timer_lock);
 
-	while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){
-		__simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0);
+	while (simc_poll(0)) {
+		simc_read(0, &c, 1);
 		tty_insert_flip_char(port, c, TTY_NORMAL);
 		i++;
 	}
@@ -244,8 +243,7 @@
 	int len = strlen(s);
 
 	if (s != 0 && *s != 0)
-		__simc (SYS_write, 1, (unsigned long)s,
-			count < len ? count : len,0,0);
+		simc_write(1, s, count < len ? count : len);
 }
 
 static struct tty_driver* iss_console_device(struct console *c, int *index)
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index b5a4edf..12b15ad 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -59,56 +59,58 @@
 
 static int errno;
 
-static inline int __simc(int a, int b, int c, int d, int e, int f)
+static inline int __simc(int a, int b, int c, int d)
 {
 	int ret;
 	register int a1 asm("a2") = a;
 	register int b1 asm("a3") = b;
 	register int c1 asm("a4") = c;
 	register int d1 asm("a5") = d;
-	register int e1 asm("a6") = e;
-	register int f1 asm("a7") = f;
 	__asm__ __volatile__ (
 			"simcall\n"
 			"mov %0, a2\n"
 			"mov %1, a3\n"
 			: "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1)
-			: "r"(c1), "r"(d1), "r"(e1), "r"(f1)
+			: "r"(c1), "r"(d1)
 			: "memory");
 	return ret;
 }
 
 static inline int simc_open(const char *file, int flags, int mode)
 {
-	return __simc(SYS_open, (int) file, flags, mode, 0, 0);
+	return __simc(SYS_open, (int) file, flags, mode);
 }
 
 static inline int simc_close(int fd)
 {
-	return __simc(SYS_close, fd, 0, 0, 0, 0);
+	return __simc(SYS_close, fd, 0, 0);
 }
 
 static inline int simc_ioctl(int fd, int request, void *arg)
 {
-	return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0);
+	return __simc(SYS_ioctl, fd, request, (int) arg);
 }
 
 static inline int simc_read(int fd, void *buf, size_t count)
 {
-	return __simc(SYS_read, fd, (int) buf, count, 0, 0);
+	return __simc(SYS_read, fd, (int) buf, count);
 }
 
 static inline int simc_write(int fd, const void *buf, size_t count)
 {
-	return __simc(SYS_write, fd, (int) buf, count, 0, 0);
+	return __simc(SYS_write, fd, (int) buf, count);
 }
 
 static inline int simc_poll(int fd)
 {
 	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
 
-	return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv,
-			0, 0);
+	return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv);
+}
+
+static inline int simc_lseek(int fd, uint32_t off, int whence)
+{
+	return __simc(SYS_lseek, fd, off, whence);
 }
 
 #endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
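
With every simcall now taking exactly four arguments, the simc_* wrappers map one-to-one onto host operations. A hedged usage sketch; open_backing_file() is hypothetical, and errno is the file-static variable that __simc() fills from a3:

    /* Hypothetical helper, not part of the patch. */
    static int open_backing_file(const char *path)
    {
            int fd = simc_open(path, O_RDWR, 0);

            if (fd < 0)
                    pr_err("simc_open(%s) failed, host errno %d\n", path, errno);
            return fd;
    }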
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index e170010..da7d182 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -38,12 +38,6 @@
 
 }
 
-#ifdef CONFIG_PCI
-void platform_pcibios_init(void)
-{
-}
-#endif
-
 void platform_halt(void)
 {
 	pr_info(" ** Called platform_halt() **\n");
@@ -64,7 +58,9 @@
 			     "wsr	a2, icountlevel\n\t"
 			     "movi	a2, 0\n\t"
 			     "wsr	a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
 			     "wsr	a2, ibreakenable\n\t"
+#endif
 			     "wsr	a2, lcount\n\t"
 			     "movi	a2, 0x1f\n\t"
 			     "wsr	a2, ps\n\t"
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 88608cc..c0edb35 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -85,7 +85,7 @@
 	while (nbytes > 0) {
 		unsigned long io;
 
-		__simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0);
+		simc_lseek(dev->fd, offset, SEEK_SET);
 		if (write)
 			io = simc_write(dev->fd, buffer, nbytes);
 		else
@@ -139,13 +139,12 @@
 	return 0;
 }
 
-static int simdisk_release(struct gendisk *disk, fmode_t mode)
+static void simdisk_release(struct gendisk *disk, fmode_t mode)
 {
 	struct simdisk *dev = disk->private_data;
 	spin_lock(&dev->lock);
 	--dev->users;
 	spin_unlock(&dev->lock);
-	return 0;
 }
 
 static const struct block_device_operations simdisk_ops = {
@@ -177,7 +176,7 @@
 		err = -ENODEV;
 		goto out;
 	}
-	dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0);
+	dev->size = simc_lseek(dev->fd, 0, SEEK_END);
 	set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
 	dev->filename = filename;
 	pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);
@@ -218,7 +217,7 @@
 			size_t size, loff_t *ppos)
 {
 	struct simdisk *dev = PDE_DATA(file_inode(file));
-	char *s = dev->filename;
+	const char *s = dev->filename;
 	if (s) {
 		ssize_t n = simple_read_from_buffer(buf, size, ppos,
 							s, strlen(s));
@@ -239,7 +238,7 @@
 
 	if (tmp == NULL)
 		return -ENOMEM;
-	if (copy_from_user(tmp, buffer, count)) {
+	if (copy_from_user(tmp, buf, count)) {
 		err = -EFAULT;
 		goto out_free;
 	}
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index c7d90f1..f9bc879 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -69,7 +69,9 @@
 			      "wsr	a2, icountlevel\n\t"
 			      "movi	a2, 0\n\t"
 			      "wsr	a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
 			      "wsr	a2, ibreakenable\n\t"
+#endif
 			      "wsr	a2, lcount\n\t"
 			      "movi	a2, 0x1f\n\t"
 			      "wsr	a2, ps\n\t"
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 9d888a2..96ef8ee 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -60,7 +60,9 @@
 			      "wsr	a2, icountlevel\n\t"
 			      "movi	a2, 0\n\t"
 			      "wsr	a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
 			      "wsr	a2, ibreakenable\n\t"
+#endif
 			      "wsr	a2, lcount\n\t"
 			      "movi	a2, 0x1f\n\t"
 			      "wsr	a2, ps\n\t"
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b2b9837..e8918ff 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -972,10 +972,10 @@
 	if (!new_blkg)
 		return -ENOMEM;
 
-	preloaded = !radix_tree_preload(GFP_KERNEL);
-
 	blk_queue_bypass_start(q);
 
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
 	/*
 	 * Make sure the root blkg exists and count the existing blkgs.  As
 	 * @q is bypassing at this point, blkg_lookup_create() can't be
diff --git a/block/blk-core.c b/block/blk-core.c
index 7c28835..33c33bc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -30,6 +30,7 @@
 #include <linux/list_sort.h>
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -159,20 +160,10 @@
 	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = -EIO;
 
-	if (unlikely(nbytes > bio->bi_size)) {
-		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-		       __func__, nbytes, bio->bi_size);
-		nbytes = bio->bi_size;
-	}
-
 	if (unlikely(rq->cmd_flags & REQ_QUIET))
 		set_bit(BIO_QUIET, &bio->bi_flags);
 
-	bio->bi_size -= nbytes;
-	bio->bi_sector += (nbytes >> 9);
-
-	if (bio_integrity(bio))
-		bio_integrity_advance(bio, nbytes);
+	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
 	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
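
The size/sector/integrity bookkeeping deleted above is now delegated to bio_advance(); judging purely from the removed lines, its effect on a partially completed bio is roughly the following (the real helper lives in the bio layer and may do more):

    /* Sketch reconstructed from the removed open-coded lines. */
    static void bio_advance_sketch(struct bio *bio, unsigned int nbytes)
    {
            bio->bi_size -= nbytes;
            bio->bi_sector += nbytes >> 9;

            if (bio_integrity(bio))
                    bio_integrity_advance(bio, nbytes);
    }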
@@ -1264,6 +1255,16 @@
 }
 EXPORT_SYMBOL_GPL(part_round_stats);
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_put_request(struct request *rq)
+{
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+		pm_runtime_mark_last_busy(rq->q->dev);
+}
+#else
+static inline void blk_pm_put_request(struct request *rq) {}
+#endif
+
 /*
  * queue lock must be held
  */
@@ -1274,6 +1275,8 @@
 	if (unlikely(--req->ref_count))
 		return;
 
+	blk_pm_put_request(req);
+
 	elv_completed_request(q, req);
 
 	/* this is a bio leak */
@@ -1597,7 +1600,7 @@
 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
 			bdevname(bio->bi_bdev, b),
 			bio->bi_rw,
-			(unsigned long long)bio->bi_sector + bio_sectors(bio),
+			(unsigned long long)bio_end_sector(bio),
 			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
 	set_bit(BIO_EOF, &bio->bi_flags);
@@ -2053,6 +2056,28 @@
 	}
 }
 
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Don't process normal requests when queue is suspended
+ * or in the process of suspending/resuming
+ */
+static struct request *blk_pm_peek_request(struct request_queue *q,
+					   struct request *rq)
+{
+	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+		return NULL;
+	else
+		return rq;
+}
+#else
+static inline struct request *blk_pm_peek_request(struct request_queue *q,
+						  struct request *rq)
+{
+	return rq;
+}
+#endif
+
 /**
  * blk_peek_request - peek at the top of a request queue
  * @q: request queue to peek at
@@ -2075,6 +2100,11 @@
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
+
+		rq = blk_pm_peek_request(q, rq);
+		if (!rq)
+			break;
+
 		if (!(rq->cmd_flags & REQ_STARTED)) {
 			/*
 			 * This is the first time the device driver
@@ -2253,8 +2283,7 @@
  **/
 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 {
-	int total_bytes, bio_nbytes, next_idx = 0;
-	struct bio *bio;
+	int total_bytes;
 
 	if (!req->bio)
 		return false;
@@ -2300,56 +2329,21 @@
 
 	blk_account_io_completion(req, nr_bytes);
 
-	total_bytes = bio_nbytes = 0;
-	while ((bio = req->bio) != NULL) {
-		int nbytes;
+	total_bytes = 0;
+	while (req->bio) {
+		struct bio *bio = req->bio;
+		unsigned bio_bytes = min(bio->bi_size, nr_bytes);
 
-		if (nr_bytes >= bio->bi_size) {
+		if (bio_bytes == bio->bi_size)
 			req->bio = bio->bi_next;
-			nbytes = bio->bi_size;
-			req_bio_endio(req, bio, nbytes, error);
-			next_idx = 0;
-			bio_nbytes = 0;
-		} else {
-			int idx = bio->bi_idx + next_idx;
 
-			if (unlikely(idx >= bio->bi_vcnt)) {
-				blk_dump_rq_flags(req, "__end_that");
-				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-				       __func__, idx, bio->bi_vcnt);
-				break;
-			}
+		req_bio_endio(req, bio, bio_bytes, error);
 
-			nbytes = bio_iovec_idx(bio, idx)->bv_len;
-			BIO_BUG_ON(nbytes > bio->bi_size);
+		total_bytes += bio_bytes;
+		nr_bytes -= bio_bytes;
 
-			/*
-			 * not a complete bvec done
-			 */
-			if (unlikely(nbytes > nr_bytes)) {
-				bio_nbytes += nr_bytes;
-				total_bytes += nr_bytes;
-				break;
-			}
-
-			/*
-			 * advance to the next vector
-			 */
-			next_idx++;
-			bio_nbytes += nbytes;
-		}
-
-		total_bytes += nbytes;
-		nr_bytes -= nbytes;
-
-		bio = req->bio;
-		if (bio) {
-			/*
-			 * end more in this run, or just return 'not-done'
-			 */
-			if (unlikely(nr_bytes <= 0))
-				break;
-		}
+		if (!nr_bytes)
+			break;
 	}
 
 	/*
@@ -2365,16 +2359,6 @@
 		return false;
 	}
 
-	/*
-	 * if the request wasn't completed, update state
-	 */
-	if (bio_nbytes) {
-		req_bio_endio(req, bio, bio_nbytes, error);
-		bio->bi_idx += next_idx;
-		bio_iovec(bio)->bv_offset += nr_bytes;
-		bio_iovec(bio)->bv_len -= nr_bytes;
-	}
-
 	req->__data_len -= total_bytes;
 	req->buffer = bio_data(req->bio);
 
@@ -3046,6 +3030,149 @@
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ *    Initialize runtime-PM-related fields for @q and start auto suspend for
+ *    @dev. Drivers that want to take advantage of request-based runtime PM
+ *    should call this function after @dev has been initialized, and its
+ *    request queue @q has been allocated, and runtime PM for it can not happen
+ *    yet(either due to disabled/forbidden or its usage_count > 0). In most
+ *    cases, driver should call this function before any I/O has taken place.
+ *
+ *    This function sets up autosuspend for the device; the autosuspend delay
+ *    is set to -1 to make runtime suspend impossible until an updated value
+ *    is set either by the user or by the driver. Drivers do
+ *    not need to touch other autosuspend settings.
+ *
+ *    Block layer runtime PM is request based, so it only works for drivers
+ *    that use requests as their I/O unit rather than submitting bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+	q->dev = dev;
+	q->rpm_status = RPM_ACTIVE;
+	pm_runtime_set_autosuspend_delay(q->dev, -1);
+	pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ *    This function will check if runtime suspend is allowed for the device
+ *    by examining if there are any requests pending in the queue. If there
+ *    are requests pending, the device cannot be runtime suspended; otherwise,
+ *    the queue's status will be updated to SUSPENDING and the driver can
+ *    proceed to suspend the device.
+ *
+ *    If suspend is not allowed, we mark the device as last busy so that the
+ *    runtime PM core will try to autosuspend it again later.
+ *
+ *    This function should be called near the start of the device's
+ *    runtime_suspend callback.
+ *
+ * Return:
+ *    0		- OK to runtime suspend the device
+ *    -EBUSY	- Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+	int ret = 0;
+
+	spin_lock_irq(q->queue_lock);
+	if (q->nr_pending) {
+		ret = -EBUSY;
+		pm_runtime_mark_last_busy(q->dev);
+	} else {
+		q->rpm_status = RPM_SUSPENDING;
+	}
+	spin_unlock_irq(q->queue_lock);
+	return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ *    Update the queue's runtime status according to the return value of the
+ *    device's runtime_suspend function and mark the device as last busy so
+ *    that the PM core will try to autosuspend it at a later time.
+ *
+ *    This function should be called near the end of the device's
+ *    runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!err) {
+		q->rpm_status = RPM_SUSPENDED;
+	} else {
+		q->rpm_status = RPM_ACTIVE;
+		pm_runtime_mark_last_busy(q->dev);
+	}
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ *    Update the queue's runtime status to RESUMING in preparation for the
+ *    runtime resume of the device.
+ *
+ *    This function should be called near the start of the device's
+ *    runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->rpm_status = RPM_RESUMING;
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ *    Update the queue's runtime status according to the return value of the
+ *    device's runtime_resume function. If it was successfully resumed, process
+ *    the requests that were queued while the device was resuming, then mark
+ *    the device as last busy and initiate autosuspend for it.
+ *
+ *    This function should be called near the end of the device's
+ *    runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!err) {
+		q->rpm_status = RPM_ACTIVE;
+		__blk_run_queue(q);
+		pm_runtime_mark_last_busy(q->dev);
+		pm_runtime_autosuspend(q->dev);
+	} else {
+		q->rpm_status = RPM_SUSPENDED;
+	}
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+#endif
+
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
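
Combined with the blk_pm_{add,requeue,put}_request() hooks elsewhere in this series, the kernel-doc above implies the following call pattern for a request-based driver. A hedged sketch; mydrv_dev_to_queue(), mydrv_quiesce() and mydrv_wake() are hypothetical device-specific helpers:

    /* Hypothetical driver glue, for illustration only. */
    static int mydrv_runtime_suspend(struct device *dev)
    {
            struct request_queue *q = mydrv_dev_to_queue(dev);
            int err = blk_pre_runtime_suspend(q);

            if (err)
                    return err;     /* requests pending: stay active */
            err = mydrv_quiesce(dev);
            blk_post_runtime_suspend(q, err);
            return err;
    }

    static int mydrv_runtime_resume(struct device *dev)
    {
            struct request_queue *q = mydrv_dev_to_queue(dev);
            int err;

            blk_pre_runtime_resume(q);
            err = mydrv_wake(dev);
            blk_post_runtime_resume(q, err);  /* reruns the queue on success */
            return err;
    }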
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4f0ade7..d5cd3131 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2270,11 +2270,8 @@
 		return NULL;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq) {
-		sector_t sector = bio->bi_sector + bio_sectors(bio);
-
-		return elv_rb_find(&cfqq->sort_list, sector);
-	}
+	if (cfqq)
+		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
 
 	return NULL;
 }
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 90037b5..ba19a3a 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -132,7 +132,7 @@
 	 * check for front merge
 	 */
 	if (dd->front_merges) {
-		sector_t sector = bio->bi_sector + bio_sectors(bio);
+		sector_t sector = bio_end_sector(bio);
 
 		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 		if (__rq) {
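
As the replaced expressions in cfq, deadline and brd show, bio_end_sector() is simply the old open-coded sum; presumably it is defined in the bio headers along these lines (a reconstruction, not quoted from the patch):

    /* Reconstructed from the replaced expressions; illustrative only. */
    #define bio_end_sector(bio)     ((bio)->bi_sector + bio_sectors(bio))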
diff --git a/block/elevator.c b/block/elevator.c
index a0ffdd9..eba5b04 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,6 +34,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/hash.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 
 #include <trace/events/block.h>
 
@@ -536,6 +537,27 @@
 		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 }
 
+#ifdef CONFIG_PM_RUNTIME
+static void blk_pm_requeue_request(struct request *rq)
+{
+	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+		rq->q->nr_pending--;
+}
+
+static void blk_pm_add_request(struct request_queue *q, struct request *rq)
+{
+	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
+		pm_request_resume(q->dev);
+}
+#else
+static inline void blk_pm_requeue_request(struct request *rq) {}
+static inline void blk_pm_add_request(struct request_queue *q,
+				      struct request *rq)
+{
+}
+#endif
+
 void elv_requeue_request(struct request_queue *q, struct request *rq)
 {
 	/*
@@ -550,6 +572,8 @@
 
 	rq->cmd_flags &= ~REQ_STARTED;
 
+	blk_pm_requeue_request(rq);
+
 	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
@@ -572,6 +596,8 @@
 {
 	trace_block_rq_insert(q, rq);
 
+	blk_pm_add_request(q, rq);
+
 	rq->q = q;
 
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index ff5804e..c85fc89 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -238,7 +238,7 @@
                 le32_to_cpu(gpt->sizeof_partition_entry);
 	if (!count)
 		return NULL;
-	pte = kzalloc(count, GFP_KERNEL);
+	pte = kmalloc(count, GFP_KERNEL);
 	if (!pte)
 		return NULL;
 
@@ -267,7 +267,7 @@
 	gpt_header *gpt;
 	unsigned ssz = bdev_logical_block_size(state->bdev);
 
-	gpt = kzalloc(ssz, GFP_KERNEL);
+	gpt = kmalloc(ssz, GFP_KERNEL);
 	if (!gpt)
 		return NULL;
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9a87daa..a5ffcc9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -27,6 +27,7 @@
 #include <linux/ratelimit.h>
 #include <linux/slab.h>
 #include <linux/times.h>
+#include <linux/uio.h>
 #include <asm/uaccess.h>
 
 #include <scsi/scsi.h>
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index ec7f569..c84ee95 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -720,7 +720,19 @@
 
 	if ((obj_desc->common_field.start_field_bit_offset == 0) &&
 	    (obj_desc->common_field.bit_length == access_bit_width)) {
-		status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
+		if (buffer_length >= sizeof(u64)) {
+			status =
+			    acpi_ex_field_datum_io(obj_desc, 0, buffer,
+						   ACPI_READ);
+		} else {
+			/* Use raw_datum (u64) to handle buffers < 64 bits */
+
+			status =
+			    acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
+						   ACPI_READ);
+			ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
+		}
+
 		return_ACPI_STATUS(status);
 	}
 
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 2a431ec..46f0f83 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -558,6 +558,7 @@
 	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
 			(ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
 
+	ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info));
 	info->prefix_node = device_node;
 	info->pathname = METHOD_NAME__INI;
 	info->parameters = NULL;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index b15aceb..7e80772 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -349,7 +349,8 @@
 	return_value = 0;
 	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
 	if (ACPI_FAILURE(status)) {
-		return (status);
+		acpi_ut_remove_reference(return_desc);
+		return_ACPI_STATUS(status);
 	}
 
 	/* Lookup the interface in the global _OSI list */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 9a7f0e3..11115bb 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -21,7 +21,7 @@
 #include <linux/serial_reg.h>
 #include <linux/time.h>
 
-static const char *part_probes[] = { "bcm47xxpart", NULL };
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
 
 static struct physmap_flash_data bcma_pflash_data = {
 	.part_probe_types	= part_probes,
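
The added const turns an array of writable pointers to constant strings into a
fully read-only table, which lets the compiler place it in .rodata and rejects
accidental reassignment of an entry. A two-line illustration:

    #include <stdio.h>

    static const char * const probes[] = { "bcm47xxpart", NULL };

    int main(void)
    {
            /* probes[0] = "other"; would now fail to compile */
            printf("%s\n", probes[0]);
            return 0;
    }
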
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a3b4023..ca07399 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -42,4 +42,5 @@
 
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 
+nvme-y		:= nvme-core.o nvme-scsi.o
 swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 386146d..4ff85b8 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1634,7 +1634,7 @@
 	return 0;
 }
 
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct amiga_floppy_struct *p = disk->private_data;
 	int drive = p - unit;
@@ -1654,7 +1654,6 @@
 	floppy_off (drive | 0x40000000);
 #endif
 	mutex_unlock(&amiflop_mutex);
-	return 0;
 }
 
 /*
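
This is the first of many hunks in this series converting block drivers'
release() methods from int to void: the block layer never looked at the return
value, so every "return 0" was dead code. A toy model of the signature change;
toy_ops is invented, the real table is block_device_operations:

    #include <stdio.h>

    struct toy_ops {
            void (*release)(void *data); /* was: int (*release)(void *data) */
    };

    static void toy_release(void *data)
    {
            printf("released %s\n", (const char *)data);
            /* nothing to return: no caller ever consumed the result */
    }

    int main(void)
    {
            struct toy_ops ops = { .release = toy_release };
            char name[] = "disk0";

            ops.release(name);
            return 0;
    }
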
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index a129f8c..916d9ed 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -169,7 +169,7 @@
 	return -ENODEV;
 }
 
-static int
+static void
 aoeblk_release(struct gendisk *disk, fmode_t mode)
 {
 	struct aoedev *d = disk->private_data;
@@ -180,11 +180,9 @@
 	if (--d->nopen == 0) {
 		spin_unlock_irqrestore(&d->lock, flags);
 		aoecmd_cfg(d->aoemajor, d->aoeminor);
-		return 0;
+		return;
 	}
 	spin_unlock_irqrestore(&d->lock, flags);
-
-	return 0;
 }
 
 static void
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 92b6d7c..fc803ec 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -920,16 +920,14 @@
 static void
 bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 {
-	struct bio_vec *bv;
-
 	memset(buf, 0, sizeof(*buf));
 	buf->rq = rq;
 	buf->bio = bio;
 	buf->resid = bio->bi_size;
 	buf->sector = bio->bi_sector;
 	bio_pageinc(bio);
-	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
-	buf->bv_resid = bv->bv_len;
+	buf->bv = bio_iovec(bio);
+	buf->bv_resid = buf->bv->bv_len;
 	WARN_ON(buf->bv_resid == 0);
 }
 
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index ede16c6..0e30c6e 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -367,7 +367,7 @@
 static int fd_test_drive_present( int drive );
 static void config_types( void );
 static int floppy_open(struct block_device *bdev, fmode_t mode);
-static int floppy_release(struct gendisk *disk, fmode_t mode);
+static void floppy_release(struct gendisk *disk, fmode_t mode);
 
 /************************* End of Prototypes **************************/
 
@@ -1886,7 +1886,7 @@
 	return ret;
 }
 
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct atari_floppy_struct *p = disk->private_data;
 	mutex_lock(&ataflop_mutex);
@@ -1897,7 +1897,6 @@
 		p->ref = 0;
 	}
 	mutex_unlock(&ataflop_mutex);
-	return 0;
 }
 
 static const struct block_device_operations floppy_fops = {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 531ceb3..f1a29f8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -334,8 +334,7 @@
 	int err = -EIO;
 
 	sector = bio->bi_sector;
-	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
-						get_capacity(bdev->bd_disk))
+	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto out;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
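
bio_end_sector() names the open-coded "start plus size in sectors" arithmetic.
A sketch with a toy bio mirroring the pre-3.14 field names, feeding the same
capacity check brd performs (numbers are illustrative):

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    struct toy_bio {
            unsigned long long bi_sector; /* start, in 512-byte sectors */
            unsigned int bi_size;         /* length, in bytes */
    };

    #define bio_end_sector(bio) \
            ((bio)->bi_sector + ((bio)->bi_size >> SECTOR_SHIFT))

    int main(void)
    {
            struct toy_bio bio = { .bi_sector = 100, .bi_size = 8192 };
            unsigned long long capacity = 104; /* device size in sectors */

            if (bio_end_sector(&bio) > capacity) /* 100 + 16 > 104 */
                    printf("I/O runs past end of device: reject\n");
            return 0;
    }
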
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e18c991..6374dc1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -75,6 +75,12 @@
 MODULE_PARM_DESC(cciss_simple_mode,
 	"Use 'simple mode' rather than 'performant mode'");
 
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+	"Prevent cciss driver from accessing hardware known to be "
+	" supported by the hpsa driver");
+
 static DEFINE_MUTEX(cciss_mutex);
 static struct proc_dir_entry *proc_cciss;
 
@@ -161,7 +167,7 @@
 static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
 static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
-static int cciss_release(struct gendisk *disk, fmode_t mode);
+static void cciss_release(struct gendisk *disk, fmode_t mode);
 static int do_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned int cmd, unsigned long arg);
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1123,7 +1129,7 @@
 /*
  * Close.  Sync first.
  */
-static int cciss_release(struct gendisk *disk, fmode_t mode)
+static void cciss_release(struct gendisk *disk, fmode_t mode)
 {
 	ctlr_info_t *h;
 	drive_info_struct *drv;
@@ -1135,7 +1141,6 @@
 	drv->usage_count--;
 	h->usage_count--;
 	mutex_unlock(&cciss_mutex);
-	return 0;
 }
 
 static int do_ioctl(struct block_device *bdev, fmode_t mode,
@@ -4116,9 +4121,13 @@
 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 			subsystem_vendor_id;
 
-	for (i = 0; i < ARRAY_SIZE(products); i++)
+	for (i = 0; i < ARRAY_SIZE(products); i++) {
+		/* Stand aside for hpsa driver on request */
+		if (cciss_allow_hpsa)
+			return -ENODEV;
 		if (*board_id == products[i].board_id)
 			return i;
+	}
 	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
 		*board_id);
 	return -ENODEV;
@@ -4960,6 +4969,16 @@
 	ctlr_info_t *h;
 	unsigned long flags;
 
+	/*
+	 * By default the cciss driver is used for all older HP Smart Array
+	 * controllers. There are module parameters that allow a user to
+	 * override this behavior and instead use the hpsa SCSI driver. If
+	 * this is the case, cciss may be loaded first from the kdump initrd
+	 * image and cause a kernel panic. So if reset_devices is true and
+	 * cciss_allow_hpsa is set, just bail.
+	 */
+	if ((reset_devices) && (cciss_allow_hpsa == 1))
+		return -ENODEV;
 	rc = cciss_init_reset_devices(pdev);
 	if (rc) {
 		if (rc != -ENOTSUPP)
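
With the new flag set (e.g. "modprobe cciss cciss_allow_hpsa=1"), the board-ID
lookup above returns -ENODEV for every controller so hpsa can claim the
hardware, and the kdump path bails out before cciss touches a device it should
not own. A compressed sketch of the stand-aside logic (board IDs and names are
invented):

    #include <stdio.h>

    static int allow_newer_driver = 1; /* stand-in for cciss_allow_hpsa */

    static int match_board(unsigned int board_id,
                           const unsigned int *table, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (allow_newer_driver)
                            return -1; /* stand aside on request */
                    if (board_id == table[i])
                            return i;
            }
            return -1;
    }

    int main(void)
    {
            const unsigned int products[] = { 0x3223103c, 0x3234103c };

            /* matches nothing while the flag is set */
            printf("match: %d\n", match_board(0x3223103c, products, 2));
            return 0;
    }
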
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 3b9e8eb..639d26b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -160,7 +160,7 @@
 	unsigned int log_unit );
 
 static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
-static int ida_release(struct gendisk *disk, fmode_t mode);
+static void ida_release(struct gendisk *disk, fmode_t mode);
 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
@@ -856,7 +856,7 @@
 /*
  * Close.  Sync first.
  */
-static int ida_release(struct gendisk *disk, fmode_t mode)
+static void ida_release(struct gendisk *disk, fmode_t mode)
 {
 	ctlr_info_t *host;
 
@@ -864,8 +864,6 @@
 	host = get_host(disk);
 	host->usage_count--;
 	mutex_unlock(&cpqarray_mutex);
-
-	return 0;
 }
 
 /*
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 92510f8..6608076 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -104,7 +104,6 @@
 	int err;
 };
 
-static int al_write_transaction(struct drbd_conf *mdev);
 
 void *drbd_md_get_buffer(struct drbd_conf *mdev)
 {
@@ -168,7 +167,11 @@
 	bio->bi_end_io = drbd_md_io_complete;
 	bio->bi_rw = rw;
 
-	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* Corresponding put_ldev in drbd_md_io_complete() */
+	if (!(rw & WRITE) && mdev->state.disk == D_DISKLESS && mdev->ldev == NULL)
+		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
+		;
+	else if (!get_ldev_if_state(mdev, D_ATTACHING)) {
+		/* Corresponding put_ldev in drbd_md_io_complete() */
 		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
 		err = -ENODEV;
 		goto out;
@@ -199,9 +202,10 @@
 
 	BUG_ON(!bdev->md_bdev);
 
-	dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
+	dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
 	     current->comm, current->pid, __func__,
-	     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+	     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+	     (void*)_RET_IP_ );
 
 	if (sector < drbd_md_first_sector(bdev) ||
 	    sector + 7 > drbd_md_last_sector(bdev))
@@ -209,7 +213,8 @@
 		     current->comm, current->pid, __func__,
 		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
 
-	err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
+	/* we do all our meta data IO in aligned 4k blocks. */
+	err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
 	if (err) {
 		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
 		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
@@ -217,44 +222,99 @@
 	return err;
 }
 
-static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
+static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr)
 {
-	struct lc_element *al_ext;
 	struct lc_element *tmp;
-	int wake;
-
-	spin_lock_irq(&mdev->al_lock);
 	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(tmp != NULL)) {
 		struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
-		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
-			wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
-			spin_unlock_irq(&mdev->al_lock);
-			if (wake)
-				wake_up(&mdev->al_wait);
-			return NULL;
-		}
+		if (test_bit(BME_NO_WRITES, &bm_ext->flags))
+			return bm_ext;
 	}
-	al_ext = lc_get(mdev->act_log, enr);
+	return NULL;
+}
+
+static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock)
+{
+	struct lc_element *al_ext;
+	struct bm_extent *bm_ext;
+	int wake;
+
+	spin_lock_irq(&mdev->al_lock);
+	bm_ext = find_active_resync_extent(mdev, enr);
+	if (bm_ext) {
+		wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
+		spin_unlock_irq(&mdev->al_lock);
+		if (wake)
+			wake_up(&mdev->al_wait);
+		return NULL;
+	}
+	if (nonblock)
+		al_ext = lc_try_get(mdev->act_log, enr);
+	else
+		al_ext = lc_get(mdev->act_log, enr);
 	spin_unlock_irq(&mdev->al_lock);
 	return al_ext;
 }
 
-void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
+bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+	/* for bios crossing activity log extent boundaries,
+	 * we may need to activate two extents in one go */
+	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+
+	D_ASSERT((unsigned)(last - first) <= 1);
+	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+
+	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
+	if (first != last)
+		return false;
+
+	return _al_get(mdev, first, true);
+}
+
+bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
 {
 	/* for bios crossing activity log extent boundaries,
 	 * we may need to activate two extents in one go */
 	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
 	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
 	unsigned enr;
-	bool locked = false;
-
+	bool need_transaction = false;
 
 	D_ASSERT(first <= last);
 	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
 
-	for (enr = first; enr <= last; enr++)
-		wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);
+	for (enr = first; enr <= last; enr++) {
+		struct lc_element *al_ext;
+		wait_event(mdev->al_wait,
+				(al_ext = _al_get(mdev, enr, false)) != NULL);
+		if (al_ext->lc_number != enr)
+			need_transaction = true;
+	}
+	return need_transaction;
+}
+
+static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
+
+/* When called through generic_make_request(), we must delegate
+ * activity log I/O to the worker thread: a further request
+ * submitted via generic_make_request() within the same task
+ * would be queued on current->bio_list, and would only start
+ * after this function returns (see generic_make_request()).
+ *
+ * However, if we *are* the worker, we must not delegate to ourselves.
+ */
+
+/*
+ * @delegate:   delegate activity log I/O to the worker thread
+ */
+void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
+{
+	bool locked = false;
+
+	BUG_ON(delegate && current == mdev->tconn->worker.task);
 
 	/* Serialize multiple transactions.
 	 * This uses test_and_set_bit, memory barrier is implicit.
@@ -264,13 +324,6 @@
 			(locked = lc_try_lock_for_transaction(mdev->act_log)));
 
 	if (locked) {
-		/* drbd_al_write_transaction(mdev,al_ext,enr);
-		 * recurses into generic_make_request(), which
-		 * disallows recursion, bios being serialized on the
-		 * current->bio_tail list now.
-		 * we have to delegate updates to the activity log
-		 * to the worker thread. */
-
 		/* Double check: it may have been committed by someone else,
 		 * while we have been waiting for the lock. */
 		if (mdev->act_log->pending_changes) {
@@ -280,11 +333,8 @@
 			write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
 			rcu_read_unlock();
 
-			if (write_al_updates) {
-				al_write_transaction(mdev);
-				mdev->al_writ_cnt++;
-			}
-
+			if (write_al_updates)
+				al_write_transaction(mdev, delegate);
 			spin_lock_irq(&mdev->al_lock);
 			/* FIXME
 			if (err)
@@ -298,6 +348,66 @@
 	}
 }
 
+/*
+ * @delegate:   delegate activity log I/O to the worker thread
+ */
+void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
+{
+	BUG_ON(delegate && current == mdev->tconn->worker.task);
+
+	if (drbd_al_begin_io_prepare(mdev, i))
+		drbd_al_begin_io_commit(mdev, delegate);
+}
+
+int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
+{
+	struct lru_cache *al = mdev->act_log;
+	/* for bios crossing activity log extent boundaries,
+	 * we may need to activate two extents in one go */
+	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
+	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
+	unsigned nr_al_extents;
+	unsigned available_update_slots;
+	unsigned enr;
+
+	D_ASSERT(first <= last);
+
+	nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
+	available_update_slots = min(al->nr_elements - al->used,
+				al->max_pending_changes - al->pending_changes);
+
+	/* We want all necessary updates for a given request within the same transaction.
+	 * We could first check how many updates are *actually* needed,
+	 * and use that instead of the worst-case nr_al_extents */
+	if (available_update_slots < nr_al_extents)
+		return -EWOULDBLOCK;
+
+	/* Is resync active in this area? */
+	for (enr = first; enr <= last; enr++) {
+		struct lc_element *tmp;
+		tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+		if (unlikely(tmp != NULL)) {
+			struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+			if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+				if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
+					return -EBUSY;
+				return -EWOULDBLOCK;
+			}
+		}
+	}
+
+	/* Checkout the refcounts.
+	 * Given that we checked for available elements and update slots above,
+	 * this has to be successful. */
+	for (enr = first; enr <= last; enr++) {
+		struct lc_element *al_ext;
+		al_ext = lc_get_cumulative(mdev->act_log, enr);
+		if (!al_ext)
+			dev_info(DEV, "LOGIC BUG for enr=%u\n", enr);
+	}
+	return 0;
+}
+
 void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
 {
 	/* for bios crossing activity log extent boundaries,
@@ -350,6 +460,24 @@
 		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
 }
 
+static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
+{
+	const unsigned int stripes = mdev->ldev->md.al_stripes;
+	const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
+
+	/* transaction number, modulo on-disk ring buffer wrap around */
+	unsigned int t = mdev->al_tr_number % (mdev->ldev->md.al_size_4k);
+
+	/* ... to aligned 4k on disk block */
+	t = ((t % stripes) * stripe_size_4kB) + t/stripes;
+
+	/* ... to 512 byte sector in activity log */
+	t *= 8;
+
+	/* ... plus offset to the on disk position */
+	return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
+}
+
 static int
 _al_write_transaction(struct drbd_conf *mdev)
 {
@@ -432,23 +560,27 @@
 	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
 		mdev->al_tr_cycle = 0;
 
-	sector =  mdev->ldev->md.md_offset
-		+ mdev->ldev->md.al_offset
-		+ mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);
+	sector = al_tr_number_to_on_disk_sector(mdev);
 
 	crc = crc32c(0, buffer, 4096);
 	buffer->crc32c = cpu_to_be32(crc);
 
 	if (drbd_bm_write_hinted(mdev))
 		err = -EIO;
-		/* drbd_chk_io_error done already */
-	else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
-		err = -EIO;
-		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
-	} else {
-		/* advance ringbuffer position and transaction counter */
-		mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
-		mdev->al_tr_number++;
+	else {
+		bool write_al_updates;
+		rcu_read_lock();
+		write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
+		rcu_read_unlock();
+		if (write_al_updates) {
+			if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
+				err = -EIO;
+				drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
+			} else {
+				mdev->al_tr_number++;
+				mdev->al_writ_cnt++;
+			}
+		}
 	}
 
 	drbd_md_put_buffer(mdev);
@@ -474,20 +606,18 @@
 /* Calls from worker context (see w_restart_disk_io()) need to write the
    transaction directly. Others came through generic_make_request(),
    those need to delegate it to the worker. */
-static int al_write_transaction(struct drbd_conf *mdev)
+static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
 {
-	struct update_al_work al_work;
-
-	if (current == mdev->tconn->worker.task)
+	if (delegate) {
+		struct update_al_work al_work;
+		init_completion(&al_work.event);
+		al_work.w.cb = w_al_write_transaction;
+		al_work.w.mdev = mdev;
+		drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
+		wait_for_completion(&al_work.event);
+		return al_work.err;
+	} else
 		return _al_write_transaction(mdev);
-
-	init_completion(&al_work.event);
-	al_work.w.cb = w_al_write_transaction;
-	al_work.w.mdev = mdev;
-	drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
-	wait_for_completion(&al_work.event);
-
-	return al_work.err;
 }
 
 static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
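
al_tr_number_to_on_disk_sector() is the core of the new striped activity log:
wrap the transaction number on the ring, spread it across stripes, scale from
4k blocks to 512-byte sectors, and add the on-disk offset. A worked example
with invented geometry (4 stripes of 16 blocks each), showing consecutive
transactions landing on different stripes:

    #include <stdio.h>

    int main(void)
    {
            const unsigned int stripes = 4;
            const unsigned int stripe_size_4k = 16;
            const unsigned int al_size_4k = stripes * stripe_size_4k;
            const unsigned long long md_offset = 0, al_offset = 8;
            unsigned int tr_number;

            for (tr_number = 0; tr_number < 5; tr_number++) {
                    unsigned int t = tr_number % al_size_4k; /* ring wrap */

                    /* spread across stripes, then back to linear 4k blocks */
                    t = ((t % stripes) * stripe_size_4k) + t / stripes;
                    /* 4k blocks -> 512-byte sectors, plus on-disk offset */
                    printf("transaction %u -> sector %llu\n",
                           tr_number, md_offset + al_offset + t * 8ULL);
            }
            return 0;
    }
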
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 8dc2950..64fbb83 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -612,6 +612,17 @@
 	}
 }
 
+/* For the layout, see comment above drbd_md_set_sector_offsets(). */
+static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
+{
+	u64 bitmap_sectors;
+	if (ldev->md.al_offset == 8)
+		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
+	else
+		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
+	return bitmap_sectors << (9 + 3);
+}
+
 /*
  * make sure the bitmap has enough room for the attached storage,
  * if necessary, resize.
@@ -668,7 +679,7 @@
 	words = ALIGN(bits, 64) >> LN2_BPL;
 
 	if (get_ldev(mdev)) {
-		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
+		u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
 		put_ldev(mdev);
 		if (bits > bits_on_disk) {
 			dev_info(DEV, "bits = %lu\n", bits);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 6b51afa..f943aac 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -753,13 +753,16 @@
 	u32 flags;
 	u32 md_size_sect;
 
-	s32 al_offset;	/* signed relative sector offset to al area */
+	s32 al_offset;	/* signed relative sector offset to activity log */
 	s32 bm_offset;	/* signed relative sector offset to bitmap */
 
-	/* u32 al_nr_extents;	   important for restoring the AL
-	 * is stored into  ldev->dc.al_extents, which in turn
-	 * gets applied to act_log->nr_elements
-	 */
+	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
+	s32 meta_dev_idx;
+
+	/* see al_tr_number_to_on_disk_sector() */
+	u32 al_stripes;
+	u32 al_stripe_size_4k;
+	u32 al_size_4k; /* cached product of the above */
 };
 
 struct drbd_backing_dev {
@@ -891,6 +894,14 @@
 	} send;
 };
 
+struct submit_worker {
+	struct workqueue_struct *wq;
+	struct work_struct worker;
+
+	spinlock_t lock;
+	struct list_head writes;
+};
+
 struct drbd_conf {
 	struct drbd_tconn *tconn;
 	int vnr;			/* volume number within the connection */
@@ -1009,7 +1020,6 @@
 	struct lru_cache *act_log;	/* activity log */
 	unsigned int al_tr_number;
 	int al_tr_cycle;
-	int al_tr_pos;   /* position of the next transaction in the journal */
 	wait_queue_head_t seq_wait;
 	atomic_t packet_seq;
 	unsigned int peer_seq;
@@ -1032,6 +1042,10 @@
 	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
 	unsigned int peer_max_bio_size;
 	unsigned int local_max_bio_size;
+
+	/* any requests that would block in drbd_make_request()
+	 * are deferred to this single-threaded work queue */
+	struct submit_worker submit;
 };
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1148,25 +1162,44 @@
 		char *why, enum bm_flag flags);
 extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern void drbd_go_diskless(struct drbd_conf *mdev);
 extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 /* Meta data layout
-   We reserve a 128MB Block (4k aligned)
-   * either at the end of the backing device
-   * or on a separate meta data device. */
+ *
+ * We currently have two possible layouts.
+ * Offsets in (512 byte) sectors.
+ * external:
+ *   |----------- md_size_sect ------------------|
+ *   [ 4k superblock ][ activity log ][  Bitmap  ]
+ *   | al_offset == 8 |
+ *   | bm_offset = al_offset + X      |
+ *  ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ *  Variants:
+ *     old, indexed fixed size meta data:
+ *
+ * internal:
+ *            |----------- md_size_sect ------------------|
+ * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
+ *                        | al_offset < 0 |
+ *            | bm_offset = al_offset - Y |
+ *  ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
+ *  end of the device, so that the [4k superblock] will be 4k aligned.
+ *
+ *  The activity log consists of 4k transaction blocks,
+ *  which are written in a plain or striped ring-buffer fashion.
+ *  The activity log size used to be a fixed 32kB,
+ *  but is about to become configurable.
+ */
 
-/* The following numbers are sectors */
-/* Allows up to about 3.8TB, so if you want more,
+/* Our old fixed size meta data layout
+ * allows up to about 3.8TB, so if you want more,
  * you need to use the "flexible" meta data format. */
-#define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
-#define MD_AL_OFFSET	8    /* 8 Sectors after start of meta area */
-#define MD_AL_SECTORS	64   /* = 32 kB on disk activity log ring buffer */
-#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_SECTORS)
-
-/* we do all meta data IO in 4k blocks */
-#define MD_BLOCK_SHIFT	12
-#define MD_BLOCK_SIZE	(1<<MD_BLOCK_SHIFT)
+#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
+#define MD_4kB_SECT	 8
+#define MD_32kB_SECT	64
 
 /* One activity log extent represents 4M of storage */
 #define AL_EXTENT_SHIFT 22
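
For the internal layout in the diagram above, both offsets are negative
because they are measured from the superblock at the end of the device. A
worked example with an invented 2048-sector meta data area, confirming
"bitmap sectors = Y = al_offset - bm_offset":

    #include <stdio.h>

    int main(void)
    {
            long long al_size_sect = 64;             /* 32 kB activity log */
            long long md_size_sect = 2048;           /* invented total */
            long long al_offset = -al_size_sect;     /* AL below the superblock */
            long long bm_offset = -md_size_sect + 8; /* bitmap at the front */

            /* 1976 bitmap + 64 AL + 8 superblock sectors = 2048 total */
            printf("bitmap sectors: %lld\n", al_offset - bm_offset);
            return 0;
    }
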
@@ -1256,7 +1289,6 @@
 
 /* in one sector of the bitmap, we have this many activity_log extents. */
 #define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
-#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
 
 #define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
 #define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
@@ -1276,16 +1308,18 @@
  */
 
 #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
-#define DRBD_MAX_SECTORS_BM \
-	  ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
-#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
+/* we have a certain meta data variant that has a fixed on-disk size of 128
+ * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
+ * log, leaving this many sectors for the bitmap.
+ */
+
+#define DRBD_MAX_SECTORS_FIXED_BM \
+	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
+#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
 #else
-#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
+#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
 /* 16 TB in units of sectors */
 #if BITS_PER_LONG == 32
 /* adjust by one page worth of bitmap,
@@ -1418,6 +1452,7 @@
 extern int proc_details;
 
 /* drbd_req */
+extern void do_submit(struct work_struct *ws);
 extern void __drbd_make_request(struct drbd_conf *, struct bio *, unsigned long);
 extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
@@ -1576,7 +1611,10 @@
 extern const char *drbd_role_str(enum drbd_role s);
 
 /* drbd_actlog.c */
-extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i);
+extern int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate);
+extern bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i);
+extern void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate);
 extern void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i);
 extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
@@ -1755,9 +1793,9 @@
  * BTW, for internal meta data, this happens to be the maximum capacity
  * we could agree upon with our peer node.
  */
-static inline sector_t _drbd_md_first_sector(int meta_dev_idx, struct drbd_backing_dev *bdev)
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
 {
-	switch (meta_dev_idx) {
+	switch (bdev->md.meta_dev_idx) {
 	case DRBD_MD_INDEX_INTERNAL:
 	case DRBD_MD_INDEX_FLEX_INT:
 		return bdev->md.md_offset + bdev->md.bm_offset;
@@ -1767,36 +1805,19 @@
 	}
 }
 
-static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
-{
-	int meta_dev_idx;
-
-	rcu_read_lock();
-	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
-	rcu_read_unlock();
-
-	return _drbd_md_first_sector(meta_dev_idx, bdev);
-}
-
 /**
  * drbd_md_last_sector() - Return the last sector number of the meta data area
  * @bdev:	Meta data block device.
  */
 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
 {
-	int meta_dev_idx;
-
-	rcu_read_lock();
-	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
-	rcu_read_unlock();
-
-	switch (meta_dev_idx) {
+	switch (bdev->md.meta_dev_idx) {
 	case DRBD_MD_INDEX_INTERNAL:
 	case DRBD_MD_INDEX_FLEX_INT:
-		return bdev->md.md_offset + MD_AL_OFFSET - 1;
+		return bdev->md.md_offset + MD_4kB_SECT -1;
 	case DRBD_MD_INDEX_FLEX_EXT:
 	default:
-		return bdev->md.md_offset + bdev->md.md_size_sect;
+		return bdev->md.md_offset + bdev->md.md_size_sect -1;
 	}
 }
 
@@ -1818,18 +1839,13 @@
 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
 {
 	sector_t s;
-	int meta_dev_idx;
 
-	rcu_read_lock();
-	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
-	rcu_read_unlock();
-
-	switch (meta_dev_idx) {
+	switch (bdev->md.meta_dev_idx) {
 	case DRBD_MD_INDEX_INTERNAL:
 	case DRBD_MD_INDEX_FLEX_INT:
 		s = drbd_get_capacity(bdev->backing_bdev)
 			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
-				_drbd_md_first_sector(meta_dev_idx, bdev))
+				drbd_md_first_sector(bdev))
 			: 0;
 		break;
 	case DRBD_MD_INDEX_FLEX_EXT:
@@ -1848,39 +1864,24 @@
 }
 
 /**
- * drbd_md_ss__() - Return the sector number of our meta data super block
- * @mdev:	DRBD device.
+ * drbd_md_ss() - Return the sector number of our meta data super block
  * @bdev:	Meta data block device.
  */
-static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
-				    struct drbd_backing_dev *bdev)
+static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
 {
-	int meta_dev_idx;
+	const int meta_dev_idx = bdev->md.meta_dev_idx;
 
-	rcu_read_lock();
-	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
-	rcu_read_unlock();
-
-	switch (meta_dev_idx) {
-	default: /* external, some index */
-		return MD_RESERVED_SECT * meta_dev_idx;
-	case DRBD_MD_INDEX_INTERNAL:
-		/* with drbd08, internal meta data is always "flexible" */
-	case DRBD_MD_INDEX_FLEX_INT:
-		/* sizeof(struct md_on_disk_07) == 4k
-		 * position: last 4k aligned block of 4k size */
-		if (!bdev->backing_bdev) {
-			if (__ratelimit(&drbd_ratelimit_state)) {
-				dev_err(DEV, "bdev->backing_bdev==NULL\n");
-				dump_stack();
-			}
-			return 0;
-		}
-		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
-			- MD_AL_OFFSET;
-	case DRBD_MD_INDEX_FLEX_EXT:
+	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
 		return 0;
-	}
+
+	/* Since drbd08, internal meta data is always "flexible".
+	 * position: last 4k aligned block of 4k size */
+	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
+		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
+
+	/* external, some index; this is the old fixed size layout */
+	return MD_128MB_SECT * bdev->md.meta_dev_idx;
 }
 
 static inline void
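
The streamlined drbd_md_ss() above places the internal superblock in the last
4k-aligned 4k block of the device: mask the capacity (in 512-byte sectors)
down to a multiple of 8, then step back 8 sectors; whatever remains above it
is the up-to-7-sector padding from the layout comment. A worked example with
an invented capacity:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long capacity = 1000005; /* sectors, not 4k aligned */
            unsigned long long sb = (capacity & ~7ULL) - 8;

            /* (1000005 & ~7) = 1000000; minus 8 = 999992 */
            printf("superblock at sector %llu, %llu padding sectors above\n",
                   sb, capacity - (sb + 8));
            return 0;
    }
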
@@ -2053,9 +2054,11 @@
 		if (mdev->state.disk == D_DISKLESS)
 			/* even internal references gone, safe to destroy */
 			drbd_ldev_destroy(mdev);
-		if (mdev->state.disk == D_FAILED)
+		if (mdev->state.disk == D_FAILED) {
 			/* all application IO references gone. */
-			drbd_go_diskless(mdev);
+			if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+				drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
+		}
 		wake_up(&mdev->misc_wait);
 	}
 }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e98da67..a5dca6a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -45,7 +45,7 @@
 #include <linux/reboot.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
-
+#include <linux/workqueue.h>
 #define __KERNEL_SYSCALLS__
 #include <linux/unistd.h>
 #include <linux/vmalloc.h>
@@ -63,7 +63,7 @@
 
 int drbd_init(void);
 static int drbd_open(struct block_device *bdev, fmode_t mode);
-static int drbd_release(struct gendisk *gd, fmode_t mode);
+static void drbd_release(struct gendisk *gd, fmode_t mode);
 static int w_md_sync(struct drbd_work *w, int unused);
 static void md_sync_timer_fn(unsigned long data);
 static int w_bitmap_io(struct drbd_work *w, int unused);
@@ -1849,13 +1849,12 @@
 	return rv;
 }
 
-static int drbd_release(struct gendisk *gd, fmode_t mode)
+static void drbd_release(struct gendisk *gd, fmode_t mode)
 {
 	struct drbd_conf *mdev = gd->private_data;
 	mutex_lock(&drbd_main_mutex);
 	mdev->open_cnt--;
 	mutex_unlock(&drbd_main_mutex);
-	return 0;
 }
 
 static void drbd_set_defaults(struct drbd_conf *mdev)
@@ -2300,6 +2299,7 @@
 	idr_for_each_entry(&minors, mdev, i) {
 		idr_remove(&minors, mdev_to_minor(mdev));
 		idr_remove(&mdev->tconn->volumes, mdev->vnr);
+		destroy_workqueue(mdev->submit.wq);
 		del_gendisk(mdev->vdisk);
 		/* synchronize_rcu(); No other threads running at this point */
 		kref_put(&mdev->kref, &drbd_minor_destroy);
@@ -2589,6 +2589,21 @@
 	kfree(tconn);
 }
 
+int init_submitter(struct drbd_conf *mdev)
+{
+	/* opencoded create_singlethread_workqueue(),
+	 * to be able to say "drbd%d", ..., minor */
+	mdev->submit.wq = alloc_workqueue("drbd%u_submit",
+			WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
+	if (!mdev->submit.wq)
+		return -ENOMEM;
+
+	INIT_WORK(&mdev->submit.worker, do_submit);
+	spin_lock_init(&mdev->submit.lock);
+	INIT_LIST_HEAD(&mdev->submit.writes);
+	return 0;
+}
+
 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
 {
 	struct drbd_conf *mdev;
@@ -2678,6 +2693,12 @@
 		goto out_idr_remove_minor;
 	}
 
+	if (init_submitter(mdev)) {
+		err = ERR_NOMEM;
+		drbd_msg_put_info("unable to create submit workqueue");
+		goto out_idr_remove_vol;
+	}
+
 	add_disk(disk);
 	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
 
@@ -2688,6 +2709,8 @@
 
 	return NO_ERROR;
 
+out_idr_remove_vol:
+	idr_remove(&tconn->volumes, vnr_got);
 out_idr_remove_minor:
 	idr_remove(&minors, minor_got);
 	synchronize_rcu();
@@ -2795,6 +2818,7 @@
 	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 
+	kfree(ldev->disk_conf);
 	kfree(ldev);
 }
 
@@ -2834,8 +2858,9 @@
 	rcu_read_unlock();
 }
 
+/* aligned 4kByte */
 struct meta_data_on_disk {
-	u64 la_size;           /* last agreed size. */
+	u64 la_size_sect;      /* last agreed size. */
 	u64 uuid[UI_SIZE];   /* UUIDs. */
 	u64 device_uuid;
 	u64 reserved_u64_1;
@@ -2843,13 +2868,17 @@
 	u32 magic;
 	u32 md_size_sect;
 	u32 al_offset;         /* offset to this block */
-	u32 al_nr_extents;     /* important for restoring the AL */
+	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
 	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
 	u32 bm_offset;         /* offset to the bitmap, from here */
 	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
 	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
-	u32 reserved_u32[3];
 
+	/* see al_tr_number_to_on_disk_sector() */
+	u32 al_stripes;
+	u32 al_stripe_size_4k;
+
+	u8 reserved_u8[4096 - (7*8 + 10*4)];
 } __packed;
 
 /**
@@ -2862,6 +2891,10 @@
 	sector_t sector;
 	int i;
 
+	/* Don't accidentally change the DRBD meta data layout. */
+	BUILD_BUG_ON(UI_SIZE != 4);
+	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
+
 	del_timer(&mdev->md_sync_timer);
 	/* timer may be rearmed by drbd_md_mark_dirty() now. */
 	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
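
The two BUILD_BUG_ON()s freeze the on-disk format at compile time; together
with the explicit reserved_u8 padding they turn any accidental field addition
into a build failure rather than a silent layout shift. The same guard
sketched with C11 _Static_assert (the ten u32 fields are collapsed into one
array here; the real struct names each one):

    #include <stdint.h>

    struct on_disk_example {
            uint64_t la_size_sect;
            uint64_t uuid[4];        /* UI_SIZE == 4 */
            uint64_t device_uuid;
            uint64_t reserved_u64_1;
            uint32_t u32_fields[10]; /* flags ... al_stripe_size_4k */
            uint8_t  reserved_u8[4096 - (7 * 8 + 10 * 4)];
    } __attribute__((packed));

    _Static_assert(sizeof(struct on_disk_example) == 4096,
                   "on-disk meta data block must stay exactly 4k");

    int main(void) { return 0; }
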
@@ -2876,9 +2909,9 @@
 	if (!buffer)
 		goto out;
 
-	memset(buffer, 0, 512);
+	memset(buffer, 0, sizeof(*buffer));
 
-	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
+	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
 	for (i = UI_CURRENT; i < UI_SIZE; i++)
 		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
 	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
@@ -2893,7 +2926,10 @@
 	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
 	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
 
-	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
+	buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
+	buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
+
+	D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
 	sector = mdev->ldev->md.md_offset;
 
 	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
@@ -2911,13 +2947,141 @@
 	put_ldev(mdev);
 }
 
+static int check_activity_log_stripe_size(struct drbd_conf *mdev,
+		struct meta_data_on_disk *on_disk,
+		struct drbd_md *in_core)
+{
+	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
+	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
+	u64 al_size_4k;
+
+	/* both not set: default to old fixed size activity log */
+	if (al_stripes == 0 && al_stripe_size_4k == 0) {
+		al_stripes = 1;
+		al_stripe_size_4k = MD_32kB_SECT/8;
+	}
+
+	/* some paranoia plausibility checks */
+
+	/* we need both values to be set */
+	if (al_stripes == 0 || al_stripe_size_4k == 0)
+		goto err;
+
+	al_size_4k = (u64)al_stripes * al_stripe_size_4k;
+
+	/* Upper limit of activity log area, to avoid potential overflow
+	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
+	 * than 72 * 4k blocks total only increases the amount of history,
+	 * limiting this arbitrarily to 16 GB is not a real limitation ;-)  */
+	if (al_size_4k > (16 * 1024 * 1024/4))
+		goto err;
+
+	/* Lower limit: we need at least 8 transaction slots (32kB)
+	 * to not break existing setups */
+	if (al_size_4k < MD_32kB_SECT/8)
+		goto err;
+
+	in_core->al_stripe_size_4k = al_stripe_size_4k;
+	in_core->al_stripes = al_stripes;
+	in_core->al_size_4k = al_size_4k;
+
+	return 0;
+err:
+	dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
+			al_stripes, al_stripe_size_4k);
+	return -EINVAL;
+}
+
+static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+{
+	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
+	struct drbd_md *in_core = &bdev->md;
+	s32 on_disk_al_sect;
+	s32 on_disk_bm_sect;
+
+	/* The on-disk size of the activity log, calculated from offsets, and
+	 * the size of the activity log calculated from the stripe settings,
+	 * should match.
+	 * Though we could relax this a bit: it is ok if the striped activity log
+	 * fits in the available on-disk activity log size.
+	 * Right now, that would break how resize is implemented.
+	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
+	 * of possible unused padding space in the on disk layout. */
+	if (in_core->al_offset < 0) {
+		if (in_core->bm_offset > in_core->al_offset)
+			goto err;
+		on_disk_al_sect = -in_core->al_offset;
+		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
+	} else {
+		if (in_core->al_offset != MD_4kB_SECT)
+			goto err;
+		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
+			goto err;
+
+		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
+		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
+	}
+
+	/* old fixed size meta data is exactly that: fixed. */
+	if (in_core->meta_dev_idx >= 0) {
+		if (in_core->md_size_sect != MD_128MB_SECT
+		||  in_core->al_offset != MD_4kB_SECT
+		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
+		||  in_core->al_stripes != 1
+		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
+			goto err;
+	}
+
+	if (capacity < in_core->md_size_sect)
+		goto err;
+	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
+		goto err;
+
+	/* should be aligned, and at least 32k */
+	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
+		goto err;
+
+	/* should fit (for now: exactly) into the available on-disk space;
+	 * overflow prevention is in check_activity_log_stripe_size() above. */
+	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
+		goto err;
+
+	/* again, should be aligned */
+	if (in_core->bm_offset & 7)
+		goto err;
+
+	/* FIXME check for device grow with flex external meta data? */
+
+	/* can the available bitmap space cover the last agreed device size? */
+	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
+		goto err;
+
+	return 0;
+
+err:
+	dev_err(DEV, "meta data offsets don't make sense: idx=%d "
+			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
+			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
+			in_core->meta_dev_idx,
+			in_core->al_stripes, in_core->al_stripe_size_4k,
+			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
+			(unsigned long long)in_core->la_size_sect,
+			(unsigned long long)capacity);
+
+	return -EINVAL;
+}
+
+
 /**
  * drbd_md_read() - Reads in the meta data super block
  * @mdev:	DRBD device.
  * @bdev:	Device from which the meta data should be read in.
  *
- * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
+ * Return NO_ERROR on success, and an enum drbd_ret_code in case
  * something goes wrong.
+ *
+ * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
+ * even before @bdev is assigned to @mdev->ldev.
  */
 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 {
@@ -2925,12 +3089,17 @@
 	u32 magic, flags;
 	int i, rv = NO_ERROR;
 
-	if (!get_ldev_if_state(mdev, D_ATTACHING))
-		return ERR_IO_MD_DISK;
+	if (mdev->state.disk != D_DISKLESS)
+		return ERR_DISK_CONFIGURED;
 
 	buffer = drbd_md_get_buffer(mdev);
 	if (!buffer)
-		goto out;
+		return ERR_NOMEM;
+
+	/* First, figure out where our meta data superblock is located,
+	 * and read it. */
+	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
+	bdev->md.md_offset = drbd_md_ss(bdev);
 
 	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
 		/* NOTE: can't do normal error processing here as this is
@@ -2949,46 +3118,52 @@
 		rv = ERR_MD_UNCLEAN;
 		goto err;
 	}
+
+	rv = ERR_MD_INVALID;
 	if (magic != DRBD_MD_MAGIC_08) {
 		if (magic == DRBD_MD_MAGIC_07)
 			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
 		else
 			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
-		rv = ERR_MD_INVALID;
-		goto err;
-	}
-	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
-		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
-		    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
-		rv = ERR_MD_INVALID;
-		goto err;
-	}
-	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
-		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
-		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
-		rv = ERR_MD_INVALID;
-		goto err;
-	}
-	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
-		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
-		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
-		rv = ERR_MD_INVALID;
 		goto err;
 	}
 
 	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
 		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
 		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
-		rv = ERR_MD_INVALID;
 		goto err;
 	}
 
-	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
+
+	/* convert to in_core endian */
+	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
 	for (i = UI_CURRENT; i < UI_SIZE; i++)
 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
 	bdev->md.flags = be32_to_cpu(buffer->flags);
 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
 
+	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
+	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
+	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
+
+	if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
+		goto err;
+	if (check_offsets_and_sizes(mdev, bdev))
+		goto err;
+
+	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
+		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
+		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
+		goto err;
+	}
+	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
+		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
+		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
+		goto err;
+	}
+
+	rv = NO_ERROR;
+
 	spin_lock_irq(&mdev->tconn->req_lock);
 	if (mdev->state.conn < C_CONNECTED) {
 		unsigned int peer;
@@ -3000,8 +3175,6 @@
 
  err:
 	drbd_md_put_buffer(mdev);
- out:
-	put_ldev(mdev);
 
 	return rv;
 }
@@ -3239,8 +3412,12 @@
 	 * end up here after a failed attach, before ldev was even assigned.
 	 */
 	if (mdev->bitmap && mdev->ldev) {
+		/* An interrupted resync or similar is allowed to recount bits
+		 * while we detach.
+		 * Any modifications would not be expected anymore, though.
+		 */
 		if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
-					"detach", BM_LOCKED_MASK)) {
+					"detach", BM_LOCKED_TEST_ALLOWED)) {
 			if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
 				drbd_md_set_flag(mdev, MDF_FULL_SYNC);
 				drbd_md_sync(mdev);
@@ -3252,13 +3429,6 @@
 	return 0;
 }
 
-void drbd_go_diskless(struct drbd_conf *mdev)
-{
-	D_ASSERT(mdev->state.disk == D_FAILED);
-	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
-		drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
-}
-
 /**
  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
  * @mdev:	DRBD device.
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 2af26fc..9e3f441 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -696,37 +696,52 @@
 	return 0;
 }
 
-/* initializes the md.*_offset members, so we are able to find
- * the on disk meta data */
+/* Initializes the md.*_offset members, so we are able to find
+ * the on disk meta data.
+ *
+ * We currently have two possible layouts:
+ * external:
+ *   |----------- md_size_sect ------------------|
+ *   [ 4k superblock ][ activity log ][  Bitmap  ]
+ *   | al_offset == 8 |
+ *   | bm_offset = al_offset + X      |
+ *  ==> bitmap sectors = md_size_sect - bm_offset
+ *
+ * internal:
+ *            |----------- md_size_sect ------------------|
+ * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
+ *                        | al_offset < 0 |
+ *            | bm_offset = al_offset - Y |
+ *  ==> bitmap sectors = Y = al_offset - bm_offset
+ *
+ *  Activity log size used to be fixed 32kB,
+ *  but is about to become configurable.
+ */
 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
 				       struct drbd_backing_dev *bdev)
 {
 	sector_t md_size_sect = 0;
-	int meta_dev_idx;
+	unsigned int al_size_sect = bdev->md.al_size_4k * 8;
 
-	rcu_read_lock();
-	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+	bdev->md.md_offset = drbd_md_ss(bdev);
 
-	switch (meta_dev_idx) {
+	switch (bdev->md.meta_dev_idx) {
 	default:
 		/* v07 style fixed size indexed meta data */
-		bdev->md.md_size_sect = MD_RESERVED_SECT;
-		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
-		bdev->md.al_offset = MD_AL_OFFSET;
-		bdev->md.bm_offset = MD_BM_OFFSET;
+		bdev->md.md_size_sect = MD_128MB_SECT;
+		bdev->md.al_offset = MD_4kB_SECT;
+		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
 		break;
 	case DRBD_MD_INDEX_FLEX_EXT:
 		/* just occupy the full device; unit: sectors */
 		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
-		bdev->md.md_offset = 0;
-		bdev->md.al_offset = MD_AL_OFFSET;
-		bdev->md.bm_offset = MD_BM_OFFSET;
+		bdev->md.al_offset = MD_4kB_SECT;
+		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
 		break;
 	case DRBD_MD_INDEX_INTERNAL:
 	case DRBD_MD_INDEX_FLEX_INT:
-		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
 		/* al size is still fixed */
-		bdev->md.al_offset = -MD_AL_SECTORS;
+		bdev->md.al_offset = -al_size_sect;
 		/* we need (slightly less than) ~ this much bitmap sectors: */
 		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
 		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -735,14 +750,13 @@
 
 		/* plus the "drbd meta data super block",
 		 * and the activity log; */
-		md_size_sect += MD_BM_OFFSET;
+		md_size_sect += MD_4kB_SECT + al_size_sect;
 
 		bdev->md.md_size_sect = md_size_sect;
 		/* bitmap offset is adjusted by 'super' block size */
-		bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
+		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
 		break;
 	}
-	rcu_read_unlock();
 }
 
 /* input size is expected to be in KB */
@@ -805,7 +819,7 @@
 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
-	sector_t la_size, u_size;
+	sector_t la_size_sect, u_size;
 	sector_t size;
 	char ppb[10];
 
@@ -828,7 +842,7 @@
 
 	prev_first_sect = drbd_md_first_sector(mdev->ldev);
 	prev_size = mdev->ldev->md.md_size_sect;
-	la_size = mdev->ldev->md.la_size_sect;
+	la_size_sect = mdev->ldev->md.la_size_sect;
 
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
@@ -864,7 +878,7 @@
 	if (rv == dev_size_error)
 		goto out;
 
-	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
+	la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
 
 	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
 		|| prev_size	   != mdev->ldev->md.md_size_sect;
@@ -886,9 +900,9 @@
 		drbd_md_mark_dirty(mdev);
 	}
 
-	if (size > la_size)
+	if (size > la_size_sect)
 		rv = grew;
-	if (size < la_size)
+	if (size < la_size_sect)
 		rv = shrunk;
 out:
 	lc_unlock(mdev->act_log);
@@ -903,7 +917,7 @@
 		  sector_t u_size, int assume_peer_has_space)
 {
 	sector_t p_size = mdev->p_size;   /* partner's disk size. */
-	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
+	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
 	sector_t m_size; /* my size */
 	sector_t size = 0;
 
@@ -917,8 +931,8 @@
 	if (p_size && m_size) {
 		size = min_t(sector_t, p_size, m_size);
 	} else {
-		if (la_size) {
-			size = la_size;
+		if (la_size_sect) {
+			size = la_size_sect;
 			if (m_size && m_size < size)
 				size = m_size;
 			if (p_size && p_size < size)
@@ -1127,15 +1141,32 @@
 	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
 }
 
-static void enforce_disk_conf_limits(struct disk_conf *dc)
+static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
 {
-	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
-		dc->al_extents = DRBD_AL_EXTENTS_MIN;
-	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
-		dc->al_extents = DRBD_AL_EXTENTS_MAX;
+	/* This is limited by 16 bit "slot" numbers,
+	 * and by available on-disk context storage.
+	 *
+	 * Also (u16)~0 is special (denotes a "free" extent).
+	 *
+	 * One transaction occupies one 4kB on-disk block,
+	 * we have n such blocks in the on disk ring buffer,
+	 * the "current" transaction may fail (n-1),
+	 * and each transaction holds context information for 919 slot numbers.
+	 *
+	 * 72 transaction blocks amounts to more than 2**16 context slots,
+	 * so cap there first.
+	 */
+	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
+	const unsigned int sufficient_on_disk =
+		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
+		/AL_CONTEXT_PER_TRANSACTION;
 
-	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
-		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+	unsigned int al_size_4k = bdev->md.al_size_4k;
+
+	if (al_size_4k > sufficient_on_disk)
+		return max_al_nr;
+
+	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
 }
 
 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
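
The cap computed above follows directly from the comment's arithmetic: one 4k
transaction block carries context for 919 slot numbers, and 72 blocks already
cover more than the 2^16 slots a u16 can address, so a larger ring buys
history but no extra extents. A worked check (919 and the u16-derived maximum
are taken from the comment; AL_CONTEXT_PER_TRANSACTION is the kernel's name
for the former):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int slots_per_transaction = 919;
            const unsigned int max_al_nr = 65534; /* (u16)~0 is reserved */
            unsigned int sufficient =
                    (max_al_nr + slots_per_transaction - 1)
                    / slots_per_transaction;

            printf("%u transaction blocks suffice (72 * 919 = %u > 65536)\n",
                   sufficient, 72 * slots_per_transaction);
            return 0;
    }
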
@@ -1182,7 +1213,13 @@
 	if (!expect(new_disk_conf->resync_rate >= 1))
 		new_disk_conf->resync_rate = 1;
 
-	enforce_disk_conf_limits(new_disk_conf);
+	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+	if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
+		new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+
+	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
 
 	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
 	if (fifo_size != mdev->rs_plan_s->size) {
@@ -1330,7 +1367,8 @@
 		goto fail;
 	}
 
-	enforce_disk_conf_limits(new_disk_conf);
+	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
 
 	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
 	if (!new_plan) {
@@ -1343,6 +1381,12 @@
 		goto fail;
 	}
 
+	write_lock_irq(&global_state_lock);
+	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+	write_unlock_irq(&global_state_lock);
+	if (retcode != NO_ERROR)
+		goto fail;
+
 	rcu_read_lock();
 	nc = rcu_dereference(mdev->tconn->net_conf);
 	if (nc) {
@@ -1399,8 +1443,16 @@
 		goto fail;
 	}
 
-	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
-	drbd_md_set_sector_offsets(mdev, nbc);
+	/* Read our meta data super block early.
+	 * This also sets other on-disk offsets. */
+	retcode = drbd_md_read(mdev, nbc);
+	if (retcode != NO_ERROR)
+		goto fail;
+
+	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+	if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
+		new_disk_conf->al_extents = drbd_al_extents_max(nbc);
 
 	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
 		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
@@ -1416,7 +1468,7 @@
 		min_md_device_sectors = (2<<10);
 	} else {
 		max_possible_sectors = DRBD_MAX_SECTORS;
-		min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
+		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
 	}
 
 	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
@@ -1467,8 +1519,6 @@
 	if (!get_ldev_if_state(mdev, D_ATTACHING))
 		goto force_diskless;
 
-	drbd_md_set_sector_offsets(mdev, nbc);
-
 	if (!mdev->bitmap) {
 		if (drbd_bm_init(mdev)) {
 			retcode = ERR_NOMEM;
@@ -1476,10 +1526,6 @@
 		}
 	}
 
-	retcode = drbd_md_read(mdev, nbc);
-	if (retcode != NO_ERROR)
-		goto force_diskless_dec;
-
 	if (mdev->state.conn < C_CONNECTED &&
 	    mdev->state.role == R_PRIMARY &&
 	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
@@ -2158,8 +2204,11 @@
 		return SS_SUCCESS;
 	case SS_PRIMARY_NOP:
 		/* Our state checking code wants to see the peer outdated. */
-		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
-						pdsk, D_OUTDATED), CS_VERBOSE);
+		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+
+		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
+			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+
 		break;
 	case SS_CW_FAILED_BY_PEER:
 		/* The peer probably wants to see us outdated. */
@@ -2406,22 +2455,19 @@
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 	drbd_flush_workqueue(mdev);
 
-	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
-
-	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
+	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
+	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
+	 * try to start a resync handshake as sync target for full sync.
+	 */
+	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
+		retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+		if (retcode >= SS_SUCCESS) {
+			if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+				"set_n_write from invalidate", BM_LOCKED_MASK))
+				retcode = ERR_IO_MD_DISK;
+		}
+	} else
 		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-
-	while (retcode == SS_NEED_CONNECTION) {
-		spin_lock_irq(&mdev->tconn->req_lock);
-		if (mdev->state.conn < C_CONNECTED)
-			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
-		spin_unlock_irq(&mdev->tconn->req_lock);
-
-		if (retcode != SS_NEED_CONNECTION)
-			break;
-
-		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-	}
 	drbd_resume_io(mdev);
 
 out:
@@ -2475,21 +2521,22 @@
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 	drbd_flush_workqueue(mdev);
 
-	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
-	if (retcode < SS_SUCCESS) {
-		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
-			/* The peer will get a resync upon connect anyways.
-			 * Just make that into a full resync. */
-			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
-			if (retcode >= SS_SUCCESS) {
-				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
-						   "set_n_write from invalidate_peer",
-						   BM_LOCKED_SET_ALLOWED))
-					retcode = ERR_IO_MD_DISK;
-			}
-		} else
-			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
-	}
+	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
+	 * in the bitmap.  Otherwise, try to start a resync handshake
+	 * as sync source for full sync.
+	 */
+	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+		/* The peer will get a resync upon connect anyways. Just make that
+		   into a full resync. */
+		retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+		if (retcode >= SS_SUCCESS) {
+			if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+				"set_n_write from invalidate_peer",
+				BM_LOCKED_SET_ALLOWED))
+				retcode = ERR_IO_MD_DISK;
+		}
+	} else
+		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
 	drbd_resume_io(mdev);
 
 out:
@@ -3162,6 +3209,7 @@
 				    CS_VERBOSE + CS_WAIT_COMPLETE);
 		idr_remove(&mdev->tconn->volumes, mdev->vnr);
 		idr_remove(&minors, mdev_to_minor(mdev));
+		destroy_workqueue(mdev->submit.wq);
 		del_gendisk(mdev->vdisk);
 		synchronize_rcu();
 		kref_put(&mdev->kref, &drbd_minor_destroy);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 928adb8..bf31d41 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -313,8 +313,14 @@
 
 static int drbd_proc_open(struct inode *inode, struct file *file)
 {
-	if (try_module_get(THIS_MODULE))
-		return single_open(file, drbd_seq_show, PDE_DATA(inode));
+	int err;
+
+	if (try_module_get(THIS_MODULE)) {
+		err = single_open(file, drbd_seq_show, PDE_DATA(inode));
+		if (err)
+			module_put(THIS_MODULE);
+		return err;
+	}
 	return -ENODEV;
 }
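The drbd_proc_open() fix above is the standard reference-unwind pattern: a reference taken before an operation that can fail must be dropped on that operation's failure path, or it leaks. A runnable userspace analogue, with try_get()/put()/do_open() as hypothetical stand-ins for try_module_get()/module_put()/single_open():

#include <stdio.h>
#include <stdatomic.h>

static atomic_int refcount;

static int  try_get(void) { atomic_fetch_add(&refcount, 1); return 1; }
static void put(void)     { atomic_fetch_sub(&refcount, 1); }
static int  do_open(int fail) { return fail ? -1 : 0; }

static int open_balanced(int fail)
{
	int err;

	if (!try_get())
		return -1;
	err = do_open(fail);
	if (err)
		put();          /* unwind the reference on failure */
	return err;
}

int main(void)
{
	open_balanced(1);       /* a failing open must not leak a ref */
	printf("refcount after failed open: %d\n",
	       atomic_load(&refcount));
	return 0;
}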
 
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 83c5ae0..4222aff 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -850,6 +850,7 @@
 		err = drbd_send_current_state(mdev);
 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
 	clear_bit(RESIZE_PENDING, &mdev->flags);
+	atomic_set(&mdev->ap_in_flight, 0);
 	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
 	return err;
 }
@@ -2266,7 +2267,7 @@
 		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
 		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
 		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
-		drbd_al_begin_io(mdev, &peer_req->i);
+		drbd_al_begin_io(mdev, &peer_req->i, true);
 	}
 
 	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
@@ -2662,7 +2663,6 @@
 		if (hg == -1 && mdev->state.role == R_PRIMARY) {
 			enum drbd_state_rv rv2;
 
-			drbd_set_role(mdev, R_SECONDARY, 0);
 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
 			  * we do not need to wait for the after state change work either. */
@@ -3993,7 +3993,7 @@
 
 	clear_bit(DISCARD_MY_DATA, &mdev->flags);
 
-	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
+	drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
 
 	return 0;
 }
@@ -4660,8 +4660,8 @@
 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
 static int drbd_do_auth(struct drbd_tconn *tconn)
 {
-	dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
-	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
+	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
+	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
 	return -1;
 }
 #else
@@ -5258,9 +5258,11 @@
 	bool ping_timeout_active = false;
 	struct net_conf *nc;
 	int ping_timeo, tcp_cork, ping_int;
+	struct sched_param param = { .sched_priority = 2 };
 
-	current->policy = SCHED_RR;  /* Make this a realtime task! */
-	current->rt_priority = 2;    /* more important than all other tasks */
+	rv = sched_setscheduler(current, SCHED_RR, &param);
+	if (rv < 0)
+		conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
 
 	while (get_t_state(thi) == RUNNING) {
 		drbd_thread_current_set_cpu(thi);
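Replacing the direct writes to current->policy and current->rt_priority with sched_setscheduler() also forces the caller to handle failure. The same API exists in userspace; a minimal sketch (SCHED_RR normally needs CAP_SYS_NICE, so an unprivileged run is expected to take the error path):

#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 2 };

	if (sched_setscheduler(0 /* self */, SCHED_RR, &param) < 0)
		fprintf(stderr, "set priority failed: %s\n",
			strerror(errno));
	else
		printf("running SCHED_RR at priority %d\n",
		       param.sched_priority);
	return 0;
}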
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 2b8303a..c24379f 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -34,14 +34,14 @@
 static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
 
 /* Update disk stats at start of I/O request */
-static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
+static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
 {
-	const int rw = bio_data_dir(bio);
+	const int rw = bio_data_dir(req->master_bio);
 	int cpu;
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &mdev->vdisk->part0);
 	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
-	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], req->i.size >> 9);
 	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
 		       the compiler warning about cpu only assigned but never used... */
 	part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -263,8 +263,7 @@
 		else
 			root = &mdev->read_requests;
 		drbd_remove_request_interval(root, req);
-	} else if (!(s & RQ_POSTPONED))
-		D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
+	}
 
 	/* Before we can signal completion to the upper layers,
 	 * we may need to close the current transfer log epoch.
@@ -755,6 +754,11 @@
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
 		break;
+
+	case QUEUE_AS_DRBD_BARRIER:
+		start_new_tl_epoch(mdev->tconn);
+		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
+		break;
 	};
 
 	return rv;
@@ -861,8 +865,10 @@
 	bool congested = false;
 	enum drbd_on_congestion on_congestion;
 
+	rcu_read_lock();
 	nc = rcu_dereference(tconn->net_conf);
 	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
+	rcu_read_unlock();
 	if (on_congestion == OC_BLOCK ||
 	    tconn->agreed_pro_version < 96)
 		return;
@@ -956,14 +962,8 @@
 	struct drbd_conf *mdev = req->w.mdev;
 	int remote, send_oos;
 
-	rcu_read_lock();
 	remote = drbd_should_do_remote(mdev->state);
-	if (remote) {
-		maybe_pull_ahead(mdev);
-		remote = drbd_should_do_remote(mdev->state);
-	}
 	send_oos = drbd_should_send_out_of_sync(mdev->state);
-	rcu_read_unlock();
 
 	/* Need to replicate writes.  Unless it is an empty flush,
 	 * which is better mapped to a DRBD P_BARRIER packet,
@@ -975,8 +975,8 @@
 		/* The only size==0 bios we expect are empty flushes. */
 		D_ASSERT(req->master_bio->bi_rw & REQ_FLUSH);
 		if (remote)
-			start_new_tl_epoch(mdev->tconn);
-		return 0;
+			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
+		return remote;
 	}
 
 	if (!remote && !send_oos)
@@ -1020,12 +1020,24 @@
 		bio_endio(bio, -EIO);
 }
 
-void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
 {
-	const int rw = bio_rw(bio);
-	struct bio_and_error m = { NULL, };
+	spin_lock(&mdev->submit.lock);
+	list_add_tail(&req->tl_requests, &mdev->submit.writes);
+	spin_unlock(&mdev->submit.lock);
+	queue_work(mdev->submit.wq, &mdev->submit.worker);
+}
+
+/* returns the new drbd_request pointer, if the caller is expected to
+ * drbd_send_and_submit() it (to save latency), or NULL if we queued the
+ * request on the submitter thread.
+ * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
+ */
+struct drbd_request *
+drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+{
+	const int rw = bio_data_dir(bio);
 	struct drbd_request *req;
-	bool no_remote = false;
 
 	/* allocate outside of all locks; */
 	req = drbd_req_new(mdev, bio);
@@ -1035,7 +1047,7 @@
 		 * if user cannot handle io errors, that's not our business. */
 		dev_err(DEV, "could not kmalloc() req\n");
 		bio_endio(bio, -ENOMEM);
-		return;
+		return ERR_PTR(-ENOMEM);
 	}
 	req->start_time = start_time;
 
@@ -1044,28 +1056,40 @@
 		req->private_bio = NULL;
 	}
 
-	/* For WRITES going to the local disk, grab a reference on the target
-	 * extent.  This waits for any resync activity in the corresponding
-	 * resync extent to finish, and, if necessary, pulls in the target
-	 * extent into the activity log, which involves further disk io because
-	 * of transactional on-disk meta data updates.
-	 * Empty flushes don't need to go into the activity log, they can only
-	 * flush data for pending writes which are already in there. */
+	/* Update disk stats */
+	_drbd_start_io_acct(mdev, req);
+
 	if (rw == WRITE && req->private_bio && req->i.size
 	&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
+		if (!drbd_al_begin_io_fastpath(mdev, &req->i)) {
+			drbd_queue_write(mdev, req);
+			return NULL;
+		}
 		req->rq_state |= RQ_IN_ACT_LOG;
-		drbd_al_begin_io(mdev, &req->i);
 	}
 
+	return req;
+}
+
+static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
+{
+	const int rw = bio_rw(req->master_bio);
+	struct bio_and_error m = { NULL, };
+	bool no_remote = false;
+
 	spin_lock_irq(&mdev->tconn->req_lock);
 	if (rw == WRITE) {
 		/* This may temporarily give up the req_lock,
 		 * but will re-acquire it before it returns here.
 		 * Needs to be before the check on drbd_suspended() */
 		complete_conflicting_writes(req);
+		/* no more giving up req_lock from now on! */
+
+		/* check for congestion, and potentially stop sending
+		 * full data updates, but start sending "dirty bits" only. */
+		maybe_pull_ahead(mdev);
 	}
 
-	/* no more giving up req_lock from now on! */
 
 	if (drbd_suspended(mdev)) {
 		/* push back and retry: */
@@ -1078,9 +1102,6 @@
 		goto out;
 	}
 
-	/* Update disk stats */
-	_drbd_start_io_acct(mdev, req, bio);
-
 	/* We fail READ/READA early, if we can not serve it.
 	 * We must do this before req is registered on any lists.
 	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
@@ -1137,7 +1158,116 @@
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
-	return;
+}
+
+void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
+{
+	struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
+	if (IS_ERR_OR_NULL(req))
+		return;
+	drbd_send_and_submit(mdev, req);
+}
+
+static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
+{
+	struct drbd_request *req, *tmp;
+	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+		const int rw = bio_data_dir(req->master_bio);
+
+		if (rw == WRITE /* rw != WRITE should not even end up here! */
+		&& req->private_bio && req->i.size
+		&& !test_bit(AL_SUSPENDED, &mdev->flags)) {
+			if (!drbd_al_begin_io_fastpath(mdev, &req->i))
+				continue;
+
+			req->rq_state |= RQ_IN_ACT_LOG;
+		}
+
+		list_del_init(&req->tl_requests);
+		drbd_send_and_submit(mdev, req);
+	}
+}
+
+static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
+					    struct list_head *incoming,
+					    struct list_head *pending)
+{
+	struct drbd_request *req, *tmp;
+	int wake = 0;
+	int err;
+
+	spin_lock_irq(&mdev->al_lock);
+	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+		err = drbd_al_begin_io_nonblock(mdev, &req->i);
+		if (err == -EBUSY)
+			wake = 1;
+		if (err)
+			continue;
+		req->rq_state |= RQ_IN_ACT_LOG;
+		list_move_tail(&req->tl_requests, pending);
+	}
+	spin_unlock_irq(&mdev->al_lock);
+	if (wake)
+		wake_up(&mdev->al_wait);
+
+	return !list_empty(pending);
+}
+
+void do_submit(struct work_struct *ws)
+{
+	struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
+	LIST_HEAD(incoming);
+	LIST_HEAD(pending);
+	struct drbd_request *req, *tmp;
+
+	for (;;) {
+		spin_lock(&mdev->submit.lock);
+		list_splice_tail_init(&mdev->submit.writes, &incoming);
+		spin_unlock(&mdev->submit.lock);
+
+		submit_fast_path(mdev, &incoming);
+		if (list_empty(&incoming))
+			break;
+
+		wait_event(mdev->al_wait, prepare_al_transaction_nonblock(mdev, &incoming, &pending));
+		/* Maybe more was queued, while we prepared the transaction?
+		 * Try to stuff them into this transaction as well.
+		 * Be strictly non-blocking here, no wait_event, we already
+		 * have something to commit.
+		 * Stop if we don't make any more progress.
+		 */
+		for (;;) {
+			LIST_HEAD(more_pending);
+			LIST_HEAD(more_incoming);
+			bool made_progress;
+
+			/* It is ok to look outside the lock,
+			 * it's only an optimization anyways */
+			if (list_empty(&mdev->submit.writes))
+				break;
+
+			spin_lock(&mdev->submit.lock);
+			list_splice_tail_init(&mdev->submit.writes, &more_incoming);
+			spin_unlock(&mdev->submit.lock);
+
+			if (list_empty(&more_incoming))
+				break;
+
+			made_progress = prepare_al_transaction_nonblock(mdev, &more_incoming, &more_pending);
+
+			list_splice_tail_init(&more_pending, &pending);
+			list_splice_tail_init(&more_incoming, &incoming);
+
+			if (!made_progress)
+				break;
+		}
+		drbd_al_begin_io_commit(mdev, false);
+
+		list_for_each_entry_safe(req, tmp, &pending, tl_requests) {
+			list_del_init(&req->tl_requests);
+			drbd_send_and_submit(mdev, req);
+		}
+	}
 }
 
 void drbd_make_request(struct request_queue *q, struct bio *bio)
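do_submit() above implements a two-level strategy: a nonblocking prepare pass moves whatever fits into the current activity-log transaction, anything that returns -EBUSY stays on the incoming list for the next round, and each batch is committed in one go. A single-threaded sketch of that batching loop, with prepare_nonblock() as a hypothetical stand-in for drbd_al_begin_io_nonblock():

#include <stdio.h>

#define NREQ 6

struct req { int id, needs_retry, done; };

/* Stand-in: reports "busy" once per slow request, then succeeds. */
static int prepare_nonblock(struct req *r)
{
	if (r->needs_retry) {
		r->needs_retry = 0;
		return -1;       /* like -EBUSY: try next transaction */
	}
	return 0;
}

int main(void)
{
	struct req incoming[NREQ] = {
		{1, 0, 0}, {2, 1, 0}, {3, 0, 0},
		{4, 1, 0}, {5, 0, 0}, {6, 0, 0},
	};
	int remaining = NREQ;

	while (remaining) {
		int committed = 0;

		for (int i = 0; i < NREQ; i++) {
			if (incoming[i].done || prepare_nonblock(&incoming[i]))
				continue;        /* stays on incoming */
			incoming[i].done = 1;
			remaining--;
			committed++;
		}
		printf("committed a transaction of %d request(s)\n",
		       committed);
	}
	return 0;
}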
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index c08d229..978cb1a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -88,6 +88,14 @@
 	QUEUE_FOR_NET_READ,
 	QUEUE_FOR_SEND_OOS,
 
+	/* An empty flush is queued as P_BARRIER,
+	 * which will cause it to complete "successfully",
+	 * even if the local disk flush failed.
+	 *
+	 * Just like "real" requests, empty flushes (blkdev_issue_flush()) will
+	 * only see an error if neither local nor remote data is reachable. */
+	QUEUE_AS_DRBD_BARRIER,
+
 	SEND_CANCELED,
 	SEND_FAILED,
 	HANDED_OVER_TO_NETWORK,
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 0fe220c..90c5be2 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -570,6 +570,13 @@
 		  mdev->tconn->agreed_pro_version < 88)
 		rv = SS_NOT_SUPPORTED;
 
+	else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
+		rv = SS_NO_UP_TO_DATE_DISK;
+
+	else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+                 ns.pdsk == D_UNKNOWN)
+		rv = SS_NEED_CONNECTION;
+
 	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
 		rv = SS_CONNECTED_OUTDATES;
 
@@ -635,6 +642,10 @@
 	    && os.conn < C_WF_REPORT_PARAMS)
 		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
 
+	if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
+	    os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
+		rv = SS_OUTDATE_WO_CONN;
+
 	return rv;
 }
 
@@ -1377,13 +1388,6 @@
 			&drbd_bmio_set_n_write, &abw_start_sync,
 			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
 
-	/* We are invalidating our self... */
-	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
-	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
-		/* other bitmap operation expected during this phase */
-		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
-			"set_n_write from invalidate", BM_LOCKED_MASK);
-
 	/* first half of local IO error, failure to attach,
 	 * or administrative detach */
 	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
@@ -1748,13 +1752,9 @@
 	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
 		return SS_CW_FAILED_BY_PEER;
 
-	rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
-
-	if (rv == SS_UNKNOWN_ERROR)
-		rv = conn_is_valid_transition(tconn, mask, val, 0);
-
-	if (rv == SS_SUCCESS)
-		rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
+	rv = conn_is_valid_transition(tconn, mask, val, 0);
+	if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
+		rv = SS_UNKNOWN_ERROR; /* continue waiting */
 
 	return rv;
 }
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 9a664bd..58e08ff 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -89,6 +89,7 @@
 	[-SS_LOWER_THAN_OUTDATED] = "Disk state is lower than outdated",
 	[-SS_IN_TRANSIENT_STATE] = "In transient state, retry after next state change",
 	[-SS_CONCURRENT_ST_CHG] = "Concurrent state changes detected and aborted",
+	[-SS_OUTDATE_WO_CONN] = "Need a connection for a graceful disconnect/outdate peer",
 	[-SS_O_VOL_PEER_PRI] = "Other vol primary on peer not allowed by config",
 };
 
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 424dc7b..891c0ec 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -89,7 +89,8 @@
 	md_io->done = 1;
 	wake_up(&mdev->misc_wait);
 	bio_put(bio);
-	put_ldev(mdev);
+	if (mdev->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
+		put_ldev(mdev);
 }
 
 /* reads on behalf of the partner,
@@ -1410,7 +1411,7 @@
 	struct drbd_conf *mdev = w->mdev;
 
 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
-		drbd_al_begin_io(mdev, &req->i);
+		drbd_al_begin_io(mdev, &req->i, false);
 
 	drbd_req_make_private_bio(req, req->master_bio);
 	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
@@ -1425,7 +1426,7 @@
 	int resync_after;
 
 	while (1) {
-		if (!odev->ldev)
+		if (!odev->ldev || odev->state.disk == D_DISKLESS)
 			return 1;
 		rcu_read_lock();
 		resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
@@ -1433,7 +1434,7 @@
 		if (resync_after == -1)
 			return 1;
 		odev = minor_to_mdev(resync_after);
-		if (!expect(odev))
+		if (!odev)
 			return 1;
 		if ((odev->state.conn >= C_SYNC_SOURCE &&
 		     odev->state.conn <= C_PAUSED_SYNC_T) ||
@@ -1515,7 +1516,7 @@
 
 	if (o_minor == -1)
 		return NO_ERROR;
-	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
+	if (o_minor < -1 || o_minor > MINORMASK)
 		return ERR_RESYNC_AFTER;
 
 	/* check for loops */
@@ -1524,6 +1525,15 @@
 		if (odev == mdev)
 			return ERR_RESYNC_AFTER_CYCLE;
 
+		/* You are free to depend on diskless, non-existing,
+		 * or not yet/no longer existing minors.
+		 * We only reject dependency loops.
+		 * We cannot follow the dependency chain beyond a detached or
+		 * missing minor.
+		 */
+		if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
+			return NO_ERROR;
+
 		rcu_read_lock();
 		resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
 		rcu_read_unlock();
@@ -1652,7 +1662,9 @@
 	clear_bit(B_RS_H_DONE, &mdev->flags);
 
 	write_lock_irq(&global_state_lock);
-	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+	/* Did some connection breakage or IO error race with us? */
+	if (mdev->state.conn < C_CONNECTED
+	|| !get_ldev_if_state(mdev, D_NEGOTIATING)) {
 		write_unlock_irq(&global_state_lock);
 		mutex_unlock(mdev->state_mutex);
 		return;
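The loop check added above walks the chain of resync-after dependencies and rejects only genuine cycles; a chain that ends at a detached or missing minor is allowed. A compact sketch of that walk over a plain array standing in for the minor table:

#include <stdio.h>

#define NMINORS 4
#define NONE   (-1)

static int resync_after[NMINORS] = { NONE, NONE, NONE, NONE };

static int creates_cycle(int self, int depends_on)
{
	int odev = depends_on;

	while (odev != NONE) {
		if (odev == self)
			return 1;            /* ERR_RESYNC_AFTER_CYCLE */
		odev = resync_after[odev];   /* chain may end at NONE */
	}
	return 0;
}

int main(void)
{
	resync_after[1] = 0;                     /* 1 syncs after 0 */
	printf("2 after 1: cycle=%d\n", creates_cycle(2, 1));
	resync_after[0] = 2;                     /* 0 after 2 closes a loop */
	printf("2 after 1: cycle=%d\n", creates_cycle(2, 1));
	return 0;
}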
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2ddd64a..04ceb7e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3601,7 +3601,7 @@
 		pr_cont("\n");
 }
 
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	int drive = (long)disk->private_data;
 
@@ -3615,8 +3615,6 @@
 		opened_bdev[drive] = NULL;
 	mutex_unlock(&open_lock);
 	mutex_unlock(&floppy_mutex);
-
-	return 0;
 }
 
 /*
@@ -3777,7 +3775,6 @@
 	bio_vec.bv_len = size;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_idx = 0;
 	bio.bi_size = size;
 	bio.bi_bdev = bdev;
 	bio.bi_sector = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b2955b3..d92d50f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1518,7 +1518,7 @@
 	return err;
 }
 
-static int lo_release(struct gendisk *disk, fmode_t mode)
+static void lo_release(struct gendisk *disk, fmode_t mode)
 {
 	struct loop_device *lo = disk->private_data;
 	int err;
@@ -1535,7 +1535,7 @@
 		 */
 		err = loop_clr_fd(lo);
 		if (!err)
-			goto out_unlocked;
+			return;
 	} else {
 		/*
 		 * Otherwise keep thread (if running) and config,
@@ -1546,8 +1546,6 @@
 
 out:
 	mutex_unlock(&lo->lo_ctl_mutex);
-out_unlocked:
-	return 0;
 }
 
 static const struct block_device_operations lo_fops = {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 076ae7f..a56cfcd 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -780,6 +780,7 @@
 	.getgeo = mg_getgeo
 };
 
+#ifdef CONFIG_PM_SLEEP
 static int mg_suspend(struct device *dev)
 {
 	struct mg_drv_data *prv_data = dev->platform_data;
@@ -824,6 +825,7 @@
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
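The #ifdef CONFIG_PM_SLEEP guard exists because SIMPLE_DEV_PM_OPS() only references the suspend/resume callbacks when that option is set; without the guard the definitions become "defined but not used" warnings in !CONFIG_PM_SLEEP builds. A compile-time analogue of the pattern, with FEATURE_SLEEP as a made-up stand-in for the config option:

#include <stdio.h>

#define FEATURE_SLEEP 1          /* flip to 0: callbacks compile out */

struct pm_ops { int (*suspend)(void); int (*resume)(void); };

#if FEATURE_SLEEP
static int dev_suspend(void) { return puts("suspend"); }
static int dev_resume(void)  { return puts("resume"); }
#define PM_OPS { dev_suspend, dev_resume }
#else
#define PM_OPS { 0, 0 }          /* table and callbacks go together */
#endif

static const struct pm_ops ops = PM_OPS;

int main(void)
{
	if (ops.suspend)
		ops.suspend();
	if (ops.resume)
		ops.resume();
	return 0;
}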
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 32c6780..847107e 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -728,7 +728,10 @@
 	atomic_set(&port->commands[tag].active, 0);
 	release_slot(port, tag);
 
-	up(&port->cmd_slot);
+	if (unlikely(command->unaligned))
+		up(&port->cmd_slot_unal);
+	else
+		up(&port->cmd_slot);
 }
 
 /*
@@ -1560,10 +1563,12 @@
 	}
 #endif
 
+#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
 	/* Demux ID.DRAT & ID.RZAT to determine trim support */
 	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
 		port->dd->trim_supp = true;
 	else
+#endif
 		port->dd->trim_supp = false;
 
 	/* Set the identify buffer as valid. */
@@ -2557,7 +2562,7 @@
  */
 static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
 			      int nsect, int nents, int tag, void *callback,
-			      void *data, int dir)
+			      void *data, int dir, int unaligned)
 {
 	struct host_to_dev_fis	*fis;
 	struct mtip_port *port = dd->port;
@@ -2570,6 +2575,7 @@
 
 	command->scatter_ents = nents;
 
+	command->unaligned = unaligned;
 	/*
 	 * The number of retries for this command before it is
 	 * reported as a failure to the upper layers.
@@ -2598,6 +2604,9 @@
 	fis->res3        = 0;
 	fill_command_sg(dd, command, nents);
 
+	if (unaligned)
+		fis->device |= 1 << 7;
+
 	/* Populate the command header */
 	command->command_header->opts =
 			__force_bit2int cpu_to_le32(
@@ -2644,9 +2653,13 @@
  * return value
  *      None
  */
-static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag,
+								int unaligned)
 {
+	struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+							&dd->port->cmd_slot;
 	release_slot(dd->port, tag);
+	up(sem);
 }
 
 /*
@@ -2661,22 +2674,25 @@
  *	or NULL if no command slots are available.
  */
 static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
-						   int *tag)
+						   int *tag, int unaligned)
 {
+	struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
+							&dd->port->cmd_slot;
+
 	/*
 	 * It is possible that, even with this semaphore, a thread
 	 * may think that no command slots are available. Therefore, we
 	 * need to make an attempt to get_slot().
 	 */
-	down(&dd->port->cmd_slot);
+	down(sem);
 	*tag = get_slot(dd->port);
 
 	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
-		up(&dd->port->cmd_slot);
+		up(sem);
 		return NULL;
 	}
 	if (unlikely(*tag < 0)) {
-		up(&dd->port->cmd_slot);
+		up(sem);
 		return NULL;
 	}
 
@@ -3010,6 +3026,11 @@
 		dd->mmio + HOST_HSORG);
 }
 
+static int mtip_device_unaligned_constrained(struct driver_data *dd)
+{
+	return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
+}
+
 /*
  * Detect the details of the product, and store anything needed
  * into the driver data structure.  This includes product type and
@@ -3232,8 +3253,15 @@
 	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
 		dd->work[i].port = dd->port;
 
+	/* Enable unaligned IO constraints for some devices */
+	if (mtip_device_unaligned_constrained(dd))
+		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
+	else
+		dd->unal_qdepth = 0;
+
 	/* Counting semaphore to track command slot usage */
-	sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+	sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth);
+	sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
 
 	/* Spinlock to prevent concurrent issue */
 	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -3836,7 +3864,7 @@
 	struct scatterlist *sg;
 	struct bio_vec *bvec;
 	int nents = 0;
-	int tag = 0;
+	int tag = 0, unaligned = 0;
 
 	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
 		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
@@ -3872,7 +3900,15 @@
 		return;
 	}
 
-	sg = mtip_hw_get_scatterlist(dd, &tag);
+	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
+							dd->unal_qdepth) {
+		if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+			unaligned = 1;
+		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
+			unaligned = 1;
+	}
+
+	sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
 	if (likely(sg != NULL)) {
 		blk_queue_bounce(queue, &bio);
 
@@ -3880,7 +3916,7 @@
 			dev_warn(&dd->pdev->dev,
 				"Maximum number of SGL entries exceeded\n");
 			bio_io_error(bio);
-			mtip_hw_release_scatterlist(dd, tag);
+			mtip_hw_release_scatterlist(dd, tag, unaligned);
 			return;
 		}
 
@@ -3900,7 +3936,8 @@
 				tag,
 				bio_endio,
 				bio,
-				bio_data_dir(bio));
+				bio_data_dir(bio),
+				unaligned);
 	} else
 		bio_io_error(bio);
 }
@@ -4156,26 +4193,24 @@
  */
 static int mtip_block_shutdown(struct driver_data *dd)
 {
-	dev_info(&dd->pdev->dev,
-		"Shutting down %s ...\n", dd->disk->disk_name);
-
 	/* Delete our gendisk structure, and cleanup the blk queue. */
 	if (dd->disk) {
-		if (dd->disk->queue)
-			del_gendisk(dd->disk);
-		else
-			put_disk(dd->disk);
-	}
+		dev_info(&dd->pdev->dev,
+			"Shutting down %s ...\n", dd->disk->disk_name);
 
+		if (dd->disk->queue) {
+			del_gendisk(dd->disk);
+			blk_cleanup_queue(dd->queue);
+		} else
+			put_disk(dd->disk);
+		dd->disk  = NULL;
+		dd->queue = NULL;
+	}
 
 	spin_lock(&rssd_index_lock);
 	ida_remove(&rssd_index_ida, dd->index);
 	spin_unlock(&rssd_index_lock);
 
-	blk_cleanup_queue(dd->queue);
-	dd->disk  = NULL;
-	dd->queue = NULL;
-
 	mtip_hw_shutdown(dd);
 	return 0;
 }
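The mtip32xx changes split the command-slot pool with two counting semaphores, so unaligned writes (slower on this hardware) can hold at most MTIP_MAX_UNALIGNED_SLOTS slots and never starve aligned I/O. A userspace sketch of the same partitioning with POSIX semaphores; the slot counts are illustrative:

#include <semaphore.h>
#include <stdio.h>

#define NUM_SLOTS  32
#define UNAL_SLOTS  8

static sem_t cmd_slot, cmd_slot_unal;

static void issue(int unaligned)
{
	sem_t *sem = unaligned ? &cmd_slot_unal : &cmd_slot;

	sem_wait(sem);   /* take a slot of the right class */
	/* ... build and submit the command here ... */
	sem_post(sem);   /* the completion path releases the same class */
}

int main(void)
{
	sem_init(&cmd_slot, 0, NUM_SLOTS - UNAL_SLOTS);
	sem_init(&cmd_slot_unal, 0, UNAL_SLOTS);
	issue(0);
	issue(1);
	puts("one aligned and one unaligned command issued");
	return 0;
}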
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 8e8334c..3bb8a29 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -52,6 +52,9 @@
 #define MTIP_FTL_REBUILD_MAGIC		0xED51
 #define MTIP_FTL_REBUILD_TIMEOUT_MS	2400000
 
+/* unaligned IO handling */
+#define MTIP_MAX_UNALIGNED_SLOTS	8
+
 /* Macro to extract the tag bit number from a tag value. */
 #define MTIP_TAG_BIT(tag)	(tag & 0x1F)
 
@@ -333,6 +336,8 @@
 
 	int scatter_ents; /* Number of scatter list entries used */
 
+	int unaligned; /* command is unaligned on 4k boundary */
+
 	struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
 
 	int retries; /* The number of retries left for this command. */
@@ -452,6 +457,10 @@
 	 * command slots available.
 	 */
 	struct semaphore cmd_slot;
+
+	/* Semaphore to control queue depth of unaligned IOs */
+	struct semaphore cmd_slot_unal;
+
 	/* Spinlock for working around command-issue bug. */
 	spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
 };
@@ -502,6 +511,8 @@
 
 	int isr_binding;
 
+	int unal_qdepth; /* qdepth of unaligned IO queue */
+
 	struct list_head online_list; /* linkage for online list */
 
 	struct list_head remove_list; /* linkage for removing list */
diff --git a/drivers/block/nvme.c b/drivers/block/nvme-core.c
similarity index 78%
rename from drivers/block/nvme.c
rename to drivers/block/nvme-core.c
index 9dcefe4..8efdfaa 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme-core.c
@@ -39,14 +39,13 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-
+#include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
 #define NVME_Q_DEPTH 1024
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define NVME_MINORS 64
-#define NVME_IO_TIMEOUT	(5 * HZ)
 #define ADMIN_TIMEOUT	(60 * HZ)
 
 static int nvme_major;
@@ -60,43 +59,6 @@
 static struct task_struct *nvme_thread;
 
 /*
- * Represents an NVM Express device.  Each nvme_dev is a PCI function.
- */
-struct nvme_dev {
-	struct list_head node;
-	struct nvme_queue **queues;
-	u32 __iomem *dbs;
-	struct pci_dev *pci_dev;
-	struct dma_pool *prp_page_pool;
-	struct dma_pool *prp_small_pool;
-	int instance;
-	int queue_count;
-	int db_stride;
-	u32 ctrl_config;
-	struct msix_entry *entry;
-	struct nvme_bar __iomem *bar;
-	struct list_head namespaces;
-	char serial[20];
-	char model[40];
-	char firmware_rev[8];
-	u32 max_hw_sectors;
-};
-
-/*
- * An NVM Express namespace is equivalent to a SCSI LUN
- */
-struct nvme_ns {
-	struct list_head list;
-
-	struct nvme_dev *dev;
-	struct request_queue *queue;
-	struct gendisk *disk;
-
-	int ns_id;
-	int lba_shift;
-};
-
-/*
  * An NVM Express queue.  Each device has at least two (one for admin
  * commands and one for I/O commands).
  */
@@ -131,6 +93,7 @@
 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -261,12 +224,12 @@
 	return ctx;
 }
 
-static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
 {
 	return dev->queues[get_cpu() + 1];
 }
 
-static void put_nvmeq(struct nvme_queue *nvmeq)
+void put_nvmeq(struct nvme_queue *nvmeq)
 {
 	put_cpu();
 }
@@ -294,22 +257,6 @@
 	return 0;
 }
 
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries.  You can't see it in this data structure because C doesn't let
- * me express that.  Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
-	void *private;		/* For the use of the submitter of the I/O */
-	int npages;		/* In the PRP list. 0 means small pool in use */
-	int offset;		/* Of PRP list */
-	int nents;		/* Used in scatterlist */
-	int length;		/* Of data, in bytes */
-	dma_addr_t first_dma;
-	struct scatterlist sg[0];
-};
-
 static __le64 **iod_list(struct nvme_iod *iod)
 {
 	return ((void *)iod) + iod->offset;
@@ -343,7 +290,7 @@
 	return iod;
 }
 
-static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
 {
 	const int last_prp = PAGE_SIZE / 8 - 1;
 	int i;
@@ -361,16 +308,6 @@
 	kfree(iod);
 }
 
-static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
-{
-	struct nvme_queue *nvmeq = get_nvmeq(dev);
-	if (bio_list_empty(&nvmeq->sq_cong))
-		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-	bio_list_add(&nvmeq->sq_cong, bio);
-	put_nvmeq(nvmeq);
-	wake_up_process(nvme_thread);
-}
-
 static void bio_completion(struct nvme_dev *dev, void *ctx,
 						struct nvme_completion *cqe)
 {
@@ -382,19 +319,15 @@
 		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	nvme_free_iod(dev, iod);
-	if (status) {
+	if (status)
 		bio_endio(bio, -EIO);
-	} else if (bio->bi_vcnt > bio->bi_idx) {
-		requeue_bio(dev, bio);
-	} else {
+	else
 		bio_endio(bio, 0);
-	}
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev,
-			struct nvme_common_command *cmd, struct nvme_iod *iod,
-			int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+			struct nvme_iod *iod, int total_len, gfp_t gfp)
 {
 	struct dma_pool *pool;
 	int length = total_len;
@@ -473,43 +406,193 @@
 	return total_len;
 }
 
+struct nvme_bio_pair {
+	struct bio b1, b2, *parent;
+	struct bio_vec *bv1, *bv2;
+	int err;
+	atomic_t cnt;
+};
+
+static void nvme_bio_pair_endio(struct bio *bio, int err)
+{
+	struct nvme_bio_pair *bp = bio->bi_private;
+
+	if (err)
+		bp->err = err;
+
+	if (atomic_dec_and_test(&bp->cnt)) {
+		bio_endio(bp->parent, bp->err);
+		if (bp->bv1)
+			kfree(bp->bv1);
+		if (bp->bv2)
+			kfree(bp->bv2);
+		kfree(bp);
+	}
+}
+
+static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
+							int len, int offset)
+{
+	struct nvme_bio_pair *bp;
+
+	BUG_ON(len > bio->bi_size);
+	BUG_ON(idx > bio->bi_vcnt);
+
+	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
+	if (!bp)
+		return NULL;
+	bp->err = 0;
+
+	bp->b1 = *bio;
+	bp->b2 = *bio;
+
+	bp->b1.bi_size = len;
+	bp->b2.bi_size -= len;
+	bp->b1.bi_vcnt = idx;
+	bp->b2.bi_idx = idx;
+	bp->b2.bi_sector += len >> 9;
+
+	if (offset) {
+		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+								GFP_ATOMIC);
+		if (!bp->bv1)
+			goto split_fail_1;
+
+		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
+								GFP_ATOMIC);
+		if (!bp->bv2)
+			goto split_fail_2;
+
+		memcpy(bp->bv1, bio->bi_io_vec,
+			bio->bi_max_vecs * sizeof(struct bio_vec));
+		memcpy(bp->bv2, bio->bi_io_vec,
+			bio->bi_max_vecs * sizeof(struct bio_vec));
+
+		bp->b1.bi_io_vec = bp->bv1;
+		bp->b2.bi_io_vec = bp->bv2;
+		bp->b2.bi_io_vec[idx].bv_offset += offset;
+		bp->b2.bi_io_vec[idx].bv_len -= offset;
+		bp->b1.bi_io_vec[idx].bv_len = offset;
+		bp->b1.bi_vcnt++;
+	} else
+		bp->bv1 = bp->bv2 = NULL;
+
+	bp->b1.bi_private = bp;
+	bp->b2.bi_private = bp;
+
+	bp->b1.bi_end_io = nvme_bio_pair_endio;
+	bp->b2.bi_end_io = nvme_bio_pair_endio;
+
+	bp->parent = bio;
+	atomic_set(&bp->cnt, 2);
+
+	return bp;
+
+ split_fail_2:
+	kfree(bp->bv1);
+ split_fail_1:
+	kfree(bp);
+	return NULL;
+}
+
+static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
+						int idx, int len, int offset)
+{
+	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
+	if (!bp)
+		return -ENOMEM;
+
+	if (bio_list_empty(&nvmeq->sq_cong))
+		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+	bio_list_add(&nvmeq->sq_cong, &bp->b1);
+	bio_list_add(&nvmeq->sq_cong, &bp->b2);
+
+	return 0;
+}
+
 /* NVMe scatterlists require no holes in the virtual address */
 #define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
 			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
 
-static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
 	struct bio_vec *bvec, *bvprv = NULL;
 	struct scatterlist *sg = NULL;
-	int i, old_idx, length = 0, nsegs = 0;
+	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+
+	if (nvmeq->dev->stripe_size)
+		split_len = nvmeq->dev->stripe_size -
+			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
-	old_idx = bio->bi_idx;
 	bio_for_each_segment(bvec, bio, i) {
 		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
 			sg->length += bvec->bv_len;
 		} else {
 			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-				break;
+				return nvme_split_and_submit(bio, nvmeq, i,
+								length, 0);
+
 			sg = sg ? sg + 1 : iod->sg;
 			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
 							bvec->bv_offset);
 			nsegs++;
 		}
+
+		if (split_len - length < bvec->bv_len)
+			return nvme_split_and_submit(bio, nvmeq, i, split_len,
+							split_len - length);
 		length += bvec->bv_len;
 		bvprv = bvec;
 	}
-	bio->bi_idx = i;
 	iod->nents = nsegs;
 	sg_mark_end(sg);
-	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
-		bio->bi_idx = old_idx;
+	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;
-	}
+
+	BUG_ON(length != bio->bi_size);
 	return length;
 }
 
+/*
+ * We reuse the small pool to allocate the 16-byte range here as it is not
+ * worth having a special pool for these or additional cases to handle freeing
+ * the iod.
+ */
+static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+		struct bio *bio, struct nvme_iod *iod, int cmdid)
+{
+	struct nvme_dsm_range *range;
+	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
+							&iod->first_dma);
+	if (!range)
+		return -ENOMEM;
+
+	iod_list(iod)[0] = (__le64 *)range;
+	iod->npages = 0;
+
+	range->cattr = cpu_to_le32(0);
+	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->dsm.opcode = nvme_cmd_dsm;
+	cmnd->dsm.command_id = cmdid;
+	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd->dsm.nr = 0;
+	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+	if (++nvmeq->sq_tail == nvmeq->q_depth)
+		nvmeq->sq_tail = 0;
+	writel(nvmeq->sq_tail, nvmeq->q_db);
+
+	return 0;
+}
+
 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
@@ -527,7 +610,7 @@
 	return 0;
 }
 
-static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
 {
 	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
 					special_completion, NVME_IO_TIMEOUT);
@@ -567,6 +650,12 @@
 	if (unlikely(cmdid < 0))
 		goto free_iod;
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+		if (result)
+			goto free_cmdid;
+		return result;
+	}
 	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
 
@@ -591,8 +680,8 @@
 		dma_dir = DMA_FROM_DEVICE;
 	}
 
-	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
-	if (result < 0)
+	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
+	if (result <= 0)
 		goto free_cmdid;
 	length = result;
 
@@ -600,13 +689,11 @@
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
 								GFP_ATOMIC);
-	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	bio->bi_sector += length >> 9;
-
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
 	writel(nvmeq->sq_tail, nvmeq->q_db);
@@ -724,8 +811,8 @@
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
-			struct nvme_command *cmd, u32 *result, unsigned timeout)
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+						u32 *result, unsigned timeout)
 {
 	int cmdid;
 	struct sync_cmd_info cmdinfo;
@@ -741,7 +828,7 @@
 
 	set_current_state(TASK_KILLABLE);
 	nvme_submit_cmd(nvmeq, cmd);
-	schedule();
+	schedule_timeout(timeout);
 
 	if (cmdinfo.status == -EINTR) {
 		nvme_abort_command(nvmeq, cmdid);
@@ -754,7 +841,7 @@
 	return cmdinfo.status;
 }
 
-static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 								u32 *result)
 {
 	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
@@ -827,7 +914,7 @@
 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 							dma_addr_t dma_addr)
 {
 	struct nvme_command c;
@@ -841,7 +928,7 @@
 	return nvme_submit_admin_cmd(dev, &c, NULL);
 }
 
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
 					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
@@ -855,8 +942,8 @@
 	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
-static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
-			unsigned dword11, dma_addr_t dma_addr, u32 *result)
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
 
@@ -885,7 +972,7 @@
 		void *ctx;
 		nvme_completion_fn fn;
 		static struct nvme_completion cqe = {
-			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
 		};
 
 		if (timeout && !time_after(now, info[cmdid].timeout))
@@ -966,7 +1053,7 @@
 	return nvmeq;
 
  free_cqdma:
-	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
  free_nvmeq:
 	kfree(nvmeq);
@@ -1021,15 +1108,60 @@
 	return ERR_PTR(result);
 }
 
+static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
+{
+	unsigned long timeout;
+	u32 bit = enabled ? NVME_CSTS_RDY : 0;
+
+	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+
+	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
+		msleep(100);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&dev->pci_dev->dev,
+				"Device not ready; aborting initialisation\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * If the device has been passed off to us in an enabled state, just clear
+ * the enabled bit.  The spec says we should set the 'shutdown notification
+ * bits', but doing so may cause the device to complete commands to the
+ * admin queue ... and we don't know what memory that might be pointing at!
+ */
+static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+	u32 cc = readl(&dev->bar->cc);
+
+	if (cc & NVME_CC_ENABLE)
+		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
+	return nvme_wait_ready(dev, cap, false);
+}
+
+static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
+{
+	return nvme_wait_ready(dev, cap, true);
+}
+
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
-	int result = 0;
+	int result;
 	u32 aqa;
-	u64 cap;
-	unsigned long timeout;
+	u64 cap = readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
 
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+	dev->db_stride = NVME_CAP_STRIDE(cap);
+
+	result = nvme_disable_ctrl(dev, cap);
+	if (result < 0)
+		return result;
 
 	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
 	if (!nvmeq)
@@ -1043,38 +1175,28 @@
 	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
 	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 
-	writel(0, &dev->bar->cc);
 	writel(aqa, &dev->bar->aqa);
 	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
 	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
 	writel(dev->ctrl_config, &dev->bar->cc);
 
-	cap = readq(&dev->bar->cap);
-	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
-	dev->db_stride = NVME_CAP_STRIDE(cap);
-
-	while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
-		msleep(100);
-		if (fatal_signal_pending(current))
-			result = -EINTR;
-		if (time_after(jiffies, timeout)) {
-			dev_err(&dev->pci_dev->dev,
-				"Device not ready; aborting initialisation\n");
-			result = -ENODEV;
-		}
-	}
-
-	if (result) {
-		nvme_free_queue_mem(nvmeq);
-		return result;
-	}
+	result = nvme_enable_ctrl(dev, cap);
+	if (result)
+		goto free_q;
 
 	result = queue_request_irq(dev, nvmeq, "nvme admin");
+	if (result)
+		goto free_q;
+
 	dev->queues[0] = nvmeq;
 	return result;
+
+ free_q:
+	nvme_free_queue_mem(nvmeq);
+	return result;
 }
 
-static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 				unsigned long addr, unsigned length)
 {
 	int i, err, count, nents, offset;
@@ -1130,7 +1252,7 @@
 	return ERR_PTR(err);
 }
 
-static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod)
 {
 	int i;
@@ -1148,13 +1270,19 @@
 	struct nvme_queue *nvmeq;
 	struct nvme_user_io io;
 	struct nvme_command c;
-	unsigned length;
-	int status;
-	struct nvme_iod *iod;
+	unsigned length, meta_len;
+	int status, i;
+	struct nvme_iod *iod, *meta_iod = NULL;
+	dma_addr_t meta_dma_addr;
+	void *meta, *uninitialized_var(meta_mem);
 
 	if (copy_from_user(&io, uio, sizeof(io)))
 		return -EFAULT;
 	length = (io.nblocks + 1) << ns->lba_shift;
+	meta_len = (io.nblocks + 1) * ns->ms;
+
+	if (meta_len && ((io.metadata & 3) || !io.metadata))
+		return -EINVAL;
 
 	switch (io.opcode) {
 	case nvme_cmd_write:
@@ -1176,11 +1304,42 @@
 	c.rw.slba = cpu_to_le64(io.slba);
 	c.rw.length = cpu_to_le16(io.nblocks);
 	c.rw.control = cpu_to_le16(io.control);
-	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
-	c.rw.reftag = io.reftag;
-	c.rw.apptag = io.apptag;
-	c.rw.appmask = io.appmask;
-	/* XXX: metadata */
+	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
+	c.rw.reftag = cpu_to_le32(io.reftag);
+	c.rw.apptag = cpu_to_le16(io.apptag);
+	c.rw.appmask = cpu_to_le16(io.appmask);
+
+	if (meta_len) {
+		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len);
+		if (IS_ERR(meta_iod)) {
+			status = PTR_ERR(meta_iod);
+			meta_iod = NULL;
+			goto unmap;
+		}
+
+		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
+						&meta_dma_addr, GFP_KERNEL);
+		if (!meta_mem) {
+			status = -ENOMEM;
+			goto unmap;
+		}
+
+		if (io.opcode & 1) {
+			int meta_offset = 0;
+
+			for (i = 0; i < meta_iod->nents; i++) {
+				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+						meta_iod->sg[i].offset;
+				memcpy(meta_mem + meta_offset, meta,
+						meta_iod->sg[i].length);
+				kunmap_atomic(meta);
+				meta_offset += meta_iod->sg[i].length;
+			}
+		}
+
+		c.rw.metadata = cpu_to_le64(meta_dma_addr);
+	}
+
 	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
 
 	nvmeq = get_nvmeq(dev);
@@ -1196,8 +1355,33 @@
 	else
 		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
 
+	if (meta_len) {
+		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
+			int meta_offset = 0;
+
+			for (i = 0; i < meta_iod->nents; i++) {
+				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
+						meta_iod->sg[i].offset;
+				memcpy(meta, meta_mem + meta_offset,
+						meta_iod->sg[i].length);
+				kunmap_atomic(meta);
+				meta_offset += meta_iod->sg[i].length;
+			}
+		}
+
+		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
+								meta_dma_addr);
+	}
+
+ unmap:
 	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
 	nvme_free_iod(dev, iod);
+
+	if (meta_iod) {
+		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
+		nvme_free_iod(dev, meta_iod);
+	}
+
 	return status;
 }
 
@@ -1208,6 +1392,7 @@
 	struct nvme_command c;
 	int status, length;
 	struct nvme_iod *uninitialized_var(iod);
+	unsigned timeout;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -1237,10 +1422,13 @@
 								GFP_KERNEL);
 	}
 
+	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
+								ADMIN_TIMEOUT;
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
+		status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
+								timeout);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1266,6 +1454,10 @@
 		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
 	case NVME_IOCTL_SUBMIT_IO:
 		return nvme_submit_io(ns, (void __user *)arg);
+	case SG_GET_VERSION_NUM:
+		return nvme_sg_get_version_num((void __user *)arg);
+	case SG_IO:
+		return nvme_sg_io(ns, (void __user *)arg);
 	default:
 		return -ENOTTY;
 	}
@@ -1282,13 +1474,17 @@
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
 		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
-		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
-			bio_list_add_head(&nvmeq->sq_cong, bio);
-			break;
-		}
+
 		if (bio_list_empty(&nvmeq->sq_cong))
 			remove_wait_queue(&nvmeq->sq_full,
 							&nvmeq->sq_cong_wait);
+		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+			if (bio_list_empty(&nvmeq->sq_cong))
+				add_wait_queue(&nvmeq->sq_full,
+							&nvmeq->sq_cong_wait);
+			bio_list_add_head(&nvmeq->sq_cong, bio);
+			break;
+		}
 	}
 }
 
@@ -1297,7 +1493,7 @@
 	struct nvme_dev *dev;
 
 	while (!kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
 		list_for_each_entry(dev, &dev_list, node) {
 			int i;
@@ -1314,8 +1510,7 @@
 			}
 		}
 		spin_unlock(&dev_list_lock);
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ);
+		schedule_timeout(round_jiffies_relative(HZ));
 	}
 	return 0;
 }
@@ -1347,6 +1542,16 @@
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+	u32 logical_block_size = queue_logical_block_size(ns->queue);
+	ns->queue->limits.discard_zeroes_data = 0;
+	ns->queue->limits.discard_alignment = logical_block_size;
+	ns->queue->limits.discard_granularity = logical_block_size;
+	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
 static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
 			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
 {
@@ -1366,7 +1571,6 @@
 	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
@@ -1378,6 +1582,7 @@
 	ns->disk = disk;
 	lbaf = id->flbas & 0xf;
 	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -1392,6 +1597,9 @@
 	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
 	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
+	if (dev->oncs & NVME_CTRL_ONCS_DSM)
+		nvme_config_discard(ns);
+
 	return ns;
 
  out_free_queue:
@@ -1496,14 +1704,21 @@
 		nvme_free_queue(dev, i);
 }
 
+/*
+ * Return: error value if an error occurred setting up the queues or calling
+ * Identify Device.  0 if these succeeded, even if adding some of the
+ * namespaces failed.  At the moment, these failures are silent.  TBD which
+ * failures should be reported.
+ */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
 	int res, nn, i;
-	struct nvme_ns *ns, *next;
+	struct nvme_ns *ns;
 	struct nvme_id_ctrl *ctrl;
 	struct nvme_id_ns *id_ns;
 	void *mem;
 	dma_addr_t dma_addr;
+	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
 	res = nvme_setup_io_queues(dev);
 	if (res)
@@ -1511,22 +1726,26 @@
 
 	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
 								GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
 
 	res = nvme_identify(dev, 0, 1, dma_addr);
 	if (res) {
 		res = -EIO;
-		goto out_free;
+		goto out;
 	}
 
 	ctrl = mem;
 	nn = le32_to_cpup(&ctrl->nn);
+	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
 	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
 	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
-	if (ctrl->mdts) {
-		int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+	if (ctrl->mdts)
 		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-	}
+	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
+			(dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+		dev->stripe_size = 1 << (ctrl->vs[3] + shift);
 
 	id_ns = mem;
 	for (i = 1; i <= nn; i++) {
@@ -1548,14 +1767,7 @@
 	}
 	list_for_each_entry(ns, &dev->namespaces, list)
 		add_disk(ns->disk);
-
-	goto out;
-
- out_free:
-	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-		list_del(&ns->list);
-		nvme_ns_free(ns);
-	}
+	res = 0;
 
  out:
 	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
@@ -1634,6 +1846,56 @@
 	spin_unlock(&dev_list_lock);
 }
 
+static void nvme_free_dev(struct kref *kref)
+{
+	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
+	nvme_dev_remove(dev);
+	pci_disable_msix(dev->pci_dev);
+	iounmap(dev->bar);
+	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
+	pci_disable_device(dev->pci_dev);
+	pci_release_regions(dev->pci_dev);
+	kfree(dev->queues);
+	kfree(dev->entry);
+	kfree(dev);
+}
+
+static int nvme_dev_open(struct inode *inode, struct file *f)
+{
+	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
+								miscdev);
+	kref_get(&dev->kref);
+	f->private_data = dev;
+	return 0;
+}
+
+static int nvme_dev_release(struct inode *inode, struct file *f)
+{
+	struct nvme_dev *dev = f->private_data;
+	kref_put(&dev->kref, nvme_free_dev);
+	return 0;
+}
+
+static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	struct nvme_dev *dev = f->private_data;
+	switch (cmd) {
+	case NVME_IOCTL_ADMIN_CMD:
+		return nvme_user_admin_cmd(dev, (void __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations nvme_dev_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nvme_dev_open,
+	.release	= nvme_dev_release,
+	.unlocked_ioctl	= nvme_dev_ioctl,
+	.compat_ioctl	= nvme_dev_ioctl,
+};
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int bars, result = -ENOMEM;
@@ -1692,8 +1954,20 @@
 	if (result)
 		goto delete;
 
+	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
+	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+	dev->miscdev.parent = &pdev->dev;
+	dev->miscdev.name = dev->name;
+	dev->miscdev.fops = &nvme_dev_fops;
+	result = misc_register(&dev->miscdev);
+	if (result)
+		goto remove;
+
+	kref_init(&dev->kref);
 	return 0;
 
+ remove:
+	nvme_dev_remove(dev);
  delete:
 	spin_lock(&dev_list_lock);
 	list_del(&dev->node);
@@ -1719,16 +1993,8 @@
 static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
-	nvme_dev_remove(dev);
-	pci_disable_msix(pdev);
-	iounmap(dev->bar);
-	nvme_release_instance(dev);
-	nvme_release_prp_pools(dev);
-	pci_disable_device(pdev);
-	pci_release_regions(pdev);
-	kfree(dev->queues);
-	kfree(dev->entry);
-	kfree(dev);
+	misc_deregister(&dev->miscdev);
+	kref_put(&dev->kref, nvme_free_dev);
 }
 
 /* These functions are yet to be implemented */
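nvme_bio_split() above clones a bio into two halves that share one parent and one atomic count; whichever half completes last ends the parent, and an error from either half is sticky. A single-threaded sketch of that completion scheme, using C11 atomics in place of the bio machinery:

#include <stdatomic.h>
#include <stdio.h>

struct pair {
	atomic_int cnt;     /* halves still outstanding */
	int err;            /* first error wins */
};

static void child_endio(struct pair *bp, int err)
{
	if (err)
		bp->err = err;
	/* fetch_sub returns the old value: 1 means we were last */
	if (atomic_fetch_sub(&bp->cnt, 1) == 1)
		printf("parent completed, err=%d\n", bp->err);
}

int main(void)
{
	struct pair bp = { .err = 0 };

	atomic_init(&bp.cnt, 2);
	child_endio(&bp, 0);     /* first half: parent still pending */
	child_endio(&bp, -5);    /* failing half completes the parent */
	return 0;
}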
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
new file mode 100644
index 0000000..fed54b0
--- /dev/null
+++ b/drivers/block/nvme-scsi.c
@@ -0,0 +1,3053 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * Refer to the SCSI-NVMe Translation spec for details on how
+ * each command is translated.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <scsi/sg.h>
+#include <scsi/scsi.h>
+
+
+static int sg_version_num = 30534;	/* 2 digits for each component */
+
+#define SNTI_TRANSLATION_SUCCESS			0
+#define SNTI_INTERNAL_ERROR				1
+
+/* VPD Page Codes */
+#define VPD_SUPPORTED_PAGES				0x00
+#define VPD_SERIAL_NUMBER				0x80
+#define VPD_DEVICE_IDENTIFIERS				0x83
+#define VPD_EXTENDED_INQUIRY				0x86
+#define VPD_BLOCK_DEV_CHARACTERISTICS			0xB1
+
+/* CDB offsets */
+#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET		6
+#define REPORT_LUNS_SR_OFFSET				2
+#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET		10
+#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET		4
+#define REQUEST_SENSE_DESC_OFFSET			1
+#define REQUEST_SENSE_DESC_MASK				0x01
+#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE		1
+#define INQUIRY_EVPD_BYTE_OFFSET			1
+#define INQUIRY_PAGE_CODE_BYTE_OFFSET			2
+#define INQUIRY_EVPD_BIT_MASK				1
+#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET		3
+#define START_STOP_UNIT_CDB_IMMED_OFFSET		1
+#define START_STOP_UNIT_CDB_IMMED_MASK			0x1
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET	3
+#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK		0xF
+#define START_STOP_UNIT_CDB_POWER_COND_OFFSET		4
+#define START_STOP_UNIT_CDB_POWER_COND_MASK		0xF0
+#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET		4
+#define START_STOP_UNIT_CDB_NO_FLUSH_MASK		0x4
+#define START_STOP_UNIT_CDB_START_OFFSET		4
+#define START_STOP_UNIT_CDB_START_MASK			0x1
+#define WRITE_BUFFER_CDB_MODE_OFFSET			1
+#define WRITE_BUFFER_CDB_MODE_MASK			0x1F
+#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET		2
+#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET		3
+#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET	6
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET		1
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK		0xC0
+#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT		6
+#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET		1
+#define FORMAT_UNIT_CDB_LONG_LIST_MASK			0x20
+#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET		1
+#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK		0x10
+#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
+#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
+#define FORMAT_UNIT_PROT_INT_OFFSET			3
+#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
+#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07
+#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET		7
+
+/* Misc. defines */
+#define NIBBLE_SHIFT					4
+#define FIXED_SENSE_DATA				0x70
+#define DESC_FORMAT_SENSE_DATA				0x72
+#define FIXED_SENSE_DATA_ADD_LENGTH			10
+#define LUN_ENTRY_SIZE					8
+#define LUN_DATA_HEADER_SIZE				8
+#define ALL_LUNS_RETURNED				0x02
+#define ALL_WELL_KNOWN_LUNS_RETURNED			0x01
+#define RESTRICTED_LUNS_RETURNED			0x00
+#define NVME_POWER_STATE_START_VALID			0x00
+#define NVME_POWER_STATE_ACTIVE				0x01
+#define NVME_POWER_STATE_IDLE				0x02
+#define NVME_POWER_STATE_STANDBY			0x03
+#define NVME_POWER_STATE_LU_CONTROL			0x07
+#define POWER_STATE_0					0
+#define POWER_STATE_1					1
+#define POWER_STATE_2					2
+#define POWER_STATE_3					3
+#define DOWNLOAD_SAVE_ACTIVATE				0x05
+#define DOWNLOAD_SAVE_DEFER_ACTIVATE			0x0E
+#define ACTIVATE_DEFERRED_MICROCODE			0x0F
+#define FORMAT_UNIT_IMMED_MASK				0x2
+#define FORMAT_UNIT_IMMED_OFFSET			1
+#define KELVIN_TEMP_FACTOR				273
+#define FIXED_FMT_SENSE_DATA_SIZE			18
+#define DESC_FMT_SENSE_DATA_SIZE			8
+
+/* SCSI/NVMe defines and bit masks */
+#define INQ_STANDARD_INQUIRY_PAGE			0x00
+#define INQ_SUPPORTED_VPD_PAGES_PAGE			0x00
+#define INQ_UNIT_SERIAL_NUMBER_PAGE			0x80
+#define INQ_DEVICE_IDENTIFICATION_PAGE			0x83
+#define INQ_EXTENDED_INQUIRY_DATA_PAGE			0x86
+#define INQ_BDEV_CHARACTERISTICS_PAGE			0xB1
+#define INQ_SERIAL_NUMBER_LENGTH			0x14
+#define INQ_NUM_SUPPORTED_VPD_PAGES			5
+#define VERSION_SPC_4					0x06
+#define ACA_UNSUPPORTED					0
+#define STANDARD_INQUIRY_LENGTH				36
+#define ADDITIONAL_STD_INQ_LENGTH			31
+#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH		0x3C
+#define RESERVED_FIELD					0
+
+/* SCSI READ/WRITE Defines */
+#define IO_CDB_WP_MASK					0xE0
+#define IO_CDB_WP_SHIFT					5
+#define IO_CDB_FUA_MASK					0x8
+#define IO_6_CDB_LBA_OFFSET				0
+#define IO_6_CDB_LBA_MASK				0x001FFFFF
+#define IO_6_CDB_TX_LEN_OFFSET				4
+#define IO_6_DEFAULT_TX_LEN				256
+#define IO_10_CDB_LBA_OFFSET				2
+#define IO_10_CDB_TX_LEN_OFFSET				7
+#define IO_10_CDB_WP_OFFSET				1
+#define IO_10_CDB_FUA_OFFSET				1
+#define IO_12_CDB_LBA_OFFSET				2
+#define IO_12_CDB_TX_LEN_OFFSET				6
+#define IO_12_CDB_WP_OFFSET				1
+#define IO_12_CDB_FUA_OFFSET				1
+#define IO_16_CDB_FUA_OFFSET				1
+#define IO_16_CDB_WP_OFFSET				1
+#define IO_16_CDB_LBA_OFFSET				2
+#define IO_16_CDB_TX_LEN_OFFSET				10
+
+/* Mode Sense/Select defines */
+#define MODE_PAGE_INFO_EXCEP				0x1C
+#define MODE_PAGE_CACHING				0x08
+#define MODE_PAGE_CONTROL				0x0A
+#define MODE_PAGE_POWER_CONDITION			0x1A
+#define MODE_PAGE_RETURN_ALL				0x3F
+#define MODE_PAGE_BLK_DES_LEN				0x08
+#define MODE_PAGE_LLBAA_BLK_DES_LEN			0x10
+#define MODE_PAGE_CACHING_LEN				0x14
+#define MODE_PAGE_CONTROL_LEN				0x0C
+#define MODE_PAGE_POW_CND_LEN				0x28
+#define MODE_PAGE_INF_EXC_LEN				0x0C
+#define MODE_PAGE_ALL_LEN				0x54
+#define MODE_SENSE6_MPH_SIZE				4
+#define MODE_SENSE6_ALLOC_LEN_OFFSET			4
+#define MODE_SENSE_PAGE_CONTROL_OFFSET			2
+#define MODE_SENSE_PAGE_CONTROL_MASK			0xC0
+#define MODE_SENSE_PAGE_CODE_OFFSET			2
+#define MODE_SENSE_PAGE_CODE_MASK			0x3F
+#define MODE_SENSE_LLBAA_OFFSET				1
+#define MODE_SENSE_LLBAA_MASK				0x10
+#define MODE_SENSE_LLBAA_SHIFT				4
+#define MODE_SENSE_DBD_OFFSET				1
+#define MODE_SENSE_DBD_MASK				8
+#define MODE_SENSE_DBD_SHIFT				3
+#define MODE_SENSE10_MPH_SIZE				8
+#define MODE_SENSE10_ALLOC_LEN_OFFSET			7
+#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET		1
+#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET		1
+#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET	4
+#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET	7
+#define MODE_SELECT_CDB_PAGE_FORMAT_MASK		0x10
+#define MODE_SELECT_CDB_SAVE_PAGES_MASK			0x1
+#define MODE_SELECT_6_BD_OFFSET				3
+#define MODE_SELECT_10_BD_OFFSET			6
+#define MODE_SELECT_10_LLBAA_OFFSET			4
+#define MODE_SELECT_10_LLBAA_MASK			1
+#define MODE_SELECT_6_MPH_SIZE				4
+#define MODE_SELECT_10_MPH_SIZE				8
+#define CACHING_MODE_PAGE_WCE_MASK			0x04
+#define MODE_SENSE_BLK_DESC_ENABLED			0
+#define MODE_SENSE_BLK_DESC_COUNT			1
+#define MODE_SELECT_PAGE_CODE_MASK			0x3F
+#define SHORT_DESC_BLOCK				8
+#define LONG_DESC_BLOCK					16
+#define MODE_PAGE_POW_CND_LEN_FIELD			0x26
+#define MODE_PAGE_INF_EXC_LEN_FIELD			0x0A
+#define MODE_PAGE_CACHING_LEN_FIELD			0x12
+#define MODE_PAGE_CONTROL_LEN_FIELD			0x0A
+#define MODE_SENSE_PC_CURRENT_VALUES			0
+
+/* Log Sense defines */
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE		0x00
+#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH		0x07
+#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE		0x2F
+#define LOG_PAGE_TEMPERATURE_PAGE			0x0D
+#define LOG_SENSE_CDB_SP_OFFSET				1
+#define LOG_SENSE_CDB_SP_NOT_ENABLED			0
+#define LOG_SENSE_CDB_PC_OFFSET				2
+#define LOG_SENSE_CDB_PC_MASK				0xC0
+#define LOG_SENSE_CDB_PC_SHIFT				6
+#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES		1
+#define LOG_SENSE_CDB_PAGE_CODE_MASK			0x3F
+#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET		7
+#define REMAINING_INFO_EXCP_PAGE_LENGTH			0x8
+#define LOG_INFO_EXCP_PAGE_LENGTH			0xC
+#define REMAINING_TEMP_PAGE_LENGTH			0xC
+#define LOG_TEMP_PAGE_LENGTH				0x10
+#define LOG_TEMP_UNKNOWN				0xFF
+#define SUPPORTED_LOG_PAGES_PAGE_LENGTH			0x3
+
+/* Read Capacity defines */
+#define READ_CAP_10_RESP_SIZE				8
+#define READ_CAP_16_RESP_SIZE				32
+
+/* NVMe Namespace and Command Defines */
+#define NVME_GET_SMART_LOG_PAGE				0x02
+#define NVME_GET_FEAT_TEMP_THRESH			0x04
+#define BYTES_TO_DWORDS					4
+#define NVME_MAX_FIRMWARE_SLOT				7
+
+/* Report LUNs defines */
+#define REPORT_LUNS_FIRST_LUN_OFFSET			8
+
+/* SCSI ADDITIONAL SENSE Codes */
+
+#define SCSI_ASC_NO_SENSE				0x00
+#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT		0x03
+#define SCSI_ASC_LUN_NOT_READY				0x04
+#define SCSI_ASC_WARNING				0x0B
+#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED		0x10
+#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED		0x10
+#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED		0x10
+#define SCSI_ASC_UNRECOVERED_READ_ERROR			0x11
+#define SCSI_ASC_MISCOMPARE_DURING_VERIFY		0x1D
+#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID		0x20
+#define SCSI_ASC_ILLEGAL_COMMAND			0x20
+#define SCSI_ASC_ILLEGAL_BLOCK				0x21
+#define SCSI_ASC_INVALID_CDB				0x24
+#define SCSI_ASC_INVALID_LUN				0x25
+#define SCSI_ASC_INVALID_PARAMETER			0x26
+#define SCSI_ASC_FORMAT_COMMAND_FAILED			0x31
+#define SCSI_ASC_INTERNAL_TARGET_FAILURE		0x44
+
+/* SCSI ADDITIONAL SENSE Code Qualifiers */
+
+#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE			0x00
+#define SCSI_ASCQ_FORMAT_COMMAND_FAILED			0x01
+#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED		0x01
+#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED		0x02
+#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED		0x03
+#define SCSI_ASCQ_FORMAT_IN_PROGRESS			0x04
+#define SCSI_ASCQ_POWER_LOSS_EXPECTED			0x08
+#define SCSI_ASCQ_INVALID_LUN_ID			0x09
+
+/*
+ * DEVICE-SPECIFIC PARAMETER field in the mode parameter header (see
+ * sbc2r16); a value of 0x10 would advertise DPOFUA support.
+ */
+#define DEVICE_SPECIFIC_PARAMETER			0
+#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
+
+/* MACROs to extract information from CDBs */
+
+#define GET_OPCODE(cdb)		cdb[0]
+
+#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)
+
+#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))
+
+#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
+(cdb[index + 1] <<  8) | \
+(cdb[index + 2] <<  0))
+
+#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
+(cdb[index + 1] << 16) | \
+(cdb[index + 2] <<  8) | \
+(cdb[index + 3] <<  0))
+
+#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
+(((u64)cdb[index + 1]) << 48) | \
+(((u64)cdb[index + 2]) << 40) | \
+(((u64)cdb[index + 3]) << 32) | \
+(((u64)cdb[index + 4]) << 24) | \
+(((u64)cdb[index + 5]) << 16) | \
+(((u64)cdb[index + 6]) <<  8) | \
+(((u64)cdb[index + 7]) <<  0))
+
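+/*
+ * These macros extract big-endian CDB fields, as defined by SAM/SPC.
+ * For example (illustrative only): with cdb[2] = 0x12 and cdb[3] = 0x34,
+ * GET_U16_FROM_CDB(cdb, 2) yields 0x1234.
+ */
+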
+/* Inquiry Helper Macros */
+#define GET_INQ_EVPD_BIT(cdb) \
+((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) &		\
+INQUIRY_EVPD_BIT_MASK) ? 1 : 0)
+
+#define GET_INQ_PAGE_CODE(cdb)					\
+(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))
+
+#define GET_INQ_ALLOC_LENGTH(cdb)				\
+(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))
+
+/* Report LUNs Helper Macros */
+#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb)			\
+(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Read Capacity Helper Macros */
+#define GET_READ_CAP_16_ALLOC_LENGTH(cdb)			\
+(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))
+
+#define IS_READ_CAP_16(cdb)					\
+((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)
+
+/* Request Sense Helper Macros */
+#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb)			\
+(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))
+
+/* Mode Sense Helper Macros */
+#define GET_MODE_SENSE_DBD(cdb)					\
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >>	\
+MODE_SENSE_DBD_SHIFT)
+
+#define GET_MODE_SENSE_LLBAA(cdb)				\
+((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) &		\
+MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)
+
+#define GET_MODE_SENSE_MPH_SIZE(cdb10)				\
+(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
+
+
+/*
+ * Struct to gather data that needs to be extracted from a SCSI CDB.
+ * Not conforming to any particular CDB variant, but compatible with all.
+ */
+
+struct nvme_trans_io_cdb {
+	u8 fua;
+	u8 prot_info;
+	u64 lba;
+	u32 xfer_len;
+};
+
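+/*
+ * Illustrative mapping (per the IO_10_CDB_* defines above): for a READ(10)
+ * CDB, lba comes from bytes 2-5, xfer_len from bytes 7-8, and fua from
+ * bit 3 of byte 1.
+ */
+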
+
+/* Internal Helper Functions */
+
+
+/* Copy data to userspace memory */
+
+static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
+								unsigned long n)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	unsigned long not_copied;
+	int i;
+	void *index = from;
+	size_t remaining = n;
+	size_t xfer_len;
+
+	if (hdr->iovec_count > 0) {
+		struct sg_iovec sgl;
+
+		for (i = 0; i < hdr->iovec_count; i++) {
+			not_copied = copy_from_user(&sgl, hdr->dxferp +
+						i * sizeof(struct sg_iovec),
+						sizeof(struct sg_iovec));
+			if (not_copied)
+				return -EFAULT;
+			xfer_len = min(remaining, sgl.iov_len);
+			not_copied = copy_to_user(sgl.iov_base, index,
+								xfer_len);
+			if (not_copied) {
+				res = -EFAULT;
+				break;
+			}
+			index += xfer_len;
+			remaining -= xfer_len;
+			if (remaining == 0)
+				break;
+		}
+		return res;
+	}
+	not_copied = copy_to_user(hdr->dxferp, from, n);
+	if (not_copied)
+		res = -EFAULT;
+	return res;
+}
+
+/* Copy data from userspace memory */
+
+static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
+								unsigned long n)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	unsigned long not_copied;
+	int i;
+	void *index = to;
+	size_t remaining = n;
+	size_t xfer_len;
+
+	if (hdr->iovec_count > 0) {
+		struct sg_iovec sgl;
+
+		for (i = 0; i < hdr->iovec_count; i++) {
+			not_copied = copy_from_user(&sgl, hdr->dxferp +
+						i * sizeof(struct sg_iovec),
+						sizeof(struct sg_iovec));
+			if (not_copied)
+				return -EFAULT;
+			xfer_len = min(remaining, sgl.iov_len);
+			not_copied = copy_from_user(index, sgl.iov_base,
+								xfer_len);
+			if (not_copied) {
+				res = -EFAULT;
+				break;
+			}
+			index += xfer_len;
+			remaining -= xfer_len;
+			if (remaining == 0)
+				break;
+		}
+		return res;
+	}
+
+	not_copied = copy_from_user(to, hdr->dxferp, n);
+	if (not_copied)
+		res = -EFAULT;
+	return res;
+}
+
+/* Status/Sense Buffer Writeback */
+
+static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
+				 u8 asc, u8 ascq)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 xfer_len;
+	u8 resp[DESC_FMT_SENSE_DATA_SIZE];
+
+	if (scsi_status_is_good(status)) {
+		hdr->status = SAM_STAT_GOOD;
+		hdr->masked_status = GOOD;
+		hdr->host_status = DID_OK;
+		hdr->driver_status = DRIVER_OK;
+		hdr->sb_len_wr = 0;
+	} else {
+		hdr->status = status;
+		hdr->masked_status = status >> 1;
+		hdr->host_status = DID_OK;
+		hdr->driver_status = DRIVER_OK;
+
+		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
+		resp[0] = DESC_FORMAT_SENSE_DATA;
+		resp[1] = sense_key;
+		resp[2] = asc;
+		resp[3] = ascq;
+
+		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
+		hdr->sb_len_wr = xfer_len;
+		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
+			res = -EFAULT;
+	}
+
+	return res;
+}
+
+static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
+{
+	u8 status, sense_key, asc, ascq;
+	int res = SNTI_TRANSLATION_SUCCESS;
+
+	/* For non-nvme (Linux) errors, simply return the error code */
+	if (nvme_sc < 0)
+		return nvme_sc;
+
+	/* Mask DNR, More, and reserved fields */
+	nvme_sc &= 0x7FF;
+
+	switch (nvme_sc) {
+	/* Generic Command Status */
+	case NVME_SC_SUCCESS:
+		status = SAM_STAT_GOOD;
+		sense_key = NO_SENSE;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_INVALID_OPCODE:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_ILLEGAL_COMMAND;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_INVALID_FIELD:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_INVALID_CDB;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_DATA_XFER_ERROR:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_POWER_LOSS:
+		status = SAM_STAT_TASK_ABORTED;
+		sense_key = ABORTED_COMMAND;
+		asc = SCSI_ASC_WARNING;
+		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
+		break;
+	case NVME_SC_INTERNAL:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = HARDWARE_ERROR;
+		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_ABORT_REQ:
+		status = SAM_STAT_TASK_ABORTED;
+		sense_key = ABORTED_COMMAND;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_ABORT_QUEUE:
+		status = SAM_STAT_TASK_ABORTED;
+		sense_key = ABORTED_COMMAND;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_FUSED_FAIL:
+		status = SAM_STAT_TASK_ABORTED;
+		sense_key = ABORTED_COMMAND;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_FUSED_MISSING:
+		status = SAM_STAT_TASK_ABORTED;
+		sense_key = ABORTED_COMMAND;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_INVALID_NS:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+		ascq = SCSI_ASCQ_INVALID_LUN_ID;
+		break;
+	case NVME_SC_LBA_RANGE:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_ILLEGAL_BLOCK;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_CAP_EXCEEDED:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_NS_NOT_READY:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = NOT_READY;
+		asc = SCSI_ASC_LUN_NOT_READY;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+
+	/* Command Specific Status */
+	case NVME_SC_INVALID_FORMAT:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
+		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
+		break;
+	case NVME_SC_BAD_ATTRIBUTES:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_INVALID_CDB;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+
+	/* Media Errors */
+	case NVME_SC_WRITE_FAULT:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_READ_ERROR:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_GUARD_CHECK:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
+		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
+		break;
+	case NVME_SC_APPTAG_CHECK:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
+		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
+		break;
+	case NVME_SC_REFTAG_CHECK:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MEDIUM_ERROR;
+		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
+		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
+		break;
+	case NVME_SC_COMPARE_FAILED:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = MISCOMPARE;
+		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	case NVME_SC_ACCESS_DENIED:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
+		ascq = SCSI_ASCQ_INVALID_LUN_ID;
+		break;
+
+	/* Unspecified/Default */
+	case NVME_SC_CMDID_CONFLICT:
+	case NVME_SC_CMD_SEQ_ERROR:
+	case NVME_SC_CQ_INVALID:
+	case NVME_SC_QID_INVALID:
+	case NVME_SC_QUEUE_SIZE:
+	case NVME_SC_ABORT_LIMIT:
+	case NVME_SC_ABORT_MISSING:
+	case NVME_SC_ASYNC_LIMIT:
+	case NVME_SC_FIRMWARE_SLOT:
+	case NVME_SC_FIRMWARE_IMAGE:
+	case NVME_SC_INVALID_VECTOR:
+	case NVME_SC_INVALID_LOG_PAGE:
+	default:
+		status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ILLEGAL_REQUEST;
+		asc = SCSI_ASC_NO_SENSE;
+		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		break;
+	}
+
+	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
+
+	return res;
+}
+
+/* INQUIRY Helper Functions */
+
+static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *inq_response,
+					int alloc_len)
+{
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ns *id_ns;
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	int xfer_len;
+	u8 resp_data_format = 0x02;
+	u8 protect;
+	u8 cmdque = 0x01 << 1;
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+				&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out_dma;
+	}
+
+	/* nvme ns identify - use DPS value for PROTECT field */
+	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	/*
+	 * If nvme_sc was negative, res will be negative here.
+	 * If nvme_sc was positive, the status will have been translated, and
+	 *  res can only be 0 or negative.
+	 *    - If 0 && nvme_sc > 0, go into the next if, where res gets
+	 *      nvme_sc.
+	 *    - If negative, return, because it's a Linux error.
+	 */
+	if (res)
+		goto out_free;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_free;
+	}
+	id_ns = mem;
+	protect = id_ns->dps ? 0x01 : 0;
+
+	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	inq_response[2] = VERSION_SPC_4;
+	inq_response[3] = resp_data_format;	/*normaca=0 | hisup=0 */
+	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
+	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
+	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
+	strncpy(&inq_response[8], "NVMe    ", 8);
+	strncpy(&inq_response[16], dev->model, 16);
+	strncpy(&inq_response[32], dev->firmware_rev, 4);
+
+	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+			  dma_addr);
+ out_dma:
+	return res;
+}
+
+static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *inq_response,
+					int alloc_len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+
+	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
+	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
+	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
+	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
+	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
+	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
+	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
+
+	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+	return res;
+}
+
+static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *inq_response,
+					int alloc_len)
+{
+	struct nvme_dev *dev = ns->dev;
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+
+	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
+	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
+	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
+
+	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+	return res;
+}
+
+static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 *inq_response, int alloc_len)
+{
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ctrl *id_ctrl;
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	u8 ieee[4];
+	int xfer_len;
+	__be32 tmp_id = cpu_to_be32(ns->ns_id);
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+					&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out_dma;
+	}
+
+	/* nvme controller identify */
+	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_free;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_free;
+	}
+	id_ctrl = mem;
+
+	/* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
+	ieee[0] = id_ctrl->ieee[0] << 4;
+	ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
+	ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
+	ieee[3] = id_ctrl->ieee[2] >> 4;
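+	/*
+	 * Worked example (illustrative values only): id_ctrl->ieee of
+	 * {0x12, 0x34, 0x56} becomes ieee[] = {0x20, 0x41, 0x63, 0x05};
+	 * the 24-bit OUI is shifted up one nibble so it can sit next to
+	 * the 4-bit NAA field in the descriptor built below.
+	 */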
+
+	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
+	inq_response[3] = 20;      /* Page Length */
+	/* Designation Descriptor start */
+	inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
+	inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
+	inq_response[6] = 0x00;    /* Rsvd */
+	inq_response[7] = 16;      /* Designator Length */
+	/* Designator start */
+	inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
+	inq_response[9] = ieee[2];        /* IEEE ID */
+	inq_response[10] = ieee[1];       /* IEEE ID */
+	inq_response[11] = ieee[0];       /* IEEE ID| Vendor Specific ID... */
+	inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
+	inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
+	inq_response[14] = dev->serial[0];
+	inq_response[15] = dev->serial[1];
+	inq_response[16] = dev->model[0];
+	inq_response[17] = dev->model[1];
+	memcpy(&inq_response[18], &tmp_id, sizeof(u32));
+	/* Last 2 bytes are zero */
+
+	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+			  dma_addr);
+ out_dma:
+	return res;
+}
+
+static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					int alloc_len)
+{
+	u8 *inq_response;
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ctrl *id_ctrl;
+	struct nvme_id_ns *id_ns;
+	int xfer_len;
+	u8 microcode = 0x80;
+	u8 spt;
+	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
+	u8 grd_chk, app_chk, ref_chk, protect;
+	u8 uask_sup = 0x20;
+	u8 v_sup;
+	u8 luiclr = 0x01;
+
+	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+	if (inq_response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+							&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out_dma;
+	}
+
+	/* nvme ns identify */
+	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_free;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_free;
+	}
+	id_ns = mem;
+	spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
+	protect = id_ns->dps ? 0x01 : 0;
+	grd_chk = protect << 2;
+	app_chk = protect << 1;
+	ref_chk = protect;
+
+	/* nvme controller identify */
+	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_free;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_free;
+	}
+	id_ctrl = mem;
+	v_sup = id_ctrl->vwc;
+
+	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
+	inq_response[2] = 0x00;    /* Page Length MSB */
+	inq_response[3] = 0x3C;    /* Page Length LSB */
+	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
+	inq_response[5] = uask_sup;
+	inq_response[6] = v_sup;
+	inq_response[7] = luiclr;
+	inq_response[8] = 0;
+	inq_response[9] = 0;
+
+	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+ out_free:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+			  dma_addr);
+ out_dma:
+	kfree(inq_response);
+ out_mem:
+	return res;
+}
+
+static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					int alloc_len)
+{
+	u8 *inq_response;
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+
+	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
+	if (inq_response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+
+	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
+	inq_response[2] = 0x00;    /* Page Length MSB */
+	inq_response[3] = 0x3C;    /* Page Length LSB */
+	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
+	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
+	inq_response[6] = 0x00;    /* Form Factor */
+
+	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
+
+	kfree(inq_response);
+ out_mem:
+	return res;
+}
+
+/* LOG SENSE Helper Functions */
+
+static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					int alloc_len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+	u8 *log_response;
+
+	log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
+	if (log_response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+	memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+
+	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+	/* Subpage=0x00, Page Length MSB=0 */
+	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
+	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
+	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
+
+	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+	kfree(log_response);
+ out_mem:
+	return res;
+}
+
+static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, int alloc_len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+	u8 *log_response;
+	struct nvme_command c;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_smart_log *smart_log;
+	dma_addr_t dma_addr;
+	void *mem;
+	u8 temp_c;
+	u16 temp_k;
+
+	log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
+	if (log_response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+	memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH);
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev,
+					sizeof(struct nvme_smart_log),
+					&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out_dma;
+	}
+
+	/* Get SMART Log Page */
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_admin_get_log_page;
+	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+	c.common.prp1 = cpu_to_le64(dma_addr);
+	/* NUMD is a 0's based dword count */
+	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
+			BYTES_TO_DWORDS) - 1) << 16) | NVME_GET_SMART_LOG_PAGE);
+	res = nvme_submit_admin_cmd(dev, &c, NULL);
+	if (res != NVME_SC_SUCCESS) {
+		temp_c = LOG_TEMP_UNKNOWN;
+	} else {
+		smart_log = mem;
+		temp_k = (smart_log->temperature[1] << 8) +
+				(smart_log->temperature[0]);
+		temp_c = temp_k - KELVIN_TEMP_FACTOR;
+	}
+
+	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
+	/* Subpage=0x00, Page Length MSB=0 */
+	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
+	/* Informational Exceptions Log Parameter 1 Start */
+	/* Parameter Code=0x0000 bytes 4,5 */
+	log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
+	log_response[7] = 0x04; /* PARAMETER LENGTH */
+	/* Add sense Code and qualifier = 0x00 each */
+	/* Use Temperature from NVMe Get Log Page, convert to C from K */
+	log_response[10] = temp_c;
+
+	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+			  mem, dma_addr);
+ out_dma:
+	kfree(log_response);
+ out_mem:
+	return res;
+}
+
+static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					int alloc_len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+	u8 *log_response;
+	struct nvme_command c;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_smart_log *smart_log;
+	dma_addr_t dma_addr;
+	void *mem;
+	u32 feature_resp;
+	u8 temp_c_cur, temp_c_thresh;
+	u16 temp_k;
+
+	log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
+	if (log_response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+	memset(log_response, 0, LOG_TEMP_PAGE_LENGTH);
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev,
+					sizeof(struct nvme_smart_log),
+					&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out_dma;
+	}
+
+	/* Get SMART Log Page */
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_admin_get_log_page;
+	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
+	c.common.prp1 = cpu_to_le64(dma_addr);
+	/* NUMD is a 0's based dword count */
+	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
+			BYTES_TO_DWORDS) - 1) << 16) | NVME_GET_SMART_LOG_PAGE);
+	res = nvme_submit_admin_cmd(dev, &c, NULL);
+	if (res != NVME_SC_SUCCESS) {
+		temp_c_cur = LOG_TEMP_UNKNOWN;
+	} else {
+		smart_log = mem;
+		temp_k = (smart_log->temperature[1] << 8) +
+				(smart_log->temperature[0]);
+		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
+	}
+
+	/* Get Features for Temp Threshold */
+	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
+								&feature_resp);
+	if (res != NVME_SC_SUCCESS)
+		temp_c_thresh = LOG_TEMP_UNKNOWN;
+	else
+		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
+
+	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
+	/* Subpage=0x00, Page Length MSB=0 */
+	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
+	/* Temperature Log Parameter 1 (Temperature) Start */
+	/* Parameter Code = 0x0000 */
+	log_response[6] = 0x01;		/* Format and Linking = 01b */
+	log_response[7] = 0x02;		/* Parameter Length */
+	/* Use Temperature from NVMe Get Log Page, convert to C from K */
+	log_response[9] = temp_c_cur;
+	/* Temperature Log Parameter 2 (Reference Temperature) Start */
+	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
+	log_response[12] = 0x01;	/* Format and Linking = 01b */
+	log_response[13] = 0x02;	/* Parameter Length */
+	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
+	log_response[15] = temp_c_thresh;
+
+	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
+	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
+
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
+			  mem, dma_addr);
+ out_dma:
+	kfree(log_response);
+ out_mem:
+	return res;
+}
+
+/* MODE SENSE Helper Functions */
+
+static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
+					u16 mode_data_length, u16 blk_desc_len)
+{
+	/* Quick check to make sure I don't stomp on my own memory... */
+	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
+		return SNTI_INTERNAL_ERROR;
+
+	if (cdb10) {
+		resp[0] = (mode_data_length & 0xFF00) >> 8;
+		resp[1] = (mode_data_length & 0x00FF);
+		/* resp[2] and [3] are zero */
+		resp[4] = llbaa;
+		resp[5] = RESERVED_FIELD;
+		resp[6] = (blk_desc_len & 0xFF00) >> 8;
+		resp[7] = (blk_desc_len & 0x00FF);
+	} else {
+		resp[0] = (mode_data_length & 0x00FF);
+		/* resp[1] and [2] are zero */
+		resp[3] = (blk_desc_len & 0x00FF);
+	}
+
+	return SNTI_TRANSLATION_SUCCESS;
+}
+
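+/*
+ * Example (illustrative): a MODE SENSE(10) header with
+ * mode_data_length = 0x0016, llbaa = 0 and blk_desc_len = 8 is filled in
+ * as {0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08}.
+ */
+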
+static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+				    u8 *resp, int len, u8 llbaa)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ns *id_ns;
+	u8 flbas;
+	u32 lba_length;
+
+	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
+		return SNTI_INTERNAL_ERROR;
+	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
+		return SNTI_INTERNAL_ERROR;
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+							&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+	/* nvme ns identify */
+	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_dma;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_dma;
+	}
+	id_ns = mem;
+	flbas = (id_ns->flbas) & 0x0F;
+	lba_length = (1 << (id_ns->lbaf[flbas].ds));
+
+	if (llbaa == 0) {
+		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
+		/* Byte 4 is reserved */
+		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
+
+		memcpy(resp, &tmp_cap, sizeof(u32));
+		memcpy(&resp[4], &tmp_len, sizeof(u32));
+	} else {
+		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
+		__be32 tmp_len = cpu_to_be32(lba_length);
+
+		memcpy(resp, &tmp_cap, sizeof(u64));
+		/* Bytes 8, 9, 10, 11 are reserved */
+		memcpy(&resp[12], &tmp_len, sizeof(u32));
+	}
+
+ out_dma:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+			  dma_addr);
+ out:
+	return res;
+}
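+
+/*
+ * Example (illustrative): with ncap = 0x1000 and 512-byte LBAs, the short
+ * (llbaa == 0) descriptor built above is
+ * {0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00}.
+ */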
+
+static int nvme_trans_fill_control_page(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *resp,
+					int len)
+{
+	if (len < MODE_PAGE_CONTROL_LEN)
+		return SNTI_INTERNAL_ERROR;
+
+	resp[0] = MODE_PAGE_CONTROL;
+	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
+	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
+				 * D_SENSE=1, GLTSD=1, RLEC=0 */
+	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
+	/* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
+	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
+	/* resp[6] and [7] are obsolete, thus zero */
+	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
+	resp[9] = 0xFF;
+	/* Bytes 10,11: Extended selftest completion time = 0x0000 */
+
+	return SNTI_TRANSLATION_SUCCESS;
+}
+
+static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr,
+					u8 *resp, int len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	u32 feature_resp;
+	u8 vwc;
+
+	if (len < MODE_PAGE_CACHING_LEN)
+		return SNTI_INTERNAL_ERROR;
+
+	nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
+								&feature_resp);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out;
+	}
+	vwc = feature_resp & 0x00000001;
+
+	resp[0] = MODE_PAGE_CACHING;
+	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
+	resp[2] = vwc << 2;
+
+ out:
+	return res;
+}
+
+static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *resp,
+					int len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+
+	if (len < MODE_PAGE_POW_CND_LEN)
+		return SNTI_INTERNAL_ERROR;
+
+	resp[0] = MODE_PAGE_POWER_CONDITION;
+	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
+	/* All other bytes are zero */
+
+	return res;
+}
+
+static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *resp,
+					int len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+
+	if (len < MODE_PAGE_INF_EXC_LEN)
+		return SNTI_INTERNAL_ERROR;
+
+	resp[0] = MODE_PAGE_INFO_EXCEP;
+	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
+	resp[2] = 0x88;
+	/* All other bytes are zero */
+
+	return res;
+}
+
+static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+				     u8 *resp, int len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u16 mode_pages_offset_1 = 0;
+	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
+
+	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
+	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
+	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
+
+	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
+					MODE_PAGE_CACHING_LEN);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out;
+	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
+					MODE_PAGE_CONTROL_LEN);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out;
+	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
+					MODE_PAGE_POW_CND_LEN);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out;
+	res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
+					MODE_PAGE_INF_EXC_LEN);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out;
+
+ out:
+	return res;
+}
+
+static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
+{
+	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
+		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
+		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
+	} else {
+		return 0;
+	}
+}
+
+static int nvme_trans_mode_page_create(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *cmd,
+					u16 alloc_len, u8 cdb10,
+					int (*mode_page_fill_func)
+					(struct nvme_ns *,
+					struct sg_io_hdr *hdr, u8 *, int),
+					u16 mode_pages_tot_len)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int xfer_len;
+	u8 *response;
+	u8 dbd, llbaa;
+	u16 resp_size;
+	int mph_size;
+	u16 mode_pages_offset_1;
+	u16 blk_desc_len, blk_desc_offset, mode_data_length;
+
+	dbd = GET_MODE_SENSE_DBD(cmd);
+	llbaa = GET_MODE_SENSE_LLBAA(cmd);
+	mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
+	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
+
+	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
+	/* Refer to spc4r34 Table 440 for the Mode Data Length calculation */
+	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
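+	/*
+	 * e.g. (illustrative): MODE SENSE(10), llbaa = 0, one block
+	 * descriptor, caching page: resp_size = 8 + 8 + 0x14 = 36 bytes;
+	 * mode_data_length = 3 + 3 + 8 + 0x14 = 34, i.e. the total minus
+	 * the 2-byte MODE DATA LENGTH field itself.
+	 */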
+
+	blk_desc_offset = mph_size;
+	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
+
+	response = kmalloc(resp_size, GFP_KERNEL);
+	if (response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+	memset(response, 0, resp_size);
+
+	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
+					llbaa, mode_data_length, blk_desc_len);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out_free;
+	if (blk_desc_len > 0) {
+		res = nvme_trans_fill_blk_desc(ns, hdr,
+					       &response[blk_desc_offset],
+					       blk_desc_len, llbaa);
+		if (res != SNTI_TRANSLATION_SUCCESS)
+			goto out_free;
+	}
+	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
+					mode_pages_tot_len);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out_free;
+
+	xfer_len = min(alloc_len, resp_size);
+	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+ out_free:
+	kfree(response);
+ out_mem:
+	return res;
+}
+
+/* Read Capacity Helper Functions */
+
+static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
+								u8 cdb16)
+{
+	u8 flbas;
+	u32 lba_length;
+	u64 rlba;
+	u8 prot_en;
+	u8 p_type_lut[4] = {0, 0, 1, 2};
+	__be64 tmp_rlba;
+	__be32 tmp_rlba_32;
+	__be32 tmp_len;
+
+	flbas = (id_ns->flbas) & 0x0F;
+	lba_length = (1 << (id_ns->lbaf[flbas].ds));
+	rlba = le64_to_cpup(&id_ns->nsze) - 1;
+	prot_en = id_ns->dps ? 0x01 : 0;
+
+	if (!cdb16) {
+		if (rlba > 0xFFFFFFFF)
+			rlba = 0xFFFFFFFF;
+		tmp_rlba_32 = cpu_to_be32(rlba);
+		tmp_len = cpu_to_be32(lba_length);
+		memcpy(response, &tmp_rlba_32, sizeof(u32));
+		memcpy(&response[4], &tmp_len, sizeof(u32));
+	} else {
+		tmp_rlba = cpu_to_be64(rlba);
+		tmp_len = cpu_to_be32(lba_length);
+		memcpy(response, &tmp_rlba, sizeof(u64));
+		memcpy(&response[8], &tmp_len, sizeof(u32));
+		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
+		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
+		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
+		/* Bytes 16-31 - Reserved */
+	}
+}
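+
+/*
+ * Example (illustrative): a namespace with nsze = 8 and 512-byte LBAs yields
+ * a READ CAPACITY(10) response of {0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x02,
+ * 0x00}, i.e. returned LBA 7 and block length 0x200.
+ */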
+
+/* Start Stop Unit Helper Functions */
+
+static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+						u8 pc, u8 pcmod, u8 start)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ctrl *id_ctrl;
+	int lowest_pow_st;	/* max npss = lowest power consumption */
+	unsigned ps_desired = 0;
+
+	/* NVMe Controller Identify */
+	mem = dma_alloc_coherent(&dev->pci_dev->dev,
+				sizeof(struct nvme_id_ctrl),
+				&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_dma;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_dma;
+	}
+	id_ctrl = mem;
+	lowest_pow_st = id_ctrl->npss - 1;
+
+	switch (pc) {
+	case NVME_POWER_STATE_START_VALID:
+		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
+		if (pcmod == 0 && start == 0x1)
+			ps_desired = POWER_STATE_0;
+		if (pcmod == 0 && start == 0x0)
+			ps_desired = lowest_pow_st;
+		break;
+	case NVME_POWER_STATE_ACTIVE:
+		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
+		if (pcmod == 0)
+			ps_desired = POWER_STATE_0;
+		break;
+	case NVME_POWER_STATE_IDLE:
+		/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
+		/* min of desired state and (lps-1) because lps is STOP */
+		if (pcmod == 0x0)
+			ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1));
+		else if (pcmod == 0x1)
+			ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1));
+		else if (pcmod == 0x2)
+			ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1));
+		break;
+	case NVME_POWER_STATE_STANDBY:
+		/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
+		if (pcmod == 0x0)
+			ps_desired = max(0, (lowest_pow_st - 2));
+		else if (pcmod == 0x1)
+			ps_desired = max(0, (lowest_pow_st - 1));
+		break;
+	case NVME_POWER_STATE_LU_CONTROL:
+	default:
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		break;
+	}
+	nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
+				    NULL);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_dma;
+	if (nvme_sc)
+		res = nvme_sc;
+ out_dma:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+			  dma_addr);
+ out:
+	return res;
+}
+
+/* Write Buffer Helper Functions */
+/* Also used for Format Unit, with hdr passed as NULL and buffer_id as 0 */
+
+static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 opcode, u32 tot_len, u32 offset,
+					u8 buffer_id)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_command c;
+	struct nvme_iod *iod = NULL;
+	unsigned length;
+
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = opcode;
+	if (opcode == nvme_admin_download_fw) {
+		if (hdr->iovec_count > 0) {
+			/* Assuming SGL is not allowed for this command */
+			res = nvme_trans_completion(hdr,
+						SAM_STAT_CHECK_CONDITION,
+						ILLEGAL_REQUEST,
+						SCSI_ASC_INVALID_CDB,
+						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			goto out;
+		}
+		iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
+				(unsigned long)hdr->dxferp, tot_len);
+		if (IS_ERR(iod)) {
+			res = PTR_ERR(iod);
+			goto out;
+		}
+		length = nvme_setup_prps(dev, &c.common, iod, tot_len,
+								GFP_KERNEL);
+		if (length != tot_len) {
+			res = -ENOMEM;
+			goto out_unmap;
+		}
+
+		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
+		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
+	} else if (opcode == nvme_admin_activate_fw) {
+		u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
+		c.common.cdw10[0] = cpu_to_le32(cdw10);
+	}
+
+	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_unmap;
+	if (nvme_sc)
+		res = nvme_sc;
+
+ out_unmap:
+	if (opcode == nvme_admin_download_fw) {
+		nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
+		nvme_free_iod(dev, iod);
+	}
+ out:
+	return res;
+}
+
+/* Mode Select Helper Functions */
+
+static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
+						u16 *bd_len, u8 *llbaa)
+{
+	if (cdb10) {
+		/* 10 Byte CDB */
+		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
+			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
+		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
+				MODE_SELECT_10_LLBAA_MASK;
+	} else {
+		/* 6 Byte CDB */
+		*bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
+	}
+}
+
+static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
+					u16 idx, u16 bd_len, u8 llbaa)
+{
+	u16 bd_num;
+
+	bd_num = bd_len / ((llbaa == 0) ?
+			SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
+	/* Store block descriptor info if a FORMAT UNIT comes later */
+	/* TODO Saving 1st BD info; what to do if multiple BD received? */
+	if (llbaa == 0) {
+		/* Standard Block Descriptor - spc4r34 7.5.5.1 */
+		ns->mode_select_num_blocks =
+				(parm_list[idx + 1] << 16) +
+				(parm_list[idx + 2] << 8) +
+				(parm_list[idx + 3]);
+
+		ns->mode_select_block_len =
+				(parm_list[idx + 5] << 16) +
+				(parm_list[idx + 6] << 8) +
+				(parm_list[idx + 7]);
+	} else {
+		/* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
+		ns->mode_select_num_blocks =
+				(((u64)parm_list[idx + 0]) << 56) +
+				(((u64)parm_list[idx + 1]) << 48) +
+				(((u64)parm_list[idx + 2]) << 40) +
+				(((u64)parm_list[idx + 3]) << 32) +
+				(((u64)parm_list[idx + 4]) << 24) +
+				(((u64)parm_list[idx + 5]) << 16) +
+				(((u64)parm_list[idx + 6]) << 8) +
+				((u64)parm_list[idx + 7]);
+
+		ns->mode_select_block_len =
+				(parm_list[idx + 12] << 24) +
+				(parm_list[idx + 13] << 16) +
+				(parm_list[idx + 14] << 8) +
+				(parm_list[idx + 15]);
+	}
+}
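+
+/*
+ * Layout note: the short descriptor parsed above is the general mode
+ * parameter block descriptor of spc4r34 7.5.5.1 (byte 0 density code,
+ * bytes 1-3 number of blocks, byte 4 reserved, bytes 5-7 block length).
+ */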
+
+static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 *mode_page, u8 page_code)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	unsigned dword11;
+
+	switch (page_code) {
+	case MODE_PAGE_CACHING:
+		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
+		nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
+					    0, NULL);
+		res = nvme_trans_status_code(hdr, nvme_sc);
+		if (res)
+			break;
+		if (nvme_sc) {
+			res = nvme_sc;
+			break;
+		}
+		break;
+	case MODE_PAGE_CONTROL:
+		break;
+	case MODE_PAGE_POWER_CONDITION:
+		/* Verify the OS is not trying to set timers */
+		if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
+			res = nvme_trans_completion(hdr,
+						SAM_STAT_CHECK_CONDITION,
+						ILLEGAL_REQUEST,
+						SCSI_ASC_INVALID_PARAMETER,
+						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			if (!res)
+				res = SNTI_INTERNAL_ERROR;
+			break;
+		}
+		break;
+	default:
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		if (!res)
+			res = SNTI_INTERNAL_ERROR;
+		break;
+	}
+
+	return res;
+}
+
+static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+					u8 *cmd, u16 parm_list_len, u8 pf,
+					u8 sp, u8 cdb10)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 *parm_list;
+	u16 bd_len;
+	u8 llbaa = 0;
+	u16 index, saved_index;
+	u8 page_code;
+	u16 mp_size;
+
+	/* Get parm list from data-in/out buffer */
+	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
+	if (parm_list == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+
+	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out_mem;
+
+	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
+	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
+
+	if (bd_len != 0) {
+		/* Block Descriptors present, parse */
+		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
+		index += bd_len;
+	}
+	saved_index = index;
+
+	/* Multiple mode pages may be present; iterate through all */
+	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
+	do {
+		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+		mp_size = parm_list[index + 1] + 2;
+		if ((page_code != MODE_PAGE_CACHING) &&
+		    (page_code != MODE_PAGE_CONTROL) &&
+		    (page_code != MODE_PAGE_POWER_CONDITION)) {
+			res = nvme_trans_completion(hdr,
+						SAM_STAT_CHECK_CONDITION,
+						ILLEGAL_REQUEST,
+						SCSI_ASC_INVALID_CDB,
+						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			goto out_mem;
+		}
+		index += mp_size;
+	} while (index < parm_list_len);
+
+	/* In 2nd Iteration, do the NVME Commands */
+	index = saved_index;
+	do {
+		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
+		mp_size = parm_list[index + 1] + 2;
+		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
+								page_code);
+		if (res != SNTI_TRANSLATION_SUCCESS)
+			break;
+		index += mp_size;
+	} while (index < parm_list_len);
+
+ out_mem:
+	kfree(parm_list);
+ out:
+	return res;
+}
+
+/* Format Unit Helper Functions */
+
+static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
+					     struct sg_io_hdr *hdr)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ns *id_ns;
+	u8 flbas;
+
+	/*
+	 * SCSI expects a MODE SELECT to have been issued prior to
+	 * a FORMAT UNIT, with the block size and count taken from
+	 * the block descriptor in it. If no MODE SELECT has been
+	 * issued, FORMAT shall use the current values for both.
+	 */
+
+	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
+		mem = dma_alloc_coherent(&dev->pci_dev->dev,
+			sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
+		if (mem == NULL) {
+			res = -ENOMEM;
+			goto out;
+		}
+		/* nvme ns identify */
+		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+		res = nvme_trans_status_code(hdr, nvme_sc);
+		if (res)
+			goto out_dma;
+		if (nvme_sc) {
+			res = nvme_sc;
+			goto out_dma;
+		}
+		id_ns = mem;
+
+		if (ns->mode_select_num_blocks == 0)
+			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
+		if (ns->mode_select_block_len == 0) {
+			flbas = (id_ns->flbas) & 0x0F;
+			ns->mode_select_block_len =
+						(1 << (id_ns->lbaf[flbas].ds));
+		}
+ out_dma:
+		dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+				  mem, dma_addr);
+	}
+ out:
+	return res;
+}
+
+static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
+					u8 format_prot_info, u8 *nvme_pf_code)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 *parm_list;
+	u8 pf_usage, pf_code;
+
+	parm_list = kmalloc(len, GFP_KERNEL);
+	if (parm_list == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+	res = nvme_trans_copy_from_user(hdr, parm_list, len);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out_mem;
+
+	if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
+				FORMAT_UNIT_IMMED_MASK) != 0) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out_mem;
+	}
+
+	if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
+	    (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out_mem;
+	}
+	pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
+			FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
+	pf_code = (pf_usage << 2) | format_prot_info;
+	switch (pf_code) {
+	case 0:
+		*nvme_pf_code = 0;
+		break;
+	case 2:
+		*nvme_pf_code = 1;
+		break;
+	case 3:
+		*nvme_pf_code = 2;
+		break;
+	case 7:
+		*nvme_pf_code = 3;
+		break;
+	default:
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		break;
+	}
+
+ out_mem:
+	kfree(parm_list);
+ out:
+	return res;
+}
+
+static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+				   u8 prot_info)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ns *id_ns;
+	u8 i;
+	u8 flbas, nlbaf;
+	u8 selected_lbaf = 0xFF;
+	u32 cdw10 = 0;
+	struct nvme_command c;
+
+	/*
+	 * Loop through the LBA formats in id_ns to match the required
+	 * LBA format; put the index in cdw10.
+	 */
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+							&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+	/* nvme ns identify */
+	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_dma;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_dma;
+	}
+	id_ns = mem;
+	flbas = (id_ns->flbas) & 0x0F;
+	nlbaf = id_ns->nlbaf;
+
+	for (i = 0; i < nlbaf; i++) {
+		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
+			selected_lbaf = i;
+			break;
+		}
+	}
+	if (selected_lbaf > 0x0F) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+	}
+	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
+				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+	}
+
+	cdw10 |= prot_info << 5;
+	cdw10 |= selected_lbaf & 0x0F;
+	memset(&c, 0, sizeof(c));
+	c.format.opcode = nvme_admin_format_nvm;
+	c.format.nsid = cpu_to_le32(ns->ns_id);
+	c.format.cdw10 = cpu_to_le32(cdw10);
+
+	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_dma;
+	if (nvme_sc)
+		res = nvme_sc;
+
+ out_dma:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+			  dma_addr);
+ out:
+	return res;
+}
+
+/* Read/Write Helper Functions */
+
+static inline void nvme_trans_get_io_cdb6(u8 *cmd,
+					struct nvme_trans_io_cdb *cdb_info)
+{
+	cdb_info->fua = 0;
+	cdb_info->prot_info = 0;
+	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
+					IO_6_CDB_LBA_MASK;
+	cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
+
+	/* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
+	if (cdb_info->xfer_len == 0)
+		cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
+}
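+
+/*
+ * e.g. (illustrative): a READ(6) CDB of {0x08, 0x00, 0x00, 0x10, 0x00, 0x00}
+ * decodes to lba = 0x10, and since its TRANSFER LENGTH byte is 0,
+ * xfer_len = 256.
+ */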
+
+static inline void nvme_trans_get_io_cdb10(u8 *cmd,
+					struct nvme_trans_io_cdb *cdb_info)
+{
+	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
+					IO_CDB_FUA_MASK;
+	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
+					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
+	cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb12(u8 *cmd,
+					struct nvme_trans_io_cdb *cdb_info)
+{
+	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
+					IO_CDB_FUA_MASK;
+	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
+					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
+	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
+}
+
+static inline void nvme_trans_get_io_cdb16(u8 *cmd,
+					struct nvme_trans_io_cdb *cdb_info)
+{
+	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
+					IO_CDB_FUA_MASK;
+	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
+					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
+	cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
+	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
+}
+
+static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
+					struct nvme_trans_io_cdb *cdb_info,
+					u32 max_blocks)
+{
+	/* If using iovecs, send one nvme command per vector */
+	if (hdr->iovec_count > 0)
+		return hdr->iovec_count;
+	else if (cdb_info->xfer_len > max_blocks)
+		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
+	else
+		return 1;
+}
+
+static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
+					struct nvme_trans_io_cdb *cdb_info)
+{
+	u16 control = 0;
+
+	/* When Protection information support is added, implement here */
+
+	if (cdb_info->fua > 0)
+		control |= NVME_RW_FUA;
+
+	return control;
+}
+
+static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_dev *dev = ns->dev;
+	struct nvme_queue *nvmeq;
+	u32 num_cmds;
+	struct nvme_iod *iod;
+	u64 unit_len;
+	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
+	u32 retcode;
+	u32 i = 0;
+	u64 nvme_offset = 0;
+	void __user *next_mapping_addr;
+	struct nvme_command c;
+	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
+	u16 control;
+	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+
+	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
+
+	/*
+	 * This loop handles two cases.
+	 * First, when an SGL is used in the form of an iovec list:
+	 *   - Use iov_base as the next mapping address for the nvme command.
+	 *   - Use iov_len as the data transfer length for the command.
+	 * Second, when we have a single buffer:
+	 *   - If it is larger than max_blocks, split it into chunks and
+	 *     offset each nvme command accordingly.
+	 */
+	for (i = 0; i < num_cmds; i++) {
+		memset(&c, 0, sizeof(c));
+		if (hdr->iovec_count > 0) {
+			struct sg_iovec sgl;
+
+			retcode = copy_from_user(&sgl, hdr->dxferp +
+					i * sizeof(struct sg_iovec),
+					sizeof(struct sg_iovec));
+			if (retcode)
+				return -EFAULT;
+			unit_len = sgl.iov_len;
+			unit_num_blocks = unit_len >> ns->lba_shift;
+			next_mapping_addr = sgl.iov_base;
+		} else {
+			unit_num_blocks = min((u64)max_blocks,
+					(cdb_info->xfer_len - nvme_offset));
+			unit_len = unit_num_blocks << ns->lba_shift;
+			next_mapping_addr = hdr->dxferp +
+					((1 << ns->lba_shift) * nvme_offset);
+		}
+
+		c.rw.opcode = opcode;
+		c.rw.nsid = cpu_to_le32(ns->ns_id);
+		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
+		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
+		control = nvme_trans_io_get_control(ns, cdb_info);
+		c.rw.control = cpu_to_le16(control);
+
+		iod = nvme_map_user_pages(dev,
+			(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+			(unsigned long)next_mapping_addr, unit_len);
+		if (IS_ERR(iod)) {
+			res = PTR_ERR(iod);
+			goto out;
+		}
+		retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
+							GFP_KERNEL);
+		if (retcode != unit_len) {
+			nvme_unmap_user_pages(dev,
+				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+				iod);
+			nvme_free_iod(dev, iod);
+			res = -ENOMEM;
+			goto out;
+		}
+
+		nvme_offset += unit_num_blocks;
+
+		nvmeq = get_nvmeq(dev);
+		/*
+		 * Since nvme_submit_sync_cmd sleeps, we can't keep
+		 * preemption disabled.  We may be preempted at any
+		 * point, and be rescheduled to a different CPU.  That
+		 * will cause cacheline bouncing, but no additional
+		 * races since q_lock already protects against other
+		 * CPUs.
+		 */
+		put_nvmeq(nvmeq);
+		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
+						NVME_IO_TIMEOUT);
+		if (nvme_sc != NVME_SC_SUCCESS) {
+			nvme_unmap_user_pages(dev,
+				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+				iod);
+			nvme_free_iod(dev, iod);
+			res = nvme_trans_status_code(hdr, nvme_sc);
+			goto out;
+		}
+		nvme_unmap_user_pages(dev,
+				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+				iod);
+		nvme_free_iod(dev, iod);
+	}
+	res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
+
+ out:
+	return res;
+}
+
+
+/* SCSI Command Translation Functions */
+
+static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	struct nvme_trans_io_cdb cdb_info;
+	u8 opcode = cmd[0];
+	u64 xfer_bytes;
+	u64 sum_iov_len = 0;
+	struct sg_iovec sgl;
+	int i;
+	size_t not_copied;
+
+	/* Extract Fields from CDB */
+	switch (opcode) {
+	case WRITE_6:
+	case READ_6:
+		nvme_trans_get_io_cdb6(cmd, &cdb_info);
+		break;
+	case WRITE_10:
+	case READ_10:
+		nvme_trans_get_io_cdb10(cmd, &cdb_info);
+		break;
+	case WRITE_12:
+	case READ_12:
+		nvme_trans_get_io_cdb12(cmd, &cdb_info);
+		break;
+	case WRITE_16:
+	case READ_16:
+		nvme_trans_get_io_cdb16(cmd, &cdb_info);
+		break;
+	default:
+		/* Unreachable: callers only pass read/write opcodes here */
+		res = SNTI_INTERNAL_ERROR;
+		goto out;
+	}
+
+	/* Calculate total length of transfer (in bytes) */
+	if (hdr->iovec_count > 0) {
+		for (i = 0; i < hdr->iovec_count; i++) {
+			not_copied = copy_from_user(&sgl, hdr->dxferp +
+						i * sizeof(struct sg_iovec),
+						sizeof(struct sg_iovec));
+			if (not_copied)
+				return -EFAULT;
+			sum_iov_len += sgl.iov_len;
+			/* IO vector sizes should be multiples of block size */
+			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
+				res = nvme_trans_completion(hdr,
+						SAM_STAT_CHECK_CONDITION,
+						ILLEGAL_REQUEST,
+						SCSI_ASC_INVALID_PARAMETER,
+						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+				goto out;
+			}
+		}
+	} else {
+		sum_iov_len = hdr->dxfer_len;
+	}
+
+	/* As per the sg ioctl HOWTO, if the lengths differ, use the smaller one */
+	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
+
+	/* If the block count and actual data buffer size don't match, error out */
+	if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
+		res = -EINVAL;
+		goto out;
+	}
+
+	/* Check for 0 length transfer - it is not illegal */
+	if (cdb_info.xfer_len == 0)
+		goto out;
+
+	/* Send NVMe IO Command(s) */
+	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
+
+ out:
+	return res;
+}
+
+static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 evpd;
+	u8 page_code;
+	int alloc_len;
+	u8 *inq_response;
+
+	evpd = GET_INQ_EVPD_BIT(cmd);
+	page_code = GET_INQ_PAGE_CODE(cmd);
+	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
+
+	inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+	if (inq_response == NULL) {
+		res = -ENOMEM;
+		goto out_mem;
+	}
+
+	if (evpd == 0) {
+		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
+			res = nvme_trans_standard_inquiry_page(ns, hdr,
+						inq_response, alloc_len);
+		} else {
+			res = nvme_trans_completion(hdr,
+						SAM_STAT_CHECK_CONDITION,
+						ILLEGAL_REQUEST,
+						SCSI_ASC_INVALID_CDB,
+						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		}
+	} else {
+		switch (page_code) {
+		case VPD_SUPPORTED_PAGES:
+			res = nvme_trans_supported_vpd_pages(ns, hdr,
+						inq_response, alloc_len);
+			break;
+		case VPD_SERIAL_NUMBER:
+			res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
+								alloc_len);
+			break;
+		case VPD_DEVICE_IDENTIFIERS:
+			res = nvme_trans_device_id_page(ns, hdr, inq_response,
+								alloc_len);
+			break;
+		case VPD_EXTENDED_INQUIRY:
+			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
+			break;
+		case VPD_BLOCK_DEV_CHARACTERISTICS:
+			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
+			break;
+		default:
+			res = nvme_trans_completion(hdr,
+						SAM_STAT_CHECK_CONDITION,
+						ILLEGAL_REQUEST,
+						SCSI_ASC_INVALID_CDB,
+						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			break;
+		}
+	}
+	kfree(inq_response);
+ out_mem:
+	return res;
+}
+
+static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u16 alloc_len;
+	u8 sp;
+	u8 pc;
+	u8 page_code;
+
+	sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
+	if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	}
+	pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
+	page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
+	pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
+	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	}
+	alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
+	switch (page_code) {
+	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
+		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
+		break;
+	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
+		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
+		break;
+	case LOG_PAGE_TEMPERATURE_PAGE:
+		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
+		break;
+	default:
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		break;
+	}
+
+ out:
+	return res;
+}
+
+static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 cdb10 = 0;
+	u16 parm_list_len;
+	u8 page_format;
+	u8 save_pages;
+
+	page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
+	page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
+
+	save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
+	save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
+
+	if (GET_OPCODE(cmd) == MODE_SELECT) {
+		parm_list_len = GET_U8_FROM_CDB(cmd,
+				MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
+	} else {
+		parm_list_len = GET_U16_FROM_CDB(cmd,
+				MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
+		cdb10 = 1;
+	}
+
+	if (parm_list_len != 0) {
+		/*
+		 * According to SPC-4 r24, a parameter list length field of 0
+		 * shall not be considered an error
+		 */
+		res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
+						page_format, save_pages, cdb10);
+	}
+
+	return res;
+}
+
+static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u16 alloc_len;
+	u8 cdb10 = 0;
+	u8 page_code;
+	u8 pc;
+
+	if (GET_OPCODE(cmd) == MODE_SENSE) {
+		alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
+	} else {
+		alloc_len = GET_U16_FROM_CDB(cmd,
+						MODE_SENSE10_ALLOC_LEN_OFFSET);
+		cdb10 = 1;
+	}
+
+	pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
+						MODE_SENSE_PAGE_CONTROL_MASK;
+	if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	}
+
+	page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
+					MODE_SENSE_PAGE_CODE_MASK;
+	switch (page_code) {
+	case MODE_PAGE_CACHING:
+		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+						cdb10,
+						&nvme_trans_fill_caching_page,
+						MODE_PAGE_CACHING_LEN);
+		break;
+	case MODE_PAGE_CONTROL:
+		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+						cdb10,
+						&nvme_trans_fill_control_page,
+						MODE_PAGE_CONTROL_LEN);
+		break;
+	case MODE_PAGE_POWER_CONDITION:
+		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+						cdb10,
+						&nvme_trans_fill_pow_cnd_page,
+						MODE_PAGE_POW_CND_LEN);
+		break;
+	case MODE_PAGE_INFO_EXCEP:
+		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+						cdb10,
+						&nvme_trans_fill_inf_exc_page,
+						MODE_PAGE_INF_EXC_LEN);
+		break;
+	case MODE_PAGE_RETURN_ALL:
+		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
+						cdb10,
+						&nvme_trans_fill_all_pages,
+						MODE_PAGE_ALL_LEN);
+		break;
+	default:
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		break;
+	}
+
+ out:
+	return res;
+}
+
+static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	u32 alloc_len = READ_CAP_10_RESP_SIZE;
+	u32 resp_size = READ_CAP_10_RESP_SIZE;
+	u32 xfer_len;
+	u8 cdb16;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ns *id_ns;
+	u8 *response;
+
+	cdb16 = IS_READ_CAP_16(cmd);
+	if (cdb16) {
+		alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
+		resp_size = READ_CAP_16_RESP_SIZE;
+	}
+
+	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
+							&dma_addr, GFP_KERNEL);
+	if (mem == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+	/* nvme ns identify */
+	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out_dma;
+	if (nvme_sc) {
+		res = nvme_sc;
+		goto out_dma;
+	}
+	id_ns = mem;
+
+	response = kmalloc(resp_size, GFP_KERNEL);
+	if (response == NULL) {
+		res = -ENOMEM;
+		goto out_dma;
+	}
+	memset(response, 0, resp_size);
+	nvme_trans_fill_read_cap(response, id_ns, cdb16);
+
+	xfer_len = min(alloc_len, resp_size);
+	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+	kfree(response);
+ out_dma:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
+			  dma_addr);
+ out:
+	return res;
+}
+
+static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	u32 alloc_len, xfer_len, resp_size;
+	u8 select_report;
+	u8 *response;
+	struct nvme_dev *dev = ns->dev;
+	dma_addr_t dma_addr;
+	void *mem;
+	struct nvme_id_ctrl *id_ctrl;
+	u32 ll_length, lun_id;
+	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
+	__be32 tmp_len;
+
+	alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
+	select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
+
+	if ((select_report != ALL_LUNS_RETURNED) &&
+	    (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
+	    (select_report != RESTRICTED_LUNS_RETURNED)) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	} else {
+		/* NVMe Controller Identify */
+		mem = dma_alloc_coherent(&dev->pci_dev->dev,
+					sizeof(struct nvme_id_ctrl),
+					&dma_addr, GFP_KERNEL);
+		if (mem == NULL) {
+			res = -ENOMEM;
+			goto out;
+		}
+		nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
+		res = nvme_trans_status_code(hdr, nvme_sc);
+		if (res)
+			goto out_dma;
+		if (nvme_sc) {
+			res = nvme_sc;
+			goto out_dma;
+		}
+		id_ctrl = mem;
+		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
+		resp_size = ll_length + LUN_DATA_HEADER_SIZE;
+
+		if (alloc_len < resp_size) {
+			res = nvme_trans_completion(hdr,
+					SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+			goto out_dma;
+		}
+
+		response = kmalloc(resp_size, GFP_KERNEL);
+		if (response == NULL) {
+			res = -ENOMEM;
+			goto out_dma;
+		}
+		memset(response, 0, resp_size);
+
+		/* The first LUN ID will always be 0 per the SAM spec */
+		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
+			/*
+			 * Set the LUN Id and then increment to the next LUN
+			 * location in the parameter data.
+			 */
+			__be64 tmp_id = cpu_to_be64(lun_id);
+			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
+			lun_id_offset += LUN_ENTRY_SIZE;
+		}
+		tmp_len = cpu_to_be32(ll_length);
+		memcpy(response, &tmp_len, sizeof(u32));
+	}
+
+	xfer_len = min(alloc_len, resp_size);
+	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+	kfree(response);
+ out_dma:
+	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
+			  dma_addr);
+ out:
+	return res;
+}
+
+static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 alloc_len, xfer_len, resp_size;
+	u8 desc_format;
+	u8 *response;
+
+	alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
+	desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
+	desc_format &= REQUEST_SENSE_DESC_MASK;
+
+	resp_size = desc_format ? DESC_FMT_SENSE_DATA_SIZE :
+					FIXED_FMT_SENSE_DATA_SIZE;
+	response = kmalloc(resp_size, GFP_KERNEL);
+	if (response == NULL) {
+		res = -ENOMEM;
+		goto out;
+	}
+	memset(response, 0, resp_size);
+
+	if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
+		/* Descriptor Format Sense Data */
+		response[0] = DESC_FORMAT_SENSE_DATA;
+		response[1] = NO_SENSE;
+		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
+		response[2] = SCSI_ASC_NO_SENSE;
+		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
+	} else {
+		/* Fixed Format Sense Data */
+		response[0] = FIXED_SENSE_DATA;
+		/* Byte 1 = Obsolete */
+		response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
+		/* Bytes 3-6 - Information - set to zero */
+		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
+		/* Bytes 8-11 - Cmd Specific Information - set to zero */
+		response[12] = SCSI_ASC_NO_SENSE;
+		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+		/* Byte 14 = Field Replaceable Unit Code = 0 */
+		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
+	}
+
+	xfer_len = min(alloc_len, resp_size);
+	res = nvme_trans_copy_to_user(hdr, response, xfer_len);
+
+	kfree(response);
+ out:
+	return res;
+}
+
+static int nvme_trans_security_protocol(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr,
+					u8 *cmd)
+{
+	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_queue *nvmeq;
+	struct nvme_command c;
+	u8 immed, pcmod, pc, no_flush, start;
+
+	immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
+	pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
+	pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
+	no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
+	start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
+
+	immed &= START_STOP_UNIT_CDB_IMMED_MASK;
+	pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
+	pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
+	no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
+	start &= START_STOP_UNIT_CDB_START_MASK;
+
+	if (immed != 0) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+	} else {
+		if (no_flush == 0) {
+			/* Issue NVME FLUSH command prior to START STOP UNIT */
+			memset(&c, 0, sizeof(c));
+			c.common.opcode = nvme_cmd_flush;
+			c.common.nsid = cpu_to_le32(ns->ns_id);
+
+			nvmeq = get_nvmeq(ns->dev);
+			put_nvmeq(nvmeq);
+			nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+			res = nvme_trans_status_code(hdr, nvme_sc);
+			if (res)
+				goto out;
+			if (nvme_sc) {
+				res = nvme_sc;
+				goto out;
+			}
+		}
+		/* Set up the expected power state transition */
+		res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
+	}
+
+ out:
+	return res;
+}
+
+static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr, u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	int nvme_sc;
+	struct nvme_command c;
+	struct nvme_queue *nvmeq;
+
+	memset(&c, 0, sizeof(c));
+	c.common.opcode = nvme_cmd_flush;
+	c.common.nsid = cpu_to_le32(ns->ns_id);
+
+	nvmeq = get_nvmeq(ns->dev);
+	put_nvmeq(nvmeq);
+	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+	res = nvme_trans_status_code(hdr, nvme_sc);
+	if (res)
+		goto out;
+	if (nvme_sc)
+		res = nvme_sc;
+
+ out:
+	return res;
+}
+
+static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u8 parm_hdr_len = 0;
+	u8 nvme_pf_code = 0;
+	u8 format_prot_info, long_list, format_data;
+
+	format_prot_info = GET_U8_FROM_CDB(cmd,
+				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
+	long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
+	format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
+
+	format_prot_info = (format_prot_info &
+				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
+				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
+	long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
+	format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
+
+	if (format_data != 0) {
+		if (format_prot_info != 0) {
+			if (long_list == 0)
+				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
+			else
+				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
+		}
+	} else if (format_prot_info != 0) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	}
+
+	/* Get parm header from data-in/out buffer */
+	/*
+	 * According to the translation spec, the only fields in the parameter
+	 * list we are concerned with are in the header. So allocate only that.
+	 */
+	if (parm_hdr_len > 0) {
+		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
+					format_prot_info, &nvme_pf_code);
+		if (res != SNTI_TRANSLATION_SUCCESS)
+			goto out;
+	}
+
+	/* Attempt to activate any previously downloaded firmware image */
+	res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
+
+	/* Determine Block size and count and send format command */
+	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out;
+
+	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
+
+ out:
+	return res;
+}
+
+static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
+					struct sg_io_hdr *hdr,
+					u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	struct nvme_dev *dev = ns->dev;
+
+	if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
+					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+	else
+		res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
+
+	return res;
+}
+
+static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	int res = SNTI_TRANSLATION_SUCCESS;
+	u32 buffer_offset, parm_list_length;
+	u8 buffer_id, mode;
+
+	parm_list_length =
+		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
+	if (parm_list_length % BYTES_TO_DWORDS != 0) {
+		/* NVMe expects the firmware image to be a whole number of dwords */
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	}
+	buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
+	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		goto out;
+	}
+	mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
+						WRITE_BUFFER_CDB_MODE_MASK;
+	buffer_offset =
+		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
+
+	switch (mode) {
+	case DOWNLOAD_SAVE_ACTIVATE:
+		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+						parm_list_length, buffer_offset,
+						buffer_id);
+		if (res != SNTI_TRANSLATION_SUCCESS)
+			goto out;
+		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+						parm_list_length, buffer_offset,
+						buffer_id);
+		break;
+	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
+		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
+						parm_list_length, buffer_offset,
+						buffer_id);
+		break;
+	case ACTIVATE_DEFERRED_MICROCODE:
+		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
+						parm_list_length, buffer_offset,
+						buffer_id);
+		break;
+	default:
+		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		break;
+	}
+
+ out:
+	return res;
+}
+
+struct scsi_unmap_blk_desc {
+	__be64	slba;
+	__be32	nlb;
+	u32	resv;
+};
+
+struct scsi_unmap_parm_list {
+	__be16	unmap_data_len;
+	__be16	unmap_blk_desc_data_len;
+	u32	resv;
+	struct scsi_unmap_blk_desc desc[0];
+};
+
+static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+							u8 *cmd)
+{
+	struct nvme_dev *dev = ns->dev;
+	struct scsi_unmap_parm_list *plist;
+	struct nvme_dsm_range *range;
+	struct nvme_queue *nvmeq;
+	struct nvme_command c;
+	int i, nvme_sc, res = -ENOMEM;
+	u16 ndesc, list_len;
+	dma_addr_t dma_addr;
+
+	list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
+	if (!list_len)
+		return -EINVAL;
+
+	plist = kmalloc(list_len, GFP_KERNEL);
+	if (!plist)
+		return -ENOMEM;
+
+	res = nvme_trans_copy_from_user(hdr, plist, list_len);
+	if (res != SNTI_TRANSLATION_SUCCESS)
+		goto out;
+
+	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
+	if (!ndesc || ndesc > 256) {
+		res = -EINVAL;
+		goto out;
+	}
+
+	range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+							&dma_addr, GFP_KERNEL);
+	if (!range)
+		goto out;
+
+	for (i = 0; i < ndesc; i++) {
+		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
+		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
+		range[i].cattr = 0;
+	}
+
+	memset(&c, 0, sizeof(c));
+	c.dsm.opcode = nvme_cmd_dsm;
+	c.dsm.nsid = cpu_to_le32(ns->ns_id);
+	c.dsm.prp1 = cpu_to_le64(dma_addr);
+	c.dsm.nr = cpu_to_le32(ndesc - 1);
+	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+	nvmeq = get_nvmeq(dev);
+	put_nvmeq(nvmeq);
+
+	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	res = nvme_trans_status_code(hdr, nvme_sc);
+
+	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
+							range, dma_addr);
+ out:
+	kfree(plist);
+	return res;
+}
+
+static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
+{
+	u8 cmd[BLK_MAX_CDB];
+	int retcode;
+	unsigned int opcode;
+
+	if (hdr->cmdp == NULL)
+		return -EMSGSIZE;
+	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+
+	opcode = cmd[0];
+
+	switch (opcode) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+		retcode = nvme_trans_io(ns, hdr, 0, cmd);
+		break;
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+		retcode = nvme_trans_io(ns, hdr, 1, cmd);
+		break;
+	case INQUIRY:
+		retcode = nvme_trans_inquiry(ns, hdr, cmd);
+		break;
+	case LOG_SENSE:
+		retcode = nvme_trans_log_sense(ns, hdr, cmd);
+		break;
+	case MODE_SELECT:
+	case MODE_SELECT_10:
+		retcode = nvme_trans_mode_select(ns, hdr, cmd);
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
+		break;
+	case READ_CAPACITY:
+		retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+		break;
+	case SERVICE_ACTION_IN:
+		if (IS_READ_CAP_16(cmd))
+			retcode = nvme_trans_read_capacity(ns, hdr, cmd);
+		else
+			goto out;
+		break;
+	case REPORT_LUNS:
+		retcode = nvme_trans_report_luns(ns, hdr, cmd);
+		break;
+	case REQUEST_SENSE:
+		retcode = nvme_trans_request_sense(ns, hdr, cmd);
+		break;
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
+		break;
+	case START_STOP:
+		retcode = nvme_trans_start_stop(ns, hdr, cmd);
+		break;
+	case SYNCHRONIZE_CACHE:
+		retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
+		break;
+	case FORMAT_UNIT:
+		retcode = nvme_trans_format_unit(ns, hdr, cmd);
+		break;
+	case TEST_UNIT_READY:
+		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
+		break;
+	case WRITE_BUFFER:
+		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
+		break;
+	case UNMAP:
+		retcode = nvme_trans_unmap(ns, hdr, cmd);
+		break;
+	default:
+ out:
+		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
+				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+		break;
+	}
+	return retcode;
+}
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
+{
+	struct sg_io_hdr hdr;
+	int retcode;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
+		return -EFAULT;
+	if (hdr.interface_id != 'S')
+		return -EINVAL;
+	if (hdr.cmd_len > BLK_MAX_CDB)
+		return -EINVAL;
+
+	retcode = nvme_scsi_translate(ns, &hdr);
+	if (retcode < 0)
+		return retcode;
+	if (retcode > 0)
+		retcode = SNTI_TRANSLATION_SUCCESS;
+	if (copy_to_user(u_hdr, &hdr, sizeof(hdr)) > 0)
+		return -EFAULT;
+
+	return retcode;
+}
+
+int nvme_sg_get_version_num(int __user *ip)
+{
+	return put_user(sg_version_num, ip);
+}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index ba2b6b5..e76bdc0 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -236,13 +236,12 @@
 	return ret;
 }
 
-static int pcd_block_release(struct gendisk *disk, fmode_t mode)
+static void pcd_block_release(struct gendisk *disk, fmode_t mode)
 {
 	struct pcd_unit *cd = disk->private_data;
 	mutex_lock(&pcd_mutex);
 	cdrom_release(&cd->info, mode);
 	mutex_unlock(&pcd_mutex);
-	return 0;
 }
 
 static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 831e3ac..19ad8f0 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -783,7 +783,7 @@
 	}
 }
 
-static int pd_release(struct gendisk *p, fmode_t mode)
+static void pd_release(struct gendisk *p, fmode_t mode)
 {
 	struct pd_unit *disk = p->private_data;
 
@@ -791,8 +791,6 @@
 	if (!--disk->access && disk->removable)
 		pd_special_command(disk, pd_door_unlock);
 	mutex_unlock(&pd_mutex);
-
-	return 0;
 }
 
 static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ec8f9ed..f5c86d5 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -211,7 +211,7 @@
 		    unsigned int cmd, unsigned long arg);
 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
-static int pf_release(struct gendisk *disk, fmode_t mode);
+static void pf_release(struct gendisk *disk, fmode_t mode);
 
 static int pf_detect(void);
 static void do_pf_read(void);
@@ -360,14 +360,15 @@
 	return 0;
 }
 
-static int pf_release(struct gendisk *disk, fmode_t mode)
+static void pf_release(struct gendisk *disk, fmode_t mode)
 {
 	struct pf_unit *pf = disk->private_data;
 
 	mutex_lock(&pf_mutex);
 	if (pf->access <= 0) {
 		mutex_unlock(&pf_mutex);
-		return -EINVAL;
+		WARN_ON(1);
+		return;
 	}
 
 	pf->access--;
@@ -376,8 +377,6 @@
 		pf_lock(pf, 0);
 
 	mutex_unlock(&pf_mutex);
-	return 0;
-
 }
 
 static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e0588c6..3c08983 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -901,7 +901,7 @@
 			pd->iosched.successive_reads += bio->bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
-			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+			pd->iosched.last_write = bio_end_sector(bio);
 		}
 		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
 			if (pd->read_speed == pd->write_speed) {
@@ -948,31 +948,6 @@
 }
 
 /*
- * Copy CD_FRAMESIZE bytes from src_bio into a destination page
- */
-static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
-{
-	unsigned int copy_size = CD_FRAMESIZE;
-
-	while (copy_size > 0) {
-		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
-		void *vfrom = kmap_atomic(src_bvl->bv_page) +
-			src_bvl->bv_offset + offs;
-		void *vto = page_address(dst_page) + dst_offs;
-		int len = min_t(int, copy_size, src_bvl->bv_len - offs);
-
-		BUG_ON(len < 0);
-		memcpy(vto, vfrom, len);
-		kunmap_atomic(vfrom);
-
-		seg++;
-		offs = 0;
-		dst_offs += len;
-		copy_size -= len;
-	}
-}
-
-/*
  * Copy all data for this packet to pkt->pages[], so that
  * a) The number of required segments for the write bio is minimized, which
  *    is necessary for some scsi controllers.
@@ -1181,16 +1156,15 @@
 	new_sector = new_block * (CD_FRAMESIZE >> 9);
 	pkt->sector = new_sector;
 
+	bio_reset(pkt->bio);
+	pkt->bio->bi_bdev = pd->bdev;
+	pkt->bio->bi_rw = REQ_WRITE;
 	pkt->bio->bi_sector = new_sector;
-	pkt->bio->bi_next = NULL;
-	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
-	pkt->bio->bi_idx = 0;
+	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+	pkt->bio->bi_vcnt = pkt->frames;
 
-	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
-	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
-	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
-	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
-	BUG_ON(pkt->bio->bi_private != pkt);
+	pkt->bio->bi_end_io = pkt_end_io_packet_write;
+	pkt->bio->bi_private = pkt;
 
 	drop_super(sb);
 	return 1;
@@ -1325,55 +1299,35 @@
  */
 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 {
-	struct bio *bio;
 	int f;
-	int frames_write;
 	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
+	bio_reset(pkt->w_bio);
+	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_bdev = pd->bdev;
+	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+	pkt->w_bio->bi_private = pkt;
+
+	/* XXX: locking? */
 	for (f = 0; f < pkt->frames; f++) {
 		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
 		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+			BUG();
 	}
+	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
 
 	/*
 	 * Fill-in bvec with data from orig_bios.
 	 */
-	frames_write = 0;
 	spin_lock(&pkt->lock);
-	bio_list_for_each(bio, &pkt->orig_bios) {
-		int segment = bio->bi_idx;
-		int src_offs = 0;
-		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-		int num_frames = bio->bi_size / CD_FRAMESIZE;
-		BUG_ON(first_frame < 0);
-		BUG_ON(first_frame + num_frames > pkt->frames);
-		for (f = first_frame; f < first_frame + num_frames; f++) {
-			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
+	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);
 
-			while (src_offs >= src_bvl->bv_len) {
-				src_offs -= src_bvl->bv_len;
-				segment++;
-				BUG_ON(segment >= bio->bi_vcnt);
-				src_bvl = bio_iovec_idx(bio, segment);
-			}
-
-			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
-				bvec[f].bv_page = src_bvl->bv_page;
-				bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
-			} else {
-				pkt_copy_bio_data(bio, segment, src_offs,
-						  bvec[f].bv_page, bvec[f].bv_offset);
-			}
-			src_offs += CD_FRAMESIZE;
-			frames_write++;
-		}
-	}
 	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
 	spin_unlock(&pkt->lock);
 
 	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
-		frames_write, (unsigned long long)pkt->sector);
-	BUG_ON(frames_write != pkt->write_size);
+		pkt->write_size, (unsigned long long)pkt->sector);
 
 	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
 		pkt_make_local_copy(pkt, bvec);
@@ -1383,16 +1337,6 @@
 	}
 
 	/* Start the write request */
-	bio_reset(pkt->w_bio);
-	pkt->w_bio->bi_sector = pkt->sector;
-	pkt->w_bio->bi_bdev = pd->bdev;
-	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
-	pkt->w_bio->bi_private = pkt;
-	for (f = 0; f < pkt->frames; f++)
-		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
-			BUG();
-	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
 	atomic_set(&pkt->io_wait, 1);
 	pkt->w_bio->bi_rw = WRITE;
 	pkt_queue_bio(pd, pkt->w_bio);
@@ -2376,10 +2320,9 @@
 	return ret;
 }
 
-static int pkt_close(struct gendisk *disk, fmode_t mode)
+static void pkt_close(struct gendisk *disk, fmode_t mode)
 {
 	struct pktcdvd_device *pd = disk->private_data;
-	int ret = 0;
 
 	mutex_lock(&pktcdvd_mutex);
 	mutex_lock(&ctl_mutex);
@@ -2391,7 +2334,6 @@
 	}
 	mutex_unlock(&ctl_mutex);
 	mutex_unlock(&pktcdvd_mutex);
-	return ret;
 }
 
 
@@ -2433,7 +2375,7 @@
 		cloned_bio->bi_bdev = pd->bdev;
 		cloned_bio->bi_private = psd;
 		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-		pd->stats.secs_r += bio->bi_size >> 9;
+		pd->stats.secs_r += bio_sectors(bio);
 		pkt_queue_bio(pd, cloned_bio);
 		return;
 	}
@@ -2454,7 +2396,7 @@
 	zone = ZONE(bio->bi_sector, pd);
 	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
 		(unsigned long long)bio->bi_sector,
-		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));
+		(unsigned long long)bio_end_sector(bio));
 
 	/* Check if we have to split the bio */
 	{
@@ -2462,7 +2404,7 @@
 		sector_t last_zone;
 		int first_sectors;
 
-		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
+		last_zone = ZONE(bio_end_sector(bio) - 1, pd);
 		if (last_zone != zone) {
 			BUG_ON(last_zone != zone + pd->settings.size);
 			first_sectors = last_zone - bio->bi_sector;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index c2ca181..ca63104 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -460,7 +460,7 @@
 	return 0;
 }
 
-static int rbd_release(struct gendisk *disk, fmode_t mode)
+static void rbd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct rbd_device *rbd_dev = disk->private_data;
 	unsigned long open_count_before;
@@ -473,8 +473,6 @@
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
 	put_device(&rbd_dev->dev);
 	mutex_unlock(&ctl_mutex);
-
-	return 0;
 }
 
 static const struct block_device_operations rbd_bd_ops = {
@@ -1145,7 +1143,7 @@
 	/* Find first affected segment... */
 
 	resid = offset;
-	__bio_for_each_segment(bv, bio_src, idx, 0) {
+	bio_for_each_segment(bv, bio_src, idx) {
 		if (resid < bv->bv_len)
 			break;
 		resid -= bv->bv_len;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 8766a22..2f445b7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -673,7 +673,7 @@
 	return ret;
 }
 
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct floppy_state *fs = disk->private_data;
 	struct swim __iomem *base = fs->swd->base;
@@ -687,8 +687,6 @@
 	if (fs->ref_count == 0)
 		swim_motor(base, OFF);
 	mutex_unlock(&swim_mutex);
-
-	return 0;
 }
 
 static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 758f2ac..20e061c3 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,7 +251,7 @@
 static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long param);
 static int floppy_open(struct block_device *bdev, fmode_t mode);
-static int floppy_release(struct gendisk *disk, fmode_t mode);
+static void floppy_release(struct gendisk *disk, fmode_t mode);
 static unsigned int floppy_check_events(struct gendisk *disk,
 					unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
@@ -1017,7 +1017,7 @@
 	return ret;
 }
 
-static int floppy_release(struct gendisk *disk, fmode_t mode)
+static void floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	struct floppy_state *fs = disk->private_data;
 	struct swim3 __iomem *sw = fs->swim3;
@@ -1029,7 +1029,6 @@
 		swim3_select(fs, RELAX);
 	}
 	mutex_unlock(&swim3_mutex);
-	return 0;
 }
 
 static unsigned int floppy_check_events(struct gendisk *disk,
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a894f88..d89ef86 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1617,7 +1617,7 @@
 	return err;
 }
 
-static int blkif_release(struct gendisk *disk, fmode_t mode)
+static void blkif_release(struct gendisk *disk, fmode_t mode)
 {
 	struct blkfront_info *info = disk->private_data;
 	struct block_device *bdev;
@@ -1658,7 +1658,6 @@
 out:
 	bdput(bdev);
 	mutex_unlock(&blkfront_mutex);
-	return 0;
 }
 
 static const struct block_device_operations xlvbd_block_fops =
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1f38643..f8ef15f 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -915,7 +915,7 @@
 	return 0;
 }
 
-static int ace_release(struct gendisk *disk, fmode_t mode)
+static void ace_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ace_device *ace = disk->private_data;
 	unsigned long flags;
@@ -932,7 +932,6 @@
 	}
 	spin_unlock_irqrestore(&ace->lock, flags);
 	mutex_unlock(&xsysace_mutex);
-	return 0;
 }
 
 static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index a22e3f8..5a95baf 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -309,20 +309,18 @@
     return rc;
 }
 
-static int
+static void
 z2_release(struct gendisk *disk, fmode_t mode)
 {
     mutex_lock(&z2ram_mutex);
     if ( current_device == -1 ) {
     	mutex_unlock(&z2ram_mutex);
-    	return 0;
+    	return;
     }
     mutex_unlock(&z2ram_mutex);
     /*
      * FIXME: unmap memory
      */
-
-    return 0;
 }
 
 static const struct block_device_operations z2_fops =
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d59cdcb..4afcb65 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -503,12 +503,11 @@
 	return ret;
 }
 
-static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
+static void gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
 {
 	mutex_lock(&gdrom_mutex);
 	cdrom_release(gd.cd_info, mode);
 	mutex_unlock(&gdrom_mutex);
-	return 0;
 }
 
 static unsigned int gdrom_bdops_check_events(struct gendisk *disk,
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 2c644af..1ccbe94 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -28,6 +28,7 @@
 #include <linux/pfn.h>
 #include <linux/export.h>
 #include <linux/io.h>
+#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 
@@ -627,6 +628,18 @@
 	return count;
 }
 
+static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
+			     unsigned long nr_segs, loff_t pos)
+{
+	return 0;
+}
+
+static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
+			      unsigned long nr_segs, loff_t pos)
+{
+	return iov_length(iov, nr_segs);
+}
+
 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
 			struct splice_desc *sd)
 {
@@ -670,6 +683,24 @@
 	return written ? written : -EFAULT;
 }
 
+static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
+			     unsigned long nr_segs, loff_t pos)
+{
+	size_t written = 0;
+	unsigned long i;
+	ssize_t ret;
+
+	for (i = 0; i < nr_segs; i++) {
+		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
+				&pos);
+		if (ret < 0)
+			break;
+		written += ret;
+	}
+
+	return written ? written : -EFAULT;
+}
+
 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 {
 #ifndef CONFIG_MMU
@@ -738,6 +769,7 @@
 #define full_lseek      null_lseek
 #define write_zero	write_null
 #define read_full       read_zero
+#define aio_write_zero	aio_write_null
 #define open_mem	open_port
 #define open_kmem	open_mem
 #define open_oldmem	open_mem
@@ -766,6 +798,8 @@
 	.llseek		= null_lseek,
 	.read		= read_null,
 	.write		= write_null,
+	.aio_read	= aio_read_null,
+	.aio_write	= aio_write_null,
 	.splice_write	= splice_write_null,
 };
 
@@ -782,6 +816,8 @@
 	.llseek		= zero_lseek,
 	.read		= read_zero,
 	.write		= write_zero,
+	.aio_read	= aio_read_zero,
+	.aio_write	= aio_write_zero,
 	.mmap		= mmap_zero,
 };
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aeaea32..e992489 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -63,8 +63,6 @@
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
-	select ASYNC_TX_DISABLE_PQ_VAL_DMA
-	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
 	  in recent Intel Xeon chipsets.
@@ -174,15 +172,7 @@
 	  This DMA controller transfers data from memory to peripheral fifo
 	  or vice versa. It does not support memory to memory data transfer.
 
-
-
-config SH_DMAE
-	tristate "Renesas SuperH DMAC support"
-	depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
-	depends on !SH_DMA_API
-	select DMA_ENGINE
-	help
-	  Enable support for the Renesas SuperH DMA controllers.
+source "drivers/dma/sh/Kconfig"
 
 config COH901318
 	bool "ST-Ericsson COH901318 DMA support"
@@ -328,6 +318,10 @@
 config DMA_VIRTUAL_CHANNELS
 	tristate
 
+config DMA_ACPI
+	def_bool y
+	depends on ACPI
+
 config DMA_OF
 	def_bool y
 	depends on OF
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 488e3ff..a2b0df5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,7 @@
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
+obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
 obj-$(CONFIG_NET_DMA) += iovlock.o
@@ -18,7 +19,7 @@
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE) += sh/
+obj-$(CONFIG_SH_DMAE_BASE) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
new file mode 100644
index 0000000..ba6fc62
--- /dev/null
+++ b/drivers/dma/acpi-dma.c
@@ -0,0 +1,279 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of-dma.c
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
+
+static LIST_HEAD(acpi_dma_list);
+static DEFINE_MUTEX(acpi_dma_lock);
+
+/**
+ * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
+ * @dev:		struct device of DMA controller
+ * @acpi_dma_xlate:	translation function which converts a dma specifier
+ *			into a dma_chan structure
+ * @data		pointer to controller specific data to be used by
+ *			translation function
+ *
+ * Returns 0 on success or appropriate errno value on error.
+ *
+ * Allocated memory should be freed with appropriate acpi_dma_controller_free()
+ * call.
+ */
+int acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data)
+{
+	struct acpi_device *adev;
+	struct acpi_dma	*adma;
+
+	if (!dev || !acpi_dma_xlate)
+		return -EINVAL;
+
+	/* Check if the device was enumerated by ACPI */
+	if (!ACPI_HANDLE(dev))
+		return -EINVAL;
+
+	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+		return -EINVAL;
+
+	adma = kzalloc(sizeof(*adma), GFP_KERNEL);
+	if (!adma)
+		return -ENOMEM;
+
+	adma->dev = dev;
+	adma->acpi_dma_xlate = acpi_dma_xlate;
+	adma->data = data;
+
+	/* Now queue acpi_dma controller structure in list */
+	mutex_lock(&acpi_dma_lock);
+	list_add_tail(&adma->dma_controllers, &acpi_dma_list);
+	mutex_unlock(&acpi_dma_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
+
+/**
+ * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list
+ * @dev:	struct device of DMA controller
+ *
+ * Memory allocated by acpi_dma_controller_register() is freed here.
+ */
+int acpi_dma_controller_free(struct device *dev)
+{
+	struct acpi_dma *adma;
+
+	if (!dev)
+		return -EINVAL;
+
+	mutex_lock(&acpi_dma_lock);
+
+	list_for_each_entry(adma, &acpi_dma_list, dma_controllers)
+		if (adma->dev == dev) {
+			list_del(&adma->dma_controllers);
+			mutex_unlock(&acpi_dma_lock);
+			kfree(adma);
+			return 0;
+		}
+
+	mutex_unlock(&acpi_dma_lock);
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
+
+static void devm_acpi_dma_release(struct device *dev, void *res)
+{
+	acpi_dma_controller_free(dev);
+}
+
+/**
+ * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
+ * @dev:		device that is registering this DMA controller
+ * @acpi_dma_xlate:	translation function
+ * @data:		pointer to controller specific data
+ *
+ * Managed acpi_dma_controller_register(). DMA controllers registered by this
+ * function are automatically freed on driver detach. See
+ * acpi_dma_controller_register() for more information.
+ */
+int devm_acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data)
+{
+	void *res;
+	int ret;
+
+	res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
+	if (ret) {
+		devres_free(res);
+		return ret;
+	}
+	devres_add(dev, res);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
+
+/**
+ * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
+ * @dev:	struct device of the DMA controller
+ *
+ * Unregister a DMA controller registered with
+ * devm_acpi_dma_controller_register(). Normally this function will not need to
+ * be called and the resource management code will ensure that the resource is
+ * freed.
+ */
+void devm_acpi_dma_controller_free(struct device *dev)
+{
+	WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+
+struct acpi_dma_parser_data {
+	struct acpi_dma_spec dma_spec;
+	size_t index;
+	size_t n;
+};
+
+/**
+ * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier
+ * @res:	struct acpi_resource to get FixedDMA resources from
+ * @data:	pointer to a helper struct acpi_dma_parser_data
+ */
+static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
+{
+	struct acpi_dma_parser_data *pdata = data;
+
+	if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) {
+		struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma;
+
+		if (pdata->n++ == pdata->index) {
+			pdata->dma_spec.chan_id = dma->channels;
+			pdata->dma_spec.slave_id = dma->request_lines;
+		}
+	}
+
+	/* Tell the ACPI core to skip this resource */
+	return 1;
+}
+
+/**
+ * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel
+ * @dev:	struct device to get DMA request from
+ * @index:	index of FixedDMA descriptor for @dev
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+		size_t index)
+{
+	struct acpi_dma_parser_data pdata;
+	struct acpi_dma_spec *dma_spec = &pdata.dma_spec;
+	struct list_head resource_list;
+	struct acpi_device *adev;
+	struct acpi_dma *adma;
+	struct dma_chan *chan = NULL;
+
+	/* Check if the device was enumerated by ACPI */
+	if (!dev || !ACPI_HANDLE(dev))
+		return NULL;
+
+	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+		return NULL;
+
+	memset(&pdata, 0, sizeof(pdata));
+	pdata.index = index;
+
+	/* Initial values for the request line and channel */
+	dma_spec->chan_id = -1;
+	dma_spec->slave_id = -1;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(adev, &resource_list,
+			acpi_dma_parse_fixed_dma, &pdata);
+	acpi_dev_free_resource_list(&resource_list);
+
+	if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
+		return NULL;
+
+	mutex_lock(&acpi_dma_lock);
+
+	list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
+		dma_spec->dev = adma->dev;
+		chan = adma->acpi_dma_xlate(dma_spec, adma);
+		if (chan)
+			break;
+	}
+
+	mutex_unlock(&acpi_dma_lock);
+	return chan;
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
+
+/**
+ * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel
+ * @dev:	struct device to get DMA request from
+ * @name:	represents corresponding FixedDMA descriptor for @dev
+ *
+ * In order to support both Device Tree and ACPI in a single driver we
+ * translate the names "tx" and "rx" here based on the most common case where
+ * the first FixedDMA descriptor is TX and second is RX.
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+		const char *name)
+{
+	size_t index;
+
+	if (!strcmp(name, "tx"))
+		index = 0;
+	else if (!strcmp(name, "rx"))
+		index = 1;
+	else
+		return NULL;
+
+	return acpi_dma_request_slave_chan_by_index(dev, index);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
+
+/**
+ * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper
+ * @dma_spec: pointer to ACPI DMA specifier
+ * @adma: pointer to ACPI DMA controller data
+ *
+ * A simple translation function for ACPI-based devices. Passes the &struct
+ * acpi_dma_spec to the filter function provided by the DMA controller driver.
+ * Returns a pointer to the channel if found or %NULL otherwise.
+ */
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+		struct acpi_dma *adma)
+{
+	struct acpi_dma_filter_info *info = adma->data;
+
+	if (!info || !info->filter_fn)
+		return NULL;
+
+	return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 88cfc61..e923cda 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 
 #include "at_hdmac_regs.h"
 #include "dmaengine.h"
@@ -677,7 +678,7 @@
 		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
 			| ATC_SRC_ADDR_MODE_INCR
 			| ATC_FC_MEM2PER
-			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
+			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
 		reg = sconfig->dst_addr;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct at_desc	*desc;
@@ -716,7 +717,7 @@
 		ctrlb |=  ATC_DST_ADDR_MODE_INCR
 			| ATC_SRC_ADDR_MODE_FIXED
 			| ATC_FC_PER2MEM
-			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
+			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
 
 		reg = sconfig->src_addr;
 		for_each_sg(sgl, sg, sg_len, i) {
@@ -822,8 +823,8 @@
 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
 				| ATC_SRC_ADDR_MODE_INCR
 				| ATC_FC_MEM2PER
-				| ATC_SIF(AT_DMA_MEM_IF)
-				| ATC_DIF(AT_DMA_PER_IF);
+				| ATC_SIF(atchan->mem_if)
+				| ATC_DIF(atchan->per_if);
 		break;
 
 	case DMA_DEV_TO_MEM:
@@ -833,8 +834,8 @@
 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
 				| ATC_SRC_ADDR_MODE_FIXED
 				| ATC_FC_PER2MEM
-				| ATC_SIF(AT_DMA_PER_IF)
-				| ATC_DIF(AT_DMA_MEM_IF);
+				| ATC_SIF(atchan->per_if)
+				| ATC_DIF(atchan->mem_if);
 		break;
 
 	default:
@@ -1188,6 +1189,67 @@
 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
 
+#ifdef CONFIG_OF
+static bool at_dma_filter(struct dma_chan *chan, void *slave)
+{
+	struct at_dma_slave *atslave = slave;
+
+	if (atslave->dma_dev == chan->device->dev) {
+		chan->private = atslave;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+				     struct of_dma *of_dma)
+{
+	struct dma_chan *chan;
+	struct at_dma_chan *atchan;
+	struct at_dma_slave *atslave;
+	dma_cap_mask_t mask;
+	unsigned int per_id;
+	struct platform_device *dmac_pdev;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	dmac_pdev = of_find_device_by_node(dma_spec->np);
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+	if (!atslave)
+		return NULL;
+	/*
+	 * We can fill both SRC_PER and DST_PER, one of these fields will be
+	 * ignored depending on DMA transfer direction.
+	 */
+	per_id = dma_spec->args[1];
+	atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW
+		      | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id)
+		      | ATC_SRC_PER(per_id);
+	atslave->dma_dev = &dmac_pdev->dev;
+
+	chan = dma_request_channel(mask, at_dma_filter, atslave);
+	if (!chan)
+		return NULL;
+
+	atchan = to_at_dma_chan(chan);
+	atchan->per_if = dma_spec->args[0] & 0xff;
+	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
+
+	return chan;
+}
+#else
+static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
+				     struct of_dma *of_dma)
+{
+	return NULL;
+}
+#endif
 
 /*--  Module Management  -----------------------------------------------*/
 
@@ -1342,6 +1404,8 @@
 	for (i = 0; i < plat_dat->nr_channels; i++) {
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
+		atchan->mem_if = AT_DMA_MEM_IF;
+		atchan->per_if = AT_DMA_PER_IF;
 		atchan->chan_common.device = &atdma->dma_common;
 		dma_cookie_init(&atchan->chan_common);
 		list_add_tail(&atchan->chan_common.device_node,
@@ -1388,8 +1452,25 @@
 
 	dma_async_device_register(&atdma->dma_common);
 
+	/*
+	 * Do not return an error if the dmac node is not present, so as not
+	 * to break the existing way of requesting a channel with
+	 * dma_request_channel().
+	 */
+	if (pdev->dev.of_node) {
+		err = of_dma_controller_register(pdev->dev.of_node,
+						 at_dma_xlate, atdma);
+		if (err) {
+			dev_err(&pdev->dev, "could not register of_dma_controller\n");
+			goto err_of_dma_controller_register;
+		}
+	}
+
 	return 0;
 
+err_of_dma_controller_register:
+	dma_async_device_unregister(&atdma->dma_common);
+	dma_pool_destroy(atdma->dma_desc_pool);
 err_pool_create:
 	platform_set_drvdata(pdev, NULL);
 	free_irq(platform_get_irq(pdev, 0), atdma);
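
The new err_of_dma_controller_register label keeps the unwind in strict reverse order of setup: the DMA device registered last is unregistered first, then the descriptor pool is destroyed, and the pre-existing labels continue the teardown. The idiom, condensed to a sketch (step names are illustrative):

	err = setup_a();
	if (err)
		goto err_a;
	err = setup_b();
	if (err)
		goto err_b;
	return 0;

err_b:
	undo_a();	/* undo in reverse order of setup */
err_a:
	return err;
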
@@ -1406,7 +1487,7 @@
 	return err;
 }
 
-static int __exit at_dma_remove(struct platform_device *pdev)
+static int at_dma_remove(struct platform_device *pdev)
 {
 	struct at_dma		*atdma = platform_get_drvdata(pdev);
 	struct dma_chan		*chan, *_chan;
@@ -1564,7 +1645,7 @@
 };
 
 static struct platform_driver at_dma_driver = {
-	.remove		= __exit_p(at_dma_remove),
+	.remove		= at_dma_remove,
 	.shutdown	= at_dma_shutdown,
 	.id_table	= atdma_devtypes,
 	.driver = {
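
For device-tree users of at_dma_xlate() above: the specifier carries two cells, where cell 1 is the peripheral ID programmed into ATC_SRC_PER/ATC_DST_PER, and cell 0 packs the AHB interface numbers, per_if in bits 7:0 and mem_if in bits 23:16. A minimal sketch of building that first cell; the macro name is illustrative and not part of any binding:

	/* Pack interface IDs the way at_dma_xlate() unpacks dma_spec->args[0]. */
	#define AT_XLATE_IF(mem_if, per_if) \
		((((mem_if) & 0xff) << 16) | ((per_if) & 0xff))

	u32 cell0 = AT_XLATE_IF(0, 1);	/* memory on IF0, peripheral on IF1 */
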
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 0eb3c13..c604d26 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -220,6 +220,8 @@
  * @device: parent device
  * @ch_regs: memory mapped register base
  * @mask: channel index in a mask
+ * @per_if: peripheral interface
+ * @mem_if: memory interface
  * @status: transmit status information from irq/prep* functions
  *                to tasklet (use atomic operations)
  * @tasklet: bottom half to finish transaction work
@@ -238,6 +240,8 @@
 	struct at_dma		*device;
 	void __iomem		*ch_regs;
 	u8			mask;
+	u8			per_if;
+	u8			mem_if;
 	unsigned long		status;
 	struct tasklet_struct	tasklet;
 	u32			save_cfg;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 797940e..3b23061 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2748,7 +2748,7 @@
 	return err;
 }
 
-static int __exit coh901318_remove(struct platform_device *pdev)
+static int coh901318_remove(struct platform_device *pdev)
 {
 	struct coh901318_base *base = platform_get_drvdata(pdev);
 
@@ -2760,7 +2760,7 @@
 
 
 static struct platform_driver coh901318_driver = {
-	.remove = __exit_p(coh901318_remove),
+	.remove = coh901318_remove,
 	.driver = {
 		.name	= "coh901318",
 	},
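
Dropping __exit/__exit_p() here (and in the matching at_hdmac, imx-dma and imx-sdma hunks) is a correctness fix, not churn: a platform device can be unbound at runtime through sysfs, so the remove callback must stay in regular text even when the driver is built in, where __exit sections are discarded. The resulting pattern, sketched for a hypothetical driver:

	static int foo_remove(struct platform_device *pdev)
	{
		/* tear down; must be callable at any time via sysfs unbind */
		return 0;
	}

	static struct platform_driver foo_driver = {
		.remove	= foo_remove,	/* no __exit, no __exit_p() wrapper */
		.driver	= {
			.name	= "foo",
		},
	};
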
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b2728d6..93f7992 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -62,6 +62,8 @@
 #include <linux/rculist.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
 #include <linux/of_dma.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
@@ -174,7 +176,8 @@
 #define dma_device_satisfies_mask(device, mask) \
 	__dma_device_satisfies_mask((device), &(mask))
 static int
-__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device,
+			    const dma_cap_mask_t *want)
 {
 	dma_cap_mask_t has;
 
@@ -463,7 +466,8 @@
 		}
 }
 
-static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
+					  struct dma_device *dev,
 					  dma_filter_fn fn, void *fn_param)
 {
 	struct dma_chan *chan;
@@ -505,7 +509,8 @@
  * @fn: optional callback used to filter available channels
  * @fn_param: opaque parameter to pass to dma_filter_fn
  */
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+				       dma_filter_fn fn, void *fn_param)
 {
 	struct dma_device *device, *_d;
 	struct dma_chan *chan = NULL;
@@ -555,12 +560,16 @@
  * @dev:	pointer to client device structure
  * @name:	slave channel name
  */
-struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
 {
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
 		return of_dma_request_slave_channel(dev->of_node, name);
 
+	/* If device was enumerated by ACPI get slave info from here */
+	if (ACPI_HANDLE(dev))
+		return acpi_dma_request_slave_chan_by_name(dev, name);
+
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
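
With this hunk a slave driver can stay firmware-agnostic: the same named request is routed to the DT helper or to the new ACPI helper depending on how the device was enumerated. A minimal client sketch, assuming a platform device and an illustrative channel name "rx":

	#include <linux/dmaengine.h>

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		return -ENODEV;	/* or fall back to dma_request_channel() */

	/* ... issue transfers ... */

	dma_release_channel(chan);
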
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a2c8904..d8ce4ec 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -2,6 +2,7 @@
  * DMA Engine test module
  *
  * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2013 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,10 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
 
 static unsigned int test_buf_size = 16384;
 module_param(test_buf_size, uint, S_IRUGO);
@@ -61,6 +66,9 @@
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
 		 "Pass -1 for infinite timeout");
 
+/* Maximum amount of mismatched bytes in buffer to print */
+#define MAX_ERROR_COUNT		32
+
 /*
  * Initialization patterns. All bytes in the source buffer have bit 7
  * set; all bytes in the destination buffer have bit 7 cleared.
@@ -78,13 +86,65 @@
 #define PATTERN_OVERWRITE	0x20
 #define PATTERN_COUNT_MASK	0x1f
 
+enum dmatest_error_type {
+	DMATEST_ET_OK,
+	DMATEST_ET_MAP_SRC,
+	DMATEST_ET_MAP_DST,
+	DMATEST_ET_PREP,
+	DMATEST_ET_SUBMIT,
+	DMATEST_ET_TIMEOUT,
+	DMATEST_ET_DMA_ERROR,
+	DMATEST_ET_DMA_IN_PROGRESS,
+	DMATEST_ET_VERIFY,
+	DMATEST_ET_VERIFY_BUF,
+};
+
+struct dmatest_verify_buffer {
+	unsigned int	index;
+	u8		expected;
+	u8		actual;
+};
+
+struct dmatest_verify_result {
+	unsigned int			error_count;
+	struct dmatest_verify_buffer	data[MAX_ERROR_COUNT];
+	u8				pattern;
+	bool				is_srcbuf;
+};
+
+struct dmatest_thread_result {
+	struct list_head	node;
+	unsigned int		n;
+	unsigned int		src_off;
+	unsigned int		dst_off;
+	unsigned int		len;
+	enum dmatest_error_type	type;
+	union {
+		unsigned long			data;
+		dma_cookie_t			cookie;
+		enum dma_status			status;
+		int				error;
+		struct dmatest_verify_result	*vr;
+	};
+};
+
+struct dmatest_result {
+	struct list_head	node;
+	char			*name;
+	struct list_head	results;
+};
+
+struct dmatest_info;
+
 struct dmatest_thread {
 	struct list_head	node;
+	struct dmatest_info	*info;
 	struct task_struct	*task;
 	struct dma_chan		*chan;
 	u8			**srcs;
 	u8			**dsts;
 	enum dma_transaction_type type;
+	bool			done;
 };
 
 struct dmatest_chan {
@@ -93,25 +153,69 @@
 	struct list_head	threads;
 };
 
-/*
- * These are protected by dma_list_mutex since they're only used by
- * the DMA filter function callback
+/**
+ * struct dmatest_params - test parameters.
+ * @buf_size:		size of the memcpy test buffer
+ * @channel:		bus ID of the channel to test
+ * @device:		bus ID of the DMA Engine to test
+ * @threads_per_chan:	number of threads to start per channel
+ * @max_channels:	maximum number of channels to use
+ * @iterations:		iterations before stopping test
+ * @xor_sources:	number of xor source buffers
+ * @pq_sources:		number of p+q source buffers
+ * @timeout:		transfer timeout in msec, -1 for infinite timeout
  */
-static LIST_HEAD(dmatest_channels);
-static unsigned int nr_channels;
+struct dmatest_params {
+	unsigned int	buf_size;
+	char		channel[20];
+	char		device[20];
+	unsigned int	threads_per_chan;
+	unsigned int	max_channels;
+	unsigned int	iterations;
+	unsigned int	xor_sources;
+	unsigned int	pq_sources;
+	int		timeout;
+};
 
-static bool dmatest_match_channel(struct dma_chan *chan)
+/**
+ * struct dmatest_info - test information.
+ * @params:		test parameters
+ * @channels:		channels under test
+ * @nr_channels:	number of channels under test
+ * @lock:		access protection to the above fields
+ * @root:		debugfs root directory
+ * @dbgfs_params:	test parameters as seen through debugfs
+ * @results:		per-thread test results
+ * @results_lock:	access protection to the results list
+ */
+struct dmatest_info {
+	/* Test parameters */
+	struct dmatest_params	params;
+
+	/* Internal state */
+	struct list_head	channels;
+	unsigned int		nr_channels;
+	struct mutex		lock;
+
+	/* debugfs related stuff */
+	struct dentry		*root;
+	struct dmatest_params	dbgfs_params;
+
+	/* Test results */
+	struct list_head	results;
+	struct mutex		results_lock;
+};
+
+static struct dmatest_info test_info;
+
+static bool dmatest_match_channel(struct dmatest_params *params,
+		struct dma_chan *chan)
 {
-	if (test_channel[0] == '\0')
+	if (params->channel[0] == '\0')
 		return true;
-	return strcmp(dma_chan_name(chan), test_channel) == 0;
+	return strcmp(dma_chan_name(chan), params->channel) == 0;
 }
 
-static bool dmatest_match_device(struct dma_device *device)
+static bool dmatest_match_device(struct dmatest_params *params,
+		struct dma_device *device)
 {
-	if (test_device[0] == '\0')
+	if (params->device[0] == '\0')
 		return true;
-	return strcmp(dev_name(device->dev), test_device) == 0;
+	return strcmp(dev_name(device->dev), params->device) == 0;
 }
 
 static unsigned long dmatest_random(void)
@@ -122,7 +226,8 @@
 	return buf;
 }
 
-static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
+		unsigned int buf_size)
 {
 	unsigned int i;
 	u8 *buf;
@@ -133,13 +238,14 @@
 		for ( ; i < start + len; i++)
 			buf[i] = PATTERN_SRC | PATTERN_COPY
 				| (~i & PATTERN_COUNT_MASK);
-		for ( ; i < test_buf_size; i++)
+		for ( ; i < buf_size; i++)
 			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
 		buf++;
 	}
 }
 
-static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
+		unsigned int buf_size)
 {
 	unsigned int i;
 	u8 *buf;
@@ -150,40 +256,14 @@
 		for ( ; i < start + len; i++)
 			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
 				| (~i & PATTERN_COUNT_MASK);
-		for ( ; i < test_buf_size; i++)
+		for ( ; i < buf_size; i++)
 			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
 	}
 }
 
-static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
-		unsigned int counter, bool is_srcbuf)
-{
-	u8		diff = actual ^ pattern;
-	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
-	const char	*thread_name = current->comm;
-
-	if (is_srcbuf)
-		pr_warning("%s: srcbuf[0x%x] overwritten!"
-				" Expected %02x, got %02x\n",
-				thread_name, index, expected, actual);
-	else if ((pattern & PATTERN_COPY)
-			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
-		pr_warning("%s: dstbuf[0x%x] not copied!"
-				" Expected %02x, got %02x\n",
-				thread_name, index, expected, actual);
-	else if (diff & PATTERN_SRC)
-		pr_warning("%s: dstbuf[0x%x] was copied!"
-				" Expected %02x, got %02x\n",
-				thread_name, index, expected, actual);
-	else
-		pr_warning("%s: dstbuf[0x%x] mismatch!"
-				" Expected %02x, got %02x\n",
-				thread_name, index, expected, actual);
-}
-
-static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
-		unsigned int end, unsigned int counter, u8 pattern,
-		bool is_srcbuf)
+static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
+		unsigned int start, unsigned int end, unsigned int counter,
+		u8 pattern, bool is_srcbuf)
 {
 	unsigned int i;
 	unsigned int error_count = 0;
@@ -191,6 +271,7 @@
 	u8 expected;
 	u8 *buf;
 	unsigned int counter_orig = counter;
+	struct dmatest_verify_buffer *vb;
 
 	for (; (buf = *bufs); bufs++) {
 		counter = counter_orig;
@@ -198,18 +279,21 @@
 			actual = buf[i];
 			expected = pattern | (~counter & PATTERN_COUNT_MASK);
 			if (actual != expected) {
-				if (error_count < 32)
-					dmatest_mismatch(actual, pattern, i,
-							 counter, is_srcbuf);
+				if (error_count < MAX_ERROR_COUNT && vr) {
+					vb = &vr->data[error_count];
+					vb->index = i;
+					vb->expected = expected;
+					vb->actual = actual;
+				}
 				error_count++;
 			}
 			counter++;
 		}
 	}
 
-	if (error_count > 32)
+	if (error_count > MAX_ERROR_COUNT)
 		pr_warning("%s: %u errors suppressed\n",
-			current->comm, error_count - 32);
+			current->comm, error_count - MAX_ERROR_COUNT);
 
 	return error_count;
 }
@@ -249,6 +333,170 @@
 	return val % 2 ? val : val - 1;
 }
 
+static char *verify_result_get_one(struct dmatest_verify_result *vr,
+		unsigned int i)
+{
+	struct dmatest_verify_buffer *vb = &vr->data[i];
+	u8 diff = vb->actual ^ vr->pattern;
+	static char buf[512];
+	char *msg;
+
+	if (vr->is_srcbuf)
+		msg = "srcbuf overwritten!";
+	else if ((vr->pattern & PATTERN_COPY)
+			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+		msg = "dstbuf not copied!";
+	else if (diff & PATTERN_SRC)
+		msg = "dstbuf was copied!";
+	else
+		msg = "dstbuf mismatch!";
+
+	snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
+		 vb->index, vb->expected, vb->actual);
+
+	return buf;
+}
+
+static char *thread_result_get(const char *name,
+		struct dmatest_thread_result *tr)
+{
+	static const char * const messages[] = {
+		[DMATEST_ET_OK]			= "No errors",
+		[DMATEST_ET_MAP_SRC]		= "src mapping error",
+		[DMATEST_ET_MAP_DST]		= "dst mapping error",
+		[DMATEST_ET_PREP]		= "prep error",
+		[DMATEST_ET_SUBMIT]		= "submit error",
+		[DMATEST_ET_TIMEOUT]		= "test timed out",
+		[DMATEST_ET_DMA_ERROR]		=
+			"got completion callback (DMA_ERROR)",
+		[DMATEST_ET_DMA_IN_PROGRESS]	=
+			"got completion callback (DMA_IN_PROGRESS)",
+		[DMATEST_ET_VERIFY]		= "errors",
+		[DMATEST_ET_VERIFY_BUF]		= "verify errors",
+	};
+	static char buf[512];
+
+	snprintf(buf, sizeof(buf) - 1,
+		 "%s: #%u: %s with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+		 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
+		 tr->len, tr->data);
+
+	return buf;
+}
+
+static int thread_result_add(struct dmatest_info *info,
+		struct dmatest_result *r, enum dmatest_error_type type,
+		unsigned int n, unsigned int src_off, unsigned int dst_off,
+		unsigned int len, unsigned long data)
+{
+	struct dmatest_thread_result *tr;
+
+	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+	if (!tr)
+		return -ENOMEM;
+
+	tr->type = type;
+	tr->n = n;
+	tr->src_off = src_off;
+	tr->dst_off = dst_off;
+	tr->len = len;
+	tr->data = data;
+
+	mutex_lock(&info->results_lock);
+	list_add_tail(&tr->node, &r->results);
+	mutex_unlock(&info->results_lock);
+
+	pr_warn("%s\n", thread_result_get(r->name, tr));
+	return 0;
+}
+
+static unsigned int verify_result_add(struct dmatest_info *info,
+		struct dmatest_result *r, unsigned int n,
+		unsigned int src_off, unsigned int dst_off, unsigned int len,
+		u8 **bufs, int whence, unsigned int counter, u8 pattern,
+		bool is_srcbuf)
+{
+	struct dmatest_verify_result *vr;
+	unsigned int error_count;
+	unsigned int buf_off = is_srcbuf ? src_off : dst_off;
+	unsigned int start, end;
+
+	if (whence < 0) {
+		start = 0;
+		end = buf_off;
+	} else if (whence > 0) {
+		start = buf_off + len;
+		end = info->params.buf_size;
+	} else {
+		start = buf_off;
+		end = buf_off + len;
+	}
+
+	vr = kmalloc(sizeof(*vr), GFP_KERNEL);
+	if (!vr) {
+		pr_warn("dmatest: No memory to store verify result\n");
+		return dmatest_verify(NULL, bufs, start, end, counter, pattern,
+				      is_srcbuf);
+	}
+
+	vr->pattern = pattern;
+	vr->is_srcbuf = is_srcbuf;
+
+	error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
+				     is_srcbuf);
+	if (error_count) {
+		vr->error_count = error_count;
+		thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
+				  dst_off, len, (unsigned long)vr);
+		return error_count;
+	}
+
+	kfree(vr);
+	return 0;
+}
+
+static void result_free(struct dmatest_info *info, const char *name)
+{
+	struct dmatest_result *r, *_r;
+
+	mutex_lock(&info->results_lock);
+	list_for_each_entry_safe(r, _r, &info->results, node) {
+		struct dmatest_thread_result *tr, *_tr;
+
+		if (name && strcmp(r->name, name))
+			continue;
+
+		list_for_each_entry_safe(tr, _tr, &r->results, node) {
+			if (tr->type == DMATEST_ET_VERIFY_BUF)
+				kfree(tr->vr);
+			list_del(&tr->node);
+			kfree(tr);
+		}
+
+		kfree(r->name);
+		list_del(&r->node);
+		kfree(r);
+	}
+
+	mutex_unlock(&info->results_lock);
+}
+
+static struct dmatest_result *result_init(struct dmatest_info *info,
+		const char *name)
+{
+	struct dmatest_result *r;
+
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	if (r) {
+		r->name = kstrdup(name, GFP_KERNEL);
+		INIT_LIST_HEAD(&r->results);
+		mutex_lock(&info->results_lock);
+		list_add_tail(&r->node, &info->results);
+		mutex_unlock(&info->results_lock);
+	}
+	return r;
+}
+
 /*
  * This function repeatedly tests DMA transfers of various lengths and
  * offsets for a given operation type until it is told to exit by
@@ -268,6 +516,8 @@
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
 	struct dmatest_done	done = { .wait = &done_wait };
+	struct dmatest_info	*info;
+	struct dmatest_params	*params;
 	struct dma_chan		*chan;
 	struct dma_device	*dev;
 	const char		*thread_name;
@@ -278,11 +528,12 @@
 	dma_cookie_t		cookie;
 	enum dma_status		status;
 	enum dma_ctrl_flags 	flags;
-	u8			pq_coefs[pq_sources + 1];
+	u8			*pq_coefs = NULL;
 	int			ret;
 	int			src_cnt;
 	int			dst_cnt;
 	int			i;
+	struct dmatest_result	*result;
 
 	thread_name = current->comm;
 	set_freezable();
@@ -290,28 +541,39 @@
 	ret = -ENOMEM;
 
 	smp_rmb();
+	info = thread->info;
+	params = &info->params;
 	chan = thread->chan;
 	dev = chan->device;
 	if (thread->type == DMA_MEMCPY)
 		src_cnt = dst_cnt = 1;
 	else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
-		src_cnt = min_odd(xor_sources | 1, dev->max_xor);
+		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
 		dst_cnt = 1;
 	} else if (thread->type == DMA_PQ) {
 		/* force odd to ensure dst = src */
-		src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
+		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
 		dst_cnt = 2;
+
+		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
+		if (!pq_coefs)
+			goto err_thread_type;
+
 		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
 	} else
+		goto err_thread_type;
+
+	result = result_init(info, thread_name);
+	if (!result)
 		goto err_srcs;
 
 	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
 	if (!thread->srcs)
 		goto err_srcs;
 	for (i = 0; i < src_cnt; i++) {
-		thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
 		if (!thread->srcs[i])
 			goto err_srcbuf;
 	}
@@ -321,7 +583,7 @@
 	if (!thread->dsts)
 		goto err_dsts;
 	for (i = 0; i < dst_cnt; i++) {
-		thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
 		if (!thread->dsts[i])
 			goto err_dstbuf;
 	}
@@ -337,7 +599,7 @@
 	      | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
 
 	while (!kthread_should_stop()
-	       && !(iterations && total_tests >= iterations)) {
+	       && !(params->iterations && total_tests >= params->iterations)) {
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
@@ -353,24 +615,24 @@
 		else if (thread->type == DMA_PQ)
 			align = dev->pq_align;
 
-		if (1 << align > test_buf_size) {
+		if (1 << align > params->buf_size) {
 			pr_err("%u-byte buffer too small for %d-byte alignment\n",
-			       test_buf_size, 1 << align);
+			       params->buf_size, 1 << align);
 			break;
 		}
 
-		len = dmatest_random() % test_buf_size + 1;
+		len = dmatest_random() % params->buf_size + 1;
 		len = (len >> align) << align;
 		if (!len)
 			len = 1 << align;
-		src_off = dmatest_random() % (test_buf_size - len + 1);
-		dst_off = dmatest_random() % (test_buf_size - len + 1);
+		src_off = dmatest_random() % (params->buf_size - len + 1);
+		dst_off = dmatest_random() % (params->buf_size - len + 1);
 
 		src_off = (src_off >> align) << align;
 		dst_off = (dst_off >> align) << align;
 
-		dmatest_init_srcs(thread->srcs, src_off, len);
-		dmatest_init_dsts(thread->dsts, dst_off, len);
+		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
+		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
 
 		for (i = 0; i < src_cnt; i++) {
 			u8 *buf = thread->srcs[i] + src_off;
@@ -380,10 +642,10 @@
 			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
 			if (ret) {
 				unmap_src(dev->dev, dma_srcs, len, i);
-				pr_warn("%s: #%u: mapping error %d with "
-					"src_off=0x%x len=0x%x\n",
-					thread_name, total_tests - 1, ret,
-					src_off, len);
+				thread_result_add(info, result,
+						  DMATEST_ET_MAP_SRC,
+						  total_tests, src_off, dst_off,
+						  len, ret);
 				failed_tests++;
 				continue;
 			}
@@ -391,16 +653,17 @@
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 		for (i = 0; i < dst_cnt; i++) {
 			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
-						     test_buf_size,
+						     params->buf_size,
 						     DMA_BIDIRECTIONAL);
 			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
 			if (ret) {
 				unmap_src(dev->dev, dma_srcs, len, src_cnt);
-				unmap_dst(dev->dev, dma_dsts, test_buf_size, i);
-				pr_warn("%s: #%u: mapping error %d with "
-					"dst_off=0x%x len=0x%x\n",
-					thread_name, total_tests - 1, ret,
-					dst_off, test_buf_size);
+				unmap_dst(dev->dev, dma_dsts, params->buf_size,
+					  i);
+				thread_result_add(info, result,
+						  DMATEST_ET_MAP_DST,
+						  total_tests, src_off, dst_off,
+						  len, ret);
 				failed_tests++;
 				continue;
 			}
@@ -428,11 +691,11 @@
 
 		if (!tx) {
 			unmap_src(dev->dev, dma_srcs, len, src_cnt);
-			unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
-			pr_warning("%s: #%u: prep error with src_off=0x%x "
-					"dst_off=0x%x len=0x%x\n",
-					thread_name, total_tests - 1,
-					src_off, dst_off, len);
+			unmap_dst(dev->dev, dma_dsts, params->buf_size,
+				  dst_cnt);
+			thread_result_add(info, result, DMATEST_ET_PREP,
+					  total_tests, src_off, dst_off,
+					  len, 0);
 			msleep(100);
 			failed_tests++;
 			continue;
@@ -444,18 +707,18 @@
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
-			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
-					"dst_off=0x%x len=0x%x\n",
-					thread_name, total_tests - 1, cookie,
-					src_off, dst_off, len);
+			thread_result_add(info, result, DMATEST_ET_SUBMIT,
+					  total_tests, src_off, dst_off,
+					  len, cookie);
 			msleep(100);
 			failed_tests++;
 			continue;
 		}
 		dma_async_issue_pending(chan);
 
-		wait_event_freezable_timeout(done_wait, done.done,
-					     msecs_to_jiffies(timeout));
+		wait_event_freezable_timeout(done_wait,
+					     done.done || kthread_should_stop(),
+					     msecs_to_jiffies(params->timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
@@ -468,56 +731,57 @@
 			 * free it this time?" dancing.  For now, just
 			 * leave it dangling.
 			 */
-			pr_warning("%s: #%u: test timed out\n",
-				   thread_name, total_tests - 1);
+			thread_result_add(info, result, DMATEST_ET_TIMEOUT,
+					  total_tests, src_off, dst_off,
+					  len, 0);
 			failed_tests++;
 			continue;
 		} else if (status != DMA_SUCCESS) {
-			pr_warning("%s: #%u: got completion callback,"
-				   " but status is \'%s\'\n",
-				   thread_name, total_tests - 1,
-				   status == DMA_ERROR ? "error" : "in progress");
+			enum dmatest_error_type type = (status == DMA_ERROR) ?
+				DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
+			thread_result_add(info, result, type,
+					  total_tests, src_off, dst_off,
+					  len, status);
 			failed_tests++;
 			continue;
 		}
 
 		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
-		unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
+		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
 
 		error_count = 0;
 
 		pr_debug("%s: verifying source buffer...\n", thread_name);
-		error_count += dmatest_verify(thread->srcs, 0, src_off,
+		error_count += verify_result_add(info, result, total_tests,
+				src_off, dst_off, len, thread->srcs, -1,
 				0, PATTERN_SRC, true);
-		error_count += dmatest_verify(thread->srcs, src_off,
-				src_off + len, src_off,
-				PATTERN_SRC | PATTERN_COPY, true);
-		error_count += dmatest_verify(thread->srcs, src_off + len,
-				test_buf_size, src_off + len,
-				PATTERN_SRC, true);
+		error_count += verify_result_add(info, result, total_tests,
+				src_off, dst_off, len, thread->srcs, 0,
+				src_off, PATTERN_SRC | PATTERN_COPY, true);
+		error_count += verify_result_add(info, result, total_tests,
+				src_off, dst_off, len, thread->srcs, 1,
+				src_off + len, PATTERN_SRC, true);
 
-		pr_debug("%s: verifying dest buffer...\n",
-				thread->task->comm);
-		error_count += dmatest_verify(thread->dsts, 0, dst_off,
+		pr_debug("%s: verifying dest buffer...\n", thread_name);
+		error_count += verify_result_add(info, result, total_tests,
+				src_off, dst_off, len, thread->dsts, -1,
 				0, PATTERN_DST, false);
-		error_count += dmatest_verify(thread->dsts, dst_off,
-				dst_off + len, src_off,
-				PATTERN_SRC | PATTERN_COPY, false);
-		error_count += dmatest_verify(thread->dsts, dst_off + len,
-				test_buf_size, dst_off + len,
-				PATTERN_DST, false);
+		error_count += verify_result_add(info, result, total_tests,
+				src_off, dst_off, len, thread->dsts, 0,
+				src_off, PATTERN_SRC | PATTERN_COPY, false);
+		error_count += verify_result_add(info, result, total_tests,
+				src_off, dst_off, len, thread->dsts, 1,
+				dst_off + len, PATTERN_DST, false);
 
 		if (error_count) {
-			pr_warning("%s: #%u: %u errors with "
-				"src_off=0x%x dst_off=0x%x len=0x%x\n",
-				thread_name, total_tests - 1, error_count,
-				src_off, dst_off, len);
+			thread_result_add(info, result, DMATEST_ET_VERIFY,
+					  total_tests, src_off, dst_off,
+					  len, error_count);
 			failed_tests++;
 		} else {
-			pr_debug("%s: #%u: No errors with "
-				"src_off=0x%x dst_off=0x%x len=0x%x\n",
-				thread_name, total_tests - 1,
-				src_off, dst_off, len);
+			thread_result_add(info, result, DMATEST_ET_OK,
+					  total_tests, src_off, dst_off,
+					  len, 0);
 		}
 	}
 
@@ -532,6 +796,8 @@
 err_srcbuf:
 	kfree(thread->srcs);
 err_srcs:
+	kfree(pq_coefs);
+err_thread_type:
 	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
 			thread_name, total_tests, failed_tests, ret);
 
@@ -539,7 +805,9 @@
 	if (ret)
 		dmaengine_terminate_all(chan);
 
-	if (iterations > 0)
+	thread->done = true;
+
+	if (params->iterations > 0)
 		while (!kthread_should_stop()) {
 			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
 			interruptible_sleep_on(&wait_dmatest_exit);
@@ -568,8 +836,10 @@
 	kfree(dtc);
 }
 
-static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
+static int dmatest_add_threads(struct dmatest_info *info,
+		struct dmatest_chan *dtc, enum dma_transaction_type type)
 {
+	struct dmatest_params *params = &info->params;
 	struct dmatest_thread *thread;
 	struct dma_chan *chan = dtc->chan;
 	char *op;
@@ -584,7 +854,7 @@
 	else
 		return -EINVAL;
 
-	for (i = 0; i < threads_per_chan; i++) {
+	for (i = 0; i < params->threads_per_chan; i++) {
 		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
 		if (!thread) {
 			pr_warning("dmatest: No memory for %s-%s%u\n",
@@ -592,6 +862,7 @@
 
 			break;
 		}
+		thread->info = info;
 		thread->chan = dtc->chan;
 		thread->type = type;
 		smp_wmb();
@@ -612,7 +883,8 @@
 	return i;
 }
 
-static int dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dmatest_info *info,
+		struct dma_chan *chan)
 {
 	struct dmatest_chan	*dtc;
 	struct dma_device	*dma_dev = chan->device;
@@ -629,75 +901,418 @@
 	INIT_LIST_HEAD(&dtc->threads);
 
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
-		cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
+		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
 		thread_count += cnt > 0 ? cnt : 0;
 	}
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
-		cnt = dmatest_add_threads(dtc, DMA_XOR);
+		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
 		thread_count += cnt > 0 ? cnt : 0;
 	}
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
-		cnt = dmatest_add_threads(dtc, DMA_PQ);
+		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
 		thread_count += cnt > 0 ? cnt : 0;
 	}
 
 	pr_info("dmatest: Started %u threads using %s\n",
 		thread_count, dma_chan_name(chan));
 
-	list_add_tail(&dtc->node, &dmatest_channels);
-	nr_channels++;
+	list_add_tail(&dtc->node, &info->channels);
+	info->nr_channels++;
 
 	return 0;
 }
 
 static bool filter(struct dma_chan *chan, void *param)
 {
-	if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+	struct dmatest_params *params = param;
+
+	if (!dmatest_match_channel(params, chan) ||
+	    !dmatest_match_device(params, chan->device))
 		return false;
 	else
 		return true;
 }
 
-static int __init dmatest_init(void)
+static int __run_threaded_test(struct dmatest_info *info)
 {
 	dma_cap_mask_t mask;
 	struct dma_chan *chan;
+	struct dmatest_params *params = &info->params;
 	int err = 0;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_MEMCPY, mask);
 	for (;;) {
-		chan = dma_request_channel(mask, filter, NULL);
+		chan = dma_request_channel(mask, filter, params);
 		if (chan) {
-			err = dmatest_add_channel(chan);
+			err = dmatest_add_channel(info, chan);
 			if (err) {
 				dma_release_channel(chan);
 				break; /* add_channel failed, punt */
 			}
 		} else
 			break; /* no more channels available */
-		if (max_channels && nr_channels >= max_channels)
+		if (params->max_channels &&
+		    info->nr_channels >= params->max_channels)
 			break; /* we have all we need */
 	}
-
 	return err;
 }
+
+#ifndef MODULE
+static int run_threaded_test(struct dmatest_info *info)
+{
+	int ret;
+
+	mutex_lock(&info->lock);
+	ret = __run_threaded_test(info);
+	mutex_unlock(&info->lock);
+	return ret;
+}
+#endif
+
+static void __stop_threaded_test(struct dmatest_info *info)
+{
+	struct dmatest_chan *dtc, *_dtc;
+	struct dma_chan *chan;
+
+	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
+		list_del(&dtc->node);
+		chan = dtc->chan;
+		dmatest_cleanup_channel(dtc);
+		pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
+		dma_release_channel(chan);
+	}
+
+	info->nr_channels = 0;
+}
+
+static void stop_threaded_test(struct dmatest_info *info)
+{
+	mutex_lock(&info->lock);
+	__stop_threaded_test(info);
+	mutex_unlock(&info->lock);
+}
+
+static int __restart_threaded_test(struct dmatest_info *info, bool run)
+{
+	struct dmatest_params *params = &info->params;
+	int ret;
+
+	/* Stop any running test first */
+	__stop_threaded_test(info);
+
+	if (!run)
+		return 0;
+
+	/* Clear results from previous run */
+	result_free(info, NULL);
+
+	/* Copy test parameters */
+	memcpy(params, &info->dbgfs_params, sizeof(*params));
+
+	/* Run test with new parameters */
+	ret = __run_threaded_test(info);
+	if (ret) {
+		__stop_threaded_test(info);
+		pr_err("dmatest: Can't run test\n");
+	}
+
+	return ret;
+}
+
+static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
+		const void __user *from, size_t count)
+{
+	char tmp[20];
+	ssize_t len;
+
+	len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
+	if (len >= 0) {
+		tmp[len] = '\0';
+		strlcpy(to, strim(tmp), available);
+	}
+
+	return len;
+}
+
+static ssize_t dtf_read_channel(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct dmatest_info *info = file->private_data;
+	return simple_read_from_buffer(buf, count, ppos,
+			info->dbgfs_params.channel,
+			strlen(info->dbgfs_params.channel));
+}
+
+static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
+		size_t size, loff_t *ppos)
+{
+	struct dmatest_info *info = file->private_data;
+	return dtf_write_string(info->dbgfs_params.channel,
+				sizeof(info->dbgfs_params.channel),
+				ppos, buf, size);
+}
+
+static const struct file_operations dtf_channel_fops = {
+	.read	= dtf_read_channel,
+	.write	= dtf_write_channel,
+	.open	= simple_open,
+	.llseek	= default_llseek,
+};
+
+static ssize_t dtf_read_device(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct dmatest_info *info = file->private_data;
+	return simple_read_from_buffer(buf, count, ppos,
+			info->dbgfs_params.device,
+			strlen(info->dbgfs_params.device));
+}
+
+static ssize_t dtf_write_device(struct file *file, const char __user *buf,
+		size_t size, loff_t *ppos)
+{
+	struct dmatest_info *info = file->private_data;
+	return dtf_write_string(info->dbgfs_params.device,
+				sizeof(info->dbgfs_params.device),
+				ppos, buf, size);
+}
+
+static const struct file_operations dtf_device_fops = {
+	.read	= dtf_read_device,
+	.write	= dtf_write_device,
+	.open	= simple_open,
+	.llseek	= default_llseek,
+};
+
+static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
+		size_t count, loff_t *ppos)
+{
+	struct dmatest_info *info = file->private_data;
+	char buf[3];
+	struct dmatest_chan *dtc;
+	bool alive = false;
+
+	mutex_lock(&info->lock);
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		list_for_each_entry(thread, &dtc->threads, node) {
+			if (!thread->done) {
+				alive = true;
+				break;
+			}
+		}
+	}
+
+	if (alive) {
+		buf[0] = 'Y';
+	} else {
+		__stop_threaded_test(info);
+		buf[0] = 'N';
+	}
+
+	mutex_unlock(&info->lock);
+	buf[1] = '\n';
+	buf[2] = 0x00;
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
+		size_t count, loff_t *ppos)
+{
+	struct dmatest_info *info = file->private_data;
+	char buf[16];
+	bool bv;
+	int ret = 0;
+
+	if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
+		return -EFAULT;
+
+	if (strtobool(buf, &bv) == 0) {
+		mutex_lock(&info->lock);
+		ret = __restart_threaded_test(info, bv);
+		mutex_unlock(&info->lock);
+	}
+
+	return ret ? ret : count;
+}
+
+static const struct file_operations dtf_run_fops = {
+	.read	= dtf_read_run,
+	.write	= dtf_write_run,
+	.open	= simple_open,
+	.llseek	= default_llseek,
+};
+
+static int dtf_results_show(struct seq_file *sf, void *data)
+{
+	struct dmatest_info *info = sf->private;
+	struct dmatest_result *result;
+	struct dmatest_thread_result *tr;
+	unsigned int i;
+
+	mutex_lock(&info->results_lock);
+	list_for_each_entry(result, &info->results, node) {
+		list_for_each_entry(tr, &result->results, node) {
+			seq_printf(sf, "%s\n",
+				thread_result_get(result->name, tr));
+			if (tr->type == DMATEST_ET_VERIFY_BUF) {
+				for (i = 0; i < tr->vr->error_count; i++) {
+					seq_printf(sf, "\t%s\n",
+						verify_result_get_one(tr->vr, i));
+				}
+			}
+		}
+	}
+
+	mutex_unlock(&info->results_lock);
+	return 0;
+}
+
+static int dtf_results_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dtf_results_show, inode->i_private);
+}
+
+static const struct file_operations dtf_results_fops = {
+	.open		= dtf_results_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dmatest_register_dbgfs(struct dmatest_info *info)
+{
+	struct dentry *d;
+	struct dmatest_params *params = &info->dbgfs_params;
+	int ret = -ENOMEM;
+
+	d = debugfs_create_dir("dmatest", NULL);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
+	if (!d)
+		goto err_root;
+
+	info->root = d;
+
+	/* Copy initial values */
+	memcpy(params, &info->params, sizeof(*params));
+
+	/* Test parameters */
+
+	d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->buf_size);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
+				info, &dtf_channel_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
+				info, &dtf_device_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->threads_per_chan);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->max_channels);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->iterations);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->xor_sources);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->pq_sources);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
+			       (u32 *)&params->timeout);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	/* Run or stop threaded test */
+	d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
+				info, &dtf_run_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	/* Results of test in progress */
+	d = debugfs_create_file("results", S_IRUGO, info->root, info,
+				&dtf_results_fops);
+	if (IS_ERR_OR_NULL(d))
+		goto err_node;
+
+	return 0;
+
+err_node:
+	debugfs_remove_recursive(info->root);
+err_root:
+	pr_err("dmatest: Failed to initialize debugfs\n");
+	return ret;
+}
+
+static int __init dmatest_init(void)
+{
+	struct dmatest_info *info = &test_info;
+	struct dmatest_params *params = &info->params;
+	int ret;
+
+	memset(info, 0, sizeof(*info));
+
+	mutex_init(&info->lock);
+	INIT_LIST_HEAD(&info->channels);
+
+	mutex_init(&info->results_lock);
+	INIT_LIST_HEAD(&info->results);
+
+	/* Set default parameters */
+	params->buf_size = test_buf_size;
+	strlcpy(params->channel, test_channel, sizeof(params->channel));
+	strlcpy(params->device, test_device, sizeof(params->device));
+	params->threads_per_chan = threads_per_chan;
+	params->max_channels = max_channels;
+	params->iterations = iterations;
+	params->xor_sources = xor_sources;
+	params->pq_sources = pq_sources;
+	params->timeout = timeout;
+
+	ret = dmatest_register_dbgfs(info);
+	if (ret)
+		return ret;
+
+#ifdef MODULE
+	return 0;
+#else
+	return run_threaded_test(info);
+#endif
+}
 /* when compiled-in wait for drivers to load first */
 late_initcall(dmatest_init);
 
 static void __exit dmatest_exit(void)
 {
-	struct dmatest_chan *dtc, *_dtc;
-	struct dma_chan *chan;
+	struct dmatest_info *info = &test_info;
 
-	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
-		list_del(&dtc->node);
-		chan = dtc->chan;
-		dmatest_cleanup_channel(dtc);
-		pr_debug("dmatest: dropped channel %s\n",
-			 dma_chan_name(chan));
-		dma_release_channel(chan);
-	}
+	debugfs_remove_recursive(info->root);
+	stop_threaded_test(info);
+	result_free(info, NULL);
 }
 module_exit(dmatest_exit);
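
The reworked test threads still follow the stock kthread pattern: spawned with kthread_run(), polling kthread_should_stop() in the work loop, and stopped from the debugfs "run" handler via kthread_stop(). That lifecycle, stripped to a sketch (names are illustrative):

	#include <linux/kthread.h>

	static int test_func(void *data)
	{
		while (!kthread_should_stop()) {
			/* one test iteration */
		}
		return 0;
	}

	/* start: task name mirrors dmatest's "<chan>-<op><n>" convention */
	struct task_struct *task = kthread_run(test_func, NULL, "dma0chan0-copy0");

	/* stop: wakes the thread and waits until it exits */
	if (!IS_ERR(task))
		kthread_stop(task);
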
 
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 43a5329..2e5deaa 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -25,6 +25,8 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
 
 #include "dw_dmac_regs.h"
 #include "dmaengine.h"
@@ -49,29 +51,22 @@
 	return slave ? slave->src_master : 1;
 }
 
-#define SRC_MASTER	0
-#define DST_MASTER	1
-
-static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
+static inline void dwc_set_masters(struct dw_dma_chan *dwc)
 {
-	struct dw_dma *dw = to_dw_dma(chan->device);
-	struct dw_dma_slave *dws = chan->private;
-	unsigned int m;
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_dma_slave *dws = dwc->chan.private;
+	unsigned char mmax = dw->nr_masters - 1;
 
-	if (master == SRC_MASTER)
-		m = dwc_get_sms(dws);
-	else
-		m = dwc_get_dms(dws);
-
-	return min_t(unsigned int, dw->nr_masters - 1, m);
+	if (dwc->request_line == ~0) {
+		dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
+		dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
+	}
 }
 
 #define DWC_DEFAULT_CTLLO(_chan) ({				\
 		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
 		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
 		bool _is_slave = is_slave_direction(_dwc->direction);	\
-		int _dms = dwc_get_master(_chan, DST_MASTER);		\
-		int _sms = dwc_get_master(_chan, SRC_MASTER);		\
 		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
 			DW_DMA_MSIZE_16;			\
 		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
@@ -81,8 +76,8 @@
 		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
 		 | DWC_CTLL_LLP_D_EN				\
 		 | DWC_CTLL_LLP_S_EN				\
-		 | DWC_CTLL_DMS(_dms)				\
-		 | DWC_CTLL_SMS(_sms));				\
+		 | DWC_CTLL_DMS(_dwc->dst_master)		\
+		 | DWC_CTLL_SMS(_dwc->src_master));		\
 	})
 
 /*
@@ -92,13 +87,6 @@
  */
 #define NR_DESCS_PER_CHANNEL	64
 
-static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
-{
-	struct dw_dma *dw = to_dw_dma(chan->device);
-
-	return dw->data_width[dwc_get_master(chan, master)];
-}
-
 /*----------------------------------------------------------------------*/
 
 static struct device *chan2dev(struct dma_chan *chan)
@@ -172,13 +160,7 @@
 	if (dwc->initialized == true)
 		return;
 
-	if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
-		/* autoconfigure based on request line from DT */
-		if (dwc->direction == DMA_MEM_TO_DEV)
-			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
-		else if (dwc->direction == DMA_DEV_TO_MEM)
-			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
-	} else if (dws) {
+	if (dws) {
 		/*
 		 * We need controller-specific data to set up slave
 		 * transfers.
@@ -189,9 +171,9 @@
 		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
 	} else {
 		if (dwc->direction == DMA_MEM_TO_DEV)
-			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
+			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
 		else if (dwc->direction == DMA_DEV_TO_MEM)
-			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
+			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
 	}
 
 	channel_writel(dwc, CFG_LO, cfglo);
@@ -473,16 +455,16 @@
 			(unsigned long long)llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
-		/* initial residue value */
+		/* Initial residue value */
 		dwc->residue = desc->total_len;
 
-		/* check first descriptors addr */
+		/* Check first descriptor's address */
 		if (desc->txd.phys == llp) {
 			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
 		}
 
-		/* check first descriptors llp */
+		/* Check first descriptor's llp */
 		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
 			dwc->residue -= dwc_get_sent(dwc);
@@ -588,7 +570,7 @@
 }
 EXPORT_SYMBOL(dw_dma_get_dst_addr);
 
-/* called with dwc->lock held and all DMAC interrupts disabled */
+/* Called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		u32 status_err, u32 status_xfer)
 {
@@ -626,7 +608,7 @@
 
 		dwc_chan_disable(dw, dwc);
 
-		/* make sure DMA does not restart by loading a new list */
+		/* Make sure DMA does not restart by loading a new list */
 		channel_writel(dwc, LLP, 0);
 		channel_writel(dwc, CTL_LO, 0);
 		channel_writel(dwc, CTL_HI, 0);
@@ -745,6 +727,7 @@
 		size_t len, unsigned long flags)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc;
 	struct dw_desc		*first;
 	struct dw_desc		*prev;
@@ -767,8 +750,8 @@
 
 	dwc->direction = DMA_MEM_TO_MEM;
 
-	data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
-			   dwc_get_data_width(chan, DST_MASTER));
+	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
+			   dw->data_width[dwc->dst_master]);
 
 	src_width = dst_width = min_t(unsigned int, data_width,
 				      dwc_fast_fls(src | dest | len));
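
Worked example, assuming dwc_fast_fls() yields the log2 of the common alignment of its argument (the result is further capped by the narrower of the two masters' data widths, per the min_t() above): src = 0x1000, dest = 0x2004 and len = 0x100 give src | dest | len = 0x3104, whose lowest set bit is bit 2, so src_width = dst_width = 2, i.e. 4-byte accesses.
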
@@ -826,6 +809,7 @@
 		unsigned long flags, void *context)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
 	struct dw_desc		*prev;
 	struct dw_desc		*first;
@@ -859,7 +843,7 @@
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 			DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 
-		data_width = dwc_get_data_width(chan, SRC_MASTER);
+		data_width = dw->data_width[dwc->src_master];
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
@@ -919,7 +903,7 @@
 		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 			DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 
-		data_width = dwc_get_data_width(chan, DST_MASTER);
+		data_width = dw->data_width[dwc->dst_master];
 
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc	*desc;
@@ -1001,13 +985,6 @@
 		*maxburst = 0;
 }
 
-static inline void convert_slave_id(struct dw_dma_chan *dwc)
-{
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-
-	dwc->dma_sconfig.slave_id -= dw->request_line_base;
-}
-
 static int
 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
@@ -1020,9 +997,12 @@
 	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
 	dwc->direction = sconfig->direction;
 
+	/* Take the request line from slave_id member */
+	if (dwc->request_line == ~0)
+		dwc->request_line = sconfig->slave_id;
+
 	convert_burst(&dwc->dma_sconfig.src_maxburst);
 	convert_burst(&dwc->dma_sconfig.dst_maxburst);
-	convert_slave_id(dwc);
 
 	return 0;
 }
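
With convert_slave_id() gone, a slave driver that carries no DT/ACPI description can still hand the hardware request line to dw_dmac through the generic slave config; the value is only taken when no xlate or filter has set dwc->request_line first. A hedged client-side sketch (the FIFO address and request line 3 are illustrative):

	#include <linux/dmaengine.h>

	static int client_config(struct dma_chan *chan, dma_addr_t fifo)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 8,
			.slave_id	= 3,	/* hardware handshake line */
		};

		return dmaengine_slave_config(chan, &cfg);
	}
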
@@ -1030,10 +1010,11 @@
 static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
 {
 	u32 cfglo = channel_readl(dwc, CFG_LO);
+	unsigned int count = 20;	/* timeout iterations */
 
 	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
-	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
-		cpu_relax();
+	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
+		udelay(2);
 
 	dwc->paused = true;
 }
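
The pause path is now bounded at roughly 20 × 2 µs instead of spinning forever on a FIFO that may never report empty. The same bounded-poll idiom, reduced to its core (condition_met() is a stand-in):

	unsigned int count = 20;	/* ~40 us worst case */

	while (!condition_met() && count--)
		udelay(2);

	/* proceed either way; the caller tolerates an undrained FIFO */
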
@@ -1169,6 +1150,8 @@
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	dwc_set_masters(dwc);
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -1226,6 +1209,7 @@
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
 	dwc->initialized = false;
+	dwc->request_line = ~0;
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1241,42 +1225,36 @@
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
-struct dw_dma_filter_args {
+/*----------------------------------------------------------------------*/
+
+struct dw_dma_of_filter_args {
 	struct dw_dma *dw;
 	unsigned int req;
 	unsigned int src;
 	unsigned int dst;
 };
 
-static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+static bool dw_dma_of_filter(struct dma_chan *chan, void *param)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(chan->device);
-	struct dw_dma_filter_args *fargs = param;
-	struct dw_dma_slave *dws = &dwc->slave;
+	struct dw_dma_of_filter_args *fargs = param;
 
-	/* ensure the device matches our channel */
+	/* Ensure the device matches our channel */
 	if (chan->device != &fargs->dw->dma)
 		return false;
 
-	dws->dma_dev	= dw->dma.dev;
-	dws->cfg_hi	= ~0;
-	dws->cfg_lo	= ~0;
-	dws->src_master	= fargs->src;
-	dws->dst_master	= fargs->dst;
-
 	dwc->request_line = fargs->req;
-
-	chan->private = dws;
+	dwc->src_master	= fargs->src;
+	dwc->dst_master	= fargs->dst;
 
 	return true;
 }
 
-static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
-					 struct of_dma *ofdma)
+static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
+					struct of_dma *ofdma)
 {
 	struct dw_dma *dw = ofdma->of_dma_data;
-	struct dw_dma_filter_args fargs = {
+	struct dw_dma_of_filter_args fargs = {
 		.dw = dw,
 	};
 	dma_cap_mask_t cap;
@@ -1297,9 +1275,49 @@
 	dma_cap_set(DMA_SLAVE, cap);
 
 	/* TODO: there should be a simpler way to do this */
-	return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
+	return dma_request_channel(cap, dw_dma_of_filter, &fargs);
 }
 
+#ifdef CONFIG_ACPI
+static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+	struct acpi_dma_spec *dma_spec = param;
+
+	if (chan->device->dev != dma_spec->dev ||
+	    chan->chan_id != dma_spec->chan_id)
+		return false;
+
+	dwc->request_line = dma_spec->slave_id;
+	dwc->src_master = dwc_get_sms(NULL);
+	dwc->dst_master = dwc_get_dms(NULL);
+
+	return true;
+}
+
+static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+{
+	struct device *dev = dw->dma.dev;
+	struct acpi_dma_filter_info *info;
+	int ret;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return;
+
+	dma_cap_zero(info->dma_cap);
+	dma_cap_set(DMA_SLAVE, info->dma_cap);
+	info->filter_fn = dw_dma_acpi_filter;
+
+	ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+						info);
+	if (ret)
+		dev_err(dev, "could not register acpi_dma_controller\n");
+}
+#else /* !CONFIG_ACPI */
+static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+#endif /* !CONFIG_ACPI */
+
 /* --------------------- Cyclic DMA API extensions -------------------- */
 
 /**
@@ -1322,7 +1340,7 @@
 
 	spin_lock_irqsave(&dwc->lock, flags);
 
-	/* assert channel is idle */
+	/* Assert channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
 		dev_err(chan2dev(&dwc->chan),
 			"BUG: Attempted to start non-idle channel\n");
@@ -1334,7 +1352,7 @@
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
-	/* setup DMAC channel registers */
+	/* Setup DMAC channel registers */
 	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
 	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
 	channel_writel(dwc, CTL_HI, 0);
@@ -1501,7 +1519,7 @@
 		last = desc;
 	}
 
-	/* lets make a cyclic list */
+	/* Let's make a cyclic list */
 	last->lli.llp = cdesc->desc[0]->txd.phys;
 
 	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
@@ -1636,7 +1654,6 @@
 
 static int dw_probe(struct platform_device *pdev)
 {
-	const struct platform_device_id *match;
 	struct dw_dma_platform_data *pdata;
 	struct resource		*io;
 	struct dw_dma		*dw;
@@ -1706,7 +1723,7 @@
 
 	dw->regs = regs;
 
-	/* get hardware configuration parameters */
+	/* Get hardware configuration parameters */
 	if (autocfg) {
 		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
 
@@ -1720,18 +1737,13 @@
 		memcpy(dw->data_width, pdata->data_width, 4);
 	}
 
-	/* Get the base request line if set */
-	match = platform_get_device_id(pdev);
-	if (match)
-		dw->request_line_base = (unsigned int)match->driver_data;
-
 	/* Calculate all channel mask before DMA setup */
 	dw->all_chan_mask = (1 << nr_channels) - 1;
 
-	/* force dma off, just in case */
+	/* Force dma off, just in case */
 	dw_dma_off(dw);
 
-	/* disable BLOCK interrupts as well */
+	/* Disable BLOCK interrupts as well */
 	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 
 	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
@@ -1741,7 +1753,7 @@
 
 	platform_set_drvdata(pdev, dw);
 
-	/* create a pool of consistent memory blocks for hardware descriptors */
+	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
 					 sizeof(struct dw_desc), 4, 0);
 	if (!dw->desc_pool) {
@@ -1781,8 +1793,9 @@
 		channel_clear_bit(dw, CH_EN, dwc->mask);
 
 		dwc->direction = DMA_TRANS_NONE;
+		dwc->request_line = ~0;
 
-		/* hardware configuration */
+		/* Hardware configuration */
 		if (autocfg) {
 			unsigned int dwc_params;
 
@@ -1842,12 +1855,15 @@
 
 	if (pdev->dev.of_node) {
 		err = of_dma_controller_register(pdev->dev.of_node,
-						 dw_dma_xlate, dw);
-		if (err && err != -ENODEV)
+						 dw_dma_of_xlate, dw);
+		if (err)
 			dev_err(&pdev->dev,
 				"could not register of_dma_controller\n");
 	}
 
+	if (ACPI_HANDLE(&pdev->dev))
+		dw_dma_acpi_controller_register(dw);
+
 	return 0;
 }
 
@@ -1912,18 +1928,19 @@
 };
 
 #ifdef CONFIG_OF
-static const struct of_device_id dw_dma_id_table[] = {
+static const struct of_device_id dw_dma_of_id_table[] = {
 	{ .compatible = "snps,dma-spear1340" },
 	{}
 };
-MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
 #endif
 
-static const struct platform_device_id dw_dma_ids[] = {
-	/* Name,	Request Line Base */
-	{ "INTL9C60",	(kernel_ulong_t)16 },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw_dma_acpi_id_table[] = {
+	{ "INTL9C60", 0 },
 	{ }
 };
+#endif
 
 static struct platform_driver dw_driver = {
 	.probe		= dw_probe,
@@ -1932,9 +1949,9 @@
 	.driver = {
 		.name	= "dw_dmac",
 		.pm	= &dw_dev_pm_ops,
-		.of_match_table = of_match_ptr(dw_dma_id_table),
+		.of_match_table = of_match_ptr(dw_dma_of_id_table),
+		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
 	},
-	.id_table	= dw_dma_ids,
 };
 
 static int __init dw_init(void)
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 4d02c36..9d41720 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -212,8 +212,11 @@
 	/* hardware configuration */
 	unsigned int		block_size;
 	bool			nollp;
+
+	/* custom slave configuration */
 	unsigned int		request_line;
-	struct dw_dma_slave	slave;
+	unsigned char		src_master;
+	unsigned char		dst_master;
 
 	/* configuration passed via DMA_SLAVE_CONFIG */
 	struct dma_slave_config dma_sconfig;
@@ -247,7 +250,6 @@
 	/* hardware configuration */
 	unsigned char		nr_masters;
 	unsigned char		data_width[4];
-	unsigned int		request_line_base;
 
 	struct dw_dma_chan	chan[0];
 };
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 70b8975..f285833 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -859,8 +859,7 @@
 
 	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
 
-	if (imxdmac->sg_list)
-		kfree(imxdmac->sg_list);
+	kfree(imxdmac->sg_list);
 
 	imxdmac->sg_list = kcalloc(periods + 1,
 			sizeof(struct scatterlist), GFP_KERNEL);
@@ -1145,7 +1144,7 @@
 	return ret;
 }
 
-static int __exit imxdma_remove(struct platform_device *pdev)
+static int imxdma_remove(struct platform_device *pdev)
 {
 	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
 
@@ -1162,7 +1161,7 @@
 		.name	= "imx-dma",
 	},
 	.id_table	= imx_dma_devtype,
-	.remove		= __exit_p(imxdma_remove),
+	.remove		= imxdma_remove,
 };
 
 static int __init imxdma_module_init(void)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f082aa3..092867b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@
 	return ret;
 }
 
-static int __exit sdma_remove(struct platform_device *pdev)
+static int sdma_remove(struct platform_device *pdev)
 {
 	return -EBUSY;
 }
@@ -1473,7 +1473,7 @@
 		.of_match_table = sdma_dt_ids,
 	},
 	.id_table	= sdma_devtypes,
-	.remove		= __exit_p(sdma_remove),
+	.remove		= sdma_remove,
 };
 
 static int __init sdma_module_init(void)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1879a59..17a2393 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -892,7 +892,7 @@
  * ioat_dma_setup_interrupts - setup interrupt handler
  * @device: ioat device
  */
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_dma_setup_interrupts(struct ioatdma_device *device)
 {
 	struct ioat_chan_common *chan;
 	struct pci_dev *pdev = device->pdev;
@@ -941,6 +941,7 @@
 		}
 	}
 	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+	device->irq_mode = IOAT_MSIX;
 	goto done;
 
 msix_single_vector:
@@ -956,6 +957,7 @@
 		pci_disable_msix(pdev);
 		goto msi;
 	}
+	device->irq_mode = IOAT_MSIX_SINGLE;
 	goto done;
 
 msi:
@@ -969,6 +971,7 @@
 		pci_disable_msi(pdev);
 		goto intx;
 	}
+	device->irq_mode = IOAT_MSI;
 	goto done;
 
 intx:
@@ -977,6 +980,7 @@
 	if (err)
 		goto err_no_irq;
 
+	device->irq_mode = IOAT_INTX;
 done:
 	if (device->intr_quirk)
 		device->intr_quirk(device);
@@ -987,9 +991,11 @@
 err_no_irq:
 	/* Disable all interrupt generation */
 	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	device->irq_mode = IOAT_NOIRQ;
 	dev_err(dev, "no usable interrupts\n");
 	return err;
 }
+EXPORT_SYMBOL(ioat_dma_setup_interrupts);
 
 static void ioat_disable_interrupts(struct ioatdma_device *device)
 {
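
Recording irq_mode lets later reset or teardown code undo exactly the setup variant that succeeded. A hypothetical consumer, sketched against the enum added in dma.h (the matching free_irq()/devm bookkeeping is elided):

	switch (device->irq_mode) {
	case IOAT_MSIX:
	case IOAT_MSIX_SINGLE:
		pci_disable_msix(pdev);
		break;
	case IOAT_MSI:
		pci_disable_msi(pdev);
		break;
	case IOAT_INTX:
	case IOAT_NOIRQ:
		break;		/* nothing extra to undo */
	}
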
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 53a4cbb..54fb7b9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -39,6 +39,7 @@
 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
 #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
 
 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
 
@@ -48,6 +49,14 @@
  */
 #define NULL_DESC_BUFFER_SIZE 1
 
+enum ioat_irq_mode {
+	IOAT_NOIRQ = 0,
+	IOAT_MSIX,
+	IOAT_MSIX_SINGLE,
+	IOAT_MSI,
+	IOAT_INTX
+};
+
 /**
  * struct ioatdma_device - internal representation of a IOAT device
  * @pdev: PCI-Express device
@@ -72,11 +81,16 @@
 	void __iomem *reg_base;
 	struct pci_pool *dma_pool;
 	struct pci_pool *completion_pool;
+#define MAX_SED_POOLS	5
+	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
+	struct kmem_cache *sed_pool;
 	struct dma_device common;
 	u8 version;
 	struct msix_entry msix_entries[4];
 	struct ioat_chan_common *idx[4];
 	struct dca_provider *dca;
+	enum ioat_irq_mode irq_mode;
+	u32 cap;
 	void (*intr_quirk)(struct ioatdma_device *device);
 	int (*enumerate_channels)(struct ioatdma_device *device);
 	int (*reset_hw)(struct ioat_chan_common *chan);
@@ -131,6 +145,20 @@
 	u16 active;
 };
 
+/**
+ * struct ioat_sed_ent - wrapper around super extended hardware descriptor
+ * @hw: hardware SED
+ * @sed_dma: dma address for the SED
+ * @list: list member
+ * @parent: point to the dma descriptor that's the parent
+ */
+struct ioat_sed_ent {
+	struct ioat_sed_raw_descriptor *hw;
+	dma_addr_t dma;
+	struct ioat_ring_ent *parent;
+	unsigned int hw_pool;
+};
+
 static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
 {
 	return container_of(c, struct ioat_chan_common, common);
@@ -179,7 +207,7 @@
 	struct device *dev = to_dev(chan);
 
 	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
-		" ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
 		(unsigned long long) tx->phys,
 		(unsigned long long) hw->next, tx->cookie, tx->flags,
 		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
@@ -201,7 +229,7 @@
 	return device->idx[index];
 }
 
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
 {
 	u8 ver = chan->device->version;
 	u64 status;
@@ -218,6 +246,26 @@
 	return status;
 }
 
+#if BITS_PER_LONG == 64
+
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+	u8 ver = chan->device->version;
+	u64 status;
+
+	 /* With IOAT v3.3 the status register is 64bit.  */
+	if (ver >= IOAT_VER_3_3)
+		status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+	else
+		status = ioat_chansts_32(chan);
+
+	return status;
+}
+
+#else
+#define ioat_chansts ioat_chansts_32
+#endif
+
 static inline void ioat_start(struct ioat_chan_common *chan)
 {
 	u8 ver = chan->device->version;
@@ -321,6 +369,7 @@
 			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
+int ioat_dma_setup_interrupts(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
 extern struct ioat_sysfs_entry ioat_version_attr;
 extern struct ioat_sysfs_entry ioat_cap_attr;
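
The ioat_chansts() split above exists because readq() is only natively available on 64-bit builds: IOAT v3.3 makes CHANSTS a true 64-bit register, so 64-bit kernels read it in one access while 32-bit kernels keep the legacy two-read path. The generic shape of the idiom, as a sketch (note that two 32-bit reads are not atomic against a concurrently updating device):

	static inline u64 example_read64(void __iomem *reg)
	{
	#if BITS_PER_LONG == 64
		return readq(reg);
	#else
		return ((u64)readl(reg + 4) << 32) | readl(reg);
	#endif
	}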
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index e100f64..29bf944 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -137,6 +137,7 @@
 	#ifdef DEBUG
 	int id;
 	#endif
+	struct ioat_sed_ent *sed;
 };
 
 static inline struct ioat_ring_ent *
@@ -157,6 +158,7 @@
 
 int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
 int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+void ioat3_dma_remove(struct ioatdma_device *dev);
 struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index e8336cc..ca6ea9b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -55,7 +55,7 @@
 /*
  * Support routines for v3+ hardware
  */
-
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
 #include <linux/dmaengine.h>
@@ -70,6 +70,10 @@
 /* ioat hardware assumes at least two sources for raid operations */
 #define src_cnt_to_sw(x) ((x) + 2)
 #define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
 
 /* provide a lookup table for setting the source address in the base or
  * extended descriptor of an xor or pq descriptor
@@ -77,7 +81,20 @@
 static const u8 xor_idx_to_desc = 0xe0;
 static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
 static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+				       2, 2, 2, 2, 2, 2, 2 };
 static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+					0, 1, 2, 3, 4, 5, 6 };
+
+/*
+ * technically sources 1 and 2 do not require SED, but the op will have
+ * at least 9 descriptors so that's irrelevant.
+ */
+static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				      1, 1, 1, 1, 1, 1, 1 };
+
+static void ioat3_eh(struct ioat2_dma_chan *ioat);
 
 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
 {
@@ -101,6 +118,13 @@
 	return raw->field[pq_idx_to_field[idx]];
 }
 
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	return raw->field[pq16_idx_to_field[idx]];
+}
+
 static void pq_set_src(struct ioat_raw_descriptor *descs[2],
 		       dma_addr_t addr, u32 offset, u8 coef, int idx)
 {
@@ -111,6 +135,167 @@
 	pq->coef[idx] = coef;
 }
 
+static int sed_get_pq16_pool_idx(int src_cnt)
+{
+
+	return pq16_idx_to_sed[src_cnt];
+}
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+		is_hsw_ioat(pdev);
+}
+
+static bool is_bwd_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+			dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+	struct ioat_pq16a_descriptor *pq16 =
+		(struct ioat_pq16a_descriptor *)desc[1];
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+	if (idx < 8)
+		pq->coef[idx] = coef;
+	else
+		pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
+{
+	struct ioat_sed_ent *sed;
+	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+	sed = kmem_cache_alloc(device->sed_pool, flags);
+	if (!sed)
+		return NULL;
+
+	sed->hw_pool = hw_pool;
+	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
+				 flags, &sed->dma);
+	if (!sed->hw) {
+		kmem_cache_free(device->sed_pool, sed);
+		return NULL;
+	}
+
+	return sed;
+}
+
+static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
+{
+	if (!sed)
+		return;
+
+	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+	kmem_cache_free(device->sed_pool, sed);
+}
+
 static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
 			    struct ioat_ring_ent *desc, int idx)
 {
@@ -223,6 +408,54 @@
 		}
 		break;
 	}
+	case IOAT_OP_PQ_16S:
+	case IOAT_OP_PQ_VAL_16S: {
+		struct ioat_pq_descriptor *pq = desc->pq;
+		int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+		struct ioat_raw_descriptor *descs[4];
+		int i;
+
+		/* in the 'continue' case don't unmap the dests as sources */
+		if (dmaf_p_disabled_continue(flags))
+			src_cnt--;
+		else if (dmaf_continue(flags))
+			src_cnt -= 3;
+
+		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			descs[0] = (struct ioat_raw_descriptor *)pq;
+			descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
+			descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
+			for (i = 0; i < src_cnt; i++) {
+				dma_addr_t src = pq16_get_src(descs, i);
+
+				ioat_unmap(pdev, src - offset, len,
+					   PCI_DMA_TODEVICE, flags, 0);
+			}
+
+			/* the dests are sources in pq validate operations */
+			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
+				if (!(flags & DMA_PREP_PQ_DISABLE_P))
+					ioat_unmap(pdev, pq->p_addr - offset,
+						   len, PCI_DMA_TODEVICE,
+						   flags, 0);
+				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+					ioat_unmap(pdev, pq->q_addr - offset,
+						   len, PCI_DMA_TODEVICE,
+						   flags, 0);
+				break;
+			}
+		}
+
+		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			if (!(flags & DMA_PREP_PQ_DISABLE_P))
+				ioat_unmap(pdev, pq->p_addr - offset, len,
+					   PCI_DMA_BIDIRECTIONAL, flags, 1);
+			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+				ioat_unmap(pdev, pq->q_addr - offset, len,
+					   PCI_DMA_BIDIRECTIONAL, flags, 1);
+		}
+		break;
+	}
 	default:
 		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
 			__func__, desc->hw->ctl_f.op);
@@ -250,6 +483,63 @@
 	return false;
 }
 
+static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
+{
+	u64 phys_complete;
+	u64 completion;
+
+	completion = *chan->completion;
+	phys_complete = ioat_chansts_to_addr(completion);
+
+	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
+	return phys_complete;
+}
+
+static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
+				   u64 *phys_complete)
+{
+	*phys_complete = ioat3_get_current_completion(chan);
+	if (*phys_complete == chan->last_completion)
+		return false;
+
+	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	return true;
+}
+
+static void
+desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+	{
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		/* check if there's error written */
+		if (!pq->dwbes_f.wbes)
+			return;
+
+		/* need to set a chanerr var for checking to clear later */
+
+		if (pq->dwbes_f.p_val_err)
+			*desc->result |= SUM_CHECK_P_RESULT;
+
+		if (pq->dwbes_f.q_val_err)
+			*desc->result |= SUM_CHECK_Q_RESULT;
+
+		return;
+	}
+	default:
+		return;
+	}
+}
+
 /**
  * __cleanup - reclaim used descriptors
  * @ioat: channel (ring) to clean
@@ -260,6 +550,7 @@
 static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *device = chan->device;
 	struct ioat_ring_ent *desc;
 	bool seen_current = false;
 	int idx = ioat->tail, i;
@@ -268,6 +559,16 @@
 	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
 		__func__, ioat->head, ioat->tail, ioat->issued);
 
+	/*
+	 * At restart of the channel, the completion address and the
+	 * channel status will be 0 due to starting a new chain. Since
+	 * it's a new chain and the first descriptor "fails", there is
+	 * nothing to clean up. We do not want to reap the entire submitted
+	 * chain due to this 0 address value and then BUG.
+	 */
+	if (!phys_complete)
+		return;
+
 	active = ioat2_ring_active(ioat);
 	for (i = 0; i < active && !seen_current; i++) {
 		struct dma_async_tx_descriptor *tx;
@@ -276,6 +577,11 @@
 		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
 		desc = ioat2_get_ring_ent(ioat, idx + i);
 		dump_desc_dbg(ioat, desc);
+
+		/* set err stat if we are using dwbes */
+		if (device->cap & IOAT_CAP_DWBES)
+			desc_get_errstat(ioat, desc);
+
 		tx = &desc->txd;
 		if (tx->cookie) {
 			dma_cookie_complete(tx);
@@ -294,6 +600,12 @@
 			BUG_ON(i + 1 >= active);
 			i++;
 		}
+
+		/* cleanup super extended descriptors */
+		if (desc->sed) {
+			ioat3_free_sed(device, desc->sed);
+			desc->sed = NULL;
+		}
 	}
 	smp_mb(); /* finish all descriptor reads before incrementing tail */
 	ioat->tail = idx + i;
@@ -314,11 +626,22 @@
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
+	u64 phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
+
+	if (ioat3_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
+
+	if (is_ioat_halted(*chan->completion)) {
+		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+			ioat3_eh(ioat);
+		}
+	}
+
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
@@ -333,15 +656,78 @@
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
+	u64 phys_complete;
 
 	ioat2_quiesce(chan, 0);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
+	if (ioat3_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
 
 	__ioat2_restart_chan(ioat);
 }
 
+static void ioat3_eh(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	struct pci_dev *pdev = to_pdev(chan);
+	struct ioat_dma_descriptor *hw;
+	u64 phys_complete;
+	struct ioat_ring_ent *desc;
+	u32 err_handled = 0;
+	u32 chanerr_int;
+	u32 chanerr;
+
+	/* cleanup so tail points to descriptor that caused the error */
+	if (ioat3_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+
+	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
+		__func__, chanerr, chanerr_int);
+
+	desc = ioat2_get_ring_ent(ioat, ioat->tail);
+	hw = desc->hw;
+	dump_desc_dbg(ioat, desc);
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_XOR_VAL:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		break;
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+			*desc->result |= SUM_CHECK_Q_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+		}
+		break;
+	}
+
+	/* fault on unhandled error or spurious halt */
+	if (chanerr ^ err_handled || chanerr == 0) {
+		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
+			__func__, chanerr, err_handled);
+		BUG();
+	}
+
+	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+	/* mark faulting descriptor as complete */
+	*chan->completion = desc->txd.phys;
+
+	spin_lock_bh(&ioat->prep_lock);
+	ioat3_restart_channel(ioat);
+	spin_unlock_bh(&ioat->prep_lock);
+}
+
 static void check_active(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
@@ -605,7 +991,8 @@
 	int i;
 
 	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
+		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
 		desc_id(desc), (unsigned long long) desc->txd.phys,
 		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
 		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
@@ -617,6 +1004,42 @@
 			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
 	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
 	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
+			       struct ioat_ring_ent *desc)
+{
+	struct device *dev = to_dev(&ioat->base);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_raw_descriptor *descs[] = { (void *)pq,
+						(void *)pq,
+						(void *)pq };
+	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	if (desc->sed) {
+		descs[1] = (void *)desc->sed->hw;
+		descs[2] = (void *)desc->sed->hw + 64;
+	}
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) pq->next,
+		desc->txd.flags, pq->size, pq->ctl,
+		pq->ctl_f.op, pq->ctl_f.int_en,
+		pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++) {
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq16_get_src(descs, i),
+			pq->coef[i]);
+	}
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
 }
 
 static struct dma_async_tx_descriptor *
@@ -627,6 +1050,7 @@
 {
 	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
 	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *device = chan->device;
 	struct ioat_ring_ent *compl_desc;
 	struct ioat_ring_ent *desc;
 	struct ioat_ring_ent *ext;
@@ -637,6 +1061,7 @@
 	u32 offset = 0;
 	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
 	int i, s, idx, with_ext, num_descs;
+	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
 
 	dev_dbg(to_dev(chan), "%s\n", __func__);
 	/* the engine requires at least two sources (we provide
@@ -662,7 +1087,7 @@
 	 * order.
 	 */
 	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat, num_descs+1) == 0)
+	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
 		idx = ioat->head;
 	else
 		return NULL;
@@ -700,6 +1125,9 @@
 		pq->q_addr = dst[1] + offset;
 		pq->ctl = 0;
 		pq->ctl_f.op = op;
+		/* we turn on descriptor write back error status */
+		if (device->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
 		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
 		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
 		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
@@ -716,26 +1144,140 @@
 	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
 	dump_pq_desc_dbg(ioat, desc, ext);
 
-	/* completion descriptor carries interrupt bit */
-	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-	hw = compl_desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	dump_desc_dbg(ioat, compl_desc);
+	if (!cb32) {
+		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		pq->ctl_f.compl_write = 1;
+		compl_desc = desc;
+	} else {
+		/* completion descriptor carries interrupt bit */
+		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+		hw = compl_desc->hw;
+		hw->ctl = 0;
+		hw->ctl_f.null = 1;
+		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		hw->ctl_f.compl_write = 1;
+		hw->size = NULL_DESC_BUFFER_SIZE;
+		dump_desc_dbg(ioat, compl_desc);
+	}
+
 
 	/* we leave the channel locked to ensure in order submission */
 	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
+__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+		       const dma_addr_t *dst, const dma_addr_t *src,
+		       unsigned int src_cnt, const unsigned char *scf,
+		       size_t len, unsigned long flags)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+	struct ioat_chan_common *chan = &ioat->base;
+	struct ioatdma_device *device = chan->device;
+	struct ioat_ring_ent *desc;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	u32 offset = 0;
+	u8 op;
+	int i, s, idx, num_descs;
+
+	/* this function only handles src_cnt 9 - 16 */
+	BUG_ON(src_cnt < 9);
+
+	/* this function is only called with 9-16 sources */
+	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+	dev_dbg(to_dev(chan), "%s\n", __func__);
+
+	num_descs = ioat2_xferlen_to_descs(ioat, len);
+
+	/*
+	 * 16 source pq is only available on cb3.3 and has no completion
+	 * write hw bug.
+	 */
+	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
+		idx = ioat->head;
+	else
+		return NULL;
+
+	i = 0;
+
+	do {
+		struct ioat_raw_descriptor *descs[4];
+		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+		desc = ioat2_get_ring_ent(ioat, idx + i);
+		pq = desc->pq;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+
+		desc->sed = ioat3_alloc_sed(device,
+					    sed_get_pq16_pool_idx(src_cnt));
+		if (!desc->sed) {
+			dev_err(to_dev(chan),
+				"%s: no free sed entries\n", __func__);
+			return NULL;
+		}
+
+		pq->sed_addr = desc->sed->dma;
+		desc->sed->parent = desc;
+
+		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+		descs[2] = (void *)descs[1] + 64;
+
+		for (s = 0; s < src_cnt; s++)
+			pq16_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq16_set_src(descs, dst[0], offset, 0, s++);
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+			pq16_set_src(descs, dst[1], offset, 0, s++);
+		}
+
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+		/* we turn on descriptor write back error status */
+		if (device->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while (++i < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* with cb3.3 we should be able to do completion w/o a null desc */
+	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	pq->ctl_f.compl_write = 1;
+
+	dump_pq16_desc_dbg(ioat, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
 ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	struct dma_device *dma = chan->device;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		dst[0] = dst[1];
@@ -755,11 +1297,20 @@
 		single_source_coef[0] = scf[0];
 		single_source_coef[1] = 0;
 
-		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
-					    single_source_coef, len, flags);
-	} else
-		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
-					    len, flags);
+		return (src_cnt > 8) && (dma->max_pq > 8) ?
+			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+					       2, single_source_coef, len,
+					       flags) :
+			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
+					     single_source_coef, len, flags);
+
+	} else {
+		return (src_cnt > 8) && (dma->max_pq > 8) ?
+			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+					       scf, len, flags) :
+			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+					     scf, len, flags);
+	}
 }
 
 struct dma_async_tx_descriptor *
@@ -767,6 +1318,8 @@
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	struct dma_device *dma = chan->device;
+
 	/* specify valid address for disabled result */
 	if (flags & DMA_PREP_PQ_DISABLE_P)
 		pq[0] = pq[1];
@@ -778,14 +1331,18 @@
 	 */
 	*pqres = 0;
 
-	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				    flags);
+	return (src_cnt > 8) && (dma->max_pq > 8) ?
+		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				     flags);
 }
 
 static struct dma_async_tx_descriptor *
 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 		 unsigned int src_cnt, size_t len, unsigned long flags)
 {
+	struct dma_device *dma = chan->device;
 	unsigned char scf[src_cnt];
 	dma_addr_t pq[2];
 
@@ -794,8 +1351,11 @@
 	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[1] = dst; /* specify valid address for disabled result */
 
-	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				    flags);
+	return (src_cnt > 8) && (dma->max_pq > 8) ?
+		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				     flags);
 }
 
 struct dma_async_tx_descriptor *
@@ -803,6 +1363,7 @@
 		     unsigned int src_cnt, size_t len,
 		     enum sum_check_flags *result, unsigned long flags)
 {
+	struct dma_device *dma = chan->device;
 	unsigned char scf[src_cnt];
 	dma_addr_t pq[2];
 
@@ -816,8 +1377,12 @@
 	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[1] = pq[0]; /* specify valid address for disabled result */
 
-	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
-				    len, flags);
+
+	return (src_cnt > 8) && (dma->max_pq > 8) ?
+		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+				       scf, len, flags) :
+		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+				     scf, len, flags);
 }
 
 static struct dma_async_tx_descriptor *
@@ -1167,6 +1732,56 @@
 	return 0;
 }
 
+static int ioat3_irq_reinit(struct ioatdma_device *device)
+{
+	int msixcnt = device->common.chancnt;
+	struct pci_dev *pdev = device->pdev;
+	int i;
+	struct msix_entry *msix;
+	struct ioat_chan_common *chan;
+	int err = 0;
+
+	switch (device->irq_mode) {
+	case IOAT_MSIX:
+
+		for (i = 0; i < msixcnt; i++) {
+			msix = &device->msix_entries[i];
+			chan = ioat_chan_by_index(device, i);
+			devm_free_irq(&pdev->dev, msix->vector, chan);
+		}
+
+		pci_disable_msix(pdev);
+		break;
+
+	case IOAT_MSIX_SINGLE:
+		msix = &device->msix_entries[0];
+		chan = ioat_chan_by_index(device, 0);
+		devm_free_irq(&pdev->dev, msix->vector, chan);
+		pci_disable_msix(pdev);
+		break;
+
+	case IOAT_MSI:
+		chan = ioat_chan_by_index(device, 0);
+		devm_free_irq(&pdev->dev, pdev->irq, chan);
+		pci_disable_msi(pdev);
+		break;
+
+	case IOAT_INTX:
+		chan = ioat_chan_by_index(device, 0);
+		devm_free_irq(&pdev->dev, pdev->irq, chan);
+		break;
+
+	default:
+		return 0;
+	}
+
+	device->irq_mode = IOAT_NOIRQ;
+
+	err = ioat_dma_setup_interrupts(device);
+
+	return err;
+}
+
 static int ioat3_reset_hw(struct ioat_chan_common *chan)
 {
 	/* throw away whatever the channel was doing and get it
@@ -1183,82 +1798,67 @@
 	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	/* clear any pending errors */
-	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+	if (device->version < IOAT_VER_3_3) {
+		/* clear any pending errors */
+		err = pci_read_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+		if (err) {
+			dev_err(&pdev->dev,
+				"channel error register unreachable\n");
+			return err;
+		}
+		pci_write_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+		 * (workaround for spurious config parity error after restart)
+		 */
+		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+			pci_write_config_dword(pdev,
+					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
+					       0x10);
+		}
+	}
+
+	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
 	if (err) {
-		dev_err(&pdev->dev, "channel error register unreachable\n");
+		dev_err(&pdev->dev, "Failed to reset!\n");
 		return err;
 	}
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
 
-	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-	 * (workaround for spurious config parity error after restart)
+	if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+		err = ioat3_irq_reinit(device);
+
+	return err;
+}
+
+static void ioat3_intr_quirk(struct ioatdma_device *device)
+{
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioat_chan_common *chan;
+	u32 errmask;
+
+	dma = &device->common;
+
+	/*
+	 * if we have descriptor write back error status, we mask the
+	 * error interrupts
 	 */
-	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
-		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
-	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
-static bool is_jf_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
-		return true;
-	default:
-		return false;
+	if (device->cap & IOAT_CAP_DWBES) {
+		list_for_each_entry(c, &dma->channels, device_node) {
+			chan = to_chan_common(c);
+			errmask = readl(chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+				   IOAT_CHANERR_XOR_Q_ERR;
+			writel(errmask, chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+		}
 	}
 }
 
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
-		return true;
-	default:
-		return false;
-	}
-
-}
-
 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1268,30 +1868,33 @@
 	struct ioat_chan_common *chan;
 	bool is_raid_device = false;
 	int err;
-	u32 cap;
 
 	device->enumerate_channels = ioat2_enumerate_channels;
 	device->reset_hw = ioat3_reset_hw;
 	device->self_test = ioat3_dma_self_test;
+	device->intr_quirk = ioat3_intr_quirk;
 	dma = &device->common;
 	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
 	dma->device_issue_pending = ioat2_issue_pending;
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
+	if (is_xeon_cb32(pdev))
 		dma->copy_align = 6;
 
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
-	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	if (is_bwd_noraid(pdev))
+		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
 	/* dca is incompatible with raid operations */
-	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
-		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
 
-	if (cap & IOAT_CAP_XOR) {
+	if (device->cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
 		dma->xor_align = 6;
@@ -1302,53 +1905,86 @@
 		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
 		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
 	}
-	if (cap & IOAT_CAP_PQ) {
+
+	if (device->cap & IOAT_CAP_PQ) {
 		is_raid_device = true;
-		dma_set_maxpq(dma, 8, 0);
-		dma->pq_align = 6;
 
-		dma_cap_set(DMA_PQ, dma->cap_mask);
 		dma->device_prep_dma_pq = ioat3_prep_pq;
-
-		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
 		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+		dma_cap_set(DMA_PQ, dma->cap_mask);
+		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
 
-		if (!(cap & IOAT_CAP_XOR)) {
-			dma->max_xor = 8;
-			dma->xor_align = 6;
+		if (device->cap & IOAT_CAP_RAID16SS) {
+			dma_set_maxpq(dma, 16, 0);
+			dma->pq_align = 0;
+		} else {
+			dma_set_maxpq(dma, 8, 0);
+			if (is_xeon_cb32(pdev))
+				dma->pq_align = 6;
+			else
+				dma->pq_align = 0;
+		}
 
-			dma_cap_set(DMA_XOR, dma->cap_mask);
+		if (!(device->cap & IOAT_CAP_XOR)) {
 			dma->device_prep_dma_xor = ioat3_prep_pqxor;
-
-			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
 			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
+			dma_cap_set(DMA_XOR, dma->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+			if (device->cap & IOAT_CAP_RAID16SS) {
+				dma->max_xor = 16;
+				dma->xor_align = 0;
+			} else {
+				dma->max_xor = 8;
+				if (is_xeon_cb32(pdev))
+					dma->xor_align = 6;
+				else
+					dma->xor_align = 0;
+			}
 		}
 	}
-	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+
+	if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
 		dma_cap_set(DMA_MEMSET, dma->cap_mask);
 		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
 	}
 
 
-	if (is_raid_device) {
-		dma->device_tx_status = ioat3_tx_status;
-		device->cleanup_fn = ioat3_cleanup_event;
-		device->timer_fn = ioat3_timer_event;
-	} else {
-		dma->device_tx_status = ioat_dma_tx_status;
-		device->cleanup_fn = ioat2_cleanup_event;
-		device->timer_fn = ioat2_timer_event;
+	dma->device_tx_status = ioat3_tx_status;
+	device->cleanup_fn = ioat3_cleanup_event;
+	device->timer_fn = ioat3_timer_event;
+
+	if (is_xeon_cb32(pdev)) {
+		dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+		dma->device_prep_dma_xor_val = NULL;
+
+		dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+		dma->device_prep_dma_pq_val = NULL;
 	}
 
-	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
-	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
-	dma->device_prep_dma_pq_val = NULL;
-	#endif
+	/* starting with CB3.3 super extended descriptors are supported */
+	if (device->cap & IOAT_CAP_RAID16SS) {
+		char pool_name[14];
+		int i;
 
-	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
-	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
-	dma->device_prep_dma_xor_val = NULL;
-	#endif
+		/* allocate sw descriptor pool for SED */
+		device->sed_pool = kmem_cache_create("ioat_sed",
+				sizeof(struct ioat_sed_ent), 0, 0, NULL);
+		if (!device->sed_pool)
+			return -ENOMEM;
+
+		for (i = 0; i < MAX_SED_POOLS; i++) {
+			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
+
+			/* allocate SED DMA pool */
+			device->sed_hw_pool[i] = dma_pool_create(pool_name,
+					&pdev->dev,
+					SED_SIZE * (i + 1), 64, 0);
+			if (!device->sed_hw_pool[i])
+				goto sed_pool_cleanup;
+
+		}
+	}
 
 	err = ioat_probe(device);
 	if (err)
@@ -1371,4 +2007,28 @@
 		device->dca = ioat3_dca_init(pdev, device->reg_base);
 
 	return 0;
+
+sed_pool_cleanup:
+	if (device->sed_pool) {
+		int i;
+		kmem_cache_destroy(device->sed_pool);
+
+		for (i = 0; i < MAX_SED_POOLS; i++)
+			if (device->sed_hw_pool[i])
+				dma_pool_destroy(device->sed_hw_pool[i]);
+	}
+
+	return -ENOMEM;
+}
+
+void ioat3_dma_remove(struct ioatdma_device *device)
+{
+	if (device->sed_pool) {
+		int i;
+		kmem_cache_destroy(device->sed_pool);
+
+		for (i = 0; i < MAX_SED_POOLS; i++)
+			if (device->sed_hw_pool[i])
+				dma_pool_destroy(device->sed_hw_pool[i]);
+	}
 }
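
The bulk of this file's additions implement 16-source PQ through super extended descriptors: the base descriptor holds the first sources and points at a SED carrying the rest, with software wrappers served from a kmem_cache and hardware bodies from per-size DMA pools (pool i hands out SED_SIZE * (i + 1) bytes, 64-byte aligned). A sketch of the alloc/submit/free pairing, condensed from __ioat3_prep_pq16_lock() and __cleanup(); error paths are elided:

	struct ioat_sed_ent *sed;

	sed = ioat3_alloc_sed(device, sed_get_pq16_pool_idx(src_cnt));
	if (sed) {
		pq->sed_addr = sed->dma;	/* hardware follows this link */
		sed->parent = desc;
		/* ... fill sources via pq16_set_src(), then submit ... */
		ioat3_free_sed(device, sed);	/* normally done by __cleanup() */
	}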
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 7cb74c6..5ee57d4 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -30,11 +30,6 @@
 #define IOAT_PCI_DID_SCNB       0x65FF
 #define IOAT_PCI_DID_SNB        0x402F
 
-#define IOAT_VER_1_2            0x12    /* Version 1.2 */
-#define IOAT_VER_2_0            0x20    /* Version 2.0 */
-#define IOAT_VER_3_0            0x30    /* Version 3.0 */
-#define IOAT_VER_3_2            0x32    /* Version 3.2 */
-
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
@@ -46,6 +41,29 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB8	0x0e2e
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB9	0x0e2f
 
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW0	0x2f20
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW1	0x2f21
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW2	0x2f22
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW3	0x2f23
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW4	0x2f24
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW5	0x2f25
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW6	0x2f26
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW7	0x2f27
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW8	0x2f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW9	0x2f2f
+
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD0	0x0C50
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD1	0x0C51
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD2	0x0C52
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD3	0x0C53
+
+#define IOAT_VER_1_2            0x12    /* Version 1.2 */
+#define IOAT_VER_2_0            0x20    /* Version 2.0 */
+#define IOAT_VER_3_0            0x30    /* Version 3.0 */
+#define IOAT_VER_3_2            0x32    /* Version 3.2 */
+#define IOAT_VER_3_3            0x33    /* Version 3.3 */
+
+
 int system_has_dca_enabled(struct pci_dev *pdev);
 
 struct ioat_dma_descriptor {
@@ -147,7 +165,17 @@
 };
 
 struct ioat_pq_descriptor {
-	uint32_t	size;
+	union {
+		uint32_t	size;
+		uint32_t	dwbes;
+		struct {
+			unsigned int rsvd:25;
+			unsigned int p_val_err:1;
+			unsigned int q_val_err:1;
+			unsigned int rsvd1:4;
+			unsigned int wbes:1;
+		} dwbes_f;
+	};
 	union {
 		uint32_t ctl;
 		struct {
@@ -162,9 +190,14 @@
 			unsigned int hint:1;
 			unsigned int p_disable:1;
 			unsigned int q_disable:1;
-			unsigned int rsvd:11;
+			unsigned int rsvd2:2;
+			unsigned int wb_en:1;
+			unsigned int prl_en:1;
+			unsigned int rsvd3:7;
 			#define IOAT_OP_PQ 0x89
 			#define IOAT_OP_PQ_VAL 0x8a
+			#define IOAT_OP_PQ_16S 0xa0
+			#define IOAT_OP_PQ_VAL_16S 0xa1
 			unsigned int op:8;
 		} ctl_f;
 	};
@@ -172,7 +205,10 @@
 	uint64_t	p_addr;
 	uint64_t	next;
 	uint64_t	src_addr2;
-	uint64_t	src_addr3;
+	union {
+		uint64_t	src_addr3;
+		uint64_t	sed_addr;
+	};
 	uint8_t		coef[8];
 	uint64_t	q_addr;
 };
@@ -221,4 +257,40 @@
 struct ioat_raw_descriptor {
 	uint64_t	field[8];
 };
+
+struct ioat_pq16a_descriptor {
+	uint8_t coef[8];
+	uint64_t src_addr3;
+	uint64_t src_addr4;
+	uint64_t src_addr5;
+	uint64_t src_addr6;
+	uint64_t src_addr7;
+	uint64_t src_addr8;
+	uint64_t src_addr9;
+};
+
+struct ioat_pq16b_descriptor {
+	uint64_t src_addr10;
+	uint64_t src_addr11;
+	uint64_t src_addr12;
+	uint64_t src_addr13;
+	uint64_t src_addr14;
+	uint64_t src_addr15;
+	uint64_t src_addr16;
+	uint64_t rsvd;
+};
+
+union ioat_sed_pq_descriptor {
+	struct ioat_pq16a_descriptor a;
+	struct ioat_pq16b_descriptor b;
+};
+
+#define SED_SIZE	64
+
+struct ioat_sed_raw_descriptor {
+	uint64_t	a[8];
+	uint64_t	b[8];
+	uint64_t	c[8];
+};
+
 #endif
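
The new union overlaying size with dwbes reflects the CB3.3 "descriptor write back error status" feature: when wb_en is set, hardware rewrites the first dword of a completed PQ-validate descriptor with per-result error flags, which software must decode before reusing the descriptor. A sketch of that decoding, mirroring desc_get_errstat() in dma_v3.c:

	static void example_check_dwbes(const struct ioat_pq_descriptor *pq,
					enum sum_check_flags *result)
	{
		if (!pq->dwbes_f.wbes)	/* hardware wrote no status back */
			return;
		if (pq->dwbes_f.p_val_err)
			*result |= SUM_CHECK_P_RESULT;
		if (pq->dwbes_f.q_val_err)
			*result |= SUM_CHECK_Q_RESULT;
	}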
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 71c7ecd..2c8d560 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -94,6 +94,23 @@
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+	/* I/OAT v3.3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -190,6 +207,9 @@
 	if (!device)
 		return;
 
+	if (device->version >= IOAT_VER_3_0)
+		ioat3_dma_remove(device);
+
 	dev_err(&pdev->dev, "Removing dma and dca services\n");
 	if (device->dca) {
 		unregister_dca_provider(device->dca, &pdev->dev);
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 1391798..2f1cfa0 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -79,6 +79,8 @@
 #define IOAT_CAP_APIC				0x00000080
 #define IOAT_CAP_XOR				0x00000100
 #define IOAT_CAP_PQ				0x00000200
+#define IOAT_CAP_DWBES				0x00002000
+#define IOAT_CAP_RAID16SS			0x00020000
 
 #define IOAT_CHANNEL_MMIO_SIZE			0x80	/* Each Channel MMIO space is this size */
 
@@ -93,6 +95,8 @@
 #define IOAT_CHANCTRL_ERR_COMPLETION_EN		0x0004
 #define IOAT_CHANCTRL_INT_REARM			0x0001
 #define IOAT_CHANCTRL_RUN			(IOAT_CHANCTRL_INT_REARM |\
+						 IOAT_CHANCTRL_ERR_INT_EN |\
+						 IOAT_CHANCTRL_ERR_COMPLETION_EN |\
 						 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
 
 #define IOAT_DMA_COMP_OFFSET			0x02	/* 16-bit DMA channel compatibility */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 8c61d17..d39c2cd 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1642,7 +1642,7 @@
 	return dma_async_device_register(&idmac->dma);
 }
 
-static void __exit ipu_idmac_exit(struct ipu *ipu)
+static void ipu_idmac_exit(struct ipu *ipu)
 {
 	int i;
 	struct idmac *idmac = &ipu->idmac;
@@ -1756,7 +1756,7 @@
 	return ret;
 }
 
-static int __exit ipu_remove(struct platform_device *pdev)
+static int ipu_remove(struct platform_device *pdev)
 {
 	struct ipu *ipu = platform_get_drvdata(pdev);
 
@@ -1781,7 +1781,7 @@
 		.name	= "ipu-core",
 		.owner	= THIS_MODULE,
 	},
-	.remove		= __exit_p(ipu_remove),
+	.remove		= ipu_remove,
 };
 
 static int __init ipu_init(void)
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 69d04d2..7aa0864 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -13,43 +13,31 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/module.h>
-#include <linux/rculist.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
 
 static LIST_HEAD(of_dma_list);
-static DEFINE_SPINLOCK(of_dma_lock);
+static DEFINE_MUTEX(of_dma_lock);
 
 /**
- * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * of_dma_find_controller - Get a DMA controller in DT DMA helpers list
  * @dma_spec:	pointer to DMA specifier as found in the device tree
  *
  * Finds a DMA controller with matching device node and number for dma cells
- * in a list of registered DMA controllers. If a match is found the use_count
- * variable is increased and a valid pointer to the DMA data stored is retuned.
- * A NULL pointer is returned if no match is found.
+ * in a list of registered DMA controllers. If a match is found a valid pointer
+ * to the DMA data stored is returned. A NULL pointer is returned if no match is
+ * found.
  */
-static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
 {
 	struct of_dma *ofdma;
 
-	spin_lock(&of_dma_lock);
-
-	if (list_empty(&of_dma_list)) {
-		spin_unlock(&of_dma_lock);
-		return NULL;
-	}
-
 	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
 		if ((ofdma->of_node == dma_spec->np) &&
-		    (ofdma->of_dma_nbcells == dma_spec->args_count)) {
-			ofdma->use_count++;
-			spin_unlock(&of_dma_lock);
+		    (ofdma->of_dma_nbcells == dma_spec->args_count))
 			return ofdma;
-		}
-
-	spin_unlock(&of_dma_lock);
 
 	pr_debug("%s: can't find DMA controller %s\n", __func__,
 		 dma_spec->np->full_name);
@@ -58,22 +46,6 @@
 }
 
 /**
- * of_dma_put_controller - Decrement use count for a registered DMA controller
- * @of_dma:	pointer to DMA controller data
- *
- * Decrements the use_count variable in the DMA data structure. This function
- * should be called only when a valid pointer is returned from
- * of_dma_get_controller() and no further accesses to data referenced by that
- * pointer are needed.
- */
-static void of_dma_put_controller(struct of_dma *ofdma)
-{
-	spin_lock(&of_dma_lock);
-	ofdma->use_count--;
-	spin_unlock(&of_dma_lock);
-}
-
-/**
  * of_dma_controller_register - Register a DMA controller to DT DMA helpers
  * @np:			device node of DMA controller
  * @of_dma_xlate:	translation function which converts a phandle
@@ -93,6 +65,7 @@
 {
 	struct of_dma	*ofdma;
 	int		nbcells;
+	const __be32	*prop;
 
 	if (!np || !of_dma_xlate) {
 		pr_err("%s: not enough information provided\n", __func__);
@@ -103,8 +76,11 @@
 	if (!ofdma)
 		return -ENOMEM;
 
-	nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL));
-	if (!nbcells) {
+	prop = of_get_property(np, "#dma-cells", NULL);
+	if (prop)
+		nbcells = be32_to_cpup(prop);
+
+	if (!prop || !nbcells) {
 		pr_err("%s: #dma-cells property is missing or invalid\n",
 		       __func__);
 		kfree(ofdma);
@@ -115,12 +91,11 @@
 	ofdma->of_dma_nbcells = nbcells;
 	ofdma->of_dma_xlate = of_dma_xlate;
 	ofdma->of_dma_data = data;
-	ofdma->use_count = 0;
 
 	/* Now queue of_dma controller structure in list */
-	spin_lock(&of_dma_lock);
+	mutex_lock(&of_dma_lock);
 	list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
-	spin_unlock(&of_dma_lock);
+	mutex_unlock(&of_dma_lock);
 
 	return 0;
 }
@@ -132,32 +107,20 @@
  *
  * Memory allocated by of_dma_controller_register() is freed here.
  */
-int of_dma_controller_free(struct device_node *np)
+void of_dma_controller_free(struct device_node *np)
 {
 	struct of_dma *ofdma;
 
-	spin_lock(&of_dma_lock);
-
-	if (list_empty(&of_dma_list)) {
-		spin_unlock(&of_dma_lock);
-		return -ENODEV;
-	}
+	mutex_lock(&of_dma_lock);
 
 	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
 		if (ofdma->of_node == np) {
-			if (ofdma->use_count) {
-				spin_unlock(&of_dma_lock);
-				return -EBUSY;
-			}
-
 			list_del(&ofdma->of_dma_controllers);
-			spin_unlock(&of_dma_lock);
 			kfree(ofdma);
-			return 0;
+			break;
 		}
 
-	spin_unlock(&of_dma_lock);
-	return -ENODEV;
+	mutex_unlock(&of_dma_lock);
 }
 EXPORT_SYMBOL_GPL(of_dma_controller_free);
 
@@ -172,8 +135,8 @@
  * specifiers, matches the name provided. Returns 0 if the name matches and
  * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
  */
-static int of_dma_match_channel(struct device_node *np, char *name, int index,
-				struct of_phandle_args *dma_spec)
+static int of_dma_match_channel(struct device_node *np, const char *name,
+				int index, struct of_phandle_args *dma_spec)
 {
 	const char *s;
 
@@ -198,7 +161,7 @@
  * Returns pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
-					      char *name)
+					      const char *name)
 {
 	struct of_phandle_args	dma_spec;
 	struct of_dma		*ofdma;
@@ -220,14 +183,15 @@
 		if (of_dma_match_channel(np, name, i, &dma_spec))
 			continue;
 
-		ofdma = of_dma_get_controller(&dma_spec);
+		mutex_lock(&of_dma_lock);
+		ofdma = of_dma_find_controller(&dma_spec);
 
-		if (!ofdma)
-			continue;
+		if (ofdma)
+			chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
+		else
+			chan = NULL;
 
-		chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
-
-		of_dma_put_controller(ofdma);
+		mutex_unlock(&of_dma_lock);
 
 		of_node_put(dma_spec.np);
 
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 08b43bf..ec3fc4f 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -16,6 +16,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
 
 #include "virt-dma.h"
 
@@ -67,6 +69,10 @@
 	[OMAP_DMA_DATA_TYPE_S32] = 4,
 };
 
+static struct of_dma_filter_info omap_dma_info = {
+	.filter_fn = omap_dma_filter_fn,
+};
+
 static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
 {
 	return container_of(d, struct omap_dmadev, ddev);
@@ -629,8 +635,22 @@
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
 			rc);
 		omap_dma_free(od);
-	} else {
-		platform_set_drvdata(pdev, od);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, od);
+
+	if (pdev->dev.of_node) {
+		omap_dma_info.dma_cap = od->ddev.cap_mask;
+
+		/* Device-tree DMA controller registration */
+		rc = of_dma_controller_register(pdev->dev.of_node,
+				of_dma_simple_xlate, &omap_dma_info);
+		if (rc) {
+			pr_warn("OMAP-DMA: failed to register DMA controller\n");
+			dma_async_device_unregister(&od->ddev);
+			omap_dma_free(od);
+		}
 	}
 
 	dev_info(&pdev->dev, "OMAP DMA engine driver\n");
@@ -642,18 +662,32 @@
 {
 	struct omap_dmadev *od = platform_get_drvdata(pdev);
 
+	if (pdev->dev.of_node)
+		of_dma_controller_free(pdev->dev.of_node);
+
 	dma_async_device_unregister(&od->ddev);
 	omap_dma_free(od);
 
 	return 0;
 }
 
+static const struct of_device_id omap_dma_match[] = {
+	{ .compatible = "ti,omap2420-sdma", },
+	{ .compatible = "ti,omap2430-sdma", },
+	{ .compatible = "ti,omap3430-sdma", },
+	{ .compatible = "ti,omap3630-sdma", },
+	{ .compatible = "ti,omap4430-sdma", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap_dma_match);
+
 static struct platform_driver omap_dma_driver = {
 	.probe	= omap_dma_probe,
 	.remove	= omap_dma_remove,
 	.driver = {
 		.name = "omap-dma-engine",
 		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(omap_dma_match),
 	},
 };
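
With of_dma_controller_register() in place, OMAP clients described in the device tree resolve their channels through of_dma_simple_xlate(), which runs omap_dma_filter_fn against the advertised capability mask. A consumer-side sketch; the "tx" name and minimal error handling are illustrative:

	static int client_probe(struct platform_device *pdev)
	{
		struct dma_chan *chan;

		/* resolves the node's "dmas"/"dma-names" properties */
		chan = dma_request_slave_channel(&pdev->dev, "tx");
		if (!chan)
			return -EPROBE_DEFER;
		dma_release_channel(chan);
		return 0;
	}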
 
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index d01faeb..ce3dc3e 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -476,7 +476,7 @@
 	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 
 	if (!ret) {
-		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
+		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
 		if (ret) {
 			spin_lock(&pd_chan->lock);
 			pd_chan->descs_allocated++;
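
The GFP_NOIO to GFP_ATOMIC switch matters because GFP_NOIO may still sleep, while the dmaengine prep callbacks that reach this allocation may legally be invoked from atomic context. The trade-off, as a sketch: GFP_ATOMIC never sleeps but can fail under memory pressure, so the NULL return must stay handled:

	/* sketch: allocating where sleeping is forbidden */
	desc = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
	if (!desc)
		return NULL;	/* caller retries or drops the request */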
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5dbc594..a17553f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -26,6 +26,7 @@
 #include <linux/scatterlist.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
+#include <linux/err.h>
 
 #include "dmaengine.h"
 #define PL330_MAX_CHAN		8
@@ -2288,13 +2289,12 @@
 
 		/* If already submitted */
 		if (desc->status == BUSY)
-			break;
+			continue;
 
 		ret = pl330_submit_req(pch->pl330_chid,
 						&desc->req);
 		if (!ret) {
 			desc->status = BUSY;
-			break;
 		} else if (ret == -EAGAIN) {
 			/* QFull or DMAC Dying */
 			break;
@@ -2904,9 +2904,9 @@
 	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
 
 	res = &adev->res;
-	pi->base = devm_request_and_ioremap(&adev->dev, res);
-	if (!pi->base)
-		return -ENXIO;
+	pi->base = devm_ioremap_resource(&adev->dev, res);
+	if (IS_ERR(pi->base))
+		return PTR_ERR(pi->base);
 
 	amba_set_drvdata(adev, pdmac);
 
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
new file mode 100644
index 0000000..5c1dee2
--- /dev/null
+++ b/drivers/dma/sh/Kconfig
@@ -0,0 +1,24 @@
+#
+# DMA engine configuration for sh
+#
+
+config SH_DMAE_BASE
+	bool "Renesas SuperH DMA Engine support"
+	depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
+	depends on !SH_DMA_API
+	default y
+	select DMA_ENGINE
+	help
+	  Enable support for the Renesas SuperH DMA controllers.
+
+config SH_DMAE
+	tristate "Renesas SuperH DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SuperH DMA controllers.
+
+config SUDMAC
+	tristate "Renesas SUDMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 54ae957..c07ca46 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_SH_DMAE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
new file mode 100644
index 0000000..e7c94bb
--- /dev/null
+++ b/drivers/dma/sh/sudmac.c
@@ -0,0 +1,428 @@
+/*
+ * Renesas SUDMAC support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * based on drivers/dma/sh/shdma.c:
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/sudmac.h>
+
+struct sudmac_chan {
+	struct shdma_chan shdma_chan;
+	void __iomem *base;
+	char dev_id[16];	/* unique name per DMAC of channel */
+
+	u32 offset;		/* for CFG, BA, BBC, CA, CBC, DEN */
+	u32 cfg;
+	u32 dint_end_bit;
+};
+
+struct sudmac_device {
+	struct shdma_dev shdma_dev;
+	struct sudmac_pdata *pdata;
+	void __iomem *chan_reg;
+};
+
+struct sudmac_regs {
+	u32 base_addr;
+	u32 base_byte_count;
+};
+
+struct sudmac_desc {
+	struct sudmac_regs hw;
+	struct shdma_desc shdma_desc;
+};
+
+#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
+#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
+				 struct sudmac_device, shdma_dev.dma_dev)
+
+/* SUDMAC register */
+#define SUDMAC_CH0CFG		0x00
+#define SUDMAC_CH0BA		0x10
+#define SUDMAC_CH0BBC		0x18
+#define SUDMAC_CH0CA		0x20
+#define SUDMAC_CH0CBC		0x28
+#define SUDMAC_CH0DEN		0x30
+#define SUDMAC_DSTSCLR		0x38
+#define SUDMAC_DBUFCTRL		0x3C
+#define SUDMAC_DINTCTRL		0x40
+#define SUDMAC_DINTSTS		0x44
+#define SUDMAC_DINTSTSCLR	0x48
+#define SUDMAC_CH0SHCTRL	0x50
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_SENDBUFM	0x1000 /* b12: Transmit Buffer Mode */
+#define SUDMAC_RCVENDM	0x0100 /* b8: Receive Data Transfer End Mode */
+#define SUDMAC_LBA_WAIT	0x0030 /* b5-4: Local Bus Access Wait */
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_CH1ENDE	0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
+#define SUDMAC_CH0ENDE	0x0001 /* b0: Ch0 DMA Transfer End Int Enable */
+
+#define SUDMAC_DRV_NAME "sudmac"
+
+static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
+{
+	iowrite32(data, sc->base + reg);
+}
+
+static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
+{
+	return ioread32(sc->base + reg);
+}
+
+static bool sudmac_is_busy(struct sudmac_chan *sc)
+{
+	u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
+
+	if (den)
+		return true; /* working */
+
+	return false; /* waiting */
+}
+
+static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
+			   struct shdma_desc *sdesc)
+{
+	sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
+	sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
+	sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
+}
+
+static void sudmac_start(struct sudmac_chan *sc)
+{
+	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+	sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
+	sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
+}
+
+static void sudmac_start_xfer(struct shdma_chan *schan,
+			      struct shdma_desc *sdesc)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+	struct sudmac_desc *sd = to_desc(sdesc);
+
+	sudmac_set_reg(sc, &sd->hw, sdesc);
+	sudmac_start(sc);
+}
+
+static bool sudmac_channel_busy(struct shdma_chan *schan)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+
+	return sudmac_is_busy(sc);
+}
+
+static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
+{
+}
+
+static const struct sudmac_slave_config *sudmac_find_slave(
+	struct sudmac_chan *sc, int slave_id)
+{
+	struct sudmac_device *sdev = to_sdev(sc);
+	struct sudmac_pdata *pdata = sdev->pdata;
+	const struct sudmac_slave_config *cfg;
+	int i;
+
+	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+		if (cfg->slave_id == slave_id)
+			return cfg;
+
+	return NULL;
+}
+
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+	const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
+
+	if (!cfg)
+		return -ENODEV;
+
+	return 0;
+}
+
+static inline void sudmac_dma_halt(struct sudmac_chan *sc)
+{
+	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);
+
+	sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
+	sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
+	sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
+}
+
+static int sudmac_desc_setup(struct shdma_chan *schan,
+			     struct shdma_desc *sdesc,
+			     dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+	struct sudmac_desc *sd = to_desc(sdesc);
+
+	dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
+		__func__, src, dst, *len);
+
+	if (*len > schan->max_xfer_len)
+		*len = schan->max_xfer_len;
+
+	if (dst)
+		sd->hw.base_addr = dst;
+	else if (src)
+		sd->hw.base_addr = src;
+	sd->hw.base_byte_count = *len;
+
+	return 0;
+}
+
+static void sudmac_halt(struct shdma_chan *schan)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+
+	sudmac_dma_halt(sc);
+}
+
+static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+	u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);
+
+	if (!(dintsts & sc->dint_end_bit))
+		return false;
+
+	/* DMA stop */
+	sudmac_dma_halt(sc);
+
+	return true;
+}
+
+static size_t sudmac_get_partial(struct shdma_chan *schan,
+				 struct shdma_desc *sdesc)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+	struct sudmac_desc *sd = to_desc(sdesc);
+	u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);
+
+	return sd->hw.base_byte_count - current_byte_count;
+}
+
+static bool sudmac_desc_completed(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sudmac_chan *sc = to_chan(schan);
+	struct sudmac_desc *sd = to_desc(sdesc);
+	u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);
+
+	return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
+}
+
+static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
+			     unsigned long flags)
+{
+	struct shdma_dev *sdev = &su_dev->shdma_dev;
+	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+	struct sudmac_chan *sc;
+	struct shdma_chan *schan;
+	int err;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
+	if (!sc) {
+		dev_err(sdev->dma_dev.dev,
+			"No free memory for allocating dma channels!\n");
+		return -ENOMEM;
+	}
+
+	schan = &sc->shdma_chan;
+	schan->max_xfer_len = 64 * 1024 * 1024 - 1;
+
+	shdma_chan_probe(sdev, schan, id);
+
+	sc->base = su_dev->chan_reg;
+
+	/* get platform_data */
+	sc->offset = su_dev->pdata->channel->offset;
+	if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
+		sc->cfg |= SUDMAC_SENDBUFM;
+	if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
+		sc->cfg |= SUDMAC_RCVENDM;
+	sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;
+
+	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
+		sc->dint_end_bit |= SUDMAC_CH0ENDE;
+	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
+		sc->dint_end_bit |= SUDMAC_CH1ENDE;
+
+	/* set up channel irq */
+	if (pdev->id >= 0)
+		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
+			 pdev->id, id);
+	else
+		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);
+
+	err = shdma_request_irq(schan, irq, flags, sc->dev_id);
+	if (err) {
+		dev_err(sdev->dma_dev.dev,
+			"DMA channel %d request_irq failed %d\n", id, err);
+		goto err_no_irq;
+	}
+
+	return 0;
+
+err_no_irq:
+	/* remove from dmaengine device node */
+	shdma_chan_remove(schan);
+	return err;
+}
+
+static void sudmac_chan_remove(struct sudmac_device *su_dev)
+{
+	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+	struct shdma_chan *schan;
+	int i;
+
+	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
+		struct sudmac_chan *sc = to_chan(schan);
+
+		BUG_ON(!schan);
+
+		shdma_free_irq(&sc->shdma_chan);
+		shdma_chan_remove(schan);
+	}
+	dma_dev->chancnt = 0;
+}
+
+static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
+{
+	/* SUDMAC doesn't need the address */
+	return 0;
+}
+
+static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
+{
+	return &((struct sudmac_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sudmac_shdma_ops = {
+	.desc_completed = sudmac_desc_completed,
+	.halt_channel = sudmac_halt,
+	.channel_busy = sudmac_channel_busy,
+	.slave_addr = sudmac_slave_addr,
+	.desc_setup = sudmac_desc_setup,
+	.set_slave = sudmac_set_slave,
+	.setup_xfer = sudmac_setup_xfer,
+	.start_xfer = sudmac_start_xfer,
+	.embedded_desc = sudmac_embedded_desc,
+	.chan_irq = sudmac_chan_irq,
+	.get_partial = sudmac_get_partial,
+};
+
+static int sudmac_probe(struct platform_device *pdev)
+{
+	struct sudmac_pdata *pdata = pdev->dev.platform_data;
+	int err, i;
+	struct sudmac_device *su_dev;
+	struct dma_device *dma_dev;
+	struct resource *chan, *irq_res;
+
+	/* get platform data */
+	if (!pdata)
+		return -ENODEV;
+
+	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!chan || !irq_res)
+		return -ENODEV;
+
+	err = -ENOMEM;
+	su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
+			      GFP_KERNEL);
+	if (!su_dev) {
+		dev_err(&pdev->dev, "Not enough memory\n");
+		return err;
+	}
+
+	dma_dev = &su_dev->shdma_dev.dma_dev;
+
+	su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan);
+	if (!su_dev->chan_reg)
+		return err;
+
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+	su_dev->shdma_dev.ops = &sudmac_shdma_ops;
+	su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
+	err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
+	if (err < 0)
+		return err;
+
+	/* platform data */
+	su_dev->pdata = pdev->dev.platform_data;
+
+	platform_set_drvdata(pdev, su_dev);
+
+	/* Create DMA Channel */
+	for (i = 0; i < pdata->channel_num; i++) {
+		err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
+		if (err)
+			goto chan_probe_err;
+	}
+
+	err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
+	if (err < 0)
+		goto chan_probe_err;
+
+	return err;
+
+chan_probe_err:
+	sudmac_chan_remove(su_dev);
+
+	platform_set_drvdata(pdev, NULL);
+	shdma_cleanup(&su_dev->shdma_dev);
+
+	return err;
+}
+
+static int sudmac_remove(struct platform_device *pdev)
+{
+	struct sudmac_device *su_dev = platform_get_drvdata(pdev);
+	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
+
+	dma_async_device_unregister(dma_dev);
+	sudmac_chan_remove(su_dev);
+	shdma_cleanup(&su_dev->shdma_dev);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver sudmac_driver = {
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= SUDMAC_DRV_NAME,
+	},
+	.probe		= sudmac_probe,
+	.remove		= sudmac_remove,
+};
+module_platform_driver(sudmac_driver);
+
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas SUDMAC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
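The new sudmac.c is a thin client of the shdma-base library: every dmaengine
callback receives a struct shdma_chan and recovers the driver-private wrapper
with container_of(), which is what the to_chan()/to_desc()/to_sdev() macros
above encode. The embedding pattern in general form (struct names here are
illustrative, not taken from the driver):

#include <linux/kernel.h>

struct base_chan {
	int id;
};

struct priv_chan {
	struct base_chan base;	/* embedded by value, never a pointer */
	u32 offset;
};

/* Map a pointer to the embedded member back to its wrapper. */
static inline struct priv_chan *to_priv(struct base_chan *c)
{
	return container_of(c, struct priv_chan, base);
}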
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 1d627e2..1765a0a 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -16,6 +16,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/clk.h>
 #include <linux/sirfsoc_dma.h>
 
 #include "dmaengine.h"
@@ -78,6 +79,7 @@
 	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
 	void __iomem			*base;
 	int				irq;
+	struct clk			*clk;
 	bool				is_marco;
 };
 
@@ -639,6 +641,12 @@
 		return -EINVAL;
 	}
 
+	sdma->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(sdma->clk)) {
+		dev_err(dev, "failed to get a clock.\n");
+		return PTR_ERR(sdma->clk);
+	}
+
 	ret = of_address_to_resource(dn, 0, &res);
 	if (ret) {
 		dev_err(dev, "Error parsing memory region!\n");
@@ -698,6 +706,8 @@
 
 	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
 
+	clk_prepare_enable(sdma->clk);
+
 	/* Register DMA engine */
 	dev_set_drvdata(dev, sdma);
 	ret = dma_async_device_register(dma);
@@ -720,6 +730,7 @@
 	struct device *dev = &op->dev;
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 
+	clk_disable_unprepare(sdma->clk);
 	dma_async_device_unregister(&sdma->dma);
 	free_irq(sdma->irq, sdma);
 	irq_dispose_mapping(sdma->irq);
@@ -742,7 +753,18 @@
 	},
 };
 
-module_platform_driver(sirfsoc_dma_driver);
+static __init int sirfsoc_dma_init(void)
+{
+	return platform_driver_register(&sirfsoc_dma_driver);
+}
+
+static void __exit sirfsoc_dma_exit(void)
+{
+	platform_driver_unregister(&sirfsoc_dma_driver);
+}
+
+subsys_initcall(sirfsoc_dma_init);
+module_exit(sirfsoc_dma_exit);
 
 MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
 	"Barry Song <baohua.song@csr.com>");
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index fcee27e..ce19340 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -30,6 +30,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/clk/tegra.h>
@@ -199,6 +200,7 @@
 
 	/* Channel-slave specific configuration */
 	struct dma_slave_config dma_sconfig;
+	struct tegra_dma_channel_regs	channel_reg;
 };
 
 /* tegra_dma: Tegra DMA specific information */
@@ -1213,7 +1215,6 @@
 	.support_channel_pause	= false,
 };
 
-#if defined(CONFIG_OF)
 /* Tegra30 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
 	.nr_channels		= 32,
@@ -1243,7 +1244,6 @@
 	},
 };
 MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
-#endif
 
 static int tegra_dma_probe(struct platform_device *pdev)
 {
@@ -1252,20 +1252,14 @@
 	int ret;
 	int i;
 	const struct tegra_dma_chip_data *cdata = NULL;
+	const struct of_device_id *match;
 
-	if (pdev->dev.of_node) {
-		const struct of_device_id *match;
-		match = of_match_device(of_match_ptr(tegra_dma_of_match),
-					&pdev->dev);
-		if (!match) {
-			dev_err(&pdev->dev, "Error: No device match found\n");
-			return -ENODEV;
-		}
-		cdata = match->data;
-	} else {
-		/* If no device tree then fallback to tegra20 */
-		cdata = &tegra20_dma_chip_data;
+	match = of_match_device(tegra_dma_of_match, &pdev->dev);
+	if (!match) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
 	}
+	cdata = match->data;
 
 	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
 			sizeof(struct tegra_dma_channel), GFP_KERNEL);
@@ -1448,11 +1442,74 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int tegra_dma_pm_suspend(struct device *dev)
+{
+	struct tegra_dma *tdma = dev_get_drvdata(dev);
+	int i;
+	int ret;
+
+	/* Enable clock before accessing register */
+	ret = tegra_dma_runtime_resume(dev);
+	if (ret < 0)
+		return ret;
+
+	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
+	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+		struct tegra_dma_channel *tdc = &tdma->channels[i];
+		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+
+		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
+		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
+		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
+		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
+		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
+	}
+
+	/* Disable clock */
+	tegra_dma_runtime_suspend(dev);
+	return 0;
+}
+
+static int tegra_dma_pm_resume(struct device *dev)
+{
+	struct tegra_dma *tdma = dev_get_drvdata(dev);
+	int i;
+	int ret;
+
+	/* Enable clock before accessing register */
+	ret = tegra_dma_runtime_resume(dev);
+	if (ret < 0)
+		return ret;
+
+	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
+	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
+	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+
+	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
+		struct tegra_dma_channel *tdc = &tdma->channels[i];
+		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
+		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
+	}
+
+	/* Disable clock */
+	tegra_dma_runtime_suspend(dev);
+	return 0;
+}
+#endif
+
 static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
 #ifdef CONFIG_PM_RUNTIME
 	.runtime_suspend = tegra_dma_runtime_suspend,
 	.runtime_resume = tegra_dma_runtime_resume,
 #endif
+	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
 };
 
 static struct platform_driver tegra_dmac_driver = {
@@ -1460,7 +1517,7 @@
 		.name	= "tegra-apbdma",
 		.owner = THIS_MODULE,
 		.pm	= &tegra_dma_dev_pm_ops,
-		.of_match_table = of_match_ptr(tegra_dma_of_match),
+		.of_match_table = tegra_dma_of_match,
 	},
 	.probe		= tegra_dma_probe,
 	.remove		= tegra_dma_remove,
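The tegra20-apb-dma changes add system sleep support: suspend snapshots each
channel's CSR, pointer and sequence registers into the new channel_reg field,
and resume writes them back with the enable bit masked off, both bracketed by
runtime resume/suspend so the clock is on while registers are touched.
Hanging both runtime and sleep callbacks off one dev_pm_ops can also be
written without raw #ifdefs, roughly like this (callback bodies elided, names
illustrative):

#include <linux/pm.h>

static int ex_runtime_suspend(struct device *dev) { return 0; }
static int ex_runtime_resume(struct device *dev) { return 0; }
static int ex_sleep_suspend(struct device *dev) { return 0; }
static int ex_sleep_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops ex_pm_ops = {
	/* Runtime PM: clock gating while idle */
	SET_RUNTIME_PM_OPS(ex_runtime_suspend, ex_runtime_resume, NULL)
	/* System sleep: save/restore channel registers */
	SET_SYSTEM_SLEEP_PM_OPS(ex_sleep_suspend, ex_sleep_resume)
};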
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 952f823..26107ba 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -823,7 +823,7 @@
 		.owner  = THIS_MODULE,
 	},
 	.probe	= td_probe,
-	.remove	= __exit_p(td_remove),
+	.remove	= td_remove,
 };
 
 module_platform_driver(td_driver);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 913f55c..a59fb48 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1190,7 +1190,7 @@
 	return 0;
 }
 
-static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
+static int txx9dmac_chan_remove(struct platform_device *pdev)
 {
 	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
 
@@ -1252,7 +1252,7 @@
 	return 0;
 }
 
-static int __exit txx9dmac_remove(struct platform_device *pdev)
+static int txx9dmac_remove(struct platform_device *pdev)
 {
 	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
 
@@ -1299,14 +1299,14 @@
 };
 
 static struct platform_driver txx9dmac_chan_driver = {
-	.remove		= __exit_p(txx9dmac_chan_remove),
+	.remove		= txx9dmac_chan_remove,
 	.driver = {
 		.name	= "txx9dmac-chan",
 	},
 };
 
 static struct platform_driver txx9dmac_driver = {
-	.remove		= __exit_p(txx9dmac_remove),
+	.remove		= txx9dmac_remove,
 	.shutdown	= txx9dmac_shutdown,
 	.driver = {
 		.name	= "txx9dmac",
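The timb_dma and txx9dmac hunks drop the __exit/__exit_p() games around the
remove callbacks. A bound platform device can be unbound at any time through
sysfs, so .remove must stay resident; marking it __exit discards the code for
built-in drivers, and __exit_p() then papers over that by substituting NULL.
The safe shape is simply:

#include <linux/platform_device.h>

static int example_remove(struct platform_device *pdev)
{
	/* Must stay in core memory: runs on sysfs-triggered unbind,
	 * not only at module unload, so no __exit annotation. */
	return 0;
}

static struct platform_driver example_driver = {
	.driver	= {
		.name = "example",
	},
	.remove	= example_remove,	/* no __exit_p() wrapper */
};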
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 5899a76..67610a6 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -87,7 +87,7 @@
 /*
  * various constants for Memory Controllers
  */
-static const char *mem_types[] = {
+static const char * const mem_types[] = {
 	[MEM_EMPTY] = "Empty",
 	[MEM_RESERVED] = "Reserved",
 	[MEM_UNKNOWN] = "Unknown",
@@ -107,7 +107,7 @@
 	[MEM_RDDR3] = "Registered-DDR3"
 };
 
-static const char *dev_types[] = {
+static const char * const dev_types[] = {
 	[DEV_UNKNOWN] = "Unknown",
 	[DEV_X1] = "x1",
 	[DEV_X2] = "x2",
@@ -118,7 +118,7 @@
 	[DEV_X64] = "x64"
 };
 
-static const char *edac_caps[] = {
+static const char * const edac_caps[] = {
 	[EDAC_UNKNOWN] = "Unknown",
 	[EDAC_NONE] = "None",
 	[EDAC_RESERVED] = "Reserved",
@@ -327,17 +327,17 @@
 };
 
 /* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
 		   channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
 		   channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
 		   channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
 		   channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
 		   channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
 		   channel_ce_count_show, NULL, 5);
 
 /* Total possible dynamic ce_count attribute file table */
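Two small hardenings in edac_mc_sysfs.c: the lookup tables become
"const char * const" so both the pointer array and what it points at are
read-only, and the ce_count channel attributes drop S_IWUSR because they have
no store method; a writable mode paired with a NULL store callback is
misleading. In sketch form (hypothetical attribute):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

/* Both levels const: neither the array slots nor the strings can change. */
static const char * const example_types[] = { "Empty", "Reserved" };

static ssize_t example_count_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", example_types[0]);
}

/* Read-only mode, matching the NULL store callback. */
static DEVICE_ATTR(example_count, S_IRUGO, example_count_show, NULL);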
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 5168a13..3297301 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -16,7 +16,7 @@
 
 config EXTCON_GPIO
 	tristate "GPIO extcon support"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say Y here to enable GPIO-based extcon support. Note that GPIO
 	  extcon supports a single state per extcon instance.
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 27ac423..7ef316f 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -389,10 +389,8 @@
 	struct bus_reset_event *e;
 
 	e = kzalloc(sizeof(*e), GFP_KERNEL);
-	if (e == NULL) {
-		fw_notice(client->device->card, "out of memory when allocating event\n");
+	if (e == NULL)
 		return;
-	}
 
 	fill_bus_reset_event(&e->reset, client);
 
@@ -693,10 +691,9 @@
 
 	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
-	if (r == NULL || e == NULL) {
-		fw_notice(card, "out of memory when allocating event\n");
+	if (r == NULL || e == NULL)
 		goto failed;
-	}
+
 	r->card    = card;
 	r->request = request;
 	r->data    = payload;
@@ -930,10 +927,9 @@
 	struct iso_interrupt_event *e;
 
 	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
-	if (e == NULL) {
-		fw_notice(context->card, "out of memory when allocating event\n");
+	if (e == NULL)
 		return;
-	}
+
 	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
 	e->interrupt.closure   = client->iso_closure;
 	e->interrupt.cycle     = cycle;
@@ -950,10 +946,9 @@
 	struct iso_interrupt_mc_event *e;
 
 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
-	if (e == NULL) {
-		fw_notice(context->card, "out of memory when allocating event\n");
+	if (e == NULL)
 		return;
-	}
+
 	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
 	e->interrupt.closure   = client->iso_closure;
 	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
@@ -1366,8 +1361,7 @@
 	int ret;
 
 	if ((request->channels == 0 && request->bandwidth == 0) ||
-	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
-	    request->bandwidth < 0)
+	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
 		return -EINVAL;
 
 	r  = kmalloc(sizeof(*r), GFP_KERNEL);
@@ -1582,10 +1576,9 @@
 
 	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
 		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
-		if (e == NULL) {
-			fw_notice(card, "out of memory when allocating event\n");
+		if (e == NULL)
 			break;
-		}
+
 		e->phy_packet.closure	= client->phy_receiver_closure;
 		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
 		e->phy_packet.rcode	= RCODE_COMPLETE;
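All the firewire hunks in this series that delete "out of memory" prints
follow one rule: the slab and page allocators already emit a warning with a
stack trace when an allocation fails (unless __GFP_NOWARN is passed), so
per-callsite messages are pure noise and only bloat the binary. The preferred
form is the bare check:

#include <linux/slab.h>

struct event {
	int type;
};

static struct event *example_alloc_event(void)
{
	struct event *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)		/* no printk: the allocator already warned */
		return NULL;

	e->type = 1;
	return e;
}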
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 03ce7d9..664a6ff 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -692,10 +692,8 @@
 		 * match the drivers id_tables against it.
 		 */
 		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
-		if (unit == NULL) {
-			fw_err(device->card, "out of memory for unit\n");
+		if (unit == NULL)
 			continue;
-		}
 
 		unit->directory = ci.p + value - 1;
 		unit->device.bus = &fw_bus_type;
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 4d56536..815b0fc 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -356,10 +356,8 @@
 	}
 
 	new = kmalloc(sizeof(*new), GFP_ATOMIC);
-	if (!new) {
-		dev_err(&pd->skb->dev->dev, "out of memory\n");
+	if (!new)
 		return NULL;
-	}
 
 	new->offset = offset;
 	new->len = len;
@@ -402,8 +400,6 @@
 fail_w_new:
 	kfree(new);
 fail:
-	dev_err(&net->dev, "out of memory\n");
-
 	return NULL;
 }
 
@@ -609,7 +605,6 @@
 
 		skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
 		if (unlikely(!skb)) {
-			dev_err(&net->dev, "out of memory\n");
 			net->stats.rx_dropped++;
 
 			return -ENOMEM;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 45912e6..9e1db64 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -54,6 +54,10 @@
 #include "core.h"
 #include "ohci.h"
 
+#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
+#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
+#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)
+
 #define DESCRIPTOR_OUTPUT_MORE		0
 #define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
 #define DESCRIPTOR_INPUT_MORE		(2 << 12)
@@ -68,6 +72,8 @@
 #define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
 #define DESCRIPTOR_WAIT			(3 << 0)
 
+#define DESCRIPTOR_CMD			(0xf << 12)
+
 struct descriptor {
 	__le16 req_count;
 	__le16 control;
@@ -149,10 +155,11 @@
 	struct descriptor *last;
 
 	/*
-	 * The last descriptor in the DMA program.  It contains the branch
+	 * The last descriptor block in the DMA program. It contains the branch
 	 * address that must be updated upon appending a new descriptor.
 	 */
 	struct descriptor *prev;
+	int prev_z;
 
 	descriptor_callback_t callback;
 
@@ -270,7 +277,9 @@
 #define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
 #define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
 #define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
+#define PCI_DEVICE_ID_VIA_VT630X	0x3044
 #define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
+#define PCI_REV_ID_VIA_VT6306		0x46
 
 #define QUIRK_CYCLE_TIMER		1
 #define QUIRK_RESET_PACKET		2
@@ -278,6 +287,8 @@
 #define QUIRK_NO_1394A			8
 #define QUIRK_NO_MSI			16
 #define QUIRK_TI_SLLZ059		32
+#define QUIRK_IR_WAKE			64
+#define QUIRK_PHY_LCTRL_TIMEOUT		128
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
@@ -290,7 +301,10 @@
 		QUIRK_BE_HEADERS},
 
 	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
-		QUIRK_NO_MSI},
+		QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
+
+	{PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
+		QUIRK_PHY_LCTRL_TIMEOUT},
 
 	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
 		QUIRK_RESET_PACKET},
@@ -319,6 +333,9 @@
 	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
 		QUIRK_RESET_PACKET},
 
+	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
+		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
+
 	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
 		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
 };
@@ -333,6 +350,8 @@
 	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
 	", disable MSI = "		__stringify(QUIRK_NO_MSI)
 	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
+	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
+	", phy LCtrl timeout = "	__stringify(QUIRK_PHY_LCTRL_TIMEOUT)
 	")");
 
 #define OHCI_PARAM_DEBUG_AT_AR		1
@@ -359,8 +378,7 @@
 	    !(evt & OHCI1394_busReset))
 		return;
 
-	dev_notice(ohci->card.device,
-	    "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
 	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
 	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
 	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
@@ -406,21 +424,19 @@
 	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
 		return;
 
-	dev_notice(ohci->card.device,
-		   "%d selfIDs, generation %d, local node ID %04x\n",
-		   self_id_count, generation, ohci->node_id);
+	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
+		    self_id_count, generation, ohci->node_id);
 
 	for (s = ohci->self_id_buffer; self_id_count--; ++s)
 		if ((*s & 1 << 23) == 0)
-			dev_notice(ohci->card.device,
-			    "selfID 0: %08x, phy %d [%c%c%c] "
-			    "%s gc=%d %s %s%s%s\n",
+			ohci_notice(ohci,
+			    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
 			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
 			    speed[*s >> 14 & 3], *s >> 16 & 63,
 			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
 			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
 		else
-			dev_notice(ohci->card.device,
+			ohci_notice(ohci,
 			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
 			    *s, *s >> 24 & 63,
 			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
@@ -470,9 +486,8 @@
 			evt = 0x1f;
 
 	if (evt == OHCI1394_evt_bus_reset) {
-		dev_notice(ohci->card.device,
-			   "A%c evt_bus_reset, generation %d\n",
-			   dir, (header[2] >> 16) & 0xff);
+		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
+			    dir, (header[2] >> 16) & 0xff);
 		return;
 	}
 
@@ -491,32 +506,26 @@
 
 	switch (tcode) {
 	case 0xa:
-		dev_notice(ohci->card.device,
-			   "A%c %s, %s\n",
-			   dir, evts[evt], tcodes[tcode]);
+		ohci_notice(ohci, "A%c %s, %s\n",
+			    dir, evts[evt], tcodes[tcode]);
 		break;
 	case 0xe:
-		dev_notice(ohci->card.device,
-			   "A%c %s, PHY %08x %08x\n",
-			   dir, evts[evt], header[1], header[2]);
+		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
+			    dir, evts[evt], header[1], header[2]);
 		break;
 	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
-		dev_notice(ohci->card.device,
-			   "A%c spd %x tl %02x, "
-			   "%04x -> %04x, %s, "
-			   "%s, %04x%08x%s\n",
-			   dir, speed, header[0] >> 10 & 0x3f,
-			   header[1] >> 16, header[0] >> 16, evts[evt],
-			   tcodes[tcode], header[1] & 0xffff, header[2], specific);
+		ohci_notice(ohci,
+			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
+			    dir, speed, header[0] >> 10 & 0x3f,
+			    header[1] >> 16, header[0] >> 16, evts[evt],
+			    tcodes[tcode], header[1] & 0xffff, header[2], specific);
 		break;
 	default:
-		dev_notice(ohci->card.device,
-			   "A%c spd %x tl %02x, "
-			   "%04x -> %04x, %s, "
-			   "%s%s\n",
-			   dir, speed, header[0] >> 10 & 0x3f,
-			   header[1] >> 16, header[0] >> 16, evts[evt],
-			   tcodes[tcode], specific);
+		ohci_notice(ohci,
+			    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
+			    dir, speed, header[0] >> 10 & 0x3f,
+			    header[1] >> 16, header[0] >> 16, evts[evt],
+			    tcodes[tcode], specific);
 	}
 }
 
@@ -563,7 +572,8 @@
 		if (i >= 3)
 			msleep(1);
 	}
-	dev_err(ohci->card.device, "failed to read phy reg\n");
+	ohci_err(ohci, "failed to read phy reg %d\n", addr);
+	dump_stack();
 
 	return -EBUSY;
 }
@@ -585,7 +595,8 @@
 		if (i >= 3)
 			msleep(1);
 	}
-	dev_err(ohci->card.device, "failed to write phy reg\n");
+	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
+	dump_stack();
 
 	return -EBUSY;
 }
@@ -690,8 +701,7 @@
 		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
 		flush_writes(ohci);
 
-		dev_err(ohci->card.device, "AR error: %s; DMA stopped\n",
-			error_msg);
+		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
 	}
 	/* FIXME: restart? */
 }
@@ -1157,6 +1167,7 @@
 	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
 	ctx->last = ctx->buffer_tail->buffer;
 	ctx->prev = ctx->buffer_tail->buffer;
+	ctx->prev_z = 1;
 
 	return 0;
 }
@@ -1221,14 +1232,35 @@
 {
 	dma_addr_t d_bus;
 	struct descriptor_buffer *desc = ctx->buffer_tail;
+	struct descriptor *d_branch;
 
 	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
 
 	desc->used += (z + extra) * sizeof(*d);
 
 	wmb(); /* finish init of new descriptors before branch_address update */
-	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
-	ctx->prev = find_branch_descriptor(d, z);
+
+	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
+	d_branch->branch_address = cpu_to_le32(d_bus | z);
+
+	/*
+	 * VT6306 incorrectly checks only the single descriptor at the
+	 * CommandPtr when the wake bit is written, so if it's a
+	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
+	 * the branch address in the first descriptor.
+	 *
+	 * Not doing this for transmit contexts since not sure how it interacts
+	 * with skip addresses.
+	 */
+	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
+	    d_branch != ctx->prev &&
+	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
+	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
+		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+	}
+
+	ctx->prev = d;
+	ctx->prev_z = z;
 }
 
 static void context_stop(struct context *ctx)
@@ -1248,7 +1280,7 @@
 		if (i)
 			udelay(10);
 	}
-	dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg);
+	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
 }
 
 struct driver_data {
@@ -1557,7 +1589,7 @@
 			goto out;
 		}
 
-	dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n");
+	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
 	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
  out:
@@ -1632,8 +1664,7 @@
 
 	ctl = reg_read(ohci, CONTROL_SET(regs));
 	if (ctl & CONTEXT_DEAD)
-		dev_err(ohci->card.device,
-			"DMA context %s has stopped, error code: %s\n",
+		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
 			name, evts[ctl & 0x1f]);
 }
 
@@ -1815,8 +1846,8 @@
 
 	reg = reg_read(ohci, OHCI1394_NodeID);
 	if (!(reg & OHCI1394_NodeID_idValid)) {
-		dev_notice(ohci->card.device,
-			   "node ID not valid, new bus reset in progress\n");
+		ohci_notice(ohci,
+			    "node ID not valid, new bus reset in progress\n");
 		return -EBUSY;
 	}
 	self_id |= ((reg & 0x3f) << 24); /* phy ID */
@@ -1863,12 +1894,12 @@
 
 	reg = reg_read(ohci, OHCI1394_NodeID);
 	if (!(reg & OHCI1394_NodeID_idValid)) {
-		dev_notice(ohci->card.device,
-			   "node ID not valid, new bus reset in progress\n");
+		ohci_notice(ohci,
+			    "node ID not valid, new bus reset in progress\n");
 		return;
 	}
 	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
-		dev_notice(ohci->card.device, "malconfigured bus\n");
+		ohci_notice(ohci, "malconfigured bus\n");
 		return;
 	}
 	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
@@ -1882,7 +1913,7 @@
 
 	reg = reg_read(ohci, OHCI1394_SelfIDCount);
 	if (reg & OHCI1394_SelfIDCount_selfIDError) {
-		dev_notice(ohci->card.device, "inconsistent self IDs\n");
+		ohci_notice(ohci, "self ID receive error\n");
 		return;
 	}
 	/*
@@ -1894,7 +1925,7 @@
 	self_id_count = (reg >> 3) & 0xff;
 
 	if (self_id_count > 252) {
-		dev_notice(ohci->card.device, "inconsistent self IDs\n");
+		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
 		return;
 	}
 
@@ -1902,7 +1933,10 @@
 	rmb();
 
 	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
-		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+		u32 id  = cond_le32_to_cpu(ohci->self_id_cpu[i]);
+		u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]);
+
+		if (id != ~id2) {
 			/*
 			 * If the invalid data looks like a cycle start packet,
 			 * it's likely to be the result of the cycle master
@@ -1910,33 +1944,30 @@
 			 * so far are valid and should be processed so that the
 			 * bus manager can then correct the gap count.
 			 */
-			if (cond_le32_to_cpu(ohci->self_id_cpu[i])
-							== 0xffff008f) {
-				dev_notice(ohci->card.device,
-					   "ignoring spurious self IDs\n");
+			if (id == 0xffff008f) {
+				ohci_notice(ohci, "ignoring spurious self IDs\n");
 				self_id_count = j;
 				break;
-			} else {
-				dev_notice(ohci->card.device,
-					   "inconsistent self IDs\n");
-				return;
 			}
+
+			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
+				    j, self_id_count, id, id2);
+			return;
 		}
-		ohci->self_id_buffer[j] =
-				cond_le32_to_cpu(ohci->self_id_cpu[i]);
+		ohci->self_id_buffer[j] = id;
 	}
 
 	if (ohci->quirks & QUIRK_TI_SLLZ059) {
 		self_id_count = find_and_insert_self_id(ohci, self_id_count);
 		if (self_id_count < 0) {
-			dev_notice(ohci->card.device,
-				   "could not construct local self ID\n");
+			ohci_notice(ohci,
+				    "could not construct local self ID\n");
 			return;
 		}
 	}
 
 	if (self_id_count == 0) {
-		dev_notice(ohci->card.device, "inconsistent self IDs\n");
+		ohci_notice(ohci, "no self IDs\n");
 		return;
 	}
 	rmb();
@@ -1957,8 +1988,7 @@
 
 	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
 	if (new_generation != generation) {
-		dev_notice(ohci->card.device,
-			   "new bus reset, discarding self ids\n");
+		ohci_notice(ohci, "new bus reset, discarding self ids\n");
 		return;
 	}
 
@@ -2096,7 +2126,7 @@
 	}
 
 	if (unlikely(event & OHCI1394_regAccessFail))
-		dev_err(ohci->card.device, "register access failure\n");
+		ohci_err(ohci, "register access failure\n");
 
 	if (unlikely(event & OHCI1394_postedWriteErr)) {
 		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
@@ -2104,13 +2134,12 @@
 		reg_write(ohci, OHCI1394_IntEventClear,
 			  OHCI1394_postedWriteErr);
 		if (printk_ratelimit())
-			dev_err(ohci->card.device, "PCI posted write error\n");
+			ohci_err(ohci, "PCI posted write error\n");
 	}
 
 	if (unlikely(event & OHCI1394_cycleTooLong)) {
 		if (printk_ratelimit())
-			dev_notice(ohci->card.device,
-				   "isochronous cycle too long\n");
+			ohci_notice(ohci, "isochronous cycle too long\n");
 		reg_write(ohci, OHCI1394_LinkControlSet,
 			  OHCI1394_LinkControl_cycleMaster);
 	}
@@ -2123,8 +2152,7 @@
 		 * them at least two cycles later.  (FIXME?)
 		 */
 		if (printk_ratelimit())
-			dev_notice(ohci->card.device,
-				   "isochronous cycle inconsistent\n");
+			ohci_notice(ohci, "isochronous cycle inconsistent\n");
 	}
 
 	if (unlikely(event & OHCI1394_unrecoverableError))
@@ -2246,12 +2274,11 @@
 		       const __be32 *config_rom, size_t length)
 {
 	struct fw_ohci *ohci = fw_ohci(card);
-	struct pci_dev *dev = to_pci_dev(card->device);
 	u32 lps, version, irqs;
 	int i, ret;
 
 	if (software_reset(ohci)) {
-		dev_err(card->device, "failed to reset ohci card\n");
+		ohci_err(ohci, "failed to reset ohci card\n");
 		return -EBUSY;
 	}
 
@@ -2262,20 +2289,31 @@
 	 * will lock up the machine.  Wait 50msec to make sure we have
 	 * full link enabled.  However, with some cards (well, at least
 	 * a JMicron PCIe card), we have to try again sometimes.
+	 *
+	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
+	 * cannot actually use the phy at that time.  These also need a
+	 * pause of tens of milliseconds between the LPS write and the
+	 * first phy access.
+	 *
+	 * But do not wait for 50msec on Agere/LSI cards.  Their phy
+	 * arbitration state machine may time out during such a long wait.
 	 */
+
 	reg_write(ohci, OHCI1394_HCControlSet,
 		  OHCI1394_HCControl_LPS |
 		  OHCI1394_HCControl_postedWriteEnable);
 	flush_writes(ohci);
 
-	for (lps = 0, i = 0; !lps && i < 3; i++) {
+	if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
 		msleep(50);
+
+	for (lps = 0, i = 0; !lps && i < 150; i++) {
+		msleep(1);
 		lps = reg_read(ohci, OHCI1394_HCControlSet) &
 		      OHCI1394_HCControl_LPS;
 	}
 
 	if (!lps) {
-		dev_err(card->device, "failed to set Link Power Status\n");
+		ohci_err(ohci, "failed to set Link Power Status\n");
 		return -EIO;
 	}
 
@@ -2284,7 +2322,7 @@
 		if (ret < 0)
 			return ret;
 		if (ret)
-			dev_notice(card->device, "local TSB41BA3D phy\n");
+			ohci_notice(ohci, "local TSB41BA3D phy\n");
 		else
 			ohci->quirks &= ~QUIRK_TI_SLLZ059;
 	}
@@ -2382,24 +2420,6 @@
 
 	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
 
-	if (!(ohci->quirks & QUIRK_NO_MSI))
-		pci_enable_msi(dev);
-	if (request_irq(dev->irq, irq_handler,
-			pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
-			ohci_driver_name, ohci)) {
-		dev_err(card->device, "failed to allocate interrupt %d\n",
-			dev->irq);
-		pci_disable_msi(dev);
-
-		if (config_rom) {
-			dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-					  ohci->next_config_rom,
-					  ohci->next_config_rom_bus);
-			ohci->next_config_rom = NULL;
-		}
-		return -EIO;
-	}
-
 	irqs =	OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
 		OHCI1394_RQPkt | OHCI1394_RSPkt |
 		OHCI1394_isochTx | OHCI1394_isochRx |
@@ -3578,20 +3598,20 @@
 
 	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
 	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
-		dev_err(&dev->dev, "invalid MMIO resource\n");
+		ohci_err(ohci, "invalid MMIO resource\n");
 		err = -ENXIO;
 		goto fail_disable;
 	}
 
 	err = pci_request_region(dev, 0, ohci_driver_name);
 	if (err) {
-		dev_err(&dev->dev, "MMIO resource unavailable\n");
+		ohci_err(ohci, "MMIO resource unavailable\n");
 		goto fail_disable;
 	}
 
 	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
 	if (ohci->registers == NULL) {
-		dev_err(&dev->dev, "failed to remap registers\n");
+		ohci_err(ohci, "failed to remap registers\n");
 		err = -ENXIO;
 		goto fail_iomem;
 	}
@@ -3675,19 +3695,33 @@
 	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
 		reg_read(ohci, OHCI1394_GUIDLo);
 
+	if (!(ohci->quirks & QUIRK_NO_MSI))
+		pci_enable_msi(dev);
+	if (request_irq(dev->irq, irq_handler,
+			pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
+			ohci_driver_name, ohci)) {
+		ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
+		err = -EIO;
+		goto fail_msi;
+	}
+
 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
 	if (err)
-		goto fail_contexts;
+		goto fail_irq;
 
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-	dev_notice(&dev->dev,
-		  "added OHCI v%x.%x device as card %d, "
-		  "%d IR + %d IT contexts, quirks 0x%x\n",
-		  version >> 16, version & 0xff, ohci->card.index,
-		  ohci->n_ir, ohci->n_it, ohci->quirks);
+	ohci_notice(ohci,
+		    "added OHCI v%x.%x device as card %d, "
+		    "%d IR + %d IT contexts, quirks 0x%x\n",
+		    version >> 16, version & 0xff, ohci->card.index,
+		    ohci->n_ir, ohci->n_it, ohci->quirks);
 
 	return 0;
 
+ fail_irq:
+	free_irq(dev->irq, ohci);
+ fail_msi:
+	pci_disable_msi(dev);
  fail_contexts:
 	kfree(ohci->ir_context_list);
 	kfree(ohci->it_context_list);
@@ -3711,19 +3745,21 @@
 	kfree(ohci);
 	pmac_ohci_off(dev);
  fail:
-	if (err == -ENOMEM)
-		dev_err(&dev->dev, "out of memory\n");
-
 	return err;
 }
 
 static void pci_remove(struct pci_dev *dev)
 {
-	struct fw_ohci *ohci;
+	struct fw_ohci *ohci = pci_get_drvdata(dev);
 
-	ohci = pci_get_drvdata(dev);
-	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
-	flush_writes(ohci);
+	/*
+	 * If the removal is happening from the suspend state, LPS won't be
+	 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
+	 */
+	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
+		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+		flush_writes(ohci);
+	}
 	cancel_work_sync(&ohci->bus_reset_work);
 	fw_core_remove_card(&ohci->card);
 
@@ -3766,16 +3802,14 @@
 	int err;
 
 	software_reset(ohci);
-	free_irq(dev->irq, ohci);
-	pci_disable_msi(dev);
 	err = pci_save_state(dev);
 	if (err) {
-		dev_err(&dev->dev, "pci_save_state failed\n");
+		ohci_err(ohci, "pci_save_state failed\n");
 		return err;
 	}
 	err = pci_set_power_state(dev, pci_choose_state(dev, state));
 	if (err)
-		dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err);
+		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
 	pmac_ohci_off(dev);
 
 	return 0;
@@ -3791,7 +3825,7 @@
 	pci_restore_state(dev);
 	err = pci_enable_device(dev);
 	if (err) {
-		dev_err(&dev->dev, "pci_enable_device failed\n");
+		ohci_err(ohci, "pci_enable_device failed\n");
 		return err;
 	}
 
@@ -3837,6 +3871,4 @@
 MODULE_LICENSE("GPL");
 
 /* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
 MODULE_ALIAS("ohci1394");
-#endif
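The trickiest ohci.c change is the LPS bring-up. Agere/LSI controllers get
QUIRK_PHY_LCTRL_TIMEOUT and skip the long fixed sleep, since their phy
arbitration state machine may time out during it; every controller then polls
the LPS bit in 1 ms steps for up to 150 ms instead of sampling it only three
times. Reduced to a skeleton (fw_ohci, reg_read() and the OHCI1394_*
constants are the driver's own, shown here for shape only):

#include <linux/delay.h>
#include <linux/errno.h>

static int example_wait_for_lps(struct fw_ohci *ohci)
{
	u32 lps = 0;
	int i;

	if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
		msleep(50);	/* most cards need settling time after LPS */

	/* Bounded poll: up to 150 naps of 1 ms each. */
	for (i = 0; !lps && i < 150; i++) {
		msleep(1);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	return lps ? 0 : -EIO;
}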
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 1162d6b..47674b9 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1144,8 +1144,8 @@
 		return -ENODEV;
 
 	if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
-		BUG_ON(dma_set_max_seg_size(device->card->device,
-					    SBP2_MAX_SEG_SIZE));
+		WARN_ON(dma_set_max_seg_size(device->card->device,
+					     SBP2_MAX_SEG_SIZE));
 
 	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
 	if (shost == NULL)
@@ -1475,10 +1475,8 @@
 	}
 
 	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
-	if (orb == NULL) {
-		dev_notice(lu_dev(lu), "failed to alloc ORB\n");
+	if (orb == NULL)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	}
 
 	/* Initialize rcode to something not RCODE_COMPLETE. */
 	orb->base.rcode = -1;
@@ -1636,9 +1634,7 @@
 MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
 
 /* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_SBP2_MODULE
 MODULE_ALIAS("sbp2");
-#endif
 
 static int __init sbp2_init(void)
 {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c22eed9..87d5670 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -38,7 +38,6 @@
 menuconfig GPIOLIB
 	bool "GPIO Support"
 	depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB
-	select GENERIC_GPIO
 	help
 	  This enables GPIO support through the generic GPIO library.
 	  You only need to enable this, if you also want to enable
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index dda6a75..90a80eb 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -255,7 +255,7 @@
 }
 
 /*
- * GENERIC_GPIO primitives.
+ * GPIO primitives.
  */
 static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip,
 	unsigned pin)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6961bbe..264f550 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1685,6 +1685,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2341,7 +2342,7 @@
 
 	init_waitqueue_head(&hdev->debug_wait);
 	INIT_LIST_HEAD(&hdev->debug_list);
-	mutex_init(&hdev->debug_list_lock);
+	spin_lock_init(&hdev->debug_list_lock);
 	sema_init(&hdev->driver_lock, 1);
 	sema_init(&hdev->driver_input_lock, 1);
 
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 7e56cb3..8453214 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -579,15 +579,16 @@
 {
 	int i;
 	struct hid_debug_list *list;
+	unsigned long flags;
 
-	mutex_lock(&hdev->debug_list_lock);
+	spin_lock_irqsave(&hdev->debug_list_lock, flags);
 	list_for_each_entry(list, &hdev->debug_list, node) {
 		for (i = 0; i < strlen(buf); i++)
 			list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
 				buf[i];
 		list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
         }
-	mutex_unlock(&hdev->debug_list_lock);
+	spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
 
 	wake_up_interruptible(&hdev->debug_wait);
 }
@@ -977,6 +978,7 @@
 {
 	int err = 0;
 	struct hid_debug_list *list;
+	unsigned long flags;
 
 	if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
 		err = -ENOMEM;
@@ -992,9 +994,9 @@
 	file->private_data = list;
 	mutex_init(&list->read_mutex);
 
-	mutex_lock(&list->hdev->debug_list_lock);
+	spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
 	list_add_tail(&list->node, &list->hdev->debug_list);
-	mutex_unlock(&list->hdev->debug_list_lock);
+	spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
 
 out:
 	return err;
@@ -1088,10 +1090,11 @@
 static int hid_debug_events_release(struct inode *inode, struct file *file)
 {
 	struct hid_debug_list *list = file->private_data;
+	unsigned long flags;
 
-	mutex_lock(&list->hdev->debug_list_lock);
+	spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
 	list_del(&list->node);
-	mutex_unlock(&list->hdev->debug_list_lock);
+	spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
 	kfree(list->hid_debug_buf);
 	kfree(list);
 
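hid-debug's list lock turns from a mutex into a spinlock because
hid_debug_event() can be reached from atomic context (input reports arriving
in interrupt handlers), where sleeping locks are forbidden. The general
spin_lock_irqsave() pairing, for reference:

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_add(struct list_head *node)
{
	unsigned long flags;

	/* Usable from any context: local IRQs are disabled while the
	 * lock is held, and 'flags' restores their previous state. */
	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(node, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}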
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index 9b0efb0..d164911 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -18,7 +18,8 @@
 
 #include "hid-ids.h"
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 #define SRWS1_NUMBER_LEDS 15
 struct steelseries_srws1_data {
 	__u16 led_state;
@@ -107,7 +108,8 @@
 0xC0                /*  End Collection                      */
 };
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
 {
 	struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -370,7 +372,8 @@
 static struct hid_driver steelseries_srws1_driver = {
 	.name = "steelseries_srws1",
 	.id_table = steelseries_srws1_devices,
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 	.probe = steelseries_srws1_probe,
 	.remove = steelseries_srws1_remove,
 #endif
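The hid-steelseries condition guards against an illegal configuration: LED
support compiled into a built-in HID driver while the LED core itself is a
module, leaving unresolvable symbols. IS_BUILTIN() and IS_MODULE() from
<linux/kconfig.h> let the preprocessor state the two legal combinations
directly:

#include <linux/kconfig.h>

/*
 * LED support is usable if the LED core is built in, or if both the
 * LED core and this driver are modules (module load order then
 * provides the symbols). Built-in driver + modular LED core is out.
 */
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
#define EXAMPLE_HAVE_LEDS 1
#else
#define EXAMPLE_HAVE_LEDS 0
#endif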
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index adfee98..631736e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -363,7 +363,7 @@
 
 config I2C_CBUS_GPIO
 	tristate "CBUS I2C driver"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Support for CBUS access using I2C API. Mostly relevant for Nokia
 	  Internet Tablets (770, N800 and N810).
@@ -436,7 +436,7 @@
 
 config I2C_GPIO
 	tristate "GPIO-based bitbanging I2C"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select I2C_ALGOBIT
 	help
 	  This is a very simple bitbanging I2C driver utilizing the
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 5faf244..f7f9865 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -7,7 +7,7 @@
 
 config I2C_ARB_GPIO_CHALLENGE
 	tristate "GPIO-based I2C arbitration"
-	depends on GENERIC_GPIO && OF
+	depends on GPIOLIB && OF
 	help
 	  If you say yes to this option, support will be included for an
 	  I2C multimaster arbitration scheme using GPIOs and a challenge &
@@ -19,7 +19,7 @@
 
 config I2C_MUX_GPIO
 	tristate "GPIO-based I2C multiplexer"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  If you say yes to this option, support will be included for a
 	  GPIO based I2C multiplexer. This driver provides access to
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b231139..2ff6204 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1606,7 +1606,7 @@
 	return rc;
 }
 
-static int idecd_release(struct gendisk *disk, fmode_t mode)
+static void idecd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
 
@@ -1615,8 +1615,6 @@
 
 	ide_cd_put(info);
 	mutex_unlock(&ide_cd_mutex);
-
-	return 0;
 }
 
 static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg)
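This hunk and the ide-gd and ide-tape hunks below track a block-layer API
change: the ->release() method of block_device_operations now returns void,
since the block core never acted on its return value anyway. Under the new
signature a release handler reduces to this (sketch, hypothetical driver):

#include <linux/blkdev.h>
#include <linux/module.h>

static void example_release(struct gendisk *disk, fmode_t mode)
{
	/* Drop references; nothing to report, since no caller could
	 * act on a release-time error anyway. */
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.release	= example_release,
};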
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 70ea876..de86631 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -250,7 +250,7 @@
 }
 
 
-static int ide_gd_release(struct gendisk *disk, fmode_t mode)
+static void ide_gd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
 	ide_drive_t *drive = idkp->drive;
@@ -270,8 +270,6 @@
 
 	ide_disk_put(idkp);
 	mutex_unlock(&ide_gd_mutex);
-
-	return 0;
 }
 
 static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 89f8595..c6c574b 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1918,15 +1918,13 @@
 	return 0;
 }
 
-static int idetape_release(struct gendisk *disk, fmode_t mode)
+static void idetape_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);
 
 	mutex_lock(&ide_tape_mutex);
 	ide_tape_put(tape);
 	mutex_unlock(&ide_tape_mutex);
-
-	return 0;
 }
 
 static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0e8fab1..fa6964d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -273,6 +273,27 @@
 		.target_residency = 500,
 		.enter = &intel_idle },
 	{
+		.name = "C8-HSW",
+		.desc = "MWAIT 0x40",
+		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 300,
+		.target_residency = 900,
+		.enter = &intel_idle },
+	{
+		.name = "C9-HSW",
+		.desc = "MWAIT 0x50",
+		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 600,
+		.target_residency = 1800,
+		.enter = &intel_idle },
+	{
+		.name = "C10-HSW",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 2600,
+		.target_residency = 7700,
+		.enter = &intel_idle },
+	{
 		.enter = NULL }
 };
 
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 0bb99bb..c47c203 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -878,6 +878,8 @@
 			}
 			return;
 		}
+		if (empty)
+			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a8fdd33..22192de 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -348,7 +348,8 @@
 	struct ib_qp *qp = context;
 
 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
-		event->element.qp->event_handler(event, event->element.qp->qp_context);
+		if (event->element.qp->event_handler)
+			event->element.qp->event_handler(event, event->element.qp->qp_context);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 31f9201..c40088e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -62,13 +62,13 @@
 		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
 	if (random) {
 		j = 0;
-		random_bytes = random32();
+		random_bytes = prandom_u32();
 		for (i = 0; i < RANDOM_SIZE; i++)
 			rarray[i] = i + skip_low;
 		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
 			if (j >= RANDOM_SIZE) {
 				j = 0;
-				random_bytes = random32();
+				random_bytes = prandom_u32();
 			}
 			idx = (random_bytes >> (j * 2)) & 0xF;
 			kfifo_in(fifo,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9c12da0..e87f220 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -559,7 +559,7 @@
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
-	int npages;
+	int npages = 0;
 	int ret;
 
 	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f95e5df..0161ae6 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@
 
 	if (obj < alloc->max) {
 		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-			alloc->last += random32() % RANDOM_SKIP;
+			alloc->last += prandom_u32() % RANDOM_SKIP;
 		else
 			alloc->last = obj + 1;
 		if (alloc->last >= alloc->max)
@@ -88,7 +88,7 @@
 	alloc->start = start;
 	alloc->flags = flags;
 	if (flags & C4IW_ID_TABLE_F_RANDOM)
-		alloc->last = random32() % RANDOM_SKIP;
+		alloc->last = prandom_u32() % RANDOM_SKIP;
 	else
 		alloc->last = 0;
 	alloc->max  = num;
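The random32() -> prandom_u32() renames here and in the mlx4 hunk below are
mechanical; the generator is unchanged, and the new name spells out that it
is a pseudo-random source, fine for scattering resource IDs but never for
anything security-sensitive. Typical use:

#include <linux/random.h>

/* Statistical randomness only: pick a start offset in [0, skip).
 * Never use prandom_u32() to generate secrets or keys. */
static u32 example_random_offset(u32 skip)
{
	return prandom_u32() % skip;
}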
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5b059e2..2320404 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -111,6 +111,16 @@
 	return 0;
 }
 
+static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+{
+	int ret = -ENOSYS;
+	if (user)
+		ret = alloc_oc_sq(rdev, sq);
+	if (ret)
+		ret = alloc_host_sq(rdev, sq);
+	return ret;
+}
+
 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		      struct c4iw_dev_ucontext *uctx)
 {
@@ -179,15 +189,9 @@
 		goto free_sw_rq;
 	}
 
-	if (user) {
-		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-			goto free_hwaddr;
-	} else {
-		ret = alloc_host_sq(rdev, &wq->sq);
-		if (ret)
-			goto free_hwaddr;
-	}
-
+	ret = alloc_sq(rdev, &wq->sq, user);
+	if (ret)
+		goto free_hwaddr;
 	memset(wq->sq.queue, 0, wq->sq.memsize);
 	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index aed8afe..6d7f453 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <linux/cpu.h>
 #include <asm/pgtable.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index ea93870..44ea939 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2187,7 +2187,8 @@
 	if (ret)
 		goto err_reg;
 
-	if (ipath_verbs_register_sysfs(dev))
+	ret = ipath_verbs_register_sysfs(dev);
+	if (ret)
 		goto err_class;
 
 	enable_timer(dd);
@@ -2327,15 +2328,15 @@
 	int i;
 	int ret;
 
-	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-		if (device_create_file(&dev->dev,
-				       ipath_class_attributes[i])) {
-			ret = 1;
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
+		ret = device_create_file(&dev->dev,
+				       ipath_class_attributes[i]);
+		if (ret)
 			goto bail;
-		}
-
-	ret = 0;
-
+	}
+	return 0;
 bail:
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+		device_remove_file(&dev->dev, ipath_class_attributes[i]);
 	return ret;
 }
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 73b3a71..d5e60f4 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -33,6 +33,7 @@
 
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
 #include <linux/slab.h>
 
 #include "mlx4_ib.h"
@@ -585,6 +586,7 @@
 	struct mlx4_qp *mqp;
 	struct mlx4_ib_wq *wq;
 	struct mlx4_ib_srq *srq;
+	struct mlx4_srq *msrq = NULL;
 	int is_send;
 	int is_error;
 	u32 g_mlpath_rqpn;
@@ -653,6 +655,20 @@
 
 	wc->qp = &(*cur_qp)->ibqp;
 
+	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+		u32 srq_num;
+		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+		srq_num       = g_mlpath_rqpn & 0xffffff;
+		/* SRQ is also in the radix tree */
+		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+				       srq_num);
+		if (unlikely(!msrq)) {
+			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+				cq->mcq.cqn, srq_num);
+			return -EINVAL;
+		}
+	}
+
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
 		if (!(*cur_qp)->sq_signal_bits) {
@@ -666,6 +682,11 @@
 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
 		wc->wr_id = srq->wrid[wqe_ctr];
 		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+	} else if (msrq) {
+		srq = to_mibsrq(msrq);
+		wqe_ctr = be16_to_cpu(cqe->wqe_index);
+		wc->wr_id = srq->wrid[wqe_ctr];
+		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 	} else {
 		wq	  = &(*cur_qp)->rq;
 		tail	  = wq->tail & (wq->wqe_cnt - 1);
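
For XRC target QPs there is no receive queue on the local QP, so the
completion carries the SRQ number instead: the hunk above decodes it from
the low 24 bits of the CQE's g_mlpath_rqpn word and resolves it through
mlx4_srq_lookup(). The decode step, roughly (helper name is illustrative):

    /* Low 24 bits of the CQE's g_mlpath_rqpn word carry the SRQ number */
    static u32 ex_cqe_srqn(__be32 g_mlpath_rqpn)
    {
            return be32_to_cpu(g_mlpath_rqpn) & 0xffffff;
    }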
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 934792c..4d599ce 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -93,7 +93,7 @@
 __be64 mlx4_ib_gen_node_guid(void)
 {
 #define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
-	return cpu_to_be64(NODE_GUID_HI | random32());
+	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
 }
 
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 35cced2..4f10af2 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1292,6 +1292,8 @@
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+			context->param3 |= cpu_to_be32(1 << 30);
 	}
 
 	if (qp->ibqp.uobject)
@@ -1458,6 +1460,10 @@
 		}
 	}
 
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+					MLX4_IB_LINK_TYPE_ETH;
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 4f7aa30..b56c942 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -39,7 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
 #include <linux/jiffies.h>
 #include <asm/pgtable.h>
 #include <linux/delay.h>
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 034cc82..3c8e4e3 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -808,10 +808,14 @@
 	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
 		ret = device_create_file(&dev->dev, qib_attributes[i]);
 		if (ret)
-			return ret;
+			goto bail;
 	}
 
 	return 0;
+bail:
+	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
+		device_remove_file(&dev->dev, qib_attributes[i]);
+	return ret;
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7c0ab16..904c384 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -2234,7 +2234,8 @@
 	if (ret)
 		goto err_agents;
 
-	if (qib_verbs_register_sysfs(dd))
+	ret = qib_verbs_register_sysfs(dd);
+	if (ret)
 		goto err_class;
 
 	goto bail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 1ef880d..3eceb61 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -460,7 +460,7 @@
 		goto err_qp;
 	}
 
-	psn = random32() & 0xffffff;
+	psn = prandom_u32() & 0xffffff;
 	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
 	if (ret)
 		goto err_modify;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 554b906..b6e049a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -830,7 +830,7 @@
 	 */
 	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
 
-	return 0;
+	return sizeof *header;
 }
 
 static void ipoib_set_mcast_list(struct net_device *dev)
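
The one-line return change matters because a header_ops->create() hook is
expected to report the number of header bytes it pushed (or a negative
errno), and dev_hard_header() hands that value straight back to its caller.
A hedged sketch of a conforming hook (struct ex_header and its fill-in are
hypothetical):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct ex_header {              /* hypothetical link-layer header */
            __be16  proto;
            u8      daddr[6];
    };

    static int ex_hard_header(struct sk_buff *skb, struct net_device *dev,
                              unsigned short type, const void *daddr,
                              const void *saddr, unsigned int len)
    {
            struct ex_header *header;

            header = (struct ex_header *)skb_push(skb, sizeof(*header));
            header->proto = htons(type);
            memcpy(header->daddr, daddr, sizeof(header->daddr));

            return sizeof(*header); /* the pushed length, not 0 */
    }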
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 0ab8c9c..f19b099 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -82,10 +82,10 @@
 
 int iser_debug_level = 0;
 
-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
-		   "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);
 
 module_param_named(debug_level, iser_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
@@ -370,8 +370,8 @@
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
-					conn, conn->dd_data, ib_conn);
+	iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
+		  conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn  = ib_conn;
@@ -475,28 +475,28 @@
 	case ISCSI_PARAM_HDRDGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_IFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "IFMarker wasn't negotiated to No");
+			iser_err("IFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_OFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "OFMarker wasn't negotiated to No");
+			iser_err("OFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
@@ -596,7 +596,7 @@
 	     ib_conn->state == ISER_CONN_DOWN))
 		rc = -1;
 
-	iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+	iser_info("ib conn %p rc = %d\n", ib_conn, rc);
 
 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -623,7 +623,7 @@
 		iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
 
 
-	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+	iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
 }
 
@@ -682,7 +682,7 @@
 
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
-	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+	.name                   = "iSCSI Initiator over iSER",
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
@@ -740,7 +740,7 @@
 	iser_dbg("Starting iSER datamover...\n");
 
 	if (iscsi_max_lun < 1) {
-		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+		iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
 		return -EINVAL;
 	}
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 5babdb3..06f578c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -42,6 +42,7 @@
 
 #include <linux/types.h>
 #include <linux/net.h>
+#include <linux/printk.h>
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
 
@@ -65,20 +66,26 @@
 
 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"0.1"
-#define DRV_DATE	"May 7th, 2006"
+#define DRV_VER		"1.1"
 
 #define iser_dbg(fmt, arg...)				\
 	do {						\
-		if (iser_debug_level > 1)		\
+		if (iser_debug_level > 2)		\
 			printk(KERN_DEBUG PFX "%s:" fmt,\
 				__func__ , ## arg);	\
 	} while (0)
 
 #define iser_warn(fmt, arg...)				\
 	do {						\
+		if (iser_debug_level > 1)		\
+			pr_warn(PFX "%s:" fmt,          \
+				__func__ , ## arg);	\
+	} while (0)
+
+#define iser_info(fmt, arg...)				\
+	do {						\
 		if (iser_debug_level > 0)		\
-			printk(KERN_DEBUG PFX "%s:" fmt,\
+			pr_info(PFX "%s:" fmt,          \
 				__func__ , ## arg);	\
 	} while (0)
 
@@ -133,6 +140,15 @@
 	__be64  read_va;
 } __attribute__((packed));
 
+
+#define ISER_ZBVA_NOT_SUPPORTED		0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40
+
+struct iser_cm_hdr {
+	u8      flags;
+	u8      rsvd[3];
+} __packed;
+
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index be1edb0..68ebb7f 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -416,8 +416,9 @@
 			for (i=0 ; i<ib_conn->page_vec->length ; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
 					 (unsigned long long) ib_conn->page_vec->pages[i]);
-			return err;
 		}
+		if (err)
+			return err;
 	}
 	return 0;
 }
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4debadc..5278916 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -74,8 +74,9 @@
 	struct iser_cq_desc *cq_desc;
 
 	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
-	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
-		 device->ib_device->name, device->ib_device->num_comp_vectors);
+	iser_info("using %d CQs, device %s supports %d vectors\n",
+		  device->cqs_used, device->ib_device->name,
+		  device->ib_device->num_comp_vectors);
 
 	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
 				  GFP_KERNEL);
@@ -262,7 +263,7 @@
 			min_index = index;
 	device->cq_active_qps[min_index]++;
 	mutex_unlock(&ig.connlist_mutex);
-	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
 
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
@@ -280,9 +281,9 @@
 		goto out_err;
 
 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
+	iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->cma_id->qp);
 	return ret;
 
 out_err:
@@ -299,9 +300,9 @@
 	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
-	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->qp);
+	iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->qp);
 
 	/* qp is created only once both addr & route are resolved */
 	if (ib_conn->fmr_pool != NULL)
@@ -379,7 +380,7 @@
 {
 	mutex_lock(&ig.device_list_mutex);
 	device->refcount--;
-	iser_err("device %p refcount %d\n",device,device->refcount);
+	iser_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		iser_free_device_ib_res(device);
 		list_del(&device->ig_list);
@@ -498,6 +499,7 @@
 {
 	struct rdma_conn_param conn_param;
 	int    ret;
+	struct iser_cm_hdr req_hdr;
 
 	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
 	if (ret)
@@ -509,6 +511,12 @@
 	conn_param.retry_count	       = 7;
 	conn_param.rnr_retry_count     = 6;
 
+	memset(&req_hdr, 0, sizeof(req_hdr));
+	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+			ISER_SEND_W_INV_NOT_SUPPORTED);
+	conn_param.private_data		= (void *)&req_hdr;
+	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);
+
 	ret = rdma_connect(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
@@ -558,8 +566,8 @@
 {
 	int ret = 0;
 
-	iser_err("event %d status %d conn %p id %p\n",
-		event->event, event->status, cma_id->context, cma_id);
+	iser_info("event %d status %d conn %p id %p\n",
+		  event->event, event->status, cma_id->context, cma_id);
 
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -619,8 +627,8 @@
 	/* the device is known only --after-- address resolution */
 	ib_conn->device = NULL;
 
-	iser_err("connecting to: %pI4, port 0x%x\n",
-		 &dst_addr->sin_addr, dst_addr->sin_port);
+	iser_info("connecting to: %pI4, port 0x%x\n",
+		  &dst_addr->sin_addr, dst_addr->sin_port);
 
 	ib_conn->state = ISER_CONN_PENDING;
 
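
The connect handler above advertises what this initiator cannot do by
placing a four-byte iser_cm_hdr in the CM REQ private data. A sketch of
that shape, using the real rdma_conn_param fields (the ex_fill helper
itself is illustrative):

    #include <rdma/rdma_cm.h>
    #include <linux/string.h>

    static void ex_fill_private_data(struct rdma_conn_param *param,
                                     struct iser_cm_hdr *hdr)
    {
            memset(hdr, 0, sizeof(*hdr));
            hdr->flags = ISER_ZBVA_NOT_SUPPORTED |
                         ISER_SEND_W_INV_NOT_SUPPORTED;
            param->private_data     = hdr;
            param->private_data_len = sizeof(*hdr);
    }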
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c09d41b..b08ca7a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1374,7 +1374,7 @@
 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	default:
-		WARN_ON("ERROR: unexpected command state");
+		WARN(1, "Unexpected command state (%d)", state);
 		break;
 	}
 
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 6a195d5..62a2c0e 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -175,7 +175,7 @@
 
 config KEYBOARD_GPIO
 	tristate "GPIO Buttons"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  This driver implements support for buttons connected
 	  to GPIO pins of various CPUs (and some other chips).
@@ -190,7 +190,7 @@
 
 config KEYBOARD_GPIO_POLLED
 	tristate "Polled GPIO buttons"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select INPUT_POLLDEV
 	help
 	  This driver implements support for buttons connected
@@ -241,7 +241,7 @@
 
 config KEYBOARD_MATRIX
 	tristate "GPIO driven matrix keypad support"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select INPUT_MATRIXKMAP
 	help
 	  Enable support for GPIO driven matrix keypad.
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index af80928..bb698e1 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -214,7 +214,7 @@
 config INPUT_GP2A
 	tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
 	depends on I2C
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
 	  hooked to an I2C bus.
@@ -224,7 +224,7 @@
 
 config INPUT_GPIO_TILT_POLLED
 	tristate "Polled GPIO tilt switch"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select INPUT_POLLDEV
 	help
 	  This driver implements support for tilt switches connected
@@ -472,7 +472,7 @@
 
 config INPUT_GPIO_ROTARY_ENCODER
 	tristate "Rotary encoders connected to GPIO pins"
-	depends on GPIOLIB && GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say Y here to add support for rotary encoders connected to GPIO lines.
 	  Check file:Documentation/input/rotary-encoder.txt for more
@@ -484,7 +484,7 @@
 config INPUT_RB532_BUTTON
 	tristate "Mikrotik Routerboard 532 button interface"
 	depends on MIKROTIK_RB532
-	depends on GPIOLIB && GENERIC_GPIO
+	depends on GPIOLIB
 	select INPUT_POLLDEV
 	help
 	  Say Y here if you want support for the S1 button built into
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 802bd6a..effa9c5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -295,7 +295,7 @@
 
 config MOUSE_GPIO
 	tristate "GPIO mouse"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select INPUT_POLLDEV
 	help
 	  This driver simulates a mouse on GPIO lines of various CPUs (and some
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d44806d..ef99229 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -173,7 +173,7 @@
 config LEDS_GPIO
 	tristate "LED Support for GPIO connected LEDs"
 	depends on LEDS_CLASS
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  This option enables support for the LEDs connected to GPIO
 	  outputs. To be useful the particular board must have LEDs
@@ -362,7 +362,7 @@
 config LEDS_LT3593
 	tristate "LED driver for LT3593 controllers"
 	depends on LEDS_CLASS
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  This option enables support for LEDs driven by a Linear Technology
 	  LT3593 controller. This controller uses a special one-wire pulse
@@ -431,7 +431,7 @@
 
 config LEDS_RENESAS_TPU
 	bool "LED support for Renesas TPU"
-	depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
+	depends on LEDS_CLASS=y && HAVE_CLK && GPIOLIB
 	help
 	  This option enables build of the LED TPU platform driver,
 	  suitable to drive any TPU channel on newer Renesas SoCs.
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4d8d90b..3bfc8f1 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -174,6 +174,8 @@
 
 	  If unsure, say N.
 
+source "drivers/md/bcache/Kconfig"
+
 config BLK_DEV_DM
 	tristate "Device mapper support"
 	---help---
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 7ceeaef..1439fd4 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,6 +29,7 @@
 obj-$(CONFIG_MD_RAID456)	+= raid456.o
 obj-$(CONFIG_MD_MULTIPATH)	+= multipath.o
 obj-$(CONFIG_MD_FAULTY)		+= faulty.o
+obj-$(CONFIG_BCACHE)		+= bcache/
 obj-$(CONFIG_BLK_DEV_MD)	+= md-mod.o
 obj-$(CONFIG_BLK_DEV_DM)	+= dm-mod.o
 obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
new file mode 100644
index 0000000..05c220d
--- /dev/null
+++ b/drivers/md/bcache/Kconfig
@@ -0,0 +1,42 @@
+
+config BCACHE
+	tristate "Block device as cache"
+	select CLOSURES
+	---help---
+	Allows a block device to be used as a cache for other devices; uses
+	a btree for indexing and the layout is optimized for SSDs.
+
+	See Documentation/bcache.txt for details.
+
+config BCACHE_DEBUG
+	bool "Bcache debugging"
+	depends on BCACHE
+	---help---
+	Don't select this option unless you're a developer
+
+	Enables extra debugging tools (primarily a fuzz tester)
+
+config BCACHE_EDEBUG
+	bool "Extended runtime checks"
+	depends on BCACHE
+	---help---
+	Don't select this option unless you're a developer
+
+	Enables extra runtime checks which significantly affect performance
+
+config BCACHE_CLOSURES_DEBUG
+	bool "Debug closures"
+	depends on BCACHE
+	select DEBUG_FS
+	---help---
+	Keeps all active closures in a linked list and provides a debugfs
+	interface to list them, which makes it possible to see asynchronous
+	operations that get stuck.
+
+# cgroup code needs to be updated:
+#
+#config CGROUP_BCACHE
+#	bool "Cgroup controls for bcache"
+#	depends on BCACHE && BLK_CGROUP
+#	---help---
+#	TODO
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
new file mode 100644
index 0000000..0e9c825
--- /dev/null
+++ b/drivers/md/bcache/Makefile
@@ -0,0 +1,7 @@
+
+obj-$(CONFIG_BCACHE)	+= bcache.o
+
+bcache-y		:= alloc.o btree.o bset.o io.o journal.o writeback.o\
+	movinggc.o request.o super.o sysfs.o debug.o util.o trace.o stats.o closure.o
+
+CFLAGS_request.o	+= -Iblock
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
new file mode 100644
index 0000000..048f294
--- /dev/null
+++ b/drivers/md/bcache/alloc.c
@@ -0,0 +1,599 @@
+/*
+ * Primary bucket allocation code
+ *
+ * Copyright 2012 Google, Inc.
+ *
+ * Allocation in bcache is done in terms of buckets:
+ *
+ * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
+ * btree pointers - they must match for the pointer to be considered valid.
+ *
+ * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
+ * bucket simply by incrementing its gen.
+ *
+ * The gens (along with the priorities; it's really the gens are important but
+ * the code is named as if it's the priorities) are written in an arbitrary list
+ * of buckets on disk, with a pointer to them in the journal header.
+ *
+ * When we invalidate a bucket, we have to write its new gen to disk and wait
+ * for that write to complete before we use it - otherwise after a crash we
+ * could have pointers that appeared to be good but pointed to data that had
+ * been overwritten.
+ *
+ * Since the gens and priorities are all stored contiguously on disk, we can
+ * batch this up: We fill up the free_inc list with freshly invalidated buckets,
+ * call prio_write(), and when prio_write() finishes we pull buckets off the
+ * free_inc list and optionally discard them.
+ *
+ * free_inc isn't the only freelist - if it was, we'd often have to sleep while
+ * priorities and gens were being written before we could allocate. c->free is a
+ * smaller freelist, and buckets on that list are always ready to be used.
+ *
+ * If we've got discards enabled, that happens when a bucket moves from the
+ * free_inc list to the free list.
+ *
+ * There is another freelist, because sometimes we have buckets that we know
+ * have nothing pointing into them - these we can reuse without waiting for
+ * priorities to be rewritten. These come from freed btree nodes and buckets
+ * that garbage collection discovered no longer had valid keys pointing into
+ * them (because they were overwritten). That's the unused list - buckets on the
+ * unused list move to the free list, optionally being discarded in the process.
+ *
+ * It's also important to ensure that gens don't wrap around - with respect to
+ * either the oldest gen in the btree or the gen on disk. This is quite
+ * difficult to do in practice, but we explicitly guard against it anyways - if
+ * a bucket is in danger of wrapping around we simply skip invalidating it that
+ * time around, and we garbage collect or rewrite the priorities sooner than we
+ * would have otherwise.
+ *
+ * bch_bucket_alloc() allocates a single bucket from a specific cache.
+ *
+ * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * out of a cache set.
+ *
+ * free_some_buckets() drives all the processes described above. It's called
+ * from bch_bucket_alloc() and a few other places that need to make sure free
+ * buckets are ready.
+ *
+ * invalidate_buckets_(lru|fifo)() find buckets that are available to be
+ * invalidated, and then invalidate them and stick them on the free_inc list -
+ * in either lru or fifo order.
+ */
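/*
 * Editor's sketch (hypothetical ex_* types, not part of this file): the
 * gen mechanism described above in miniature. A pointer is valid only
 * while its gen matches the gen of the bucket it points into, so bumping
 * a bucket's gen invalidates every existing pointer into it at once.
 */
struct ex_bucket { uint8_t gen; };
struct ex_ptr    { uint8_t gen; size_t bucket; };

static inline bool ex_ptr_valid(struct ex_bucket *buckets, struct ex_ptr *p)
{
	return p->gen == buckets[p->bucket].gen;
}

static inline void ex_invalidate(struct ex_bucket *b)
{
	b->gen++;	/* the new gen must hit disk before the bucket is reused */
}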
+
+#include "bcache.h"
+#include "btree.h"
+
+#include <linux/random.h>
+
+#define MAX_IN_FLIGHT_DISCARDS		8U
+
+/* Bucket heap / gen */
+
+uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
+{
+	uint8_t ret = ++b->gen;
+
+	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
+	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
+
+	if (CACHE_SYNC(&ca->set->sb)) {
+		ca->need_save_prio = max(ca->need_save_prio,
+					 bucket_disk_gen(b));
+		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
+	}
+
+	return ret;
+}
+
+void bch_rescale_priorities(struct cache_set *c, int sectors)
+{
+	struct cache *ca;
+	struct bucket *b;
+	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
+	unsigned i;
+	int r;
+
+	atomic_sub(sectors, &c->rescale);
+
+	do {
+		r = atomic_read(&c->rescale);
+
+		if (r >= 0)
+			return;
+	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);
+
+	mutex_lock(&c->bucket_lock);
+
+	c->min_prio = USHRT_MAX;
+
+	for_each_cache(ca, c, i)
+		for_each_bucket(b, ca)
+			if (b->prio &&
+			    b->prio != BTREE_PRIO &&
+			    !atomic_read(&b->pin)) {
+				b->prio--;
+				c->min_prio = min(c->min_prio, b->prio);
+			}
+
+	mutex_unlock(&c->bucket_lock);
+}
+
+/* Discard/TRIM */
+
+struct discard {
+	struct list_head	list;
+	struct work_struct	work;
+	struct cache		*ca;
+	long			bucket;
+
+	struct bio		bio;
+	struct bio_vec		bv;
+};
+
+static void discard_finish(struct work_struct *w)
+{
+	struct discard *d = container_of(w, struct discard, work);
+	struct cache *ca = d->ca;
+	char buf[BDEVNAME_SIZE];
+
+	if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
+		pr_notice("discard error on %s, disabling",
+			 bdevname(ca->bdev, buf));
+		d->ca->discard = 0;
+	}
+
+	mutex_lock(&ca->set->bucket_lock);
+
+	fifo_push(&ca->free, d->bucket);
+	list_add(&d->list, &ca->discards);
+	atomic_dec(&ca->discards_in_flight);
+
+	mutex_unlock(&ca->set->bucket_lock);
+
+	closure_wake_up(&ca->set->bucket_wait);
+	wake_up(&ca->set->alloc_wait);
+
+	closure_put(&ca->set->cl);
+}
+
+static void discard_endio(struct bio *bio, int error)
+{
+	struct discard *d = container_of(bio, struct discard, bio);
+	schedule_work(&d->work);
+}
+
+static void do_discard(struct cache *ca, long bucket)
+{
+	struct discard *d = list_first_entry(&ca->discards,
+					     struct discard, list);
+
+	list_del(&d->list);
+	d->bucket = bucket;
+
+	atomic_inc(&ca->discards_in_flight);
+	closure_get(&ca->set->cl);
+
+	bio_init(&d->bio);
+
+	d->bio.bi_sector	= bucket_to_sector(ca->set, d->bucket);
+	d->bio.bi_bdev		= ca->bdev;
+	d->bio.bi_rw		= REQ_WRITE|REQ_DISCARD;
+	d->bio.bi_max_vecs	= 1;
+	d->bio.bi_io_vec	= d->bio.bi_inline_vecs;
+	d->bio.bi_size		= bucket_bytes(ca);
+	d->bio.bi_end_io	= discard_endio;
+	bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+	submit_bio(0, &d->bio);
+}
+
+/* Allocation */
+
+static inline bool can_inc_bucket_gen(struct bucket *b)
+{
+	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
+		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
+}
+
+bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
+{
+	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));
+
+	if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
+	    CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
+		return false;
+
+	b->prio = 0;
+
+	if (can_inc_bucket_gen(b) &&
+	    fifo_push(&ca->unused, b - ca->buckets)) {
+		atomic_inc(&b->pin);
+		return true;
+	}
+
+	return false;
+}
+
+static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
+{
+	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+		!atomic_read(&b->pin) &&
+		can_inc_bucket_gen(b);
+}
+
+static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
+{
+	bch_inc_gen(ca, b);
+	b->prio = INITIAL_PRIO;
+	atomic_inc(&b->pin);
+	fifo_push(&ca->free_inc, b - ca->buckets);
+}
+
+#define bucket_prio(b)				\
+	(((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
+
+#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
+
+static void invalidate_buckets_lru(struct cache *ca)
+{
+	struct bucket *b;
+	ssize_t i;
+
+	ca->heap.used = 0;
+
+	for_each_bucket(b, ca) {
+		/*
+		 * If we fill up the unused list and then return before
+		 * adding anything to the free_inc list, we'll skip writing
+		 * prios/gens and just go back to allocating from the unused
+		 * list:
+		 */
+		if (fifo_full(&ca->unused))
+			return;
+
+		if (!can_invalidate_bucket(ca, b))
+			continue;
+
+		if (!GC_SECTORS_USED(b) &&
+		    bch_bucket_add_unused(ca, b))
+			continue;
+
+		if (!heap_full(&ca->heap))
+			heap_add(&ca->heap, b, bucket_max_cmp);
+		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+			ca->heap.data[0] = b;
+			heap_sift(&ca->heap, 0, bucket_max_cmp);
+		}
+	}
+
+	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
+		heap_sift(&ca->heap, i, bucket_min_cmp);
+
+	while (!fifo_full(&ca->free_inc)) {
+		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+			/*
+			 * We don't want to be calling invalidate_buckets()
+			 * multiple times when it can't do anything
+			 */
+			ca->invalidate_needs_gc = 1;
+			bch_queue_gc(ca->set);
+			return;
+		}
+
+		invalidate_one_bucket(ca, b);
+	}
+}
+
+static void invalidate_buckets_fifo(struct cache *ca)
+{
+	struct bucket *b;
+	size_t checked = 0;
+
+	while (!fifo_full(&ca->free_inc)) {
+		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
+		    ca->fifo_last_bucket >= ca->sb.nbuckets)
+			ca->fifo_last_bucket = ca->sb.first_bucket;
+
+		b = ca->buckets + ca->fifo_last_bucket++;
+
+		if (can_invalidate_bucket(ca, b))
+			invalidate_one_bucket(ca, b);
+
+		if (++checked >= ca->sb.nbuckets) {
+			ca->invalidate_needs_gc = 1;
+			bch_queue_gc(ca->set);
+			return;
+		}
+	}
+}
+
+static void invalidate_buckets_random(struct cache *ca)
+{
+	struct bucket *b;
+	size_t checked = 0;
+
+	while (!fifo_full(&ca->free_inc)) {
+		size_t n;
+		get_random_bytes(&n, sizeof(n));
+
+		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
+		n += ca->sb.first_bucket;
+
+		b = ca->buckets + n;
+
+		if (can_invalidate_bucket(ca, b))
+			invalidate_one_bucket(ca, b);
+
+		if (++checked >= ca->sb.nbuckets / 2) {
+			ca->invalidate_needs_gc = 1;
+			bch_queue_gc(ca->set);
+			return;
+		}
+	}
+}
+
+static void invalidate_buckets(struct cache *ca)
+{
+	if (ca->invalidate_needs_gc)
+		return;
+
+	switch (CACHE_REPLACEMENT(&ca->sb)) {
+	case CACHE_REPLACEMENT_LRU:
+		invalidate_buckets_lru(ca);
+		break;
+	case CACHE_REPLACEMENT_FIFO:
+		invalidate_buckets_fifo(ca);
+		break;
+	case CACHE_REPLACEMENT_RANDOM:
+		invalidate_buckets_random(ca);
+		break;
+	}
+
+	pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
+		 fifo_used(&ca->free), ca->free.size,
+		 fifo_used(&ca->free_inc), ca->free_inc.size,
+		 fifo_used(&ca->unused), ca->unused.size);
+}
+
+#define allocator_wait(ca, cond)					\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	while (1) {							\
+		prepare_to_wait(&ca->set->alloc_wait,			\
+				&__wait, TASK_INTERRUPTIBLE);		\
+		if (cond)						\
+			break;						\
+									\
+		mutex_unlock(&(ca)->set->bucket_lock);			\
+		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
+			finish_wait(&ca->set->alloc_wait, &__wait);	\
+			closure_return(cl);				\
+		}							\
+									\
+		schedule();						\
+		mutex_lock(&(ca)->set->bucket_lock);			\
+	}								\
+									\
+	finish_wait(&ca->set->alloc_wait, &__wait);			\
+} while (0)
+
+void bch_allocator_thread(struct closure *cl)
+{
+	struct cache *ca = container_of(cl, struct cache, alloc);
+
+	mutex_lock(&ca->set->bucket_lock);
+
+	while (1) {
+		/*
+		 * First, we pull buckets off of the unused and free_inc lists,
+		 * possibly issue discards to them, then we add the bucket to
+		 * the free list:
+		 */
+		while (1) {
+			long bucket;
+
+			if ((!atomic_read(&ca->set->prio_blocked) ||
+			     !CACHE_SYNC(&ca->set->sb)) &&
+			    !fifo_empty(&ca->unused))
+				fifo_pop(&ca->unused, bucket);
+			else if (!fifo_empty(&ca->free_inc))
+				fifo_pop(&ca->free_inc, bucket);
+			else
+				break;
+
+			allocator_wait(ca, (int) fifo_free(&ca->free) >
+				       atomic_read(&ca->discards_in_flight));
+
+			if (ca->discard) {
+				allocator_wait(ca, !list_empty(&ca->discards));
+				do_discard(ca, bucket);
+			} else {
+				fifo_push(&ca->free, bucket);
+				closure_wake_up(&ca->set->bucket_wait);
+			}
+		}
+
+		/*
+		 * We've run out of free buckets, we need to find some buckets
+		 * we can invalidate. First, invalidate them in memory and add
+		 * them to the free_inc list:
+		 */
+
+		allocator_wait(ca, ca->set->gc_mark_valid &&
+			       (ca->need_save_prio > 64 ||
+				!ca->invalidate_needs_gc));
+		invalidate_buckets(ca);
+
+		/*
+		 * Now, we write their new gens to disk so we can start writing
+		 * new stuff to them:
+		 */
+		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
+		if (CACHE_SYNC(&ca->set->sb) &&
+		    (!fifo_empty(&ca->free_inc) ||
+		     ca->need_save_prio > 64))
+			bch_prio_write(ca);
+	}
+}
+
+long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
+{
+	long r = -1;
+again:
+	wake_up(&ca->set->alloc_wait);
+
+	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
+	    fifo_pop(&ca->free, r)) {
+		struct bucket *b = ca->buckets + r;
+#ifdef CONFIG_BCACHE_EDEBUG
+		size_t iter;
+		long i;
+
+		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
+			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
+
+		fifo_for_each(i, &ca->free, iter)
+			BUG_ON(i == r);
+		fifo_for_each(i, &ca->free_inc, iter)
+			BUG_ON(i == r);
+		fifo_for_each(i, &ca->unused, iter)
+			BUG_ON(i == r);
+#endif
+		BUG_ON(atomic_read(&b->pin) != 1);
+
+		SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
+
+		if (watermark <= WATERMARK_METADATA) {
+			SET_GC_MARK(b, GC_MARK_METADATA);
+			b->prio = BTREE_PRIO;
+		} else {
+			SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+			b->prio = INITIAL_PRIO;
+		}
+
+		return r;
+	}
+
+	pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
+		 atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
+		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+
+	if (cl) {
+		closure_wait(&ca->set->bucket_wait, cl);
+
+		if (closure_blocking(cl)) {
+			mutex_unlock(&ca->set->bucket_lock);
+			closure_sync(cl);
+			mutex_lock(&ca->set->bucket_lock);
+			goto again;
+		}
+	}
+
+	return -1;
+}
+
+void bch_bucket_free(struct cache_set *c, struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		struct bucket *b = PTR_BUCKET(c, k, i);
+
+		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_SECTORS_USED(b, 0);
+		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
+	}
+}
+
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+			   struct bkey *k, int n, struct closure *cl)
+{
+	int i;
+
+	lockdep_assert_held(&c->bucket_lock);
+	BUG_ON(!n || n > c->caches_loaded || n > 8);
+
+	bkey_init(k);
+
+	/* sort by free space/prio of oldest data in caches */
+
+	for (i = 0; i < n; i++) {
+		struct cache *ca = c->cache_by_alloc[i];
+		long b = bch_bucket_alloc(ca, watermark, cl);
+
+		if (b == -1)
+			goto err;
+
+		k->ptr[i] = PTR(ca->buckets[b].gen,
+				bucket_to_sector(c, b),
+				ca->sb.nr_this_dev);
+
+		SET_KEY_PTRS(k, i + 1);
+	}
+
+	return 0;
+err:
+	bch_bucket_free(c, k);
+	__bkey_put(c, k);
+	return -1;
+}
+
+int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
+			 struct bkey *k, int n, struct closure *cl)
+{
+	int ret;
+	mutex_lock(&c->bucket_lock);
+	ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
+	mutex_unlock(&c->bucket_lock);
+	return ret;
+}
+
+/* Init */
+
+void bch_cache_allocator_exit(struct cache *ca)
+{
+	struct discard *d;
+
+	while (!list_empty(&ca->discards)) {
+		d = list_first_entry(&ca->discards, struct discard, list);
+		cancel_work_sync(&d->work);
+		list_del(&d->list);
+		kfree(d);
+	}
+}
+
+int bch_cache_allocator_init(struct cache *ca)
+{
+	unsigned i;
+
+	/*
+	 * Reserve:
+	 * Prio/gen writes first
+	 * Then 8 for btree allocations
+	 * Then half for the moving garbage collector
+	 */
+
+	ca->watermark[WATERMARK_PRIO] = 0;
+
+	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
+
+	ca->watermark[WATERMARK_MOVINGGC] = 8 +
+		ca->watermark[WATERMARK_METADATA];
+
+	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
+		ca->watermark[WATERMARK_MOVINGGC];
+
+	for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
+		struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
+		if (!d)
+			return -ENOMEM;
+
+		d->ca = ca;
+		INIT_WORK(&d->work, discard_finish);
+		list_add(&d->list, &ca->discards);
+	}
+
+	return 0;
+}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
new file mode 100644
index 0000000..340146d
--- /dev/null
+++ b/drivers/md/bcache/bcache.h
@@ -0,0 +1,1259 @@
+#ifndef _BCACHE_H
+#define _BCACHE_H
+
+/*
+ * SOME HIGH LEVEL CODE DOCUMENTATION:
+ *
+ * Bcache mostly works with cache sets, cache devices, and backing devices.
+ *
+ * Support for multiple cache devices hasn't quite been finished off yet, but
+ * it's about 95% plumbed through. A cache set and its cache devices is sort of
+ * like a md raid array and its component devices. Most of the code doesn't care
+ * about individual cache devices, the main abstraction is the cache set.
+ *
+ * Multiple cache devices is intended to give us the ability to mirror dirty
+ * cached data and metadata, without mirroring clean cached data.
+ *
+ * Backing devices are different, in that they have a lifetime independent of a
+ * cache set. When you register a newly formatted backing device it'll come up
+ * in passthrough mode, and then you can attach and detach a backing device from
+ * a cache set at runtime - while it's mounted and in use. Detaching implicitly
+ * invalidates any cached data for that backing device.
+ *
+ * A cache set can have multiple (many) backing devices attached to it.
+ *
+ * There's also flash only volumes - this is the reason for the distinction
+ * between struct cached_dev and struct bcache_device. A flash only volume
+ * works much like a bcache device that has a backing device, except the
+ * "cached" data is always dirty. The end result is that we get thin
+ * provisioning with very little additional code.
+ *
+ * Flash only volumes work but they're not production ready because the moving
+ * garbage collector needs more work. More on that later.
+ *
+ * BUCKETS/ALLOCATION:
+ *
+ * Bcache is primarily designed for caching, which means that in normal
+ * operation all of our available space will be allocated. Thus, we need an
+ * efficient way of deleting things from the cache so we can write new things to
+ * it.
+ *
+ * To do this, we first divide the cache device up into buckets. A bucket is the
+ * unit of allocation; they're typically around 1 mb - anywhere from 128k to 2M+
+ * works efficiently.
+ *
+ * Each bucket has a 16 bit priority, and an 8 bit generation associated with
+ * it. The gens and priorities for all the buckets are stored contiguously and
+ * packed on disk (in a linked list of buckets - aside from the superblock, all
+ * of bcache's metadata is stored in buckets).
+ *
+ * The priority is used to implement an LRU. We reset a bucket's priority when
+ * we allocate it or on a cache hit, and every so often we decrement the priority
+ * of each bucket. It could be used to implement something more sophisticated,
+ * if anyone ever gets around to it.
+ *
+ * The generation is used for invalidating buckets. Each pointer also has an 8
+ * bit generation embedded in it; for a pointer to be considered valid, its gen
+ * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
+ * we have to do is increment its gen (and write its new gen to disk; we batch
+ * this up).
+ *
+ * Bcache is entirely COW - we never write twice to a bucket, even buckets that
+ * contain metadata (including btree nodes).
+ *
+ * THE BTREE:
+ *
+ * Bcache is in large part designed around the btree.
+ *
+ * At a high level, the btree is just an index of key -> ptr tuples.
+ *
+ * Keys represent extents, and thus have a size field. Keys also have a variable
+ * number of pointers attached to them (potentially zero, which is handy for
+ * invalidating the cache).
+ *
+ * The key itself is an inode:offset pair. The inode number corresponds to a
+ * backing device or a flash only volume. The offset is the ending offset of the
+ * extent within the inode - not the starting offset; this makes lookups
+ * slightly more convenient.
+ *
+ * Pointers contain the cache device id, the offset on that device, and an 8 bit
+ * generation number. More on the gen later.
+ *
+ * Index lookups are not fully abstracted - cache lookups in particular are
+ * still somewhat mixed in with the btree code, but things are headed in that
+ * direction.
+ *
+ * Updates are fairly well abstracted, though. There are two different ways of
+ * updating the btree; insert and replace.
+ *
+ * BTREE_INSERT will just take a list of keys and insert them into the btree -
+ * overwriting (possibly only partially) any extents they overlap with. This is
+ * used to update the index after a write.
+ *
+ * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
+ * overwriting a key that matches another given key. This is used for inserting
+ * data into the cache after a cache miss, and for background writeback, and for
+ * the moving garbage collector.
+ *
+ * There is no "delete" operation; deleting things from the index is
+ * accomplished either by invalidating pointers (by incrementing a bucket's
+ * gen) or by inserting a key with 0 pointers - which will overwrite anything
+ * previously present at that location in the index.
+ *
+ * This means that there are always stale/invalid keys in the btree. They're
+ * filtered out by the code that iterates through a btree node, and removed when
+ * a btree node is rewritten.
+ *
+ * BTREE NODES:
+ *
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
+ * free smaller than a bucket - so, that's how big our btree nodes are.
+ *
+ * (If buckets are really big we'll only use part of the bucket for a btree node
+ * - no less than 1/4th - but a bucket still contains no more than a single
+ * btree node. I'd actually like to change this, but for now we rely on the
+ * bucket's gen for deleting btree nodes when we rewrite/split a node.)
+ *
+ * Anyways, btree nodes are big - big enough to be inefficient with a textbook
+ * btree implementation.
+ *
+ * The way this is solved is that btree nodes are internally log structured; we
+ * can append new keys to an existing btree node without rewriting it. This
+ * means each set of keys we write is sorted, but the node is not.
+ *
+ * We maintain this log structure in memory - keeping 1Mb of keys sorted would
+ * be expensive, and we have to distinguish between the keys we have written and
+ * the keys we haven't. So to do a lookup in a btree node, we have to search
+ * each sorted set. But we do merge written sets together lazily, so the cost of
+ * these extra searches is quite low (normally most of the keys in a btree node
+ * will be in one big set, and then there'll be one or two sets that are much
+ * smaller).
+ *
+ * This log structure makes bcache's btree more of a hybrid between a
+ * conventional btree and a compacting data structure, with some of the
+ * advantages of both.
+ *
+ * GARBAGE COLLECTION:
+ *
+ * We can't just invalidate any bucket - it might contain dirty data or
+ * metadata. If it once contained dirty data, other writes might overwrite it
+ * later, leaving no valid pointers into that bucket in the index.
+ *
+ * Thus, the primary purpose of garbage collection is to find buckets to reuse.
+ * It also counts how much valid data each bucket currently contains, so that
+ * allocation can reuse buckets sooner when they've been mostly overwritten.
+ *
+ * It also does some things that are really internal to the btree
+ * implementation. If a btree node contains pointers that are stale by more than
+ * some threshold, it rewrites the btree node to avoid the bucket's generation
+ * wrapping around. It also merges adjacent btree nodes if they're empty enough.
+ *
+ * THE JOURNAL:
+ *
+ * Bcache's journal is not necessary for consistency; we always strictly
+ * order metadata writes so that the btree and everything else is consistent on
+ * disk in the event of an unclean shutdown, and in fact bcache had writeback
+ * caching (with recovery from unclean shutdown) before journalling was
+ * implemented.
+ *
+ * Rather, the journal is purely a performance optimization; we can't complete a
+ * write until we've updated the index on disk, otherwise the cache would be
+ * inconsistent in the event of an unclean shutdown. This means that without the
+ * journal, on random write workloads we constantly have to update all the leaf
+ * nodes in the btree, and those writes will be mostly empty (appending at most
+ * a few keys each) - highly inefficient in terms of amount of metadata writes,
+ * and it puts more strain on the various btree resorting/compacting code.
+ *
+ * The journal is just a log of keys we've inserted; on startup we just reinsert
+ * all the keys in the open journal entries. That means that when we're updating
+ * a node in the btree, we can wait until a 4k block of keys fills up before
+ * writing them out.
+ *
+ * For simplicity, we only journal updates to leaf nodes; updates to parent
+ * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
+ * the complexity to deal with journalling them (in particular, journal replay)
+ * - updates to non leaf nodes just happen synchronously (see btree_split()).
+ */
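/*
 * Editor's sketch (hypothetical ex_key, not the real bkey encoding): keys
 * index extents as inode:offset where offset is the *ending* sector, so
 * the start of the extent is derived from the size field.
 */
struct ex_key { uint64_t inode, offset, size; };

static inline uint64_t ex_key_start(const struct ex_key *k)
{
	return k->offset - k->size;	/* offset stores the end of the extent */
}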
+
+#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+
+#include <linux/bio.h>
+#include <linux/blktrace_api.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "util.h"
+#include "closure.h"
+
+struct bucket {
+	atomic_t	pin;
+	uint16_t	prio;
+	uint8_t		gen;
+	uint8_t		disk_gen;
+	uint8_t		last_gc; /* Most out of date gen in the btree */
+	uint8_t		gc_gen;
+	uint16_t	gc_mark;
+};
+
+/*
+ * I'd use bitfields for these, but I don't trust the compiler not to screw me
+ * as multiple threads touch struct bucket without locking
+ */
+
+BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
+#define GC_MARK_RECLAIMABLE	0
+#define GC_MARK_DIRTY		1
+#define GC_MARK_METADATA	2
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+
+struct bkey {
+	uint64_t	high;
+	uint64_t	low;
+	uint64_t	ptr[];
+};
+
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD		8
+
+#define BKEY_PADDED(key)					\
+	union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV			0
+#define BCACHE_SB_VERSION_BDEV			1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID	3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET	4
+#define BCACHE_SB_MAX_VERSION			4
+
+#define SB_SECTOR		8
+#define SB_SIZE			4096
+#define SB_LABEL_SIZE		32
+#define SB_JOURNAL_BUCKETS	256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET	8
+
+#define BDEV_DATA_START_DEFAULT	16	/* sectors */
+
+struct cache_sb {
+	uint64_t		csum;
+	uint64_t		offset;	/* sector where this sb was written */
+	uint64_t		version;
+
+	uint8_t			magic[16];
+
+	uint8_t			uuid[16];
+	union {
+		uint8_t		set_uuid[16];
+		uint64_t	set_magic;
+	};
+	uint8_t			label[SB_LABEL_SIZE];
+
+	uint64_t		flags;
+	uint64_t		seq;
+	uint64_t		pad[8];
+
+	union {
+	struct {
+		/* Cache devices */
+		uint64_t	nbuckets;	/* device size */
+
+		uint16_t	block_size;	/* sectors */
+		uint16_t	bucket_size;	/* sectors */
+
+		uint16_t	nr_in_set;
+		uint16_t	nr_this_dev;
+	};
+	struct {
+		/* Backing devices */
+		uint64_t	data_offset;
+
+		/*
+		 * block_size from the cache device section is still used by
+		 * backing devices, so don't add anything here until we fix
+		 * things to not need it for backing devices anymore
+		 */
+	};
+	};
+
+	uint32_t		last_mount;	/* time_t */
+
+	uint16_t		first_bucket;
+	union {
+		uint16_t	njournal_buckets;
+		uint16_t	keys;
+	};
+	uint64_t		d[SB_JOURNAL_BUCKETS];	/* journal buckets */
+};
+
+BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD,		struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT,	struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU	0U
+#define CACHE_REPLACEMENT_FIFO	1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE,	struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH	0U
+#define CACHE_MODE_WRITEBACK	1U
+#define CACHE_MODE_WRITEAROUND	2U
+#define CACHE_MODE_NONE		3U
+BITMASK(BDEV_STATE,		struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE		0U
+#define BDEV_STATE_CLEAN	1U
+#define BDEV_STATE_DIRTY	2U
+#define BDEV_STATE_STALE	3U
+
+/* Version 1: Seed pointer into btree node checksum
+ */
+#define BCACHE_BSET_VERSION	1
+
+/*
+ * This is the on disk format for btree nodes - a btree node on disk is a list
+ * of these; within each set the keys are sorted
+ */
+struct bset {
+	uint64_t		csum;
+	uint64_t		magic;
+	uint64_t		seq;
+	uint32_t		version;
+	uint32_t		keys;
+
+	union {
+		struct bkey	start[0];
+		uint64_t	d[0];
+	};
+};
+
+/*
+ * On disk format for priorities and gens - see super.c near prio_write() for
+ * more.
+ */
+struct prio_set {
+	uint64_t		csum;
+	uint64_t		magic;
+	uint64_t		seq;
+	uint32_t		version;
+	uint32_t		pad;
+
+	uint64_t		next_bucket;
+
+	struct bucket_disk {
+		uint16_t	prio;
+		uint8_t		gen;
+	} __attribute((packed)) data[];
+};
+
+struct uuid_entry {
+	union {
+		struct {
+			uint8_t		uuid[16];
+			uint8_t		label[32];
+			uint32_t	first_reg;
+			uint32_t	last_reg;
+			uint32_t	invalidated;
+
+			uint32_t	flags;
+			/* Size of flash only volumes */
+			uint64_t	sectors;
+		};
+
+		uint8_t	pad[128];
+	};
+};
+
+BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);
+
+#include "journal.h"
+#include "stats.h"
+struct search;
+struct btree;
+struct keybuf;
+
+struct keybuf_key {
+	struct rb_node		node;
+	BKEY_PADDED(key);
+	void			*private;
+};
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
+struct keybuf {
+	keybuf_pred_fn		*key_predicate;
+
+	struct bkey		last_scanned;
+	spinlock_t		lock;
+
+	/*
+	 * Beginning and end of range in rb tree - so that we can skip taking
+	 * lock and checking the rb tree when we need to check for overlapping
+	 * keys.
+	 */
+	struct bkey		start;
+	struct bkey		end;
+
+	struct rb_root		keys;
+
+#define KEYBUF_NR		100
+	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
+};
+
+struct bio_split_pool {
+	struct bio_set		*bio_split;
+	mempool_t		*bio_split_hook;
+};
+
+struct bio_split_hook {
+	struct closure		cl;
+	struct bio_split_pool	*p;
+	struct bio		*bio;
+	bio_end_io_t		*bi_end_io;
+	void			*bi_private;
+};
+
+struct bcache_device {
+	struct closure		cl;
+
+	struct kobject		kobj;
+
+	struct cache_set	*c;
+	unsigned		id;
+#define BCACHEDEVNAME_SIZE	12
+	char			name[BCACHEDEVNAME_SIZE];
+
+	struct gendisk		*disk;
+
+	/* If nonzero, we're closing */
+	atomic_t		closing;
+
+	/* If nonzero, we're detaching/unregistering from cache set */
+	atomic_t		detaching;
+
+	atomic_long_t		sectors_dirty;
+	unsigned long		sectors_dirty_gc;
+	unsigned long		sectors_dirty_last;
+	long			sectors_dirty_derivative;
+
+	mempool_t		*unaligned_bvec;
+	struct bio_set		*bio_split;
+
+	unsigned		data_csum:1;
+
+	int (*cache_miss)(struct btree *, struct search *,
+			  struct bio *, unsigned);
+	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
+
+	struct bio_split_pool	bio_split_hook;
+};
+
+struct io {
+	/* Used to track sequential IO so it can be skipped */
+	struct hlist_node	hash;
+	struct list_head	lru;
+
+	unsigned long		jiffies;
+	unsigned		sequential;
+	sector_t		last;
+};
+
+struct cached_dev {
+	struct list_head	list;
+	struct bcache_device	disk;
+	struct block_device	*bdev;
+
+	struct cache_sb		sb;
+	struct bio		sb_bio;
+	struct bio_vec		sb_bv[1];
+	struct closure_with_waitlist sb_write;
+
+	/* Refcount on the cache set. Always nonzero when we're caching. */
+	atomic_t		count;
+	struct work_struct	detach;
+
+	/*
+	 * Device might not be running if it's dirty and the cache set hasn't
+	 * showed up yet.
+	 */
+	atomic_t		running;
+
+	/*
+	 * Writes take a shared lock from start to finish; scanning for dirty
+	 * data to refill the rb tree requires an exclusive lock.
+	 */
+	struct rw_semaphore	writeback_lock;
+
+	/*
+	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
+	 * data in the cache. Protected by writeback_lock; must have a
+	 * shared lock to set and an exclusive lock to clear.
+	 */
+	atomic_t		has_dirty;
+
+	struct ratelimit	writeback_rate;
+	struct delayed_work	writeback_rate_update;
+
+	/*
+	 * Internal to the writeback code, so read_dirty() can keep track of
+	 * where it's at.
+	 */
+	sector_t		last_read;
+
+	/* Number of writeback bios in flight */
+	atomic_t		in_flight;
+	struct closure_with_timer writeback;
+	struct closure_waitlist	writeback_wait;
+
+	struct keybuf		writeback_keys;
+
+	/* For tracking sequential IO */
+#define RECENT_IO_BITS	7
+#define RECENT_IO	(1 << RECENT_IO_BITS)
+	struct io		io[RECENT_IO];
+	struct hlist_head	io_hash[RECENT_IO + 1];
+	struct list_head	io_lru;
+	spinlock_t		io_lock;
+
+	struct cache_accounting	accounting;
+
+	/* The rest of this all shows up in sysfs */
+	unsigned		sequential_cutoff;
+	unsigned		readahead;
+
+	unsigned		sequential_merge:1;
+	unsigned		verify:1;
+
+	unsigned		writeback_metadata:1;
+	unsigned		writeback_running:1;
+	unsigned char		writeback_percent;
+	unsigned		writeback_delay;
+
+	int			writeback_rate_change;
+	int64_t			writeback_rate_derivative;
+	uint64_t		writeback_rate_target;
+
+	unsigned		writeback_rate_update_seconds;
+	unsigned		writeback_rate_d_term;
+	unsigned		writeback_rate_p_term_inverse;
+	unsigned		writeback_rate_d_smooth;
+};
+
+enum alloc_watermarks {
+	WATERMARK_PRIO,
+	WATERMARK_METADATA,
+	WATERMARK_MOVINGGC,
+	WATERMARK_NONE,
+	WATERMARK_MAX
+};
+
+struct cache {
+	struct cache_set	*set;
+	struct cache_sb		sb;
+	struct bio		sb_bio;
+	struct bio_vec		sb_bv[1];
+
+	struct kobject		kobj;
+	struct block_device	*bdev;
+
+	unsigned		watermark[WATERMARK_MAX];
+
+	struct closure		alloc;
+	struct workqueue_struct	*alloc_workqueue;
+
+	struct closure		prio;
+	struct prio_set		*disk_buckets;
+
+	/*
+	 * When allocating new buckets, prio_write() gets first dibs - since we
+	 * may not be able to allocate at all without writing priorities and gens.
+	 * prio_buckets[] contains the last buckets we wrote priorities to (so
+	 * gc can mark them as metadata), prio_next[] contains the buckets
+	 * allocated for the next prio write.
+	 */
+	uint64_t		*prio_buckets;
+	uint64_t		*prio_last_buckets;
+
+	/*
+	 * free: Buckets that are ready to be used
+	 *
+	 * free_inc: Incoming buckets - these are buckets that currently have
+	 * cached data in them, and we can't reuse them until after we write
+	 * their new gen to disk. After prio_write() finishes writing the new
+	 * gens/prios, they'll be moved to the free list (and possibly discarded
+	 * in the process)
+	 *
+	 * unused: GC found nothing pointing into these buckets (possibly
+	 * because all the data they contained was overwritten), so we only
+	 * need to discard them before they can be moved to the free list.
+	 */
+	DECLARE_FIFO(long, free);
+	DECLARE_FIFO(long, free_inc);
+	DECLARE_FIFO(long, unused);
+
+	size_t			fifo_last_bucket;
+
+	/* Allocation stuff: */
+	struct bucket		*buckets;
+
+	DECLARE_HEAP(struct bucket *, heap);
+
+	/*
+	 * max(gen - disk_gen) for all buckets. When it gets too big we have to
+	 * call prio_write() to keep gens from wrapping.
+	 */
+	uint8_t			need_save_prio;
+	unsigned		gc_move_threshold;
+
+	/*
+	 * If nonzero, we know we aren't going to find any buckets to invalidate
+	 * until a gc finishes - otherwise we could pointlessly burn a ton of
+	 * cpu
+	 */
+	unsigned		invalidate_needs_gc:1;
+
+	bool			discard; /* Get rid of? */
+
+	/*
+	 * We preallocate structs for issuing discards to buckets, and keep them
+	 * on this list when they're not in use; do_discard() issues discards
+	 * whenever there's work to do and is called by free_some_buckets() and
+	 * when a discard finishes.
+	 */
+	atomic_t		discards_in_flight;
+	struct list_head	discards;
+
+	struct journal_device	journal;
+
+	/* The rest of this all shows up in sysfs */
+#define IO_ERROR_SHIFT		20
+	atomic_t		io_errors;
+	atomic_t		io_count;
+
+	atomic_long_t		meta_sectors_written;
+	atomic_long_t		btree_sectors_written;
+	atomic_long_t		sectors_written;
+
+	struct bio_split_pool	bio_split_hook;
+};
+
+struct gc_stat {
+	size_t			nodes;
+	size_t			key_bytes;
+
+	size_t			nkeys;
+	uint64_t		data;	/* sectors */
+	uint64_t		dirty;	/* sectors */
+	unsigned		in_use; /* percent */
+};
+
+/*
+ * Flag bits, for how the cache set is shutting down, and what phase it's at:
+ *
+ * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
+ * all the backing devices first (their cached data gets invalidated, and they
+ * won't automatically reattach).
+ *
+ * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
+ * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
+ * flushing dirty data).
+ *
+ * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
+ * the allocation thread.
+ */
+#define CACHE_SET_UNREGISTERING		0
+#define	CACHE_SET_STOPPING		1
+#define	CACHE_SET_STOPPING_2		2
+
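/*
 * Editor's sketch: how the shutdown phases above might be consulted.
 * set_bit()/test_bit() are the real primitives; the helper is illustrative.
 */
static inline bool ex_allocator_must_exit(unsigned long *flags)
{
	/* the allocator thread only bails out in the final phase */
	return test_bit(CACHE_SET_STOPPING_2, flags);
}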
+struct cache_set {
+	struct closure		cl;
+
+	struct list_head	list;
+	struct kobject		kobj;
+	struct kobject		internal;
+	struct dentry		*debug;
+	struct cache_accounting accounting;
+
+	unsigned long		flags;
+
+	struct cache_sb		sb;
+
+	struct cache		*cache[MAX_CACHES_PER_SET];
+	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
+	int			caches_loaded;
+
+	struct bcache_device	**devices;
+	struct list_head	cached_devs;
+	uint64_t		cached_dev_sectors;
+	struct closure		caching;
+
+	struct closure_with_waitlist sb_write;
+
+	mempool_t		*search;
+	mempool_t		*bio_meta;
+	struct bio_set		*bio_split;
+
+	/* For the btree cache */
+	struct shrinker		shrink;
+
+	/* For the allocator itself */
+	wait_queue_head_t	alloc_wait;
+
+	/* For the btree cache and anything allocation related */
+	struct mutex		bucket_lock;
+
+	/* log2(bucket_size), in sectors */
+	unsigned short		bucket_bits;
+
+	/* log2(block_size), in sectors */
+	unsigned short		block_bits;
+
+	/*
+	 * Default number of pages for a new btree node - may be less than a
+	 * full bucket
+	 */
+	unsigned		btree_pages;
+
+	/*
+	 * Lists of struct btrees; btree_cache is the lru list for structs that
+	 * have memory allocated for the actual btree node, btree_cache_freed
+	 * is for structs that do not.
+	 *
+	 * We never free a struct btree, except on shutdown - we just put it on
+	 * the btree_cache_freed list and reuse it later. This simplifies the
+	 * code, and it doesn't cost us much memory as the memory usage is
+	 * dominated by buffers that hold the actual btree node data and those
+	 * can be freed - and the number of struct btrees allocated is
+	 * effectively bounded.
+	 *
+	 * btree_cache_freeable is effectively a small cache - we use it because
+	 * high order page allocations can be rather expensive, and it's quite
+	 * common to delete and allocate btree nodes in quick succession. It
+	 * should never grow past ~2-3 nodes in practice.
+	 */
+	struct list_head	btree_cache;
+	struct list_head	btree_cache_freeable;
+	struct list_head	btree_cache_freed;
+
+	/* Number of elements in btree_cache + btree_cache_freeable lists */
+	unsigned		bucket_cache_used;
+
+	/*
+	 * If we need to allocate memory for a new btree node and that
+	 * allocation fails, we can cannibalize another node in the btree cache
+	 * to satisfy the allocation. However, only one thread can be doing this
+	 * at a time, for obvious reasons - try_harder and try_wait are
+	 * basically a lock for this that we can wait on asynchronously. The
+	 * btree_root() macro releases the lock when it returns.
+	 */
+	struct closure		*try_harder;
+	struct closure_waitlist	try_wait;
+	uint64_t		try_harder_start;
+
+	/*
+	 * When we free a btree node, we increment the gen of the bucket the
+	 * node is in - but we can't rewrite the prios and gens until we've
+	 * finished whatever it is we were doing, otherwise after a crash the
+	 * btree node would be freed but for say a split, we might not have the
+	 * pointers to the new nodes inserted into the btree yet.
+	 *
+	 * This is a refcount that blocks prio_write() until the new keys are
+	 * written.
+	 */
+	atomic_t		prio_blocked;
+	struct closure_waitlist	bucket_wait;
+
+	/*
+	 * For any bio we don't skip we subtract the number of sectors from
+	 * rescale; when it hits 0 we rescale all the bucket priorities.
+	 */
+	atomic_t		rescale;
+	/*
+	 * When we invalidate buckets, we use both the priority and the amount
+	 * of good data to determine which buckets to reuse first - to weight
+	 * those together consistently we keep track of the smallest nonzero
+	 * priority of any bucket.
+	 */
+	uint16_t		min_prio;
+
+	/*
+	 * max(gen - gc_gen) for all buckets. When it gets too big we have to gc
+	 * to keep gens from wrapping around.
+	 */
+	uint8_t			need_gc;
+	struct gc_stat		gc_stats;
+	size_t			nbuckets;
+
+	struct closure_with_waitlist gc;
+	/* Where in the btree gc currently is */
+	struct bkey		gc_done;
+
+	/*
+	 * The allocation code needs gc_mark in struct bucket to be correct, but
+	 * it's not while a gc is in progress. Protected by bucket_lock.
+	 */
+	int			gc_mark_valid;
+
+	/* Counts how many sectors bio_insert has added to the cache */
+	atomic_t		sectors_to_gc;
+
+	struct closure		moving_gc;
+	struct closure_waitlist	moving_gc_wait;
+	struct keybuf		moving_gc_keys;
+	/* Number of moving GC bios in flight */
+	atomic_t		in_flight;
+
+	struct btree		*root;
+
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree		*verify_data;
+	struct mutex		verify_lock;
+#endif
+
+	unsigned		nr_uuids;
+	struct uuid_entry	*uuids;
+	BKEY_PADDED(uuid_bucket);
+	struct closure_with_waitlist uuid_write;
+
+	/*
+	 * A btree node on disk could have too many bsets for an iterator to fit
+	 * on the stack - this is a single element mempool for btree_read_work()
+	 */
+	struct mutex		fill_lock;
+	struct btree_iter	*fill_iter;
+
+	/*
+	 * btree_sort() is a merge sort and requires temporary space - single
+	 * element mempool
+	 */
+	struct mutex		sort_lock;
+	struct bset		*sort;
+
+	/* List of buckets we're currently writing data to */
+	struct list_head	data_buckets;
+	spinlock_t		data_bucket_lock;
+
+	struct journal		journal;
+
+#define CONGESTED_MAX		1024
+	unsigned		congested_last_us;
+	atomic_t		congested;
+
+	/* The rest of this all shows up in sysfs */
+	unsigned		congested_read_threshold_us;
+	unsigned		congested_write_threshold_us;
+
+	spinlock_t		sort_time_lock;
+	struct time_stats	sort_time;
+	struct time_stats	btree_gc_time;
+	struct time_stats	btree_split_time;
+	spinlock_t		btree_read_time_lock;
+	struct time_stats	btree_read_time;
+	struct time_stats	try_harder_time;
+
+	atomic_long_t		cache_read_races;
+	atomic_long_t		writeback_keys_done;
+	atomic_long_t		writeback_keys_failed;
+	unsigned		error_limit;
+	unsigned		error_decay;
+	unsigned short		journal_delay_ms;
+	unsigned		verify:1;
+	unsigned		key_merging_disabled:1;
+	unsigned		gc_always_rewrite:1;
+	unsigned		shrinker_disabled:1;
+	unsigned		copy_gc_enabled:1;
+
+#define BUCKET_HASH_BITS	12
+	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
+};
+
+static inline bool key_merging_disabled(struct cache_set *c)
+{
+#ifdef CONFIG_BCACHE_DEBUG
+	return c->key_merging_disabled;
+#else
+	return 0;
+#endif
+}
+
+static inline bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+	return sb->version == BCACHE_SB_VERSION_BDEV
+		|| sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
+}
+
+struct bbio {
+	unsigned		submit_time_us;
+	union {
+		struct bkey	key;
+		uint64_t	_pad[3];
+		/*
+		 * We only need pad = 3 here because we only ever carry around a
+		 * single pointer - i.e. the pointer we're doing io to/from.
+		 */
+	};
+	struct bio		bio;
+};
+
+static inline unsigned local_clock_us(void)
+{
+	return local_clock() >> 10;
+}
+
+#define MAX_BSETS		4U
+
+#define BTREE_PRIO		USHRT_MAX
+#define INITIAL_PRIO		32768
+
+#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
+#define btree_blocks(b)							\
+	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+
+#define btree_default_blocks(c)						\
+	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+
+#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
+#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
+#define block_bytes(c)		((c)->sb.block_size << 9)
+
+#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i)		__set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
+#define set_blocks(i, c)	__set_blocks(i, (i)->keys, c)
+
+#define node(i, j)		((struct bkey *) ((i)->d + (j)))
+#define end(i)			node(i, (i)->keys)
+
+#define index(i, b)							\
+	((size_t) (((void *) i - (void *) (b)->sets[0].data) /		\
+		   block_bytes(b->c)))
+
+#define btree_data_space(b)	(PAGE_SIZE << (b)->page_order)
+
+#define prios_per_bucket(c)				\
+	((bucket_bytes(c) - sizeof(struct prio_set)) /	\
+	 sizeof(struct bucket_disk))
+#define prio_buckets(c)					\
+	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
+
+#define JSET_MAGIC		0x245235c1a3625032ULL
+#define PSET_MAGIC		0x6750e15f87337f91ULL
+#define BSET_MAGIC		0x90135c78b99e07f5ULL
+
+#define jset_magic(c)		((c)->sb.set_magic ^ JSET_MAGIC)
+#define pset_magic(c)		((c)->sb.set_magic ^ PSET_MAGIC)
+#define bset_magic(c)		((c)->sb.set_magic ^ BSET_MAGIC)
+
+/* Bkey fields: all units are in sectors */
+
+#define KEY_FIELD(name, field, offset, size)				\
+	BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size)					\
+	static inline uint64_t name(const struct bkey *k, unsigned i)	\
+	{ return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); }	\
+									\
+	static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
+	{								\
+		k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset);	\
+		k->ptr[i] |= v << offset;				\
+	}
+
+KEY_FIELD(KEY_PTRS,	high, 60, 3)
+KEY_FIELD(HEADER_SIZE,	high, 58, 2)
+KEY_FIELD(KEY_CSUM,	high, 56, 2)
+KEY_FIELD(KEY_PINNED,	high, 55, 1)
+KEY_FIELD(KEY_DIRTY,	high, 36, 1)
+
+KEY_FIELD(KEY_SIZE,	high, 20, 16)
+KEY_FIELD(KEY_INODE,	high, 0,  20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline uint64_t KEY_OFFSET(const struct bkey *k)
+{
+	return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
+{
+	k->low = v;
+}
+
+PTR_FIELD(PTR_DEV,		51, 12)
+PTR_FIELD(PTR_OFFSET,		8,  43)
+PTR_FIELD(PTR_GEN,		0,  8)
+
+#define PTR_CHECK_DEV		((1 << 12) - 1)
+
+#define PTR(gen, offset, dev)						\
+	((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
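+
+/*
+ * Layout of a single pointer word, per the PTR_FIELD definitions above (bit
+ * 63 is currently unused):
+ *
+ *	bits 51-62	PTR_DEV (12 bits)
+ *	bits 8-50	PTR_OFFSET (43 bits, in sectors)
+ *	bits 0-7	PTR_GEN (8 bits)
+ */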
+
+static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
+{
+	return s >> c->bucket_bits;
+}
+
+static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
+{
+	return ((sector_t) b) << c->bucket_bits;
+}
+
+static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
+{
+	return s & (c->sb.bucket_size - 1);
+}
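+
+/*
+ * Worked example (numbers purely illustrative): with 1024 sector (512k)
+ * buckets, bucket_bits is 10, so sector 5000 is in bucket 4
+ * (sector_to_bucket), that bucket starts at sector 4096 (bucket_to_sector),
+ * and sector 5000 is 904 sectors into it (bucket_remainder).
+ */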
+
+static inline struct cache *PTR_CACHE(struct cache_set *c,
+				      const struct bkey *k,
+				      unsigned ptr)
+{
+	return c->cache[PTR_DEV(k, ptr)];
+}
+
+static inline size_t PTR_BUCKET_NR(struct cache_set *c,
+				   const struct bkey *k,
+				   unsigned ptr)
+{
+	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
+}
+
+static inline struct bucket *PTR_BUCKET(struct cache_set *c,
+					const struct bkey *k,
+					unsigned ptr)
+{
+	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
+}
+
+/* Btree key macros */
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(dev, sector, len)						\
+((struct bkey) {							\
+	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev),	\
+	.low = (sector)							\
+})
+
+static inline void bkey_init(struct bkey *k)
+{
+	*k = KEY(0, 0, 0);
+}
+
+#define KEY_START(k)		(KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k)		KEY(KEY_INODE(k), KEY_START(k), 0)
+#define MAX_KEY			KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
+#define ZERO_KEY		KEY(0, 0, 0)
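+
+/*
+ * For example, KEY(1, 1536, 512) describes an extent on inode 1 covering
+ * sectors [1024, 1536): KEY_OFFSET() is where the extent ends, KEY_SIZE() is
+ * its length, and KEY_START()/START_KEY() recover where it begins.
+ */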
+
+/*
+ * This is used for various on disk data structures - cache_sb, prio_set, bset,
+ * jset: The checksum is _always_ the first 8 bytes of these structs
+ */
+#define csum_set(i)							\
+	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
+	      ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+
+/* Error handling macros */
+
+#define btree_bug(b, ...)						\
+do {									\
+	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
+		dump_stack();						\
+} while (0)
+
+#define cache_bug(c, ...)						\
+do {									\
+	if (bch_cache_set_error(c, __VA_ARGS__))			\
+		dump_stack();						\
+} while (0)
+
+#define btree_bug_on(cond, b, ...)					\
+do {									\
+	if (cond)							\
+		btree_bug(b, __VA_ARGS__);				\
+} while (0)
+
+#define cache_bug_on(cond, c, ...)					\
+do {									\
+	if (cond)							\
+		cache_bug(c, __VA_ARGS__);				\
+} while (0)
+
+#define cache_set_err_on(cond, c, ...)					\
+do {									\
+	if (cond)							\
+		bch_cache_set_error(c, __VA_ARGS__);			\
+} while (0)
+
+/* Looping macros */
+
+#define for_each_cache(ca, cs, iter)					\
+	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
+
+#define for_each_bucket(b, ca)						\
+	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
+	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
+
+static inline void __bkey_put(struct cache_set *c, struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
+}
+
+/* Blktrace macros */
+
+#define blktrace_msg(c, fmt, ...)					\
+do {									\
+	struct request_queue *q = bdev_get_queue(c->bdev);		\
+	if (q)								\
+		blk_add_trace_msg(q, fmt, ##__VA_ARGS__);		\
+} while (0)
+
+#define blktrace_msg_all(s, fmt, ...)					\
+do {									\
+	struct cache *_c;						\
+	unsigned i;							\
+	for_each_cache(_c, (s), i)					\
+		blktrace_msg(_c, fmt, ##__VA_ARGS__);			\
+} while (0)
+
+static inline void cached_dev_put(struct cached_dev *dc)
+{
+	if (atomic_dec_and_test(&dc->count))
+		schedule_work(&dc->detach);
+}
+
+static inline bool cached_dev_get(struct cached_dev *dc)
+{
+	if (!atomic_inc_not_zero(&dc->count))
+		return false;
+
+	/* Paired with the mb in cached_dev_attach */
+	smp_mb__after_atomic_inc();
+	return true;
+}
+
+/*
+ * bucket_gc_gen() returns the difference between the bucket's current gen and
+ * the oldest gen of any pointer into that bucket in the btree (last_gc).
+ *
+ * bucket_disk_gen() returns the difference between the current gen and the gen
+ * on disk; they're both used to make sure gens don't wrap around.
+ */
+
+static inline uint8_t bucket_gc_gen(struct bucket *b)
+{
+	return b->gen - b->last_gc;
+}
+
+static inline uint8_t bucket_disk_gen(struct bucket *b)
+{
+	return b->gen - b->disk_gen;
+}
+
+#define BUCKET_GC_GEN_MAX	96U
+#define BUCKET_DISK_GEN_MAX	64U
+
+#define kobj_attribute_write(n, fn)					\
+	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
+
+#define kobj_attribute_rw(n, show, store)				\
+	static struct kobj_attribute ksysfs_##n =			\
+		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
+
+/* Forward declarations */
+
+void bch_writeback_queue(struct cached_dev *);
+void bch_writeback_add(struct cached_dev *, unsigned);
+
+void bch_count_io_errors(struct cache *, int, const char *);
+void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
+			      int, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+void bch_bbio_free(struct bio *, struct cache_set *);
+struct bio *bch_bbio_alloc(struct cache_set *);
+
+struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
+void bch_generic_make_request(struct bio *, struct bio_split_pool *);
+void __bch_submit_bbio(struct bio *, struct cache_set *);
+void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
+
+uint8_t bch_inc_gen(struct cache *, struct bucket *);
+void bch_rescale_priorities(struct cache_set *, int);
+bool bch_bucket_add_unused(struct cache *, struct bucket *);
+void bch_allocator_thread(struct closure *);
+
+long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
+void bch_bucket_free(struct cache_set *, struct bkey *);
+
+int __bch_bucket_alloc_set(struct cache_set *, unsigned,
+			   struct bkey *, int, struct closure *);
+int bch_bucket_alloc_set(struct cache_set *, unsigned,
+			 struct bkey *, int, struct closure *);
+
+__printf(2, 3)
+bool bch_cache_set_error(struct cache_set *, const char *, ...);
+
+void bch_prio_write(struct cache *);
+void bch_write_bdev_super(struct cached_dev *, struct closure *);
+
+extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
+extern const char * const bch_cache_modes[];
+extern struct mutex bch_register_lock;
+extern struct list_head bch_cache_sets;
+
+extern struct kobj_type bch_cached_dev_ktype;
+extern struct kobj_type bch_flash_dev_ktype;
+extern struct kobj_type bch_cache_set_ktype;
+extern struct kobj_type bch_cache_set_internal_ktype;
+extern struct kobj_type bch_cache_ktype;
+
+void bch_cached_dev_release(struct kobject *);
+void bch_flash_dev_release(struct kobject *);
+void bch_cache_set_release(struct kobject *);
+void bch_cache_release(struct kobject *);
+
+int bch_uuid_write(struct cache_set *);
+void bcache_write_super(struct cache_set *);
+
+int bch_flash_dev_create(struct cache_set *c, uint64_t size);
+
+int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
+void bch_cached_dev_detach(struct cached_dev *);
+void bch_cached_dev_run(struct cached_dev *);
+void bcache_device_stop(struct bcache_device *);
+
+void bch_cache_set_unregister(struct cache_set *);
+void bch_cache_set_stop(struct cache_set *);
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *);
+void bch_btree_cache_free(struct cache_set *);
+int bch_btree_cache_alloc(struct cache_set *);
+void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_moving_init_cache_set(struct cache_set *);
+
+void bch_cache_allocator_exit(struct cache *ca);
+int bch_cache_allocator_init(struct cache *ca);
+
+void bch_debug_exit(void);
+int bch_debug_init(struct kobject *);
+void bch_writeback_exit(void);
+int bch_writeback_init(void);
+void bch_request_exit(void);
+int bch_request_init(void);
+void bch_btree_exit(void);
+int bch_btree_init(void);
+
+#endif /* _BCACHE_H */
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
new file mode 100644
index 0000000..cb4578a
--- /dev/null
+++ b/drivers/md/bcache/bset.c
@@ -0,0 +1,1192 @@
+/*
+ * Code for working with individual keys, and sorted sets of keys within a
+ * btree node
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+
+#include <linux/random.h>
+#include <linux/prefetch.h>
+
+/* Keylists */
+
+void bch_keylist_copy(struct keylist *dest, struct keylist *src)
+{
+	*dest = *src;
+
+	if (src->list == src->d) {
+		size_t n = (uint64_t *) src->top - src->d;
+		dest->top = (struct bkey *) &dest->d[n];
+		dest->list = dest->d;
+	}
+}
+
+int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
+{
+	unsigned oldsize = (uint64_t *) l->top - l->list;
+	unsigned newsize = oldsize + 2 + nptrs;
+	uint64_t *new;
+
+	/* The journalling code doesn't handle the case where the keys to insert
+	 * are bigger than an empty write: if we just return -ENOMEM here,
+	 * bio_insert() and bio_invalidate() will insert the keys created so far
+	 * and finish the rest when the keylist is empty.
+	 */
+	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+		return -ENOMEM;
+
+	newsize = roundup_pow_of_two(newsize);
+
+	if (newsize <= KEYLIST_INLINE ||
+	    roundup_pow_of_two(oldsize) == newsize)
+		return 0;
+
+	new = krealloc(l->list == l->d ? NULL : l->list,
+		       sizeof(uint64_t) * newsize, GFP_NOIO);
+
+	if (!new)
+		return -ENOMEM;
+
+	if (l->list == l->d)
+		memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);
+
+	l->list = new;
+	l->top = (struct bkey *) (&l->list[oldsize]);
+
+	return 0;
+}
+
+struct bkey *bch_keylist_pop(struct keylist *l)
+{
+	struct bkey *k = l->bottom;
+
+	if (k == l->top)
+		return NULL;
+
+	while (bkey_next(k) != l->top)
+		k = bkey_next(k);
+
+	return l->top = k;
+}
+
+/* Pointer validation */
+
+bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
+{
+	unsigned i;
+
+	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
+		goto bad;
+
+	if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
+		goto bad;
+
+	if (!KEY_SIZE(k))
+		return true;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i)) {
+			struct cache *ca = PTR_CACHE(c, k, i);
+			size_t bucket = PTR_BUCKET_NR(c, k, i);
+			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+			    bucket <  ca->sb.first_bucket ||
+			    bucket >= ca->sb.nbuckets)
+				goto bad;
+		}
+
+	return false;
+bad:
+	cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
+	return true;
+}
+
+bool bch_ptr_bad(struct btree *b, const struct bkey *k)
+{
+	struct bucket *g;
+	unsigned i, stale;
+
+	if (!bkey_cmp(k, &ZERO_KEY) ||
+	    !KEY_PTRS(k) ||
+	    bch_ptr_invalid(b, k))
+		return true;
+
+	if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
+		return true;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(b->c, k, i)) {
+			g = PTR_BUCKET(b->c, k, i);
+			stale = ptr_stale(b->c, k, i);
+
+			btree_bug_on(stale > 96, b,
+				     "key too stale: %i, need_gc %u",
+				     stale, b->c->need_gc);
+
+			btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
+				     b, "stale dirty pointer");
+
+			if (stale)
+				return true;
+
+#ifdef CONFIG_BCACHE_EDEBUG
+			if (!mutex_trylock(&b->c->bucket_lock))
+				continue;
+
+			if (b->level) {
+				if (KEY_DIRTY(k) ||
+				    g->prio != BTREE_PRIO ||
+				    (b->c->gc_mark_valid &&
+				     GC_MARK(g) != GC_MARK_METADATA))
+					goto bug;
+
+			} else {
+				if (g->prio == BTREE_PRIO)
+					goto bug;
+
+				if (KEY_DIRTY(k) &&
+				    b->c->gc_mark_valid &&
+				    GC_MARK(g) != GC_MARK_DIRTY)
+					goto bug;
+			}
+			mutex_unlock(&b->c->bucket_lock);
+#endif
+		}
+
+	return false;
+#ifdef CONFIG_BCACHE_EDEBUG
+bug:
+	mutex_unlock(&b->c->bucket_lock);
+	btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+		  pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+	return true;
+#endif
+}
+
+/* Key/pointer manipulation */
+
+void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
+			      unsigned i)
+{
+	BUG_ON(i > KEY_PTRS(src));
+
+	/* Only copy the header, key, and one pointer. */
+	memcpy(dest, src, 2 * sizeof(uint64_t));
+	dest->ptr[0] = src->ptr[i];
+	SET_KEY_PTRS(dest, 1);
+	/* We didn't copy the checksum so clear that bit. */
+	SET_KEY_CSUM(dest, 0);
+}
+
+bool __bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+	unsigned i, len = 0;
+
+	if (bkey_cmp(where, &START_KEY(k)) <= 0)
+		return false;
+
+	if (bkey_cmp(where, k) < 0)
+		len = KEY_OFFSET(k) - KEY_OFFSET(where);
+	else
+		bkey_copy_key(k, where);
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);
+
+	BUG_ON(len > KEY_SIZE(k));
+	SET_KEY_SIZE(k, len);
+	return true;
+}
+
+bool __bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+	unsigned len = 0;
+
+	if (bkey_cmp(where, k) >= 0)
+		return false;
+
+	BUG_ON(KEY_INODE(where) != KEY_INODE(k));
+
+	if (bkey_cmp(where, &START_KEY(k)) > 0)
+		len = KEY_OFFSET(where) - KEY_START(k);
+
+	bkey_copy_key(k, where);
+
+	BUG_ON(len > KEY_SIZE(k));
+	SET_KEY_SIZE(k, len);
+	return true;
+}
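+
+/*
+ * Worked example (numbers purely illustrative): let k be an extent covering
+ * sectors [1024, 1536), i.e. KEY_OFFSET(k) == 1536 and KEY_SIZE(k) == 512,
+ * and let where be a key with offset 1280. Then:
+ *
+ * __bch_cut_front(where, k): k now covers [1280, 1536) - KEY_SIZE(k) == 256,
+ * and each PTR_OFFSET was advanced by 256 so the pointers still point at the
+ * data for the sectors that remain.
+ *
+ * __bch_cut_back(where, k): k now covers [1024, 1280) - KEY_OFFSET(k) == 1280,
+ * KEY_SIZE(k) == 256; the pointers are untouched since the start of the
+ * extent didn't move.
+ */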
+
+static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
+{
+	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
+		~((uint64_t)1 << 63);
+}
+
+/* Tries to merge l and r: l should be lower than r
+ * Returns true if we were able to merge. If we did merge, l will be the merged
+ * key, r will be untouched.
+ */
+bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
+{
+	unsigned i;
+
+	if (key_merging_disabled(b->c))
+		return false;
+
+	if (KEY_PTRS(l) != KEY_PTRS(r) ||
+	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
+	    bkey_cmp(l, &START_KEY(r)))
+		return false;
+
+	for (i = 0; i < KEY_PTRS(l); i++)
+		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+			return false;
+
+	/* Keys with no pointers aren't restricted to one bucket and could
+	 * overflow KEY_SIZE
+	 */
+	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
+		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
+		SET_KEY_SIZE(l, USHRT_MAX);
+
+		bch_cut_front(l, r);
+		return false;
+	}
+
+	if (KEY_CSUM(l)) {
+		if (KEY_CSUM(r))
+			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
+		else
+			SET_KEY_CSUM(l, 0);
+	}
+
+	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
+	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));
+
+	return true;
+}
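+
+/*
+ * For example (illustrative numbers): l covering sectors [512, 1024) and r
+ * covering [1024, 1536), with contiguous pointers into the same bucket, merge
+ * into l covering [512, 1536) - KEY_OFFSET(l) becomes 1536 and KEY_SIZE(l)
+ * becomes 1024.
+ */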
+
+/* Binary tree stuff for auxiliary search trees */
+
+static unsigned inorder_next(unsigned j, unsigned size)
+{
+	if (j * 2 + 1 < size) {
+		j = j * 2 + 1;
+
+		while (j * 2 < size)
+			j *= 2;
+	} else
+		j >>= ffz(j) + 1;
+
+	return j;
+}
+
+static unsigned inorder_prev(unsigned j, unsigned size)
+{
+	if (j * 2 < size) {
+		j = j * 2;
+
+		while (j * 2 + 1 < size)
+			j = j * 2 + 1;
+	} else
+		j >>= ffs(j);
+
+	return j;
+}
+
+/* I have no idea why this code works... and I'm the one who wrote it
+ *
+ * However, I do know what it does:
+ * Given a binary tree constructed in an array (i.e. how you normally implement
+ * a heap), it converts a node in the tree - referenced by array index - to the
+ * index it would have if you did an inorder traversal.
+ *
+ * Also tested for every j, for sizes up to somewhere around 6 million.
+ *
+ * The binary tree starts at array index 1, not 0
+ * extra is a function of size:
+ *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+ */
+static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
+{
+	unsigned b = fls(j);
+	unsigned shift = fls(size - 1) - b;
+
+	j  ^= 1U << (b - 1);
+	j <<= 1;
+	j  |= 1;
+	j <<= shift;
+
+	if (j > extra)
+		j -= (j - extra) >> 1;
+
+	return j;
+}
+
+static unsigned to_inorder(unsigned j, struct bset_tree *t)
+{
+	return __to_inorder(j, t->size, t->extra);
+}
+
+static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
+{
+	unsigned shift;
+
+	if (j > extra)
+		j += j - extra;
+
+	shift = ffs(j);
+
+	j >>= shift;
+	j  |= roundup_pow_of_two(size) >> shift;
+
+	return j;
+}
+
+static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
+{
+	return __inorder_to_tree(j, t->size, t->extra);
+}
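+
+/*
+ * Worked example: for size == 7 (tree indices 1..6),
+ * extra == (7 - rounddown_pow_of_two(6)) << 1 == 6, and to_inorder() maps
+ *
+ *	j:		1 2 3 4 5 6
+ *	inorder:	4 2 6 1 3 5
+ *
+ * i.e. the inorder traversal of the array-as-binary-tree is 4, 2, 5, 1, 6, 3;
+ * inorder_to_tree() is the inverse mapping.
+ */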
+
+#if 0
+void inorder_test(void)
+{
+	unsigned long done = 0;
+	ktime_t start = ktime_get();
+
+	for (unsigned size = 2;
+	     size < 65536000;
+	     size++) {
+		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
+		unsigned i = 1, j = rounddown_pow_of_two(size - 1);
+
+		if (!(size % 4096))
+			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
+			       done / ktime_us_delta(ktime_get(), start));
+
+		while (1) {
+			if (__inorder_to_tree(i, size, extra) != j)
+				panic("size %10u j %10u i %10u", size, j, i);
+
+			if (__to_inorder(j, size, extra) != i)
+				panic("size %10u j %10u i %10u", size, j, i);
+
+			if (j == rounddown_pow_of_two(size) - 1)
+				break;
+
+			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);
+
+			j = inorder_next(j, size);
+			i++;
+		}
+
+		done += size - 1;
+	}
+}
+#endif
+
+/*
+ * Cacheline/offset <-> bkey pointer arithmetic:
+ *
+ * t->tree is a binary search tree in an array; each node corresponds to a key
+ * in one cacheline in t->set (BSET_CACHELINE bytes).
+ *
+ * This means we don't have to store the full index of the key that a node in
+ * the binary tree points to; to_inorder() gives us the cacheline, and then
+ * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
+ *
+ * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
+ * make this work.
+ *
+ * To construct the bfloat for an arbitrary key we need to know what the key
+ * immediately preceding it is: we have to check if the two keys differ in the
+ * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
+ * of the previous key so we can walk backwards to it from t->tree[j]'s key.
+ */
+
+static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
+				      unsigned offset)
+{
+	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
+}
+
+static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
+{
+	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
+}
+
+static unsigned bkey_to_cacheline_offset(struct bkey *k)
+{
+	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
+}
+
+static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
+{
+	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
+}
+
+static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
+{
+	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
+}
+
+/*
+ * For the write set - the one we're currently inserting keys into - we don't
+ * maintain a full search tree, we just keep a simple lookup table in t->prev.
+ */
+static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
+{
+	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
+}
+
+static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
+{
+#ifdef CONFIG_X86_64
+	asm("shrd %[shift],%[high],%[low]"
+	    : [low] "+Rm" (low)
+	    : [high] "R" (high),
+	    [shift] "ci" (shift)
+	    : "cc");
+#else
+	low >>= shift;
+	low  |= (high << 1) << (63U - shift);
+#endif
+	return low;
+}
+
+static inline unsigned bfloat_mantissa(const struct bkey *k,
+				       struct bkey_float *f)
+{
+	const uint64_t *p = &k->low - (f->exponent >> 6);
+	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
+}
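+
+/*
+ * In other words, shrd128() treats (high, low) as one 128 bit integer and
+ * returns 64 bits of it starting at bit @shift; bfloat_mantissa() uses that
+ * to extract the BKEY_MANTISSA_BITS bits starting at bit f->exponent of the
+ * 128 bits formed by k->high and k->low.
+ */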
+
+static void make_bfloat(struct bset_tree *t, unsigned j)
+{
+	struct bkey_float *f = &t->tree[j];
+	struct bkey *m = tree_to_bkey(t, j);
+	struct bkey *p = tree_to_prev_bkey(t, j);
+
+	struct bkey *l = is_power_of_2(j)
+		? t->data->start
+		: tree_to_prev_bkey(t, j >> ffs(j));
+
+	struct bkey *r = is_power_of_2(j + 1)
+		? node(t->data, t->data->keys - bkey_u64s(&t->end))
+		: tree_to_bkey(t, j >> (ffz(j) + 1));
+
+	BUG_ON(m < l || m > r);
+	BUG_ON(bkey_next(p) != m);
+
+	if (KEY_INODE(l) != KEY_INODE(r))
+		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
+	else
+		f->exponent = fls64(r->low ^ l->low);
+
+	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);
+
+	/*
+	 * Setting f->exponent = 127 flags this node as failed, and causes the
+	 * lookup code to fall back to comparing against the original key.
+	 */
+
+	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
+		f->mantissa = bfloat_mantissa(m, f) - 1;
+	else
+		f->exponent = 127;
+}
+
+static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+{
+	if (t != b->sets) {
+		unsigned j = roundup(t[-1].size,
+				     64 / sizeof(struct bkey_float));
+
+		t->tree = t[-1].tree + j;
+		t->prev = t[-1].prev + j;
+	}
+
+	while (t < b->sets + MAX_BSETS)
+		t++->size = 0;
+}
+
+static void bset_build_unwritten_tree(struct btree *b)
+{
+	struct bset_tree *t = b->sets + b->nsets;
+
+	bset_alloc_tree(b, t);
+
+	if (t->tree != b->sets->tree + bset_tree_space(b)) {
+		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
+		t->size = 1;
+	}
+}
+
+static void bset_build_written_tree(struct btree *b)
+{
+	struct bset_tree *t = b->sets + b->nsets;
+	struct bkey *k = t->data->start;
+	unsigned j, cacheline = 1;
+
+	bset_alloc_tree(b, t);
+
+	t->size = min_t(unsigned,
+			bkey_to_cacheline(t, end(t->data)),
+			b->sets->tree + bset_tree_space(b) - t->tree);
+
+	if (t->size < 2) {
+		t->size = 0;
+		return;
+	}
+
+	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+
+	/* First we figure out where the first key in each cacheline is */
+	for (j = inorder_next(0, t->size);
+	     j;
+	     j = inorder_next(j, t->size)) {
+		while (bkey_to_cacheline(t, k) != cacheline)
+			k = bkey_next(k);
+
+		t->prev[j] = bkey_u64s(k);
+		k = bkey_next(k);
+		cacheline++;
+		t->tree[j].m = bkey_to_cacheline_offset(k);
+	}
+
+	while (bkey_next(k) != end(t->data))
+		k = bkey_next(k);
+
+	t->end = *k;
+
+	/* Then we build the tree */
+	for (j = inorder_next(0, t->size);
+	     j;
+	     j = inorder_next(j, t->size))
+		make_bfloat(t, j);
+}
+
+void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+{
+	struct bset_tree *t;
+	unsigned inorder, j = 1;
+
+	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+		if (k < end(t->data))
+			goto found_set;
+
+	BUG();
+found_set:
+	if (!t->size || !bset_written(b, t))
+		return;
+
+	inorder = bkey_to_cacheline(t, k);
+
+	if (k == t->data->start)
+		goto fix_left;
+
+	if (bkey_next(k) == end(t->data)) {
+		t->end = *k;
+		goto fix_right;
+	}
+
+	j = inorder_to_tree(inorder, t);
+
+	if (j &&
+	    j < t->size &&
+	    k == tree_to_bkey(t, j))
+fix_left:	do {
+			make_bfloat(t, j);
+			j = j * 2;
+		} while (j < t->size);
+
+	j = inorder_to_tree(inorder + 1, t);
+
+	if (j &&
+	    j < t->size &&
+	    k == tree_to_prev_bkey(t, j))
+fix_right:	do {
+			make_bfloat(t, j);
+			j = j * 2 + 1;
+		} while (j < t->size);
+}
+
+void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
+{
+	struct bset_tree *t = &b->sets[b->nsets];
+	unsigned shift = bkey_u64s(k);
+	unsigned j = bkey_to_cacheline(t, k);
+
+	/* We're getting called from btree_split() or btree_gc, just bail out */
+	if (!t->size)
+		return;
+
+	/* k is the key we just inserted; we need to find the entry in the
+	 * lookup table for the first key that is strictly greater than k:
+	 * it's either k's cacheline or the next one
+	 */
+	if (j < t->size &&
+	    table_to_bkey(t, j) <= k)
+		j++;
+
+	/* Adjust all the lookup table entries, and find a new key for any that
+	 * have gotten too big
+	 */
+	for (; j < t->size; j++) {
+		t->prev[j] += shift;
+
+		if (t->prev[j] > 7) {
+			k = table_to_bkey(t, j - 1);
+
+			while (k < cacheline_to_bkey(t, j, 0))
+				k = bkey_next(k);
+
+			t->prev[j] = bkey_to_cacheline_offset(k);
+		}
+	}
+
+	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
+		return;
+
+	/* Possibly add a new entry to the end of the lookup table */
+
+	for (k = table_to_bkey(t, t->size - 1);
+	     k != end(t->data);
+	     k = bkey_next(k))
+		if (t->size == bkey_to_cacheline(t, k)) {
+			t->prev[t->size] = bkey_to_cacheline_offset(k);
+			t->size++;
+		}
+}
+
+void bch_bset_init_next(struct btree *b)
+{
+	struct bset *i = write_block(b);
+
+	if (i != b->sets[0].data) {
+		b->sets[++b->nsets].data = i;
+		i->seq = b->sets[0].data->seq;
+	} else
+		get_random_bytes(&i->seq, sizeof(uint64_t));
+
+	i->magic	= bset_magic(b->c);
+	i->version	= 0;
+	i->keys		= 0;
+
+	bset_build_unwritten_tree(b);
+}
+
+struct bset_search_iter {
+	struct bkey *l, *r;
+};
+
+static struct bset_search_iter bset_search_write_set(struct btree *b,
+						     struct bset_tree *t,
+						     const struct bkey *search)
+{
+	unsigned li = 0, ri = t->size;
+
+	BUG_ON(!b->nsets &&
+	       t->size < bkey_to_cacheline(t, end(t->data)));
+
+	while (li + 1 != ri) {
+		unsigned m = (li + ri) >> 1;
+
+		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
+			ri = m;
+		else
+			li = m;
+	}
+
+	return (struct bset_search_iter) {
+		table_to_bkey(t, li),
+		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+	};
+}
+
+static struct bset_search_iter bset_search_tree(struct btree *b,
+						struct bset_tree *t,
+						const struct bkey *search)
+{
+	struct bkey *l, *r;
+	struct bkey_float *f;
+	unsigned inorder, j, n = 1;
+
+	do {
+		unsigned p = n << 4;
+		p &= ((int) (p - t->size)) >> 31;
+
+		prefetch(&t->tree[p]);
+
+		j = n;
+		f = &t->tree[j];
+
+		/*
+		 * n = (f->mantissa > bfloat_mantissa())
+		 *	? j * 2
+		 *	: j * 2 + 1;
+		 *
+		 * We need to subtract 1 from f->mantissa for the sign bit trick
+		 * to work - that's done in make_bfloat()
+		 */
+		if (likely(f->exponent != 127))
+			n = j * 2 + (((unsigned)
+				      (f->mantissa -
+				       bfloat_mantissa(search, f))) >> 31);
+		else
+			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
+				? j * 2
+				: j * 2 + 1;
+	} while (n < t->size);
+
+	inorder = to_inorder(j, t);
+
+	/*
+	 * n would have been the node we recursed to - the low bit tells us if
+	 * we recursed left or recursed right.
+	 */
+	if (n & 1) {
+		l = cacheline_to_bkey(t, inorder, f->m);
+
+		if (++inorder != t->size) {
+			f = &t->tree[inorder_next(j, t->size)];
+			r = cacheline_to_bkey(t, inorder, f->m);
+		} else
+			r = end(t->data);
+	} else {
+		r = cacheline_to_bkey(t, inorder, f->m);
+
+		if (--inorder) {
+			f = &t->tree[inorder_prev(j, t->size)];
+			l = cacheline_to_bkey(t, inorder, f->m);
+		} else
+			l = t->data->start;
+	}
+
+	return (struct bset_search_iter) {l, r};
+}
+
+struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+			       const struct bkey *search)
+{
+	struct bset_search_iter i;
+
+	/*
+	 * First we search for the right cacheline, then we do a linear search
+	 * within that cacheline.
+	 *
+	 * To search for the cacheline, there are three different possibilities:
+	 *  * The set is too small to have a search tree, so we just do a linear
+	 *    search over the whole set.
+	 *  * The set is the one we're currently inserting into; keeping a full
+	 *    auxiliary search tree up to date would be too expensive, so we
+	 *    use a much simpler lookup table to do a binary search -
+	 *    bset_search_write_set().
+	 *  * Or we use the auxiliary search tree we constructed earlier -
+	 *    bset_search_tree()
+	 */
+
+	if (unlikely(!t->size)) {
+		i.l = t->data->start;
+		i.r = end(t->data);
+	} else if (bset_written(b, t)) {
+		/*
+		 * Each node in the auxiliary search tree covers a certain range
+		 * of bits, and keys above and below the set it covers might
+		 * differ outside those bits - so we have to special case the
+		 * start and end - handle that here:
+		 */
+
+		if (unlikely(bkey_cmp(search, &t->end) >= 0))
+			return end(t->data);
+
+		if (unlikely(bkey_cmp(search, t->data->start) < 0))
+			return t->data->start;
+
+		i = bset_search_tree(b, t, search);
+	} else
+		i = bset_search_write_set(b, t, search);
+
+#ifdef CONFIG_BCACHE_EDEBUG
+	BUG_ON(bset_written(b, t) &&
+	       i.l != t->data->start &&
+	       bkey_cmp(tree_to_prev_bkey(t,
+		  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+			search) > 0);
+
+	BUG_ON(i.r != end(t->data) &&
+	       bkey_cmp(i.r, search) <= 0);
+#endif
+
+	while (likely(i.l != i.r) &&
+	       bkey_cmp(i.l, search) <= 0)
+		i.l = bkey_next(i.l);
+
+	return i.l;
+}
+
+/* Btree iterator */
+
+static inline bool btree_iter_cmp(struct btree_iter_set l,
+				  struct btree_iter_set r)
+{
+	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+	return c ? c > 0 : l.k < r.k;
+}
+
+static inline bool btree_iter_end(struct btree_iter *iter)
+{
+	return !iter->used;
+}
+
+void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+			 struct bkey *end)
+{
+	if (k != end)
+		BUG_ON(!heap_add(iter,
+				 ((struct btree_iter_set) { k, end }),
+				 btree_iter_cmp));
+}
+
+struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
+			       struct bkey *search, struct bset_tree *start)
+{
+	struct bkey *ret = NULL;
+	iter->size = ARRAY_SIZE(iter->data);
+	iter->used = 0;
+
+	for (; start <= &b->sets[b->nsets]; start++) {
+		ret = bch_bset_search(b, start, search);
+		bch_btree_iter_push(iter, ret, end(start->data));
+	}
+
+	return ret;
+}
+
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+	struct btree_iter_set unused;
+	struct bkey *ret = NULL;
+
+	if (!btree_iter_end(iter)) {
+		ret = iter->data->k;
+		iter->data->k = bkey_next(iter->data->k);
+
+		if (iter->data->k > iter->data->end) {
+			WARN_ONCE(1, "bset was corrupt!\n");
+			iter->data->k = iter->data->end;
+		}
+
+		if (iter->data->k == iter->data->end)
+			heap_pop(iter, unused, btree_iter_cmp);
+		else
+			heap_sift(iter, 0, btree_iter_cmp);
+	}
+
+	return ret;
+}
+
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
+					struct btree *b, ptr_filter_fn fn)
+{
+	struct bkey *ret;
+
+	do {
+		ret = bch_btree_iter_next(iter);
+	} while (ret && fn(b, ret));
+
+	return ret;
+}
+
+struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
+{
+	struct btree_iter iter;
+
+	bch_btree_iter_init(b, &iter, search);
+	return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+}
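+
+/*
+ * Sketch of typical iterator usage, in the style of bch_next_recurse_key()
+ * above (illustrative only, hence not compiled): walk every key in a node in
+ * sorted order, skipping bad pointers.
+ */
+#if 0
+static void example_iter_walk(struct btree *b)
+{
+	struct btree_iter iter;
+	struct bkey *k;
+
+	bch_btree_iter_init(b, &iter, NULL);
+
+	while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
+		pr_debug("key %s", pkey(k));
+}
+#endif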
+
+/* Mergesort */
+
+static void btree_sort_fixup(struct btree_iter *iter)
+{
+	while (iter->used > 1) {
+		struct btree_iter_set *top = iter->data, *i = top + 1;
+		struct bkey *k;
+
+		if (iter->used > 2 &&
+		    btree_iter_cmp(i[0], i[1]))
+			i++;
+
+		for (k = i->k;
+		     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
+		     k = bkey_next(k))
+			if (top->k > i->k)
+				__bch_cut_front(top->k, k);
+			else if (KEY_SIZE(k))
+				bch_cut_back(&START_KEY(k), top->k);
+
+		if (top->k < i->k || k == i->k)
+			break;
+
+		heap_sift(iter, i - top, btree_iter_cmp);
+	}
+}
+
+static void btree_mergesort(struct btree *b, struct bset *out,
+			    struct btree_iter *iter,
+			    bool fixup, bool remove_stale)
+{
+	struct bkey *k, *last = NULL;
+	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+		? bch_ptr_bad
+		: bch_ptr_invalid;
+
+	while (!btree_iter_end(iter)) {
+		if (fixup && !b->level)
+			btree_sort_fixup(iter);
+
+		k = bch_btree_iter_next(iter);
+		if (bad(b, k))
+			continue;
+
+		if (!last) {
+			last = out->start;
+			bkey_copy(last, k);
+		} else if (b->level ||
+			   !bch_bkey_try_merge(b, last, k)) {
+			last = bkey_next(last);
+			bkey_copy(last, k);
+		}
+	}
+
+	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
+
+	pr_debug("sorted %i keys", out->keys);
+	bch_check_key_order(b, out);
+}
+
+static void __btree_sort(struct btree *b, struct btree_iter *iter,
+			 unsigned start, unsigned order, bool fixup)
+{
+	uint64_t start_time;
+	bool remove_stale = !b->written;
+	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
+						     order);
+	if (!out) {
+		mutex_lock(&b->c->sort_lock);
+		out = b->c->sort;
+		order = ilog2(bucket_pages(b->c));
+	}
+
+	start_time = local_clock();
+
+	btree_mergesort(b, out, iter, fixup, remove_stale);
+	b->nsets = start;
+
+	if (!fixup && !start && b->written)
+		bch_btree_verify(b, out);
+
+	if (!start && order == b->page_order) {
+		/*
+		 * Our temporary buffer is the same size as the btree node's
+		 * buffer, so we can just swap buffers instead of doing a big
+		 * memcpy()
+		 */
+
+		out->magic	= bset_magic(b->c);
+		out->seq	= b->sets[0].data->seq;
+		out->version	= b->sets[0].data->version;
+		swap(out, b->sets[0].data);
+
+		if (b->c->sort == b->sets[0].data)
+			b->c->sort = out;
+	} else {
+		b->sets[start].data->keys = out->keys;
+		memcpy(b->sets[start].data->start, out->start,
+		       (void *) end(out) - (void *) out->start);
+	}
+
+	if (out == b->c->sort)
+		mutex_unlock(&b->c->sort_lock);
+	else
+		free_pages((unsigned long) out, order);
+
+	if (b->written)
+		bset_build_written_tree(b);
+
+	if (!start) {
+		spin_lock(&b->c->sort_time_lock);
+		bch_time_stats_update(&b->c->sort_time, start_time);
+		spin_unlock(&b->c->sort_time_lock);
+	}
+}
+
+void bch_btree_sort_partial(struct btree *b, unsigned start)
+{
+	size_t oldsize = 0, order = b->page_order, keys = 0;
+	struct btree_iter iter;
+	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
+
+	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
+	       (b->sets[b->nsets].size || b->nsets));
+
+	if (b->written)
+		oldsize = bch_count_data(b);
+
+	if (start) {
+		unsigned i;
+
+		for (i = start; i <= b->nsets; i++)
+			keys += b->sets[i].data->keys;
+
+		order = roundup_pow_of_two(__set_bytes(b->sets->data,
+						       keys)) / PAGE_SIZE;
+		if (order)
+			order = ilog2(order);
+	}
+
+	__btree_sort(b, &iter, start, order, false);
+
+	EBUG_ON(b->written && bch_count_data(b) != oldsize);
+}
+
+void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
+{
+	BUG_ON(!b->written);
+	__btree_sort(b, iter, 0, b->page_order, true);
+}
+
+void bch_btree_sort_into(struct btree *b, struct btree *new)
+{
+	uint64_t start_time = local_clock();
+
+	struct btree_iter iter;
+	bch_btree_iter_init(b, &iter, NULL);
+
+	btree_mergesort(b, new->sets->data, &iter, false, true);
+
+	spin_lock(&b->c->sort_time_lock);
+	bch_time_stats_update(&b->c->sort_time, start_time);
+	spin_unlock(&b->c->sort_time_lock);
+
+	bkey_copy_key(&new->key, &b->key);
+	new->sets->size = 0;
+}
+
+void bch_btree_sort_lazy(struct btree *b)
+{
+	if (b->nsets) {
+		unsigned i, j, keys = 0, total;
+
+		for (i = 0; i <= b->nsets; i++)
+			keys += b->sets[i].data->keys;
+
+		total = keys;
+
+		for (j = 0; j < b->nsets; j++) {
+			if (keys * 2 < total ||
+			    keys < 1000) {
+				bch_btree_sort_partial(b, j);
+				return;
+			}
+
+			keys -= b->sets[j].data->keys;
+		}
+
+		/* Must sort if b->nsets == 3 or we'll overflow */
+		if (b->nsets >= (MAX_BSETS - 1) - b->level) {
+			bch_btree_sort(b);
+			return;
+		}
+	}
+
+	bset_build_written_tree(b);
+}
+
+/* Sysfs stuff */
+
+struct bset_stats {
+	size_t nodes;
+	size_t sets_written, sets_unwritten;
+	size_t bytes_written, bytes_unwritten;
+	size_t floats, failed;
+};
+
+static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
+			    struct bset_stats *stats)
+{
+	struct bkey *k;
+	unsigned i;
+
+	stats->nodes++;
+
+	for (i = 0; i <= b->nsets; i++) {
+		struct bset_tree *t = &b->sets[i];
+		size_t bytes = t->data->keys * sizeof(uint64_t);
+		size_t j;
+
+		if (bset_written(b, t)) {
+			stats->sets_written++;
+			stats->bytes_written += bytes;
+
+			stats->floats += t->size - 1;
+
+			for (j = 1; j < t->size; j++)
+				if (t->tree[j].exponent == 127)
+					stats->failed++;
+		} else {
+			stats->sets_unwritten++;
+			stats->bytes_unwritten += bytes;
+		}
+	}
+
+	if (b->level) {
+		struct btree_iter iter;
+
+		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
+			int ret = btree(bset_stats, k, b, op, stats);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int bch_bset_print_stats(struct cache_set *c, char *buf)
+{
+	struct btree_op op;
+	struct bset_stats t;
+	int ret;
+
+	bch_btree_op_init_stack(&op);
+	memset(&t, 0, sizeof(struct bset_stats));
+
+	ret = btree_root(bset_stats, c, &op, &t);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE,
+			"btree nodes:		%zu\n"
+			"written sets:		%zu\n"
+			"unwritten sets:		%zu\n"
+			"written key bytes:	%zu\n"
+			"unwritten key bytes:	%zu\n"
+			"floats:			%zu\n"
+			"failed:			%zu\n",
+			t.nodes,
+			t.sets_written, t.sets_unwritten,
+			t.bytes_written, t.bytes_unwritten,
+			t.floats, t.failed);
+}
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
new file mode 100644
index 0000000..57a9cff
--- /dev/null
+++ b/drivers/md/bcache/bset.h
@@ -0,0 +1,379 @@
+#ifndef _BCACHE_BSET_H
+#define _BCACHE_BSET_H
+
+/*
+ * BKEYS:
+ *
+ * A bkey contains a key, a size field, a variable number of pointers, and some
+ * ancillary flag bits.
+ *
+ * We use two different functions for validating bkeys, bch_ptr_invalid() and
+ * bch_ptr_bad().
+ *
+ * bch_ptr_invalid() primarily filters out keys and pointers that would be
+ * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
+ * pointers that occur in normal practice but don't point to real data.
+ *
+ * The one exception to the rule that ptr_invalid() filters out invalid keys is
+ * that it also filters out keys of size 0 - these are keys that have been
+ * completely overwritten. It'd be safe to delete these in memory while leaving
+ * them on disk, just unnecessary work - so we filter them out when resorting
+ * instead.
+ *
+ * We can't filter out stale keys when we're resorting, because garbage
+ * collection needs to find them to ensure bucket gens don't wrap around -
+ * unless we're rewriting the btree node those stale keys still exist on disk.
+ *
+ * We also implement functions here for removing some number of sectors from the
+ * front or the back of a bkey - this is mainly used for fixing overlapping
+ * extents, by removing the overlapping sectors from the older key.
+ *
+ * BSETS:
+ *
+ * A bset is an array of bkeys laid out contiguously in memory in sorted order,
+ * along with a header. A btree node is made up of a number of these, written at
+ * different times.
+ *
+ * There could be many of them on disk, but we never allow there to be more than
+ * 4 in memory - we lazily resort as needed.
+ *
+ * We implement code here for creating and maintaining auxiliary search trees
+ * (described below) for searching an individual bset, and on top of that we
+ * implement a btree iterator.
+ *
+ * BTREE ITERATOR:
+ *
+ * Most of the code in bcache doesn't care about an individual bset - it needs
+ * to search entire btree nodes and iterate over them in sorted order.
+ *
+ * The btree iterator code serves both functions; it iterates through the keys
+ * in a btree node in sorted order, starting from either keys after a specific
+ * point (if you pass it a search key) or the start of the btree node.
+ *
+ * AUXILIARY SEARCH TREES:
+ *
+ * Since keys are variable length, we can't use a binary search on a bset - we
+ * wouldn't be able to find the start of the next key. But binary searches are
+ * slow anyway, due to terrible cache behaviour; bcache originally used binary
+ * searches and that code topped out at under 50k lookups/second.
+ *
+ * So we need to construct some sort of lookup table. Since we only insert keys
+ * into the last (unwritten) set, most of the keys within a given btree node are
+ * usually in sets that are mostly constant. We use two different types of
+ * lookup tables to take advantage of this.
+ *
+ * Both lookup tables share in common that they don't index every key in the
+ * set; they index one key every BSET_CACHELINE bytes, and then a linear search
+ * is used for the rest.
+ *
+ * For sets that have been written to disk and are no longer being inserted
+ * into, we construct a binary search tree in an array - traversing a binary
+ * search tree in an array gives excellent locality of reference and is very
+ * fast, since both children of any node are adjacent to each other in memory
+ * (and their grandchildren, and great grandchildren...) - this means
+ * prefetching can be used to great effect.
+ *
+ * It's quite useful performance-wise to keep these nodes small - not just
+ * because they're more likely to be in L2, but also because we can prefetch
+ * more nodes on a single cacheline and thus prefetch more iterations in advance
+ * when traversing this tree.
+ *
+ * Nodes in the auxiliary search tree must contain both a key to compare against
+ * (we don't want to fetch the key from the set, that would defeat the purpose),
+ * and a pointer to the key. We use a few tricks to compress both of these.
+ *
+ * To compress the pointer, we take advantage of the fact that one node in the
+ * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
+ * a function (to_inorder()) that takes the index of a node in a binary tree and
+ * returns what its index would be in an inorder traversal, so we only have to
+ * store the low bits of the offset.
+ *
+ * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
+ * compress that, we take advantage of the fact that when we're traversing the
+ * search tree at every iteration we know that both our search key and the key
+ * we're looking for lie within some range - bounded by our previous
+ * comparisons. (We special case the start of a search so that this is true even
+ * at the root of the tree).
+ *
+ * So if we know the key we're looking for is between a and b, and a and b
+ * don't differ above bit 50, we don't need to check anything above bit 50.
+ *
+ * We don't usually need the rest of the bits, either; we only need enough bits
+ * to partition the key range we're currently checking.  Consider key n - the
+ * key our auxiliary search tree node corresponds to, and key p, the key
+ * immediately preceding n.  The lowest bit we need to store in the auxiliary
+ * search tree is the highest bit that differs between n and p.
+ *
+ * Note that this could be bit 0 - we might sometimes need all 84 bits to do the
+ * comparison. But we'd really like our nodes in the auxiliary search tree to be
+ * of fixed size.
+ *
+ * The solution is to make them fixed size, and when we're constructing a node
+ * check if p and n differed in the bits we needed them to. If they don't we
+ * flag that node, and when doing lookups we fallback to comparing against the
+ * real key. As long as this doesn't happen too often (and it seems to reliably
+ * happen a bit less than 1% of the time), we win - even on failures, that key
+ * is then more likely to be in cache than if we were doing binary searches all
+ * the way, since we're touching so much less memory.
+ *
+ * The keys in the auxiliary search tree are stored in (software) floating
+ * point, with an exponent and a mantissa. The exponent needs to be big enough
+ * to address all the bits in the original key, but the number of bits in the
+ * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
+ *
+ * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
+ * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
+ * We need one node per 128 bytes in the btree node, which means the auxiliary
+ * search trees take up 3% as much memory as the btree itself.
+ *
+ * Constructing these auxiliary search trees is moderately expensive, and we
+ * don't want to be constantly rebuilding the search tree for the last set
+ * whenever we insert another key into it. For the unwritten set, we use a much
+ * simpler lookup table - it's just a flat array, so index i in the lookup table
+ * corresponds to the i'th range of BSET_CACHELINE bytes in the set. Indexing
+ * within each byte range works the same as with the auxiliary search trees.
+ *
+ * These are much easier to keep up to date when we insert a key - we do it
+ * somewhat lazily; when we shift a key up we usually just increment the pointer
+ * to it, only when it would overflow do we go to the trouble of finding the
+ * first key in that range of bytes again.
+ */
+
+/* Btree key comparison/iteration */
+
+struct btree_iter {
+	size_t size, used;
+	struct btree_iter_set {
+		struct bkey *k, *end;
+	} data[MAX_BSETS];
+};
+
+struct bset_tree {
+	/*
+	 * We construct a binary tree in an array as if the array
+	 * started at 1, so that things line up on the same cachelines
+	 * better: see comments in bset.c at cacheline_to_bkey() for
+	 * details
+	 */
+
+	/* size of the binary tree and prev array */
+	unsigned	size;
+
+	/* function of size - precalculated for to_inorder() */
+	unsigned	extra;
+
+	/* copy of the last key in the set */
+	struct bkey	end;
+	struct bkey_float *tree;
+
+	/*
+	 * The nodes in the bset tree point to specific keys - this
+	 * array holds the size of each node's previous key.
+	 *
+	 * Conceptually it's a member of struct bkey_float, but we want
+	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
+	 * path.
+	 */
+	uint8_t		*prev;
+
+	/* The actual btree node, with pointers to each sorted set */
+	struct bset	*data;
+};
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+					const struct bkey *r)
+{
+	return unlikely(KEY_INODE(l) != KEY_INODE(r))
+		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
+
+static inline size_t bkey_u64s(const struct bkey *k)
+{
+	BUG_ON(KEY_CSUM(k) > 1);
+	return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
+}
+
+static inline size_t bkey_bytes(const struct bkey *k)
+{
+	return bkey_u64s(k) * sizeof(uint64_t);
+}
+
+static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
+{
+	memcpy(dest, src, bkey_bytes(src));
+}
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+	if (!src)
+		src = &KEY(0, 0, 0);
+
+	SET_KEY_INODE(dest, KEY_INODE(src));
+	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+	uint64_t *d = (void *) k;
+	return (struct bkey *) (d + bkey_u64s(k));
+}
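+
+/*
+ * Since keys are variable length, walking the keys in a bset means stepping
+ * by bkey_u64s(), e.g. (illustrative only):
+ *
+ *	for (k = i->start; k != end(i); k = bkey_next(k))
+ *		do_something_with(k);
+ */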
+
+/* Keylists */
+
+struct keylist {
+	struct bkey		*top;
+	union {
+		uint64_t		*list;
+		struct bkey		*bottom;
+	};
+
+	/* Enough room for btree_split's keys without realloc */
+#define KEYLIST_INLINE		16
+	uint64_t		d[KEYLIST_INLINE];
+};
+
+static inline void bch_keylist_init(struct keylist *l)
+{
+	l->top = (void *) (l->list = l->d);
+}
+
+static inline void bch_keylist_push(struct keylist *l)
+{
+	l->top = bkey_next(l->top);
+}
+
+static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
+{
+	bkey_copy(l->top, k);
+	bch_keylist_push(l);
+}
+
+static inline bool bch_keylist_empty(struct keylist *l)
+{
+	return l->top == (void *) l->list;
+}
+
+static inline void bch_keylist_free(struct keylist *l)
+{
+	if (l->list != l->d)
+		kfree(l->list);
+}
+
+void bch_keylist_copy(struct keylist *, struct keylist *);
+struct bkey *bch_keylist_pop(struct keylist *);
+int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
+
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+			      unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+	BUG_ON(bkey_cmp(where, k) > 0);
+	return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+	return __bch_cut_back(where, k);
+}
+
+const char *bch_ptr_status(struct cache_set *, const struct bkey *);
+bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
+bool bch_ptr_bad(struct btree *, const struct bkey *);
+
+static inline uint8_t gen_after(uint8_t a, uint8_t b)
+{
+	uint8_t r = a - b;
+	return r > 128U ? 0 : r;
+}
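+
+/*
+ * Worked example: generations are 8 bits and wrap, so the subtraction is
+ * modular - gen_after(2, 250) == 8 (a is 8 generations ahead of b), while
+ * gen_after(250, 2) gives r == 248 > 128 and so returns 0.
+ */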
+
+static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
+				unsigned i)
+{
+	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
+}
+
+static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
+				 unsigned i)
+{
+	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+}
+
+
+typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
+
+struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+					struct btree *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
+				   struct bkey *, struct bset_tree *);
+
+/* 32 bits total: */
+#define BKEY_MID_BITS		3
+#define BKEY_EXPONENT_BITS	7
+#define BKEY_MANTISSA_BITS	22
+#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
+
+struct bkey_float {
+	unsigned	exponent:BKEY_EXPONENT_BITS;
+	unsigned	m:BKEY_MID_BITS;
+	unsigned	mantissa:BKEY_MANTISSA_BITS;
+} __packed;
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bset_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE		128
+#define bset_tree_space(b)	(btree_data_space(b) / BSET_CACHELINE)
+
+#define bset_tree_bytes(b)	(bset_tree_space(b) * sizeof(struct bkey_float))
+#define bset_prev_bytes(b)	(bset_tree_space(b) * sizeof(uint8_t))
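+
+/*
+ * Worked example (sizes are illustrative): with 16k of btree data space,
+ * bset_tree_space() is 16384 / 128 = 128 entries, so the auxiliary tree
+ * costs 128 * 4 bytes plus a 128 byte prev array - under 5% overhead for
+ * the lookup acceleration.
+ */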
+
+void bch_bset_init_next(struct btree *);
+
+void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
+void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
+			   const struct bkey *);
+
+static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
+					   const struct bkey *search)
+{
+	return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
+bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
+void bch_btree_sort_lazy(struct btree *);
+void bch_btree_sort_into(struct btree *, struct btree *);
+void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
+void bch_btree_sort_partial(struct btree *, unsigned);
+
+static inline void bch_btree_sort(struct btree *b)
+{
+	bch_btree_sort_partial(b, 0);
+}
+
+int bch_bset_print_stats(struct cache_set *, char *);
+
+#endif
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
new file mode 100644
index 0000000..7a5658f
--- /dev/null
+++ b/drivers/md/bcache/btree.c
@@ -0,0 +1,2503 @@
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Uses a block device as cache for other block devices; optimized for SSDs.
+ * All allocation is done in buckets, which should match the erase block size
+ * of the device.
+ *
+ * Buckets containing cached data are kept on a heap sorted by priority;
+ * bucket priority is increased on cache hit, and periodically all the buckets
+ * on the heap have their priority scaled down. This currently is just used as
+ * an LRU but in the future should allow for more intelligent heuristics.
+ *
+ * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
+ * counter. Garbage collection is used to remove stale pointers.
+ *
+ * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
+ * as keys are inserted we only sort the pages that have not yet been written.
+ * When garbage collection is run, we resort the entire node.
+ *
+ * All configuration is done via sysfs; see Documentation/bcache.txt.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/hash.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <trace/events/bcache.h>
+
+/*
+ * Todo:
+ * register_bcache: Return errors out to userspace correctly
+ *
+ * Writeback: don't undirty key until after a cache flush
+ *
+ * Create an iterator for key pointers
+ *
+ * On btree write error, mark bucket such that it won't be freed from the cache
+ *
+ * Journalling:
+ *   Check for bad keys in replay
+ *   Propagate barriers
+ *   Refcount journal entries in journal_replay
+ *
+ * Garbage collection:
+ *   Finish incremental gc
+ *   Gc should free old UUIDs, data for invalid UUIDs
+ *
+ * Provide a way to list backing device UUIDs we have data cached for, and
+ * probably how long it's been since we've seen them, and a way to invalidate
+ * dirty data for devices that will never be attached again
+ *
+ * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
+ * that based on that and how much dirty data we have we can keep writeback
+ * from being starved
+ *
+ * Add a tracepoint or somesuch to watch for writeback starvation
+ *
+ * When btree depth > 1 and splitting an interior node, we have to make sure
+ * alloc_bucket() cannot fail. This should be true but is not completely
+ * obvious.
+ *
+ * Make sure all allocations get charged to the root cgroup
+ *
+ * Plugging?
+ *
+ * If data write is less than hard sector size of ssd, round up offset in open
+ * bucket to the next whole sector
+ *
+ * Also lookup by cgroup in get_open_bucket()
+ *
+ * Superblock needs to be fleshed out for multiple cache devices
+ *
+ * Add a sysfs tunable for the number of writeback IOs in flight
+ *
+ * Add a sysfs tunable for the number of open data buckets
+ *
+ * IO tracking: Can we track when one process is doing io on behalf of another?
+ * IO tracking: Don't use just an average, weigh more recent stuff higher
+ *
+ * Test module load/unload
+ */
+
+static const char * const op_types[] = {
+	"insert", "replace"
+};
+
+static const char *op_type(struct btree_op *op)
+{
+	return op_types[op->type];
+}
+
+#define MAX_NEED_GC		64
+#define MAX_SAVE_PRIO		72
+
+#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
+
+#define PTR_HASH(c, k)							\
+	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+
+struct workqueue_struct *bch_gc_wq;
+static struct workqueue_struct *btree_io_wq;
+
+void bch_btree_op_init_stack(struct btree_op *op)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	closure_init_stack(&op->cl);
+	op->lock = -1;
+	bch_keylist_init(&op->keys);
+}
+
+/* Btree key manipulation */
+
+static void bkey_put(struct cache_set *c, struct bkey *k, int level)
+{
+	if ((level && KEY_OFFSET(k)) || !level)
+		__bkey_put(c, k);
+}
+
+/* Btree IO */
+
+static uint64_t btree_csum_set(struct btree *b, struct bset *i)
+{
+	uint64_t crc = b->key.ptr[0];
+	void *data = (void *) i + 8, *end = end(i);
+
+	crc = bch_crc64_update(crc, data, end - data);
+	return crc ^ 0xffffffffffffffffULL;
+}
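+
+/*
+ * Note: the checksum deliberately skips the first 8 bytes of the bset
+ * (the csum field itself) and is seeded with the node's first pointer, so
+ * a bset read back from the wrong bucket fails to verify.
+ */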
+
+static void btree_bio_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	struct btree *b = container_of(cl, struct btree, io.cl);
+
+	if (error)
+		set_btree_node_io_error(b);
+
+	bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE)
+				 ? "writing btree" : "reading btree");
+	closure_put(cl);
+}
+
+static void btree_bio_init(struct btree *b)
+{
+	BUG_ON(b->bio);
+	b->bio = bch_bbio_alloc(b->c);
+
+	b->bio->bi_end_io	= btree_bio_endio;
+	b->bio->bi_private	= &b->io.cl;
+}
+
+void bch_btree_read_done(struct closure *cl)
+{
+	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct bset *i = b->sets[0].data;
+	struct btree_iter *iter = b->c->fill_iter;
+	const char *err = "bad btree header";
+	BUG_ON(b->nsets || b->written);
+
+	bch_bbio_free(b->bio, b->c);
+	b->bio = NULL;
+
+	mutex_lock(&b->c->fill_lock);
+	iter->used = 0;
+
+	if (btree_node_io_error(b) ||
+	    !i->seq)
+		goto err;
+
+	for (;
+	     b->written < btree_blocks(b) && i->seq == b->sets[0].data->seq;
+	     i = write_block(b)) {
+		err = "unsupported bset version";
+		if (i->version > BCACHE_BSET_VERSION)
+			goto err;
+
+		err = "bad btree header";
+		if (b->written + set_blocks(i, b->c) > btree_blocks(b))
+			goto err;
+
+		err = "bad magic";
+		if (i->magic != bset_magic(b->c))
+			goto err;
+
+		err = "bad checksum";
+		switch (i->version) {
+		case 0:
+			if (i->csum != csum_set(i))
+				goto err;
+			break;
+		case BCACHE_BSET_VERSION:
+			if (i->csum != btree_csum_set(b, i))
+				goto err;
+			break;
+		}
+
+		err = "empty set";
+		if (i != b->sets[0].data && !i->keys)
+			goto err;
+
+		bch_btree_iter_push(iter, i->start, end(i));
+
+		b->written += set_blocks(i, b->c);
+	}
+
+	err = "corrupted btree";
+	for (i = write_block(b);
+	     index(i, b) < btree_blocks(b);
+	     i = ((void *) i) + block_bytes(b->c))
+		if (i->seq == b->sets[0].data->seq)
+			goto err;
+
+	bch_btree_sort_and_fix_extents(b, iter);
+
+	i = b->sets[0].data;
+	err = "short btree key";
+	if (b->sets[0].size &&
+	    bkey_cmp(&b->key, &b->sets[0].end) < 0)
+		goto err;
+
+	if (b->written < btree_blocks(b))
+		bch_bset_init_next(b);
+out:
+
+	mutex_unlock(&b->c->fill_lock);
+
+	spin_lock(&b->c->btree_read_time_lock);
+	bch_time_stats_update(&b->c->btree_read_time, b->io_start_time);
+	spin_unlock(&b->c->btree_read_time_lock);
+
+	smp_wmb(); /* read_done is our write lock */
+	set_btree_node_read_done(b);
+
+	closure_return(cl);
+err:
+	set_btree_node_io_error(b);
+	bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
+			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
+			    index(i, b), i->keys);
+	goto out;
+}
+
+void bch_btree_read(struct btree *b)
+{
+	BUG_ON(b->nsets || b->written);
+
+	if (!closure_trylock(&b->io.cl, &b->c->cl))
+		BUG();
+
+	b->io_start_time = local_clock();
+
+	btree_bio_init(b);
+	b->bio->bi_rw	= REQ_META|READ_SYNC;
+	b->bio->bi_size	= KEY_SIZE(&b->key) << 9;
+
+	bch_bio_map(b->bio, b->sets[0].data);
+
+	pr_debug("%s", pbtree(b));
+	trace_bcache_btree_read(b->bio);
+	bch_submit_bbio(b->bio, b->c, &b->key, 0);
+
+	continue_at(&b->io.cl, bch_btree_read_done, system_wq);
+}
+
+static void btree_complete_write(struct btree *b, struct btree_write *w)
+{
+	if (w->prio_blocked &&
+	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
+		wake_up(&b->c->alloc_wait);
+
+	if (w->journal) {
+		atomic_dec_bug(w->journal);
+		__closure_wake_up(&b->c->journal.wait);
+	}
+
+	if (w->owner)
+		closure_put(w->owner);
+
+	w->prio_blocked	= 0;
+	w->journal	= NULL;
+	w->owner	= NULL;
+}
+
+static void __btree_write_done(struct closure *cl)
+{
+	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct btree_write *w = btree_prev_write(b);
+
+	bch_bbio_free(b->bio, b->c);
+	b->bio = NULL;
+	btree_complete_write(b, w);
+
+	if (btree_node_dirty(b))
+		queue_delayed_work(btree_io_wq, &b->work,
+				   msecs_to_jiffies(30000));
+
+	closure_return(cl);
+}
+
+static void btree_write_done(struct closure *cl)
+{
+	struct btree *b = container_of(cl, struct btree, io.cl);
+	struct bio_vec *bv;
+	int n;
+
+	__bio_for_each_segment(bv, b->bio, n, 0)
+		__free_page(bv->bv_page);
+
+	__btree_write_done(cl);
+}
+
+static void do_btree_write(struct btree *b)
+{
+	struct closure *cl = &b->io.cl;
+	struct bset *i = b->sets[b->nsets].data;
+	BKEY_PADDED(key) k;
+
+	i->version	= BCACHE_BSET_VERSION;
+	i->csum		= btree_csum_set(b, i);
+
+	btree_bio_init(b);
+	b->bio->bi_rw	= REQ_META|WRITE_SYNC;
+	b->bio->bi_size	= set_blocks(i, b->c) * block_bytes(b->c);
+	bch_bio_map(b->bio, i);
+
+	bkey_copy(&k.key, &b->key);
+	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+
+	if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
+		int j;
+		struct bio_vec *bv;
+		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
+
+		bio_for_each_segment(bv, b->bio, j)
+			memcpy(page_address(bv->bv_page),
+			       base + j * PAGE_SIZE, PAGE_SIZE);
+
+		trace_bcache_btree_write(b->bio);
+		bch_submit_bbio(b->bio, b->c, &k.key, 0);
+
+		continue_at(cl, btree_write_done, NULL);
+	} else {
+		b->bio->bi_vcnt = 0;
+		bch_bio_map(b->bio, i);
+
+		trace_bcache_btree_write(b->bio);
+		bch_submit_bbio(b->bio, b->c, &k.key, 0);
+
+		closure_sync(cl);
+		__btree_write_done(cl);
+	}
+}
+
+static void __btree_write(struct btree *b)
+{
+	struct bset *i = b->sets[b->nsets].data;
+
+	BUG_ON(current->bio_list);
+
+	closure_lock(&b->io, &b->c->cl);
+	cancel_delayed_work(&b->work);
+
+	clear_bit(BTREE_NODE_dirty,	 &b->flags);
+	change_bit(BTREE_NODE_write_idx, &b->flags);
+
+	bch_check_key_order(b, i);
+	BUG_ON(b->written && !i->keys);
+
+	do_btree_write(b);
+
+	pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);
+
+	b->written += set_blocks(i, b->c);
+	atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
+			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+
+	bch_btree_sort_lazy(b);
+
+	if (b->written < btree_blocks(b))
+		bch_bset_init_next(b);
+}
+
+static void btree_write_work(struct work_struct *w)
+{
+	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
+
+	down_write(&b->lock);
+
+	if (btree_node_dirty(b))
+		__btree_write(b);
+	up_write(&b->lock);
+}
+
+void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
+{
+	struct bset *i = b->sets[b->nsets].data;
+	struct btree_write *w = btree_current_write(b);
+
+	BUG_ON(b->written &&
+	       (b->written >= btree_blocks(b) ||
+		i->seq != b->sets[0].data->seq ||
+		!i->keys));
+
+	if (!btree_node_dirty(b)) {
+		set_btree_node_dirty(b);
+		queue_delayed_work(btree_io_wq, &b->work,
+				   msecs_to_jiffies(30000));
+	}
+
+	w->prio_blocked += b->prio_blocked;
+	b->prio_blocked = 0;
+
+	if (op && op->journal && !b->level) {
+		if (w->journal &&
+		    journal_pin_cmp(b->c, w, op)) {
+			atomic_dec_bug(w->journal);
+			w->journal = NULL;
+		}
+
+		if (!w->journal) {
+			w->journal = op->journal;
+			atomic_inc(w->journal);
+		}
+	}
+
+	if (current->bio_list)
+		return;
+
+	/* Force write if set is too big */
+	if (now ||
+	    b->level ||
+	    set_bytes(i) > PAGE_SIZE - 48) {
+		if (op && now) {
+			/* Must wait on multiple writes */
+			BUG_ON(w->owner);
+			w->owner = &op->cl;
+			closure_get(&op->cl);
+		}
+
+		__btree_write(b);
+	}
+	BUG_ON(!b->written);
+}
+
+/*
+ * Btree in memory cache - allocation/freeing
+ * mca -> memory cache
+ */
+
+static void mca_reinit(struct btree *b)
+{
+	unsigned i;
+
+	b->flags	= 0;
+	b->written	= 0;
+	b->nsets	= 0;
+
+	for (i = 0; i < MAX_BSETS; i++)
+		b->sets[i].size = 0;
+	/*
+	 * Second loop starts at 1 because b->sets[0].data is the memory we
+	 * allocated
+	 */
+	for (i = 1; i < MAX_BSETS; i++)
+		b->sets[i].data = NULL;
+}
+
+#define mca_reserve(c)	(((c->root && c->root->level)		\
+			  ? c->root->level : 1) * 8 + 16)
+#define mca_can_free(c)						\
+	max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
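+
+/*
+ * Worked example: with a root at level 2 the reserve is 2 * 8 + 16 = 32
+ * nodes; with no root yet (or a leaf root) it's 1 * 8 + 16 = 24.
+ */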
+
+static void mca_data_free(struct btree *b)
+{
+	struct bset_tree *t = b->sets;
+	BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+	if (bset_prev_bytes(b) < PAGE_SIZE)
+		kfree(t->prev);
+	else
+		free_pages((unsigned long) t->prev,
+			   get_order(bset_prev_bytes(b)));
+
+	if (bset_tree_bytes(b) < PAGE_SIZE)
+		kfree(t->tree);
+	else
+		free_pages((unsigned long) t->tree,
+			   get_order(bset_tree_bytes(b)));
+
+	free_pages((unsigned long) t->data, b->page_order);
+
+	t->prev = NULL;
+	t->tree = NULL;
+	t->data = NULL;
+	list_move(&b->list, &b->c->btree_cache_freed);
+	b->c->bucket_cache_used--;
+}
+
+static void mca_bucket_free(struct btree *b)
+{
+	BUG_ON(btree_node_dirty(b));
+
+	b->key.ptr[0] = 0;
+	hlist_del_init_rcu(&b->hash);
+	list_move(&b->list, &b->c->btree_cache_freeable);
+}
+
+static unsigned btree_order(struct bkey *k)
+{
+	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
+}
+
+static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
+{
+	struct bset_tree *t = b->sets;
+	BUG_ON(t->data);
+
+	b->page_order = max_t(unsigned,
+			      ilog2(b->c->btree_pages),
+			      btree_order(k));
+
+	t->data = (void *) __get_free_pages(gfp, b->page_order);
+	if (!t->data)
+		goto err;
+
+	t->tree = bset_tree_bytes(b) < PAGE_SIZE
+		? kmalloc(bset_tree_bytes(b), gfp)
+		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
+	if (!t->tree)
+		goto err;
+
+	t->prev = bset_prev_bytes(b) < PAGE_SIZE
+		? kmalloc(bset_prev_bytes(b), gfp)
+		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
+	if (!t->prev)
+		goto err;
+
+	list_move(&b->list, &b->c->btree_cache);
+	b->c->bucket_cache_used++;
+	return;
+err:
+	mca_data_free(b);
+}
+
+static struct btree *mca_bucket_alloc(struct cache_set *c,
+				      struct bkey *k, gfp_t gfp)
+{
+	struct btree *b = kzalloc(sizeof(struct btree), gfp);
+	if (!b)
+		return NULL;
+
+	init_rwsem(&b->lock);
+	lockdep_set_novalidate_class(&b->lock);
+	INIT_LIST_HEAD(&b->list);
+	INIT_DELAYED_WORK(&b->work, btree_write_work);
+	b->c = c;
+	closure_init_unlocked(&b->io);
+
+	mca_data_alloc(b, k, gfp);
+	return b;
+}
+
+static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
+{
+	lockdep_assert_held(&b->c->bucket_lock);
+
+	if (!down_write_trylock(&b->lock))
+		return -ENOMEM;
+
+	if (b->page_order < min_order) {
+		rw_unlock(true, b);
+		return -ENOMEM;
+	}
+
+	BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
+
+	if (cl && btree_node_dirty(b))
+		bch_btree_write(b, true, NULL);
+
+	if (cl)
+		closure_wait_event_async(&b->io.wait, cl,
+			 atomic_read(&b->io.cl.remaining) == -1);
+
+	if (btree_node_dirty(b) ||
+	    !closure_is_unlocked(&b->io.cl) ||
+	    work_pending(&b->work.work)) {
+		rw_unlock(true, b);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+	struct btree *b, *t;
+	unsigned long i, nr = sc->nr_to_scan;
+
+	if (c->shrinker_disabled)
+		return 0;
+
+	if (c->try_harder)
+		return 0;
+
+	/*
+	 * If nr == 0, we're supposed to return the number of items we have
+	 * cached. Not allowed to return -1.
+	 */
+	if (!nr)
+		return mca_can_free(c) * c->btree_pages;
+
+	/* Return -1 if we can't do anything right now */
+	if (sc->gfp_mask & __GFP_WAIT)
+		mutex_lock(&c->bucket_lock);
+	else if (!mutex_trylock(&c->bucket_lock))
+		return -1;
+
+	nr /= c->btree_pages;
+	nr = min_t(unsigned long, nr, mca_can_free(c));
+
+	i = 0;
+	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+		if (!nr)
+			break;
+
+		if (++i > 3 &&
+		    !mca_reap(b, NULL, 0)) {
+			mca_data_free(b);
+			rw_unlock(true, b);
+			--nr;
+		}
+	}
+
+	/*
+	 * Can happen right when we first start up, before we've read in any
+	 * btree nodes
+	 */
+	if (list_empty(&c->btree_cache))
+		goto out;
+
+	for (i = 0; nr && i < c->bucket_cache_used; i++) {
+		b = list_first_entry(&c->btree_cache, struct btree, list);
+		list_rotate_left(&c->btree_cache);
+
+		if (!b->accessed &&
+		    !mca_reap(b, NULL, 0)) {
+			mca_bucket_free(b);
+			mca_data_free(b);
+			rw_unlock(true, b);
+			--nr;
+		} else
+			b->accessed = 0;
+	}
+out:
+	nr = mca_can_free(c) * c->btree_pages;
+	mutex_unlock(&c->bucket_lock);
+	return nr;
+}
+
+void bch_btree_cache_free(struct cache_set *c)
+{
+	struct btree *b;
+	struct closure cl;
+	closure_init_stack(&cl);
+
+	if (c->shrink.list.next)
+		unregister_shrinker(&c->shrink);
+
+	mutex_lock(&c->bucket_lock);
+
+#ifdef CONFIG_BCACHE_DEBUG
+	if (c->verify_data)
+		list_move(&c->verify_data->list, &c->btree_cache);
+#endif
+
+	list_splice(&c->btree_cache_freeable,
+		    &c->btree_cache);
+
+	while (!list_empty(&c->btree_cache)) {
+		b = list_first_entry(&c->btree_cache, struct btree, list);
+
+		if (btree_node_dirty(b))
+			btree_complete_write(b, btree_current_write(b));
+		clear_bit(BTREE_NODE_dirty, &b->flags);
+
+		mca_data_free(b);
+	}
+
+	while (!list_empty(&c->btree_cache_freed)) {
+		b = list_first_entry(&c->btree_cache_freed,
+				     struct btree, list);
+		list_del(&b->list);
+		cancel_delayed_work_sync(&b->work);
+		kfree(b);
+	}
+
+	mutex_unlock(&c->bucket_lock);
+}
+
+int bch_btree_cache_alloc(struct cache_set *c)
+{
+	unsigned i;
+
+	/* XXX: doesn't check for errors */
+
+	closure_init_unlocked(&c->gc);
+
+	for (i = 0; i < mca_reserve(c); i++)
+		mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+
+	list_splice_init(&c->btree_cache,
+			 &c->btree_cache_freeable);
+
+#ifdef CONFIG_BCACHE_DEBUG
+	mutex_init(&c->verify_lock);
+
+	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
+
+	if (c->verify_data &&
+	    c->verify_data->sets[0].data)
+		list_del_init(&c->verify_data->list);
+	else
+		c->verify_data = NULL;
+#endif
+
+	c->shrink.shrink = bch_mca_shrink;
+	c->shrink.seeks = 4;
+	c->shrink.batch = c->btree_pages * 2;
+	register_shrinker(&c->shrink);
+
+	return 0;
+}
+
+/* Btree in memory cache - hash table */
+
+static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
+{
+	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
+}
+
+static struct btree *mca_find(struct cache_set *c, struct bkey *k)
+{
+	struct btree *b;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
+		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
+			goto out;
+	b = NULL;
+out:
+	rcu_read_unlock();
+	return b;
+}
+
+static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
+				     int level, struct closure *cl)
+{
+	int ret = -ENOMEM;
+	struct btree *i;
+
+	if (!cl)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Trying to free up some memory - i.e. reuse some btree nodes - may
+	 * require initiating IO to flush the dirty part of the node. If we're
+	 * running under generic_make_request(), that IO will never finish and
+	 * we would deadlock. Returning -EAGAIN causes the cache lookup code to
+	 * punt to workqueue and retry.
+	 */
+	if (current->bio_list)
+		return ERR_PTR(-EAGAIN);
+
+	if (c->try_harder && c->try_harder != cl) {
+		closure_wait_event_async(&c->try_wait, cl, !c->try_harder);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	/* XXX: tracepoint */
+	c->try_harder = cl;
+	c->try_harder_start = local_clock();
+retry:
+	list_for_each_entry_reverse(i, &c->btree_cache, list) {
+		int r = mca_reap(i, cl, btree_order(k));
+		if (!r)
+			return i;
+		if (r != -ENOMEM)
+			ret = r;
+	}
+
+	if (ret == -EAGAIN &&
+	    closure_blocking(cl)) {
+		mutex_unlock(&c->bucket_lock);
+		closure_sync(cl);
+		mutex_lock(&c->bucket_lock);
+		goto retry;
+	}
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * We can only have one thread cannibalizing other cached btree nodes at a time,
+ * or we'll deadlock. We use an open coded mutex to ensure that, which
+ * mca_cannibalize() takes. This means every time we unlock the root of
+ * the btree, we need to release this lock if we have it held.
+ */
+void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl)
+{
+	if (c->try_harder == cl) {
+		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
+		c->try_harder = NULL;
+		__closure_wake_up(&c->try_wait);
+	}
+}
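+
+/*
+ * Pairing sketch (illustration only): whichever closure won c->try_harder
+ * in mca_cannibalize() must eventually pass through bch_cannibalize_unlock()
+ * with that same closure, or every other allocator blocks on try_wait.
+ */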
+
+static struct btree *mca_alloc(struct cache_set *c, struct bkey *k,
+			       int level, struct closure *cl)
+{
+	struct btree *b;
+
+	lockdep_assert_held(&c->bucket_lock);
+
+	if (mca_find(c, k))
+		return NULL;
+
+	/* btree_node_free() doesn't free memory; it sticks the node on the end
+	 * of the list. Check if there are any freed nodes there:
+	 */
+	list_for_each_entry(b, &c->btree_cache_freeable, list)
+		if (!mca_reap(b, NULL, btree_order(k)))
+			goto out;
+
+	/* We never free struct btree itself, just the memory that holds the on
+	 * disk node. Check the freed list before allocating a new one:
+	 */
+	list_for_each_entry(b, &c->btree_cache_freed, list)
+		if (!mca_reap(b, NULL, 0)) {
+			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
+			if (!b->sets[0].data)
+				goto err;
+			else
+				goto out;
+		}
+
+	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
+	if (!b)
+		goto err;
+
+	BUG_ON(!down_write_trylock(&b->lock));
+	if (!b->sets->data)
+		goto err;
+out:
+	BUG_ON(!closure_is_unlocked(&b->io.cl));
+
+	bkey_copy(&b->key, k);
+	list_move(&b->list, &c->btree_cache);
+	hlist_del_init_rcu(&b->hash);
+	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
+
+	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
+	b->level	= level;
+
+	mca_reinit(b);
+
+	return b;
+err:
+	if (b)
+		rw_unlock(true, b);
+
+	b = mca_cannibalize(c, k, level, cl);
+	if (!IS_ERR(b))
+		goto out;
+
+	return b;
+}
+
+/**
+ * bch_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
+ * if that closure is in non-blocking mode, this will return -EAGAIN.
+ *
+ * The btree node will have either a read or a write lock held, depending on
+ * level and op->lock.
+ */
+struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
+				 int level, struct btree_op *op)
+{
+	int i = 0;
+	bool write = level <= op->lock;
+	struct btree *b;
+
+	BUG_ON(level < 0);
+retry:
+	b = mca_find(c, k);
+
+	if (!b) {
+		mutex_lock(&c->bucket_lock);
+		b = mca_alloc(c, k, level, &op->cl);
+		mutex_unlock(&c->bucket_lock);
+
+		if (!b)
+			goto retry;
+		if (IS_ERR(b))
+			return b;
+
+		bch_btree_read(b);
+
+		if (!write)
+			downgrade_write(&b->lock);
+	} else {
+		rw_lock(write, b, level);
+		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
+			rw_unlock(write, b);
+			goto retry;
+		}
+		BUG_ON(b->level != level);
+	}
+
+	b->accessed = 1;
+
+	for (; i <= b->nsets && b->sets[i].size; i++) {
+		prefetch(b->sets[i].tree);
+		prefetch(b->sets[i].data);
+	}
+
+	for (; i <= b->nsets; i++)
+		prefetch(b->sets[i].data);
+
+	if (!closure_wait_event(&b->io.wait, &op->cl,
+				btree_node_read_done(b))) {
+		rw_unlock(write, b);
+		b = ERR_PTR(-EAGAIN);
+	} else if (btree_node_io_error(b)) {
+		rw_unlock(write, b);
+		b = ERR_PTR(-EIO);
+	} else
+		BUG_ON(!b->written);
+
+	return b;
+}
+
+static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
+{
+	struct btree *b;
+
+	mutex_lock(&c->bucket_lock);
+	b = mca_alloc(c, k, level, NULL);
+	mutex_unlock(&c->bucket_lock);
+
+	if (!IS_ERR_OR_NULL(b)) {
+		bch_btree_read(b);
+		rw_unlock(true, b);
+	}
+}
+
+/* Btree alloc */
+
+static void btree_node_free(struct btree *b, struct btree_op *op)
+{
+	unsigned i;
+
+	/*
+	 * The BUG_ON() in btree_node_get() implies that we must have a write
+	 * lock on parent to free or even invalidate a node
+	 */
+	BUG_ON(op->lock <= b->level);
+	BUG_ON(b == b->c->root);
+	pr_debug("bucket %s", pbtree(b));
+
+	if (btree_node_dirty(b))
+		btree_complete_write(b, btree_current_write(b));
+	clear_bit(BTREE_NODE_dirty, &b->flags);
+
+	if (b->prio_blocked &&
+	    !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
+		wake_up(&b->c->alloc_wait);
+
+	b->prio_blocked = 0;
+
+	cancel_delayed_work(&b->work);
+
+	mutex_lock(&b->c->bucket_lock);
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++) {
+		BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
+
+		bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
+			    PTR_BUCKET(b->c, &b->key, i));
+	}
+
+	bch_bucket_free(b->c, &b->key);
+	mca_bucket_free(b);
+	mutex_unlock(&b->c->bucket_lock);
+}
+
+struct btree *bch_btree_node_alloc(struct cache_set *c, int level,
+				   struct closure *cl)
+{
+	BKEY_PADDED(key) k;
+	struct btree *b = ERR_PTR(-EAGAIN);
+
+	mutex_lock(&c->bucket_lock);
+retry:
+	if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, cl))
+		goto err;
+
+	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
+
+	b = mca_alloc(c, &k.key, level, cl);
+	if (IS_ERR(b))
+		goto err_free;
+
+	if (!b) {
+		cache_bug(c,
+			"Tried to allocate bucket that was in btree cache");
+		__bkey_put(c, &k.key);
+		goto retry;
+	}
+
+	set_btree_node_read_done(b);
+	b->accessed = 1;
+	bch_bset_init_next(b);
+
+	mutex_unlock(&c->bucket_lock);
+	return b;
+err_free:
+	bch_bucket_free(c, &k.key);
+	__bkey_put(c, &k.key);
+err:
+	mutex_unlock(&c->bucket_lock);
+	return b;
+}
+
+static struct btree *btree_node_alloc_replacement(struct btree *b,
+						  struct closure *cl)
+{
+	struct btree *n = bch_btree_node_alloc(b->c, b->level, cl);
+	if (!IS_ERR_OR_NULL(n))
+		bch_btree_sort_into(b, n);
+
+	return n;
+}
+
+/* Garbage collection */
+
+uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
+{
+	uint8_t stale = 0;
+	unsigned i;
+	struct bucket *g;
+
+	/*
+	 * ptr_invalid() can't return true for the keys that mark btree nodes as
+	 * freed, but since ptr_bad() returns true we'll never actually use them
+	 * for anything, and thus we don't want to mark their pointers here
+	 */
+	if (!bkey_cmp(k, &ZERO_KEY))
+		return stale;
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		if (!ptr_available(c, k, i))
+			continue;
+
+		g = PTR_BUCKET(c, k, i);
+
+		if (gen_after(g->gc_gen, PTR_GEN(k, i)))
+			g->gc_gen = PTR_GEN(k, i);
+
+		if (ptr_stale(c, k, i)) {
+			stale = max(stale, ptr_stale(c, k, i));
+			continue;
+		}
+
+		cache_bug_on(GC_MARK(g) &&
+			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
+			     c, "inconsistent ptrs: mark = %llu, level = %i",
+			     GC_MARK(g), level);
+
+		if (level)
+			SET_GC_MARK(g, GC_MARK_METADATA);
+		else if (KEY_DIRTY(k))
+			SET_GC_MARK(g, GC_MARK_DIRTY);
+
+		/* guard against overflow */
+		SET_GC_SECTORS_USED(g, min_t(unsigned,
+					     GC_SECTORS_USED(g) + KEY_SIZE(k),
+					     (1 << 14) - 1));
+
+		BUG_ON(!GC_SECTORS_USED(g));
+	}
+
+	return stale;
+}
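+
+/*
+ * Worked example: the min_t() above caps GC_SECTORS_USED() at
+ * (1 << 14) - 1 = 16383 sectors (8MB at 512 byte sectors), so buckets
+ * referenced by many keys saturate instead of wrapping the mark field.
+ */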
+
+#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
+
+static int btree_gc_mark_node(struct btree *b, unsigned *keys,
+			      struct gc_stat *gc)
+{
+	uint8_t stale = 0;
+	unsigned last_dev = -1;
+	struct bcache_device *d = NULL;
+	struct bkey *k;
+	struct btree_iter iter;
+	struct bset_tree *t;
+
+	gc->nodes++;
+
+	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+		if (last_dev != KEY_INODE(k)) {
+			last_dev = KEY_INODE(k);
+
+			d = KEY_INODE(k) < b->c->nr_uuids
+				? b->c->devices[last_dev]
+				: NULL;
+		}
+
+		stale = max(stale, btree_mark_key(b, k));
+
+		if (bch_ptr_bad(b, k))
+			continue;
+
+		*keys += bkey_u64s(k);
+
+		gc->key_bytes += bkey_u64s(k);
+		gc->nkeys++;
+
+		gc->data += KEY_SIZE(k);
+		if (KEY_DIRTY(k)) {
+			gc->dirty += KEY_SIZE(k);
+			if (d)
+				d->sectors_dirty_gc += KEY_SIZE(k);
+		}
+	}
+
+	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
+		btree_bug_on(t->size &&
+			     bset_written(b, t) &&
+			     bkey_cmp(&b->key, &t->end) < 0,
+			     b, "found short btree key in gc");
+
+	return stale;
+}
+
+static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
+				    struct btree_op *op)
+{
+	/*
+	 * We block priorities from being written for the duration of garbage
+	 * collection, so we can't sleep in btree_alloc() ->
+	 * bch_bucket_alloc_set(), or we'd risk deadlock - so we don't pass it
+	 * our closure.
+	 */
+	struct btree *n = btree_node_alloc_replacement(b, NULL);
+
+	if (!IS_ERR_OR_NULL(n)) {
+		swap(b, n);
+
+		memcpy(k->ptr, b->key.ptr,
+		       sizeof(uint64_t) * KEY_PTRS(&b->key));
+
+		__bkey_put(b->c, &b->key);
+		atomic_inc(&b->c->prio_blocked);
+		b->prio_blocked++;
+
+		btree_node_free(n, op);
+		up_write(&n->lock);
+	}
+
+	return b;
+}
+
+/*
+ * Leaving this at 2 until we've got incremental garbage collection done; it
+ * could be higher (and has been tested with 4) except that garbage collection
+ * could take much longer, adversely affecting latency.
+ */
+#define GC_MERGE_NODES	2U
+
+struct gc_merge_info {
+	struct btree	*b;
+	struct bkey	*k;
+	unsigned	keys;
+};
+
+static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
+			      struct gc_stat *gc, struct gc_merge_info *r)
+{
+	unsigned nodes = 0, keys = 0, blocks;
+	int i;
+
+	while (nodes < GC_MERGE_NODES && r[nodes].b)
+		keys += r[nodes++].keys;
+
+	blocks = btree_default_blocks(b->c) * 2 / 3;
+
+	if (nodes < 2 ||
+	    __set_blocks(b->sets[0].data, keys, b->c) > blocks * (nodes - 1))
+		return;
+
+	for (i = nodes - 1; i >= 0; --i) {
+		if (r[i].b->written)
+			r[i].b = btree_gc_alloc(r[i].b, r[i].k, op);
+
+		if (r[i].b->written)
+			return;
+	}
+
+	for (i = nodes - 1; i > 0; --i) {
+		struct bset *n1 = r[i].b->sets->data;
+		struct bset *n2 = r[i - 1].b->sets->data;
+		struct bkey *k, *last = NULL;
+
+		keys = 0;
+
+		if (i == 1) {
+			/*
+			 * Last node we're not getting rid of - we're getting
+			 * rid of the node at r[0]. Have to try and fit all of
+			 * the remaining keys into this node; we can't ensure
+			 * they will always fit due to rounding and variable
+			 * length keys (failing to fit shouldn't be possible
+			 * in practice, though)
+			 */
+			if (__set_blocks(n1, n1->keys + r->keys,
+					 b->c) > btree_blocks(r[i].b))
+				return;
+
+			keys = n2->keys;
+			last = &r->b->key;
+		} else
+			for (k = n2->start;
+			     k < end(n2);
+			     k = bkey_next(k)) {
+				if (__set_blocks(n1, n1->keys + keys +
+						 bkey_u64s(k), b->c) > blocks)
+					break;
+
+				last = k;
+				keys += bkey_u64s(k);
+			}
+
+		BUG_ON(__set_blocks(n1, n1->keys + keys,
+				    b->c) > btree_blocks(r[i].b));
+
+		if (last) {
+			bkey_copy_key(&r[i].b->key, last);
+			bkey_copy_key(r[i].k, last);
+		}
+
+		memcpy(end(n1),
+		       n2->start,
+		       (void *) node(n2, keys) - (void *) n2->start);
+
+		n1->keys += keys;
+
+		memmove(n2->start,
+			node(n2, keys),
+			(void *) end(n2) - (void *) node(n2, keys));
+
+		n2->keys -= keys;
+
+		r[i].keys	= n1->keys;
+		r[i - 1].keys	= n2->keys;
+	}
+
+	btree_node_free(r->b, op);
+	up_write(&r->b->lock);
+
+	pr_debug("coalesced %u nodes", nodes);
+
+	gc->nodes--;
+	nodes--;
+
+	memmove(&r[0], &r[1], sizeof(struct gc_merge_info) * nodes);
+	memset(&r[nodes], 0, sizeof(struct gc_merge_info));
+}
+
+static int btree_gc_recurse(struct btree *b, struct btree_op *op,
+			    struct closure *writes, struct gc_stat *gc)
+{
+	void write(struct btree *r)
+	{
+		if (!r->written)
+			bch_btree_write(r, true, op);
+		else if (btree_node_dirty(r)) {
+			BUG_ON(btree_current_write(r)->owner);
+			btree_current_write(r)->owner = writes;
+			closure_get(writes);
+
+			bch_btree_write(r, true, NULL);
+		}
+
+		up_write(&r->lock);
+	}
+
+	int ret = 0, stale;
+	unsigned i;
+	struct gc_merge_info r[GC_MERGE_NODES];
+
+	memset(r, 0, sizeof(r));
+
+	while ((r->k = bch_next_recurse_key(b, &b->c->gc_done))) {
+		r->b = bch_btree_node_get(b->c, r->k, b->level - 1, op);
+
+		if (IS_ERR(r->b)) {
+			ret = PTR_ERR(r->b);
+			break;
+		}
+
+		r->keys	= 0;
+		stale = btree_gc_mark_node(r->b, &r->keys, gc);
+
+		if (!b->written &&
+		    (r->b->level || stale > 10 ||
+		     b->c->gc_always_rewrite))
+			r->b = btree_gc_alloc(r->b, r->k, op);
+
+		if (r->b->level)
+			ret = btree_gc_recurse(r->b, op, writes, gc);
+
+		if (ret) {
+			write(r->b);
+			break;
+		}
+
+		bkey_copy_key(&b->c->gc_done, r->k);
+
+		if (!b->written)
+			btree_gc_coalesce(b, op, gc, r);
+
+		if (r[GC_MERGE_NODES - 1].b)
+			write(r[GC_MERGE_NODES - 1].b);
+
+		memmove(&r[1], &r[0],
+			sizeof(struct gc_merge_info) * (GC_MERGE_NODES - 1));
+
+		/* When we've got incremental GC working, we'll want to do
+		 * if (should_resched())
+		 *	return -EAGAIN;
+		 */
+		cond_resched();
+#if 0
+		if (need_resched()) {
+			ret = -EAGAIN;
+			break;
+		}
+#endif
+	}
+
+	for (i = 1; i < GC_MERGE_NODES && r[i].b; i++)
+		write(r[i].b);
+
+	/* Might have freed some children, must remove their keys */
+	if (!b->written)
+		bch_btree_sort(b);
+
+	return ret;
+}
+
+static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
+			     struct closure *writes, struct gc_stat *gc)
+{
+	struct btree *n = NULL;
+	unsigned keys = 0;
+	int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+
+	if (b->level || stale > 10)
+		n = btree_node_alloc_replacement(b, NULL);
+
+	if (!IS_ERR_OR_NULL(n))
+		swap(b, n);
+
+	if (b->level)
+		ret = btree_gc_recurse(b, op, writes, gc);
+
+	if (!b->written || btree_node_dirty(b)) {
+		atomic_inc(&b->c->prio_blocked);
+		b->prio_blocked++;
+		bch_btree_write(b, true, n ? op : NULL);
+	}
+
+	if (!IS_ERR_OR_NULL(n)) {
+		closure_sync(&op->cl);
+		bch_btree_set_root(b);
+		btree_node_free(n, op);
+		rw_unlock(true, b);
+	}
+
+	return ret;
+}
+
+static void btree_gc_start(struct cache_set *c)
+{
+	struct cache *ca;
+	struct bucket *b;
+	struct bcache_device **d;
+	unsigned i;
+
+	if (!c->gc_mark_valid)
+		return;
+
+	mutex_lock(&c->bucket_lock);
+
+	c->gc_mark_valid = 0;
+	c->gc_done = ZERO_KEY;
+
+	for_each_cache(ca, c, i)
+		for_each_bucket(b, ca) {
+			b->gc_gen = b->gen;
+			if (!atomic_read(&b->pin))
+				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		}
+
+	for (d = c->devices;
+	     d < c->devices + c->nr_uuids;
+	     d++)
+		if (*d)
+			(*d)->sectors_dirty_gc = 0;
+
+	mutex_unlock(&c->bucket_lock);
+}
+
+size_t bch_btree_gc_finish(struct cache_set *c)
+{
+	size_t available = 0;
+	struct bucket *b;
+	struct cache *ca;
+	struct bcache_device **d;
+	unsigned i;
+
+	mutex_lock(&c->bucket_lock);
+
+	set_gc_sectors(c);
+	c->gc_mark_valid = 1;
+	c->need_gc	= 0;
+
+	if (c->root)
+		for (i = 0; i < KEY_PTRS(&c->root->key); i++)
+			SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i),
+				    GC_MARK_METADATA);
+
+	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
+		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
+			    GC_MARK_METADATA);
+
+	for_each_cache(ca, c, i) {
+		uint64_t *i;
+
+		ca->invalidate_needs_gc = 0;
+
+		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
+			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+
+		for (i = ca->prio_buckets;
+		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
+			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+
+		for_each_bucket(b, ca) {
+			b->last_gc	= b->gc_gen;
+			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
+
+			if (!atomic_read(&b->pin) &&
+			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
+				available++;
+				if (!GC_SECTORS_USED(b))
+					bch_bucket_add_unused(ca, b);
+			}
+		}
+	}
+
+	for (d = c->devices;
+	     d < c->devices + c->nr_uuids;
+	     d++)
+		if (*d) {
+			unsigned long last =
+				atomic_long_read(&((*d)->sectors_dirty));
+			long difference = (*d)->sectors_dirty_gc - last;
+
+			pr_debug("sectors dirty off by %li", difference);
+
+			(*d)->sectors_dirty_last += difference;
+
+			atomic_long_set(&((*d)->sectors_dirty),
+					(*d)->sectors_dirty_gc);
+		}
+
+	mutex_unlock(&c->bucket_lock);
+	return available;
+}
+
+static void bch_btree_gc(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
+	int ret;
+	unsigned long available;
+	struct gc_stat stats;
+	struct closure writes;
+	struct btree_op op;
+
+	uint64_t start_time = local_clock();
+	trace_bcache_gc_start(c->sb.set_uuid);
+	blktrace_msg_all(c, "Starting gc");
+
+	memset(&stats, 0, sizeof(struct gc_stat));
+	closure_init_stack(&writes);
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
+
+	btree_gc_start(c);
+
+	ret = btree_root(gc_root, c, &op, &writes, &stats);
+	closure_sync(&op.cl);
+	closure_sync(&writes);
+
+	if (ret) {
+		blktrace_msg_all(c, "Stopped gc");
+		pr_warn("gc failed!");
+
+		continue_at(cl, bch_btree_gc, bch_gc_wq);
+	}
+
+	/* Possibly wait for new UUIDs or whatever to hit disk */
+	bch_journal_meta(c, &op.cl);
+	closure_sync(&op.cl);
+
+	available = bch_btree_gc_finish(c);
+
+	bch_time_stats_update(&c->btree_gc_time, start_time);
+
+	stats.key_bytes *= sizeof(uint64_t);
+	stats.dirty	<<= 9;
+	stats.data	<<= 9;
+	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
+	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
+	blktrace_msg_all(c, "Finished gc");
+
+	trace_bcache_gc_end(c->sb.set_uuid);
+	wake_up(&c->alloc_wait);
+
+	continue_at(cl, bch_moving_gc, bch_gc_wq);
+}
+
+void bch_queue_gc(struct cache_set *c)
+{
+	closure_trylock_call(&c->gc.cl, bch_btree_gc, bch_gc_wq, &c->cl);
+}
+
+/* Initial partial gc */
+
+static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
+				   unsigned long **seen)
+{
+	int ret;
+	unsigned i;
+	struct bkey *k;
+	struct bucket *g;
+	struct btree_iter iter;
+
+	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+		for (i = 0; i < KEY_PTRS(k); i++) {
+			if (!ptr_available(b->c, k, i))
+				continue;
+
+			g = PTR_BUCKET(b->c, k, i);
+
+			if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i),
+						seen[PTR_DEV(k, i)]) ||
+			    !ptr_stale(b->c, k, i)) {
+				g->gen = PTR_GEN(k, i);
+
+				if (b->level)
+					g->prio = BTREE_PRIO;
+				else if (g->prio == BTREE_PRIO)
+					g->prio = INITIAL_PRIO;
+			}
+		}
+
+		btree_mark_key(b, k);
+	}
+
+	if (b->level) {
+		k = bch_next_recurse_key(b, &ZERO_KEY);
+
+		while (k) {
+			struct bkey *p = bch_next_recurse_key(b, k);
+			if (p)
+				btree_node_prefetch(b->c, p, b->level - 1);
+
+			ret = btree(check_recurse, k, b, op, seen);
+			if (ret)
+				return ret;
+
+			k = p;
+		}
+	}
+
+	return 0;
+}
+
+int bch_btree_check(struct cache_set *c, struct btree_op *op)
+{
+	int ret = -ENOMEM;
+	unsigned i;
+	unsigned long *seen[MAX_CACHES_PER_SET];
+
+	memset(seen, 0, sizeof(seen));
+
+	for (i = 0; c->cache[i]; i++) {
+		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
+		seen[i] = kmalloc(n, GFP_KERNEL);
+		if (!seen[i])
+			goto err;
+
+		/* Disables the seen array until prio_read() uses it too */
+		memset(seen[i], 0xFF, n);
+	}
+
+	ret = btree_root(check_recurse, c, op, seen);
+err:
+	for (i = 0; i < MAX_CACHES_PER_SET; i++)
+		kfree(seen[i]);
+	return ret;
+}
+
+/* Btree insertion */
+
+static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
+{
+	struct bset *i = b->sets[b->nsets].data;
+
+	memmove((uint64_t *) where + bkey_u64s(insert),
+		where,
+		(void *) end(i) - (void *) where);
+
+	i->keys += bkey_u64s(insert);
+	bkey_copy(where, insert);
+	bch_bset_fix_lookup_table(b, where);
+}
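+
+/*
+ * Note: shift_keys() is plain insertion into a sorted array of variable
+ * length keys - everything from 'where' onwards moves up by
+ * bkey_u64s(insert) u64s, and the new key is copied into the hole.
+ */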
+
+static bool fix_overlapping_extents(struct btree *b,
+				    struct bkey *insert,
+				    struct btree_iter *iter,
+				    struct btree_op *op)
+{
+	void subtract_dirty(struct bkey *k, int sectors)
+	{
+		struct bcache_device *d = b->c->devices[KEY_INODE(k)];
+
+		if (KEY_DIRTY(k) && d)
+			atomic_long_sub(sectors, &d->sectors_dirty);
+	}
+
+	unsigned old_size, sectors_found = 0;
+
+	while (1) {
+		struct bkey *k = bch_btree_iter_next(iter);
+		if (!k ||
+		    bkey_cmp(&START_KEY(k), insert) >= 0)
+			break;
+
+		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+			continue;
+
+		old_size = KEY_SIZE(k);
+
+		/*
+		 * We might overlap with 0 size extents; we can't skip these
+		 * because if they're in the set we're inserting to we have to
+		 * adjust them so they don't overlap with the key we're
+		 * inserting. But we don't want to check them for BTREE_REPLACE
+		 * operations.
+		 */
+
+		if (op->type == BTREE_REPLACE &&
+		    KEY_SIZE(k)) {
+			/*
+			 * k might have been split since we inserted/found the
+			 * key we're replacing
+			 */
+			unsigned i;
+			uint64_t offset = KEY_START(k) -
+				KEY_START(&op->replace);
+
+			/* But it must be a subset of the replace key */
+			if (KEY_START(k) < KEY_START(&op->replace) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+				goto check_failed;
+
+			/* We didn't find a key that we were supposed to */
+			if (KEY_START(k) > KEY_START(insert) + sectors_found)
+				goto check_failed;
+
+			if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+				goto check_failed;
+
+			/* skip past gen */
+			offset <<= 8;
+
+			BUG_ON(!KEY_PTRS(&op->replace));
+
+			for (i = 0; i < KEY_PTRS(&op->replace); i++)
+				if (k->ptr[i] != op->replace.ptr[i] + offset)
+					goto check_failed;
+
+			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+		}
+
+		if (bkey_cmp(insert, k) < 0 &&
+		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+			/*
+			 * We overlapped in the middle of an existing key: that
+			 * means we have to split the old key. But we have to do
+			 * slightly different things depending on whether the
+			 * old key has been written out yet.
+			 */
+
+			struct bkey *top;
+
+			subtract_dirty(k, KEY_SIZE(insert));
+
+			if (bkey_written(b, k)) {
+				/*
+				 * We insert a new key to cover the top of the
+				 * old key, and the old key is modified in place
+				 * to represent the bottom split.
+				 *
+				 * It's completely arbitrary whether the new key
+				 * is the top or the bottom, but it has to match
+				 * up with what btree_sort_fixup() does - it
+				 * doesn't check for this kind of overlap, it
+				 * depends on us inserting a new key for the top
+				 * here.
+				 */
+				top = bch_bset_search(b, &b->sets[b->nsets],
+						      insert);
+				shift_keys(b, top, k);
+			} else {
+				BKEY_PADDED(key) temp;
+				bkey_copy(&temp.key, k);
+				shift_keys(b, k, &temp.key);
+				top = bkey_next(k);
+			}
+
+			bch_cut_front(insert, top);
+			bch_cut_back(&START_KEY(insert), k);
+			bch_bset_fix_invalidated_key(b, k);
+			return false;
+		}
+
+		if (bkey_cmp(insert, k) < 0) {
+			bch_cut_front(insert, k);
+		} else {
+			if (bkey_written(b, k) &&
+			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+				/*
+				 * Completely overwrote, so we don't have to
+				 * invalidate the binary search tree
+				 */
+				bch_cut_front(k, k);
+			} else {
+				__bch_cut_back(&START_KEY(insert), k);
+				bch_bset_fix_invalidated_key(b, k);
+			}
+		}
+
+		subtract_dirty(k, old_size - KEY_SIZE(k));
+	}
+
+check_failed:
+	if (op->type == BTREE_REPLACE) {
+		if (!sectors_found) {
+			op->insert_collision = true;
+			return true;
+		} else if (sectors_found < KEY_SIZE(insert)) {
+			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+				       (KEY_SIZE(insert) - sectors_found));
+			SET_KEY_SIZE(insert, sectors_found);
+		}
+	}
+
+	return false;
+}
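+
+/*
+ * Illustration of the middle-overlap case above (not driver code):
+ *
+ *	existing k:	|---------k---------|
+ *	insert:		       |--insert--|
+ *	result:		|--k--|           |top|
+ *
+ * k is cut back to end where insert starts; the new top key is cut front
+ * to start where insert ends.
+ */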
+
+static bool btree_insert_key(struct btree *b, struct btree_op *op,
+			     struct bkey *k)
+{
+	struct bset *i = b->sets[b->nsets].data;
+	struct bkey *m, *prev;
+	const char *status = "insert";
+
+	BUG_ON(bkey_cmp(k, &b->key) > 0);
+	BUG_ON(b->level && !KEY_PTRS(k));
+	BUG_ON(!b->level && !KEY_OFFSET(k));
+
+	if (!b->level) {
+		struct btree_iter iter;
+		struct bkey search = KEY(KEY_INODE(k), KEY_START(k), 0);
+
+		/*
+		 * bset_search() returns the first key that is strictly greater
+		 * than the search key - but for back merging, we want to find
+		 * the first key that is greater than or equal to KEY_START(k) -
+		 * unless KEY_START(k) is 0.
+		 */
+		if (KEY_OFFSET(&search))
+			SET_KEY_OFFSET(&search, KEY_OFFSET(&search) - 1);
+
+		prev = NULL;
+		m = bch_btree_iter_init(b, &iter, &search);
+
+		if (fix_overlapping_extents(b, k, &iter, op))
+			return false;
+
+		while (m != end(i) &&
+		       bkey_cmp(k, &START_KEY(m)) > 0)
+			prev = m, m = bkey_next(m);
+
+		if (key_merging_disabled(b->c))
+			goto insert;
+
+		/* prev is in the tree, if we merge we're done */
+		status = "back merging";
+		if (prev &&
+		    bch_bkey_try_merge(b, prev, k))
+			goto merged;
+
+		status = "overwrote front";
+		if (m != end(i) &&
+		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+			goto copy;
+
+		status = "front merge";
+		if (m != end(i) &&
+		    bch_bkey_try_merge(b, k, m))
+			goto copy;
+	} else
+		m = bch_bset_search(b, &b->sets[b->nsets], k);
+
+insert:	shift_keys(b, m, k);
+copy:	bkey_copy(m, k);
+merged:
+	bch_check_keys(b, "%s for %s at %s: %s", status,
+		       op_type(op), pbtree(b), pkey(k));
+	bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status,
+				op_type(op), pbtree(b), pkey(k));
+
+	if (b->level && !KEY_OFFSET(k))
+		b->prio_blocked++;
+
+	pr_debug("%s for %s at %s: %s", status,
+		 op_type(op), pbtree(b), pkey(k));
+
+	return true;
+}
+
+bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+{
+	bool ret = false;
+	struct bkey *k;
+	unsigned oldsize = bch_count_data(b);
+
+	while ((k = bch_keylist_pop(&op->keys))) {
+		bkey_put(b->c, k, b->level);
+		ret |= btree_insert_key(b, op, k);
+	}
+
+	BUG_ON(bch_count_data(b) < oldsize);
+	return ret;
+}
+
+bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+				   struct bio *bio)
+{
+	bool ret = false;
+	uint64_t btree_ptr = b->key.ptr[0];
+	unsigned long seq = b->seq;
+	BKEY_PADDED(k) tmp;
+
+	rw_unlock(false, b);
+	rw_lock(true, b, b->level);
+
+	if (b->key.ptr[0] != btree_ptr ||
+	    b->seq != seq + 1 ||
+	    should_split(b))
+		goto out;
+
+	op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio));
+
+	SET_KEY_PTRS(&op->replace, 1);
+	get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
+
+	SET_PTR_DEV(&op->replace, 0, PTR_CHECK_DEV);
+
+	bkey_copy(&tmp.k, &op->replace);
+
+	BUG_ON(op->type != BTREE_INSERT);
+	BUG_ON(!btree_insert_key(b, op, &tmp.k));
+	bch_btree_write(b, false, NULL);
+	ret = true;
+out:
+	downgrade_write(&b->lock);
+	return ret;
+}
+
+static int btree_split(struct btree *b, struct btree_op *op)
+{
+	bool split, root = b == b->c->root;
+	struct btree *n1, *n2 = NULL, *n3 = NULL;
+	uint64_t start_time = local_clock();
+
+	if (b->level)
+		set_closure_blocking(&op->cl);
+
+	n1 = btree_node_alloc_replacement(b, &op->cl);
+	if (IS_ERR(n1))
+		goto err;
+
+	split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
+
+	pr_debug("%ssplitting at %s keys %i", split ? "" : "not ",
+		 pbtree(b), n1->sets[0].data->keys);
+
+	if (split) {
+		unsigned keys = 0;
+
+		n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
+		if (IS_ERR(n2))
+			goto err_free1;
+
+		if (root) {
+			n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
+			if (IS_ERR(n3))
+				goto err_free2;
+		}
+
+		bch_btree_insert_keys(n1, op);
+
+		/* Has to be a linear search because we don't have an auxiliary
+		 * search tree yet
+		 */
+
+		while (keys < (n1->sets[0].data->keys * 3) / 5)
+			keys += bkey_u64s(node(n1->sets[0].data, keys));
+
+		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
+		keys += bkey_u64s(node(n1->sets[0].data, keys));
+
+		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
+		n1->sets[0].data->keys = keys;
+
+		memcpy(n2->sets[0].data->start,
+		       end(n1->sets[0].data),
+		       n2->sets[0].data->keys * sizeof(uint64_t));
+
+		bkey_copy_key(&n2->key, &b->key);
+
+		bch_keylist_add(&op->keys, &n2->key);
+		bch_btree_write(n2, true, op);
+		rw_unlock(true, n2);
+	} else
+		bch_btree_insert_keys(n1, op);
+
+	bch_keylist_add(&op->keys, &n1->key);
+	bch_btree_write(n1, true, op);
+
+	if (n3) {
+		bkey_copy_key(&n3->key, &MAX_KEY);
+		bch_btree_insert_keys(n3, op);
+		bch_btree_write(n3, true, op);
+
+		closure_sync(&op->cl);
+		bch_btree_set_root(n3);
+		rw_unlock(true, n3);
+	} else if (root) {
+		op->keys.top = op->keys.bottom;
+		closure_sync(&op->cl);
+		bch_btree_set_root(n1);
+	} else {
+		unsigned i;
+
+		bkey_copy(op->keys.top, &b->key);
+		bkey_copy_key(op->keys.top, &ZERO_KEY);
+
+		for (i = 0; i < KEY_PTRS(&b->key); i++) {
+			uint8_t g = PTR_BUCKET(b->c, &b->key, i)->gen + 1;
+
+			SET_PTR_GEN(op->keys.top, i, g);
+		}
+
+		bch_keylist_push(&op->keys);
+		closure_sync(&op->cl);
+		atomic_inc(&b->c->prio_blocked);
+	}
+
+	rw_unlock(true, n1);
+	btree_node_free(b, op);
+
+	bch_time_stats_update(&b->c->btree_split_time, start_time);
+
+	return 0;
+err_free2:
+	__bkey_put(n2->c, &n2->key);
+	btree_node_free(n2, op);
+	rw_unlock(true, n2);
+err_free1:
+	__bkey_put(n1->c, &n1->key);
+	btree_node_free(n1, op);
+	rw_unlock(true, n1);
+err:
+	if (n3 == ERR_PTR(-EAGAIN) ||
+	    n2 == ERR_PTR(-EAGAIN) ||
+	    n1 == ERR_PTR(-EAGAIN))
+		return -EAGAIN;
+
+	pr_warn("couldn't split");
+	return -ENOMEM;
+}
+
+static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
+				    struct keylist *stack_keys)
+{
+	if (b->level) {
+		int ret;
+		struct bkey *insert = op->keys.bottom;
+		struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
+
+		if (!k) {
+			btree_bug(b, "no key to recurse on at level %i/%i",
+				  b->level, b->c->root->level);
+
+			op->keys.top = op->keys.bottom;
+			return -EIO;
+		}
+
+		if (bkey_cmp(insert, k) > 0) {
+			unsigned i;
+
+			if (op->type == BTREE_REPLACE) {
+				__bkey_put(b->c, insert);
+				op->keys.top = op->keys.bottom;
+				op->insert_collision = true;
+				return 0;
+			}
+
+			for (i = 0; i < KEY_PTRS(insert); i++)
+				atomic_inc(&PTR_BUCKET(b->c, insert, i)->pin);
+
+			bkey_copy(stack_keys->top, insert);
+
+			bch_cut_back(k, insert);
+			bch_cut_front(k, stack_keys->top);
+
+			bch_keylist_push(stack_keys);
+		}
+
+		ret = btree(insert_recurse, k, b, op, stack_keys);
+		if (ret)
+			return ret;
+	}
+
+	if (!bch_keylist_empty(&op->keys)) {
+		if (should_split(b)) {
+			if (op->lock <= b->c->root->level) {
+				BUG_ON(b->level);
+				op->lock = b->c->root->level + 1;
+				return -EINTR;
+			}
+			return btree_split(b, op);
+		}
+
+		BUG_ON(write_block(b) != b->sets[b->nsets].data);
+
+		if (bch_btree_insert_keys(b, op))
+			bch_btree_write(b, false, op);
+	}
+
+	return 0;
+}
+
+int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+{
+	int ret = 0;
+	struct keylist stack_keys;
+
+	/*
+	 * Don't want to block with the btree locked unless we have to,
+	 * otherwise we get deadlocks with try_harder and between split/gc
+	 */
+	clear_closure_blocking(&op->cl);
+
+	BUG_ON(bch_keylist_empty(&op->keys));
+	bch_keylist_copy(&stack_keys, &op->keys);
+	bch_keylist_init(&op->keys);
+
+	while (!bch_keylist_empty(&stack_keys) ||
+	       !bch_keylist_empty(&op->keys)) {
+		if (bch_keylist_empty(&op->keys)) {
+			bch_keylist_add(&op->keys,
+					bch_keylist_pop(&stack_keys));
+			op->lock = 0;
+		}
+
+		ret = btree_root(insert_recurse, c, op, &stack_keys);
+
+		if (ret == -EAGAIN) {
+			ret = 0;
+			closure_sync(&op->cl);
+		} else if (ret) {
+			struct bkey *k;
+
+			pr_err("error %i trying to insert key for %s",
+			       ret, op_type(op));
+
+			while ((k = bch_keylist_pop(&stack_keys) ?:
+				    bch_keylist_pop(&op->keys)))
+				bkey_put(c, k, 0);
+		}
+	}
+
+	bch_keylist_free(&stack_keys);
+
+	if (op->journal)
+		atomic_dec_bug(op->journal);
+	op->journal = NULL;
+	return ret;
+}
+
+void bch_btree_set_root(struct btree *b)
+{
+	unsigned i;
+
+	BUG_ON(!b->written);
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
+
+	mutex_lock(&b->c->bucket_lock);
+	list_del_init(&b->list);
+	mutex_unlock(&b->c->bucket_lock);
+
+	b->c->root = b;
+	__bkey_put(b->c, &b->key);
+
+	bch_journal_meta(b->c, NULL);
+	pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
+}
+
+/* Cache lookup */
+
+static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
+				     struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+	int ret = 0;
+
+	while (!ret &&
+	       !op->lookup_done) {
+		unsigned sectors = INT_MAX;
+
+		if (KEY_INODE(k) == op->inode) {
+			if (KEY_START(k) <= bio->bi_sector)
+				break;
+
+			sectors = min_t(uint64_t, sectors,
+					KEY_START(k) - bio->bi_sector);
+		}
+
+		ret = s->d->cache_miss(b, s, bio, sectors);
+	}
+
+	return ret;
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
+				    struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+	unsigned ptr;
+	struct bio *n;
+
+	int ret = submit_partial_cache_miss(b, op, k);
+	if (ret || op->lookup_done)
+		return ret;
+
+	/* XXX: figure out best pointer - for multiple cache devices */
+	ptr = 0;
+
+	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+	while (!op->lookup_done &&
+	       KEY_INODE(k) == op->inode &&
+	       bio->bi_sector < KEY_OFFSET(k)) {
+		struct bkey *bio_key;
+		sector_t sector = PTR_OFFSET(k, ptr) +
+			(bio->bi_sector - KEY_START(k));
+		unsigned sectors = min_t(uint64_t, INT_MAX,
+					 KEY_OFFSET(k) - bio->bi_sector);
+
+		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		if (!n)
+			return -EAGAIN;
+
+		if (n == bio)
+			op->lookup_done = true;
+
+		bio_key = &container_of(n, struct bbio, bio)->key;
+
+		/*
+		 * The bucket we're reading from might be reused while our bio
+		 * is in flight, and we could then end up reading the wrong
+		 * data.
+		 *
+		 * We guard against this by checking (in cache_read_endio()) if
+		 * the pointer is stale again; if so, we treat it as an error
+		 * and reread from the backing device (but we don't pass that
+		 * error up anywhere).
+		 */
+
+		bch_bkey_copy_single_ptr(bio_key, k, ptr);
+		SET_PTR_OFFSET(bio_key, 0, sector);
+
+		n->bi_end_io	= bch_cache_read_endio;
+		n->bi_private	= &s->cl;
+
+		trace_bcache_cache_hit(n);
+		__bch_submit_bbio(n, b->c);
+	}
+
+	return 0;
+}
+
+int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+
+	int ret = 0;
+	struct bkey *k;
+	struct btree_iter iter;
+	bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
+
+	pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode,
+		 (uint64_t) bio->bi_sector);
+
+	do {
+		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
+		if (!k) {
+			/*
+			 * b->key would be exactly what we want, except that
+			 * pointers to btree nodes have nonzero size - we
+			 * wouldn't go far enough
+			 */
+
+			ret = submit_partial_cache_miss(b, op,
+					&KEY(KEY_INODE(&b->key),
+					     KEY_OFFSET(&b->key), 0));
+			break;
+		}
+
+		ret = b->level
+			? btree(search_recurse, k, b, op)
+			: submit_partial_cache_hit(b, op, k);
+	} while (!ret &&
+		 !op->lookup_done);
+
+	return ret;
+}
+
+/* Keybuf code */
+
+static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
+{
+	/* Overlapping keys compare equal */
+	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
+		return -1;
+	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
+		return 1;
+	return 0;
+}
+
+static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
+					    struct keybuf_key *r)
+{
+	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
+}
+
+static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
+				   struct keybuf *buf, struct bkey *end)
+{
+	struct btree_iter iter;
+	bch_btree_iter_init(b, &iter, &buf->last_scanned);
+
+	while (!array_freelist_empty(&buf->freelist)) {
+		struct bkey *k = bch_btree_iter_next_filter(&iter, b,
+							    bch_ptr_bad);
+
+		if (!b->level) {
+			if (!k) {
+				buf->last_scanned = b->key;
+				break;
+			}
+
+			buf->last_scanned = *k;
+			if (bkey_cmp(&buf->last_scanned, end) >= 0)
+				break;
+
+			if (buf->key_predicate(buf, k)) {
+				struct keybuf_key *w;
+
+				pr_debug("%s", pkey(k));
+
+				spin_lock(&buf->lock);
+
+				w = array_alloc(&buf->freelist);
+
+				w->private = NULL;
+				bkey_copy(&w->key, k);
+
+				if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+					array_free(&buf->freelist, w);
+
+				spin_unlock(&buf->lock);
+			}
+		} else {
+			if (!k)
+				break;
+
+			btree(refill_keybuf, k, b, op, buf, end);
+			/*
+			 * Might get an error here, but can't really do anything
+			 * and it'll get logged elsewhere. Just read what we
+			 * can.
+			 */
+
+			if (bkey_cmp(&buf->last_scanned, end) >= 0)
+				break;
+
+			cond_resched();
+		}
+	}
+
+	return 0;
+}
+
+void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
+			  struct bkey *end)
+{
+	struct bkey start = buf->last_scanned;
+	struct btree_op op;
+	bch_btree_op_init_stack(&op);
+
+	cond_resched();
+
+	btree_root(refill_keybuf, c, &op, buf, end);
+	closure_sync(&op.cl);
+
+	pr_debug("found %s keys from %llu:%llu to %llu:%llu",
+		 RB_EMPTY_ROOT(&buf->keys) ? "no" :
+		 array_freelist_empty(&buf->freelist) ? "some" : "a few",
+		 KEY_INODE(&start), KEY_OFFSET(&start),
+		 KEY_INODE(&buf->last_scanned), KEY_OFFSET(&buf->last_scanned));
+
+	spin_lock(&buf->lock);
+
+	if (!RB_EMPTY_ROOT(&buf->keys)) {
+		struct keybuf_key *w;
+		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+		buf->start	= START_KEY(&w->key);
+
+		w = RB_LAST(&buf->keys, struct keybuf_key, node);
+		buf->end	= w->key;
+	} else {
+		buf->start	= MAX_KEY;
+		buf->end	= MAX_KEY;
+	}
+
+	spin_unlock(&buf->lock);
+}
+
+static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
+{
+	rb_erase(&w->node, &buf->keys);
+	array_free(&buf->freelist, w);
+}
+
+void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
+{
+	spin_lock(&buf->lock);
+	__bch_keybuf_del(buf, w);
+	spin_unlock(&buf->lock);
+}
+
+bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
+				  struct bkey *end)
+{
+	bool ret = false;
+	struct keybuf_key *p, *w, s;
+	s.key = *start;
+
+	if (bkey_cmp(end, &buf->start) <= 0 ||
+	    bkey_cmp(start, &buf->end) >= 0)
+		return false;
+
+	spin_lock(&buf->lock);
+	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
+
+	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
+		p = w;
+		w = RB_NEXT(w, node);
+
+		if (p->private)
+			ret = true;
+		else
+			__bch_keybuf_del(buf, p);
+	}
+
+	spin_unlock(&buf->lock);
+	return ret;
+}
+
+struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
+{
+	struct keybuf_key *w;
+	spin_lock(&buf->lock);
+
+	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
+
+	while (w && w->private)
+		w = RB_NEXT(w, node);
+
+	if (w)
+		w->private = ERR_PTR(-EINTR);
+
+	spin_unlock(&buf->lock);
+	return w;
+}
+
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
+					     struct keybuf *buf,
+					     struct bkey *end)
+{
+	struct keybuf_key *ret;
+
+	while (1) {
+		ret = bch_keybuf_next(buf);
+		if (ret)
+			break;
+
+		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
+			pr_debug("scan finished");
+			break;
+		}
+
+		bch_refill_keybuf(c, buf, end);
+	}
+
+	return ret;
+}
+
+void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn)
+{
+	buf->key_predicate	= fn;
+	buf->last_scanned	= MAX_KEY;
+	buf->keys		= RB_ROOT;
+
+	spin_lock_init(&buf->lock);
+	array_allocator_init(&buf->freelist);
+}
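+
+/*
+ * A sketch of the scan loop the keybuf helpers exist for; pred is a
+ * placeholder for whatever keybuf_pred_fn the caller supplies:
+ *
+ *	struct keybuf buf;
+ *	struct keybuf_key *w;
+ *
+ *	bch_keybuf_init(&buf, pred);
+ *	buf.last_scanned = KEY(0, 0, 0);
+ *
+ *	while ((w = bch_keybuf_next_rescan(c, &buf, &MAX_KEY))) {
+ *		consume w->key, then release the slot:
+ *		bch_keybuf_del(&buf, w);
+ *	}
+ */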
+
+void bch_btree_exit(void)
+{
+	if (btree_io_wq)
+		destroy_workqueue(btree_io_wq);
+	if (bch_gc_wq)
+		destroy_workqueue(bch_gc_wq);
+}
+
+int __init bch_btree_init(void)
+{
+	if (!(bch_gc_wq = create_singlethread_workqueue("bch_btree_gc")) ||
+	    !(btree_io_wq = create_singlethread_workqueue("bch_btree_io")))
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
new file mode 100644
index 0000000..af4a709
--- /dev/null
+++ b/drivers/md/bcache/btree.h
@@ -0,0 +1,405 @@
+#ifndef _BCACHE_BTREE_H
+#define _BCACHE_BTREE_H
+
+/*
+ * THE BTREE:
+ *
+ * At a high level, bcache's btree is a relatively standard b+ tree. All keys and
+ * pointers are in the leaves; interior nodes only have pointers to the child
+ * nodes.
+ *
+ * In the interior nodes, a struct bkey always points to a child btree node, and
+ * the key is the highest key in the child node - except that the highest key in
+ * an interior node is always MAX_KEY. The size field refers to the size on disk
+ * of the child node - this would allow us to have variable sized btree nodes
+ * (handy for keeping the depth of the btree at 1 by expanding just the root).
+ *
+ * Btree nodes are themselves log structured, but this is hidden fairly
+ * thoroughly. Btree nodes on disk will in practice have extents that overlap
+ * (because they were written at different times), but in memory we never have
+ * overlapping extents - when we read in a btree node from disk, the first thing
+ * we do is resort all the sets of keys with a mergesort, and in the same pass
+ * we check for overlapping extents and adjust them appropriately.
+ *
+ * struct btree_op is a central interface to the btree code. It's used for
+ * specifying read vs. write locking, and the embedded closure is used for
+ * waiting on IO or on reserve memory.
+ *
+ * BTREE CACHE:
+ *
+ * Btree nodes are cached in memory; traversing the btree might require reading
+ * in btree nodes which is handled mostly transparently.
+ *
+ * bch_btree_node_get() looks up a btree node in the cache and reads it in from
+ * disk if necessary. This function is almost never called directly though - the
+ * btree() macro is used to get a btree node, call some function on it, and
+ * unlock the node after the function returns.
+ *
+ * The root is special cased - it's taken out of the cache's lru (thus pinning
+ * it in memory), so we can find the root of the btree by just dereferencing a
+ * pointer instead of looking it up in the cache. This makes locking a bit
+ * tricky, since the root pointer is protected by the lock in the btree node it
+ * points to - the btree_root() macro handles this.
+ *
+ * In various places we must be able to allocate memory for multiple btree nodes
+ * in order to make forward progress. To do this we use the btree cache itself
+ * as a reserve; if __get_free_pages() fails, we'll find a node in the btree
+ * cache we can reuse. We can't allow more than one thread to be doing this at a
+ * time, so there's a lock, implemented by a pointer to the btree_op closure -
+ * this allows the btree_root() macro to implicitly release this lock.
+ *
+ * BTREE IO:
+ *
+ * Btree nodes never have to be explicitly read in; bch_btree_node_get() handles
+ * this.
+ *
+ * For writing, we have two btree_write structs embedded in struct btree - one
+ * write in flight, and one being set up, and we toggle between them.
+ *
+ * Writing is done with a single function - bch_btree_write() really serves two
+ * different purposes and should be broken up into two different functions. When
+ * passing now = false, it merely indicates that the node is now dirty - calling
+ * it ensures that the dirty keys will be written at some point in the future.
+ *
+ * When passing now = true, bch_btree_write() causes a write to happen
+ * "immediately" (if there was already a write in flight, it'll cause the write
+ * to happen as soon as the previous write completes). It returns immediately
+ * though - but it takes a refcount on the closure in struct btree_op you passed
+ * to it, so a closure_sync() later can be used to wait for the write to
+ * complete.
+ *
+ * This is handy because btree_split() and garbage collection can issue writes
+ * in parallel, reducing the amount of time they have to hold write locks.
+ *
+ * LOCKING:
+ *
+ * When traversing the btree, we may need write locks starting at some level -
+ * inserting a key into the btree will typically only require a write lock on
+ * the leaf node.
+ *
+ * This is specified with the lock field in struct btree_op; lock = 0 means we
+ * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
+ * checks this field and returns the node with the appropriate lock held.
+ *
+ * If, after traversing the btree, the insertion code discovers it has to split
+ * then it must restart from the root and take new locks - to do this it changes
+ * the lock field and returns -EINTR, which causes the btree_root() macro to
+ * loop.
+ *
+ * Handling cache misses requires a different mechanism for upgrading to a write
+ * lock. We do cache lookups with only a read lock held, but if we get a cache
+ * miss and we wish to insert this data into the cache, we have to insert a
+ * placeholder key to detect races - otherwise, we could race with a write and
+ * overwrite the data that was just written to the cache with stale data from
+ * the backing device.
+ *
+ * For this we use a sequence number that write locks and unlocks increment - to
+ * insert the check key it unlocks the btree node and then takes a write lock,
+ * and fails if the sequence number doesn't match.
+ */
+
+#include "bset.h"
+#include "debug.h"
+
+struct btree_write {
+	struct closure		*owner;
+	atomic_t		*journal;
+
+	/* If btree_split() frees a btree node, it writes a new pointer to that
+	 * btree node indicating it was freed; it takes a refcount on
+	 * c->prio_blocked because we can't write the gens until the new
+	 * pointer is on disk. This allows btree_write_endio() to release the
+	 * refcount that btree_split() took.
+	 */
+	int			prio_blocked;
+};
+
+struct btree {
+	/* Hottest entries first */
+	struct hlist_node	hash;
+
+	/* Key/pointer for this btree node */
+	BKEY_PADDED(key);
+
+	/* Single bit - set when accessed, cleared by shrinker */
+	unsigned long		accessed;
+	unsigned long		seq;
+	struct rw_semaphore	lock;
+	struct cache_set	*c;
+
+	unsigned long		flags;
+	uint16_t		written;	/* would be nice to kill */
+	uint8_t			level;
+	uint8_t			nsets;
+	uint8_t			page_order;
+
+	/*
+	 * Set of sorted keys - the real btree node - plus a binary search tree
+	 *
+	 * sets[0] is special; sets[0]->tree, sets[0]->prev and sets[0]->data point
+	 * to the memory we have allocated for this btree node. Additionally,
+	 * sets[0]->data points to the entire btree node as it exists on disk.
+	 */
+	struct bset_tree	sets[MAX_BSETS];
+
+	/* Used to refcount bio splits, also protects b->bio */
+	struct closure_with_waitlist	io;
+
+	/* Gets transferred to w->prio_blocked - see the comment there */
+	int			prio_blocked;
+
+	struct list_head	list;
+	struct delayed_work	work;
+
+	uint64_t		io_start_time;
+	struct btree_write	writes[2];
+	struct bio		*bio;
+};
+
+#define BTREE_FLAG(flag)						\
+static inline bool btree_node_ ## flag(struct btree *b)			\
+{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
+									\
+static inline void set_btree_node_ ## flag(struct btree *b)		\
+{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
+
+enum btree_flags {
+	BTREE_NODE_read_done,
+	BTREE_NODE_io_error,
+	BTREE_NODE_dirty,
+	BTREE_NODE_write_idx,
+};
+
+BTREE_FLAG(read_done);
+BTREE_FLAG(io_error);
+BTREE_FLAG(dirty);
+BTREE_FLAG(write_idx);
+
+static inline struct btree_write *btree_current_write(struct btree *b)
+{
+	return b->writes + btree_node_write_idx(b);
+}
+
+static inline struct btree_write *btree_prev_write(struct btree *b)
+{
+	return b->writes + (btree_node_write_idx(b) ^ 1);
+}
+
+static inline unsigned bset_offset(struct btree *b, struct bset *i)
+{
+	return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
+}
+
+static inline struct bset *write_block(struct btree *b)
+{
+	return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
+}
+
+static inline bool bset_written(struct btree *b, struct bset_tree *t)
+{
+	return t->data < write_block(b);
+}
+
+static inline bool bkey_written(struct btree *b, struct bkey *k)
+{
+	return k < write_block(b)->start;
+}
+
+static inline void set_gc_sectors(struct cache_set *c)
+{
+	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
+}
+
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+	return __bch_ptr_invalid(b->c, b->level, k);
+}
+
+static inline struct bkey *bch_btree_iter_init(struct btree *b,
+					       struct btree_iter *iter,
+					       struct bkey *search)
+{
+	return __bch_btree_iter_init(b, iter, search, b->sets);
+}
+
+/* Looping macros */
+
+#define for_each_cached_btree(b, c, iter)				\
+	for (iter = 0;							\
+	     iter < ARRAY_SIZE((c)->bucket_hash);			\
+	     iter++)							\
+		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
+
+#define for_each_key_filter(b, k, iter, filter)				\
+	for (bch_btree_iter_init((b), (iter), NULL);			\
+	     ((k) = bch_btree_iter_next_filter((iter), b, filter));)
+
+#define for_each_key(b, k, iter)					\
+	for (bch_btree_iter_init((b), (iter), NULL);			\
+	     ((k) = bch_btree_iter_next(iter));)
+
+/* Recursing down the btree */
+
+struct btree_op {
+	struct closure		cl;
+	struct cache_set	*c;
+
+	/* Journal entry we have a refcount on */
+	atomic_t		*journal;
+
+	/* Bio to be inserted into the cache */
+	struct bio		*cache_bio;
+
+	unsigned		inode;
+
+	uint16_t		write_prio;
+
+	/* Btree level at which we start taking write locks */
+	short			lock;
+
+	/* Btree insertion type */
+	enum {
+		BTREE_INSERT,
+		BTREE_REPLACE
+	} type:8;
+
+	unsigned		csum:1;
+	unsigned		skip:1;
+	unsigned		flush_journal:1;
+
+	unsigned		insert_data_done:1;
+	unsigned		lookup_done:1;
+	unsigned		insert_collision:1;
+
+	/* Anything after this point won't get zeroed in do_bio_hook() */
+
+	/* Keys to be inserted */
+	struct keylist		keys;
+	BKEY_PADDED(replace);
+};
+
+void bch_btree_op_init_stack(struct btree_op *);
+
+static inline void rw_lock(bool w, struct btree *b, int level)
+{
+	w ? down_write_nested(&b->lock, level + 1)
+	  : down_read_nested(&b->lock, level + 1);
+	if (w)
+		b->seq++;
+}
+
+static inline void rw_unlock(bool w, struct btree *b)
+{
+#ifdef CONFIG_BCACHE_EDEBUG
+	unsigned i;
+
+	if (w &&
+	    b->key.ptr[0] &&
+	    btree_node_read_done(b))
+		for (i = 0; i <= b->nsets; i++)
+			bch_check_key_order(b, b->sets[i].data);
+#endif
+
+	if (w)
+		b->seq++;
+	(w ? up_write : up_read)(&b->lock);
+}
+
+#define insert_lock(s, b)	((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn:		function to call, which will be passed the child node
+ * @key:	key to recurse on
+ * @b:		parent btree node
+ * @op:		pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...)					\
+({									\
+	int _r, l = (b)->level - 1;					\
+	bool _w = l <= (op)->lock;					\
+	struct btree *_b = bch_btree_node_get((b)->c, key, l, op);	\
+	if (!IS_ERR(_b)) {						\
+		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);		\
+		rw_unlock(_w, _b);					\
+	} else								\
+		_r = PTR_ERR(_b);					\
+	_r;								\
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn:		function to call, which will be passed the root node
+ * @c:		cache set
+ * @op:		pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...)					\
+({									\
+	int _r = -EINTR;						\
+	do {								\
+		struct btree *_b = (c)->root;				\
+		bool _w = insert_lock(op, _b);				\
+		rw_lock(_w, _b, _b->level);				\
+		if (_b == (c)->root &&					\
+		    _w == insert_lock(op, _b))				\
+			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
+		rw_unlock(_w, _b);					\
+		bch_cannibalize_unlock(c, &(op)->cl);		\
+	} while (_r == -EINTR);						\
+									\
+	_r;								\
+})
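+
+/*
+ * For example, the search path above recurses into a child node with
+ *
+ *	ret = btree(search_recurse, k, b, op);
+ *
+ * while insertion starts from the top with
+ *
+ *	ret = btree_root(insert_recurse, c, op, &stack_keys);
+ *
+ * btree_root() retries internally whenever a callee returns -EINTR to
+ * ask for different locks.
+ */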
+
+static inline bool should_split(struct btree *b)
+{
+	struct bset *i = write_block(b);
+	return b->written >= btree_blocks(b) ||
+		(i->seq == b->sets[0].data->seq &&
+		 b->written + __set_blocks(i, i->keys + 15, b->c)
+		 > btree_blocks(b));
+}
+
+void bch_btree_read_done(struct closure *);
+void bch_btree_read(struct btree *);
+void bch_btree_write(struct btree *b, bool now, struct btree_op *op);
+
+void bch_cannibalize_unlock(struct cache_set *, struct closure *);
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
+				int, struct btree_op *);
+
+bool bch_btree_insert_keys(struct btree *, struct btree_op *);
+bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
+				   struct bio *);
+int bch_btree_insert(struct btree_op *, struct cache_set *);
+
+int bch_btree_search_recurse(struct btree *, struct btree_op *);
+
+void bch_queue_gc(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct closure *);
+int bch_btree_check(struct cache_set *, struct btree_op *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+
+void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *);
+bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
+				  struct bkey *);
+void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
+struct keybuf_key *bch_keybuf_next(struct keybuf *);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *,
+					  struct keybuf *, struct bkey *);
+
+#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
new file mode 100644
index 0000000..bd05a9a
--- /dev/null
+++ b/drivers/md/bcache/closure.c
@@ -0,0 +1,345 @@
+/*
+ * Asynchronous refcounty things
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#include "closure.h"
+
+void closure_queue(struct closure *cl)
+{
+	struct workqueue_struct *wq = cl->wq;
+	if (wq) {
+		INIT_WORK(&cl->work, cl->work.func);
+		BUG_ON(!queue_work(wq, &cl->work));
+	} else
+		cl->fn(cl);
+}
+EXPORT_SYMBOL_GPL(closure_queue);
+
+#define CL_FIELD(type, field)					\
+	case TYPE_ ## type:					\
+	return &container_of(cl, struct type, cl)->field
+
+static struct closure_waitlist *closure_waitlist(struct closure *cl)
+{
+	switch (cl->type) {
+		CL_FIELD(closure_with_waitlist, wait);
+		CL_FIELD(closure_with_waitlist_and_timer, wait);
+	default:
+		return NULL;
+	}
+}
+
+static struct timer_list *closure_timer(struct closure *cl)
+{
+	switch (cl->type) {
+		CL_FIELD(closure_with_timer, timer);
+		CL_FIELD(closure_with_waitlist_and_timer, timer);
+	default:
+		return NULL;
+	}
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+	int r = flags & CLOSURE_REMAINING_MASK;
+
+	BUG_ON(flags & CLOSURE_GUARD_MASK);
+	BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+
+	/* Must deliver precisely one wakeup */
+	if (r == 1 && (flags & CLOSURE_SLEEPING))
+		wake_up_process(cl->task);
+
+	if (!r) {
+		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
+			/* CLOSURE_BLOCKING might be set - clear it */
+			atomic_set(&cl->remaining,
+				   CLOSURE_REMAINING_INITIALIZER);
+			closure_queue(cl);
+		} else {
+			struct closure *parent = cl->parent;
+			struct closure_waitlist *wait = closure_waitlist(cl);
+
+			closure_debug_destroy(cl);
+
+			atomic_set(&cl->remaining, -1);
+
+			if (wait)
+				closure_wake_up(wait);
+
+			if (cl->fn)
+				cl->fn(cl);
+
+			if (parent)
+				closure_put(parent);
+		}
+	}
+}
+
+/* For clearing flags with the same atomic op as a put */
+void closure_sub(struct closure *cl, int v)
+{
+	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+}
+EXPORT_SYMBOL_GPL(closure_sub);
+
+void closure_put(struct closure *cl)
+{
+	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+}
+EXPORT_SYMBOL_GPL(closure_put);
+
+static void set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+	cl->waiting_on = f;
+#endif
+}
+
+void __closure_wake_up(struct closure_waitlist *wait_list)
+{
+	struct llist_node *list;
+	struct closure *cl;
+	struct llist_node *reverse = NULL;
+
+	list = llist_del_all(&wait_list->list);
+
+	/* We first reverse the list to preserve FIFO ordering and fairness */
+
+	while (list) {
+		struct llist_node *t = list;
+		list = llist_next(list);
+
+		t->next = reverse;
+		reverse = t;
+	}
+
+	/* Then do the wakeups */
+
+	while (reverse) {
+		cl = container_of(reverse, struct closure, list);
+		reverse = llist_next(reverse);
+
+		set_waiting(cl, 0);
+		closure_sub(cl, CLOSURE_WAITING + 1);
+	}
+}
+EXPORT_SYMBOL_GPL(__closure_wake_up);
+
+bool closure_wait(struct closure_waitlist *list, struct closure *cl)
+{
+	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+		return false;
+
+	set_waiting(cl, _RET_IP_);
+	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
+	llist_add(&cl->list, &list->list);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(closure_wait);
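+
+/*
+ * Note the pairing: closure_wait() adds CLOSURE_WAITING plus one ref in
+ * a single atomic op, and __closure_wake_up() drops both with
+ * closure_sub(cl, CLOSURE_WAITING + 1) - so being on a wait list keeps
+ * the closure alive until it has been woken.
+ */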
+
+/**
+ * closure_sync() - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+void closure_sync(struct closure *cl)
+{
+	while (1) {
+		__closure_start_sleep(cl);
+		closure_set_ret_ip(cl);
+
+		if ((atomic_read(&cl->remaining) &
+		     CLOSURE_REMAINING_MASK) == 1)
+			break;
+
+		schedule();
+	}
+
+	__closure_end_sleep(cl);
+}
+EXPORT_SYMBOL_GPL(closure_sync);
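+
+/*
+ * A sketch of the synchronous pattern: take a ref per outstanding thing,
+ * have each completion drop one, and closure_sync() sleeps until only
+ * the caller's own ref remains:
+ *
+ *	struct closure cl;
+ *	closure_init_stack(&cl);
+ *
+ *	closure_get(&cl);	matched by a closure_put() in the endio fn
+ *	submit the IO;
+ *	closure_sync(&cl);
+ */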
+
+/**
+ * closure_trylock() - try to acquire the closure, without waiting
+ * @cl:		closure to lock
+ * @parent:	new parent of @cl; may be NULL
+ *
+ * Returns true if the closure was successfully locked.
+ */
+bool closure_trylock(struct closure *cl, struct closure *parent)
+{
+	if (atomic_cmpxchg(&cl->remaining, -1,
+			   CLOSURE_REMAINING_INITIALIZER) != -1)
+		return false;
+
+	closure_set_ret_ip(cl);
+
+	smp_mb();
+	cl->parent = parent;
+	if (parent)
+		closure_get(parent);
+
+	closure_debug_create(cl);
+	return true;
+}
+EXPORT_SYMBOL_GPL(closure_trylock);
+
+void __closure_lock(struct closure *cl, struct closure *parent,
+		    struct closure_waitlist *wait_list)
+{
+	struct closure wait;
+	closure_init_stack(&wait);
+
+	while (1) {
+		if (closure_trylock(cl, parent))
+			return;
+
+		closure_wait_event_sync(wait_list, &wait,
+					atomic_read(&cl->remaining) == -1);
+	}
+}
+EXPORT_SYMBOL_GPL(__closure_lock);
+
+static void closure_delay_timer_fn(unsigned long data)
+{
+	struct closure *cl = (struct closure *) data;
+	closure_sub(cl, CLOSURE_TIMER + 1);
+}
+
+void do_closure_timer_init(struct closure *cl)
+{
+	struct timer_list *timer = closure_timer(cl);
+
+	init_timer(timer);
+	timer->data	= (unsigned long) cl;
+	timer->function = closure_delay_timer_fn;
+}
+EXPORT_SYMBOL_GPL(do_closure_timer_init);
+
+bool __closure_delay(struct closure *cl, unsigned long delay,
+		     struct timer_list *timer)
+{
+	if (atomic_read(&cl->remaining) & CLOSURE_TIMER)
+		return false;
+
+	BUG_ON(timer_pending(timer));
+
+	timer->expires	= jiffies + delay;
+
+	atomic_add(CLOSURE_TIMER + 1, &cl->remaining);
+	add_timer(timer);
+	return true;
+}
+EXPORT_SYMBOL_GPL(__closure_delay);
+
+void __closure_flush(struct closure *cl, struct timer_list *timer)
+{
+	if (del_timer(timer))
+		closure_sub(cl, CLOSURE_TIMER + 1);
+}
+EXPORT_SYMBOL_GPL(__closure_flush);
+
+void __closure_flush_sync(struct closure *cl, struct timer_list *timer)
+{
+	if (del_timer_sync(timer))
+		closure_sub(cl, CLOSURE_TIMER + 1);
+}
+EXPORT_SYMBOL_GPL(__closure_flush_sync);
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+
+static LIST_HEAD(closure_list);
+static DEFINE_SPINLOCK(closure_list_lock);
+
+void closure_debug_create(struct closure *cl)
+{
+	unsigned long flags;
+
+	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
+	cl->magic = CLOSURE_MAGIC_ALIVE;
+
+	spin_lock_irqsave(&closure_list_lock, flags);
+	list_add(&cl->all, &closure_list);
+	spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(closure_debug_create);
+
+void closure_debug_destroy(struct closure *cl)
+{
+	unsigned long flags;
+
+	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
+	cl->magic = CLOSURE_MAGIC_DEAD;
+
+	spin_lock_irqsave(&closure_list_lock, flags);
+	list_del(&cl->all);
+	spin_unlock_irqrestore(&closure_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(closure_debug_destroy);
+
+static struct dentry *debug;
+
+#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+
+static int debug_seq_show(struct seq_file *f, void *data)
+{
+	struct closure *cl;
+	spin_lock_irq(&closure_list_lock);
+
+	list_for_each_entry(cl, &closure_list, all) {
+		int r = atomic_read(&cl->remaining);
+
+		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
+			   cl, (void *) cl->ip, cl->fn, cl->parent,
+			   r & CLOSURE_REMAINING_MASK);
+
+		seq_printf(f, "%s%s%s%s%s%s\n",
+			   test_bit(WORK_STRUCT_PENDING,
+				    work_data_bits(&cl->work)) ? "Q" : "",
+			   r & CLOSURE_RUNNING	? "R" : "",
+			   r & CLOSURE_BLOCKING	? "B" : "",
+			   r & CLOSURE_STACK	? "S" : "",
+			   r & CLOSURE_SLEEPING	? "Sl" : "",
+			   r & CLOSURE_TIMER	? "T" : "");
+
+		if (r & CLOSURE_WAITING)
+			seq_printf(f, " W %pF\n",
+				   (void *) cl->waiting_on);
+
+		seq_printf(f, "\n");
+	}
+
+	spin_unlock_irq(&closure_list_lock);
+	return 0;
+}
+
+static int debug_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debug_seq_show, NULL);
+}
+
+static const struct file_operations debug_ops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_seq_open,
+	.read		= seq_read,
+	.release	= single_release
+};
+
+void __init closure_debug_init(void)
+{
+	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
+}
+
+#endif
+
+MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
new file mode 100644
index 0000000..0003992
--- /dev/null
+++ b/drivers/md/bcache/closure.h
@@ -0,0 +1,672 @@
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ *   continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, is a macro that returns the calling function.
+ * There's good reason for this.
+ *
+ * To use closures asynchronously safely, they must always have a refcount while
+ * they are running, owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio, int error)
+ * {
+ *	closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If the closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e., in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * For a closure to wait on an arbitrary event, we need to introduce waitlists:
+ *
+ * struct closure_waitlist list;
+ * closure_wait_event(list, cl, condition);
+ * closure_wake_up(wait_list);
+ *
+ * These work analogously to wait_event() and wake_up() - except that instead of
+ * operating on the current thread (for wait_event()) and lists of threads, they
+ * operate on an explicit closure and lists of closures.
+ *
+ * Because it's a closure we can now wait either synchronously or
+ * asynchronously. closure_wait_event() returns the current value of the
+ * condition, and if it returned false continue_at() or closure_sync() can be
+ * used to wait for it to become true.
+ *
+ * It's useful for waiting on things when you can't sleep in the context in
+ * which you must check the condition (perhaps with a spinlock held, or you might be
+ * beneath generic_make_request() - in which case you can't sleep on IO).
+ *
+ * closure_wait_event() will wait either synchronously or asynchronously,
+ * depending on whether the closure is in blocking mode or not. You can pick a
+ * mode explicitly with closure_wait_event_sync() and
+ * closure_wait_event_async(), which do just what you might expect.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ *
+ * Locking:
+ *
+ * Closures are based on work items but they can be thought of as more like
+ * threads - in that like threads and unlike work items they have a well
+ * defined lifetime; they are created (with closure_init()) and eventually
+ * complete after a continue_at(cl, NULL, NULL).
+ *
+ * Suppose you've got some larger structure with a closure embedded in it that's
+ * used for periodically doing garbage collection. You only want one garbage
+ * collection happening at a time, so the natural thing to do is protect it with
+ * a lock. However, it's difficult to use a lock protecting a closure correctly
+ * because the unlock should come after the last continue_at() (additionally, if
+ * you're using the closure asynchronously a mutex won't work since a mutex has
+ * to be unlocked by the same process that locked it).
+ *
+ * So to make it less error prone and more efficient, we also have the ability
+ * to use closures as locks:
+ *
+ * closure_init_unlocked();
+ * closure_trylock();
+ *
+ * That's all we need for trylock() - the last closure_put() implicitly unlocks
+ * it for you.  But for closure_lock(), we also need a wait list:
+ *
+ * struct closure_with_waitlist frobnicator_cl;
+ *
+ * closure_init_unlocked(&frobnicator_cl);
+ * closure_lock(&frobnicator_cl);
+ *
+ * A closure_with_waitlist embeds a closure and a wait list - much like struct
+ * delayed_work embeds a work item and a timer_list. The important thing is, use
+ * it exactly like you would a regular closure and closure_put() will magically
+ * handle everything for you.
+ *
+ * We've got closures that embed timers, too. They're called, appropriately
+ * enough:
+ * struct closure_with_timer;
+ *
+ * This gives you access to closure_delay(). It takes a refcount for a specified
+ * number of jiffies - you could then call closure_sync() (for a slightly
+ * convoluted version of msleep()) or continue_at() - which gives you the same
+ * effect as using a delayed work item, except you can reuse the work_struct
+ * already embedded in struct closure.
+ *
+ * Lastly, there's struct closure_with_waitlist_and_timer. It does what you
+ * probably expect, if you happen to need the features of both. (You don't
+ * really want to know how all this is implemented, but if I've done my job
+ * right you shouldn't have to care).
+ */
+
+struct closure;
+typedef void (closure_fn) (struct closure *);
+
+struct closure_waitlist {
+	struct llist_head	list;
+};
+
+enum closure_type {
+	TYPE_closure				= 0,
+	TYPE_closure_with_waitlist		= 1,
+	TYPE_closure_with_timer			= 2,
+	TYPE_closure_with_waitlist_and_timer	= 3,
+	MAX_CLOSURE_TYPE			= 3,
+};
+
+enum closure_state {
+	/*
+	 * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
+	 * waiting asynchronously
+	 *
+	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+	 * the thread that owns the closure, and cleared by the thread that's
+	 * waking up the closure.
+	 *
+	 * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
+	 * - indicates that cl->task is valid and closure_put() may wake it up.
+	 * Only set or cleared by the thread that owns the closure.
+	 *
+	 * CLOSURE_TIMER: Analogous to CLOSURE_WAITING, indicates that a closure
+	 * has an outstanding timer. Must be set by the thread that owns the
+	 * closure, and cleared by the timer function when the timer goes off.
+	 *
+	 * The rest are for debugging and don't affect behaviour:
+	 *
+	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+	 * closure_init() and when closure_put() runs the next function), and
+	 * must be cleared before remaining hits 0. Primarily to help guard
+	 * against incorrect usage and accidentally transferring references.
+	 * continue_at() and closure_return() clear it for you, if you're doing
+	 * something unusual you can use closure_set_dead() which also helps
+	 * annotate where references are being transferred.
+	 *
+	 * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
+	 * closure with this flag set
+	 */
+
+	CLOSURE_BITS_START	= (1 << 19),
+	CLOSURE_DESTRUCTOR	= (1 << 19),
+	CLOSURE_BLOCKING	= (1 << 21),
+	CLOSURE_WAITING		= (1 << 23),
+	CLOSURE_SLEEPING	= (1 << 25),
+	CLOSURE_TIMER		= (1 << 27),
+	CLOSURE_RUNNING		= (1 << 29),
+	CLOSURE_STACK		= (1 << 31),
+};
+
+#define CLOSURE_GUARD_MASK					\
+	((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING|	\
+	  CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+
+#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)
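+
+/*
+ * The flags deliberately occupy every other bit above CLOSURE_BITS_START;
+ * the bit above each flag is a guard bit, so if a flag is ever added or
+ * subtracted twice the carry or borrow lands in CLOSURE_GUARD_MASK and
+ * trips the BUG_ON() in closure_put_after_sub(). Everything below
+ * CLOSURE_BITS_START is the refcount proper.
+ */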
+
+struct closure {
+	union {
+		struct {
+			struct workqueue_struct *wq;
+			struct task_struct	*task;
+			struct llist_node	list;
+			closure_fn		*fn;
+		};
+		struct work_struct	work;
+	};
+
+	struct closure		*parent;
+
+	atomic_t		remaining;
+
+	enum closure_type	type;
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#define CLOSURE_MAGIC_DEAD	0xc054dead
+#define CLOSURE_MAGIC_ALIVE	0xc054a11e
+
+	unsigned		magic;
+	struct list_head	all;
+	unsigned long		ip;
+	unsigned long		waiting_on;
+#endif
+};
+
+struct closure_with_waitlist {
+	struct closure		cl;
+	struct closure_waitlist	wait;
+};
+
+struct closure_with_timer {
+	struct closure		cl;
+	struct timer_list	timer;
+};
+
+struct closure_with_waitlist_and_timer {
+	struct closure		cl;
+	struct closure_waitlist	wait;
+	struct timer_list	timer;
+};
+
+extern unsigned invalid_closure_type(void);
+
+#define __CLOSURE_TYPE(cl, _t)						\
+	  __builtin_types_compatible_p(typeof(cl), struct _t)		\
+		? TYPE_ ## _t :						\
+
+#define __closure_type(cl)						\
+(									\
+	__CLOSURE_TYPE(cl, closure)					\
+	__CLOSURE_TYPE(cl, closure_with_waitlist)			\
+	__CLOSURE_TYPE(cl, closure_with_timer)				\
+	__CLOSURE_TYPE(cl, closure_with_waitlist_and_timer)		\
+	invalid_closure_type()						\
+)
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void closure_queue(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void closure_sync(struct closure *cl);
+
+bool closure_trylock(struct closure *cl, struct closure *parent);
+void __closure_lock(struct closure *cl, struct closure *parent,
+		    struct closure_waitlist *wait_list);
+
+void do_closure_timer_init(struct closure *cl);
+bool __closure_delay(struct closure *cl, unsigned long delay,
+		     struct timer_list *timer);
+void __closure_flush(struct closure *cl, struct timer_list *timer);
+void __closure_flush_sync(struct closure *cl, struct timer_list *timer);
+
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+
+void closure_debug_init(void);
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_init(void) {}
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+	cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+	cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+	BUG_ON((atomic_inc_return(&cl->remaining) &
+		CLOSURE_REMAINING_MASK) <= 1);
+#else
+	atomic_inc(&cl->remaining);
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline bool closure_is_stopped(struct closure *cl)
+{
+	return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
+}
+
+static inline bool closure_is_unlocked(struct closure *cl)
+{
+	return atomic_read(&cl->remaining) == -1;
+}
+
+static inline void do_closure_init(struct closure *cl, struct closure *parent,
+				   bool running)
+{
+	switch (cl->type) {
+	case TYPE_closure_with_timer:
+	case TYPE_closure_with_waitlist_and_timer:
+		do_closure_timer_init(cl);
+	default:
+		break;
+	}
+
+	cl->parent = parent;
+	if (parent)
+		closure_get(parent);
+
+	if (running) {
+		closure_debug_create(cl);
+		atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+	} else
+		atomic_set(&cl->remaining, -1);
+
+	closure_set_ip(cl);
+}
+
+/*
+ * Hack to get at the embedded closure if there is one, by doing an unsafe cast:
+ * the result of __closure_type() is thrown away, it's used merely for type
+ * checking.
+ */
+#define __to_internal_closure(cl)				\
+({								\
+	BUILD_BUG_ON(__closure_type(*cl) > MAX_CLOSURE_TYPE);	\
+	(struct closure *) cl;					\
+})
+
+#define closure_init_type(cl, parent, running)			\
+do {								\
+	struct closure *_cl = __to_internal_closure(cl);	\
+	_cl->type = __closure_type(*(cl));			\
+	do_closure_init(_cl, parent, running);			\
+} while (0)
+
+/**
+ * __closure_init() - Initialize a closure, skipping the memset()
+ *
+ * May be used instead of closure_init() when memory has already been zeroed.
+ */
+#define __closure_init(cl, parent)				\
+	closure_init_type(cl, parent, true)
+
+/**
+ * closure_init() - Initialize a closure, setting the refcount to 1
+ * @cl:		closure to initialize
+ * @parent:	parent of the new closure. cl will take a refcount on it for its
+ *		lifetime; may be NULL.
+ */
+#define closure_init(cl, parent)				\
+do {								\
+	memset((cl), 0, sizeof(*(cl)));				\
+	__closure_init(cl, parent);				\
+} while (0)
+
+static inline void closure_init_stack(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
+		   CLOSURE_BLOCKING|CLOSURE_STACK);
+}
+
+/**
+ * closure_init_unlocked() - Initialize a closure but leave it unlocked.
+ * @cl:		closure to initialize
+ *
+ * For when the closure will be used as a lock. The closure may not be used
+ * until after a closure_lock() or closure_trylock().
+ */
+#define closure_init_unlocked(cl)				\
+do {								\
+	memset((cl), 0, sizeof(*(cl)));				\
+	closure_init_type(cl, NULL, false);			\
+} while (0)
+
+/**
+ * closure_lock() - lock and initialize a closure.
+ * @cl:		the closure to lock
+ * @parent:	the new parent for this closure
+ *
+ * The closure must be of one of the types that has a waitlist (otherwise we
+ * wouldn't be able to sleep on contention).
+ *
+ * @parent has exactly the same meaning as in closure_init(); if non null, the
+ * closure will take a reference on @parent which will be released when it is
+ * unlocked.
+ */
+#define closure_lock(cl, parent)				\
+	__closure_lock(__to_internal_closure(cl), parent, &(cl)->wait)
+
+/**
+ * closure_delay() - delay some number of jiffies
+ * @cl:		the closure that will sleep
+ * @delay:	the delay in jiffies
+ *
+ * Takes a refcount on @cl which will be released after @delay jiffies; this may
+ * be used to have a function run after a delay with continue_at(), or
+ * closure_sync() may be used for a convoluted version of msleep().
+ */
+#define closure_delay(cl, delay)			\
+	__closure_delay(__to_internal_closure(cl), delay, &(cl)->timer)
+
+#define closure_flush(cl)				\
+	__closure_flush(__to_internal_closure(cl), &(cl)->timer)
+
+#define closure_flush_sync(cl)				\
+	__closure_flush_sync(__to_internal_closure(cl), &(cl)->timer)
+
+static inline void __closure_end_sleep(struct closure *cl)
+{
+	__set_current_state(TASK_RUNNING);
+
+	if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
+		atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
+}
+
+static inline void __closure_start_sleep(struct closure *cl)
+{
+	closure_set_ip(cl);
+	cl->task = current;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+
+	if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+		atomic_add(CLOSURE_SLEEPING, &cl->remaining);
+}
+
+/**
+ * closure_blocking() - returns true if the closure is in blocking mode.
+ *
+ * If a closure is in blocking mode, closure_wait_event() will sleep until the
+ * condition is true instead of waiting asynchronously.
+ */
+static inline bool closure_blocking(struct closure *cl)
+{
+	return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
+}
+
+/**
+ * set_closure_blocking() - put a closure in blocking mode.
+ *
+ * If a closure is in blocking mode, closure_wait_event() will sleep until the
+ * condition is true instead of waiting asynchronously.
+ *
+ * Not thread safe - can only be called by the thread running the closure.
+ */
+static inline void set_closure_blocking(struct closure *cl)
+{
+	if (!closure_blocking(cl))
+		atomic_add(CLOSURE_BLOCKING, &cl->remaining);
+}
+
+/*
+ * Not thread safe - can only be called by the thread running the closure.
+ */
+static inline void clear_closure_blocking(struct closure *cl)
+{
+	if (closure_blocking(cl))
+		atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
+}
+
+/**
+ * closure_wake_up() - wake up all closures on a wait list.
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+	smp_mb();
+	__closure_wake_up(list);
+}
+
+/*
+ * Wait on an event, synchronously or asynchronously - analogous to wait_event()
+ * but for closures.
+ *
+ * The loop is oddly structured so as to avoid a race; we must check the
+ * condition again after we've added ourself to the waitlist. We know if we were
+ * already on the waitlist because closure_wait() returns false; thus, we only
+ * schedule or break if closure_wait() returns false. If it returns true, we
+ * just loop again - rechecking the condition.
+ *
+ * The __closure_wake_up() is necessary because we may race with the event
+ * becoming true; i.e. we see event false -> wait -> recheck condition, but the
+ * thread that made the event true may have called closure_wake_up() before we
+ * added ourselves to the wait list.
+ *
+ * We have to call closure_sync() at the end instead of just
+ * __closure_end_sleep() because a different thread might've called
+ * closure_wake_up() before us and gotten preempted before they dropped the
+ * refcount on our closure. If this was a stack allocated closure, that would be
+ * bad.
+ */
+#define __closure_wait_event(list, cl, condition, _block)		\
+({									\
+	bool block = _block;						\
+	typeof(condition) ret;						\
+									\
+	while (1) {							\
+		ret = (condition);					\
+		if (ret) {						\
+			__closure_wake_up(list);			\
+			if (block)					\
+				closure_sync(cl);			\
+									\
+			break;						\
+		}							\
+									\
+		if (block)						\
+			__closure_start_sleep(cl);			\
+									\
+		if (!closure_wait(list, cl)) {				\
+			if (!block)					\
+				break;					\
+									\
+			schedule();					\
+		}							\
+	}								\
+									\
+	ret;								\
+})
+
+/**
+ * closure_wait_event() - wait on a condition, synchronously or asynchronously.
+ * @list:	the wait list to wait on
+ * @cl:		the closure that is doing the waiting
+ * @condition:	a C expression for the event to wait for
+ *
+ * If the closure is in blocking mode, sleeps until the @condition evaluates to
+ * true - exactly like wait_event().
+ *
+ * If the closure is not in blocking mode, waits asynchronously; if the
+ * condition is currently false the @cl is put onto @list and returns. @list
+ * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
+ * wait for another thread to wake up @list, which drops the refcount on @cl.
+ *
+ * Returns the value of @condition; @cl will be on @list iff @condition was
+ * false.
+ *
+ * closure_wake_up(@list) must be called after changing any variable that could
+ * cause @condition to become true.
+ */
+#define closure_wait_event(list, cl, condition)				\
+	__closure_wait_event(list, cl, condition, closure_blocking(cl))
+
+#define closure_wait_event_async(list, cl, condition)			\
+	__closure_wait_event(list, cl, condition, false)
+
+#define closure_wait_event_sync(list, cl, condition)			\
+	__closure_wait_event(list, cl, condition, true)
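+
+/*
+ * The btree verify code, for instance, waits for a node's IO closure to
+ * go idle with:
+ *
+ *	closure_wait_event(&b->io.wait, &cl,
+ *			   atomic_read(&b->io.cl.remaining) == -1);
+ *
+ * blocking or not depending on whether the waiting closure is in
+ * blocking mode.
+ */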
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+				  struct workqueue_struct *wq)
+{
+	BUG_ON(object_is_on_stack(cl));
+	closure_set_ip(cl);
+	cl->fn = fn;
+	cl->wq = wq;
+	/* orders the stores above against the atomic_dec() in closure_put() */
+	smp_mb__before_atomic_dec();
+}
+
+#define continue_at(_cl, _fn, _wq)					\
+do {									\
+	set_closure_fn(_cl, _fn, _wq);					\
+	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
+	return;								\
+} while (0)
+
+#define closure_return(_cl)	continue_at((_cl), NULL, NULL)
+
+#define continue_at_nobarrier(_cl, _fn, _wq)				\
+do {									\
+	set_closure_fn(_cl, _fn, _wq);					\
+	closure_queue(cl);						\
+	return;								\
+} while (0)
+
+#define closure_return_with_destructor(_cl, _destructor)		\
+do {									\
+	set_closure_fn(_cl, _destructor, NULL);				\
+	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
+	return;								\
+} while (0)
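+
+/*
+ * A sketch of the canonical tail-call pattern: finish this step, then
+ * have next_step() run out of wq once all outstanding refs are put:
+ *
+ *	static void next_step(struct closure *cl)
+ *	{
+ *		finish up, then:
+ *		closure_return(cl);
+ *	}
+ *
+ *	continue_at(cl, next_step, wq);
+ */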
+
+static inline void closure_call(struct closure *cl, closure_fn fn,
+				struct workqueue_struct *wq,
+				struct closure *parent)
+{
+	closure_init(cl, parent);
+	continue_at_nobarrier(cl, fn, wq);
+}
+
+static inline void closure_trylock_call(struct closure *cl, closure_fn fn,
+					struct workqueue_struct *wq,
+					struct closure *parent)
+{
+	if (closure_trylock(cl, parent))
+		continue_at_nobarrier(cl, fn, wq);
+}
+
+#endif /* _LINUX_CLOSURE_H */
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
new file mode 100644
index 0000000..89fd520
--- /dev/null
+++ b/drivers/md/bcache/debug.c
@@ -0,0 +1,565 @@
+/*
+ * Assorted bcache debug code
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+static struct dentry *debug;
+
+const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i)) {
+			struct cache *ca = PTR_CACHE(c, k, i);
+			size_t bucket = PTR_BUCKET_NR(c, k, i);
+			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+			if (KEY_SIZE(k) + r > c->sb.bucket_size)
+				return "bad, length too big";
+			if (bucket <  ca->sb.first_bucket)
+				return "bad, short offset";
+			if (bucket >= ca->sb.nbuckets)
+				return "bad, offset past end of device";
+			if (ptr_stale(c, k, i))
+				return "stale";
+		}
+
+	if (!bkey_cmp(k, &ZERO_KEY))
+		return "bad, null key";
+	if (!KEY_PTRS(k))
+		return "bad, no pointers";
+	if (!KEY_SIZE(k))
+		return "zeroed key";
+	return "";
+}
+
+struct keyprint_hack bch_pkey(const struct bkey *k)
+{
+	unsigned i = 0;
+	struct keyprint_hack r;
+	char *out = r.s, *end = r.s + KEYHACK_SIZE;
+
+#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))
+
+	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
+
+	if (KEY_PTRS(k))
+		while (1) {
+			p("%llu:%llu gen %llu",
+			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));
+
+			if (++i == KEY_PTRS(k))
+				break;
+
+			p(", ");
+		}
+
+	p("]");
+
+	if (KEY_DIRTY(k))
+		p(" dirty");
+	if (KEY_CSUM(k))
+		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+	return r;
+}
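+
+/*
+ * The formatted key looks like the following, with illustrative values:
+ *
+ *	2:4096 len 512 -> [0:1024 gen 3] dirty
+ */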
+
+struct keyprint_hack bch_pbtree(const struct btree *b)
+{
+	struct keyprint_hack r;
+
+	snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0),
+		 b->level, b->c->root ? b->c->root->level : -1);
+	return r;
+}
+
+#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
+
+static bool skipped_backwards(struct btree *b, struct bkey *k)
+{
+	return bkey_cmp(k, (!b->level)
+			? &START_KEY(bkey_next(k))
+			: bkey_next(k)) > 0;
+}
+
+static void dump_bset(struct btree *b, struct bset *i)
+{
+	struct bkey *k;
+	unsigned j;
+
+	for (k = i->start; k < end(i); k = bkey_next(k)) {
+		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
+		       (uint64_t *) k - i->d, i->keys, pkey(k));
+
+		for (j = 0; j < KEY_PTRS(k); j++) {
+			size_t n = PTR_BUCKET_NR(b->c, k, j);
+			printk(" bucket %zu", n);
+
+			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+				printk(" prio %i",
+				       PTR_BUCKET(b->c, k, j)->prio);
+		}
+
+		printk(" %s\n", bch_ptr_status(b->c, k));
+
+		if (bkey_next(k) < end(i) &&
+		    skipped_backwards(b, k))
+			printk(KERN_ERR "Key skipped backwards\n");
+	}
+}
+
+#endif
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_btree_verify(struct btree *b, struct bset *new)
+{
+	struct btree *v = b->c->verify_data;
+	struct closure cl;
+	closure_init_stack(&cl);
+
+	if (!b->c->verify)
+		return;
+
+	closure_wait_event(&b->io.wait, &cl,
+			   atomic_read(&b->io.cl.remaining) == -1);
+
+	mutex_lock(&b->c->verify_lock);
+
+	bkey_copy(&v->key, &b->key);
+	v->written = 0;
+	v->level = b->level;
+
+	bch_btree_read(v);
+	closure_wait_event(&v->io.wait, &cl,
+			   atomic_read(&v->io.cl.remaining) == -1);
+
+	if (new->keys != v->sets[0].data->keys ||
+	    memcmp(new->start,
+		   v->sets[0].data->start,
+		   (void *) end(new) - (void *) new->start)) {
+		unsigned i, j;
+
+		console_lock();
+
+		printk(KERN_ERR "*** original memory node:\n");
+		for (i = 0; i <= b->nsets; i++)
+			dump_bset(b, b->sets[i].data);
+
+		printk(KERN_ERR "*** sorted memory node:\n");
+		dump_bset(b, new);
+
+		printk(KERN_ERR "*** on disk node:\n");
+		dump_bset(v, v->sets[0].data);
+
+		for (j = 0; j < new->keys; j++)
+			if (new->d[j] != v->sets[0].data->d[j])
+				break;
+
+		console_unlock();
+		panic("verify failed at %u\n", j);
+	}
+
+	mutex_unlock(&b->c->verify_lock);
+}
+
+static void data_verify_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	closure_put(cl);
+}
+
+void bch_data_verify(struct search *s)
+{
+	char name[BDEVNAME_SIZE];
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	struct closure *cl = &s->cl;
+	struct bio *check;
+	struct bio_vec *bv;
+	int i;
+
+	if (!s->unaligned_bvec)
+		bio_for_each_segment(bv, s->orig_bio, i)
+			bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
+
+	check = bio_clone(s->orig_bio, GFP_NOIO);
+	if (!check)
+		return;
+
+	if (bch_bio_alloc_pages(check, GFP_NOIO))
+		goto out_put;
+
+	check->bi_rw		= READ_SYNC;
+	check->bi_private	= cl;
+	check->bi_end_io	= data_verify_endio;
+
+	closure_bio_submit(check, cl, &dc->disk);
+	closure_sync(cl);
+
+	bio_for_each_segment(bv, s->orig_bio, i) {
+		void *p1 = kmap(bv->bv_page);
+		void *p2 = kmap(check->bi_io_vec[i].bv_page);
+
+		if (memcmp(p1 + bv->bv_offset,
+			   p2 + bv->bv_offset,
+			   bv->bv_len))
+			printk(KERN_ERR
+			       "bcache (%s): verify failed at sector %llu\n",
+			       bdevname(dc->bdev, name),
+			       (uint64_t) s->orig_bio->bi_sector);
+
+		kunmap(bv->bv_page);
+		kunmap(check->bi_io_vec[i].bv_page);
+	}
+
+	__bio_for_each_segment(bv, check, i, 0)
+		__free_page(bv->bv_page);
+out_put:
+	bio_put(check);
+}
+
+#endif
+
+#ifdef CONFIG_BCACHE_EDEBUG
+
+unsigned bch_count_data(struct btree *b)
+{
+	unsigned ret = 0;
+	struct btree_iter iter;
+	struct bkey *k;
+
+	if (!b->level)
+		for_each_key(b, k, &iter)
+			ret += KEY_SIZE(k);
+	return ret;
+}
+
+static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
+				   va_list args)
+{
+	unsigned i;
+
+	console_lock();
+
+	for (i = 0; i <= b->nsets; i++)
+		dump_bset(b, b->sets[i].data);
+
+	vprintk(fmt, args);
+
+	console_unlock();
+
+	panic("at %s\n", pbtree(b));
+}
+
+void bch_check_key_order_msg(struct btree *b, struct bset *i,
+			     const char *fmt, ...)
+{
+	struct bkey *k;
+
+	if (!i->keys)
+		return;
+
+	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
+		if (skipped_backwards(b, k)) {
+			va_list args;
+			va_start(args, fmt);
+
+			vdump_bucket_and_panic(b, fmt, args);
+			va_end(args);
+		}
+}
+
+void bch_check_keys(struct btree *b, const char *fmt, ...)
+{
+	va_list args;
+	struct bkey *k, *p = NULL;
+	struct btree_iter iter;
+
+	if (b->level)
+		return;
+
+	for_each_key(b, k, &iter) {
+		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
+			printk(KERN_ERR "Keys out of order:\n");
+			goto bug;
+		}
+
+		if (bch_ptr_invalid(b, k))
+			continue;
+
+		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
+			printk(KERN_ERR "Overlapping keys:\n");
+			goto bug;
+		}
+		p = k;
+	}
+	return;
+bug:
+	va_start(args, fmt);
+	vdump_bucket_and_panic(b, fmt, args);
+	va_end(args);
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+/* XXX: cache set refcounting */
+
+struct dump_iterator {
+	char			buf[PAGE_SIZE];
+	size_t			bytes;
+	struct cache_set	*c;
+	struct keybuf		keys;
+};
+
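+/* Keybuf predicate that accepts every key, so the dump walks the whole btree */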
+static bool dump_pred(struct keybuf *buf, struct bkey *k)
+{
+	return true;
+}
+
+static ssize_t bch_dump_read(struct file *file, char __user *buf,
+			     size_t size, loff_t *ppos)
+{
+	struct dump_iterator *i = file->private_data;
+	ssize_t ret = 0;
+
+	while (size) {
+		struct keybuf_key *w;
+		unsigned bytes = min(i->bytes, size);
+
+		if (copy_to_user(buf, i->buf, bytes))
+			return -EFAULT;
+
+		ret	 += bytes;
+		buf	 += bytes;
+		size	 -= bytes;
+		i->bytes -= bytes;
+		memmove(i->buf, i->buf + bytes, i->bytes);
+
+		if (i->bytes)
+			break;
+
+		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY);
+		if (!w)
+			break;
+
+		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key));
+		bch_keybuf_del(&i->keys, w);
+	}
+
+	return ret;
+}
+
+static int bch_dump_open(struct inode *inode, struct file *file)
+{
+	struct cache_set *c = inode->i_private;
+	struct dump_iterator *i;
+
+	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
+	if (!i)
+		return -ENOMEM;
+
+	file->private_data = i;
+	i->c = c;
+	bch_keybuf_init(&i->keys, dump_pred);
+	i->keys.last_scanned = KEY(0, 0, 0);
+
+	return 0;
+}
+
+static int bch_dump_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations cache_set_debug_ops = {
+	.owner		= THIS_MODULE,
+	.open		= bch_dump_open,
+	.read		= bch_dump_read,
+	.release	= bch_dump_release
+};
+
+void bch_debug_init_cache_set(struct cache_set *c)
+{
+	if (!IS_ERR_OR_NULL(debug)) {
+		char name[50];
+		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
+
+		c->debug = debugfs_create_file(name, 0400, debug, c,
+					       &cache_set_debug_ops);
+	}
+}
+
+#endif
+
+/* Fuzz tester has rotted: */
+#if 0
+
+static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
+			  const char *buffer, size_t size)
+{
+	void dump(struct btree *b)
+	{
+		struct bset *i;
+
+		for (i = b->sets[0].data;
+		     index(i, b) < btree_blocks(b) &&
+		     i->seq == b->sets[0].data->seq;
+		     i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c))
+			dump_bset(b, i);
+	}
+
+	struct cache_sb *sb;
+	struct cache_set *c;
+	struct btree *all[3], *b, *fill, *orig;
+	int j;
+
+	struct btree_op op;
+	bch_btree_op_init_stack(&op);
+
+	sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL);
+	if (!sb)
+		return -ENOMEM;
+
+	sb->bucket_size = 128;
+	sb->block_size = 4;
+
+	c = bch_cache_set_alloc(sb);
+	if (!c)
+		return -ENOMEM;
+
+	for (j = 0; j < 3; j++) {
+		BUG_ON(list_empty(&c->btree_cache));
+		all[j] = list_first_entry(&c->btree_cache, struct btree, list);
+		list_del_init(&all[j]->list);
+
+		all[j]->key = KEY(0, 0, c->sb.bucket_size);
+		bkey_copy_key(&all[j]->key, &MAX_KEY);
+	}
+
+	b = all[0];
+	fill = all[1];
+	orig = all[2];
+
+	while (1) {
+		for (j = 0; j < 3; j++)
+			all[j]->written = all[j]->nsets = 0;
+
+		bch_bset_init_next(b);
+
+		while (1) {
+			struct bset *i = write_block(b);
+			struct bkey *k = op.keys.top;
+			unsigned rand;
+
+			bkey_init(k);
+			rand = get_random_int();
+
+			op.type = rand & 1
+				? BTREE_INSERT
+				: BTREE_REPLACE;
+			rand >>= 1;
+
+			SET_KEY_SIZE(k, bucket_remainder(c, rand));
+			rand >>= c->bucket_bits;
+			rand &= 1024 * 512 - 1;
+			rand += c->sb.bucket_size;
+			SET_KEY_OFFSET(k, rand);
+#if 0
+			SET_KEY_PTRS(k, 1);
+#endif
+			bch_keylist_push(&op.keys);
+			bch_btree_insert_keys(b, &op);
+
+			if (should_split(b) ||
+			    set_blocks(i, b->c) !=
+			    __set_blocks(i, i->keys + 15, b->c)) {
+				i->csum = csum_set(i);
+
+				memcpy(write_block(fill),
+				       i, set_bytes(i));
+
+				b->written += set_blocks(i, b->c);
+				fill->written = b->written;
+				if (b->written == btree_blocks(b))
+					break;
+
+				bch_btree_sort_lazy(b);
+				bch_bset_init_next(b);
+			}
+		}
+
+		memcpy(orig->sets[0].data,
+		       fill->sets[0].data,
+		       btree_bytes(c));
+
+		bch_btree_sort(b);
+		fill->written = 0;
+		bch_btree_read_done(&fill->io.cl);
+
+		if (b->sets[0].data->keys != fill->sets[0].data->keys ||
+		    memcmp(b->sets[0].data->start,
+			   fill->sets[0].data->start,
+			   b->sets[0].data->keys * sizeof(uint64_t))) {
+			struct bset *i = b->sets[0].data;
+			struct bkey *k, *l;
+
+			for (k = i->start,
+			     l = fill->sets[0].data->start;
+			     k < end(i);
+			     k = bkey_next(k), l = bkey_next(l))
+				if (bkey_cmp(k, l) ||
+				    KEY_SIZE(k) != KEY_SIZE(l))
+					pr_err("key %zi differs: %s != %s",
+					       (uint64_t *) k - i->d,
+					       pkey(k), pkey(l));
+
+			for (j = 0; j < 3; j++) {
+				pr_err("**** Set %i ****", j);
+				dump(all[j]);
+			}
+			panic("\n");
+		}
+
+		pr_info("fuzz complete: %i keys", b->sets[0].data->keys);
+	}
+}
+
+kobj_attribute_write(fuzz, btree_fuzz);
+#endif
+
+void bch_debug_exit(void)
+{
+	if (!IS_ERR_OR_NULL(debug))
+		debugfs_remove_recursive(debug);
+}
+
+int __init bch_debug_init(struct kobject *kobj)
+{
+	int ret = 0;
+#if 0
+	ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr);
+	if (ret)
+		return ret;
+#endif
+
+	debug = debugfs_create_dir("bcache", NULL);
+	return ret;
+}
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
new file mode 100644
index 0000000..f9378a2
--- /dev/null
+++ b/drivers/md/bcache/debug.h
@@ -0,0 +1,54 @@
+#ifndef _BCACHE_DEBUG_H
+#define _BCACHE_DEBUG_H
+
+/* Btree/bkey debug printing */
+
+#define KEYHACK_SIZE 80
+struct keyprint_hack {
+	char s[KEYHACK_SIZE];
+};
+
+struct keyprint_hack bch_pkey(const struct bkey *k);
+struct keyprint_hack bch_pbtree(const struct btree *b);
+#define pkey(k)		(&bch_pkey(k).s[0])
+#define pbtree(b)	(&bch_pbtree(b).s[0])
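+
+/*
+ * Returning the buffer inside a struct by value means pkey()/pbtree() can be
+ * passed directly as printk() arguments: the temporary lives until the end of
+ * the full expression, so there's no shared static buffer to race on.
+ */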
+
+#ifdef CONFIG_BCACHE_EDEBUG
+
+unsigned bch_count_data(struct btree *);
+void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...);
+void bch_check_keys(struct btree *, const char *, ...);
+
+#define bch_check_key_order(b, i)			\
+	bch_check_key_order_msg(b, i, "keys out of order")
+#define EBUG_ON(cond)		BUG_ON(cond)
+
+#else /* EDEBUG */
+
+#define bch_count_data(b)				0
+#define bch_check_key_order(b, i)			do {} while (0)
+#define bch_check_key_order_msg(b, i, ...)		do {} while (0)
+#define bch_check_keys(b, ...)				do {} while (0)
+#define EBUG_ON(cond)					do {} while (0)
+
+#endif
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_btree_verify(struct btree *, struct bset *);
+void bch_data_verify(struct search *);
+
+#else /* DEBUG */
+
+static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
+static inline void bch_data_verify(struct search *s) {}
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void bch_debug_init_cache_set(struct cache_set *);
+#else
+static inline void bch_debug_init_cache_set(struct cache_set *c) {}
+#endif
+
+#endif
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
new file mode 100644
index 0000000..48efd4d
--- /dev/null
+++ b/drivers/md/bcache/io.c
@@ -0,0 +1,397 @@
+/*
+ * Some low level IO code, and hacks for various block layer limitations
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "bset.h"
+#include "debug.h"
+
+static void bch_bi_idx_hack_endio(struct bio *bio, int error)
+{
+	struct bio *p = bio->bi_private;
+
+	bio_endio(p, error);
+	bio_put(bio);
+}
+
+static void bch_generic_make_request_hack(struct bio *bio)
+{
+	if (bio->bi_idx) {
+		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+
+		memcpy(clone->bi_io_vec,
+		       bio_iovec(bio),
+		       bio_segments(bio) * sizeof(struct bio_vec));
+
+		clone->bi_sector	= bio->bi_sector;
+		clone->bi_bdev		= bio->bi_bdev;
+		clone->bi_rw		= bio->bi_rw;
+		clone->bi_vcnt		= bio_segments(bio);
+		clone->bi_size		= bio->bi_size;
+
+		clone->bi_private	= bio;
+		clone->bi_end_io	= bch_bi_idx_hack_endio;
+
+		bio = clone;
+	}
+
+	/*
+	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
+	 * bios might have had more than that (before we split them per device
+	 * limitations).
+	 *
+	 * To be taken out once immutable bvec stuff is in.
+	 */
+	bio->bi_max_vecs = bio->bi_vcnt;
+
+	generic_make_request(bio);
+}
+
+/**
+ * bch_bio_split - split a bio
+ * @bio:	bio to split
+ * @sectors:	number of sectors to split from the front of @bio
+ * @gfp:	gfp mask
+ * @bs:		bio set to allocate from
+ *
+ * Allocates and returns a new bio which represents @sectors from the start of
+ * @bio, and updates @bio to represent the remaining sectors.
+ *
+ * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
+ * unchanged.
+ *
+ * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
+ * bvec boundary; it is the caller's responsibility to ensure that @bio is not
+ * freed before the split.
+ *
+ * If bch_bio_split() is running under generic_make_request(), it's not safe to
+ * allocate more than one bio from the same bio set. Therefore, if it is running
+ * under generic_make_request() it masks out __GFP_WAIT when doing the
+ * allocation. The caller must check for failure if there's any possibility of
+ * it being called from under generic_make_request(); it is then the caller's
+ * responsibility to retry from a safe context (by e.g. punting to workqueue).
+ */
+struct bio *bch_bio_split(struct bio *bio, int sectors,
+			  gfp_t gfp, struct bio_set *bs)
+{
+	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
+	struct bio_vec *bv;
+	struct bio *ret = NULL;
+
+	BUG_ON(sectors <= 0);
+
+	/*
+	 * If we're being called from underneath generic_make_request() and we
+	 * already allocated any bios from this bio set, we risk deadlock if we
+	 * use the mempool. So instead, we possibly fail and let the caller punt
+	 * to workqueue or somesuch and retry in a safe context.
+	 */
+	if (current->bio_list)
+		gfp &= ~__GFP_WAIT;
+
+	if (sectors >= bio_sectors(bio))
+		return bio;
+
+	if (bio->bi_rw & REQ_DISCARD) {
+		ret = bio_alloc_bioset(gfp, 1, bs);
+		if (!ret)
+			return NULL;
+		idx = 0;
+		goto out;
+	}
+
+	bio_for_each_segment(bv, bio, idx) {
+		vcnt = idx - bio->bi_idx;
+
+		if (!nbytes) {
+			ret = bio_alloc_bioset(gfp, vcnt, bs);
+			if (!ret)
+				return NULL;
+
+			memcpy(ret->bi_io_vec, bio_iovec(bio),
+			       sizeof(struct bio_vec) * vcnt);
+
+			break;
+		} else if (nbytes < bv->bv_len) {
+			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
+			if (!ret)
+				return NULL;
+
+			memcpy(ret->bi_io_vec, bio_iovec(bio),
+			       sizeof(struct bio_vec) * vcnt);
+
+			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
+			bv->bv_offset	+= nbytes;
+			bv->bv_len	-= nbytes;
+			break;
+		}
+
+		nbytes -= bv->bv_len;
+	}
+out:
+	ret->bi_bdev	= bio->bi_bdev;
+	ret->bi_sector	= bio->bi_sector;
+	ret->bi_size	= sectors << 9;
+	ret->bi_rw	= bio->bi_rw;
+	ret->bi_vcnt	= vcnt;
+	ret->bi_max_vecs = vcnt;
+
+	bio->bi_sector	+= sectors;
+	bio->bi_size	-= sectors << 9;
+	bio->bi_idx	 = idx;
+
+	if (bio_integrity(bio)) {
+		if (bio_integrity_clone(ret, bio, gfp)) {
+			bio_put(ret);
+			return NULL;
+		}
+
+		bio_integrity_trim(ret, 0, bio_sectors(ret));
+		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
+	}
+
+	return ret;
+}
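+
+/*
+ * Sketch of the retry pattern described above (the in-tree caller is
+ * __bch_bio_submit_split() below): when the split fails under
+ * generic_make_request(), punt to a workqueue where a __GFP_WAIT allocation
+ * is safe and try again:
+ *
+ *	n = bch_bio_split(bio, sectors, GFP_NOIO, bs);
+ *	if (!n)
+ *		continue_at(cl, __bch_bio_submit_split, system_wq);
+ */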
+
+static unsigned bch_bio_max_sectors(struct bio *bio)
+{
+	unsigned ret = bio_sectors(bio);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
+				      queue_max_segments(q));
+	struct bio_vec *bv, *end = bio_iovec(bio) +
+		min_t(int, bio_segments(bio), max_segments);
+
+	if (bio->bi_rw & REQ_DISCARD)
+		return min(ret, q->limits.max_discard_sectors);
+
+	if (bio_segments(bio) > max_segments ||
+	    q->merge_bvec_fn) {
+		ret = 0;
+
+		for (bv = bio_iovec(bio); bv < end; bv++) {
+			struct bvec_merge_data bvm = {
+				.bi_bdev	= bio->bi_bdev,
+				.bi_sector	= bio->bi_sector,
+				.bi_size	= ret << 9,
+				.bi_rw		= bio->bi_rw,
+			};
+
+			if (q->merge_bvec_fn &&
+			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
+				break;
+
+			ret += bv->bv_len >> 9;
+		}
+	}
+
+	ret = min(ret, queue_max_sectors(q));
+
+	WARN_ON(!ret);
+	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+
+	return ret;
+}
+
+static void bch_bio_submit_split_done(struct closure *cl)
+{
+	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
+
+	s->bio->bi_end_io = s->bi_end_io;
+	s->bio->bi_private = s->bi_private;
+	bio_endio(s->bio, 0);
+
+	closure_debug_destroy(&s->cl);
+	mempool_free(s, s->p->bio_split_hook);
+}
+
+static void bch_bio_submit_split_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
+
+	if (error)
+		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);
+
+	bio_put(bio);
+	closure_put(cl);
+}
+
+static void __bch_bio_submit_split(struct closure *cl)
+{
+	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
+	struct bio *bio = s->bio, *n;
+
+	do {
+		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+				  GFP_NOIO, s->p->bio_split);
+		if (!n)
+			continue_at(cl, __bch_bio_submit_split, system_wq);
+
+		n->bi_end_io	= bch_bio_submit_split_endio;
+		n->bi_private	= cl;
+
+		closure_get(cl);
+		bch_generic_make_request_hack(n);
+	} while (n != bio);
+
+	continue_at(cl, bch_bio_submit_split_done, NULL);
+}
+
+void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
+{
+	struct bio_split_hook *s;
+
+	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
+		goto submit;
+
+	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
+		goto submit;
+
+	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
+
+	s->bio		= bio;
+	s->p		= p;
+	s->bi_end_io	= bio->bi_end_io;
+	s->bi_private	= bio->bi_private;
+	bio_get(bio);
+
+	closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
+	return;
+submit:
+	bch_generic_make_request_hack(bio);
+}
+
+/* Bios with headers */
+
+void bch_bbio_free(struct bio *bio, struct cache_set *c)
+{
+	struct bbio *b = container_of(bio, struct bbio, bio);
+	mempool_free(b, c->bio_meta);
+}
+
+struct bio *bch_bbio_alloc(struct cache_set *c)
+{
+	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
+	struct bio *bio = &b->bio;
+
+	bio_init(bio);
+	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
+	bio->bi_max_vecs	 = bucket_pages(c);
+	bio->bi_io_vec		 = bio->bi_inline_vecs;
+
+	return bio;
+}
+
+void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
+{
+	struct bbio *b = container_of(bio, struct bbio, bio);
+
+	bio->bi_sector	= PTR_OFFSET(&b->key, 0);
+	bio->bi_bdev	= PTR_CACHE(c, &b->key, 0)->bdev;
+
+	b->submit_time_us = local_clock_us();
+	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+}
+
+void bch_submit_bbio(struct bio *bio, struct cache_set *c,
+		     struct bkey *k, unsigned ptr)
+{
+	struct bbio *b = container_of(bio, struct bbio, bio);
+	bch_bkey_copy_single_ptr(&b->key, k, ptr);
+	__bch_submit_bbio(bio, c);
+}
+
+/* IO errors */
+
+void bch_count_io_errors(struct cache *ca, int error, const char *m)
+{
+	/*
+	 * The half-life of an error is:
+	 * log2(1/2)/log2(127/128) * error_decay ~= 88 * error_decay
+	 */
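+	/*
+	 * That is: errors are scaled by 127/128 each time io_count crosses
+	 * error_decay, so after n rescalings e_n = e_0 * (127/128)^n, and
+	 * solving (127/128)^n = 1/2 gives n = log(1/2)/log(127/128) ~= 88.4.
+	 */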
+
+	if (ca->set->error_decay) {
+		unsigned count = atomic_inc_return(&ca->io_count);
+
+		while (count > ca->set->error_decay) {
+			unsigned errors;
+			unsigned old = count;
+			unsigned new = count - ca->set->error_decay;
+
+			/*
+			 * First we subtract error_decay from count; each time
+			 * we successfully do so, we rescale the errors once:
+			 */
+
+			count = atomic_cmpxchg(&ca->io_count, old, new);
+
+			if (count == old) {
+				count = new;
+
+				errors = atomic_read(&ca->io_errors);
+				do {
+					old = errors;
+					new = ((uint64_t) errors * 127) / 128;
+					errors = atomic_cmpxchg(&ca->io_errors,
+								old, new);
+				} while (old != errors);
+			}
+		}
+	}
+
+	if (error) {
+		char buf[BDEVNAME_SIZE];
+		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
+						    &ca->io_errors);
+		errors >>= IO_ERROR_SHIFT;
+
+		if (errors < ca->set->error_limit)
+			pr_err("%s: IO error on %s, recovering",
+			       bdevname(ca->bdev, buf), m);
+		else
+			bch_cache_set_error(ca->set,
+					    "%s: too many IO errors %s",
+					    bdevname(ca->bdev, buf), m);
+	}
+}
+
+void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
+			      int error, const char *m)
+{
+	struct bbio *b = container_of(bio, struct bbio, bio);
+	struct cache *ca = PTR_CACHE(c, &b->key, 0);
+
+	unsigned threshold = bio->bi_rw & REQ_WRITE
+		? c->congested_write_threshold_us
+		: c->congested_read_threshold_us;
+
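+	/*
+	 * Congestion tracking: an IO that takes longer than the threshold
+	 * subtracts its overage (roughly in ms) from c->congested, and fast
+	 * IOs slowly increment it back towards zero - so a negative value
+	 * means the device is currently congested.
+	 */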
+	if (threshold) {
+		unsigned t = local_clock_us();
+
+		int us = t - b->submit_time_us;
+		int congested = atomic_read(&c->congested);
+
+		if (us > (int) threshold) {
+			int ms = us / 1024;
+			c->congested_last_us = t;
+
+			ms = min(ms, CONGESTED_MAX + congested);
+			atomic_sub(ms, &c->congested);
+		} else if (congested < 0)
+			atomic_inc(&c->congested);
+	}
+
+	bch_count_io_errors(ca, error, m);
+}
+
+void bch_bbio_endio(struct cache_set *c, struct bio *bio,
+		    int error, const char *m)
+{
+	struct closure *cl = bio->bi_private;
+
+	bch_bbio_count_io_errors(c, bio, error, m);
+	bio_put(bio);
+	closure_put(cl);
+}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
new file mode 100644
index 0000000..8c8dfdc
--- /dev/null
+++ b/drivers/md/bcache/journal.c
@@ -0,0 +1,787 @@
+/*
+ * bcache journalling code, for btree insertions
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+/*
+ * Journal replay/recovery:
+ *
+ * This code is all driven from run_cache_set(); we first read the journal
+ * entries, do some other stuff, then we mark all the keys in the journal
+ * entries (same as garbage collection would), then we replay them - reinserting
+ * them into the cache in precisely the same order as they appear in the
+ * journal.
+ *
+ * We only journal keys that go in leaf nodes, which simplifies things quite a
+ * bit.
+ */
+
+static void journal_read_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	closure_put(cl);
+}
+
+static int journal_read_bucket(struct cache *ca, struct list_head *list,
+			       struct btree_op *op, unsigned bucket_index)
+{
+	struct journal_device *ja = &ca->journal;
+	struct bio *bio = &ja->bio;
+
+	struct journal_replay *i;
+	struct jset *j, *data = ca->set->journal.w[0].data;
+	unsigned len, left, offset = 0;
+	int ret = 0;
+	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
+
+	pr_debug("reading %llu", (uint64_t) bucket);
+
+	while (offset < ca->sb.bucket_size) {
+reread:		left = ca->sb.bucket_size - offset;
+		len = min_t(unsigned, left, PAGE_SECTORS * 8);
+
+		bio_reset(bio);
+		bio->bi_sector	= bucket + offset;
+		bio->bi_bdev	= ca->bdev;
+		bio->bi_rw	= READ;
+		bio->bi_size	= len << 9;
+
+		bio->bi_end_io	= journal_read_endio;
+		bio->bi_private = &op->cl;
+		bch_bio_map(bio, data);
+
+		closure_bio_submit(bio, &op->cl, ca);
+		closure_sync(&op->cl);
+
+		/* This function could be simpler now since we no longer write
+		 * journal entries that overlap bucket boundaries; this means
+		 * the start of a bucket will always have a valid journal entry
+		 * if it has any journal entries at all.
+		 */
+
+		j = data;
+		while (len) {
+			struct list_head *where;
+			size_t blocks, bytes = set_bytes(j);
+
+			if (j->magic != jset_magic(ca->set))
+				return ret;
+
+			if (bytes > left << 9)
+				return ret;
+
+			if (bytes > len << 9)
+				goto reread;
+
+			if (j->csum != csum_set(j))
+				return ret;
+
+			blocks = set_blocks(j, ca->set);
+
+			while (!list_empty(list)) {
+				i = list_first_entry(list,
+					struct journal_replay, list);
+				if (i->j.seq >= j->last_seq)
+					break;
+				list_del(&i->list);
+				kfree(i);
+			}
+
+			list_for_each_entry_reverse(i, list, list) {
+				if (j->seq == i->j.seq)
+					goto next_set;
+
+				if (j->seq < i->j.last_seq)
+					goto next_set;
+
+				if (j->seq > i->j.seq) {
+					where = &i->list;
+					goto add;
+				}
+			}
+
+			where = list;
+add:
+			i = kmalloc(offsetof(struct journal_replay, j) +
+				    bytes, GFP_KERNEL);
+			if (!i)
+				return -ENOMEM;
+			memcpy(&i->j, j, bytes);
+			list_add(&i->list, where);
+			ret = 1;
+
+			ja->seq[bucket_index] = j->seq;
+next_set:
+			offset	+= blocks * ca->sb.block_size;
+			len	-= blocks * ca->sb.block_size;
+			j = ((void *) j) + blocks * block_bytes(ca);
+		}
+	}
+
+	return ret;
+}
+
+int bch_journal_read(struct cache_set *c, struct list_head *list,
+			struct btree_op *op)
+{
+#define read_bucket(b)							\
+	({								\
+		int ret = journal_read_bucket(ca, list, op, b);		\
+		__set_bit(b, bitmap);					\
+		if (ret < 0)						\
+			return ret;					\
+		ret;							\
+	})
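+
+/*
+ * read_bucket() is a statement-expression macro: it marks the bucket as
+ * checked in the bitmap, returns from bch_journal_read() early on error, and
+ * otherwise evaluates to nonzero iff the bucket contained journal entries.
+ */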
+
+	struct cache *ca;
+	unsigned iter;
+
+	for_each_cache(ca, c, iter) {
+		struct journal_device *ja = &ca->journal;
+		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
+		unsigned i, l, r, m;
+		uint64_t seq;
+
+		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
+
+		/* Read journal buckets ordered by golden ratio hash to quickly
+		 * find a sequence of buckets with valid journal entries
+		 */
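+		/*
+		 * (2654435769 is 2^32 divided by the golden ratio; taking
+		 * successive multiples modulo the bucket count probes the
+		 * buckets in a well-spread order, so any contiguous run of
+		 * buckets holding entries tends to be hit within a few
+		 * iterations.)
+		 */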
+		for (i = 0; i < ca->sb.njournal_buckets; i++) {
+			l = (i * 2654435769U) % ca->sb.njournal_buckets;
+
+			if (test_bit(l, bitmap))
+				break;
+
+			if (read_bucket(l))
+				goto bsearch;
+		}
+
+		/* If that fails, check all the buckets we haven't checked
+		 * already
+		 */
+		pr_debug("falling back to linear search");
+
+		for (l = 0; l < ca->sb.njournal_buckets; l++) {
+			if (test_bit(l, bitmap))
+				continue;
+
+			if (read_bucket(l))
+				goto bsearch;
+		}
+bsearch:
+		/* Binary search */
+		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+		pr_debug("starting binary search, l %u r %u", l, r);
+
+		while (l + 1 < r) {
+			m = (l + r) >> 1;
+
+			if (read_bucket(m))
+				l = m;
+			else
+				r = m;
+		}
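+		/*
+		 * (The invariant above: bucket l holds journal entries and
+		 * bucket r doesn't, so the search converges on the most
+		 * recently written bucket.)
+		 */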
+
+		/* Read buckets in reverse order until we stop finding more
+		 * journal entries
+		 */
+		pr_debug("finishing up");
+		l = m;
+
+		while (1) {
+			if (!l--)
+				l = ca->sb.njournal_buckets - 1;
+
+			if (l == m)
+				break;
+
+			if (test_bit(l, bitmap))
+				continue;
+
+			if (!read_bucket(l))
+				break;
+		}
+
+		seq = 0;
+
+		for (i = 0; i < ca->sb.njournal_buckets; i++)
+			if (ja->seq[i] > seq) {
+				seq = ja->seq[i];
+				ja->cur_idx = ja->discard_idx =
+					ja->last_idx = i;
+			}
+	}
+
+	c->journal.seq = list_entry(list->prev,
+				    struct journal_replay,
+				    list)->j.seq;
+
+	return 0;
+#undef read_bucket
+}
+
+void bch_journal_mark(struct cache_set *c, struct list_head *list)
+{
+	atomic_t p = { 0 };
+	struct bkey *k;
+	struct journal_replay *i;
+	struct journal *j = &c->journal;
+	uint64_t last = j->seq;
+
+	/*
+	 * journal.pin should never fill up - we never write a journal
+	 * entry when it would fill up. But if for some reason it does, we
+	 * iterate over the list in reverse order so that we can just skip that
+	 * refcount instead of bugging.
+	 */
+
+	list_for_each_entry_reverse(i, list, list) {
+		BUG_ON(last < i->j.seq);
+		i->pin = NULL;
+
+		while (last-- != i->j.seq)
+			if (fifo_free(&j->pin) > 1) {
+				fifo_push_front(&j->pin, p);
+				atomic_set(&fifo_front(&j->pin), 0);
+			}
+
+		if (fifo_free(&j->pin) > 1) {
+			fifo_push_front(&j->pin, p);
+			i->pin = &fifo_front(&j->pin);
+			atomic_set(i->pin, 1);
+		}
+
+		for (k = i->j.start;
+		     k < end(&i->j);
+		     k = bkey_next(k)) {
+			unsigned j;
+
+			for (j = 0; j < KEY_PTRS(k); j++) {
+				struct bucket *g = PTR_BUCKET(c, k, j);
+				atomic_inc(&g->pin);
+
+				if (g->prio == BTREE_PRIO &&
+				    !ptr_stale(c, k, j))
+					g->prio = INITIAL_PRIO;
+			}
+
+			__bch_btree_mark_key(c, 0, k);
+		}
+	}
+}
+
+int bch_journal_replay(struct cache_set *s, struct list_head *list,
+			  struct btree_op *op)
+{
+	int ret = 0, keys = 0, entries = 0;
+	struct bkey *k;
+	struct journal_replay *i =
+		list_entry(list->prev, struct journal_replay, list);
+
+	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
+
+	list_for_each_entry(i, list, list) {
+		BUG_ON(i->pin && atomic_read(i->pin) != 1);
+
+		if (n != i->j.seq)
+			pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)",
+			       n, i->j.seq - 1, start, end);
+
+		for (k = i->j.start;
+		     k < end(&i->j);
+		     k = bkey_next(k)) {
+			pr_debug("%s", pkey(k));
+			bkey_copy(op->keys.top, k);
+			bch_keylist_push(&op->keys);
+
+			op->journal = i->pin;
+			atomic_inc(op->journal);
+
+			ret = bch_btree_insert(op, s);
+			if (ret)
+				goto err;
+
+			BUG_ON(!bch_keylist_empty(&op->keys));
+			keys++;
+
+			cond_resched();
+		}
+
+		if (i->pin)
+			atomic_dec(i->pin);
+		n = i->j.seq + 1;
+		entries++;
+	}
+
+	pr_info("journal replay done, %i keys in %i entries, seq %llu",
+		keys, entries, end);
+
+	while (!list_empty(list)) {
+		i = list_first_entry(list, struct journal_replay, list);
+		list_del(&i->list);
+		kfree(i);
+	}
+err:
+	closure_sync(&op->cl);
+	return ret;
+}
+
+/* Journalling */
+
+static void btree_flush_write(struct cache_set *c)
+{
+	/*
+	 * Try to find the btree node that references the oldest journal
+	 * entry; best is our current candidate, and is locked if non-NULL:
+	 */
+	struct btree *b, *best = NULL;
+	unsigned iter;
+
+	for_each_cached_btree(b, c, iter) {
+		if (!down_write_trylock(&b->lock))
+			continue;
+
+		if (!btree_node_dirty(b) ||
+		    !btree_current_write(b)->journal) {
+			rw_unlock(true, b);
+			continue;
+		}
+
+		if (!best)
+			best = b;
+		else if (journal_pin_cmp(c,
+					 btree_current_write(best),
+					 btree_current_write(b))) {
+			rw_unlock(true, best);
+			best = b;
+		} else
+			rw_unlock(true, b);
+	}
+
+	if (best)
+		goto out;
+
+	/* We can't find the best btree node, just pick the first */
+	list_for_each_entry(b, &c->btree_cache, list)
+		if (!b->level && btree_node_dirty(b)) {
+			best = b;
+			rw_lock(true, best, best->level);
+			goto found;
+		}
+
+out:
+	if (!best)
+		return;
+found:
+	if (btree_node_dirty(best))
+		bch_btree_write(best, true, NULL);
+	rw_unlock(true, best);
+}
+
+#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
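+
+/*
+ * e.g. if j->seq is 107 and the pin fifo holds 5 refcounts (5 journal entries
+ * still open), the oldest entry we still need is 107 - 5 + 1 == 103.
+ */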
+
+static void journal_discard_endio(struct bio *bio, int error)
+{
+	struct journal_device *ja =
+		container_of(bio, struct journal_device, discard_bio);
+	struct cache *ca = container_of(ja, struct cache, journal);
+
+	atomic_set(&ja->discard_in_flight, DISCARD_DONE);
+
+	closure_wake_up(&ca->set->journal.wait);
+	closure_put(&ca->set->cl);
+}
+
+static void journal_discard_work(struct work_struct *work)
+{
+	struct journal_device *ja =
+		container_of(work, struct journal_device, discard_work);
+
+	submit_bio(0, &ja->discard_bio);
+}
+
+static void do_journal_discard(struct cache *ca)
+{
+	struct journal_device *ja = &ca->journal;
+	struct bio *bio = &ja->discard_bio;
+
+	if (!ca->discard) {
+		ja->discard_idx = ja->last_idx;
+		return;
+	}
+
+	switch (atomic_read(&ja->discard_in_flight)) {
+	case DISCARD_IN_FLIGHT:
+		return;
+
+	case DISCARD_DONE:
+		ja->discard_idx = (ja->discard_idx + 1) %
+			ca->sb.njournal_buckets;
+
+		atomic_set(&ja->discard_in_flight, DISCARD_READY);
+		/* fallthrough */
+
+	case DISCARD_READY:
+		if (ja->discard_idx == ja->last_idx)
+			return;
+
+		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
+
+		bio_init(bio);
+		bio->bi_sector		= bucket_to_sector(ca->set,
+						ca->sb.d[ja->discard_idx]);
+		bio->bi_bdev		= ca->bdev;
+		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
+		bio->bi_max_vecs	= 1;
+		bio->bi_io_vec		= bio->bi_inline_vecs;
+		bio->bi_size		= bucket_bytes(ca);
+		bio->bi_end_io		= journal_discard_endio;
+
+		closure_get(&ca->set->cl);
+		INIT_WORK(&ja->discard_work, journal_discard_work);
+		schedule_work(&ja->discard_work);
+	}
+}
+
+static void journal_reclaim(struct cache_set *c)
+{
+	struct bkey *k = &c->journal.key;
+	struct cache *ca;
+	uint64_t last_seq;
+	unsigned iter, n = 0;
+	atomic_t p;
+
+	while (!atomic_read(&fifo_front(&c->journal.pin)))
+		fifo_pop(&c->journal.pin, p);
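+	/*
+	 * A zeroed refcount at the front means every key in that journal
+	 * entry has made it into the btree, so the entry can be retired and
+	 * last_seq advances past it.
+	 */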
+
+	last_seq = last_seq(&c->journal);
+
+	/* Update last_idx */
+
+	for_each_cache(ca, c, iter) {
+		struct journal_device *ja = &ca->journal;
+
+		while (ja->last_idx != ja->cur_idx &&
+		       ja->seq[ja->last_idx] < last_seq)
+			ja->last_idx = (ja->last_idx + 1) %
+				ca->sb.njournal_buckets;
+	}
+
+	for_each_cache(ca, c, iter)
+		do_journal_discard(ca);
+
+	if (c->journal.blocks_free)
+		return;
+
+	/*
+	 * Allocate:
+	 * XXX: Sort by free journal space
+	 */
+
+	for_each_cache(ca, c, iter) {
+		struct journal_device *ja = &ca->journal;
+		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+
+		/* No space available on this device */
+		if (next == ja->discard_idx)
+			continue;
+
+		ja->cur_idx = next;
+		k->ptr[n++] = PTR(0,
+				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+				  ca->sb.nr_this_dev);
+	}
+
+	bkey_init(k);
+	SET_KEY_PTRS(k, n);
+
+	if (n)
+		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+
+	if (!journal_full(&c->journal))
+		__closure_wake_up(&c->journal.wait);
+}
+
+void bch_journal_next(struct journal *j)
+{
+	atomic_t p = { 1 };
+
+	j->cur = (j->cur == j->w)
+		? &j->w[1]
+		: &j->w[0];
+
+	/*
+	 * The fifo_push() needs to happen at the same time as j->seq is
+	 * incremented for last_seq() to be calculated correctly
+	 */
+	BUG_ON(!fifo_push(&j->pin, p));
+	atomic_set(&fifo_back(&j->pin), 1);
+
+	j->cur->data->seq	= ++j->seq;
+	j->cur->need_write	= false;
+	j->cur->data->keys	= 0;
+
+	if (fifo_full(&j->pin))
+		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
+}
+
+static void journal_write_endio(struct bio *bio, int error)
+{
+	struct journal_write *w = bio->bi_private;
+
+	cache_set_err_on(error, w->c, "journal io error");
+	closure_put(&w->c->journal.io.cl);
+}
+
+static void journal_write(struct closure *);
+
+static void journal_write_done(struct closure *cl)
+{
+	struct journal *j = container_of(cl, struct journal, io.cl);
+	struct cache_set *c = container_of(j, struct cache_set, journal);
+
+	struct journal_write *w = (j->cur == j->w)
+		? &j->w[1]
+		: &j->w[0];
+
+	__closure_wake_up(&w->wait);
+
+	if (c->journal_delay_ms)
+		closure_delay(&j->io, msecs_to_jiffies(c->journal_delay_ms));
+
+	continue_at(cl, journal_write, system_wq);
+}
+
+static void journal_write_unlocked(struct closure *cl)
+	__releases(c->journal.lock)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+	struct cache *ca;
+	struct journal_write *w = c->journal.cur;
+	struct bkey *k = &c->journal.key;
+	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
+
+	struct bio *bio;
+	struct bio_list list;
+	bio_list_init(&list);
+
+	if (!w->need_write) {
+		/*
+		 * XXX: have to unlock closure before we unlock journal lock,
+		 * else we race with bch_journal(). But this way we race
+		 * against cache set unregister. Doh.
+		 */
+		set_closure_fn(cl, NULL, NULL);
+		closure_sub(cl, CLOSURE_RUNNING + 1);
+		spin_unlock(&c->journal.lock);
+		return;
+	} else if (journal_full(&c->journal)) {
+		journal_reclaim(c);
+		spin_unlock(&c->journal.lock);
+
+		btree_flush_write(c);
+		continue_at(cl, journal_write, system_wq);
+	}
+
+	c->journal.blocks_free -= set_blocks(w->data, c);
+
+	w->data->btree_level = c->root->level;
+
+	bkey_copy(&w->data->btree_root, &c->root->key);
+	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
+
+	for_each_cache(ca, c, i)
+		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+
+	w->data->magic		= jset_magic(c);
+	w->data->version	= BCACHE_JSET_VERSION;
+	w->data->last_seq	= last_seq(&c->journal);
+	w->data->csum		= csum_set(w->data);
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		ca = PTR_CACHE(c, k, i);
+		bio = &ca->journal.bio;
+
+		atomic_long_add(sectors, &ca->meta_sectors_written);
+
+		bio_reset(bio);
+		bio->bi_sector	= PTR_OFFSET(k, i);
+		bio->bi_bdev	= ca->bdev;
+		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
+		bio->bi_size	= sectors << 9;
+
+		bio->bi_end_io	= journal_write_endio;
+		bio->bi_private = w;
+		bch_bio_map(bio, w->data);
+
+		trace_bcache_journal_write(bio);
+		bio_list_add(&list, bio);
+
+		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
+
+		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
+	}
+
+	atomic_dec_bug(&fifo_back(&c->journal.pin));
+	bch_journal_next(&c->journal);
+	journal_reclaim(c);
+
+	spin_unlock(&c->journal.lock);
+
+	while ((bio = bio_list_pop(&list)))
+		closure_bio_submit(bio, cl, c->cache[0]);
+
+	continue_at(cl, journal_write_done, NULL);
+}
+
+static void journal_write(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io.cl);
+
+	spin_lock(&c->journal.lock);
+	journal_write_unlocked(cl);
+}
+
+static void __journal_try_write(struct cache_set *c, bool noflush)
+	__releases(c->journal.lock)
+{
+	struct closure *cl = &c->journal.io.cl;
+
+	if (!closure_trylock(cl, &c->cl))
+		spin_unlock(&c->journal.lock);
+	else if (noflush && journal_full(&c->journal)) {
+		spin_unlock(&c->journal.lock);
+		continue_at(cl, journal_write, system_wq);
+	} else
+		journal_write_unlocked(cl);
+}
+
+#define journal_try_write(c)	__journal_try_write(c, false)
+
+void bch_journal_meta(struct cache_set *c, struct closure *cl)
+{
+	struct journal_write *w;
+
+	if (CACHE_SYNC(&c->sb)) {
+		spin_lock(&c->journal.lock);
+
+		w = c->journal.cur;
+		w->need_write = true;
+
+		if (cl)
+			BUG_ON(!closure_wait(&w->wait, cl));
+
+		__journal_try_write(c, true);
+	}
+}
+
+/*
+ * Entry point to the journalling code - bio_insert() and btree_invalidate()
+ * pass bch_journal() a list of keys to be journalled, and then
+ * bch_journal() hands those same keys off to btree_insert_async()
+ */
+
+void bch_journal(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct cache_set *c = op->c;
+	struct journal_write *w;
+	size_t b, n = ((uint64_t *) op->keys.top) - op->keys.list;
+
+	if (op->type != BTREE_INSERT ||
+	    !CACHE_SYNC(&c->sb))
+		goto out;
+
+	/*
+	 * If we're looping because we errored, might already be waiting on
+	 * another journal write:
+	 */
+	while (atomic_read(&cl->parent->remaining) & CLOSURE_WAITING)
+		closure_sync(cl->parent);
+
+	spin_lock(&c->journal.lock);
+
+	if (journal_full(&c->journal)) {
+		/* XXX: tracepoint */
+		closure_wait(&c->journal.wait, cl);
+
+		journal_reclaim(c);
+		spin_unlock(&c->journal.lock);
+
+		btree_flush_write(c);
+		continue_at(cl, bch_journal, bcache_wq);
+	}
+
+	w = c->journal.cur;
+	w->need_write = true;
+	b = __set_blocks(w->data, w->data->keys + n, c);
+
+	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
+	    b > c->journal.blocks_free) {
+		/* XXX: If we were inserting so many keys that they won't fit in
+		 * an _empty_ journal write, we'll deadlock. For now, handle
+		 * this in bch_keylist_realloc() - but something to think about.
+		 */
+		BUG_ON(!w->data->keys);
+
+		/* XXX: tracepoint */
+		BUG_ON(!closure_wait(&w->wait, cl));
+
+		closure_flush(&c->journal.io);
+
+		journal_try_write(c);
+		continue_at(cl, bch_journal, bcache_wq);
+	}
+
+	memcpy(end(w->data), op->keys.list, n * sizeof(uint64_t));
+	w->data->keys += n;
+
+	op->journal = &fifo_back(&c->journal.pin);
+	atomic_inc(op->journal);
+
+	if (op->flush_journal) {
+		closure_flush(&c->journal.io);
+		closure_wait(&w->wait, cl->parent);
+	}
+
+	journal_try_write(c);
+out:
+	bch_btree_insert_async(cl);
+}
+
+void bch_journal_free(struct cache_set *c)
+{
+	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
+	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
+	free_fifo(&c->journal.pin);
+}
+
+int bch_journal_alloc(struct cache_set *c)
+{
+	struct journal *j = &c->journal;
+
+	closure_init_unlocked(&j->io);
+	spin_lock_init(&j->lock);
+
+	c->journal_delay_ms = 100;
+
+	j->w[0].c = c;
+	j->w[1].c = c;
+
+	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
+	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
new file mode 100644
index 0000000..3d78512
--- /dev/null
+++ b/drivers/md/bcache/journal.h
@@ -0,0 +1,215 @@
+#ifndef _BCACHE_JOURNAL_H
+#define _BCACHE_JOURNAL_H
+
+/*
+ * THE JOURNAL:
+ *
+ * The journal is treated as a circular buffer of buckets - a journal entry
+ * never spans two buckets. This means (not implemented yet) we can resize the
+ * journal at runtime, and will be needed for bcache on raw flash support.
+ *
+ * Journal entries contain a list of keys, ordered by the time they were
+ * inserted; thus journal replay just has to reinsert the keys.
+ *
+ * We also keep some things in the journal header that are logically part of the
+ * superblock - all the things that are frequently updated. This is for future
+ * bcache on raw flash support; the superblock (which will become another
+ * journal) can't be moved or wear leveled, so it contains just enough
+ * information to find the main journal, and the superblock only has to be
+ * rewritten when we want to move/wear level the main journal.
+ *
+ * Currently, we don't journal BTREE_REPLACE operations - this will hopefully be
+ * fixed eventually. This isn't a bug - BTREE_REPLACE is used for insertions
+ * from cache misses, which don't have to be journaled, and for writeback and
+ * moving gc we work around it by flushing the btree to disk before updating the
+ * gc information. But it is a potential issue with incremental garbage
+ * collection, and it's fragile.
+ *
+ * OPEN JOURNAL ENTRIES:
+ *
+ * Each journal entry contains, in the header, the sequence number of the last
+ * journal entry still open - i.e. that has keys that haven't been flushed to
+ * disk in the btree.
+ *
+ * We track this by maintaining a refcount for every open journal entry, in a
+ * fifo; each entry in the fifo corresponds to a particular journal
+ * entry/sequence number. When the refcount at the tail of the fifo goes to
+ * zero, we pop it off - thus, the size of the fifo tells us the number of open
+ * journal entries.
+ *
+ * We take a refcount on a journal entry when we add some keys to a journal
+ * entry that we're going to insert (held by struct btree_op), and then when we
+ * insert those keys into the btree the btree write we're setting up takes a
+ * copy of that refcount (held by struct btree_write). That refcount is dropped
+ * when the btree write completes.
+ *
+ * A struct btree_write can only hold a refcount on a single journal entry, but
+ * might contain keys for many journal entries - we handle this by making sure
+ * it always has a refcount on the _oldest_ journal entry of all the journal
+ * entries it has keys for.
+ *
+ * JOURNAL RECLAIM:
+ *
+ * As mentioned previously, our fifo of refcounts tells us the number of open
+ * journal entries; from that and the current journal sequence number we compute
+ * last_seq - the oldest journal entry we still need. We write last_seq in each
+ * journal entry, and we also have to keep track of where it exists on disk so
+ * we don't overwrite it when we loop around the journal.
+ *
+ * To do that we track, for each journal bucket, the sequence number of the
+ * newest journal entry it contains - if we don't need that journal entry we
+ * don't need anything in that bucket anymore. From that we track the last
+ * journal bucket we still need; all this is tracked in struct journal_device
+ * and updated by journal_reclaim().
+ *
+ * JOURNAL FILLING UP:
+ *
+ * There are two ways the journal could fill up; either we could run out of
+ * space to write to, or we could have too many open journal entries and run out
+ * of room in the fifo of refcounts. Since those refcounts are decremented
+ * without any locking we can't safely resize that fifo, so we handle it the
+ * same way.
+ *
+ * If the journal fills up, we start flushing dirty btree nodes until we can
+ * allocate space for a journal write again - preferentially flushing btree
+ * nodes that are pinning the oldest journal entries first.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1	1
+/* Always latest UUID format */
+#define BCACHE_JSET_VERSION_UUID	1
+#define BCACHE_JSET_VERSION		1
+
+/*
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+struct jset {
+	uint64_t		csum;
+	uint64_t		magic;
+	uint64_t		seq;
+	uint32_t		version;
+	uint32_t		keys;
+
+	uint64_t		last_seq;
+
+	BKEY_PADDED(uuid_bucket);
+	BKEY_PADDED(btree_root);
+	uint16_t		btree_level;
+	uint16_t		pad[3];
+
+	uint64_t		prio_bucket[MAX_CACHES_PER_SET];
+
+	union {
+		struct bkey	start[0];
+		uint64_t	d[0];
+	};
+};
+
+/*
+ * Only used for holding the journal entries we read in btree_journal_read()
+ * during cache_registration
+ */
+struct journal_replay {
+	struct list_head	list;
+	atomic_t		*pin;
+	struct jset		j;
+};
+
+/*
+ * We put two of these in struct journal; we use them for writes to the
+ * journal that are being staged or in flight.
+ */
+struct journal_write {
+	struct jset		*data;
+#define JSET_BITS		3
+
+	struct cache_set	*c;
+	struct closure_waitlist	wait;
+	bool			need_write;
+};
+
+/* Embedded in struct cache_set */
+struct journal {
+	spinlock_t		lock;
+	/* used when waiting because the journal was full */
+	struct closure_waitlist	wait;
+	struct closure_with_timer io;
+
+	/* Number of blocks free in the bucket(s) we're currently writing to */
+	unsigned		blocks_free;
+	uint64_t		seq;
+	DECLARE_FIFO(atomic_t, pin);
+
+	BKEY_PADDED(key);
+
+	struct journal_write	w[2], *cur;
+};
+
+/*
+ * Embedded in struct cache. First three fields refer to the array of journal
+ * buckets, in cache_sb.
+ */
+struct journal_device {
+	/*
+	 * For each journal bucket, contains the max sequence number of the
+	 * journal writes it contains - so we know when a bucket can be reused.
+	 */
+	uint64_t		seq[SB_JOURNAL_BUCKETS];
+
+	/* Journal bucket we're currently writing to */
+	unsigned		cur_idx;
+
+	/* Last journal bucket that still contains an open journal entry */
+	unsigned		last_idx;
+
+	/* Next journal bucket to be discarded */
+	unsigned		discard_idx;
+
+#define DISCARD_READY		0
+#define DISCARD_IN_FLIGHT	1
+#define DISCARD_DONE		2
+	/* DISCARD_READY, DISCARD_IN_FLIGHT or DISCARD_DONE */
+	atomic_t		discard_in_flight;
+
+	struct work_struct	discard_work;
+	struct bio		discard_bio;
+	struct bio_vec		discard_bv;
+
+	/* Bio for journal reads/writes to this device */
+	struct bio		bio;
+	struct bio_vec		bv[8];
+};
+
+#define journal_pin_cmp(c, l, r)				\
+	(fifo_idx(&(c)->journal.pin, (l)->journal) >		\
+	 fifo_idx(&(c)->journal.pin, (r)->journal))
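+
+/*
+ * The pin fifo's front is the oldest open journal entry, so a higher
+ * fifo_idx() means a newer entry: journal_pin_cmp() is true when l pins a
+ * newer journal entry than r.
+ */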
+
+#define JOURNAL_PIN	20000
+
+#define journal_full(j)						\
+	(!(j)->blocks_free || fifo_free(&(j)->pin) <= 1)
+
+struct closure;
+struct cache_set;
+struct btree_op;
+
+void bch_journal(struct closure *);
+void bch_journal_next(struct journal *);
+void bch_journal_mark(struct cache_set *, struct list_head *);
+void bch_journal_meta(struct cache_set *, struct closure *);
+int bch_journal_read(struct cache_set *, struct list_head *,
+			struct btree_op *);
+int bch_journal_replay(struct cache_set *, struct list_head *,
+			  struct btree_op *);
+
+void bch_journal_free(struct cache_set *);
+int bch_journal_alloc(struct cache_set *);
+
+#endif /* _BCACHE_JOURNAL_H */
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
new file mode 100644
index 0000000..8589512
--- /dev/null
+++ b/drivers/md/bcache/movinggc.c
@@ -0,0 +1,254 @@
+/*
+ * Moving/copying garbage collector
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+struct moving_io {
+	struct keybuf_key	*w;
+	struct search		s;
+	struct bbio		bio;
+};
+
+static bool moving_pred(struct keybuf *buf, struct bkey *k)
+{
+	struct cache_set *c = container_of(buf, struct cache_set,
+					   moving_gc_keys);
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		struct cache *ca = PTR_CACHE(c, k, i);
+		struct bucket *g = PTR_BUCKET(c, k, i);
+
+		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+			return true;
+	}
+
+	return false;
+}
+
+/* Moving GC - IO loop */
+
+static void moving_io_destructor(struct closure *cl)
+{
+	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+	kfree(io);
+}
+
+static void write_moving_finish(struct closure *cl)
+{
+	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
+	struct bio *bio = &io->bio.bio;
+	struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);
+
+	while (bv-- != bio->bi_io_vec)
+		__free_page(bv->bv_page);
+
+	pr_debug("%s %s", io->s.op.insert_collision
+		 ? "collision moving" : "moved",
+		 pkey(&io->w->key));
+
+	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+
+	atomic_dec_bug(&io->s.op.c->in_flight);
+	closure_wake_up(&io->s.op.c->moving_gc_wait);
+
+	closure_return_with_destructor(cl, moving_io_destructor);
+}
+
+static void read_moving_endio(struct bio *bio, int error)
+{
+	struct moving_io *io = container_of(bio->bi_private,
+					    struct moving_io, s.cl);
+
+	if (error)
+		io->s.error = error;
+
+	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+}
+
+static void moving_init(struct moving_io *io)
+{
+	struct bio *bio = &io->bio.bio;
+
+	bio_init(bio);
+	bio_get(bio);
+	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
+	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
+					       PAGE_SECTORS);
+	bio->bi_private		= &io->s.cl;
+	bio->bi_io_vec		= bio->bi_inline_vecs;
+	bch_bio_map(bio, NULL);
+}
+
+static void write_moving(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct moving_io *io = container_of(s, struct moving_io, s);
+
+	if (!s->error) {
+		trace_bcache_write_moving(&io->bio.bio);
+
+		moving_init(io);
+
+		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
+		s->op.lock		= -1;
+		s->op.write_prio	= 1;
+		s->op.cache_bio		= &io->bio.bio;
+
+		s->writeback		= KEY_DIRTY(&io->w->key);
+		s->op.csum		= KEY_CSUM(&io->w->key);
+
+		s->op.type = BTREE_REPLACE;
+		bkey_copy(&s->op.replace, &io->w->key);
+
+		closure_init(&s->op.cl, cl);
+		bch_insert_data(&s->op.cl);
+	}
+
+	continue_at(cl, write_moving_finish, NULL);
+}
+
+static void read_moving_submit(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct moving_io *io = container_of(s, struct moving_io, s);
+	struct bio *bio = &io->bio.bio;
+
+	trace_bcache_read_moving(bio);
+	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+
+	continue_at(cl, write_moving, bch_gc_wq);
+}
+
+static void read_moving(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
+	struct keybuf_key *w;
+	struct moving_io *io;
+	struct bio *bio;
+
+	/* XXX: if we error, background writeback could stall indefinitely */
+
+	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
+		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY);
+		if (!w)
+			break;
+
+		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
+			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+			     GFP_KERNEL);
+		if (!io)
+			goto err;
+
+		w->private	= io;
+		io->w		= w;
+		io->s.op.inode	= KEY_INODE(&w->key);
+		io->s.op.c	= c;
+
+		moving_init(io);
+		bio = &io->bio.bio;
+
+		bio->bi_rw	= READ;
+		bio->bi_end_io	= read_moving_endio;
+
+		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
+			goto err;
+
+		pr_debug("%s", pkey(&w->key));
+
+		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
+
+		if (atomic_inc_return(&c->in_flight) >= 64) {
+			closure_wait_event(&c->moving_gc_wait, cl,
+					   atomic_read(&c->in_flight) < 64);
+			continue_at(cl, read_moving, bch_gc_wq);
+		}
+	}
+
+	if (0) {
+err:		if (!IS_ERR_OR_NULL(w->private))
+			kfree(w->private);
+
+		bch_keybuf_del(&c->moving_gc_keys, w);
+	}
+
+	closure_return(cl);
+}
+
+static bool bucket_cmp(struct bucket *l, struct bucket *r)
+{
+	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+}
+
+static unsigned bucket_heap_top(struct cache *ca)
+{
+	return GC_SECTORS_USED(heap_peek(&ca->heap));
+}
+
+void bch_moving_gc(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
+	struct cache *ca;
+	struct bucket *b;
+	unsigned i;
+
+	if (!c->copy_gc_enabled)
+		closure_return(cl);
+
+	mutex_lock(&c->bucket_lock);
+
+	for_each_cache(ca, c, i) {
+		unsigned sectors_to_move = 0;
+		unsigned reserve_sectors = ca->sb.bucket_size *
+			min(fifo_used(&ca->free), ca->free.size / 2);
+
+		ca->heap.used = 0;
+
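+		/*
+		 * Build a heap of the least-used buckets (the comparator keeps
+		 * the fullest of those on top), then pop buckets back off until
+		 * the data to be moved fits in the free-bucket reserve.
+		 */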
+		for_each_bucket(b, ca) {
+			if (!GC_SECTORS_USED(b))
+				continue;
+
+			if (!heap_full(&ca->heap)) {
+				sectors_to_move += GC_SECTORS_USED(b);
+				heap_add(&ca->heap, b, bucket_cmp);
+			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+				sectors_to_move -= bucket_heap_top(ca);
+				sectors_to_move += GC_SECTORS_USED(b);
+
+				ca->heap.data[0] = b;
+				heap_sift(&ca->heap, 0, bucket_cmp);
+			}
+		}
+
+		while (sectors_to_move > reserve_sectors) {
+			heap_pop(&ca->heap, b, bucket_cmp);
+			sectors_to_move -= GC_SECTORS_USED(b);
+		}
+
+		ca->gc_move_threshold = bucket_heap_top(ca);
+
+		pr_debug("threshold %u", ca->gc_move_threshold);
+	}
+
+	mutex_unlock(&c->bucket_lock);
+
+	c->moving_gc_keys.last_scanned = ZERO_KEY;
+
+	closure_init(&c->moving_gc, cl);
+	read_moving(&c->moving_gc);
+
+	closure_return(cl);
+}
+
+void bch_moving_init_cache_set(struct cache_set *c)
+{
+	bch_keybuf_init(&c->moving_gc_keys, moving_pred);
+}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
new file mode 100644
index 0000000..e5ff12e5
--- /dev/null
+++ b/drivers/md/bcache/request.c
@@ -0,0 +1,1411 @@
+/*
+ * Main bcache entry point - handle a read or a write request and decide what to
+ * do with it; the make_request functions are called by the block layer.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/cgroup.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include "blk-cgroup.h"
+
+#include <trace/events/bcache.h>
+
+#define CUTOFF_CACHE_ADD	95
+#define CUTOFF_CACHE_READA	90
+#define CUTOFF_WRITEBACK	50
+#define CUTOFF_WRITEBACK_SYNC	75
+
+struct kmem_cache *bch_search_cache;
+
+static void check_should_skip(struct cached_dev *, struct search *);
+
+/* Cgroup interface */
+
+#ifdef CONFIG_CGROUP_BCACHE
+static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };
+
+static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
+{
+	struct cgroup_subsys_state *css;
+	return cgroup &&
+		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
+		? container_of(css, struct bch_cgroup, css)
+		: &bcache_default_cgroup;
+}
+
+struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
+{
+	struct cgroup_subsys_state *css = bio->bi_css
+		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
+		: task_subsys_state(current, bcache_subsys_id);
+
+	return css
+		? container_of(css, struct bch_cgroup, css)
+		: &bcache_default_cgroup;
+}
+
+static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
+			struct file *file,
+			char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	char tmp[1024];
+	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
+					  cgroup_to_bcache(cgrp)->cache_mode + 1);
+
+	if (len < 0)
+		return len;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+}
+
+static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
+			    const char *buf)
+{
+	int v = bch_read_string_list(buf, bch_cache_modes);
+	if (v < 0)
+		return v;
+
+	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
+	return 0;
+}
+
+static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	return cgroup_to_bcache(cgrp)->verify;
+}
+
+static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+	cgroup_to_bcache(cgrp)->verify = val;
+	return 0;
+}
+
+static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+	return atomic_read(&bcachecg->stats.cache_hits);
+}
+
+static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+	return atomic_read(&bcachecg->stats.cache_misses);
+}
+
+static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
+					 struct cftype *cft)
+{
+	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+	return atomic_read(&bcachecg->stats.cache_bypass_hits);
+}
+
+static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
+					   struct cftype *cft)
+{
+	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
+	return atomic_read(&bcachecg->stats.cache_bypass_misses);
+}
+
+static struct cftype bch_files[] = {
+	{
+		.name		= "cache_mode",
+		.read		= cache_mode_read,
+		.write_string	= cache_mode_write,
+	},
+	{
+		.name		= "verify",
+		.read_u64	= bch_verify_read,
+		.write_u64	= bch_verify_write,
+	},
+	{
+		.name		= "cache_hits",
+		.read_u64	= bch_cache_hits_read,
+	},
+	{
+		.name		= "cache_misses",
+		.read_u64	= bch_cache_misses_read,
+	},
+	{
+		.name		= "cache_bypass_hits",
+		.read_u64	= bch_cache_bypass_hits_read,
+	},
+	{
+		.name		= "cache_bypass_misses",
+		.read_u64	= bch_cache_bypass_misses_read,
+	},
+	{ }	/* terminate */
+};
+
+static void init_bch_cgroup(struct bch_cgroup *cg)
+{
+	cg->cache_mode = -1;
+}
+
+static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
+{
+	struct bch_cgroup *cg;
+
+	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
+	if (!cg)
+		return ERR_PTR(-ENOMEM);
+	init_bch_cgroup(cg);
+	return &cg->css;
+}
+
+static void bcachecg_destroy(struct cgroup *cgroup)
+{
+	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
+	free_css_id(&bcache_subsys, &cg->css);
+	kfree(cg);
+}
+
+struct cgroup_subsys bcache_subsys = {
+	.create		= bcachecg_create,
+	.destroy	= bcachecg_destroy,
+	.subsys_id	= bcache_subsys_id,
+	.name		= "bcache",
+	.module		= THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(bcache_subsys);
+#endif
+
+static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+{
+#ifdef CONFIG_CGROUP_BCACHE
+	int r = bch_bio_to_cgroup(bio)->cache_mode;
+	if (r >= 0)
+		return r;
+#endif
+	return BDEV_CACHE_MODE(&dc->sb);
+}
+
+static bool verify(struct cached_dev *dc, struct bio *bio)
+{
+#ifdef CONFIG_CGROUP_BCACHE
+	if (bch_bio_to_cgroup(bio)->verify)
+		return true;
+#endif
+	return dc->verify;
+}
+
+static void bio_csum(struct bio *bio, struct bkey *k)
+{
+	struct bio_vec *bv;
+	uint64_t csum = 0;
+	int i;
+
+	bio_for_each_segment(bv, bio, i) {
+		void *d = kmap(bv->bv_page) + bv->bv_offset;
+		csum = bch_crc64_update(csum, d, bv->bv_len);
+		kunmap(bv->bv_page);
+	}
+
+	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
+}
+
+/* Insert data into cache */
+
+static void bio_invalidate(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct bio *bio = op->cache_bio;
+
+	pr_debug("invalidating %i sectors from %llu",
+		 bio_sectors(bio), (uint64_t) bio->bi_sector);
+
+	while (bio_sectors(bio)) {
+		unsigned len = min(bio_sectors(bio), 1U << 14);
+
+		if (bch_keylist_realloc(&op->keys, 0, op->c))
+			goto out;
+
+		bio->bi_sector	+= len;
+		bio->bi_size	-= len << 9;
+
+		bch_keylist_add(&op->keys,
+				&KEY(op->inode, bio->bi_sector, len));
+	}
+
+	op->insert_data_done = true;
+	bio_put(bio);
+out:
+	continue_at(cl, bch_journal, bcache_wq);
+}
+
+struct open_bucket {
+	struct list_head	list;
+	struct task_struct	*last;
+	unsigned		sectors_free;
+	BKEY_PADDED(key);
+};
+
+void bch_open_buckets_free(struct cache_set *c)
+{
+	struct open_bucket *b;
+
+	while (!list_empty(&c->data_buckets)) {
+		b = list_first_entry(&c->data_buckets,
+				     struct open_bucket, list);
+		list_del(&b->list);
+		kfree(b);
+	}
+}
+
+int bch_open_buckets_alloc(struct cache_set *c)
+{
+	int i;
+
+	spin_lock_init(&c->data_bucket_lock);
+
+	for (i = 0; i < 6; i++) {
+		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
+		if (!b)
+			return -ENOMEM;
+
+		list_add(&b->list, &c->data_buckets);
+	}
+
+	return 0;
+}
+
+/*
+ * We keep multiple buckets open for writes, and try to segregate different
+ * write streams for better cache utilization: first we look for a bucket where
+ * the last write to it was sequential with the current write, and failing that
+ * we look for a bucket that was last used by the same task.
+ *
+ * The idea is that if you've got multiple tasks pulling data into the cache
+ * at the same time, you'll get better cache utilization if you try to
+ * segregate their data and preserve locality.
+ *
+ * For example, say you're starting Firefox at the same time you're copying a
+ * bunch of files. Firefox will likely end up being fairly hot and stay in the
+ * cache awhile, but the data you copied might not be; if you wrote all that
+ * data to the same buckets it'd get invalidated at the same time.
+ *
+ * Both of those tasks will be doing fairly random IO so we can't rely on
+ * detecting sequential IO to segregate their data, but going off of the task
+ * should be a sane heuristic; see the worked example after pick_data_bucket().
+ */
+static struct open_bucket *pick_data_bucket(struct cache_set *c,
+					    const struct bkey *search,
+					    struct task_struct *task,
+					    struct bkey *alloc)
+{
+	struct open_bucket *ret, *ret_task = NULL;
+
+	list_for_each_entry_reverse(ret, &c->data_buckets, list)
+		if (!bkey_cmp(&ret->key, search))
+			goto found;
+		else if (ret->last == task)
+			ret_task = ret;
+
+	ret = ret_task ?: list_first_entry(&c->data_buckets,
+					   struct open_bucket, list);
+found:
+	if (!ret->sectors_free && KEY_PTRS(alloc)) {
+		ret->sectors_free = c->sb.bucket_size;
+		bkey_copy(&ret->key, alloc);
+		bkey_init(alloc);
+	}
+
+	if (!ret->sectors_free)
+		ret = NULL;
+
+	return ret;
+}
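+
+/*
+ * Worked example of the heuristic above (hypothetical workload): task A is
+ * streaming a big copy while task B does random IO. A's next write starts
+ * where its last one ended, so the !bkey_cmp(&ret->key, search) test finds
+ * A's bucket first; B's writes never match a last key, but ret->last == task
+ * still steers them back to whichever bucket B wrote last. Each stream thus
+ * fills, and later ages out of, its own buckets.
+ */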
+
+/*
+ * Allocates some space in the cache to write to, sets k to point to the newly
+ * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
+ * end of the newly allocated space).
+ *
+ * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
+ * sectors were actually allocated.
+ *
+ * If s->writeback is true, will not fail.
+ */
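+/*
+ * For illustration (hypothetical numbers): if the caller asks for 64 sectors
+ * with KEY_OFFSET(k) == 1000 but the open bucket only has 16 sectors free,
+ * on return KEY_OFFSET(k) == 1016 and KEY_SIZE(k) == 16 - the caller must
+ * loop, as bch_insert_data_loop() does when it splits the bio by KEY_SIZE().
+ */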
+static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
+			      struct search *s)
+{
+	struct cache_set *c = s->op.c;
+	struct open_bucket *b;
+	BKEY_PADDED(key) alloc;
+	struct closure cl, *w = NULL;
+	unsigned i;
+
+	if (s->writeback) {
+		closure_init_stack(&cl);
+		w = &cl;
+	}
+
+	/*
+	 * We might have to allocate a new bucket, which we can't do with a
+	 * spinlock held. So if we have to allocate, we drop the lock, allocate
+	 * and then retry. KEY_PTRS() indicates whether alloc points to
+	 * allocated bucket(s).
+	 */
+
+	bkey_init(&alloc.key);
+	spin_lock(&c->data_bucket_lock);
+
+	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
+		unsigned watermark = s->op.write_prio
+			? WATERMARK_MOVINGGC
+			: WATERMARK_NONE;
+
+		spin_unlock(&c->data_bucket_lock);
+
+		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
+			return false;
+
+		spin_lock(&c->data_bucket_lock);
+	}
+
+	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
+	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
+	 */
+	if (KEY_PTRS(&alloc.key))
+		__bkey_put(c, &alloc.key);
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		EBUG_ON(ptr_stale(c, &b->key, i));
+
+	/* Set up the pointer to the space we're allocating: */
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++)
+		k->ptr[i] = b->key.ptr[i];
+
+	sectors = min(sectors, b->sectors_free);
+
+	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
+	SET_KEY_SIZE(k, sectors);
+	SET_KEY_PTRS(k, KEY_PTRS(&b->key));
+
+	/*
+	 * Move b to the end of the lru, and keep track of what this bucket was
+	 * last used for:
+	 */
+	list_move_tail(&b->list, &c->data_buckets);
+	bkey_copy_key(&b->key, k);
+	b->last = s->task;
+
+	b->sectors_free	-= sectors;
+
+	for (i = 0; i < KEY_PTRS(&b->key); i++) {
+		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
+
+		atomic_long_add(sectors,
+				&PTR_CACHE(c, &b->key, i)->sectors_written);
+	}
+
+	if (b->sectors_free < c->sb.block_size)
+		b->sectors_free = 0;
+
+	/*
+	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * to k the refcount bch_bucket_alloc_set() took.
+	 */
+	if (b->sectors_free)
+		for (i = 0; i < KEY_PTRS(&b->key); i++)
+			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);
+
+	spin_unlock(&c->data_bucket_lock);
+	return true;
+}
+
+static void bch_insert_data_error(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+	/*
+	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
+	 *
+	 * We don't have to insert those keys but we still have to invalidate
+	 * that region of the cache - so, if we just strip off all the pointers
+	 * from the keys we'll accomplish just that.
+	 */
+
+	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;
+
+	while (src != op->keys.top) {
+		struct bkey *n = bkey_next(src);
+
+		SET_KEY_PTRS(src, 0);
+		bkey_copy(dst, src);
+
+		dst = bkey_next(dst);
+		src = n;
+	}
+
+	op->keys.top = dst;
+
+	bch_journal(cl);
+}
+
+static void bch_insert_data_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct search *s = container_of(op, struct search, op);
+
+	if (error) {
+		/* TODO: We could try to recover from this. */
+		if (s->writeback)
+			s->error = error;
+		else if (s->write)
+			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
+		else
+			set_closure_fn(cl, NULL, NULL);
+	}
+
+	bch_bbio_endio(op->c, bio, error, "writing data to cache");
+}
+
+static void bch_insert_data_loop(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = op->cache_bio, *n;
+
+	if (op->skip)
+		return bio_invalidate(cl);
+
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
+		set_gc_sectors(op->c);
+		bch_queue_gc(op->c);
+	}
+
+	do {
+		unsigned i;
+		struct bkey *k;
+		struct bio_set *split = s->d
+			? s->d->bio_split : op->c->bio_split;
+
+		/* 1 for the device pointer and 1 for the checksum */
+		if (bch_keylist_realloc(&op->keys,
+					1 + (op->csum ? 1 : 0),
+					op->c))
+			continue_at(cl, bch_journal, bcache_wq);
+
+		k = op->keys.top;
+		bkey_init(k);
+		SET_KEY_INODE(k, op->inode);
+		SET_KEY_OFFSET(k, bio->bi_sector);
+
+		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
+			goto err;
+
+		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+		if (!n) {
+			__bkey_put(op->c, k);
+			continue_at(cl, bch_insert_data_loop, bcache_wq);
+		}
+
+		n->bi_end_io	= bch_insert_data_endio;
+		n->bi_private	= cl;
+
+		if (s->writeback) {
+			SET_KEY_DIRTY(k, true);
+
+			for (i = 0; i < KEY_PTRS(k); i++)
+				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
+					    GC_MARK_DIRTY);
+		}
+
+		SET_KEY_CSUM(k, op->csum);
+		if (KEY_CSUM(k))
+			bio_csum(n, k);
+
+		pr_debug("%s", pkey(k));
+		bch_keylist_push(&op->keys);
+
+		trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
+		n->bi_rw |= REQ_WRITE;
+		bch_submit_bbio(n, op->c, k, 0);
+	} while (n != bio);
+
+	op->insert_data_done = true;
+	continue_at(cl, bch_journal, bcache_wq);
+err:
+	/* bch_alloc_sectors() blocks if s->writeback = true */
+	BUG_ON(s->writeback);
+
+	/*
+	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while and
+	 * we might be starving btree writes for gc or something.
+	 */
+
+	if (s->write) {
+		/*
+		 * Writethrough write: We can't complete the write until we've
+		 * updated the index. But we don't want to delay the write while
+		 * we wait for buckets to be freed up, so just invalidate the
+		 * rest of the write.
+		 */
+		op->skip = true;
+		return bio_invalidate(cl);
+	} else {
+		/*
+		 * From a cache miss, we can just insert the keys for the data
+		 * we have written or bail out if we didn't do anything.
+		 */
+		op->insert_data_done = true;
+		bio_put(bio);
+
+		if (!bch_keylist_empty(&op->keys))
+			continue_at(cl, bch_journal, bcache_wq);
+		else
+			closure_return(cl);
+	}
+}
+
+/**
+ * bch_insert_data - stick some data in the cache
+ *
+ * This is the starting point for any data to end up in a cache device; it could
+ * be from a normal write, or a writeback write, or a write to a flash-only
+ * volume - it's also used by the moving garbage collector to compact data in
+ * mostly empty buckets.
+ *
+ * It first writes the data to the cache, creating a list of keys to be inserted
+ * (if the data had to be fragmented there will be multiple keys); after the
+ * data is written it calls bch_journal, and after the keys have been added to
+ * the next journal write they're inserted into the btree.
+ *
+ * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
+ * and op->inode is used for the key inode.
+ *
+ * If op->skip is true, instead of inserting the data it invalidates the region
+ * of the cache represented by op->cache_bio and op->inode.
+ */
+void bch_insert_data(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+	bch_keylist_init(&op->keys);
+	bio_get(op->cache_bio);
+	bch_insert_data_loop(cl);
+}
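+
+/*
+ * Rough usage sketch: a caller fills in op->c, op->inode and op->cache_bio
+ * (plus op->csum/op->skip as needed), then invokes this as a closure:
+ *
+ *	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+ *
+ * as request_write() and request_read_done() do below.
+ */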
+
+void bch_btree_insert_async(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct search *s = container_of(op, struct search, op);
+
+	if (bch_btree_insert(op, op->c)) {
+		s->error		= -ENOMEM;
+		op->insert_data_done	= true;
+	}
+
+	if (op->insert_data_done) {
+		bch_keylist_free(&op->keys);
+		closure_return(cl);
+	} else
+		continue_at(cl, bch_insert_data_loop, bcache_wq);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+
+	if (error) {
+		struct search *s = container_of(cl, struct search, cl);
+		s->error = error;
+		/* Only cache read errors are recoverable */
+		s->recoverable = false;
+	}
+
+	bio_put(bio);
+	closure_put(cl);
+}
+
+void bch_cache_read_endio(struct bio *bio, int error)
+{
+	struct bbio *b = container_of(bio, struct bbio, bio);
+	struct closure *cl = bio->bi_private;
+	struct search *s = container_of(cl, struct search, cl);
+
+	/*
+	 * If the bucket was reused while our bio was in flight, we might have
+	 * read the wrong data. Set s->error but not error so it doesn't get
+	 * counted against the cache device, but we'll still reread the data
+	 * from the backing device.
+	 */
+
+	if (error)
+		s->error = error;
+	else if (ptr_stale(s->op.c, &b->key, 0)) {
+		atomic_long_inc(&s->op.c->cache_read_races);
+		s->error = -EINTR;
+	}
+
+	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
+}
+
+static void bio_complete(struct search *s)
+{
+	if (s->orig_bio) {
+		int cpu, rw = bio_data_dir(s->orig_bio);
+		unsigned long duration = jiffies - s->start_time;
+
+		cpu = part_stat_lock();
+		part_round_stats(cpu, &s->d->disk->part0);
+		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
+		part_stat_unlock();
+
+		trace_bcache_request_end(s, s->orig_bio);
+		bio_endio(s->orig_bio, s->error);
+		s->orig_bio = NULL;
+	}
+}
+
+static void do_bio_hook(struct search *s)
+{
+	struct bio *bio = &s->bio.bio;
+	memcpy(bio, s->orig_bio, sizeof(struct bio));
+
+	bio->bi_end_io		= request_endio;
+	bio->bi_private		= &s->cl;
+	atomic_set(&bio->bi_cnt, 3);
+}
+
+static void search_free(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	bio_complete(s);
+
+	if (s->op.cache_bio)
+		bio_put(s->op.cache_bio);
+
+	if (s->unaligned_bvec)
+		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
+
+	closure_debug_destroy(cl);
+	mempool_free(s, s->d->c->search);
+}
+
+static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
+{
+	struct bio_vec *bv;
+	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
+	memset(s, 0, offsetof(struct search, op.keys));
+
+	__closure_init(&s->cl, NULL);
+
+	s->op.inode		= d->id;
+	s->op.c			= d->c;
+	s->d			= d;
+	s->op.lock		= -1;
+	s->task			= current;
+	s->orig_bio		= bio;
+	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+	s->op.flush_journal	= (bio->bi_rw & REQ_FLUSH) != 0;
+	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
+	s->recoverable		= 1;
+	s->start_time		= jiffies;
+	do_bio_hook(s);
+
+	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
+		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
+		memcpy(bv, bio_iovec(bio),
+		       sizeof(struct bio_vec) * bio_segments(bio));
+
+		s->bio.bio.bi_io_vec	= bv;
+		s->unaligned_bvec	= 1;
+	}
+
+	return s;
+}
+
+static void btree_read_async(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+	int ret = btree_root(search_recurse, op->c, op);
+
+	if (ret == -EAGAIN)
+		continue_at(cl, btree_read_async, bcache_wq);
+
+	closure_return(cl);
+}
+
+/* Cached devices */
+
+static void cached_dev_bio_complete(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	search_free(cl);
+	cached_dev_put(dc);
+}
+
+/* Process reads */
+
+static void cached_dev_read_complete(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+
+	if (s->op.insert_collision)
+		bch_mark_cache_miss_collision(s);
+
+	if (s->op.cache_bio) {
+		int i;
+		struct bio_vec *bv;
+
+		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
+			__free_page(bv->bv_page);
+	}
+
+	cached_dev_bio_complete(cl);
+}
+
+static void request_read_error(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct bio_vec *bv;
+	int i;
+
+	if (s->recoverable) {
+		/* The cache read failed, but we can retry from the backing
+		 * device.
+		 */
+		pr_debug("recovering at sector %llu",
+			 (uint64_t) s->orig_bio->bi_sector);
+
+		s->error = 0;
+		bv = s->bio.bio.bi_io_vec;
+		do_bio_hook(s);
+		s->bio.bio.bi_io_vec = bv;
+
+		if (!s->unaligned_bvec)
+			bio_for_each_segment(bv, s->orig_bio, i)
+				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
+		else
+			memcpy(s->bio.bio.bi_io_vec,
+			       bio_iovec(s->orig_bio),
+			       sizeof(struct bio_vec) *
+			       bio_segments(s->orig_bio));
+
+		/* XXX: invalidate cache */
+
+		trace_bcache_read_retry(&s->bio.bio);
+		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
+	}
+
+	continue_at(cl, cached_dev_read_complete, NULL);
+}
+
+static void request_read_done(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	/*
	 * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio now
+	 * contains data ready to be inserted into the cache.
+	 *
+	 * First, we copy the data we just read from cache_bio's bounce buffers
+	 * to the buffers the original bio pointed to:
+	 */
+
+	if (s->op.cache_bio) {
+		struct bio_vec *src, *dst;
+		unsigned src_offset, dst_offset, bytes;
+		void *dst_ptr;
+
+		bio_reset(s->op.cache_bio);
+		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
+		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
+		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
+		bch_bio_map(s->op.cache_bio, NULL);
+
+		src = bio_iovec(s->op.cache_bio);
+		dst = bio_iovec(s->cache_miss);
+		src_offset = src->bv_offset;
+		dst_offset = dst->bv_offset;
+		dst_ptr = kmap(dst->bv_page);
+
+		while (1) {
+			if (dst_offset == dst->bv_offset + dst->bv_len) {
+				kunmap(dst->bv_page);
+				dst++;
+				if (dst == bio_iovec_idx(s->cache_miss,
+						s->cache_miss->bi_vcnt))
+					break;
+
+				dst_offset = dst->bv_offset;
+				dst_ptr = kmap(dst->bv_page);
+			}
+
+			if (src_offset == src->bv_offset + src->bv_len) {
+				src++;
+				if (src == bio_iovec_idx(s->op.cache_bio,
+						 s->op.cache_bio->bi_vcnt))
+					BUG();
+
+				src_offset = src->bv_offset;
+			}
+
+			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
+				    src->bv_offset + src->bv_len - src_offset);
+
+			memcpy(dst_ptr + dst_offset,
+			       page_address(src->bv_page) + src_offset,
+			       bytes);
+
+			src_offset	+= bytes;
+			dst_offset	+= bytes;
+		}
+
+		bio_put(s->cache_miss);
+		s->cache_miss = NULL;
+	}
+
+	if (verify(dc, &s->bio.bio) && s->recoverable)
+		bch_data_verify(s);
+
+	bio_complete(s);
+
+	if (s->op.cache_bio &&
+	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
+		s->op.type = BTREE_REPLACE;
+		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+	}
+
+	continue_at(cl, cached_dev_read_complete, NULL);
+}
+
+static void request_read_done_bh(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
+
+	if (s->error)
+		continue_at_nobarrier(cl, request_read_error, bcache_wq);
+	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
+		continue_at_nobarrier(cl, request_read_done, bcache_wq);
+	else
+		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
+}
+
+static int cached_dev_cache_miss(struct btree *b, struct search *s,
+				 struct bio *bio, unsigned sectors)
+{
+	int ret = 0;
+	unsigned reada;
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	struct bio *miss;
+
+	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+	if (!miss)
+		return -EAGAIN;
+
+	if (miss == bio)
+		s->op.lookup_done = true;
+
+	miss->bi_end_io		= request_endio;
+	miss->bi_private	= &s->cl;
+
+	if (s->cache_miss || s->op.skip)
+		goto out_submit;
+
+	if (miss != bio ||
+	    (bio->bi_rw & REQ_RAHEAD) ||
+	    (bio->bi_rw & REQ_META) ||
+	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
+		reada = 0;
+	else {
+		reada = min(dc->readahead >> 9,
+			    sectors - bio_sectors(miss));
+
+		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
+			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
+	}
+
+	s->cache_bio_sectors = bio_sectors(miss) + reada;
+	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
+			dc->disk.bio_split);
+
+	if (!s->op.cache_bio)
+		goto out_submit;
+
+	s->op.cache_bio->bi_sector	= miss->bi_sector;
+	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
+	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
+
+	s->op.cache_bio->bi_end_io	= request_endio;
+	s->op.cache_bio->bi_private	= &s->cl;
+
+	/* btree_search_recurse()'s btree iterator is no good anymore */
+	ret = -EINTR;
+	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
+		goto out_put;
+
+	bch_bio_map(s->op.cache_bio, NULL);
+	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+		goto out_put;
+
+	s->cache_miss = miss;
+	bio_get(s->op.cache_bio);
+
+	trace_bcache_cache_miss(s->orig_bio);
+	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
+
+	return ret;
+out_put:
+	bio_put(s->op.cache_bio);
+	s->op.cache_bio = NULL;
+out_submit:
+	closure_bio_submit(miss, &s->cl, s->d);
+	return ret;
+}
+
+static void request_read(struct cached_dev *dc, struct search *s)
+{
+	struct closure *cl = &s->cl;
+
+	check_should_skip(dc, s);
+	closure_call(&s->op.cl, btree_read_async, NULL, cl);
+
+	continue_at(cl, request_read_done_bh, NULL);
+}
+
+/* Process writes */
+
+static void cached_dev_write_complete(struct closure *cl)
+{
+	struct search *s = container_of(cl, struct search, cl);
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+
+	up_read_non_owner(&dc->writeback_lock);
+	cached_dev_bio_complete(cl);
+}
+
+static bool should_writeback(struct cached_dev *dc, struct bio *bio)
+{
+	unsigned threshold = (bio->bi_rw & REQ_SYNC)
+		? CUTOFF_WRITEBACK_SYNC
+		: CUTOFF_WRITEBACK;
+
+	return !atomic_read(&dc->disk.detaching) &&
+		cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
+		dc->disk.c->gc_stats.in_use < threshold;
+}
+
+static void request_write(struct cached_dev *dc, struct search *s)
+{
+	struct closure *cl = &s->cl;
+	struct bio *bio = &s->bio.bio;
+	struct bkey start, end;
+	start = KEY(dc->disk.id, bio->bi_sector, 0);
+	end = KEY(dc->disk.id, bio_end(bio), 0);
+
+	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
+
+	check_should_skip(dc, s);
+	down_read_non_owner(&dc->writeback_lock);
+
+	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
+		s->op.skip	= false;
+		s->writeback	= true;
+	}
+
+	if (bio->bi_rw & REQ_DISCARD)
+		goto skip;
+
+	if (s->op.skip)
+		goto skip;
+
+	if (should_writeback(dc, s->orig_bio))
+		s->writeback = true;
+
+	if (!s->writeback) {
+		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+						   dc->disk.bio_split);
+
+		trace_bcache_writethrough(s->orig_bio);
+		closure_bio_submit(bio, cl, s->d);
+	} else {
+		s->op.cache_bio = bio;
+		trace_bcache_writeback(s->orig_bio);
+		bch_writeback_add(dc, bio_sectors(bio));
+	}
+out:
+	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+	continue_at(cl, cached_dev_write_complete, NULL);
+skip:
+	s->op.skip = true;
+	s->op.cache_bio = s->orig_bio;
+	bio_get(s->op.cache_bio);
+	trace_bcache_write_skip(s->orig_bio);
+
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
+		goto out;
+
+	closure_bio_submit(bio, cl, s->d);
+	goto out;
+}
+
+static void request_nodata(struct cached_dev *dc, struct search *s)
+{
+	struct closure *cl = &s->cl;
+	struct bio *bio = &s->bio.bio;
+
+	if (bio->bi_rw & REQ_DISCARD) {
+		request_write(dc, s);
+		return;
+	}
+
+	if (s->op.flush_journal)
+		bch_journal_meta(s->op.c, cl);
+
+	closure_bio_submit(bio, cl, s->d);
+
+	continue_at(cl, cached_dev_bio_complete, NULL);
+}
+
+/* Cached devices - read & write stuff */
+
+int bch_get_congested(struct cache_set *c)
+{
+	int i;
+
+	if (!c->congested_read_threshold_us &&
+	    !c->congested_write_threshold_us)
+		return 0;
+
+	i = (local_clock_us() - c->congested_last_us) / 1024;
+	if (i < 0)
+		return 0;
+
+	i += atomic_read(&c->congested);
+	if (i >= 0)
+		return 0;
+
+	i += CONGESTED_MAX;
+
+	return i <= 0 ? 1 : fract_exp_two(i, 6);
+}
+
+static void add_sequential(struct task_struct *t)
+{
+	ewma_add(t->sequential_io_avg,
+		 t->sequential_io, 8, 0);
+
+	t->sequential_io = 0;
+}
+
+static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
+{
+	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
+}
+
+static void check_should_skip(struct cached_dev *dc, struct search *s)
+{
+	struct cache_set *c = s->op.c;
+	struct bio *bio = &s->bio.bio;
+
+	long rand;
+	int cutoff = bch_get_congested(c);
+	unsigned mode = cache_mode(dc, bio);
+
+	if (atomic_read(&dc->disk.detaching) ||
+	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
+	    (bio->bi_rw & REQ_DISCARD))
+		goto skip;
+
+	if (mode == CACHE_MODE_NONE ||
+	    (mode == CACHE_MODE_WRITEAROUND &&
+	     (bio->bi_rw & REQ_WRITE)))
+		goto skip;
+
+	if (bio->bi_sector   & (c->sb.block_size - 1) ||
+	    bio_sectors(bio) & (c->sb.block_size - 1)) {
+		pr_debug("skipping unaligned io");
+		goto skip;
+	}
+
+	if (!cutoff) {
+		cutoff = dc->sequential_cutoff >> 9;
+
+		if (!cutoff)
+			goto rescale;
+
+		if (mode == CACHE_MODE_WRITEBACK &&
+		    (bio->bi_rw & REQ_WRITE) &&
+		    (bio->bi_rw & REQ_SYNC))
+			goto rescale;
+	}
+
+	if (dc->sequential_merge) {
+		struct io *i;
+
+		spin_lock(&dc->io_lock);
+
+		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+			if (i->last == bio->bi_sector &&
+			    time_before(jiffies, i->jiffies))
+				goto found;
+
+		i = list_first_entry(&dc->io_lru, struct io, lru);
+
+		add_sequential(s->task);
+		i->sequential = 0;
+found:
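+		/* i->sequential is unsigned, so guard against it wrapping: */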
+		if (i->sequential + bio->bi_size > i->sequential)
+			i->sequential	+= bio->bi_size;
+
+		i->last			 = bio_end(bio);
+		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
+		s->task->sequential_io	 = i->sequential;
+
+		hlist_del(&i->hash);
+		hlist_add_head(&i->hash, iohash(dc, i->last));
+		list_move_tail(&i->lru, &dc->io_lru);
+
+		spin_unlock(&dc->io_lock);
+	} else {
+		s->task->sequential_io = bio->bi_size;
+
+		add_sequential(s->task);
+	}
+
+	rand = get_random_int();
+	cutoff -= bitmap_weight(&rand, BITS_PER_LONG);
+
+	if (cutoff <= (int) (max(s->task->sequential_io,
+				 s->task->sequential_io_avg) >> 9))
+		goto skip;
+
+rescale:
+	bch_rescale_priorities(c, bio_sectors(bio));
+	return;
+skip:
+	bch_mark_sectors_bypassed(s, bio_sectors(bio));
+	s->op.skip = true;
+}
+
+static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct search *s;
+	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+	int cpu, rw = bio_data_dir(bio);
+
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
+	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
+	part_stat_unlock();
+
+	bio->bi_bdev = dc->bdev;
+	bio->bi_sector += dc->sb.data_offset;
+
+	if (cached_dev_get(dc)) {
+		s = search_alloc(bio, d);
+		trace_bcache_request_start(s, bio);
+
+		if (!bio_has_data(bio))
+			request_nodata(dc, s);
+		else if (rw)
+			request_write(dc, s);
+		else
+			request_read(dc, s);
+	} else {
+		if ((bio->bi_rw & REQ_DISCARD) &&
+		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
+			bio_endio(bio, 0);
+		else
+			bch_generic_make_request(bio, &d->bio_split_hook);
+	}
+}
+
+static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
+			    unsigned int cmd, unsigned long arg)
+{
+	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
+}
+
+static int cached_dev_congested(void *data, int bits)
+{
+	struct bcache_device *d = data;
+	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+	struct request_queue *q = bdev_get_queue(dc->bdev);
+	int ret = 0;
+
+	if (bdi_congested(&q->backing_dev_info, bits))
+		return 1;
+
+	if (cached_dev_get(dc)) {
+		unsigned i;
+		struct cache *ca;
+
+		for_each_cache(ca, d->c, i) {
+			q = bdev_get_queue(ca->bdev);
+			ret |= bdi_congested(&q->backing_dev_info, bits);
+		}
+
+		cached_dev_put(dc);
+	}
+
+	return ret;
+}
+
+void bch_cached_dev_request_init(struct cached_dev *dc)
+{
+	struct gendisk *g = dc->disk.disk;
+
+	g->queue->make_request_fn		= cached_dev_make_request;
+	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+	dc->disk.cache_miss			= cached_dev_cache_miss;
+	dc->disk.ioctl				= cached_dev_ioctl;
+}
+
+/* Flash backed devices */
+
+static int flash_dev_cache_miss(struct btree *b, struct search *s,
+				struct bio *bio, unsigned sectors)
+{
+	/* Zero fill bio */
+
+	while (bio->bi_idx != bio->bi_vcnt) {
+		struct bio_vec *bv = bio_iovec(bio);
+		unsigned j = min(bv->bv_len >> 9, sectors);
+
+		void *p = kmap(bv->bv_page);
+		memset(p + bv->bv_offset, 0, j << 9);
+		kunmap(bv->bv_page);
+
+		bv->bv_len	-= j << 9;
+		bv->bv_offset	+= j << 9;
+
+		if (bv->bv_len)
+			return 0;
+
+		bio->bi_sector	+= j;
+		bio->bi_size	-= j << 9;
+
+		bio->bi_idx++;
+		sectors		-= j;
+	}
+
+	s->op.lookup_done = true;
+
+	return 0;
+}
+
+static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct search *s;
+	struct closure *cl;
+	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+	int cpu, rw = bio_data_dir(bio);
+
+	cpu = part_stat_lock();
+	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
+	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
+	part_stat_unlock();
+
+	s = search_alloc(bio, d);
+	cl = &s->cl;
+	bio = &s->bio.bio;
+
+	trace_bcache_request_start(s, bio);
+
+	if (bio_has_data(bio) && !rw) {
+		closure_call(&s->op.cl, btree_read_async, NULL, cl);
+	} else if (bio_has_data(bio) || s->op.skip) {
+		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
+					     &KEY(d->id, bio->bi_sector, 0),
+					     &KEY(d->id, bio_end(bio), 0));
+
+		s->writeback	= true;
+		s->op.cache_bio	= bio;
+
+		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+	} else {
+		/* No data - probably a cache flush */
+		if (s->op.flush_journal)
+			bch_journal_meta(s->op.c, cl);
+	}
+
+	continue_at(cl, search_free, NULL);
+}
+
+static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
+			   unsigned int cmd, unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+static int flash_dev_congested(void *data, int bits)
+{
+	struct bcache_device *d = data;
+	struct request_queue *q;
+	struct cache *ca;
+	unsigned i;
+	int ret = 0;
+
+	for_each_cache(ca, d->c, i) {
+		q = bdev_get_queue(ca->bdev);
+		ret |= bdi_congested(&q->backing_dev_info, bits);
+	}
+
+	return ret;
+}
+
+void bch_flash_dev_request_init(struct bcache_device *d)
+{
+	struct gendisk *g = d->disk;
+
+	g->queue->make_request_fn		= flash_dev_make_request;
+	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+	d->cache_miss				= flash_dev_cache_miss;
+	d->ioctl				= flash_dev_ioctl;
+}
+
+void bch_request_exit(void)
+{
+#ifdef CONFIG_CGROUP_BCACHE
+	cgroup_unload_subsys(&bcache_subsys);
+#endif
+	if (bch_search_cache)
+		kmem_cache_destroy(bch_search_cache);
+}
+
+int __init bch_request_init(void)
+{
+	bch_search_cache = KMEM_CACHE(search, 0);
+	if (!bch_search_cache)
+		return -ENOMEM;
+
+#ifdef CONFIG_CGROUP_BCACHE
+	cgroup_load_subsys(&bcache_subsys);
+	init_bch_cgroup(&bcache_default_cgroup);
+
+	cgroup_add_cftypes(&bcache_subsys, bch_files);
+#endif
+	return 0;
+}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
new file mode 100644
index 0000000..254d9ab
--- /dev/null
+++ b/drivers/md/bcache/request.h
@@ -0,0 +1,62 @@
+#ifndef _BCACHE_REQUEST_H_
+#define _BCACHE_REQUEST_H_
+
+#include <linux/cgroup.h>
+
+struct search {
+	/* Stack frame for bio_complete */
+	struct closure		cl;
+
+	struct bcache_device	*d;
+	struct task_struct	*task;
+
+	struct bbio		bio;
+	struct bio		*orig_bio;
+	struct bio		*cache_miss;
+	unsigned		cache_bio_sectors;
+
+	unsigned		recoverable:1;
+	unsigned		unaligned_bvec:1;
+
+	unsigned		write:1;
+	unsigned		writeback:1;
+
+	/* IO error returned to s->bio */
+	short			error;
+	unsigned long		start_time;
+
+	/* Anything past op->keys won't get zeroed in search_alloc() */
+	struct btree_op		op;
+};
+
+void bch_cache_read_endio(struct bio *, int);
+int bch_get_congested(struct cache_set *);
+void bch_insert_data(struct closure *cl);
+void bch_btree_insert_async(struct closure *);
+
+void bch_open_buckets_free(struct cache_set *);
+int bch_open_buckets_alloc(struct cache_set *);
+
+void bch_cached_dev_request_init(struct cached_dev *dc);
+void bch_flash_dev_request_init(struct bcache_device *d);
+
+extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
+
+struct bch_cgroup {
+#ifdef CONFIG_CGROUP_BCACHE
+	struct cgroup_subsys_state	css;
+#endif
+	/*
+	 * We subtract one from the index into bch_cache_modes[], so that
+	 * default == -1; this makes it so the rest match up with d->cache_mode,
+	 * and we use d->cache_mode if cgrp->cache_mode < 0
+	 */
+	short				cache_mode;
+	bool				verify;
+	struct cache_stat_collector	stats;
+};
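+
+/*
+ * Example of the offset (per the comment above): writing "writethrough" to a
+ * cgroup's cache_mode file stores bch_cache_modes[] index 1 minus one, i.e. 0,
+ * matching the bdev's writethrough mode; writing "default" stores -1, so
+ * cache_mode() in request.c falls back to BDEV_CACHE_MODE(&dc->sb).
+ */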
+
+struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
+
+#endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
new file mode 100644
index 0000000..64e6794
--- /dev/null
+++ b/drivers/md/bcache/stats.c
@@ -0,0 +1,246 @@
+/*
+ * bcache stats code
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "stats.h"
+#include "btree.h"
+#include "request.h"
+#include "sysfs.h"
+
+/*
+ * We keep absolute totals of various statistics, and additionally a set of
+ * three rolling averages.
+ *
+ * Every so often, a timer goes off and rescales the rolling averages.
+ * The *_RESCALE constants are how many times the timer has to go off before
+ * we rescale each set of numbers; that gets us half lives of 5 minutes, one
+ * hour, and one day.
+ *
+ * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
+ * and accounting_weight is what we use to rescale:
+ *
+ * pow(31 / 32, 22) ~= 1/2
+ *
+ * So that we don't have to increment each set of numbers every time we (say)
+ * get a cache hit, we increment a single atomic_t in acc->collector, and when
+ * the rescale function runs it resets the atomic counter to 0 and adds its
+ * old value to each of the exported numbers.
+ *
+ * To reduce rounding error, the numbers in struct cache_stats are all
+ * stored left shifted by 16, and scaled back in the sysfs show() function.
+ */
+
+static const unsigned DAY_RESCALE		= 288;
+static const unsigned HOUR_RESCALE		= 12;
+static const unsigned FIVE_MINUTE_RESCALE	= 1;
+static const unsigned accounting_delay		= (HZ * 300) / 22;
+static const unsigned accounting_weight		= 32;
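+
+/*
+ * Worked example of the numbers above: the five minute stats rescale on
+ * every timer tick (FIVE_MINUTE_RESCALE == 1), i.e. 22 times per 300
+ * seconds, and each rescale multiplies by 31/32 - so over 5 idle minutes a
+ * counter decays by (31/32)^22 ~= 1/2. The hour and day stats rescale every
+ * 12th and 288th tick, which likewise works out to 22 rescales per hour
+ * (3600s / (300s/22 * 12)) and per day (86400s / (300s/22 * 288)).
+ */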
+
+/* sysfs reading/writing */
+
+read_attribute(cache_hits);
+read_attribute(cache_misses);
+read_attribute(cache_bypass_hits);
+read_attribute(cache_bypass_misses);
+read_attribute(cache_hit_ratio);
+read_attribute(cache_readaheads);
+read_attribute(cache_miss_collisions);
+read_attribute(bypassed);
+
+SHOW(bch_stats)
+{
+	struct cache_stats *s =
+		container_of(kobj, struct cache_stats, kobj);
+#define var(stat)		(s->stat >> 16)
+	var_print(cache_hits);
+	var_print(cache_misses);
+	var_print(cache_bypass_hits);
+	var_print(cache_bypass_misses);
+
+	sysfs_print(cache_hit_ratio,
+		    DIV_SAFE(var(cache_hits) * 100,
+			     var(cache_hits) + var(cache_misses)));
+
+	var_print(cache_readaheads);
+	var_print(cache_miss_collisions);
+	sysfs_hprint(bypassed,	var(sectors_bypassed) << 9);
+#undef var
+	return 0;
+}
+
+STORE(bch_stats)
+{
+	return size;
+}
+
+static void bch_stats_release(struct kobject *k)
+{
+}
+
+static struct attribute *bch_stats_files[] = {
+	&sysfs_cache_hits,
+	&sysfs_cache_misses,
+	&sysfs_cache_bypass_hits,
+	&sysfs_cache_bypass_misses,
+	&sysfs_cache_hit_ratio,
+	&sysfs_cache_readaheads,
+	&sysfs_cache_miss_collisions,
+	&sysfs_bypassed,
+	NULL
+};
+static KTYPE(bch_stats);
+
+static void scale_accounting(unsigned long data);
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+			       struct closure *parent)
+{
+	kobject_init(&acc->total.kobj,		&bch_stats_ktype);
+	kobject_init(&acc->five_minute.kobj,	&bch_stats_ktype);
+	kobject_init(&acc->hour.kobj,		&bch_stats_ktype);
+	kobject_init(&acc->day.kobj,		&bch_stats_ktype);
+
+	closure_init(&acc->cl, parent);
+	init_timer(&acc->timer);
+	acc->timer.expires	= jiffies + accounting_delay;
+	acc->timer.data		= (unsigned long) acc;
+	acc->timer.function	= scale_accounting;
+	add_timer(&acc->timer);
+}
+
+int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+				   struct kobject *parent)
+{
+	int ret = kobject_add(&acc->total.kobj, parent,
+			      "stats_total");
+	ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
+				 "stats_five_minute");
+	ret = ret ?: kobject_add(&acc->hour.kobj, parent,
+				 "stats_hour");
+	ret = ret ?: kobject_add(&acc->day.kobj, parent,
+				 "stats_day");
+	return ret;
+}
+
+void bch_cache_accounting_clear(struct cache_accounting *acc)
+{
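+	/* Zero just the seven exported counters; kobj and rescale are kept */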
+	memset(&acc->total.cache_hits,
+	       0,
+	       sizeof(unsigned long) * 7);
+}
+
+void bch_cache_accounting_destroy(struct cache_accounting *acc)
+{
+	kobject_put(&acc->total.kobj);
+	kobject_put(&acc->five_minute.kobj);
+	kobject_put(&acc->hour.kobj);
+	kobject_put(&acc->day.kobj);
+
+	atomic_set(&acc->closing, 1);
+	if (del_timer_sync(&acc->timer))
+		closure_return(&acc->cl);
+}
+
+/* EWMA scaling */
+
+static void scale_stat(unsigned long *stat)
+{
+	*stat =  ewma_add(*stat, 0, accounting_weight, 0);
+}
+
+static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
+{
+	if (++stats->rescale == rescale_at) {
+		stats->rescale = 0;
+		scale_stat(&stats->cache_hits);
+		scale_stat(&stats->cache_misses);
+		scale_stat(&stats->cache_bypass_hits);
+		scale_stat(&stats->cache_bypass_misses);
+		scale_stat(&stats->cache_readaheads);
+		scale_stat(&stats->cache_miss_collisions);
+		scale_stat(&stats->sectors_bypassed);
+	}
+}
+
+static void scale_accounting(unsigned long data)
+{
+	struct cache_accounting *acc = (struct cache_accounting *) data;
+
+#define move_stat(name) do {						\
+	unsigned t = atomic_xchg(&acc->collector.name, 0);		\
+	t <<= 16;							\
+	acc->five_minute.name += t;					\
+	acc->hour.name += t;						\
+	acc->day.name += t;						\
+	acc->total.name += t;						\
+} while (0)
+
+	move_stat(cache_hits);
+	move_stat(cache_misses);
+	move_stat(cache_bypass_hits);
+	move_stat(cache_bypass_misses);
+	move_stat(cache_readaheads);
+	move_stat(cache_miss_collisions);
+	move_stat(sectors_bypassed);
+
+	scale_stats(&acc->total, 0);
+	scale_stats(&acc->day, DAY_RESCALE);
+	scale_stats(&acc->hour, HOUR_RESCALE);
+	scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);
+
+	acc->timer.expires += accounting_delay;
+
+	if (!atomic_read(&acc->closing))
+		add_timer(&acc->timer);
+	else
+		closure_return(&acc->cl);
+}
+
+static void mark_cache_stats(struct cache_stat_collector *stats,
+			     bool hit, bool bypass)
+{
+	if (!bypass) {
+		if (hit)
+			atomic_inc(&stats->cache_hits);
+		else
+			atomic_inc(&stats->cache_misses);
+	} else {
+		if (hit)
+			atomic_inc(&stats->cache_bypass_hits);
+		else
+			atomic_inc(&stats->cache_bypass_misses);
+	}
+}
+
+void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
+{
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	mark_cache_stats(&dc->accounting.collector, hit, bypass);
+	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+#ifdef CONFIG_CGROUP_BCACHE
+	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
+#endif
+}
+
+void bch_mark_cache_readahead(struct search *s)
+{
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	atomic_inc(&dc->accounting.collector.cache_readaheads);
+	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+}
+
+void bch_mark_cache_miss_collision(struct search *s)
+{
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
+	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+}
+
+void bch_mark_sectors_bypassed(struct search *s, int sectors)
+{
+	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
+	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+}
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
new file mode 100644
index 0000000..c7c7a8f
--- /dev/null
+++ b/drivers/md/bcache/stats.h
@@ -0,0 +1,58 @@
+#ifndef _BCACHE_STATS_H_
+#define _BCACHE_STATS_H_
+
+struct cache_stat_collector {
+	atomic_t cache_hits;
+	atomic_t cache_misses;
+	atomic_t cache_bypass_hits;
+	atomic_t cache_bypass_misses;
+	atomic_t cache_readaheads;
+	atomic_t cache_miss_collisions;
+	atomic_t sectors_bypassed;
+};
+
+struct cache_stats {
+	struct kobject		kobj;
+
+	unsigned long cache_hits;
+	unsigned long cache_misses;
+	unsigned long cache_bypass_hits;
+	unsigned long cache_bypass_misses;
+	unsigned long cache_readaheads;
+	unsigned long cache_miss_collisions;
+	unsigned long sectors_bypassed;
+
+	unsigned		rescale;
+};
+
+struct cache_accounting {
+	struct closure		cl;
+	struct timer_list	timer;
+	atomic_t		closing;
+
+	struct cache_stat_collector collector;
+
+	struct cache_stats total;
+	struct cache_stats five_minute;
+	struct cache_stats hour;
+	struct cache_stats day;
+};
+
+struct search;
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+			       struct closure *parent);
+
+int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+				   struct kobject *parent);
+
+void bch_cache_accounting_clear(struct cache_accounting *acc);
+
+void bch_cache_accounting_destroy(struct cache_accounting *acc);
+
+void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
+void bch_mark_cache_readahead(struct search *s);
+void bch_mark_cache_miss_collision(struct search *s);
+void bch_mark_sectors_bypassed(struct search *s, int sectors);
+
+#endif /* _BCACHE_STATS_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
new file mode 100644
index 0000000..c8046bc
--- /dev/null
+++ b/drivers/md/bcache/super.c
@@ -0,0 +1,1987 @@
+/*
+ * bcache setup/teardown code, and some metadata io - read a superblock and
+ * figure out what to do with it.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+#include "request.h"
+
+#include <linux/buffer_head.h>
+#include <linux/debugfs.h>
+#include <linux/genhd.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/sysfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
+
+static const char bcache_magic[] = {
+	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
+	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
+};
+
+static const char invalid_uuid[] = {
+	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
+	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
+};
+
+/* Default is -1; we skip past it for struct cached_dev's cache mode */
+const char * const bch_cache_modes[] = {
+	"default",
+	"writethrough",
+	"writeback",
+	"writearound",
+	"none",
+	NULL
+};
+
+struct uuid_entry_v0 {
+	uint8_t		uuid[16];
+	uint8_t		label[32];
+	uint32_t	first_reg;
+	uint32_t	last_reg;
+	uint32_t	invalidated;
+	uint32_t	pad;
+};
+
+static struct kobject *bcache_kobj;
+struct mutex bch_register_lock;
+LIST_HEAD(bch_cache_sets);
+static LIST_HEAD(uncached_devices);
+
+static int bcache_major, bcache_minor;
+static wait_queue_head_t unregister_wait;
+struct workqueue_struct *bcache_wq;
+
+#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
+
+static void bio_split_pool_free(struct bio_split_pool *p)
+{
+	if (p->bio_split_hook)
+		mempool_destroy(p->bio_split_hook);
+
+	if (p->bio_split)
+		bioset_free(p->bio_split);
+}
+
+static int bio_split_pool_init(struct bio_split_pool *p)
+{
+	p->bio_split = bioset_create(4, 0);
+	if (!p->bio_split)
+		return -ENOMEM;
+
+	p->bio_split_hook = mempool_create_kmalloc_pool(4,
+				sizeof(struct bio_split_hook));
+	if (!p->bio_split_hook)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Superblock */
+
+static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
+			      struct page **res)
+{
+	const char *err;
+	struct cache_sb *s;
+	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
+	unsigned i;
+
+	if (!bh)
+		return "IO error";
+
+	s = (struct cache_sb *) bh->b_data;
+
+	sb->offset		= le64_to_cpu(s->offset);
+	sb->version		= le64_to_cpu(s->version);
+
+	memcpy(sb->magic,	s->magic, 16);
+	memcpy(sb->uuid,	s->uuid, 16);
+	memcpy(sb->set_uuid,	s->set_uuid, 16);
+	memcpy(sb->label,	s->label, SB_LABEL_SIZE);
+
+	sb->flags		= le64_to_cpu(s->flags);
+	sb->seq			= le64_to_cpu(s->seq);
+	sb->last_mount		= le32_to_cpu(s->last_mount);
+	sb->first_bucket	= le16_to_cpu(s->first_bucket);
+	sb->keys		= le16_to_cpu(s->keys);
+
+	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
+		sb->d[i] = le64_to_cpu(s->d[i]);
+
+	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
+		 sb->version, sb->flags, sb->seq, sb->keys);
+
+	err = "Not a bcache superblock";
+	if (sb->offset != SB_SECTOR)
+		goto err;
+
+	if (memcmp(sb->magic, bcache_magic, 16))
+		goto err;
+
+	err = "Too many journal buckets";
+	if (sb->keys > SB_JOURNAL_BUCKETS)
+		goto err;
+
+	err = "Bad checksum";
+	if (s->csum != csum_set(s))
+		goto err;
+
+	err = "Bad UUID";
+	if (bch_is_zero(sb->uuid, 16))
+		goto err;
+
+	sb->block_size	= le16_to_cpu(s->block_size);
+
+	err = "Superblock block size smaller than device block size";
+	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
+		goto err;
+
+	switch (sb->version) {
+	case BCACHE_SB_VERSION_BDEV:
+		sb->data_offset	= BDEV_DATA_START_DEFAULT;
+		break;
+	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
+		sb->data_offset	= le64_to_cpu(s->data_offset);
+
+		err = "Bad data offset";
+		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
+			goto err;
+
+		break;
+	case BCACHE_SB_VERSION_CDEV:
+	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
+		sb->nbuckets	= le64_to_cpu(s->nbuckets);
+		sb->block_size	= le16_to_cpu(s->block_size);
+		sb->bucket_size	= le16_to_cpu(s->bucket_size);
+
+		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
+		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);
+
+		err = "Too many buckets";
+		if (sb->nbuckets > LONG_MAX)
+			goto err;
+
+		err = "Not enough buckets";
+		if (sb->nbuckets < 1 << 7)
+			goto err;
+
+		err = "Bad block/bucket size";
+		if (!is_power_of_2(sb->block_size) ||
+		    sb->block_size > PAGE_SECTORS ||
+		    !is_power_of_2(sb->bucket_size) ||
+		    sb->bucket_size < PAGE_SECTORS)
+			goto err;
+
+		err = "Invalid superblock: device too small";
+		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
+			goto err;
+
+		err = "Bad UUID";
+		if (bch_is_zero(sb->set_uuid, 16))
+			goto err;
+
+		err = "Bad cache device number in set";
+		if (!sb->nr_in_set ||
+		    sb->nr_in_set <= sb->nr_this_dev ||
+		    sb->nr_in_set > MAX_CACHES_PER_SET)
+			goto err;
+
+		err = "Journal buckets not sequential";
+		for (i = 0; i < sb->keys; i++)
+			if (sb->d[i] != sb->first_bucket + i)
+				goto err;
+
+		err = "Too many journal buckets";
+		if (sb->first_bucket + sb->keys > sb->nbuckets)
+			goto err;
+
+		err = "Invalid superblock: first bucket comes before end of super";
+		if (sb->first_bucket * sb->bucket_size < 16)
+			goto err;
+
+		break;
+	default:
+		err = "Unsupported superblock version";
+		goto err;
+	}
+
+	sb->last_mount = get_seconds();
+	err = NULL;
+
+	get_page(bh->b_page);
+	*res = bh->b_page;
+err:
+	put_bh(bh);
+	return err;
+}
+
+static void write_bdev_super_endio(struct bio *bio, int error)
+{
+	struct cached_dev *dc = bio->bi_private;
+	/* XXX: error checking */
+
+	closure_put(&dc->sb_write.cl);
+}
+
+static void __write_super(struct cache_sb *sb, struct bio *bio)
+{
+	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+	unsigned i;
+
+	bio->bi_sector	= SB_SECTOR;
+	bio->bi_rw	= REQ_SYNC|REQ_META;
+	bio->bi_size	= SB_SIZE;
+	bch_bio_map(bio, NULL);
+
+	out->offset		= cpu_to_le64(sb->offset);
+	out->version		= cpu_to_le64(sb->version);
+
+	memcpy(out->uuid,	sb->uuid, 16);
+	memcpy(out->set_uuid,	sb->set_uuid, 16);
+	memcpy(out->label,	sb->label, SB_LABEL_SIZE);
+
+	out->flags		= cpu_to_le64(sb->flags);
+	out->seq		= cpu_to_le64(sb->seq);
+
+	out->last_mount		= cpu_to_le32(sb->last_mount);
+	out->first_bucket	= cpu_to_le16(sb->first_bucket);
+	out->keys		= cpu_to_le16(sb->keys);
+
+	for (i = 0; i < sb->keys; i++)
+		out->d[i] = cpu_to_le64(sb->d[i]);
+
+	out->csum = csum_set(out);
+
+	pr_debug("ver %llu, flags %llu, seq %llu",
+		 sb->version, sb->flags, sb->seq);
+
+	submit_bio(REQ_WRITE, bio);
+}
+
+void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
+{
+	struct closure *cl = &dc->sb_write.cl;
+	struct bio *bio = &dc->sb_bio;
+
+	closure_lock(&dc->sb_write, parent);
+
+	bio_reset(bio);
+	bio->bi_bdev	= dc->bdev;
+	bio->bi_end_io	= write_bdev_super_endio;
+	bio->bi_private = dc;
+
+	closure_get(cl);
+	__write_super(&dc->sb, bio);
+
+	closure_return(cl);
+}
+
+static void write_super_endio(struct bio *bio, int error)
+{
+	struct cache *ca = bio->bi_private;
+
+	bch_count_io_errors(ca, error, "writing superblock");
+	closure_put(&ca->set->sb_write.cl);
+}
+
+void bcache_write_super(struct cache_set *c)
+{
+	struct closure *cl = &c->sb_write.cl;
+	struct cache *ca;
+	unsigned i;
+
+	closure_lock(&c->sb_write, &c->cl);
+
+	c->sb.seq++;
+
+	for_each_cache(ca, c, i) {
+		struct bio *bio = &ca->sb_bio;
+
+		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
+		ca->sb.seq		= c->sb.seq;
+		ca->sb.last_mount	= c->sb.last_mount;
+
+		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+
+		bio_reset(bio);
+		bio->bi_bdev	= ca->bdev;
+		bio->bi_end_io	= write_super_endio;
+		bio->bi_private = ca;
+
+		closure_get(cl);
+		__write_super(&ca->sb, bio);
+	}
+
+	closure_return(cl);
+}
+
+/* UUID io */
+
+static void uuid_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+	struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+
+	cache_set_err_on(error, c, "accessing uuids");
+	bch_bbio_free(bio, c);
+	closure_put(cl);
+}
+
+static void uuid_io(struct cache_set *c, unsigned long rw,
+		    struct bkey *k, struct closure *parent)
+{
+	struct closure *cl = &c->uuid_write.cl;
+	struct uuid_entry *u;
+	unsigned i;
+
+	BUG_ON(!parent);
+	closure_lock(&c->uuid_write, parent);
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		struct bio *bio = bch_bbio_alloc(c);
+
+		bio->bi_rw	= REQ_SYNC|REQ_META|rw;
+		bio->bi_size	= KEY_SIZE(k) << 9;
+
+		bio->bi_end_io	= uuid_endio;
+		bio->bi_private = cl;
+		bch_bio_map(bio, c->uuids);
+
+		bch_submit_bbio(bio, c, k, i);
+
+		if (!(rw & WRITE))
+			break;
+	}
+
+	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
+		 pkey(&c->uuid_bucket));
+
+	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
+		if (!bch_is_zero(u->uuid, 16))
+			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
+				 u - c->uuids, u->uuid, u->label,
+				 u->first_reg, u->last_reg, u->invalidated);
+
+	closure_return(cl);
+}
+
+static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
+{
+	struct bkey *k = &j->uuid_bucket;
+
+	if (__bch_ptr_invalid(c, 1, k))
+		return "bad uuid pointer";
+
+	bkey_copy(&c->uuid_bucket, k);
+	uuid_io(c, READ_SYNC, k, cl);
+
+	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
+		struct uuid_entry_v0	*u0 = (void *) c->uuids;
+		struct uuid_entry	*u1 = (void *) c->uuids;
+		int i;
+
+		closure_sync(cl);
+
+		/*
+		 * Since the new uuid entry is bigger than the old, we have to
+		 * convert starting at the highest memory address and work down
+		 * in order to do it in place
+		 */
+
+		for (i = c->nr_uuids - 1;
+		     i >= 0;
+		     --i) {
+			memcpy(u1[i].uuid,	u0[i].uuid, 16);
+			memcpy(u1[i].label,	u0[i].label, 32);
+
+			u1[i].first_reg		= u0[i].first_reg;
+			u1[i].last_reg		= u0[i].last_reg;
+			u1[i].invalidated	= u0[i].invalidated;
+
+			u1[i].flags	= 0;
+			u1[i].sectors	= 0;
+		}
+	}
+
+	return NULL;
+}
+
+static int __uuid_write(struct cache_set *c)
+{
+	BKEY_PADDED(key) k;
+	struct closure cl;
+	closure_init_stack(&cl);
+
+	lockdep_assert_held(&bch_register_lock);
+
+	if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, &cl))
+		return 1;
+
+	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
+	uuid_io(c, REQ_WRITE, &k.key, &cl);
+	closure_sync(&cl);
+
+	bkey_copy(&c->uuid_bucket, &k.key);
+	__bkey_put(c, &k.key);
+	return 0;
+}
+
+int bch_uuid_write(struct cache_set *c)
+{
+	int ret = __uuid_write(c);
+
+	if (!ret)
+		bch_journal_meta(c, NULL);
+
+	return ret;
+}
+
+static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
+{
+	struct uuid_entry *u;
+
+	for (u = c->uuids;
+	     u < c->uuids + c->nr_uuids; u++)
+		if (!memcmp(u->uuid, uuid, 16))
+			return u;
+
+	return NULL;
+}
+
+static struct uuid_entry *uuid_find_empty(struct cache_set *c)
+{
+	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+	return uuid_find(c, zero_uuid);
+}
+
+/*
+ * Bucket priorities/gens:
+ *
+ * For each bucket, we store on disk its
+ *  - 8 bit gen
+ *  - 16 bit priority
+ *
+ * See alloc.c for an explanation of the gen. The priority is used to implement
+ * lru (and in the future other) cache replacement policies; for most purposes
+ * it's just an opaque integer.
+ *
+ * The gens and the priorities don't have a whole lot to do with each other, and
+ * it's actually the gens that must be written out at specific times - it's no
+ * big deal if the priorities don't get written; if we lose them we just reuse
+ * buckets in suboptimal order.
+ *
+ * On disk they're stored in a packed array, in as many buckets as are required
+ * to fit them all. The buckets we use to store them form a list; the journal
+ * header points to the first bucket, the first bucket points to the second
+ * bucket, et cetera.
+ *
+ * This code is used by the allocation code; periodically (whenever it runs out
+ * of buckets to allocate from) the allocation code will invalidate some
+ * buckets, but it can't use those buckets until their new gens are safely on
+ * disk.
+ */
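+
+/*
+ * Sketch of the resulting layout (bucket numbers are made up): with two prio
+ * buckets in use,
+ *
+ *	journal header ---> bucket 17
+ *	bucket 17: struct prio_set { ..., next_bucket = 42,
+ *			data[]: prios/gens for buckets 0..N-1 }
+ *	bucket 42: struct prio_set { ..., next_bucket,
+ *			data[]: prios/gens for buckets N.. }
+ *
+ * where N == prios_per_bucket(ca); prio_read() below follows the chain,
+ * stopping once every bucket's prio/gen has been filled in.
+ */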
+
+static void prio_endio(struct bio *bio, int error)
+{
+	struct cache *ca = bio->bi_private;
+
+	cache_set_err_on(error, ca->set, "accessing priorities");
+	bch_bbio_free(bio, ca->set);
+	closure_put(&ca->prio);
+}
+
+static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
+{
+	struct closure *cl = &ca->prio;
+	struct bio *bio = bch_bbio_alloc(ca->set);
+
+	closure_init_stack(cl);
+
+	bio->bi_sector	= bucket * ca->sb.bucket_size;
+	bio->bi_bdev	= ca->bdev;
+	bio->bi_rw	= REQ_SYNC|REQ_META|rw;
+	bio->bi_size	= bucket_bytes(ca);
+
+	bio->bi_end_io	= prio_endio;
+	bio->bi_private = ca;
+	bch_bio_map(bio, ca->disk_buckets);
+
+	closure_bio_submit(bio, &ca->prio, ca);
+	closure_sync(cl);
+}
+
+#define buckets_free(c)	"free %zu, free_inc %zu, unused %zu",		\
+	fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused)
+
+void bch_prio_write(struct cache *ca)
+{
+	int i;
+	struct bucket *b;
+	struct closure cl;
+
+	closure_init_stack(&cl);
+
+	lockdep_assert_held(&ca->set->bucket_lock);
+
+	for (b = ca->buckets;
+	     b < ca->buckets + ca->sb.nbuckets; b++)
+		b->disk_gen = b->gen;
+
+	ca->disk_buckets->seq++;
+
+	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
+			&ca->meta_sectors_written);
+
+	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
+
+	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
+		long bucket;
+		struct prio_set *p = ca->disk_buckets;
+		struct bucket_disk *d = p->data;
+		struct bucket_disk *end = d + prios_per_bucket(ca);
+
+		for (b = ca->buckets + i * prios_per_bucket(ca);
+		     b < ca->buckets + ca->sb.nbuckets && d < end;
+		     b++, d++) {
+			d->prio = cpu_to_le16(b->prio);
+			d->gen = b->gen;
+		}
+
+		p->next_bucket	= ca->prio_buckets[i + 1];
+		p->magic	= pset_magic(ca);
+		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);
+
+		bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, &cl);
+		BUG_ON(bucket == -1);
+
+		mutex_unlock(&ca->set->bucket_lock);
+		prio_io(ca, bucket, REQ_WRITE);
+		mutex_lock(&ca->set->bucket_lock);
+
+		ca->prio_buckets[i] = bucket;
+		atomic_dec_bug(&ca->buckets[bucket].pin);
+	}
+
+	mutex_unlock(&ca->set->bucket_lock);
+
+	bch_journal_meta(ca->set, &cl);
+	closure_sync(&cl);
+
+	mutex_lock(&ca->set->bucket_lock);
+
+	ca->need_save_prio = 0;
+
+	/*
+	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones and they've been journalled
+	 */
+	for (i = 0; i < prio_buckets(ca); i++)
+		ca->prio_last_buckets[i] = ca->prio_buckets[i];
+}
+
+static void prio_read(struct cache *ca, uint64_t bucket)
+{
+	struct prio_set *p = ca->disk_buckets;
+	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
+	struct bucket *b;
+	unsigned bucket_nr = 0;
+
+	for (b = ca->buckets;
+	     b < ca->buckets + ca->sb.nbuckets;
+	     b++, d++) {
+		if (d == end) {
+			ca->prio_buckets[bucket_nr] = bucket;
+			ca->prio_last_buckets[bucket_nr] = bucket;
+			bucket_nr++;
+
+			prio_io(ca, bucket, READ_SYNC);
+
+			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
+				pr_warn("bad csum reading priorities");
+
+			if (p->magic != pset_magic(ca))
+				pr_warn("bad magic reading priorities");
+
+			bucket = p->next_bucket;
+			d = p->data;
+		}
+
+		b->prio = le16_to_cpu(d->prio);
+		b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen;
+	}
+}
+
+/* Bcache device */
+
+static int open_dev(struct block_device *b, fmode_t mode)
+{
+	struct bcache_device *d = b->bd_disk->private_data;
+	if (atomic_read(&d->closing))
+		return -ENXIO;
+
+	closure_get(&d->cl);
+	return 0;
+}
+
+static int release_dev(struct gendisk *b, fmode_t mode)
+{
+	struct bcache_device *d = b->private_data;
+	closure_put(&d->cl);
+	return 0;
+}
+
+static int ioctl_dev(struct block_device *b, fmode_t mode,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct bcache_device *d = b->bd_disk->private_data;
+	return d->ioctl(d, mode, cmd, arg);
+}
+
+static const struct block_device_operations bcache_ops = {
+	.open		= open_dev,
+	.release	= release_dev,
+	.ioctl		= ioctl_dev,
+	.owner		= THIS_MODULE,
+};
+
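+/* atomic_xchg() ensures d->cl is only ever queued once */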
+void bcache_device_stop(struct bcache_device *d)
+{
+	if (!atomic_xchg(&d->closing, 1))
+		closure_queue(&d->cl);
+}
+
+static void bcache_device_unlink(struct bcache_device *d)
+{
+	unsigned i;
+	struct cache *ca;
+
+	sysfs_remove_link(&d->c->kobj, d->name);
+	sysfs_remove_link(&d->kobj, "cache");
+
+	for_each_cache(ca, d->c, i)
+		bd_unlink_disk_holder(ca->bdev, d->disk);
+}
+
+static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+			       const char *name)
+{
+	unsigned i;
+	struct cache *ca;
+
+	for_each_cache(ca, d->c, i)
+		bd_link_disk_holder(ca->bdev, d->disk);
+
+	snprintf(d->name, BCACHEDEVNAME_SIZE,
+		 "%s%u", name, d->id);
+
+	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
+	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
+	     "Couldn't create device <-> cache set symlinks");
+}
+
+static void bcache_device_detach(struct bcache_device *d)
+{
+	lockdep_assert_held(&bch_register_lock);
+
+	if (atomic_read(&d->detaching)) {
+		struct uuid_entry *u = d->c->uuids + d->id;
+
+		SET_UUID_FLASH_ONLY(u, 0);
+		memcpy(u->uuid, invalid_uuid, 16);
+		u->invalidated = cpu_to_le32(get_seconds());
+		bch_uuid_write(d->c);
+
+		atomic_set(&d->detaching, 0);
+	}
+
+	bcache_device_unlink(d);
+
+	d->c->devices[d->id] = NULL;
+	closure_put(&d->c->caching);
+	d->c = NULL;
+}
+
+static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
+				 unsigned id)
+{
+	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));
+
+	d->id = id;
+	d->c = c;
+	c->devices[id] = d;
+
+	closure_get(&c->caching);
+}
+
+static void bcache_device_free(struct bcache_device *d)
+{
+	lockdep_assert_held(&bch_register_lock);
+
+	pr_info("%s stopped", d->disk->disk_name);
+
+	if (d->c)
+		bcache_device_detach(d);
+
+	if (d->disk) {
+		del_gendisk(d->disk);
+		if (d->disk->queue)
+			blk_cleanup_queue(d->disk->queue);
+		put_disk(d->disk);
+	}
+
+	bio_split_pool_free(&d->bio_split_hook);
+	if (d->unaligned_bvec)
+		mempool_destroy(d->unaligned_bvec);
+	if (d->bio_split)
+		bioset_free(d->bio_split);
+
+	closure_debug_destroy(&d->cl);
+}
+
+static int bcache_device_init(struct bcache_device *d, unsigned block_size)
+{
+	struct request_queue *q;
+
+	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
+				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
+	    bio_split_pool_init(&d->bio_split_hook))
+		return -ENOMEM;
+
+	d->disk = alloc_disk(1);
+	if (!d->disk)
+		return -ENOMEM;
+
+	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
+
+	d->disk->major		= bcache_major;
+	d->disk->first_minor	= bcache_minor++;
+	d->disk->fops		= &bcache_ops;
+	d->disk->private_data	= d;
+
+	q = blk_alloc_queue(GFP_KERNEL);
+	if (!q)
+		return -ENOMEM;
+
+	blk_queue_make_request(q, NULL);
+	d->disk->queue			= q;
+	q->queuedata			= d;
+	q->backing_dev_info.congested_data = d;
+	q->limits.max_hw_sectors	= UINT_MAX;
+	q->limits.max_sectors		= UINT_MAX;
+	q->limits.max_segment_size	= UINT_MAX;
+	q->limits.max_segments		= BIO_MAX_PAGES;
+	q->limits.max_discard_sectors	= UINT_MAX;
+	q->limits.io_min		= block_size;
+	q->limits.logical_block_size	= block_size;
+	q->limits.physical_block_size	= block_size;
+	set_bit(QUEUE_FLAG_NONROT,	&d->disk->queue->queue_flags);
+	set_bit(QUEUE_FLAG_DISCARD,	&d->disk->queue->queue_flags);
+
+	return 0;
+}
+
+/* Cached device */
+
+static void calc_cached_dev_sectors(struct cache_set *c)
+{
+	uint64_t sectors = 0;
+	struct cached_dev *dc;
+
+	list_for_each_entry(dc, &c->cached_devs, list)
+		sectors += bdev_sectors(dc->bdev);
+
+	c->cached_dev_sectors = sectors;
+}
+
+void bch_cached_dev_run(struct cached_dev *dc)
+{
+	struct bcache_device *d = &dc->disk;
+
+	if (atomic_xchg(&dc->running, 1))
+		return;
+
+	if (!d->c &&
+	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+		struct closure cl;
+		closure_init_stack(&cl);
+
+		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
+		bch_write_bdev_super(dc, &cl);
+		closure_sync(&cl);
+	}
+
+	add_disk(d->disk);
+	bd_link_disk_holder(dc->bdev, dc->disk.disk);
+#if 0
+	char *env[] = { "SYMLINK=label", NULL };
+	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
+#endif
+	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
+	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
+		pr_debug("error creating sysfs link");
+}
+
+static void cached_dev_detach_finish(struct work_struct *w)
+{
+	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+	char buf[BDEVNAME_SIZE];
+	struct closure cl;
+	closure_init_stack(&cl);
+
+	BUG_ON(!atomic_read(&dc->disk.detaching));
+	BUG_ON(atomic_read(&dc->count));
+
+	mutex_lock(&bch_register_lock);
+
+	memset(&dc->sb.set_uuid, 0, 16);
+	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
+
+	bch_write_bdev_super(dc, &cl);
+	closure_sync(&cl);
+
+	bcache_device_detach(&dc->disk);
+	list_move(&dc->list, &uncached_devices);
+
+	mutex_unlock(&bch_register_lock);
+
+	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+
+	/* Drop ref we took in cached_dev_detach() */
+	closure_put(&dc->disk.cl);
+}
+
+void bch_cached_dev_detach(struct cached_dev *dc)
+{
+	lockdep_assert_held(&bch_register_lock);
+
+	if (atomic_read(&dc->disk.closing))
+		return;
+
+	if (atomic_xchg(&dc->disk.detaching, 1))
+		return;
+
+	/*
+	 * Block the device from being closed and freed until we're finished
+	 * detaching
+	 */
+	closure_get(&dc->disk.cl);
+
+	bch_writeback_queue(dc);
+	cached_dev_put(dc);
+}
+
+int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+{
+	uint32_t rtime = cpu_to_le32(get_seconds());
+	struct uuid_entry *u;
+	char buf[BDEVNAME_SIZE];
+
+	bdevname(dc->bdev, buf);
+
+	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
+		return -ENOENT;
+
+	if (dc->disk.c) {
+		pr_err("Can't attach %s: already attached", buf);
+		return -EINVAL;
+	}
+
+	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
+		pr_err("Can't attach %s: shutting down", buf);
+		return -EINVAL;
+	}
+
+	if (dc->sb.block_size < c->sb.block_size) {
+		/* Will die */
+		pr_err("Couldn't attach %s: block size less than set's block size",
+		       buf);
+		return -EINVAL;
+	}
+
+	u = uuid_find(c, dc->sb.uuid);
+
+	if (u &&
+	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
+	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
+		memcpy(u->uuid, invalid_uuid, 16);
+		u->invalidated = cpu_to_le32(get_seconds());
+		u = NULL;
+	}
+
+	if (!u) {
+		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+			pr_err("Couldn't find uuid for %s in set", buf);
+			return -ENOENT;
+		}
+
+		u = uuid_find_empty(c);
+		if (!u) {
+			pr_err("Not caching %s, no room for UUID", buf);
+			return -EINVAL;
+		}
+	}
+
+	/* Deadlocks since we're called via sysfs...
+	sysfs_remove_file(&dc->kobj, &sysfs_attach);
+	 */
+
+	if (bch_is_zero(u->uuid, 16)) {
+		struct closure cl;
+		closure_init_stack(&cl);
+
+		memcpy(u->uuid, dc->sb.uuid, 16);
+		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
+		u->first_reg = u->last_reg = rtime;
+		bch_uuid_write(c);
+
+		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
+		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+
+		bch_write_bdev_super(dc, &cl);
+		closure_sync(&cl);
+	} else {
+		u->last_reg = rtime;
+		bch_uuid_write(c);
+	}
+
+	bcache_device_attach(&dc->disk, c, u - c->uuids);
+	list_move(&dc->list, &c->cached_devs);
+	calc_cached_dev_sectors(c);
+
+	smp_wmb();
+	/*
+	 * dc->c must be set before dc->count != 0 - paired with the mb in
+	 * cached_dev_get()
+	 */
+	atomic_set(&dc->count, 1);
+
+	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+		atomic_set(&dc->has_dirty, 1);
+		atomic_inc(&dc->count);
+		bch_writeback_queue(dc);
+	}
+
+	bch_cached_dev_run(dc);
+	bcache_device_link(&dc->disk, c, "bdev");
+
+	pr_info("Caching %s as %s on set %pU",
+		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+		dc->disk.c->sb.set_uuid);
+	return 0;
+}
+
+void bch_cached_dev_release(struct kobject *kobj)
+{
+	struct cached_dev *dc = container_of(kobj, struct cached_dev,
+					     disk.kobj);
+	kfree(dc);
+	module_put(THIS_MODULE);
+}
+
+static void cached_dev_free(struct closure *cl)
+{
+	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+
+	cancel_delayed_work_sync(&dc->writeback_rate_update);
+
+	mutex_lock(&bch_register_lock);
+
+	bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+	bcache_device_free(&dc->disk);
+	list_del(&dc->list);
+
+	mutex_unlock(&bch_register_lock);
+
+	if (!IS_ERR_OR_NULL(dc->bdev)) {
+		blk_sync_queue(bdev_get_queue(dc->bdev));
+		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+	}
+
+	wake_up(&unregister_wait);
+
+	kobject_put(&dc->disk.kobj);
+}
+
+static void cached_dev_flush(struct closure *cl)
+{
+	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+	struct bcache_device *d = &dc->disk;
+
+	bch_cache_accounting_destroy(&dc->accounting);
+	kobject_del(&d->kobj);
+
+	continue_at(cl, cached_dev_free, system_wq);
+}
+
+static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
+{
+	int err;
+	struct io *io;
+
+	closure_init(&dc->disk.cl, NULL);
+	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+
+	__module_get(THIS_MODULE);
+	INIT_LIST_HEAD(&dc->list);
+	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
+
+	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
+
+	err = bcache_device_init(&dc->disk, block_size);
+	if (err)
+		goto err;
+
+	spin_lock_init(&dc->io_lock);
+	closure_init_unlocked(&dc->sb_write);
+	INIT_WORK(&dc->detach, cached_dev_detach_finish);
+
+	dc->sequential_merge		= true;
+	dc->sequential_cutoff		= 4 << 20;
+
+	INIT_LIST_HEAD(&dc->io_lru);
+	dc->sb_bio.bi_max_vecs	= 1;
+	dc->sb_bio.bi_io_vec	= dc->sb_bio.bi_inline_vecs;
+
+	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
+		list_add(&io->lru, &dc->io_lru);
+		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
+	}
+
+	bch_writeback_init_cached_dev(dc);
+	return 0;
+err:
+	bcache_device_stop(&dc->disk);
+	return err;
+}
+
+/* Cached device - bcache superblock */
+
+static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
+				 struct block_device *bdev,
+				 struct cached_dev *dc)
+{
+	char name[BDEVNAME_SIZE];
+	const char *err = "cannot allocate memory";
+	struct gendisk *g;
+	struct cache_set *c;
+
+	if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
+		return err;
+
+	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
+	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	dc->bdev = bdev;
+	dc->bdev->bd_holder = dc;
+
+	g = dc->disk.disk;
+
+	set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+
+	g->queue->backing_dev_info.ra_pages =
+		max(g->queue->backing_dev_info.ra_pages,
+		    bdev->bd_queue->backing_dev_info.ra_pages);
+
+	bch_cached_dev_request_init(dc);
+
+	err = "error creating kobject";
+	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
+			"bcache"))
+		goto err;
+	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
+		goto err;
+
+	list_add(&dc->list, &uncached_devices);
+	list_for_each_entry(c, &bch_cache_sets, list)
+		bch_cached_dev_attach(dc, c);
+
+	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
+	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
+		bch_cached_dev_run(dc);
+
+	return NULL;
+err:
+	kobject_put(&dc->disk.kobj);
+	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+	/*
+	 * Return NULL instead of an error because kobject_put() cleans
+	 * everything up
+	 */
+	return NULL;
+}
+
+/* Flash only volumes */
+
+void bch_flash_dev_release(struct kobject *kobj)
+{
+	struct bcache_device *d = container_of(kobj, struct bcache_device,
+					       kobj);
+	kfree(d);
+}
+
+static void flash_dev_free(struct closure *cl)
+{
+	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+	bcache_device_free(d);
+	kobject_put(&d->kobj);
+}
+
+static void flash_dev_flush(struct closure *cl)
+{
+	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+
+	bcache_device_unlink(d);
+	kobject_del(&d->kobj);
+	continue_at(cl, flash_dev_free, system_wq);
+}
+
+static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+{
+	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
+					  GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	closure_init(&d->cl, NULL);
+	set_closure_fn(&d->cl, flash_dev_flush, system_wq);
+
+	kobject_init(&d->kobj, &bch_flash_dev_ktype);
+
+	if (bcache_device_init(d, block_bytes(c)))
+		goto err;
+
+	bcache_device_attach(d, c, u - c->uuids);
+	set_capacity(d->disk, u->sectors);
+	bch_flash_dev_request_init(d);
+	add_disk(d->disk);
+
+	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
+		goto err;
+
+	bcache_device_link(d, c, "volume");
+
+	return 0;
+err:
+	kobject_put(&d->kobj);
+	return -ENOMEM;
+}
+
+static int flash_devs_run(struct cache_set *c)
+{
+	int ret = 0;
+	struct uuid_entry *u;
+
+	for (u = c->uuids;
+	     u < c->uuids + c->nr_uuids && !ret;
+	     u++)
+		if (UUID_FLASH_ONLY(u))
+			ret = flash_dev_run(c, u);
+
+	return ret;
+}
+
+int bch_flash_dev_create(struct cache_set *c, uint64_t size)
+{
+	struct uuid_entry *u;
+
+	if (test_bit(CACHE_SET_STOPPING, &c->flags))
+		return -EINTR;
+
+	u = uuid_find_empty(c);
+	if (!u) {
+		pr_err("Can't create volume, no room for UUID");
+		return -EINVAL;
+	}
+
+	get_random_bytes(u->uuid, 16);
+	memset(u->label, 0, 32);
+	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());
+
+	SET_UUID_FLASH_ONLY(u, 1);
+	u->sectors = size >> 9;
+
+	bch_uuid_write(c);
+
+	return flash_dev_run(c, u);
+}
+
+/* Cache set */
+
+__printf(2, 3)
+bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
+{
+	va_list args;
+
+	if (test_bit(CACHE_SET_STOPPING, &c->flags))
+		return false;
+
+	/* XXX: we can be called from atomic context
+	acquire_console_sem();
+	*/
+
+	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	printk(", disabling caching\n");
+
+	bch_cache_set_unregister(c);
+	return true;
+}
+
+void bch_cache_set_release(struct kobject *kobj)
+{
+	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+	kfree(c);
+	module_put(THIS_MODULE);
+}
+
+static void cache_set_free(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, cl);
+	struct cache *ca;
+	unsigned i;
+
+	if (!IS_ERR_OR_NULL(c->debug))
+		debugfs_remove(c->debug);
+
+	bch_open_buckets_free(c);
+	bch_btree_cache_free(c);
+	bch_journal_free(c);
+
+	for_each_cache(ca, c, i)
+		if (ca)
+			kobject_put(&ca->kobj);
+
+	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
+
+	kfree(c->fill_iter);
+	if (c->bio_split)
+		bioset_free(c->bio_split);
+	if (c->bio_meta)
+		mempool_destroy(c->bio_meta);
+	if (c->search)
+		mempool_destroy(c->search);
+	kfree(c->devices);
+
+	mutex_lock(&bch_register_lock);
+	list_del(&c->list);
+	mutex_unlock(&bch_register_lock);
+
+	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
+	wake_up(&unregister_wait);
+
+	closure_debug_destroy(&c->cl);
+	kobject_put(&c->kobj);
+}
+
+static void cache_set_flush(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	struct btree *b;
+
+	/* Shut down allocator threads */
+	set_bit(CACHE_SET_STOPPING_2, &c->flags);
+	wake_up(&c->alloc_wait);
+
+	bch_cache_accounting_destroy(&c->accounting);
+
+	kobject_put(&c->internal);
+	kobject_del(&c->kobj);
+
+	if (!IS_ERR_OR_NULL(c->root))
+		list_add(&c->root->list, &c->btree_cache);
+
+	/* Should skip this if we're unregistering because of an error */
+	list_for_each_entry(b, &c->btree_cache, list)
+		if (btree_node_dirty(b))
+			bch_btree_write(b, true, NULL);
+
+	closure_return(cl);
+}
+
+static void __cache_set_unregister(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, caching);
+	struct cached_dev *dc, *t;
+	size_t i;
+
+	mutex_lock(&bch_register_lock);
+
+	if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
+		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+			bch_cached_dev_detach(dc);
+
+	for (i = 0; i < c->nr_uuids; i++)
+		if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
+			bcache_device_stop(c->devices[i]);
+
+	mutex_unlock(&bch_register_lock);
+
+	continue_at(cl, cache_set_flush, system_wq);
+}
+
+void bch_cache_set_stop(struct cache_set *c)
+{
+	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
+		closure_queue(&c->caching);
+}
+
+void bch_cache_set_unregister(struct cache_set *c)
+{
+	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
+	bch_cache_set_stop(c);
+}
+
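+/* Allocate zeroed pages sized to hold one bucket's worth of data */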
+#define alloc_bucket_pages(gfp, c)			\
+	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
+
+struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+{
+	int iter_size;
+	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
+	if (!c)
+		return NULL;
+
+	__module_get(THIS_MODULE);
+	closure_init(&c->cl, NULL);
+	set_closure_fn(&c->cl, cache_set_free, system_wq);
+
+	closure_init(&c->caching, &c->cl);
+	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
+
+	/* Maybe create continue_at_noreturn() and use it here? */
+	closure_set_stopped(&c->cl);
+	closure_put(&c->cl);
+
+	kobject_init(&c->kobj, &bch_cache_set_ktype);
+	kobject_init(&c->internal, &bch_cache_set_internal_ktype);
+
+	bch_cache_accounting_init(&c->accounting, &c->cl);
+
+	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
+	c->sb.block_size	= sb->block_size;
+	c->sb.bucket_size	= sb->bucket_size;
+	c->sb.nr_in_set		= sb->nr_in_set;
+	c->sb.last_mount	= sb->last_mount;
+	c->bucket_bits		= ilog2(sb->bucket_size);
+	c->block_bits		= ilog2(sb->block_size);
+	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);
+
+	c->btree_pages		= c->sb.bucket_size / PAGE_SECTORS;
+	if (c->btree_pages > BTREE_MAX_PAGES)
+		c->btree_pages = max_t(int, c->btree_pages / 4,
+				       BTREE_MAX_PAGES);
+
+	init_waitqueue_head(&c->alloc_wait);
+	mutex_init(&c->bucket_lock);
+	mutex_init(&c->fill_lock);
+	mutex_init(&c->sort_lock);
+	spin_lock_init(&c->sort_time_lock);
+	closure_init_unlocked(&c->sb_write);
+	closure_init_unlocked(&c->uuid_write);
+	spin_lock_init(&c->btree_read_time_lock);
+	bch_moving_init_cache_set(c);
+
+	INIT_LIST_HEAD(&c->list);
+	INIT_LIST_HEAD(&c->cached_devs);
+	INIT_LIST_HEAD(&c->btree_cache);
+	INIT_LIST_HEAD(&c->btree_cache_freeable);
+	INIT_LIST_HEAD(&c->btree_cache_freed);
+	INIT_LIST_HEAD(&c->data_buckets);
+
+	c->search = mempool_create_slab_pool(32, bch_search_cache);
+	if (!c->search)
+		goto err;
+
+	iter_size = (sb->bucket_size / sb->block_size + 1) *
+		sizeof(struct btree_iter_set);
+
+	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
+	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
+				sizeof(struct bbio) + sizeof(struct bio_vec) *
+				bucket_pages(c))) ||
+	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
+	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
+	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
+	    bch_journal_alloc(c) ||
+	    bch_btree_cache_alloc(c) ||
+	    bch_open_buckets_alloc(c))
+		goto err;
+
+	c->fill_iter->size = sb->bucket_size / sb->block_size;
+
+	c->congested_read_threshold_us	= 2000;
+	c->congested_write_threshold_us	= 20000;
+	c->error_limit	= 8 << IO_ERROR_SHIFT;
+
+	return c;
+err:
+	bch_cache_set_unregister(c);
+	return NULL;
+}
+
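+/*
+ * Bring a fully assembled cache set online: if the set was shut down
+ * cleanly (CACHE_SYNC), replay the journal and read the priorities back
+ * in; otherwise invalidate everything and initialize from scratch.
+ */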
+static void run_cache_set(struct cache_set *c)
+{
+	const char *err = "cannot allocate memory";
+	struct cached_dev *dc, *t;
+	struct cache *ca;
+	unsigned i;
+
+	struct btree_op op;
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
+
+	for_each_cache(ca, c, i)
+		c->nbuckets += ca->sb.nbuckets;
+
+	if (CACHE_SYNC(&c->sb)) {
+		LIST_HEAD(journal);
+		struct bkey *k;
+		struct jset *j;
+
+		err = "cannot allocate memory for journal";
+		if (bch_journal_read(c, &journal, &op))
+			goto err;
+
+		pr_debug("btree_journal_read() done");
+
+		err = "no journal entries found";
+		if (list_empty(&journal))
+			goto err;
+
+		j = &list_entry(journal.prev, struct journal_replay, list)->j;
+
+		err = "IO error reading priorities";
+		for_each_cache(ca, c, i)
+			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
+
+		/*
+		 * If prio_read() fails it'll call cache_set_error and we'll
+		 * tear everything down right away, but if we checked sooner
+		 * we could avoid the journal replay.
+		 */
+
+		k = &j->btree_root;
+
+		err = "bad btree root";
+		if (__bch_ptr_invalid(c, j->btree_level + 1, k))
+			goto err;
+
+		err = "error reading btree root";
+		c->root = bch_btree_node_get(c, k, j->btree_level, &op);
+		if (IS_ERR_OR_NULL(c->root))
+			goto err;
+
+		list_del_init(&c->root->list);
+		rw_unlock(true, c->root);
+
+		err = uuid_read(c, j, &op.cl);
+		if (err)
+			goto err;
+
+		err = "error in recovery";
+		if (bch_btree_check(c, &op))
+			goto err;
+
+		bch_journal_mark(c, &journal);
+		bch_btree_gc_finish(c);
+		pr_debug("btree_check() done");
+
+		/*
+		 * bcache_journal_next() can't happen sooner, or
+		 * btree_gc_finish() will give spurious errors about last_gc >
+		 * gc_gen - this is a hack but oh well.
+		 */
+		bch_journal_next(&c->journal);
+
+		for_each_cache(ca, c, i)
+			closure_call(&ca->alloc, bch_allocator_thread,
+				     system_wq, &c->cl);
+
+		/*
+		 * First place it's safe to allocate: btree_check() and
+		 * btree_gc_finish() have to run before we have buckets to
+		 * allocate, and bch_bucket_alloc_set() might cause a journal
+		 * entry to be written so bcache_journal_next() has to be called
+		 * first.
+		 *
+		 * If the uuids were in the old format we have to rewrite them
+		 * before the next journal entry is written:
+		 */
+		if (j->version < BCACHE_JSET_VERSION_UUID)
+			__uuid_write(c);
+
+		bch_journal_replay(c, &journal, &op);
+	} else {
+		pr_notice("invalidating existing data");
+		/* Don't want invalidate_buckets() to queue a gc yet */
+		closure_lock(&c->gc, NULL);
+
+		for_each_cache(ca, c, i) {
+			unsigned j;
+
+			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+					      2, SB_JOURNAL_BUCKETS);
+
+			for (j = 0; j < ca->sb.keys; j++)
+				ca->sb.d[j] = ca->sb.first_bucket + j;
+		}
+
+		bch_btree_gc_finish(c);
+
+		for_each_cache(ca, c, i)
+			closure_call(&ca->alloc, bch_allocator_thread,
+				     ca->alloc_workqueue, &c->cl);
+
+		mutex_lock(&c->bucket_lock);
+		for_each_cache(ca, c, i)
+			bch_prio_write(ca);
+		mutex_unlock(&c->bucket_lock);
+
+		wake_up(&c->alloc_wait);
+
+		err = "cannot allocate new UUID bucket";
+		if (__uuid_write(c))
+			goto err_unlock_gc;
+
+		err = "cannot allocate new btree root";
+		c->root = bch_btree_node_alloc(c, 0, &op.cl);
+		if (IS_ERR_OR_NULL(c->root))
+			goto err_unlock_gc;
+
+		bkey_copy_key(&c->root->key, &MAX_KEY);
+		bch_btree_write(c->root, true, &op);
+
+		bch_btree_set_root(c->root);
+		rw_unlock(true, c->root);
+
+		/*
+		 * We don't want to write the first journal entry until
+		 * everything is set up - fortunately journal entries won't be
+		 * written until the SET_CACHE_SYNC() here:
+		 */
+		SET_CACHE_SYNC(&c->sb, true);
+
+		bch_journal_next(&c->journal);
+		bch_journal_meta(c, &op.cl);
+
+		/* Unlock */
+		closure_set_stopped(&c->gc.cl);
+		closure_put(&c->gc.cl);
+	}
+
+	closure_sync(&op.cl);
+	c->sb.last_mount = get_seconds();
+	bcache_write_super(c);
+
+	list_for_each_entry_safe(dc, t, &uncached_devices, list)
+		bch_cached_dev_attach(dc, c);
+
+	flash_devs_run(c);
+
+	return;
+err_unlock_gc:
+	closure_set_stopped(&c->gc.cl);
+	closure_put(&c->gc.cl);
+err:
+	closure_sync(&op.cl);
+	/* XXX: test this, it's broken */
+	bch_cache_set_error(c, err);
+}
+
+static bool can_attach_cache(struct cache *ca, struct cache_set *c)
+{
+	return ca->sb.block_size	== c->sb.block_size &&
+		ca->sb.bucket_size	== c->sb.bucket_size &&
+		ca->sb.nr_in_set	== c->sb.nr_in_set;
+}
+
+static const char *register_cache_set(struct cache *ca)
+{
+	char buf[12];
+	const char *err = "cannot allocate memory";
+	struct cache_set *c;
+
+	list_for_each_entry(c, &bch_cache_sets, list)
+		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
+			if (c->cache[ca->sb.nr_this_dev])
+				return "duplicate cache set member";
+
+			if (!can_attach_cache(ca, c))
+				return "cache sb does not match set";
+
+			if (!CACHE_SYNC(&ca->sb))
+				SET_CACHE_SYNC(&c->sb, false);
+
+			goto found;
+		}
+
+	c = bch_cache_set_alloc(&ca->sb);
+	if (!c)
+		return err;
+
+	err = "error creating kobject";
+	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
+	    kobject_add(&c->internal, &c->kobj, "internal"))
+		goto err;
+
+	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
+		goto err;
+
+	bch_debug_init_cache_set(c);
+
+	list_add(&c->list, &bch_cache_sets);
+found:
+	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
+	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
+	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
+		goto err;
+
+	if (ca->sb.seq > c->sb.seq) {
+		c->sb.version		= ca->sb.version;
+		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
+		c->sb.flags             = ca->sb.flags;
+		c->sb.seq		= ca->sb.seq;
+		pr_debug("set version = %llu", c->sb.version);
+	}
+
+	ca->set = c;
+	ca->set->cache[ca->sb.nr_this_dev] = ca;
+	c->cache_by_alloc[c->caches_loaded++] = ca;
+
+	if (c->caches_loaded == c->sb.nr_in_set)
+		run_cache_set(c);
+
+	return NULL;
+err:
+	bch_cache_set_unregister(c);
+	return err;
+}
+
+/* Cache device */
+
+void bch_cache_release(struct kobject *kobj)
+{
+	struct cache *ca = container_of(kobj, struct cache, kobj);
+
+	if (ca->set)
+		ca->set->cache[ca->sb.nr_this_dev] = NULL;
+
+	bch_cache_allocator_exit(ca);
+
+	bio_split_pool_free(&ca->bio_split_hook);
+
+	if (ca->alloc_workqueue)
+		destroy_workqueue(ca->alloc_workqueue);
+
+	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
+	kfree(ca->prio_buckets);
+	vfree(ca->buckets);
+
+	free_heap(&ca->heap);
+	free_fifo(&ca->unused);
+	free_fifo(&ca->free_inc);
+	free_fifo(&ca->free);
+
+	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
+		put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+
+	if (!IS_ERR_OR_NULL(ca->bdev)) {
+		blk_sync_queue(bdev_get_queue(ca->bdev));
+		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+	}
+
+	kfree(ca);
+	module_put(THIS_MODULE);
+}
+
+static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+{
+	size_t free;
+	struct bucket *b;
+
+	if (!ca)
+		return -ENOMEM;
+
+	__module_get(THIS_MODULE);
+	kobject_init(&ca->kobj, &bch_cache_ktype);
+
+	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+
+	INIT_LIST_HEAD(&ca->discards);
+
+	bio_init(&ca->sb_bio);
+	ca->sb_bio.bi_max_vecs	= 1;
+	ca->sb_bio.bi_io_vec	= ca->sb_bio.bi_inline_vecs;
+
+	bio_init(&ca->journal.bio);
+	ca->journal.bio.bi_max_vecs = 8;
+	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+
+	free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
+	free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+
+	if (!init_fifo(&ca->free,	free, GFP_KERNEL) ||
+	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
+	    !init_fifo(&ca->unused,	free << 2, GFP_KERNEL) ||
+	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||
+	    !(ca->buckets	= vmalloc(sizeof(struct bucket) *
+					  ca->sb.nbuckets)) ||
+	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
+					  2, GFP_KERNEL)) ||
+	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
+	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
+	    bio_split_pool_init(&ca->bio_split_hook))
+		goto err;
+
+	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
+
+	memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
+	for_each_bucket(b, ca)
+		atomic_set(&b->pin, 0);
+
+	if (bch_cache_allocator_init(ca))
+		goto err;
+
+	return 0;
+err:
+	kobject_put(&ca->kobj);
+	return -ENOMEM;
+}
+
+static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
+				  struct block_device *bdev, struct cache *ca)
+{
+	char name[BDEVNAME_SIZE];
+	const char *err = "cannot allocate memory";
+
+	if (cache_alloc(sb, ca) != 0)
+		return err;
+
+	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+	ca->bdev = bdev;
+	ca->bdev->bd_holder = ca;
+
+	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+		ca->discard = CACHE_DISCARD(&ca->sb);
+
+	err = "error creating kobject";
+	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
+		goto err;
+
+	err = register_cache_set(ca);
+	if (err)
+		goto err;
+
+	pr_info("registered cache device %s", bdevname(bdev, name));
+
+	return NULL;
+err:
+	kobject_put(&ca->kobj);
+	pr_info("error opening %s: %s", bdevname(bdev, name), err);
+	/*
+	 * Return NULL instead of an error because kobject_put() cleans
+	 * everything up
+	 */
+	return NULL;
+}
+
+/* Global interfaces/init */
+
+static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
+			       const char *, size_t);
+
+kobj_attribute_write(register,		register_bcache);
+kobj_attribute_write(register_quiet,	register_bcache);
+
+static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+			       const char *buffer, size_t size)
+{
+	ssize_t ret = size;
+	const char *err = "cannot allocate memory";
+	char *path = NULL;
+	struct cache_sb *sb = NULL;
+	struct block_device *bdev = NULL;
+	struct page *sb_page = NULL;
+
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
+	mutex_lock(&bch_register_lock);
+
+	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
+	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
+		goto err;
+
+	err = "failed to open device";
+	bdev = blkdev_get_by_path(strim(path),
+				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				  sb);
+	if (bdev == ERR_PTR(-EBUSY))
+		err = "device busy";
+
+	if (IS_ERR(bdev) ||
+	    set_blocksize(bdev, 4096))
+		goto err;
+
+	err = read_super(sb, bdev, &sb_page);
+	if (err)
+		goto err_close;
+
+	if (SB_IS_BDEV(sb)) {
+		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+
+		err = register_bdev(sb, sb_page, bdev, dc);
+	} else {
+		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+		err = register_cache(sb, sb_page, bdev, ca);
+	}
+
+	if (err) {
+		/* register_(bdev|cache) will only return an error if they
+		 * didn't get far enough to create the kobject - if they did,
+		 * the kobject destructor will do this cleanup.
+		 */
+		put_page(sb_page);
+err_close:
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+err:
+		if (attr != &ksysfs_register_quiet)
+			pr_info("error opening %s: %s", path, err);
+		ret = -EINVAL;
+	}
+
+	kfree(sb);
+	kfree(path);
+	mutex_unlock(&bch_register_lock);
+	module_put(THIS_MODULE);
+	return ret;
+}
+
+static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
+{
+	if (code == SYS_DOWN ||
+	    code == SYS_HALT ||
+	    code == SYS_POWER_OFF) {
+		DEFINE_WAIT(wait);
+		unsigned long start = jiffies;
+		bool stopped = false;
+
+		struct cache_set *c, *tc;
+		struct cached_dev *dc, *tdc;
+
+		mutex_lock(&bch_register_lock);
+
+		if (list_empty(&bch_cache_sets) &&
+		    list_empty(&uncached_devices))
+			goto out;
+
+		pr_info("Stopping all devices:");
+
+		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+			bch_cache_set_stop(c);
+
+		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
+			bcache_device_stop(&dc->disk);
+
+		/* What's a condition variable? */
+		while (1) {
+			long timeout = start + 2 * HZ - jiffies;
+
+			stopped = list_empty(&bch_cache_sets) &&
+				list_empty(&uncached_devices);
+
+			if (timeout < 0 || stopped)
+				break;
+
+			prepare_to_wait(&unregister_wait, &wait,
+					TASK_UNINTERRUPTIBLE);
+
+			mutex_unlock(&bch_register_lock);
+			schedule_timeout(timeout);
+			mutex_lock(&bch_register_lock);
+		}
+
+		finish_wait(&unregister_wait, &wait);
+
+		if (stopped)
+			pr_info("All devices stopped");
+		else
+			pr_notice("Timeout waiting for devices to be closed");
+out:
+		mutex_unlock(&bch_register_lock);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block reboot = {
+	.notifier_call	= bcache_reboot,
+	.priority	= INT_MAX, /* before any real devices */
+};
+
+static void bcache_exit(void)
+{
+	bch_debug_exit();
+	bch_writeback_exit();
+	bch_request_exit();
+	bch_btree_exit();
+	if (bcache_kobj)
+		kobject_put(bcache_kobj);
+	if (bcache_wq)
+		destroy_workqueue(bcache_wq);
+	unregister_blkdev(bcache_major, "bcache");
+	unregister_reboot_notifier(&reboot);
+}
+
+static int __init bcache_init(void)
+{
+	static const struct attribute *files[] = {
+		&ksysfs_register.attr,
+		&ksysfs_register_quiet.attr,
+		NULL
+	};
+
+	mutex_init(&bch_register_lock);
+	init_waitqueue_head(&unregister_wait);
+	register_reboot_notifier(&reboot);
+	closure_debug_init();
+
+	bcache_major = register_blkdev(0, "bcache");
+	if (bcache_major < 0)
+		return bcache_major;
+
+	if (!(bcache_wq = create_workqueue("bcache")) ||
+	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+	    sysfs_create_files(bcache_kobj, files) ||
+	    bch_btree_init() ||
+	    bch_request_init() ||
+	    bch_writeback_init() ||
+	    bch_debug_init(bcache_kobj))
+		goto err;
+
+	return 0;
+err:
+	bcache_exit();
+	return -ENOMEM;
+}
+
+module_exit(bcache_exit);
+module_init(bcache_init);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
new file mode 100644
index 0000000..4d9cca4
--- /dev/null
+++ b/drivers/md/bcache/sysfs.c
@@ -0,0 +1,817 @@
+/*
+ * bcache sysfs interfaces
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "sysfs.h"
+#include "btree.h"
+#include "request.h"
+
+#include <linux/sort.h>
+
+static const char * const cache_replacement_policies[] = {
+	"lru",
+	"fifo",
+	"random",
+	NULL
+};
+
+write_attribute(attach);
+write_attribute(detach);
+write_attribute(unregister);
+write_attribute(stop);
+write_attribute(clear_stats);
+write_attribute(trigger_gc);
+write_attribute(prune_cache);
+write_attribute(flash_vol_create);
+
+read_attribute(bucket_size);
+read_attribute(block_size);
+read_attribute(nbuckets);
+read_attribute(tree_depth);
+read_attribute(root_usage_percent);
+read_attribute(priority_stats);
+read_attribute(btree_cache_size);
+read_attribute(btree_cache_max_chain);
+read_attribute(cache_available_percent);
+read_attribute(written);
+read_attribute(btree_written);
+read_attribute(metadata_written);
+read_attribute(active_journal_entries);
+
+sysfs_time_stats_attribute(btree_gc,	sec, ms);
+sysfs_time_stats_attribute(btree_split, sec, us);
+sysfs_time_stats_attribute(btree_sort,	ms,  us);
+sysfs_time_stats_attribute(btree_read,	ms,  us);
+sysfs_time_stats_attribute(try_harder,	ms,  us);
+
+read_attribute(btree_nodes);
+read_attribute(btree_used_percent);
+read_attribute(average_key_size);
+read_attribute(dirty_data);
+read_attribute(bset_tree_stats);
+
+read_attribute(state);
+read_attribute(cache_read_races);
+read_attribute(writeback_keys_done);
+read_attribute(writeback_keys_failed);
+read_attribute(io_errors);
+read_attribute(congested);
+rw_attribute(congested_read_threshold_us);
+rw_attribute(congested_write_threshold_us);
+
+rw_attribute(sequential_cutoff);
+rw_attribute(sequential_merge);
+rw_attribute(data_csum);
+rw_attribute(cache_mode);
+rw_attribute(writeback_metadata);
+rw_attribute(writeback_running);
+rw_attribute(writeback_percent);
+rw_attribute(writeback_delay);
+rw_attribute(writeback_rate);
+
+rw_attribute(writeback_rate_update_seconds);
+rw_attribute(writeback_rate_d_term);
+rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_d_smooth);
+read_attribute(writeback_rate_debug);
+
+rw_attribute(synchronous);
+rw_attribute(journal_delay_ms);
+rw_attribute(discard);
+rw_attribute(running);
+rw_attribute(label);
+rw_attribute(readahead);
+rw_attribute(io_error_limit);
+rw_attribute(io_error_halflife);
+rw_attribute(verify);
+rw_attribute(key_merging_disabled);
+rw_attribute(gc_always_rewrite);
+rw_attribute(freelist_percent);
+rw_attribute(cache_replacement_policy);
+rw_attribute(btree_shrinker_disabled);
+rw_attribute(copy_gc_enabled);
+rw_attribute(size);
+
+SHOW(__bch_cached_dev)
+{
+	struct cached_dev *dc = container_of(kobj, struct cached_dev,
+					     disk.kobj);
+	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
+
+#define var(stat)		(dc->stat)
+
+	if (attr == &sysfs_cache_mode)
+		return bch_snprint_string_list(buf, PAGE_SIZE,
+					       bch_cache_modes + 1,
+					       BDEV_CACHE_MODE(&dc->sb));
+
+	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
+	var_printf(verify,		"%i");
+	var_printf(writeback_metadata,	"%i");
+	var_printf(writeback_running,	"%i");
+	var_print(writeback_delay);
+	var_print(writeback_percent);
+	sysfs_print(writeback_rate,	dc->writeback_rate.rate);
+
+	var_print(writeback_rate_update_seconds);
+	var_print(writeback_rate_d_term);
+	var_print(writeback_rate_p_term_inverse);
+	var_print(writeback_rate_d_smooth);
+
+	if (attr == &sysfs_writeback_rate_debug) {
+		char dirty[20];
+		char derivative[20];
+		char target[20];
+		bch_hprint(dirty,
+		       atomic_long_read(&dc->disk.sectors_dirty) << 9);
+		bch_hprint(derivative,	dc->writeback_rate_derivative << 9);
+		bch_hprint(target,	dc->writeback_rate_target << 9);
+
+		return sprintf(buf,
+			       "rate:\t\t%u\n"
+			       "change:\t\t%i\n"
+			       "dirty:\t\t%s\n"
+			       "derivative:\t%s\n"
+			       "target:\t\t%s\n",
+			       dc->writeback_rate.rate,
+			       dc->writeback_rate_change,
+			       dirty, derivative, target);
+	}
+
+	sysfs_hprint(dirty_data,
+		     atomic_long_read(&dc->disk.sectors_dirty) << 9);
+
+	var_printf(sequential_merge,	"%i");
+	var_hprint(sequential_cutoff);
+	var_hprint(readahead);
+
+	sysfs_print(running,		atomic_read(&dc->running));
+	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);
+
+	if (attr == &sysfs_label) {
+		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
+		buf[SB_LABEL_SIZE] = '\0';
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
+
+#undef var
+	return 0;
+}
+SHOW_LOCKED(bch_cached_dev)
+
+STORE(__cached_dev)
+{
+	struct cached_dev *dc = container_of(kobj, struct cached_dev,
+					     disk.kobj);
+	ssize_t v = -ENOENT;
+	struct cache_set *c;
+
+#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
+#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)
+
+	sysfs_strtoul(data_csum,	dc->disk.data_csum);
+	d_strtoul(verify);
+	d_strtoul(writeback_metadata);
+	d_strtoul(writeback_running);
+	d_strtoul(writeback_delay);
+	sysfs_strtoul_clamp(writeback_rate,
+			    dc->writeback_rate.rate, 1, 1000000);
+	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
+
+	d_strtoul(writeback_rate_update_seconds);
+	d_strtoul(writeback_rate_d_term);
+	d_strtoul(writeback_rate_p_term_inverse);
+	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
+			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
+	d_strtoul(writeback_rate_d_smooth);
+
+	d_strtoul(sequential_merge);
+	d_strtoi_h(sequential_cutoff);
+	d_strtoi_h(readahead);
+
+	if (attr == &sysfs_clear_stats)
+		bch_cache_accounting_clear(&dc->accounting);
+
+	if (attr == &sysfs_running &&
+	    strtoul_or_return(buf))
+		bch_cached_dev_run(dc);
+
+	if (attr == &sysfs_cache_mode) {
+		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+
+		if (v < 0)
+			return v;
+
+		if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
+			SET_BDEV_CACHE_MODE(&dc->sb, v);
+			bch_write_bdev_super(dc, NULL);
+		}
+	}
+
+	if (attr == &sysfs_label) {
+		memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
+		bch_write_bdev_super(dc, NULL);
+		if (dc->disk.c) {
+			memcpy(dc->disk.c->uuids[dc->disk.id].label,
+			       buf, SB_LABEL_SIZE);
+			bch_uuid_write(dc->disk.c);
+		}
+	}
+
+	if (attr == &sysfs_attach) {
+		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
+			return -EINVAL;
+
+		list_for_each_entry(c, &bch_cache_sets, list) {
+			v = bch_cached_dev_attach(dc, c);
+			if (!v)
+				return size;
+		}
+
+		pr_err("Can't attach %s: cache set not found", buf);
+		return v;
+	}
+
+	if (attr == &sysfs_detach && dc->disk.c)
+		bch_cached_dev_detach(dc);
+
+	if (attr == &sysfs_stop)
+		bcache_device_stop(&dc->disk);
+
+	return size;
+}
+
+STORE(bch_cached_dev)
+{
+	struct cached_dev *dc = container_of(kobj, struct cached_dev,
+					     disk.kobj);
+
+	mutex_lock(&bch_register_lock);
+	size = __cached_dev_store(kobj, attr, buf, size);
+
+	if (attr == &sysfs_writeback_running)
+		bch_writeback_queue(dc);
+
+	if (attr == &sysfs_writeback_percent)
+		schedule_delayed_work(&dc->writeback_rate_update,
+				      dc->writeback_rate_update_seconds * HZ);
+
+	mutex_unlock(&bch_register_lock);
+	return size;
+}
+
+static struct attribute *bch_cached_dev_files[] = {
+	&sysfs_attach,
+	&sysfs_detach,
+	&sysfs_stop,
+#if 0
+	&sysfs_data_csum,
+#endif
+	&sysfs_cache_mode,
+	&sysfs_writeback_metadata,
+	&sysfs_writeback_running,
+	&sysfs_writeback_delay,
+	&sysfs_writeback_percent,
+	&sysfs_writeback_rate,
+	&sysfs_writeback_rate_update_seconds,
+	&sysfs_writeback_rate_d_term,
+	&sysfs_writeback_rate_p_term_inverse,
+	&sysfs_writeback_rate_d_smooth,
+	&sysfs_writeback_rate_debug,
+	&sysfs_dirty_data,
+	&sysfs_sequential_cutoff,
+	&sysfs_sequential_merge,
+	&sysfs_clear_stats,
+	&sysfs_running,
+	&sysfs_state,
+	&sysfs_label,
+	&sysfs_readahead,
+#ifdef CONFIG_BCACHE_DEBUG
+	&sysfs_verify,
+#endif
+	NULL
+};
+KTYPE(bch_cached_dev);
+
+SHOW(bch_flash_dev)
+{
+	struct bcache_device *d = container_of(kobj, struct bcache_device,
+					       kobj);
+	struct uuid_entry *u = &d->c->uuids[d->id];
+
+	sysfs_printf(data_csum,	"%i", d->data_csum);
+	sysfs_hprint(size,	u->sectors << 9);
+
+	if (attr == &sysfs_label) {
+		memcpy(buf, u->label, SB_LABEL_SIZE);
+		buf[SB_LABEL_SIZE] = '\0';
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
+
+	return 0;
+}
+
+STORE(__bch_flash_dev)
+{
+	struct bcache_device *d = container_of(kobj, struct bcache_device,
+					       kobj);
+	struct uuid_entry *u = &d->c->uuids[d->id];
+
+	sysfs_strtoul(data_csum,	d->data_csum);
+
+	if (attr == &sysfs_size) {
+		uint64_t v;
+		strtoi_h_or_return(buf, v);
+
+		u->sectors = v >> 9;
+		bch_uuid_write(d->c);
+		set_capacity(d->disk, u->sectors);
+	}
+
+	if (attr == &sysfs_label) {
+		memcpy(u->label, buf, SB_LABEL_SIZE);
+		bch_uuid_write(d->c);
+	}
+
+	if (attr == &sysfs_unregister) {
+		atomic_set(&d->detaching, 1);
+		bcache_device_stop(d);
+	}
+
+	return size;
+}
+STORE_LOCKED(bch_flash_dev)
+
+static struct attribute *bch_flash_dev_files[] = {
+	&sysfs_unregister,
+#if 0
+	&sysfs_data_csum,
+#endif
+	&sysfs_label,
+	&sysfs_size,
+	NULL
+};
+KTYPE(bch_flash_dev);
+
+SHOW(__bch_cache_set)
+{
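+	/* These helpers are nested functions, a GCC extension */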
+	unsigned root_usage(struct cache_set *c)
+	{
+		unsigned bytes = 0;
+		struct bkey *k;
+		struct btree *b;
+		struct btree_iter iter;
+
+		goto lock_root;
+
+		do {
+			rw_unlock(false, b);
+lock_root:
+			b = c->root;
+			rw_lock(false, b, b->level);
+		} while (b != c->root);
+
+		for_each_key_filter(b, k, &iter, bch_ptr_bad)
+			bytes += bkey_bytes(k);
+
+		rw_unlock(false, b);
+
+		return (bytes * 100) / btree_bytes(c);
+	}
+
+	size_t cache_size(struct cache_set *c)
+	{
+		size_t ret = 0;
+		struct btree *b;
+
+		mutex_lock(&c->bucket_lock);
+		list_for_each_entry(b, &c->btree_cache, list)
+			ret += 1 << (b->page_order + PAGE_SHIFT);
+
+		mutex_unlock(&c->bucket_lock);
+		return ret;
+	}
+
+	unsigned cache_max_chain(struct cache_set *c)
+	{
+		unsigned ret = 0;
+		struct hlist_head *h;
+
+		mutex_lock(&c->bucket_lock);
+
+		for (h = c->bucket_hash;
+		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
+		     h++) {
+			unsigned i = 0;
+			struct hlist_node *p;
+
+			hlist_for_each(p, h)
+				i++;
+
+			ret = max(ret, i);
+		}
+
+		mutex_unlock(&c->bucket_lock);
+		return ret;
+	}
+
+	unsigned btree_used(struct cache_set *c)
+	{
+		return div64_u64(c->gc_stats.key_bytes * 100,
+				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
+	}
+
+	unsigned average_key_size(struct cache_set *c)
+	{
+		return c->gc_stats.nkeys
+			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
+			: 0;
+	}
+
+	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
+	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
+	sysfs_hprint(bucket_size,		bucket_bytes(c));
+	sysfs_hprint(block_size,		block_bytes(c));
+	sysfs_print(tree_depth,			c->root->level);
+	sysfs_print(root_usage_percent,		root_usage(c));
+
+	sysfs_hprint(btree_cache_size,		cache_size(c));
+	sysfs_print(btree_cache_max_chain,	cache_max_chain(c));
+	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);
+
+	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
+	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
+	sysfs_print_time_stats(&c->sort_time,		btree_sort, ms, us);
+	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);
+	sysfs_print_time_stats(&c->try_harder_time,	try_harder, ms, us);
+
+	sysfs_print(btree_used_percent,	btree_used(c));
+	sysfs_print(btree_nodes,	c->gc_stats.nodes);
+	sysfs_hprint(dirty_data,	c->gc_stats.dirty);
+	sysfs_hprint(average_key_size,	average_key_size(c));
+
+	sysfs_print(cache_read_races,
+		    atomic_long_read(&c->cache_read_races));
+
+	sysfs_print(writeback_keys_done,
+		    atomic_long_read(&c->writeback_keys_done));
+	sysfs_print(writeback_keys_failed,
+		    atomic_long_read(&c->writeback_keys_failed));
+
+	/* See count_io_errors() for why 88 */
+	sysfs_print(io_error_halflife,	c->error_decay * 88);
+	sysfs_print(io_error_limit,	c->error_limit >> IO_ERROR_SHIFT);
+
+	sysfs_hprint(congested,
+		     ((uint64_t) bch_get_congested(c)) << 9);
+	sysfs_print(congested_read_threshold_us,
+		    c->congested_read_threshold_us);
+	sysfs_print(congested_write_threshold_us,
+		    c->congested_write_threshold_us);
+
+	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
+	sysfs_printf(verify,			"%i", c->verify);
+	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
+	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
+	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
+	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
+
+	if (attr == &sysfs_bset_tree_stats)
+		return bch_bset_print_stats(c, buf);
+
+	return 0;
+}
+SHOW_LOCKED(bch_cache_set)
+
+STORE(__bch_cache_set)
+{
+	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+
+	if (attr == &sysfs_unregister)
+		bch_cache_set_unregister(c);
+
+	if (attr == &sysfs_stop)
+		bch_cache_set_stop(c);
+
+	if (attr == &sysfs_synchronous) {
+		bool sync = strtoul_or_return(buf);
+
+		if (sync != CACHE_SYNC(&c->sb)) {
+			SET_CACHE_SYNC(&c->sb, sync);
+			bcache_write_super(c);
+		}
+	}
+
+	if (attr == &sysfs_flash_vol_create) {
+		int r;
+		uint64_t v;
+		strtoi_h_or_return(buf, v);
+
+		r = bch_flash_dev_create(c, v);
+		if (r)
+			return r;
+	}
+
+	if (attr == &sysfs_clear_stats) {
+		atomic_long_set(&c->writeback_keys_done,	0);
+		atomic_long_set(&c->writeback_keys_failed,	0);
+
+		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
+		bch_cache_accounting_clear(&c->accounting);
+	}
+
+	if (attr == &sysfs_trigger_gc)
+		bch_queue_gc(c);
+
+	if (attr == &sysfs_prune_cache) {
+		struct shrink_control sc;
+		sc.gfp_mask = GFP_KERNEL;
+		sc.nr_to_scan = strtoul_or_return(buf);
+		c->shrink.shrink(&c->shrink, &sc);
+	}
+
+	sysfs_strtoul(congested_read_threshold_us,
+		      c->congested_read_threshold_us);
+	sysfs_strtoul(congested_write_threshold_us,
+		      c->congested_write_threshold_us);
+
+	if (attr == &sysfs_io_error_limit)
+		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
+
+	/* See count_io_errors() for why 88 */
+	if (attr == &sysfs_io_error_halflife)
+		c->error_decay = strtoul_or_return(buf) / 88;
+
+	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
+	sysfs_strtoul(verify,			c->verify);
+	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
+	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
+	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
+	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);
+
+	return size;
+}
+STORE_LOCKED(bch_cache_set)
+
+SHOW(bch_cache_set_internal)
+{
+	struct cache_set *c = container_of(kobj, struct cache_set, internal);
+	return bch_cache_set_show(&c->kobj, attr, buf);
+}
+
+STORE(bch_cache_set_internal)
+{
+	struct cache_set *c = container_of(kobj, struct cache_set, internal);
+	return bch_cache_set_store(&c->kobj, attr, buf, size);
+}
+
+static void bch_cache_set_internal_release(struct kobject *k)
+{
+}
+
+static struct attribute *bch_cache_set_files[] = {
+	&sysfs_unregister,
+	&sysfs_stop,
+	&sysfs_synchronous,
+	&sysfs_journal_delay_ms,
+	&sysfs_flash_vol_create,
+
+	&sysfs_bucket_size,
+	&sysfs_block_size,
+	&sysfs_tree_depth,
+	&sysfs_root_usage_percent,
+	&sysfs_btree_cache_size,
+	&sysfs_cache_available_percent,
+
+	&sysfs_average_key_size,
+	&sysfs_dirty_data,
+
+	&sysfs_io_error_limit,
+	&sysfs_io_error_halflife,
+	&sysfs_congested,
+	&sysfs_congested_read_threshold_us,
+	&sysfs_congested_write_threshold_us,
+	&sysfs_clear_stats,
+	NULL
+};
+KTYPE(bch_cache_set);
+
+static struct attribute *bch_cache_set_internal_files[] = {
+	&sysfs_active_journal_entries,
+
+	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
+	sysfs_time_stats_attribute_list(btree_split, sec, us)
+	sysfs_time_stats_attribute_list(btree_sort, ms, us)
+	sysfs_time_stats_attribute_list(btree_read, ms, us)
+	sysfs_time_stats_attribute_list(try_harder, ms, us)
+
+	&sysfs_btree_nodes,
+	&sysfs_btree_used_percent,
+	&sysfs_btree_cache_max_chain,
+
+	&sysfs_bset_tree_stats,
+	&sysfs_cache_read_races,
+	&sysfs_writeback_keys_done,
+	&sysfs_writeback_keys_failed,
+
+	&sysfs_trigger_gc,
+	&sysfs_prune_cache,
+#ifdef CONFIG_BCACHE_DEBUG
+	&sysfs_verify,
+	&sysfs_key_merging_disabled,
+#endif
+	&sysfs_gc_always_rewrite,
+	&sysfs_btree_shrinker_disabled,
+	&sysfs_copy_gc_enabled,
+	NULL
+};
+KTYPE(bch_cache_set_internal);
+
+SHOW(__bch_cache)
+{
+	struct cache *ca = container_of(kobj, struct cache, kobj);
+
+	sysfs_hprint(bucket_size,	bucket_bytes(ca));
+	sysfs_hprint(block_size,	block_bytes(ca));
+	sysfs_print(nbuckets,		ca->sb.nbuckets);
+	sysfs_print(discard,		ca->discard);
+	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
+	sysfs_hprint(btree_written,
+		     atomic_long_read(&ca->btree_sectors_written) << 9);
+	sysfs_hprint(metadata_written,
+		     (atomic_long_read(&ca->meta_sectors_written) +
+		      atomic_long_read(&ca->btree_sectors_written)) << 9);
+
+	sysfs_print(io_errors,
+		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
+
+	sysfs_print(freelist_percent, ca->free.size * 100 /
+		    ((size_t) ca->sb.nbuckets));
+
+	if (attr == &sysfs_cache_replacement_policy)
+		return bch_snprint_string_list(buf, PAGE_SIZE,
+					       cache_replacement_policies,
+					       CACHE_REPLACEMENT(&ca->sb));
+
+	if (attr == &sysfs_priority_stats) {
+		int cmp(const void *l, const void *r)
+		{	return *((uint16_t *) r) - *((uint16_t *) l); }
+
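+		/*
+		 * Snapshot every bucket's priority, sort descending, then
+		 * strip zero-priority (unused) buckets off the tail and
+		 * BTREE_PRIO (metadata) buckets off the front before
+		 * computing the average and the quantiles.
+		 */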
+		/* Number of quantiles we compute */
+		const unsigned nq = 31;
+
+		size_t n = ca->sb.nbuckets, i, unused, btree;
+		uint64_t sum = 0;
+		uint16_t q[nq], *p, *cached;
+		ssize_t ret;
+
+		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
+		if (!p)
+			return -ENOMEM;
+
+		mutex_lock(&ca->set->bucket_lock);
+		for (i = ca->sb.first_bucket; i < n; i++)
+			p[i] = ca->buckets[i].prio;
+		mutex_unlock(&ca->set->bucket_lock);
+
+		sort(p, n, sizeof(uint16_t), cmp, NULL);
+
+		while (n &&
+		       !cached[n - 1])
+			--n;
+
+		unused = ca->sb.nbuckets - n;
+
+		while (cached < p + n &&
+		       *cached == BTREE_PRIO)
+			cached++;
+
+		btree = cached - p;
+		n -= btree;
+
+		for (i = 0; i < n; i++)
+			sum += INITIAL_PRIO - cached[i];
+
+		if (n)
+			do_div(sum, n);
+
+		for (i = 0; i < nq; i++)
+			q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)];
+
+		vfree(p);
+
+		ret = snprintf(buf, PAGE_SIZE,
+			       "Unused:		%zu%%\n"
+			       "Metadata:	%zu%%\n"
+			       "Average:	%llu\n"
+			       "Sectors per Q:	%zu\n"
+			       "Quantiles:	[",
+			       unused * 100 / (size_t) ca->sb.nbuckets,
+			       btree * 100 / (size_t) ca->sb.nbuckets, sum,
+			       n * ca->sb.bucket_size / (nq + 1));
+
+		for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++)
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					i < nq - 1 ? "%u " : "%u]\n", q[i]);
+
+		buf[PAGE_SIZE - 1] = '\0';
+		return ret;
+	}
+
+	return 0;
+}
+SHOW_LOCKED(bch_cache)
+
+STORE(__bch_cache)
+{
+	struct cache *ca = container_of(kobj, struct cache, kobj);
+
+	if (attr == &sysfs_discard) {
+		bool v = strtoul_or_return(buf);
+
+		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+			ca->discard = v;
+
+		if (v != CACHE_DISCARD(&ca->sb)) {
+			SET_CACHE_DISCARD(&ca->sb, v);
+			bcache_write_super(ca->set);
+		}
+	}
+
+	if (attr == &sysfs_cache_replacement_policy) {
+		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);
+
+		if (v < 0)
+			return v;
+
+		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
+			mutex_lock(&ca->set->bucket_lock);
+			SET_CACHE_REPLACEMENT(&ca->sb, v);
+			mutex_unlock(&ca->set->bucket_lock);
+
+			bcache_write_super(ca->set);
+		}
+	}
+
+	if (attr == &sysfs_freelist_percent) {
+		DECLARE_FIFO(long, free);
+		long i;
+		size_t p = strtoul_or_return(buf);
+
+		p = clamp_t(size_t,
+			    ((size_t) ca->sb.nbuckets * p) / 100,
+			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
+			    ca->sb.nbuckets / 2);
+
+		if (!init_fifo_exact(&free, p, GFP_KERNEL))
+			return -ENOMEM;
+
+		mutex_lock(&ca->set->bucket_lock);
+
+		fifo_move(&free, &ca->free);
+		fifo_swap(&free, &ca->free);
+
+		mutex_unlock(&ca->set->bucket_lock);
+
+		while (fifo_pop(&free, i))
+			atomic_dec(&ca->buckets[i].pin);
+
+		free_fifo(&free);
+	}
+
+	if (attr == &sysfs_clear_stats) {
+		atomic_long_set(&ca->sectors_written, 0);
+		atomic_long_set(&ca->btree_sectors_written, 0);
+		atomic_long_set(&ca->meta_sectors_written, 0);
+		atomic_set(&ca->io_count, 0);
+		atomic_set(&ca->io_errors, 0);
+	}
+
+	return size;
+}
+STORE_LOCKED(bch_cache)
+
+static struct attribute *bch_cache_files[] = {
+	&sysfs_bucket_size,
+	&sysfs_block_size,
+	&sysfs_nbuckets,
+	&sysfs_priority_stats,
+	&sysfs_discard,
+	&sysfs_written,
+	&sysfs_btree_written,
+	&sysfs_metadata_written,
+	&sysfs_io_errors,
+	&sysfs_clear_stats,
+	&sysfs_freelist_percent,
+	&sysfs_cache_replacement_policy,
+	NULL
+};
+KTYPE(bch_cache);
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
new file mode 100644
index 0000000..0526fe9
--- /dev/null
+++ b/drivers/md/bcache/sysfs.h
@@ -0,0 +1,110 @@
+#ifndef _BCACHE_SYSFS_H_
+#define _BCACHE_SYSFS_H_
+
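+/*
+ * KTYPE(foo) emits a struct kobj_type named foo_ktype, wired up to
+ * foo_show(), foo_store(), foo_release() and the foo_files attribute
+ * array.
+ */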
+#define KTYPE(type)							\
+struct kobj_type type ## _ktype = {					\
+	.release	= type ## _release,				\
+	.sysfs_ops	= &((const struct sysfs_ops) {			\
+		.show	= type ## _show,				\
+		.store	= type ## _store				\
+	}),								\
+	.default_attrs	= type ## _files				\
+}
+
+#define SHOW(fn)							\
+static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
+			   char *buf)					\
+
+#define STORE(fn)							\
+static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
+			    const char *buf, size_t size)		\
+
+#define SHOW_LOCKED(fn)							\
+SHOW(fn)								\
+{									\
+	ssize_t ret;							\
+	mutex_lock(&bch_register_lock);					\
+	ret = __ ## fn ## _show(kobj, attr, buf);			\
+	mutex_unlock(&bch_register_lock);				\
+	return ret;							\
+}
+
+#define STORE_LOCKED(fn)						\
+STORE(fn)								\
+{									\
+	ssize_t ret;							\
+	mutex_lock(&bch_register_lock);					\
+	ret = __ ## fn ## _store(kobj, attr, buf, size);		\
+	mutex_unlock(&bch_register_lock);				\
+	return ret;							\
+}
+
+#define __sysfs_attribute(_name, _mode)					\
+	static struct attribute sysfs_##_name =				\
+		{ .name = #_name, .mode = _mode }
+
+#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
+#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
+#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)
+
+#define sysfs_printf(file, fmt, ...)					\
+do {									\
+	if (attr == &sysfs_ ## file)					\
+		return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);	\
+} while (0)
+
+#define sysfs_print(file, var)						\
+do {									\
+	if (attr == &sysfs_ ## file)					\
+		return snprint(buf, PAGE_SIZE, var);			\
+} while (0)
+
+#define sysfs_hprint(file, val)						\
+do {									\
+	if (attr == &sysfs_ ## file) {					\
+		ssize_t ret = bch_hprint(buf, val);			\
+		strcat(buf, "\n");					\
+		return ret + 1;						\
+	}								\
+} while (0)
+
+#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
+#define var_print(_var)		sysfs_print(_var, var(_var))
+#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
+
+#define sysfs_strtoul(file, var)					\
+do {									\
+	if (attr == &sysfs_ ## file)					\
+		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
+} while (0)
+
+#define sysfs_strtoul_clamp(file, var, min, max)			\
+do {									\
+	if (attr == &sysfs_ ## file)					\
+		return strtoul_safe_clamp(buf, var, min, max)		\
+			?: (ssize_t) size;				\
+} while (0)
+
+#define strtoul_or_return(cp)						\
+({									\
+	unsigned long _v;						\
+	int _r = kstrtoul(cp, 10, &_v);					\
+	if (_r)								\
+		return _r;						\
+	_v;								\
+})
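+
+/*
+ * Note: on a parse error this returns from the *enclosing* show/store
+ * function, so like the sysfs_* helpers above it may only be used inside
+ * one.
+ */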
+
+#define strtoi_h_or_return(cp, v)					\
+do {									\
+	int _r = strtoi_h(cp, &v);					\
+	if (_r)								\
+		return _r;						\
+} while (0)
+
+#define sysfs_hatoi(file, var)						\
+do {									\
+	if (attr == &sysfs_ ## file)					\
+		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
+} while (0)
+
+#endif  /* _BCACHE_SYSFS_H_ */
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
new file mode 100644
index 0000000..983f9bb
--- /dev/null
+++ b/drivers/md/bcache/trace.c
@@ -0,0 +1,26 @@
+#include "bcache.h"
+#include "btree.h"
+#include "request.h"
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bcache.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_passthrough);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_hit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_miss);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writethrough);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_skip);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_dirty);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_dirty);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
new file mode 100644
index 0000000..da3a99e
--- /dev/null
+++ b/drivers/md/bcache/util.c
@@ -0,0 +1,377 @@
+/*
+ * random utility code, for bcache but in theory not specific to bcache
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "util.h"
+
+#define simple_strtoint(c, end, base)	simple_strtol(c, end, base)
+#define simple_strtouint(c, end, base)	simple_strtoul(c, end, base)
+
+#define STRTO_H(name, type)					\
+int bch_ ## name ## _h(const char *cp, type *res)		\
+{								\
+	int u = 0;						\
+	char *e;						\
+	type i = simple_ ## name(cp, &e, 10);			\
+								\
+	switch (tolower(*e)) {					\
+	default:						\
+		return -EINVAL;					\
+	case 'y':						\
+	case 'z':						\
+		u++;						\
+	case 'e':						\
+		u++;						\
+	case 'p':						\
+		u++;						\
+	case 't':						\
+		u++;						\
+	case 'g':						\
+		u++;						\
+	case 'm':						\
+		u++;						\
+	case 'k':						\
+		u++;						\
+		if (e++ == cp)					\
+			return -EINVAL;				\
+	case '\n':						\
+	case '\0':						\
+		if (*e == '\n')					\
+			e++;					\
+	}							\
+								\
+	if (*e)							\
+		return -EINVAL;					\
+								\
+	while (u--) {						\
+		if ((type) ~0 > 0 &&				\
+		    (type) ~0 / 1024 <= i)			\
+			return -EINVAL;				\
+		if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) ||	\
+		    (i < 0 && -ANYSINT_MAX(type) / 1024 > i))	\
+			return -EINVAL;				\
+		i *= 1024;					\
+	}							\
+								\
+	*res = i;						\
+	return 0;						\
+}								\
+
+STRTO_H(strtoint, int)
+STRTO_H(strtouint, unsigned int)
+STRTO_H(strtoll, long long)
+STRTO_H(strtoull, unsigned long long)
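+
+/*
+ * Example (illustrative): the generated helpers accept a 1024-based size
+ * suffix, so
+ *
+ *	uint64_t v;
+ *	bch_strtoull_h("10M\n", &v);	// v == 10 << 20
+ *	bch_strtoull_h("1k", &v);	// v == 1024
+ *	bch_strtoull_h("10Z", &v);	// -EINVAL, overflows 64 bits
+ *
+ * A bare suffix with no digits is rejected, as is any trailing junk.
+ */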
+
+ssize_t bch_hprint(char *buf, int64_t v)
+{
+	static const char units[] = "?kMGTPEZY";
+	char dec[4] = "";
+	int u, t = 0;
+
+	for (u = 0; v >= 1024 || v <= -1024; u++) {
+		t = v & ~(~0 << 10);
+		v >>= 10;
+	}
+
+	if (!u)
+		return sprintf(buf, "%lli", v);
+
+	if (v < 100 && v > -100)
+		snprintf(dec, sizeof(dec), ".%i", t / 100);
+
+	return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+}
+
+ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+			    size_t selected)
+{
+	char *out = buf;
+	size_t i;
+
+	for (i = 0; list[i]; i++)
+		out += snprintf(out, buf + size - out,
+				i == selected ? "[%s] " : "%s ", list[i]);
+
+	out[-1] = '\n';
+	return out - buf;
+}
+
+ssize_t bch_read_string_list(const char *buf, const char * const list[])
+{
+	size_t i;
+	char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	s = strim(d);
+
+	for (i = 0; list[i]; i++)
+		if (!strcmp(list[i], s))
+			break;
+
+	kfree(d);
+
+	if (!list[i])
+		return -EINVAL;
+
+	return i;
+}
+
+bool bch_is_zero(const char *p, size_t n)
+{
+	size_t i;
+
+	for (i = 0; i < n; i++)
+		if (p[i])
+			return false;
+	return true;
+}
+
+int bch_parse_uuid(const char *s, char *uuid)
+{
+	size_t i, j, x;
+	memset(uuid, 0, 16);
+
+	for (i = 0, j = 0;
+	     i < strspn(s, "-0123456789:ABCDEFabcdef") && j < 32;
+	     i++) {
+		x = s[i] | 32;
+
+		switch (x) {
+		case '0'...'9':
+			x -= '0';
+			break;
+		case 'a'...'f':
+			x -= 'a' - 10;
+			break;
+		default:
+			continue;
+		}
+
+		if (!(j & 1))
+			x <<= 4;
+		uuid[j++ >> 1] |= x;
+	}
+	return i;
+}
+
+void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
+{
+	uint64_t now		= local_clock();
+	uint64_t duration	= time_after64(now, start_time)
+		? now - start_time : 0;
+	uint64_t last		= time_after64(now, stats->last)
+		? now - stats->last : 0;
+
+	stats->max_duration = max(stats->max_duration, duration);
+
+	if (stats->last) {
+		ewma_add(stats->average_duration, duration, 8, 8);
+
+		if (stats->average_frequency)
+			ewma_add(stats->average_frequency, last, 8, 8);
+		else
+			stats->average_frequency  = last << 8;
+	} else {
+		stats->average_duration  = duration << 8;
+	}
+
+	stats->last = now ?: 1;
+}
+
+unsigned bch_next_delay(struct ratelimit *d, uint64_t done)
+{
+	uint64_t now = local_clock();
+
+	d->next += div_u64(done, d->rate);
+
+	return time_after64(d->next, now)
+		? div_u64(d->next - now, NSEC_PER_SEC / HZ)
+		: 0;
+}
+
+void bch_bio_map(struct bio *bio, void *base)
+{
+	size_t size = bio->bi_size;
+	struct bio_vec *bv = bio->bi_io_vec;
+
+	BUG_ON(!bio->bi_size);
+	BUG_ON(bio->bi_vcnt);
+
+	bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
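+	/*
+	 * The first bvec may start mid-page, so enter the loop past the
+	 * point where bv_offset is reset to zero.
+	 */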
+	goto start;
+
+	for (; size; bio->bi_vcnt++, bv++) {
+		bv->bv_offset	= 0;
+start:		bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset,
+					size);
+		if (base) {
+			bv->bv_page = is_vmalloc_addr(base)
+				? vmalloc_to_page(base)
+				: virt_to_page(base);
+
+			base += bv->bv_len;
+		}
+
+		size -= bv->bv_len;
+	}
+}
+
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
+{
+	int i;
+	struct bio_vec *bv;
+
+	bio_for_each_segment(bv, bio, i) {
+		bv->bv_page = alloc_page(gfp);
+		if (!bv->bv_page) {
+			while (bv-- != bio->bi_io_vec + bio->bi_idx)
+				__free_page(bv->bv_page);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
+ * use permitted, subject to the terms of the PostgreSQL license.)
+ *
+ * If we have a 64-bit integer type, then a 64-bit CRC looks just like the
+ * usual sort of implementation. (See Ross Williams' excellent introduction
+ * A PAINLESS GUIDE TO CRC ERROR DETECTION ALGORITHMS, available from
+ * ftp://ftp.rocksoft.com/papers/crc_v3.txt or several other net sites.)
+ * If we have no working 64-bit type, then fake it with two 32-bit registers.
+ *
+ * The present implementation is a normal (not "reflected", in Williams'
+ * terms) 64-bit CRC, using initial all-ones register contents and a final
+ * bit inversion. The chosen polynomial is borrowed from the DLT1 spec
+ * (ECMA-182, available from http://www.ecma.ch/ecma1/STAND/ECMA-182.HTM):
+ *
+ * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x + 1
+ */
+
+static const uint64_t crc_table[256] = {
+	0x0000000000000000ULL, 0x42F0E1EBA9EA3693ULL, 0x85E1C3D753D46D26ULL,
+	0xC711223CFA3E5BB5ULL, 0x493366450E42ECDFULL, 0x0BC387AEA7A8DA4CULL,
+	0xCCD2A5925D9681F9ULL, 0x8E224479F47CB76AULL, 0x9266CC8A1C85D9BEULL,
+	0xD0962D61B56FEF2DULL, 0x17870F5D4F51B498ULL, 0x5577EEB6E6BB820BULL,
+	0xDB55AACF12C73561ULL, 0x99A54B24BB2D03F2ULL, 0x5EB4691841135847ULL,
+	0x1C4488F3E8F96ED4ULL, 0x663D78FF90E185EFULL, 0x24CD9914390BB37CULL,
+	0xE3DCBB28C335E8C9ULL, 0xA12C5AC36ADFDE5AULL, 0x2F0E1EBA9EA36930ULL,
+	0x6DFEFF5137495FA3ULL, 0xAAEFDD6DCD770416ULL, 0xE81F3C86649D3285ULL,
+	0xF45BB4758C645C51ULL, 0xB6AB559E258E6AC2ULL, 0x71BA77A2DFB03177ULL,
+	0x334A9649765A07E4ULL, 0xBD68D2308226B08EULL, 0xFF9833DB2BCC861DULL,
+	0x388911E7D1F2DDA8ULL, 0x7A79F00C7818EB3BULL, 0xCC7AF1FF21C30BDEULL,
+	0x8E8A101488293D4DULL, 0x499B3228721766F8ULL, 0x0B6BD3C3DBFD506BULL,
+	0x854997BA2F81E701ULL, 0xC7B97651866BD192ULL, 0x00A8546D7C558A27ULL,
+	0x4258B586D5BFBCB4ULL, 0x5E1C3D753D46D260ULL, 0x1CECDC9E94ACE4F3ULL,
+	0xDBFDFEA26E92BF46ULL, 0x990D1F49C77889D5ULL, 0x172F5B3033043EBFULL,
+	0x55DFBADB9AEE082CULL, 0x92CE98E760D05399ULL, 0xD03E790CC93A650AULL,
+	0xAA478900B1228E31ULL, 0xE8B768EB18C8B8A2ULL, 0x2FA64AD7E2F6E317ULL,
+	0x6D56AB3C4B1CD584ULL, 0xE374EF45BF6062EEULL, 0xA1840EAE168A547DULL,
+	0x66952C92ECB40FC8ULL, 0x2465CD79455E395BULL, 0x3821458AADA7578FULL,
+	0x7AD1A461044D611CULL, 0xBDC0865DFE733AA9ULL, 0xFF3067B657990C3AULL,
+	0x711223CFA3E5BB50ULL, 0x33E2C2240A0F8DC3ULL, 0xF4F3E018F031D676ULL,
+	0xB60301F359DBE0E5ULL, 0xDA050215EA6C212FULL, 0x98F5E3FE438617BCULL,
+	0x5FE4C1C2B9B84C09ULL, 0x1D14202910527A9AULL, 0x93366450E42ECDF0ULL,
+	0xD1C685BB4DC4FB63ULL, 0x16D7A787B7FAA0D6ULL, 0x5427466C1E109645ULL,
+	0x4863CE9FF6E9F891ULL, 0x0A932F745F03CE02ULL, 0xCD820D48A53D95B7ULL,
+	0x8F72ECA30CD7A324ULL, 0x0150A8DAF8AB144EULL, 0x43A04931514122DDULL,
+	0x84B16B0DAB7F7968ULL, 0xC6418AE602954FFBULL, 0xBC387AEA7A8DA4C0ULL,
+	0xFEC89B01D3679253ULL, 0x39D9B93D2959C9E6ULL, 0x7B2958D680B3FF75ULL,
+	0xF50B1CAF74CF481FULL, 0xB7FBFD44DD257E8CULL, 0x70EADF78271B2539ULL,
+	0x321A3E938EF113AAULL, 0x2E5EB66066087D7EULL, 0x6CAE578BCFE24BEDULL,
+	0xABBF75B735DC1058ULL, 0xE94F945C9C3626CBULL, 0x676DD025684A91A1ULL,
+	0x259D31CEC1A0A732ULL, 0xE28C13F23B9EFC87ULL, 0xA07CF2199274CA14ULL,
+	0x167FF3EACBAF2AF1ULL, 0x548F120162451C62ULL, 0x939E303D987B47D7ULL,
+	0xD16ED1D631917144ULL, 0x5F4C95AFC5EDC62EULL, 0x1DBC74446C07F0BDULL,
+	0xDAAD56789639AB08ULL, 0x985DB7933FD39D9BULL, 0x84193F60D72AF34FULL,
+	0xC6E9DE8B7EC0C5DCULL, 0x01F8FCB784FE9E69ULL, 0x43081D5C2D14A8FAULL,
+	0xCD2A5925D9681F90ULL, 0x8FDAB8CE70822903ULL, 0x48CB9AF28ABC72B6ULL,
+	0x0A3B7B1923564425ULL, 0x70428B155B4EAF1EULL, 0x32B26AFEF2A4998DULL,
+	0xF5A348C2089AC238ULL, 0xB753A929A170F4ABULL, 0x3971ED50550C43C1ULL,
+	0x7B810CBBFCE67552ULL, 0xBC902E8706D82EE7ULL, 0xFE60CF6CAF321874ULL,
+	0xE224479F47CB76A0ULL, 0xA0D4A674EE214033ULL, 0x67C58448141F1B86ULL,
+	0x253565A3BDF52D15ULL, 0xAB1721DA49899A7FULL, 0xE9E7C031E063ACECULL,
+	0x2EF6E20D1A5DF759ULL, 0x6C0603E6B3B7C1CAULL, 0xF6FAE5C07D3274CDULL,
+	0xB40A042BD4D8425EULL, 0x731B26172EE619EBULL, 0x31EBC7FC870C2F78ULL,
+	0xBFC9838573709812ULL, 0xFD39626EDA9AAE81ULL, 0x3A28405220A4F534ULL,
+	0x78D8A1B9894EC3A7ULL, 0x649C294A61B7AD73ULL, 0x266CC8A1C85D9BE0ULL,
+	0xE17DEA9D3263C055ULL, 0xA38D0B769B89F6C6ULL, 0x2DAF4F0F6FF541ACULL,
+	0x6F5FAEE4C61F773FULL, 0xA84E8CD83C212C8AULL, 0xEABE6D3395CB1A19ULL,
+	0x90C79D3FEDD3F122ULL, 0xD2377CD44439C7B1ULL, 0x15265EE8BE079C04ULL,
+	0x57D6BF0317EDAA97ULL, 0xD9F4FB7AE3911DFDULL, 0x9B041A914A7B2B6EULL,
+	0x5C1538ADB04570DBULL, 0x1EE5D94619AF4648ULL, 0x02A151B5F156289CULL,
+	0x4051B05E58BC1E0FULL, 0x87409262A28245BAULL, 0xC5B073890B687329ULL,
+	0x4B9237F0FF14C443ULL, 0x0962D61B56FEF2D0ULL, 0xCE73F427ACC0A965ULL,
+	0x8C8315CC052A9FF6ULL, 0x3A80143F5CF17F13ULL, 0x7870F5D4F51B4980ULL,
+	0xBF61D7E80F251235ULL, 0xFD913603A6CF24A6ULL, 0x73B3727A52B393CCULL,
+	0x31439391FB59A55FULL, 0xF652B1AD0167FEEAULL, 0xB4A25046A88DC879ULL,
+	0xA8E6D8B54074A6ADULL, 0xEA16395EE99E903EULL, 0x2D071B6213A0CB8BULL,
+	0x6FF7FA89BA4AFD18ULL, 0xE1D5BEF04E364A72ULL, 0xA3255F1BE7DC7CE1ULL,
+	0x64347D271DE22754ULL, 0x26C49CCCB40811C7ULL, 0x5CBD6CC0CC10FAFCULL,
+	0x1E4D8D2B65FACC6FULL, 0xD95CAF179FC497DAULL, 0x9BAC4EFC362EA149ULL,
+	0x158E0A85C2521623ULL, 0x577EEB6E6BB820B0ULL, 0x906FC95291867B05ULL,
+	0xD29F28B9386C4D96ULL, 0xCEDBA04AD0952342ULL, 0x8C2B41A1797F15D1ULL,
+	0x4B3A639D83414E64ULL, 0x09CA82762AAB78F7ULL, 0x87E8C60FDED7CF9DULL,
+	0xC51827E4773DF90EULL, 0x020905D88D03A2BBULL, 0x40F9E43324E99428ULL,
+	0x2CFFE7D5975E55E2ULL, 0x6E0F063E3EB46371ULL, 0xA91E2402C48A38C4ULL,
+	0xEBEEC5E96D600E57ULL, 0x65CC8190991CB93DULL, 0x273C607B30F68FAEULL,
+	0xE02D4247CAC8D41BULL, 0xA2DDA3AC6322E288ULL, 0xBE992B5F8BDB8C5CULL,
+	0xFC69CAB42231BACFULL, 0x3B78E888D80FE17AULL, 0x7988096371E5D7E9ULL,
+	0xF7AA4D1A85996083ULL, 0xB55AACF12C735610ULL, 0x724B8ECDD64D0DA5ULL,
+	0x30BB6F267FA73B36ULL, 0x4AC29F2A07BFD00DULL, 0x08327EC1AE55E69EULL,
+	0xCF235CFD546BBD2BULL, 0x8DD3BD16FD818BB8ULL, 0x03F1F96F09FD3CD2ULL,
+	0x41011884A0170A41ULL, 0x86103AB85A2951F4ULL, 0xC4E0DB53F3C36767ULL,
+	0xD8A453A01B3A09B3ULL, 0x9A54B24BB2D03F20ULL, 0x5D45907748EE6495ULL,
+	0x1FB5719CE1045206ULL, 0x919735E51578E56CULL, 0xD367D40EBC92D3FFULL,
+	0x1476F63246AC884AULL, 0x568617D9EF46BED9ULL, 0xE085162AB69D5E3CULL,
+	0xA275F7C11F7768AFULL, 0x6564D5FDE549331AULL, 0x279434164CA30589ULL,
+	0xA9B6706FB8DFB2E3ULL, 0xEB46918411358470ULL, 0x2C57B3B8EB0BDFC5ULL,
+	0x6EA7525342E1E956ULL, 0x72E3DAA0AA188782ULL, 0x30133B4B03F2B111ULL,
+	0xF7021977F9CCEAA4ULL, 0xB5F2F89C5026DC37ULL, 0x3BD0BCE5A45A6B5DULL,
+	0x79205D0E0DB05DCEULL, 0xBE317F32F78E067BULL, 0xFCC19ED95E6430E8ULL,
+	0x86B86ED5267CDBD3ULL, 0xC4488F3E8F96ED40ULL, 0x0359AD0275A8B6F5ULL,
+	0x41A94CE9DC428066ULL, 0xCF8B0890283E370CULL, 0x8D7BE97B81D4019FULL,
+	0x4A6ACB477BEA5A2AULL, 0x089A2AACD2006CB9ULL, 0x14DEA25F3AF9026DULL,
+	0x562E43B4931334FEULL, 0x913F6188692D6F4BULL, 0xD3CF8063C0C759D8ULL,
+	0x5DEDC41A34BBEEB2ULL, 0x1F1D25F19D51D821ULL, 0xD80C07CD676F8394ULL,
+	0x9AFCE626CE85B507ULL,
+};
+
+uint64_t bch_crc64_update(uint64_t crc, const void *_data, size_t len)
+{
+	const unsigned char *data = _data;
+
+	while (len--) {
+		int i = ((int) (crc >> 56) ^ *data++) & 0xFF;
+		crc = crc_table[i] ^ (crc << 8);
+	}
+
+	return crc;
+}
+
+uint64_t bch_crc64(const void *data, size_t len)
+{
+	uint64_t crc = 0xffffffffffffffffULL;
+
+	crc = bch_crc64_update(crc, data, len);
+
+	return crc ^ 0xffffffffffffffffULL;
+}
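+
+/*
+ * Usage sketch (illustrative): one-shot,
+ *
+ *	uint64_t csum = bch_crc64(data, len);
+ *
+ * or incrementally, applying the all-ones pre/post conditioning by hand:
+ *
+ *	uint64_t crc = 0xffffffffffffffffULL;
+ *	crc = bch_crc64_update(crc, buf1, len1);
+ *	crc = bch_crc64_update(crc, buf2, len2);
+ *	crc ^= 0xffffffffffffffffULL;
+ */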
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
new file mode 100644
index 0000000..577393e
--- /dev/null
+++ b/drivers/md/bcache/util.h
@@ -0,0 +1,589 @@
+#ifndef _BCACHE_UTIL_H
+#define _BCACHE_UTIL_H
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/llist.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include "closure.h"
+
+#define PAGE_SECTORS		(PAGE_SIZE / 512)
+
+struct closure;
+
+#include <trace/events/bcache.h>
+
+#ifdef CONFIG_BCACHE_EDEBUG
+
+#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
+#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
+
+#else /* EDEBUG */
+
+#define atomic_dec_bug(v)	atomic_dec(v)
+#define atomic_inc_bug(v, i)	atomic_inc(v)
+
+#endif
+
+#define BITMASK(name, type, field, offset, size)		\
+static inline uint64_t name(const type *k)			\
+{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); }	\
+								\
+static inline void SET_##name(type *k, uint64_t v)		\
+{								\
+	k->field &= ~(~((uint64_t) ~0 << size) << offset);	\
+	k->field |= v << offset;				\
+}
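+
+/*
+ * e.g. BITMASK(KEY_SIZE, struct bkey, high, 20, 16) would generate
+ * KEY_SIZE()/SET_KEY_SIZE() accessors for a 16 bit field at bit 20 of
+ * bkey->high (field placement here is hypothetical).
+ */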
+
+#define DECLARE_HEAP(type, name)					\
+	struct {							\
+		size_t size, used;					\
+		type *data;						\
+	} name
+
+#define init_heap(heap, _size, gfp)					\
+({									\
+	size_t _bytes;							\
+	(heap)->used = 0;						\
+	(heap)->size = (_size);						\
+	_bytes = (heap)->size * sizeof(*(heap)->data);			\
+	(heap)->data = NULL;						\
+	if (_bytes < KMALLOC_MAX_SIZE)					\
+		(heap)->data = kmalloc(_bytes, (gfp));			\
+	if ((!(heap)->data) && ((gfp) & GFP_KERNEL))			\
+		(heap)->data = vmalloc(_bytes);				\
+	(heap)->data;							\
+})
+
+#define free_heap(heap)							\
+do {									\
+	if (is_vmalloc_addr((heap)->data))				\
+		vfree((heap)->data);					\
+	else								\
+		kfree((heap)->data);					\
+	(heap)->data = NULL;						\
+} while (0)
+
+#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
+
+#define heap_sift(h, i, cmp)						\
+do {									\
+	size_t _r, _j = i;						\
+									\
+	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
+		_r = _j * 2 + 1;					\
+		if (_r + 1 < (h)->used &&				\
+		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
+			_r++;						\
+									\
+		if (cmp((h)->data[_r], (h)->data[_j]))			\
+			break;						\
+		heap_swap(h, _r, _j);					\
+	}								\
+} while (0)
+
+#define heap_sift_down(h, i, cmp)					\
+do {									\
+	while (i) {							\
+		size_t p = (i - 1) / 2;					\
+		if (cmp((h)->data[i], (h)->data[p]))			\
+			break;						\
+		heap_swap(h, i, p);					\
+		i = p;							\
+	}								\
+} while (0)
+
+#define heap_add(h, d, cmp)						\
+({									\
+	bool _r = !heap_full(h);					\
+	if (_r) {							\
+		size_t _i = (h)->used++;				\
+		(h)->data[_i] = d;					\
+									\
+		heap_sift_down(h, _i, cmp);				\
+		heap_sift(h, _i, cmp);					\
+	}								\
+	_r;								\
+})
+
+#define heap_pop(h, d, cmp)						\
+({									\
+	bool _r = (h)->used;						\
+	if (_r) {							\
+		(d) = (h)->data[0];					\
+		(h)->used--;						\
+		heap_swap(h, 0, (h)->used);				\
+		heap_sift(h, 0, cmp);					\
+	}								\
+	_r;								\
+})
+
+#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)
+
+#define heap_full(h)	((h)->used == (h)->size)
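+
+/*
+ * Illustrative use of the heap macros above (hypothetical names):
+ *
+ *	static bool int_cmp(int l, int r) { return l < r; }
+ *
+ *	DECLARE_HEAP(int, h);
+ *	int v;
+ *
+ *	if (!init_heap(&h, 128, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	heap_add(&h, 42, int_cmp);
+ *	while (heap_pop(&h, v, int_cmp))
+ *		...;
+ *	free_heap(&h);
+ *
+ * cmp(a, b) returning true means a may sit below b, so with l < r the root
+ * holds the largest element (a max-heap).
+ */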
+
+#define DECLARE_FIFO(type, name)					\
+	struct {							\
+		size_t front, back, size, mask;				\
+		type *data;						\
+	} name
+
+#define fifo_for_each(c, fifo, iter)					\
+	for (iter = (fifo)->front;					\
+	     c = (fifo)->data[iter], iter != (fifo)->back;		\
+	     iter = (iter + 1) & (fifo)->mask)
+
+#define __init_fifo(fifo, gfp)						\
+({									\
+	size_t _allocated_size, _bytes;					\
+	BUG_ON(!(fifo)->size);						\
+									\
+	_allocated_size = roundup_pow_of_two((fifo)->size + 1);		\
+	_bytes = _allocated_size * sizeof(*(fifo)->data);		\
+									\
+	(fifo)->mask = _allocated_size - 1;				\
+	(fifo)->front = (fifo)->back = 0;				\
+	(fifo)->data = NULL;						\
+									\
+	if (_bytes < KMALLOC_MAX_SIZE)					\
+		(fifo)->data = kmalloc(_bytes, (gfp));			\
+	if ((!(fifo)->data) && ((gfp) & GFP_KERNEL))			\
+		(fifo)->data = vmalloc(_bytes);				\
+	(fifo)->data;							\
+})
+
+#define init_fifo_exact(fifo, _size, gfp)				\
+({									\
+	(fifo)->size = (_size);						\
+	__init_fifo(fifo, gfp);						\
+})
+
+#define init_fifo(fifo, _size, gfp)					\
+({									\
+	(fifo)->size = (_size);						\
+	if ((fifo)->size > 4)						\
+		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1;	\
+	__init_fifo(fifo, gfp);						\
+})
+
+#define free_fifo(fifo)							\
+do {									\
+	if (is_vmalloc_addr((fifo)->data))				\
+		vfree((fifo)->data);					\
+	else								\
+		kfree((fifo)->data);					\
+	(fifo)->data = NULL;						\
+} while (0)
+
+#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
+#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))
+
+#define fifo_empty(fifo)	(!fifo_used(fifo))
+#define fifo_full(fifo)		(!fifo_free(fifo))
+
+#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
+#define fifo_back(fifo)							\
+	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])
+
+#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)
+
+#define fifo_push_back(fifo, i)						\
+({									\
+	bool _r = !fifo_full((fifo));					\
+	if (_r) {							\
+		(fifo)->data[(fifo)->back++] = (i);			\
+		(fifo)->back &= (fifo)->mask;				\
+	}								\
+	_r;								\
+})
+
+#define fifo_pop_front(fifo, i)						\
+({									\
+	bool _r = !fifo_empty((fifo));					\
+	if (_r) {							\
+		(i) = (fifo)->data[(fifo)->front++];			\
+		(fifo)->front &= (fifo)->mask;				\
+	}								\
+	_r;								\
+})
+
+#define fifo_push_front(fifo, i)					\
+({									\
+	bool _r = !fifo_full((fifo));					\
+	if (_r) {							\
+		--(fifo)->front;					\
+		(fifo)->front &= (fifo)->mask;				\
+		(fifo)->data[(fifo)->front] = (i);			\
+	}								\
+	_r;								\
+})
+
+#define fifo_pop_back(fifo, i)						\
+({									\
+	bool _r = !fifo_empty((fifo));					\
+	if (_r) {							\
+		--(fifo)->back;						\
+		(fifo)->back &= (fifo)->mask;				\
+		(i) = (fifo)->data[(fifo)->back];	\
+	}								\
+	_r;								\
+})
+
+#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
+#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))
+
+#define fifo_swap(l, r)							\
+do {									\
+	swap((l)->front, (r)->front);					\
+	swap((l)->back, (r)->back);					\
+	swap((l)->size, (r)->size);					\
+	swap((l)->mask, (r)->mask);					\
+	swap((l)->data, (r)->data);					\
+} while (0)
+
+#define fifo_move(dest, src)						\
+do {									\
+	typeof(*((dest)->data)) _t;					\
+	while (!fifo_full(dest) &&					\
+	       fifo_pop(src, _t))					\
+		fifo_push(dest, _t);					\
+} while (0)
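+
+/*
+ * Illustrative use of the fifo macros above (hypothetical names):
+ *
+ *	DECLARE_FIFO(long, free);
+ *	long i;
+ *	size_t iter;
+ *
+ *	if (!init_fifo(&free, 512, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	fifo_push(&free, 42);
+ *	fifo_for_each(i, &free, iter)
+ *		...;
+ *	while (fifo_pop(&free, i))
+ *		...;
+ *	free_fifo(&free);
+ *
+ * init_fifo() rounds the requested size to a power of two minus one;
+ * init_fifo_exact() keeps it, though the backing array is still a power
+ * of two.  Neither variant does any locking.
+ */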
+
+/*
+ * Simple array based allocator - preallocates a number of elements and you can
+ * never allocate more than that, also has no locking.
+ *
+ * Handy because if you know you only need a fixed number of elements you don't
+ * have to worry about memory allocation failure, and sometimes a mempool isn't
+ * what you want.
+ *
+ * We treat the free elements as entries in a singly linked list, and the
+ * freelist as a stack - allocating and freeing push and pop off the freelist.
+ */
+
+#define DECLARE_ARRAY_ALLOCATOR(type, name, size)			\
+	struct {							\
+		type	*freelist;					\
+		type	data[size];					\
+	} name
+
+#define array_alloc(array)						\
+({									\
+	typeof((array)->freelist) _ret = (array)->freelist;		\
+									\
+	if (_ret)							\
+		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
+									\
+	_ret;								\
+})
+
+#define array_free(array, ptr)						\
+do {									\
+	typeof((array)->freelist) _ptr = ptr;				\
+									\
+	*((typeof((array)->freelist) *) _ptr) = (array)->freelist;	\
+	(array)->freelist = _ptr;					\
+} while (0)
+
+#define array_allocator_init(array)					\
+do {									\
+	typeof((array)->freelist) _i;					\
+									\
+	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *));	\
+	(array)->freelist = NULL;					\
+									\
+	for (_i = (array)->data;					\
+	     _i < (array)->data + ARRAY_SIZE((array)->data);		\
+	     _i++)							\
+		array_free(array, _i);					\
+} while (0)
+
+#define array_freelist_empty(array)	((array)->freelist == NULL)
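+
+/*
+ * Illustrative use (hypothetical names): 64 preallocated entries embedded
+ * in a parent structure, so allocation cannot fail at use time:
+ *
+ *	DECLARE_ARRAY_ALLOCATOR(struct io, pool, 64);
+ *
+ *	array_allocator_init(&pool);
+ *	io = array_alloc(&pool);	// NULL once all 64 are in use
+ *	...
+ *	array_free(&pool, io);
+ */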
+
+#define ANYSINT_MAX(t)							\
+	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
+
+int bch_strtoint_h(const char *, int *);
+int bch_strtouint_h(const char *, unsigned int *);
+int bch_strtoll_h(const char *, long long *);
+int bch_strtoull_h(const char *, unsigned long long *);
+
+static inline int bch_strtol_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+	return bch_strtoint_h(cp, (int *) res);
+#else
+	return bch_strtoll_h(cp, (long long *) res);
+#endif
+}
+
+static inline int bch_strtoul_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+	return bch_strtouint_h(cp, (unsigned int *) res);
+#else
+	return bch_strtoull_h(cp, (unsigned long long *) res);
+#endif
+}
+
+#define strtoi_h(cp, res)						\
+	(__builtin_types_compatible_p(typeof(*res), int)		\
+	? bch_strtoint_h(cp, (void *) res)				\
+	: __builtin_types_compatible_p(typeof(*res), long)		\
+	? bch_strtol_h(cp, (void *) res)				\
+	: __builtin_types_compatible_p(typeof(*res), long long)		\
+	? bch_strtoll_h(cp, (void *) res)				\
+	: __builtin_types_compatible_p(typeof(*res), unsigned int)	\
+	? bch_strtouint_h(cp, (void *) res)				\
+	: __builtin_types_compatible_p(typeof(*res), unsigned long)	\
+	? bch_strtoul_h(cp, (void *) res)				\
+	: __builtin_types_compatible_p(typeof(*res), unsigned long long)\
+	? bch_strtoull_h(cp, (void *) res) : -EINVAL)
+
+#define strtoul_safe(cp, var)						\
+({									\
+	unsigned long _v;						\
+	int _r = kstrtoul(cp, 10, &_v);					\
+	if (!_r)							\
+		var = _v;						\
+	_r;								\
+})
+
+#define strtoul_safe_clamp(cp, var, min, max)				\
+({									\
+	unsigned long _v;						\
+	int _r = kstrtoul(cp, 10, &_v);					\
+	if (!_r)							\
+		var = clamp_t(typeof(var), _v, min, max);		\
+	_r;								\
+})
+
+#define snprint(buf, size, var)						\
+	snprintf(buf, size,						\
+		__builtin_types_compatible_p(typeof(var), int)		\
+		     ? "%i\n" :						\
+		__builtin_types_compatible_p(typeof(var), unsigned)	\
+		     ? "%u\n" :						\
+		__builtin_types_compatible_p(typeof(var), long)		\
+		     ? "%li\n" :					\
+		__builtin_types_compatible_p(typeof(var), unsigned long)\
+		     ? "%lu\n" :					\
+		__builtin_types_compatible_p(typeof(var), int64_t)	\
+		     ? "%lli\n" :					\
+		__builtin_types_compatible_p(typeof(var), uint64_t)	\
+		     ? "%llu\n" :					\
+		__builtin_types_compatible_p(typeof(var), const char *)	\
+		     ? "%s\n" : "%i\n", var)
+
+ssize_t bch_hprint(char *buf, int64_t v);
+
+bool bch_is_zero(const char *p, size_t n);
+int bch_parse_uuid(const char *s, char *uuid);
+
+ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+			    size_t selected);
+
+ssize_t bch_read_string_list(const char *buf, const char * const list[]);
+
+struct time_stats {
+	/*
+	 * all fields are in nanoseconds, averages are ewmas stored left shifted
+	 * by 8
+	 */
+	uint64_t	max_duration;
+	uint64_t	average_duration;
+	uint64_t	average_frequency;
+	uint64_t	last;
+};
+
+void bch_time_stats_update(struct time_stats *stats, uint64_t time);
+
+#define NSEC_PER_ns			1L
+#define NSEC_PER_us			NSEC_PER_USEC
+#define NSEC_PER_ms			NSEC_PER_MSEC
+#define NSEC_PER_sec			NSEC_PER_SEC
+
+#define __print_time_stat(stats, name, stat, units)			\
+	sysfs_print(name ## _ ## stat ## _ ## units,			\
+		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))
+
+#define sysfs_print_time_stats(stats, name,				\
+			       frequency_units,				\
+			       duration_units)				\
+do {									\
+	__print_time_stat(stats, name,					\
+			  average_frequency,	frequency_units);	\
+	__print_time_stat(stats, name,					\
+			  average_duration,	duration_units);	\
+	__print_time_stat(stats, name,					\
+			  max_duration,		duration_units);	\
+									\
+	sysfs_print(name ## _last_ ## frequency_units, (stats)->last	\
+		    ? div_s64(local_clock() - (stats)->last,		\
+			      NSEC_PER_ ## frequency_units)		\
+		    : -1LL);						\
+} while (0)
+
+#define sysfs_time_stats_attribute(name,				\
+				   frequency_units,			\
+				   duration_units)			\
+read_attribute(name ## _average_frequency_ ## frequency_units);		\
+read_attribute(name ## _average_duration_ ## duration_units);		\
+read_attribute(name ## _max_duration_ ## duration_units);		\
+read_attribute(name ## _last_ ## frequency_units)
+
+#define sysfs_time_stats_attribute_list(name,				\
+					frequency_units,		\
+					duration_units)			\
+&sysfs_ ## name ## _average_frequency_ ## frequency_units,		\
+&sysfs_ ## name ## _average_duration_ ## duration_units,		\
+&sysfs_ ## name ## _max_duration_ ## duration_units,			\
+&sysfs_ ## name ## _last_ ## frequency_units,
+
+#define ewma_add(ewma, val, weight, factor)				\
+({									\
+	(ewma) *= (weight) - 1;						\
+	(ewma) += (val) << factor;					\
+	(ewma) /= (weight);						\
+	(ewma) >> factor;						\
+})
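+
+/*
+ * ewma_add() keeps the average left shifted by 'factor' fixed-point bits
+ * and decays it by (weight - 1)/weight per sample; with weight 8 and a
+ * constant input val, successive results starting from zero are val/8,
+ * then 15*val/64, converging on val.  The update is not atomic; callers
+ * provide their own locking.
+ */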
+
+struct ratelimit {
+	uint64_t		next;
+	unsigned		rate;
+};
+
+static inline void ratelimit_reset(struct ratelimit *d)
+{
+	d->next = local_clock();
+}
+
+unsigned bch_next_delay(struct ratelimit *d, uint64_t done);
+
+#define __DIV_SAFE(n, d, zero)						\
+({									\
+	typeof(n) _n = (n);						\
+	typeof(d) _d = (d);						\
+	_d ? _n / _d : zero;						\
+})
+
+#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)
+
+#define container_of_or_null(ptr, type, member)				\
+({									\
+	typeof(ptr) _ptr = ptr;						\
+	_ptr ? container_of(_ptr, type, member) : NULL;			\
+})
+
+#define RB_INSERT(root, new, member, cmp)				\
+({									\
+	__label__ dup;							\
+	struct rb_node **n = &(root)->rb_node, *parent = NULL;		\
+	typeof(new) this;						\
+	int res, ret = -1;						\
+									\
+	while (*n) {							\
+		parent = *n;						\
+		this = container_of(*n, typeof(*(new)), member);	\
+		res = cmp(new, this);					\
+		if (!res)						\
+			goto dup;					\
+		n = res < 0						\
+			? &(*n)->rb_left				\
+			: &(*n)->rb_right;				\
+	}								\
+									\
+	rb_link_node(&(new)->member, parent, n);			\
+	rb_insert_color(&(new)->member, root);				\
+	ret = 0;							\
+dup:									\
+	ret;								\
+})
+
+#define RB_SEARCH(root, search, member, cmp)				\
+({									\
+	struct rb_node *n = (root)->rb_node;				\
+	typeof(&(search)) this, ret = NULL;				\
+	int res;							\
+									\
+	while (n) {							\
+		this = container_of(n, typeof(search), member);		\
+		res = cmp(&(search), this);				\
+		if (!res) {						\
+			ret = this;					\
+			break;						\
+		}							\
+		n = res < 0						\
+			? n->rb_left					\
+			: n->rb_right;					\
+	}								\
+	ret;								\
+})
+
+#define RB_GREATER(root, search, member, cmp)				\
+({									\
+	struct rb_node *n = (root)->rb_node;				\
+	typeof(&(search)) this, ret = NULL;				\
+	int res;							\
+									\
+	while (n) {							\
+		this = container_of(n, typeof(search), member);		\
+		res = cmp(&(search), this);				\
+		if (res < 0) {						\
+			ret = this;					\
+			n = n->rb_left;					\
+		} else							\
+			n = n->rb_right;				\
+	}								\
+	ret;								\
+})
+
+#define RB_FIRST(root, type, member)					\
+	container_of_or_null(rb_first(root), type, member)
+
+#define RB_LAST(root, type, member)					\
+	container_of_or_null(rb_last(root), type, member)
+
+#define RB_NEXT(ptr, member)						\
+	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)
+
+#define RB_PREV(ptr, member)						\
+	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
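+
+/*
+ * Illustrative use of the rb tree helpers above (hypothetical names):
+ *
+ *	static int key_cmp(struct item *l, struct item *r)
+ *	{ return l->key < r->key ? -1 : l->key > r->key; }
+ *
+ *	if (RB_INSERT(&root, new, node, key_cmp))
+ *		...;	// -1: an equal key already exists, nothing inserted
+ *
+ *	struct item s = { .key = key };
+ *	found = RB_SEARCH(&root, s, node, key_cmp);
+ *
+ * cmp() follows memcmp() conventions.  RB_GREATER() returns the smallest
+ * element strictly greater than the search key, or NULL.
+ */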
+
+/* Does linear interpolation between powers of two */
+static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
+{
+	unsigned fract = x & ~(~0 << fract_bits);
+
+	x >>= fract_bits;
+	x   = 1 << x;
+	x  += (x * fract) >> fract_bits;
+
+	return x;
+}
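+
+/*
+ * e.g. fract_exp_two(35, 3) reads 35 as 4 + 3/8 and returns
+ * (1 << 4) + ((16 * 3) >> 3) == 22, a linear approximation of
+ * 2^4.375 (~20.7).
+ */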
+
+#define bio_end(bio)	((bio)->bi_sector + bio_sectors(bio))
+
+void bch_bio_map(struct bio *bio, void *base);
+
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);
+
+static inline sector_t bdev_sectors(struct block_device *bdev)
+{
+	return bdev->bd_inode->i_size >> 9;
+}
+
+#define closure_bio_submit(bio, cl, dev)				\
+do {									\
+	closure_get(cl);						\
+	bch_generic_make_request(bio, &(dev)->bio_split_hook);		\
+} while (0)
+
+uint64_t bch_crc64_update(uint64_t, const void *, size_t);
+uint64_t bch_crc64(const void *, size_t);
+
+#endif /* _BCACHE_UTIL_H */
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
new file mode 100644
index 0000000..93e7e31
--- /dev/null
+++ b/drivers/md/bcache/writeback.c
@@ -0,0 +1,414 @@
+/*
+ * background writeback - scan btree for dirty data and write it to the backing
+ * device
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcache.h"
+#include "btree.h"
+#include "debug.h"
+
+static struct workqueue_struct *dirty_wq;
+
+static void read_dirty(struct closure *);
+
+struct dirty_io {
+	struct closure		cl;
+	struct cached_dev	*dc;
+	struct bio		bio;
+};
+
+/* Rate limiting */
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
+	struct cache_set *c = dc->disk.c;
+	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+	uint64_t cache_dirty_target =
+		div_u64(cache_sectors * dc->writeback_percent, 100);
+
+	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
+				   c->cached_dev_sectors);
+
+	/* PD controller */
+
+	int change = 0;
+	int64_t error;
+	int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty);
+	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+
+	dc->disk.sectors_dirty_last = dirty;
+
+	derivative *= dc->writeback_rate_d_term;
+	derivative = clamp(derivative, -dirty, dirty);
+
+	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+			      dc->writeback_rate_d_smooth, 0);
+
+	/* Avoid divide by zero */
+	if (!target)
+		goto out;
+
+	error = div64_s64((dirty + derivative - target) << 8, target);
+
+	change = div_s64((dc->writeback_rate.rate * error) >> 8,
+			 dc->writeback_rate_p_term_inverse);
+
+	/* Don't increase writeback rate if the device isn't keeping up */
+	if (change > 0 &&
+	    time_after64(local_clock(),
+			 dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+		change = 0;
+
+	dc->writeback_rate.rate =
+		clamp_t(int64_t, dc->writeback_rate.rate + change,
+			1, NSEC_PER_MSEC);
+out:
+	dc->writeback_rate_derivative = derivative;
+	dc->writeback_rate_change = change;
+	dc->writeback_rate_target = target;
+
+	schedule_delayed_work(&dc->writeback_rate_update,
+			      dc->writeback_rate_update_seconds * HZ);
+}
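+
+/*
+ * To summarize the controller above: the proportional term is how far
+ * sectors_dirty is from its target (in 8 fixed-point bits), the derivative
+ * term is a smoothed EWMA of its rate of change, and each interval the
+ * rate is adjusted by (rate * error / 256) / writeback_rate_p_term_inverse.
+ * Increases are suppressed while writeback is already failing to keep up
+ * with the current rate (d->next has slipped more than 10ms into the past).
+ */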
+
+static void update_writeback_rate(struct work_struct *work)
+{
+	struct cached_dev *dc = container_of(to_delayed_work(work),
+					     struct cached_dev,
+					     writeback_rate_update);
+
+	down_read(&dc->writeback_lock);
+
+	if (atomic_read(&dc->has_dirty) &&
+	    dc->writeback_percent)
+		__update_writeback_rate(dc);
+
+	up_read(&dc->writeback_lock);
+}
+
+static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
+{
+	if (atomic_read(&dc->disk.detaching) ||
+	    !dc->writeback_percent)
+		return 0;
+
+	return bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
+}
+
+/* Background writeback */
+
+static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+{
+	return KEY_DIRTY(k);
+}
+
+static void dirty_init(struct keybuf_key *w)
+{
+	struct dirty_io *io = w->private;
+	struct bio *bio = &io->bio;
+
+	bio_init(bio);
+	if (!io->dc->writeback_percent)
+		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+	bio->bi_size		= KEY_SIZE(&w->key) << 9;
+	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
+	bio->bi_private		= w;
+	bio->bi_io_vec		= bio->bi_inline_vecs;
+	bch_bio_map(bio, NULL);
+}
+
+static void refill_dirty(struct closure *cl)
+{
+	struct cached_dev *dc = container_of(cl, struct cached_dev,
+					     writeback.cl);
+	struct keybuf *buf = &dc->writeback_keys;
+	bool searched_from_start = false;
+	struct bkey end = MAX_KEY;
+	SET_KEY_INODE(&end, dc->disk.id);
+
+	if (!atomic_read(&dc->disk.detaching) &&
+	    !dc->writeback_running)
+		closure_return(cl);
+
+	down_write(&dc->writeback_lock);
+
+	if (!atomic_read(&dc->has_dirty)) {
+		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+		bch_write_bdev_super(dc, NULL);
+
+		up_write(&dc->writeback_lock);
+		closure_return(cl);
+	}
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+		searched_from_start = true;
+	}
+
+	bch_refill_keybuf(dc->disk.c, buf, &end);
+
+	if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
+		/* Searched the entire btree - delay awhile */
+
+		if (RB_EMPTY_ROOT(&buf->keys)) {
+			atomic_set(&dc->has_dirty, 0);
+			cached_dev_put(dc);
+		}
+
+		if (!atomic_read(&dc->disk.detaching))
+			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
+	}
+
+	up_write(&dc->writeback_lock);
+
+	ratelimit_reset(&dc->writeback_rate);
+
+	/* Punt to workqueue only so we don't recurse and blow the stack */
+	continue_at(cl, read_dirty, dirty_wq);
+}
+
+void bch_writeback_queue(struct cached_dev *dc)
+{
+	if (closure_trylock(&dc->writeback.cl, &dc->disk.cl)) {
+		if (!atomic_read(&dc->disk.detaching))
+			closure_delay(&dc->writeback, dc->writeback_delay * HZ);
+
+		continue_at(&dc->writeback.cl, refill_dirty, dirty_wq);
+	}
+}
+
+void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
+{
+	atomic_long_add(sectors, &dc->disk.sectors_dirty);
+
+	if (!atomic_read(&dc->has_dirty) &&
+	    !atomic_xchg(&dc->has_dirty, 1)) {
+		atomic_inc(&dc->count);
+
+		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+			/* XXX: should do this synchronously */
+			bch_write_bdev_super(dc, NULL);
+		}
+
+		bch_writeback_queue(dc);
+
+		if (dc->writeback_percent)
+			schedule_delayed_work(&dc->writeback_rate_update,
+				      dc->writeback_rate_update_seconds * HZ);
+	}
+}
+
+/* Background writeback - IO loop */
+
+static void dirty_io_destructor(struct closure *cl)
+{
+	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	kfree(io);
+}
+
+static void write_dirty_finish(struct closure *cl)
+{
+	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	struct keybuf_key *w = io->bio.bi_private;
+	struct cached_dev *dc = io->dc;
+	struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt);
+
+	while (bv-- != io->bio.bi_io_vec)
+		__free_page(bv->bv_page);
+
+	/* This is kind of a dumb way of signalling errors. */
+	if (KEY_DIRTY(&w->key)) {
+		unsigned i;
+		struct btree_op op;
+		bch_btree_op_init_stack(&op);
+
+		op.type = BTREE_REPLACE;
+		bkey_copy(&op.replace, &w->key);
+
+		SET_KEY_DIRTY(&w->key, false);
+		bch_keylist_add(&op.keys, &w->key);
+
+		for (i = 0; i < KEY_PTRS(&w->key); i++)
+			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
+
+		pr_debug("clearing %s", pkey(&w->key));
+		bch_btree_insert(&op, dc->disk.c);
+		closure_sync(&op.cl);
+
+		atomic_long_inc(op.insert_collision
+				? &dc->disk.c->writeback_keys_failed
+				: &dc->disk.c->writeback_keys_done);
+	}
+
+	bch_keybuf_del(&dc->writeback_keys, w);
+	atomic_dec_bug(&dc->in_flight);
+
+	closure_wake_up(&dc->writeback_wait);
+
+	closure_return_with_destructor(cl, dirty_io_destructor);
+}
+
+static void dirty_endio(struct bio *bio, int error)
+{
+	struct keybuf_key *w = bio->bi_private;
+	struct dirty_io *io = w->private;
+
+	if (error)
+		SET_KEY_DIRTY(&w->key, false);
+
+	closure_put(&io->cl);
+}
+
+static void write_dirty(struct closure *cl)
+{
+	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+	struct keybuf_key *w = io->bio.bi_private;
+
+	dirty_init(w);
+	io->bio.bi_rw		= WRITE;
+	io->bio.bi_sector	= KEY_START(&w->key);
+	io->bio.bi_bdev		= io->dc->bdev;
+	io->bio.bi_end_io	= dirty_endio;
+
+	trace_bcache_write_dirty(&io->bio);
+	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+
+	continue_at(cl, write_dirty_finish, dirty_wq);
+}
+
+static void read_dirty_endio(struct bio *bio, int error)
+{
+	struct keybuf_key *w = bio->bi_private;
+	struct dirty_io *io = w->private;
+
+	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
+			    error, "reading dirty data from cache");
+
+	dirty_endio(bio, error);
+}
+
+static void read_dirty_submit(struct closure *cl)
+{
+	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+
+	trace_bcache_read_dirty(&io->bio);
+	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+
+	continue_at(cl, write_dirty, dirty_wq);
+}
+
+static void read_dirty(struct closure *cl)
+{
+	struct cached_dev *dc = container_of(cl, struct cached_dev,
+					     writeback.cl);
+	unsigned delay = writeback_delay(dc, 0);
+	struct keybuf_key *w;
+	struct dirty_io *io;
+
+	/*
+	 * XXX: if we error, background writeback just spins. Should use some
+	 * mempools.
+	 */
+
+	while (1) {
+		w = bch_keybuf_next(&dc->writeback_keys);
+		if (!w)
+			break;
+
+		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
+
+		if (delay > 0 &&
+		    (KEY_START(&w->key) != dc->last_read ||
+		     jiffies_to_msecs(delay) > 50)) {
+			w->private = NULL;
+
+			closure_delay(&dc->writeback, delay);
+			continue_at(cl, read_dirty, dirty_wq);
+		}
+
+		dc->last_read	= KEY_OFFSET(&w->key);
+
+		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
+			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+			     GFP_KERNEL);
+		if (!io)
+			goto err;
+
+		w->private	= io;
+		io->dc		= dc;
+
+		dirty_init(w);
+		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
+		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
+						    &w->key, 0)->bdev;
+		io->bio.bi_rw		= READ;
+		io->bio.bi_end_io	= read_dirty_endio;
+
+		if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+			goto err_free;
+
+		pr_debug("%s", pkey(&w->key));
+
+		closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
+
+		delay = writeback_delay(dc, KEY_SIZE(&w->key));
+
+		atomic_inc(&dc->in_flight);
+
+		if (!closure_wait_event(&dc->writeback_wait, cl,
+					atomic_read(&dc->in_flight) < 64))
+			continue_at(cl, read_dirty, dirty_wq);
+	}
+
+	if (0) {
+err_free:
+		kfree(w->private);
+err:
+		bch_keybuf_del(&dc->writeback_keys, w);
+	}
+
+	refill_dirty(cl);
+}
+
+void bch_writeback_init_cached_dev(struct cached_dev *dc)
+{
+	closure_init_unlocked(&dc->writeback);
+	init_rwsem(&dc->writeback_lock);
+
+	bch_keybuf_init(&dc->writeback_keys, dirty_pred);
+
+	dc->writeback_metadata		= true;
+	dc->writeback_running		= true;
+	dc->writeback_percent		= 10;
+	dc->writeback_delay		= 30;
+	dc->writeback_rate.rate		= 1024;
+
+	dc->writeback_rate_update_seconds = 30;
+	dc->writeback_rate_d_term	= 16;
+	dc->writeback_rate_p_term_inverse = 64;
+	dc->writeback_rate_d_smooth	= 8;
+
+	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
+	schedule_delayed_work(&dc->writeback_rate_update,
+			      dc->writeback_rate_update_seconds * HZ);
+}
+
+void bch_writeback_exit(void)
+{
+	if (dirty_wq)
+		destroy_workqueue(dirty_wq);
+}
+
+int __init bch_writeback_init(void)
+{
+	dirty_wq = create_singlethread_workqueue("bcache_writeback");
+	if (!dirty_wq)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c608313..0387e05 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -319,6 +319,9 @@
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 			       enum data_mode *data_mode)
 {
+	unsigned noio_flag;
+	void *ptr;
+
 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 		*data_mode = DATA_MODE_SLAB;
 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@
 	}
 
 	*data_mode = DATA_MODE_VMALLOC;
-	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+	/*
+	 * __vmalloc allocates the data pages and auxiliary structures with
+	 * gfp_flags that were specified, but pagetables are always allocated
+	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
+	 *
+	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+	 * all allocations done by this process (including pagetables) are done
+	 * as if GFP_NOIO was specified.
+	 */
+
+	if (gfp_mask & __GFP_NORETRY)
+		noio_flag = memalloc_noio_save();
+
+	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+	if (gfp_mask & __GFP_NORETRY)
+		memalloc_noio_restore(noio_flag);
+
+	return ptr;
 }
 
 /*
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 83e995f..1af7255 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1044,7 +1044,7 @@
 				 struct dm_cache_statistics *stats)
 {
 	down_read(&cmd->root_lock);
-	memcpy(stats, &cmd->stats, sizeof(*stats));
+	*stats = cmd->stats;
 	up_read(&cmd->root_lock);
 }
 
@@ -1052,7 +1052,7 @@
 				 struct dm_cache_statistics *stats)
 {
 	down_write(&cmd->root_lock);
-	memcpy(&cmd->stats, stats, sizeof(*stats));
+	cmd->stats = *stats;
 	up_write(&cmd->root_lock);
 }
 
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 558bdfd..33369ca 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -130,8 +130,8 @@
 	 *
 	 * Must not block.
 	 *
-	 * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
-	 * would be typical).
+	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+	 * (-EWOULDBLOCK would be typical).
 	 */
 	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
 
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1074409..df44b60 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -205,7 +205,7 @@
 	/*
 	 * writethrough fields.  These MUST remain at the end of this
 	 * structure and the 'cache' member must be the first as it
-	 * is used to determine the offsetof the writethrough fields.
+	 * is used to determine the offset of the writethrough fields.
 	 */
 	struct cache *cache;
 	dm_cblock_t cblock;
@@ -393,7 +393,7 @@
 	return r;
 }
 
- /*----------------------------------------------------------------*/
+/*----------------------------------------------------------------*/
 
 static bool is_dirty(struct cache *cache, dm_cblock_t b)
 {
@@ -419,6 +419,7 @@
 }
 
 /*----------------------------------------------------------------*/
+
 static bool block_size_is_power_of_two(struct cache *cache)
 {
 	return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@
 
 	/*
 	 * We can't issue this bio directly, since we're in interrupt
-	 * context.  So it get's put on a bio list for processing by the
+	 * context.  So it gets put on a bio list for processing by the
 	 * worker thread.
 	 */
 	defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@
 static void do_waker(struct work_struct *ws)
 {
 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+	policy_tick(cache->policy);
 	wake_worker(cache);
 	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -1809,7 +1811,37 @@
 
 static struct kmem_cache *migration_cache;
 
-static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, const char *key, const char *value)
+{
+	unsigned long tmp;
+
+	if (!strcasecmp(key, "migration_threshold")) {
+		if (kstrtoul(value, 10, &tmp))
+			return -EINVAL;
+
+		cache->migration_threshold = tmp;
+		return 0;
+	}
+
+	return NOT_CORE_OPTION;
+}
+
+static int set_config_value(struct cache *cache, const char *key, const char *value)
+{
+	int r = process_config_option(cache, key, value);
+
+	if (r == NOT_CORE_OPTION)
+		r = policy_set_config_value(cache->policy, key, value);
+
+	if (r)
+		DMWARN("bad config value for %s: %s", key, value);
+
+	return r;
+}
+
+static int set_config_values(struct cache *cache, int argc, const char **argv)
 {
 	int r = 0;
 
@@ -1819,12 +1851,9 @@
 	}
 
 	while (argc) {
-		r = policy_set_config_value(p, argv[0], argv[1]);
-		if (r) {
-			DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
-			       argv[0], argv[1]);
-			return r;
-		}
+		r = set_config_value(cache, argv[0], argv[1]);
+		if (r)
+			break;
 
 		argc -= 2;
 		argv += 2;
@@ -1836,8 +1865,6 @@
 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 			       char **error)
 {
-	int r;
-
 	cache->policy =	dm_cache_policy_create(ca->policy_name,
 					       cache->cache_size,
 					       cache->origin_sectors,
@@ -1847,14 +1874,7 @@
 		return -ENOMEM;
 	}
 
-	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-	if (r) {
-		*error = "Error setting cache policy's config values";
-		dm_cache_policy_destroy(cache->policy);
-		cache->policy = NULL;
-	}
-
-	return r;
+	return 0;
 }
 
 /*
@@ -1886,7 +1906,7 @@
 	return discard_block_size;
 }
 
-#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+#define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
@@ -1911,7 +1931,7 @@
 	ti->discards_supported = true;
 	ti->discard_zeroes_data_unsupported = true;
 
-	memcpy(&cache->features, &ca->features, sizeof(cache->features));
+	cache->features = ca->features;
 	ti->per_bio_data_size = get_per_bio_data_size(cache);
 
 	cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@
 	r = create_cache_policy(cache, ca, error);
 	if (r)
 		goto bad;
+
 	cache->policy_nr_args = ca->policy_argc;
+	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+
+	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
+	if (r) {
+		*error = "Error setting cache policy's config values";
+		goto bad;
+	}
 
 	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
 				     ca->block_size, may_format,
@@ -1967,10 +1995,10 @@
 	INIT_LIST_HEAD(&cache->quiesced_migrations);
 	INIT_LIST_HEAD(&cache->completed_migrations);
 	INIT_LIST_HEAD(&cache->need_commit_migrations);
-	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
 	atomic_set(&cache->nr_migrations, 0);
 	init_waitqueue_head(&cache->migration_wait);
 
+	r = -ENOMEM;
 	cache->nr_dirty = 0;
 	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
 	if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@
 	DMEMIT("Error");
 }
 
-#define NOT_CORE_OPTION 1
-
-static int process_config_option(struct cache *cache, char **argv)
-{
-	unsigned long tmp;
-
-	if (!strcasecmp(argv[0], "migration_threshold")) {
-		if (kstrtoul(argv[1], 10, &tmp))
-			return -EINVAL;
-
-		cache->migration_threshold = tmp;
-		return 0;
-	}
-
-	return NOT_CORE_OPTION;
-}
-
 /*
  * Supports <key> <value>.
  *
@@ -2541,17 +2552,12 @@
  */
 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
 {
-	int r;
 	struct cache *cache = ti->private;
 
 	if (argc != 2)
 		return -EINVAL;
 
-	r = process_config_option(cache, argv);
-	if (r == NOT_CORE_OPTION)
-		return policy_set_config_value(cache->policy, argv[0], argv[1]);
-
-	return r;
+	return set_config_value(cache, argv[0], argv[1]);
 }
 
 static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 1, 0},
+	.version = {1, 1, 1},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13c1548..6d2d41a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -858,8 +858,7 @@
 	unsigned int i;
 	struct bio_vec *bv;
 
-	for (i = 0; i < clone->bi_vcnt; i++) {
-		bv = bio_iovec_idx(clone, i);
+	bio_for_each_segment_all(bv, clone, i) {
 		BUG_ON(!bv->bv_page);
 		mempool_free(bv->bv_page, cc->page_pool);
 		bv->bv_page = NULL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 51bb816..bdf26f5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -907,6 +907,7 @@
 
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
+	ti->num_write_same_bios = 1;
 
 	return 0;
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d053098..699b5be 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -458,7 +458,7 @@
 {
 	io->bdev = m->dev->bdev;
 	io->sector = map_sector(m, bio);
-	io->count = bio->bi_size >> 9;
+	io->count = bio_sectors(bio);
 }
 
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0e0702..c434e5a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1121,6 +1121,7 @@
 	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
 	if (!s->pending_pool) {
 		ti->error = "Could not allocate mempool for pending exceptions";
+		r = -ENOMEM;
 		goto bad_pending_pool;
 	}
 
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d8837d3..d907ca6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -94,7 +94,7 @@
 static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct stripe_c *sc;
-	sector_t width;
+	sector_t width, tmp_len;
 	uint32_t stripes;
 	uint32_t chunk_size;
 	int r;
@@ -116,18 +116,19 @@
 	}
 
 	width = ti->len;
-	if (sector_div(width, chunk_size)) {
-		ti->error = "Target length not divisible by "
-		    "chunk size";
-		return -EINVAL;
-	}
-
 	if (sector_div(width, stripes)) {
 		ti->error = "Target length not divisible by "
 		    "number of stripes";
 		return -EINVAL;
 	}
 
+	tmp_len = width;
+	if (sector_div(tmp_len, chunk_size)) {
+		ti->error = "Target length not divisible by "
+		    "chunk size";
+		return -EINVAL;
+	}
+
 	/*
 	 * Do we have enough arguments for that many stripes ?
 	 */
@@ -258,7 +259,7 @@
 	sector_t begin, end;
 
 	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
-	stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+	stripe_map_range_sector(sc, bio_end_sector(bio),
 				target_stripe, &end);
 	if (begin < end) {
 		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e50dad0c..1ff252a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1442,7 +1442,7 @@
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
 			return false;
 	}
 
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 00cee02..60bce43 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1645,12 +1645,12 @@
 	return r;
 }
 
-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
 {
 	int r;
 	dm_block_t old_count;
 
-	r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+	r = dm_sm_get_nr_blocks(sm, &old_count);
 	if (r)
 		return r;
 
@@ -1658,11 +1658,11 @@
 		return 0;
 
 	if (new_count < old_count) {
-		DMERR("cannot reduce size of data device");
+		DMERR("cannot reduce size of space map");
 		return -EINVAL;
 	}
 
-	return dm_sm_extend(pmd->data_sm, new_count - old_count);
+	return dm_sm_extend(sm, new_count - old_count);
 }
 
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@
 
 	down_write(&pmd->root_lock);
 	if (!pmd->fail_io)
-		r = __resize_data_dev(pmd, new_count);
+		r = __resize_space_map(pmd->data_sm, new_count);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
+
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+	int r = -EINVAL;
+
+	down_write(&pmd->root_lock);
+	if (!pmd->fail_io)
+		r = __resize_space_map(pmd->metadata_sm, new_count);
 	up_write(&pmd->root_lock);
 
 	return r;
@@ -1684,3 +1696,17 @@
 	dm_bm_set_read_only(pmd->bm);
 	up_write(&pmd->root_lock);
 }
+
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+					dm_block_t threshold,
+					dm_sm_threshold_fn fn,
+					void *context)
+{
+	int r;
+
+	down_write(&pmd->root_lock);
+	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 0cecc37..845ebbe 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -8,6 +8,7 @@
 #define DM_THIN_METADATA_H
 
 #include "persistent-data/dm-block-manager.h"
+#include "persistent-data/dm-space-map.h"
 
 #define THIN_METADATA_BLOCK_SIZE 4096
 
@@ -185,6 +186,7 @@
  * blocks would be lost.
  */
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
 
 /*
  * Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@
  */
 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
 
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+					dm_block_t threshold,
+					dm_sm_threshold_fn fn,
+					void *context);
+
 /*----------------------------------------------------------------*/
 
 #endif
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 004ad165..759cffc 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -922,7 +922,7 @@
 		return r;
 
 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
-		DMWARN("%s: reached low water mark, sending event.",
+		DMWARN("%s: reached low water mark for data device: sending event.",
 		       dm_device_name(pool->pool_md));
 		spin_lock_irqsave(&pool->lock, flags);
 		pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@
 	bio_io_error(bio);
 }
 
+/*
+ * FIXME: should we also commit due to size of transaction, measured in
+ * metadata blocks?
+ */
 static int need_commit_due_to_time(struct pool *pool)
 {
 	return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@
 	return r;
 }
 
+static void metadata_low_callback(void *context)
+{
+	struct pool *pool = context;
+
+	DMWARN("%s: reached low water mark for metadata device: sending event.",
+	       dm_device_name(pool->pool_md));
+
+	dm_table_event(pool->ti->table);
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+	sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	char buffer[BDEVNAME_SIZE];
+
+	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+		metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+	}
+
+	return metadata_dev_size;
+}
+
+static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
+{
+	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
+
+	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+
+	return metadata_dev_size;
+}
+
+/*
+ * When a metadata threshold is crossed a dm event is triggered, and
+ * userland should respond by growing the metadata device.  We could let
+ * userland set the threshold, like we do with the data threshold, but I'm
+ * not sure they know enough to do this well.
+ */
+static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+{
+	/*
+	 * 4M is ample for all ops with the possible exception of thin
+	 * device deletion which is harmless if it fails (just retry the
+	 * delete after you've grown the device).
+	 */
+	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+	return min((dm_block_t)1024ULL /* 4M */, quarter);
+}
+
 /*
  * thin-pool <metadata dev> <data dev>
  *	     <data block size (sectors)>
@@ -1931,8 +1985,7 @@
 	unsigned long block_size;
 	dm_block_t low_water_blocks;
 	struct dm_dev *metadata_dev;
-	sector_t metadata_dev_size;
-	char b[BDEVNAME_SIZE];
+	fmode_t metadata_mode;
 
 	/*
 	 * FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@
 		r = -EINVAL;
 		goto out_unlock;
 	}
+
 	as.argc = argc;
 	as.argv = argv;
 
-	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+	/*
+	 * Set default pool features.
+	 */
+	pool_features_init(&pf);
+
+	dm_consume_args(&as, 4);
+	r = parse_pool_features(&as, &pf, ti);
+	if (r)
+		goto out_unlock;
+
+	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
 	if (r) {
 		ti->error = "Error opening metadata block device";
 		goto out_unlock;
 	}
 
-	metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
-	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-		       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+	/*
+	 * Run for the side-effect of possibly issuing a warning if the
+	 * device is too big.
+	 */
+	(void) get_metadata_dev_size(metadata_dev->bdev);
 
 	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
 	if (r) {
@@ -1979,16 +2045,6 @@
 		goto out;
 	}
 
-	/*
-	 * Set default pool features.
-	 */
-	pool_features_init(&pf);
-
-	dm_consume_args(&as, 4);
-	r = parse_pool_features(&as, &pf, ti);
-	if (r)
-		goto out;
-
 	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
 	if (!pt) {
 		r = -ENOMEM;
@@ -2040,6 +2096,13 @@
 	}
 	ti->private = pt;
 
+	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
+						calc_metadata_threshold(pt),
+						metadata_low_callback,
+						pool);
+	if (r)
+		goto out_free_pt;
+
 	pt->callbacks.congested_fn = pool_is_congested;
 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
@@ -2079,6 +2142,78 @@
 	return r;
 }
 
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
+{
+	int r;
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	sector_t data_size = ti->len;
+	dm_block_t sb_data_size;
+
+	*need_commit = false;
+
+	(void) sector_div(data_size, pool->sectors_per_block);
+
+	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
+	if (r) {
+		DMERR("failed to retrieve data device size");
+		return r;
+	}
+
+	if (data_size < sb_data_size) {
+		DMERR("pool target (%llu blocks) too small: expected %llu",
+		      (unsigned long long)data_size, sb_data_size);
+		return -EINVAL;
+
+	} else if (data_size > sb_data_size) {
+		r = dm_pool_resize_data_dev(pool->pmd, data_size);
+		if (r) {
+			DMERR("failed to resize data device");
+			set_pool_mode(pool, PM_READ_ONLY);
+			return r;
+		}
+
+		*need_commit = true;
+	}
+
+	return 0;
+}
+
+static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+{
+	int r;
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	dm_block_t metadata_dev_size, sb_metadata_dev_size;
+
+	*need_commit = false;
+
+	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
+
+	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
+	if (r) {
+		DMERR("failed to retrieve metadata device size");
+		return r;
+	}
+
+	if (metadata_dev_size < sb_metadata_dev_size) {
+		DMERR("metadata device (%llu blocks) too small: expected %llu",
+		      metadata_dev_size, sb_metadata_dev_size);
+		return -EINVAL;
+
+	} else if (metadata_dev_size > sb_metadata_dev_size) {
+		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+		if (r) {
+			DMERR("failed to resize metadata device");
+			return r;
+		}
+
+		*need_commit = true;
+	}
+
+	return 0;
+}
+
 /*
  * Retrieves the number of blocks of the data device from
  * the superblock and compares it to the actual device size,
@@ -2093,10 +2228,9 @@
 static int pool_preresume(struct dm_target *ti)
 {
 	int r;
+	bool need_commit1, need_commit2;
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	sector_t data_size = ti->len;
-	dm_block_t sb_data_size;
 
 	/*
 	 * Take control of the pool object.
@@ -2105,30 +2239,16 @@
 	if (r)
 		return r;
 
-	(void) sector_div(data_size, pool->sectors_per_block);
-
-	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
-	if (r) {
-		DMERR("failed to retrieve data device size");
+	r = maybe_resize_data_dev(ti, &need_commit1);
+	if (r)
 		return r;
-	}
 
-	if (data_size < sb_data_size) {
-		DMERR("pool target too small, is %llu blocks (expected %llu)",
-		      (unsigned long long)data_size, sb_data_size);
-		return -EINVAL;
+	r = maybe_resize_metadata_dev(ti, &need_commit2);
+	if (r)
+		return r;
 
-	} else if (data_size > sb_data_size) {
-		r = dm_pool_resize_data_dev(pool->pmd, data_size);
-		if (r) {
-			DMERR("failed to resize data device");
-			/* FIXME Stricter than necessary: Rollback transaction instead here */
-			set_pool_mode(pool, PM_READ_ONLY);
-			return r;
-		}
-
+	if (need_commit1 || need_commit2)
 		(void) commit_or_fallback(pool);
-	}
 
 	return 0;
 }
@@ -2549,7 +2669,7 @@
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 7, 0},
+	.version = {1, 8, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
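
calc_metadata_threshold() above caps the threshold at 1024 blocks, i.e. 4MiB of 4KiB metadata blocks, and for small devices falls back to a quarter of the device so the event does not fire immediately. The arithmetic in standalone form, with two worked values (assumed userspace harness, not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dm_block_t;

/* 4MiB expressed in 4KiB metadata blocks */
#define FOUR_MEGS_IN_BLOCKS	1024ULL

static dm_block_t calc_threshold(dm_block_t dev_size_in_blocks)
{
	dm_block_t quarter = dev_size_in_blocks / 4;

	return quarter < FOUR_MEGS_IN_BLOCKS ? quarter : FOUR_MEGS_IN_BLOCKS;
}

int main(void)
{
	/* 64MiB metadata device = 16384 blocks: the 4MiB cap wins */
	printf("%llu\n", (unsigned long long)calc_threshold(16384));	/* 1024 */

	/* 2MiB metadata device = 512 blocks: a quarter of the device */
	printf("%llu\n", (unsigned long long)calc_threshold(512));	/* 128 */
	return 0;
}
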
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index a746f1d..b948fd8 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -501,7 +501,7 @@
 		return -EIO;
 	}
 
-	if ((bio->bi_sector + bio_sectors(bio)) >>
+	if (bio_end_sector(bio) >>
 	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
 		DMERR_LIMIT("io out of range");
 		return -EIO;
@@ -519,7 +519,7 @@
 
 	bio->bi_end_io = verity_end_io;
 	bio->bi_private = io;
-	io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
+	io->io_vec_size = bio_segments(bio);
 	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
 		io->io_vec = io->io_vec_inline;
 	else
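
The dm-verity hunks, like most of the md changes below, swap open-coded bi_sector/bi_size/bi_vcnt arithmetic for the block layer's accessors. On a kernel of this vintage the helpers reduce to simple expressions over the bio fields; a self-contained model using a dummy struct (the real struct bio carries much more state):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct bio {
	sector_t	bi_sector;	/* first sector of the I/O */
	unsigned int	bi_size;	/* bytes remaining */
	unsigned short	bi_vcnt;	/* total vector entries */
	unsigned short	bi_idx;		/* current entry */
};

#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors(bio))
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)

int main(void)
{
	struct bio b = {
		.bi_sector = 1000, .bi_size = 8192, .bi_vcnt = 3, .bi_idx = 1,
	};

	printf("end sector: %llu\n", (unsigned long long)bio_end_sector(&b)); /* 1016 */
	printf("segments:   %d\n", bio_segments(&b));			      /* 2 */
	return 0;
}
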
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9a0bdad..d5370a9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -339,7 +339,7 @@
 	return md ? 0 : -ENXIO;
 }
 
-static int dm_blk_close(struct gendisk *disk, fmode_t mode)
+static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
 	struct mapped_device *md = disk->private_data;
 
@@ -349,8 +349,6 @@
 	dm_put(md);
 
 	spin_unlock(&_minor_lock);
-
-	return 0;
 }
 
 int dm_open_count(struct mapped_device *md)
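
dm_blk_close() loses its return value as part of a tree-wide change: the block layer never consumed the result of ->release(), so the method becomes void and drivers must handle errors internally. A minimal sketch of the resulting ops-table shape (hypothetical names, userspace stand-ins):

#include <stdio.h>

struct gendisk;				/* opaque here */
typedef unsigned int fmode_t;

struct block_device_operations {
	int  (*open)(struct gendisk *disk, fmode_t mode);
	void (*release)(struct gendisk *disk, fmode_t mode);	/* was int */
};

static void demo_release(struct gendisk *disk, fmode_t mode)
{
	(void)disk;
	(void)mode;	/* drop references, flush, unlock media, ... */
}

static const struct block_device_operations demo_ops = {
	.open	 = NULL,
	.release = demo_release,
};

int main(void)
{
	demo_ops.release(NULL, 0);	/* nothing to check: errors stay internal */
	printf("release completed\n");
	return 0;
}
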
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 5e7dc77..3193aef 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -185,8 +185,7 @@
 			return;
 		}
 
-		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
-				 WRITE))
+		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
 			failit = 1;
 		if (check_mode(conf, WritePersistent)) {
 			add_sector(conf, bio->bi_sector, WritePersistent);
@@ -196,8 +195,7 @@
 			failit = 1;
 	} else {
 		/* read request */
-		if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
-				 READ))
+		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
 			failit = 1;
 		if (check_mode(conf, ReadTransient))
 			failit = 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 2101483..f03fabd 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -317,8 +317,7 @@
 		bio_io_error(bio);
 		return;
 	}
-	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
-		     tmp_dev->end_sector)) {
+	if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
 		/* This bio crosses a device boundary, so we have to
 		 * split it.
 		 */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4c74424..681d109 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -197,21 +197,12 @@
 	if (offset == 0 && size == bio->bi_size)
 		return;
 
-	bio->bi_sector += offset;
-	bio->bi_size = size;
-	offset <<= 9;
 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
-	while (bio->bi_idx < bio->bi_vcnt &&
-	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
-		/* remove this whole bio_vec */
-		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
-		bio->bi_idx++;
-	}
-	if (bio->bi_idx < bio->bi_vcnt) {
-		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
-		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
-	}
+	bio_advance(bio, offset << 9);
+
+	bio->bi_size = size;
+
 	/* avoid any complications with bi_idx being non-zero*/
 	if (bio->bi_idx) {
 		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
@@ -6674,15 +6665,13 @@
 	return err;
 }
 
-static int md_release(struct gendisk *disk, fmode_t mode)
+static void md_release(struct gendisk *disk, fmode_t mode)
 {
  	struct mddev *mddev = disk->private_data;
 
 	BUG_ON(!mddev);
 	atomic_dec(&mddev->openers);
 	mddev_put(mddev);
-
-	return 0;
 }
 
 static int md_media_changed(struct gendisk *disk)
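
md_trim_bio()'s hand-rolled vector walk is replaced by bio_advance(), which moves bi_sector forward, shrinks bi_size, and steps bi_idx/bv_offset across the io_vec in one call. A simplified userspace model of that behaviour (no integrity or zero-copy handling):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct bio_vec { unsigned int bv_len, bv_offset; };

struct bio {
	sector_t	bi_sector;
	unsigned int	bi_size;
	unsigned short	bi_idx, bi_vcnt;
	struct bio_vec	bi_io_vec[4];
};

static void bio_advance(struct bio *bio, unsigned int bytes)
{
	bio->bi_sector += bytes >> 9;
	bio->bi_size   -= bytes;

	/* skip whole vector entries... */
	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= bytes) {
		bytes -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}

	/* ...then trim the front of the first remaining entry */
	if (bytes && bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += bytes;
		bio->bi_io_vec[bio->bi_idx].bv_len    -= bytes;
	}
}

int main(void)
{
	struct bio b = {
		.bi_sector = 0, .bi_size = 12288, .bi_vcnt = 3,
		.bi_io_vec = { { 4096, 0 }, { 4096, 0 }, { 4096, 0 } },
	};

	bio_advance(&b, 6144);	/* consume a page and a half */
	printf("sector=%llu size=%u idx=%d offset=%u\n",
	       (unsigned long long)b.bi_sector, b.bi_size,
	       b.bi_idx, b.bi_io_vec[b.bi_idx].bv_offset);
	/* prints: sector=12 size=6144 idx=1 offset=2048 */
	return 0;
}
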
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index f6d29e6..e735a6d 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -248,7 +248,8 @@
 	.new_block = sm_disk_new_block,
 	.commit = sm_disk_commit,
 	.root_size = sm_disk_root_size,
-	.copy_root = sm_disk_copy_root
+	.copy_root = sm_disk_copy_root,
+	.register_threshold_callback = NULL
 };
 
 struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 906cf3d..1c95968 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -17,6 +17,55 @@
 /*----------------------------------------------------------------*/
 
 /*
+ * An edge-triggered threshold.
+ */
+struct threshold {
+	bool threshold_set;
+	bool value_set;
+	dm_block_t threshold;
+	dm_block_t current_value;
+	dm_sm_threshold_fn fn;
+	void *context;
+};
+
+static void threshold_init(struct threshold *t)
+{
+	t->threshold_set = false;
+	t->value_set = false;
+}
+
+static void set_threshold(struct threshold *t, dm_block_t value,
+			  dm_sm_threshold_fn fn, void *context)
+{
+	t->threshold_set = true;
+	t->threshold = value;
+	t->fn = fn;
+	t->context = context;
+}
+
+static bool below_threshold(struct threshold *t, dm_block_t value)
+{
+	return t->threshold_set && value <= t->threshold;
+}
+
+static bool threshold_already_triggered(struct threshold *t)
+{
+	return t->value_set && below_threshold(t, t->current_value);
+}
+
+static void check_threshold(struct threshold *t, dm_block_t value)
+{
+	if (below_threshold(t, value) &&
+	    !threshold_already_triggered(t))
+		t->fn(t->context);
+
+	t->value_set = true;
+	t->current_value = value;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
  * Space map interface.
  *
  * The low level disk format is written using the standard btree and
@@ -54,6 +103,8 @@
 	unsigned allocated_this_transaction;
 	unsigned nr_uncommitted;
 	struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+
+	struct threshold threshold;
 };
 
 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@
 	kfree(smm);
 }
 
-static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
-	DMERR("doesn't support extend");
-	return -EINVAL;
-}
-
 static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
 {
 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@
 
 static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
 {
+	dm_block_t count;
+	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
 	int r = sm_metadata_new_block_(sm, b);
 	if (r)
 		DMERR("unable to allocate new metadata block");
+
+	r = sm_metadata_get_nr_free(sm, &count);
+	if (r)
+		DMERR("couldn't get free block count");
+
+	check_threshold(&smm->threshold, count);
+
 	return r;
 }
 
@@ -357,6 +412,18 @@
 	return 0;
 }
 
+static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
+						   dm_block_t threshold,
+						   dm_sm_threshold_fn fn,
+						   void *context)
+{
+	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+	set_threshold(&smm->threshold, threshold, fn, context);
+
+	return 0;
+}
+
 static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
 {
 	*result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@
 	return 0;
 }
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
+
 static struct dm_space_map ops = {
 	.destroy = sm_metadata_destroy,
 	.extend = sm_metadata_extend,
@@ -395,7 +464,8 @@
 	.new_block = sm_metadata_new_block,
 	.commit = sm_metadata_commit,
 	.root_size = sm_metadata_root_size,
-	.copy_root = sm_metadata_copy_root
+	.copy_root = sm_metadata_copy_root,
+	.register_threshold_callback = sm_metadata_register_threshold_callback
 };
 
 /*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@
 
 static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 {
-	DMERR("boostrap doesn't support extend");
+	DMERR("bootstrap doesn't support extend");
 
 	return -EINVAL;
 }
@@ -450,7 +520,7 @@
 static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
 				  uint32_t count)
 {
-	DMERR("boostrap doesn't support set_count");
+	DMERR("bootstrap doesn't support set_count");
 
 	return -EINVAL;
 }
@@ -491,7 +561,7 @@
 
 static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 {
-	DMERR("boostrap doesn't support root_size");
+	DMERR("bootstrap doesn't support root_size");
 
 	return -EINVAL;
 }
@@ -499,7 +569,7 @@
 static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
 				  size_t max)
 {
-	DMERR("boostrap doesn't support copy_root");
+	DMERR("bootstrap doesn't support copy_root");
 
 	return -EINVAL;
 }
@@ -517,11 +587,42 @@
 	.new_block = sm_bootstrap_new_block,
 	.commit = sm_bootstrap_commit,
 	.root_size = sm_bootstrap_root_size,
-	.copy_root = sm_bootstrap_copy_root
+	.copy_root = sm_bootstrap_copy_root,
+	.register_threshold_callback = NULL
 };
 
 /*----------------------------------------------------------------*/
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+	int r, i;
+	enum allocation_event ev;
+	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+	dm_block_t old_len = smm->ll.nr_blocks;
+
+	/*
+	 * Flick into a mode where all blocks get allocated in the new area.
+	 */
+	smm->begin = old_len;
+	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+	/*
+	 * Extend.
+	 */
+	r = sm_ll_extend(&smm->ll, extra_blocks);
+
+	/*
+	 * Switch back to normal behaviour.
+	 */
+	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+	for (i = old_len; !r && i < smm->begin; i++)
+		r = sm_ll_inc(&smm->ll, i, &ev);
+
+	return r;
+}
+
+/*----------------------------------------------------------------*/
+
 struct dm_space_map *dm_sm_metadata_init(void)
 {
 	struct sm_metadata *smm;
@@ -549,6 +650,7 @@
 	smm->recursion_count = 0;
 	smm->allocated_this_transaction = 0;
 	smm->nr_uncommitted = 0;
+	threshold_init(&smm->threshold);
 
 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
 
@@ -590,6 +692,7 @@
 	smm->recursion_count = 0;
 	smm->allocated_this_transaction = 0;
 	smm->nr_uncommitted = 0;
+	threshold_init(&smm->threshold);
 
 	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
 	return 0;
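
Two things worth noting in this file. First, the threshold is edge-triggered: check_threshold() fires the callback only on the transition from above to at-or-below the threshold, and re-arms once free space rises again, so userland gets one event per crossing rather than one per allocation. Second, sm_metadata_extend() temporarily swaps in the bootstrap ops so that blocks consumed while extending are allocated from the new area, then restores the normal ops and bumps reference counts for whatever was used. A standalone demonstration of the trigger logic, following check_threshold() above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dm_block_t;
typedef void (*dm_sm_threshold_fn)(void *context);

struct threshold {
	bool threshold_set, value_set;
	dm_block_t threshold, current_value;
	dm_sm_threshold_fn fn;
	void *context;
};

static bool below_threshold(struct threshold *t, dm_block_t value)
{
	return t->threshold_set && value <= t->threshold;
}

static void check_threshold(struct threshold *t, dm_block_t value)
{
	bool already = t->value_set && below_threshold(t, t->current_value);

	if (below_threshold(t, value) && !already)
		t->fn(t->context);	/* fire only on the crossing */

	t->value_set = true;
	t->current_value = value;
}

static void warn(void *ctx) { printf("low water mark: %s\n", (const char *)ctx); }

int main(void)
{
	struct threshold t = {
		.threshold_set = true, .threshold = 10,
		.fn = warn, .context = "metadata",
	};

	check_threshold(&t, 50);	/* above: silent */
	check_threshold(&t, 9);		/* crossing: fires once */
	check_threshold(&t, 5);		/* still below: silent */
	check_threshold(&t, 50);	/* back above: re-arms */
	check_threshold(&t, 10);	/* crossing again: fires */
	return 0;
}
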
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
index 1cbfc6b..3e6d115 100644
--- a/drivers/md/persistent-data/dm-space-map.h
+++ b/drivers/md/persistent-data/dm-space-map.h
@@ -9,6 +9,8 @@
 
 #include "dm-block-manager.h"
 
+typedef void (*dm_sm_threshold_fn)(void *context);
+
 /*
  * struct dm_space_map keeps a record of how many times each block in a device
  * is referenced.  It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@
 	 */
 	int (*root_size)(struct dm_space_map *sm, size_t *result);
 	int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+
+	/*
+	 * You can register one threshold callback which is edge-triggered
+	 * when the free space in the space map drops below the threshold.
+	 */
+	int (*register_threshold_callback)(struct dm_space_map *sm,
+					   dm_block_t threshold,
+					   dm_sm_threshold_fn fn,
+					   void *context);
 };
 
 /*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@
 	return sm->copy_root(sm, copy_to_here_le, len);
 }
 
+static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
+						    dm_block_t threshold,
+						    dm_sm_threshold_fn fn,
+						    void *context)
+{
+	if (sm->register_threshold_callback)
+		return sm->register_threshold_callback(sm, threshold, fn, context);
+
+	return -EINVAL;
+}
+
+
 #endif	/* _LINUX_DM_SPACE_MAP_H */
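
register_threshold_callback is deliberately optional: the disk and bootstrap space maps leave it NULL, and the dm_sm_register_threshold_callback() wrapper returns -EINVAL in that case. A minimal sketch of this optional-operation dispatch (hypothetical types):

#include <errno.h>
#include <stdio.h>

typedef void (*threshold_fn)(void *context);

struct space_map_ops {
	int (*register_threshold)(void *sm, unsigned long threshold,
				  threshold_fn fn, void *context);
};

static int do_register_threshold(const struct space_map_ops *ops, void *sm,
				 unsigned long threshold, threshold_fn fn,
				 void *context)
{
	if (ops->register_threshold)
		return ops->register_threshold(sm, threshold, fn, context);

	return -EINVAL;		/* backend does not support thresholds */
}

int main(void)
{
	struct space_map_ops disk_ops = { .register_threshold = NULL };

	/* prints -22: the disk space map opts out */
	printf("%d\n", do_register_threshold(&disk_ops, NULL, 10, NULL, NULL));
	return 0;
}
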
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0505452..fcf65e5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -502,11 +502,11 @@
 {
 	if (likely(is_power_of_2(chunk_sects))) {
 		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
-					+ (bio->bi_size >> 9));
+					+ bio_sectors(bio));
 	} else{
 		sector_t sector = bio->bi_sector;
 		return chunk_sects >= (sector_div(sector, chunk_sects)
-						+ (bio->bi_size >> 9));
+						+ bio_sectors(bio));
 	}
 }
 
@@ -527,8 +527,7 @@
 		sector_t sector = bio->bi_sector;
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
-		    bio->bi_idx != 0)
+		if (bio_segments(bio) > 1)
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
@@ -567,7 +566,7 @@
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n",
 	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
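
For power-of-two chunk sizes, the test above masks the start sector to get its offset within the chunk and checks that the whole request fits in what remains. Standalone, with two worked cases:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool in_chunk_boundary(unsigned int chunk_sects, sector_t bi_sector,
			      unsigned int sectors)
{
	/* offset within the chunk, plus the request length, must fit */
	return chunk_sects >= ((bi_sector & (chunk_sects - 1)) + sectors);
}

int main(void)
{
	/* 128-sector (64KiB) chunks */
	printf("%d\n", in_chunk_boundary(128, 120, 8));		/* 1: ends on the boundary */
	printf("%d\n", in_chunk_boundary(128, 120, 16));	/* 0: crosses into the next chunk */
	return 0;
}
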
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 851023e..5595118 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -92,7 +92,6 @@
 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	struct page *page;
 	struct r1bio *r1_bio;
 	struct bio *bio;
 	int i, j;
@@ -122,14 +121,10 @@
 		j = 1;
 	while(j--) {
 		bio = r1_bio->bios[j];
-		for (i = 0; i < RESYNC_PAGES; i++) {
-			page = alloc_page(gfp_flags);
-			if (unlikely(!page))
-				goto out_free_pages;
+		bio->bi_vcnt = RESYNC_PAGES;
 
-			bio->bi_io_vec[i].bv_page = page;
-			bio->bi_vcnt = i+1;
-		}
+		if (bio_alloc_pages(bio, gfp_flags))
+			goto out_free_bio;
 	}
 	/* If not user-requests, copy the page pointers to all bios */
 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -143,11 +138,6 @@
 
 	return r1_bio;
 
-out_free_pages:
-	for (j=0 ; j < pi->raid_disks; j++)
-		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
-			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
-	j = -1;
 out_free_bio:
 	while (++j < pi->raid_disks)
 		bio_put(r1_bio->bios[j]);
@@ -267,7 +257,7 @@
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 			 (unsigned long long) bio->bi_sector,
 			 (unsigned long long) bio->bi_sector +
-			 (bio->bi_size >> 9) - 1);
+			 bio_sectors(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -458,7 +448,7 @@
 					 " %llu-%llu\n",
 					 (unsigned long long) mbio->bi_sector,
 					 (unsigned long long) mbio->bi_sector +
-					 (mbio->bi_size >> 9) - 1);
+					 bio_sectors(mbio) - 1);
 				call_bio_endio(r1_bio);
 			}
 		}
@@ -925,7 +915,7 @@
 	if (unlikely(!bvecs))
 		return;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		bvecs[i] = *bvec;
 		bvecs[i].bv_page = alloc_page(GFP_NOIO);
 		if (unlikely(!bvecs[i].bv_page))
@@ -1023,7 +1013,7 @@
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
 	if (bio_data_dir(bio) == WRITE &&
-	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+	    bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_sector < mddev->suspend_hi) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
@@ -1034,7 +1024,7 @@
 			flush_signals(current);
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
-			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+			if (bio_end_sector(bio) <= mddev->suspend_lo ||
 			    bio->bi_sector >= mddev->suspend_hi)
 				break;
 			schedule();
@@ -1054,7 +1044,7 @@
 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 	r1_bio->master_bio = bio;
-	r1_bio->sectors = bio->bi_size >> 9;
+	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
 	r1_bio->sector = bio->bi_sector;
@@ -1132,7 +1122,7 @@
 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 			r1_bio->master_bio = bio;
-			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
 			r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1289,14 +1279,10 @@
 			struct bio_vec *bvec;
 			int j;
 
-			/* Yes, I really want the '__' version so that
-			 * we clear any unused pointer in the io_vec, rather
-			 * than leave them unchanged.  This is important
-			 * because when we come to free the pages, we won't
-			 * know the original bi_idx, so we just free
-			 * them all
+			/*
+			 * We trimmed the bio, so _all is legit
 			 */
-			__bio_for_each_segment(bvec, mbio, j, 0)
+			bio_for_each_segment_all(bvec, mbio, j)
 				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
@@ -1334,14 +1320,14 @@
 	/* Mustn't call r1_bio_write_done before this next test,
 	 * as it could result in the bio being freed.
 	 */
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		r1_bio_write_done(r1_bio);
 		/* We need another r1_bio.  It has already been counted
 		 * in bio->bi_phys_segments
 		 */
 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 		r1_bio->master_bio = bio;
-		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
 		r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1867,7 +1853,7 @@
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
 
-		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
+		if (sbio->bi_end_io != end_sync_read)
 			continue;
 
 		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
@@ -1892,16 +1878,15 @@
 			continue;
 		}
 		/* fixup the bio for reuse */
+		bio_reset(sbio);
 		sbio->bi_vcnt = vcnt;
 		sbio->bi_size = r1_bio->sectors << 9;
-		sbio->bi_idx = 0;
-		sbio->bi_phys_segments = 0;
-		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		sbio->bi_flags |= 1 << BIO_UPTODATE;
-		sbio->bi_next = NULL;
 		sbio->bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+		sbio->bi_end_io = end_sync_read;
+		sbio->bi_private = r1_bio;
+
 		size = sbio->bi_size;
 		for (j = 0; j < vcnt ; j++) {
 			struct bio_vec *bi;
@@ -1912,10 +1897,9 @@
 			else
 				bi->bv_len = size;
 			size -= PAGE_SIZE;
-			memcpy(page_address(bi->bv_page),
-			       page_address(pbio->bi_io_vec[j].bv_page),
-			       PAGE_SIZE);
 		}
+
+		bio_copy_data(sbio, pbio);
 	}
 	return 0;
 }
@@ -1952,7 +1936,7 @@
 		wbio->bi_rw = WRITE;
 		wbio->bi_end_io = end_sync_write;
 		atomic_inc(&r1_bio->remaining);
-		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
 		generic_make_request(wbio);
 	}
@@ -2064,32 +2048,11 @@
 	}
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
-	struct completion event;
-	rw |= REQ_SYNC;
-
-	init_completion(&event);
-	bio->bi_private = &event;
-	bio->bi_end_io = bi_complete;
-	submit_bio(rw, bio);
-	wait_for_completion(&event);
-
-	return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
 static int narrow_write_error(struct r1bio *r1_bio, int i)
 {
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
 	struct md_rdev *rdev = conf->mirrors[i].rdev;
-	int vcnt, idx;
-	struct bio_vec *vec;
 
 	/* bio has the data to be written to device 'i' where
 	 * we just recently had a write error.
@@ -2117,30 +2080,32 @@
 		   & ~(sector_t)(block_sectors - 1))
 		- sector;
 
-	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
-		vcnt = r1_bio->behind_page_count;
-		vec = r1_bio->behind_bvecs;
-		idx = 0;
-		while (vec[idx].bv_page == NULL)
-			idx++;
-	} else {
-		vcnt = r1_bio->master_bio->bi_vcnt;
-		vec = r1_bio->master_bio->bi_io_vec;
-		idx = r1_bio->master_bio->bi_idx;
-	}
 	while (sect_to_write) {
 		struct bio *wbio;
 		if (sectors > sect_to_write)
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors'*/
 
-		wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
-		memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
-		wbio->bi_sector = r1_bio->sector;
+		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+			unsigned vcnt = r1_bio->behind_page_count;
+			struct bio_vec *vec = r1_bio->behind_bvecs;
+
+			while (!vec->bv_page) {
+				vec++;
+				vcnt--;
+			}
+
+			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
+			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
+
+			wbio->bi_vcnt = vcnt;
+		} else {
+			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+		}
+
 		wbio->bi_rw = WRITE;
-		wbio->bi_vcnt = vcnt;
+		wbio->bi_sector = r1_bio->sector;
 		wbio->bi_size = r1_bio->sectors << 9;
-		wbio->bi_idx = idx;
 
 		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_sector += rdev->data_offset;
@@ -2289,8 +2254,7 @@
 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 			r1_bio->master_bio = mbio;
-			r1_bio->sectors = (mbio->bi_size >> 9)
-					  - sectors_handled;
+			r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
 			r1_bio->state = 0;
 			set_bit(R1BIO_ReadError, &r1_bio->state);
 			r1_bio->mddev = mddev;
@@ -2464,18 +2428,7 @@
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct md_rdev *rdev;
 		bio = r1_bio->bios[i];
-
-		/* take from bio_init */
-		bio->bi_next = NULL;
-		bio->bi_flags &= ~(BIO_POOL_MASK-1);
-		bio->bi_flags |= 1 << BIO_UPTODATE;
-		bio->bi_rw = READ;
-		bio->bi_vcnt = 0;
-		bio->bi_idx = 0;
-		bio->bi_phys_segments = 0;
-		bio->bi_size = 0;
-		bio->bi_end_io = NULL;
-		bio->bi_private = NULL;
+		bio_reset(bio);
 
 		rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev == NULL ||
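
The raid1 changes lean on three block-core helpers: bio_reset() (reinitialise a bio for reuse while keeping allocation-time state such as the vector array, and mark it up to date), bio_copy_data() (replacing the per-page memcpy loop), and submit_bio_wait(), whose private copy is deleted here because this series promotes it into the block core. A userspace model of the bio_reset() idea (simplified; the real layout and flags handling differ):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct bio_vec { unsigned int bv_len, bv_offset; };

struct bio {
	/* volatile state, cleared on every reset */
	unsigned long	bi_sector;
	unsigned int	bi_size, bi_rw;
	unsigned short	bi_idx, bi_vcnt;
	void		*bi_private;

	/* allocation-time state, preserved across resets */
	unsigned short	bi_max_vecs;
	struct bio_vec	*bi_io_vec;
};

static void bio_reset(struct bio *bio)
{
	/* clear everything in front of the preserved tail */
	memset(bio, 0, offsetof(struct bio, bi_max_vecs));
}

int main(void)
{
	struct bio_vec vecs[4];
	struct bio b = {
		.bi_sector = 42, .bi_vcnt = 3,
		.bi_max_vecs = 4, .bi_io_vec = vecs,
	};

	bio_reset(&b);	/* ready for reuse without reallocating */
	printf("sector=%lu vcnt=%d max_vecs=%d vec=%p\n",
	       b.bi_sector, b.bi_vcnt, b.bi_max_vecs, (void *)b.bi_io_vec);
	return 0;
}
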
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 018741b..59d4daa 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1174,14 +1174,13 @@
 	/* If this request crosses a chunk boundary, we need to
 	 * split it.  This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
-		    bio->bi_idx != 0)
+		if (bio_segments(bio) > 1)
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
@@ -1214,7 +1213,7 @@
 	bad_map:
 		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+		       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 		bio_io_error(bio);
 		return;
@@ -1229,7 +1228,7 @@
 	 */
 	wait_barrier(conf);
 
-	sectors = bio->bi_size >> 9;
+	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio->bi_sector < conf->reshape_progress &&
 	    bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1331,8 +1330,7 @@
 			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 			r10_bio->master_bio = bio;
-			r10_bio->sectors = ((bio->bi_size >> 9)
-					    - sectors_handled);
+			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r10_bio->state = 0;
 			r10_bio->mddev = mddev;
 			r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1574,7 +1572,7 @@
 	 * after checking if we need to go around again.
 	 */
 
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		one_write_done(r10_bio);
 		/* We need another r10_bio.  It has already been counted
 		 * in bio->bi_phys_segments.
@@ -1582,7 +1580,7 @@
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2084,13 +2082,10 @@
 		 * First we need to fixup bv_offset, bv_len and
 		 * bi_vecs, as the read request might have corrupted these
 		 */
+		bio_reset(tbio);
+
 		tbio->bi_vcnt = vcnt;
 		tbio->bi_size = r10_bio->sectors << 9;
-		tbio->bi_idx = 0;
-		tbio->bi_phys_segments = 0;
-		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		tbio->bi_flags |= 1 << BIO_UPTODATE;
-		tbio->bi_next = NULL;
 		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
 		tbio->bi_sector = r10_bio->devs[i].addr;
@@ -2108,7 +2103,7 @@
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2133,7 +2128,7 @@
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     tbio->bi_size >> 9);
+			     bio_sectors(tbio));
 		generic_make_request(tbio);
 	}
 
@@ -2259,13 +2254,13 @@
 	wbio2 = r10_bio->devs[1].repl_bio;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
 	if (wbio2 && wbio2->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     wbio2->bi_size >> 9);
+			     bio_sectors(wbio2));
 		generic_make_request(wbio2);
 	}
 }
@@ -2536,25 +2531,6 @@
 	}
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
-	struct completion event;
-	rw |= REQ_SYNC;
-
-	init_completion(&event);
-	bio->bi_private = &event;
-	bio->bi_end_io = bi_complete;
-	submit_bio(rw, bio);
-	wait_for_completion(&event);
-
-	return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
 static int narrow_write_error(struct r10bio *r10_bio, int i)
 {
 	struct bio *bio = r10_bio->master_bio;
@@ -2695,8 +2671,7 @@
 		r10_bio = mempool_alloc(conf->r10bio_pool,
 					GFP_NOIO);
 		r10_bio->master_bio = mbio;
-		r10_bio->sectors = (mbio->bi_size >> 9)
-			- sectors_handled;
+		r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
 		r10_bio->state = 0;
 		set_bit(R10BIO_ReadError,
 			&r10_bio->state);
@@ -3133,6 +3108,7 @@
 					}
 				}
 				bio = r10_bio->devs[0].bio;
+				bio_reset(bio);
 				bio->bi_next = biolist;
 				biolist = bio;
 				bio->bi_private = r10_bio;
@@ -3157,6 +3133,7 @@
 				rdev = mirror->rdev;
 				if (!test_bit(In_sync, &rdev->flags)) {
 					bio = r10_bio->devs[1].bio;
+					bio_reset(bio);
 					bio->bi_next = biolist;
 					biolist = bio;
 					bio->bi_private = r10_bio;
@@ -3185,6 +3162,7 @@
 				if (rdev == NULL || bio == NULL ||
 				    test_bit(Faulty, &rdev->flags))
 					break;
+				bio_reset(bio);
 				bio->bi_next = biolist;
 				biolist = bio;
 				bio->bi_private = r10_bio;
@@ -3283,7 +3261,7 @@
 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
 
 			bio = r10_bio->devs[i].bio;
-			bio->bi_end_io = NULL;
+			bio_reset(bio);
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 			if (conf->mirrors[d].rdev == NULL ||
 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
@@ -3320,6 +3298,7 @@
 
 			/* Need to set up for writing to the replacement */
 			bio = r10_bio->devs[i].repl_bio;
+			bio_reset(bio);
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 
 			sector = r10_bio->devs[i].addr;
@@ -3353,17 +3332,6 @@
 		}
 	}
 
-	for (bio = biolist; bio ; bio=bio->bi_next) {
-
-		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		if (bio->bi_end_io)
-			bio->bi_flags |= 1 << BIO_UPTODATE;
-		bio->bi_vcnt = 0;
-		bio->bi_idx = 0;
-		bio->bi_phys_segments = 0;
-		bio->bi_size = 0;
-	}
-
 	nr_sectors = 0;
 	if (sector_nr + max_sync < max_sector)
 		max_sector = sector_nr + max_sync;
@@ -4411,7 +4379,6 @@
 	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
-	read_bio->bi_idx = 0;
 	read_bio->bi_size = 0;
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4435,17 +4402,14 @@
 		}
 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
 			continue;
+
+		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
 		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
-		b->bi_flags &= ~(BIO_POOL_MASK - 1);
-		b->bi_flags |= 1 << BIO_UPTODATE;
 		b->bi_next = blist;
-		b->bi_vcnt = 0;
-		b->bi_idx = 0;
-		b->bi_size = 0;
 		blist = b;
 	}
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4a7be45..9359828 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -90,7 +90,7 @@
  */
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
-	int sectors = bio->bi_size >> 9;
+	int sectors = bio_sectors(bio);
 	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
@@ -569,14 +569,6 @@
 		bi = &sh->dev[i].req;
 		rbi = &sh->dev[i].rreq; /* For writing to replacement */
 
-		bi->bi_rw = rw;
-		rbi->bi_rw = rw;
-		if (rw & WRITE) {
-			bi->bi_end_io = raid5_end_write_request;
-			rbi->bi_end_io = raid5_end_write_request;
-		} else
-			bi->bi_end_io = raid5_end_read_request;
-
 		rcu_read_lock();
 		rrdev = rcu_dereference(conf->disks[i].replacement);
 		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
@@ -651,7 +643,14 @@
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
+			bio_reset(bi);
 			bi->bi_bdev = rdev->bdev;
+			bi->bi_rw = rw;
+			bi->bi_end_io = (rw & WRITE)
+				? raid5_end_write_request
+				: raid5_end_read_request;
+			bi->bi_private = sh;
+
 			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 				__func__, (unsigned long long)sh->sector,
 				bi->bi_rw, i);
@@ -665,12 +664,9 @@
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_FLUSH;
 
-			bi->bi_flags = 1 << BIO_UPTODATE;
-			bi->bi_idx = 0;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
-			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
@@ -687,7 +683,13 @@
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
+			bio_reset(rbi);
 			rbi->bi_bdev = rrdev->bdev;
+			rbi->bi_rw = rw;
+			BUG_ON(!(rw & WRITE));
+			rbi->bi_end_io = raid5_end_write_request;
+			rbi->bi_private = sh;
+
 			pr_debug("%s: for %llu schedule op %ld on "
 				 "replacement disc %d\n",
 				__func__, (unsigned long long)sh->sector,
@@ -699,12 +701,9 @@
 			else
 				rbi->bi_sector = (sh->sector
 						  + rrdev->data_offset);
-			rbi->bi_flags = 1 << BIO_UPTODATE;
-			rbi->bi_idx = 0;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
-			rbi->bi_next = NULL;
 			if (conf->mddev->gendisk)
 				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
 						      rbi, disk_devt(conf->mddev->gendisk),
@@ -2402,11 +2401,11 @@
 	} else
 		bip = &sh->dev[dd_idx].toread;
 	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
+		if (bio_end_sector(*bip) > bi->bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
+	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2422,8 +2421,8 @@
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
 			     bi && bi->bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
-			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
-				sector = bi->bi_sector + (bi->bi_size>>9);
+			if (bio_end_sector(bi) >= sector)
+				sector = bio_end_sector(bi);
 		}
 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
 			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
@@ -3849,7 +3848,7 @@
 {
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bio_sectors(bio);
 
 	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 		chunk_sectors = mddev->new_chunk_sectors;
@@ -3941,7 +3940,7 @@
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > queue_max_sectors(q))
+	if (bio_sectors(bi) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
 	if (bi->bi_phys_segments > queue_max_segments(q))
@@ -3988,7 +3987,7 @@
 						    0,
 						    &dd_idx, NULL);
 
-	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
+	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
@@ -4011,7 +4010,7 @@
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4273,7 +4272,7 @@
 	}
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
@@ -4739,7 +4738,7 @@
 	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
-	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
+	last_sector = bio_end_sector(raid_bio);
 
 	for (; logical_sector < last_sector;
 	     logical_sector += STRIPE_SECTORS,
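
add_stripe_bio() above keeps each stripe's bios on a singly linked list sorted by bi_sector, refusing requests that overlap either neighbour; bio_end_sector() now expresses both overlap tests. The same pattern on a plain list (userspace sketch):

#include <stdbool.h>
#include <stdio.h>

struct bio {
	unsigned long	bi_sector;
	unsigned int	bi_size;	/* bytes */
	struct bio	*bi_next;
};

#define bio_end_sector(b)	((b)->bi_sector + ((b)->bi_size >> 9))

/* Insert 'bi' into the list sorted by bi_sector; refuse overlaps. */
static bool add_sorted(struct bio **head, struct bio *bi)
{
	struct bio **bip = head;

	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if (bio_end_sector(*bip) > bi->bi_sector)
			return false;	/* predecessor runs into us */
		bip = &(*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
		return false;		/* we run into our successor */

	bi->bi_next = *bip;
	*bip = bi;
	return true;
}

int main(void)
{
	struct bio a = { .bi_sector = 0, .bi_size = 4096 };	/* sectors 0-7 */
	struct bio b = { .bi_sector = 8, .bi_size = 4096 };	/* sectors 8-15 */
	struct bio c = { .bi_sector = 4, .bi_size = 4096 };	/* overlaps a */
	struct bio *head = NULL;

	printf("%d\n", add_sorted(&head, &a));	/* 1 */
	printf("%d\n", add_sorted(&head, &b));	/* 1 */
	printf("%d\n", add_sorted(&head, &c));	/* 0 */
	return 0;
}
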
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index f12b78d..f4176ca 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -204,7 +204,7 @@
 }
 
 
-static int mspro_block_disk_release(struct gendisk *disk)
+static void mspro_block_disk_release(struct gendisk *disk)
 {
 	struct mspro_block_data *msb = disk->private_data;
 	int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
@@ -224,13 +224,11 @@
 	}
 
 	mutex_unlock(&mspro_block_disk_lock);
-
-	return 0;
 }
 
-static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
+static void mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
 {
-	return mspro_block_disk_release(disk);
+	mspro_block_disk_release(disk);
 }
 
 static int mspro_block_bd_getgeo(struct block_device *bdev,
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index ffee6f7..dd239bd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@
 	}
 
 	/* do we need to support multiple segments? */
-	if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
+	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
 		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-		    ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
-		    rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
+		    ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
+		    bio_segments(rsp->bio), blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 49e86ae..6fc3866 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -600,10 +600,8 @@
  *
  *	Unlock and unmount the media, and power down the device. Gets called if
  *	the block device is closed.
- *
- *	Returns 0 on success or negative error code on failure.
  */
-static int i2o_block_release(struct gendisk *disk, fmode_t mode)
+static void i2o_block_release(struct gendisk *disk, fmode_t mode)
 {
 	struct i2o_block_device *dev = disk->private_data;
 	u8 operation;
@@ -617,7 +615,7 @@
 	 * the TID no longer exists.
 	 */
 	if (!dev->i2o_dev)
-		return 0;
+		return;
 
 	mutex_lock(&i2o_block_mutex);
 	i2o_block_device_flush(dev->i2o_dev);
@@ -631,8 +629,6 @@
 
 	i2o_block_device_power(dev, operation);
 	mutex_unlock(&i2o_block_mutex);
-
-	return 0;
 }
 
 static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e12a03c..dd27b07 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -304,14 +304,13 @@
 	return ret;
 }
 
-static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
+static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
 {
 	struct mmc_blk_data *md = disk->private_data;
 
 	mutex_lock(&block_mutex);
 	mmc_blk_put(md);
 	mutex_unlock(&block_mutex);
-	return 0;
 }
 
 static int
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 557bec5..5fab4e6e 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -157,19 +157,6 @@
 
 comment "User Modules And Translation Layers"
 
-config MTD_CHAR
-	tristate "Direct char device access to MTD devices"
-	help
-	  This provides a character device for each MTD device present in
-	  the system, allowing the user to read and write directly to the
-	  memory chips, and also use ioctl() to obtain information about
-	  the device, or to erase parts of it.
-
-config HAVE_MTD_OTP
-	bool
-	help
-	  Enable access to OTP regions using MTD_CHAR.
-
 config MTD_BLKDEVS
 	tristate "Common interface to block layer for MTD 'translation layers'"
 	depends on BLOCK
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 18a38e5..4cfb31e 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -4,7 +4,7 @@
 
 # Core functionality.
 obj-$(CONFIG_MTD)		+= mtd.o
-mtd-y				:= mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
+mtd-y				:= mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
 
 obj-$(CONFIG_MTD_OF_PARTS)	+= ofpart.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
@@ -15,7 +15,6 @@
 obj-$(CONFIG_MTD_BCM47XX_PARTS)	+= bcm47xxpart.o
 
 # 'Users' - code which presents functionality to userspace.
-obj-$(CONFIG_MTD_CHAR)		+= mtdchar.o
 obj-$(CONFIG_MTD_BLKDEVS)	+= mtd_blkdevs.o
 obj-$(CONFIG_MTD_BLOCK)		+= mtdblock.o
 obj-$(CONFIG_MTD_BLOCK_RO)	+= mtdblock_ro.o
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index c219e3d..e4696b3 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -146,7 +146,6 @@
 config MTD_OTP
 	bool "Protection Registers aka one-time programmable (OTP) bits"
 	depends on MTD_CFI_ADV_OPTIONS
-	select HAVE_MTD_OTP
 	default n
 	help
 	  This enables support for reading, writing and locking so called
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 12311f5..2a4d55e 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -71,7 +71,6 @@
 config MTD_DATAFLASH_OTP
 	bool "DataFlash OTP support (Security Register)"
 	depends on MTD_DATAFLASH
-	select HAVE_MTD_OTP
 	help
 	  Newer DataFlash chips (revisions C and D) support 128 bytes of
 	  one-time-programmable (OTP) data.  The first half may be written
@@ -205,69 +204,6 @@
 
 comment "Disk-On-Chip Device Drivers"
 
-config MTD_DOC2000
-	tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
-	depends on MTD_NAND
-	select MTD_DOCPROBE
-	select MTD_NAND_IDS
-	---help---
-	  This provides an MTD device driver for the M-Systems DiskOnChip
-	  2000 and Millennium devices.  Originally designed for the DiskOnChip
-	  2000, it also now includes support for the DiskOnChip Millennium.
-	  If you have problems with this driver and the DiskOnChip Millennium,
-	  you may wish to try the alternative Millennium driver below. To use
-	  the alternative driver, you will need to undefine DOC_SINGLE_DRIVER
-	  in the <file:drivers/mtd/devices/docprobe.c> source code.
-
-	  If you use this device, you probably also want to enable the NFTL
-	  'NAND Flash Translation Layer' option below, which is used to
-	  emulate a block device by using a kind of file system on the flash
-	  chips.
-
-	  NOTE: This driver is deprecated and will probably be removed soon.
-	  Please try the new DiskOnChip driver under "NAND Flash Device
-	  Drivers".
-
-config MTD_DOC2001
-	tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
-	depends on MTD_NAND
-	select MTD_DOCPROBE
-	select MTD_NAND_IDS
-	---help---
-	  This provides an alternative MTD device driver for the M-Systems
-	  DiskOnChip Millennium devices.  Use this if you have problems with
-	  the combined DiskOnChip 2000 and Millennium driver above.  To get
-	  the DiskOnChip probe code to load and use this driver instead of
-	  the other one, you will need to undefine DOC_SINGLE_DRIVER near
-	  the beginning of <file:drivers/mtd/devices/docprobe.c>.
-
-	  If you use this device, you probably also want to enable the NFTL
-	  'NAND Flash Translation Layer' option below, which is used to
-	  emulate a block device by using a kind of file system on the flash
-	  chips.
-
-	  NOTE: This driver is deprecated and will probably be removed soon.
-	  Please try the new DiskOnChip driver under "NAND Flash Device
-	  Drivers".
-
-config MTD_DOC2001PLUS
-	tristate "M-Systems Disk-On-Chip Millennium Plus"
-	depends on MTD_NAND
-	select MTD_DOCPROBE
-	select MTD_NAND_IDS
-	---help---
-	  This provides an MTD device driver for the M-Systems DiskOnChip
-	  Millennium Plus devices.
-
-	  If you use this device, you probably also want to enable the INFTL
-	  'Inverse NAND Flash Translation Layer' option below, which is used
-	  to emulate a block device by using a kind of file system on the
-	  flash chips.
-
-	  NOTE: This driver will soon be replaced by the new DiskOnChip driver
-	  under "NAND Flash Device Drivers" (currently that driver does not
-	  support all Millennium Plus devices).
-
 config MTD_DOCG3
 	tristate "M-Systems Disk-On-Chip G3"
 	select BCH
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 369a194..d83bd73 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -2,12 +2,7 @@
 # linux/drivers/mtd/devices/Makefile
 #
 
-obj-$(CONFIG_MTD_DOC2000)	+= doc2000.o
-obj-$(CONFIG_MTD_DOC2001)	+= doc2001.o
-obj-$(CONFIG_MTD_DOC2001PLUS)	+= doc2001plus.o
 obj-$(CONFIG_MTD_DOCG3)		+= docg3.o
-obj-$(CONFIG_MTD_DOCPROBE)	+= docprobe.o
-obj-$(CONFIG_MTD_DOCECC)	+= docecc.o
 obj-$(CONFIG_MTD_SLRAM)		+= slram.o
 obj-$(CONFIG_MTD_PHRAM)		+= phram.o
 obj-$(CONFIG_MTD_PMC551)	+= pmc551.o
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 9526628..18e7761 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -10,7 +10,7 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
 
-static const char *probes[] = { "bcm47xxpart", NULL };
+static const char * const probes[] = { "bcm47xxpart", NULL };
 
 static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
 			      size_t *retlen, u_char *buf)
@@ -61,6 +61,17 @@
 	}
 	sflash->priv = b47s;
 
+	b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
+
+	switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
+	case BCMA_CC_FLASHT_STSER:
+		b47s->type = BCM47XXSFLASH_TYPE_ST;
+		break;
+	case BCMA_CC_FLASHT_ATSER:
+		b47s->type = BCM47XXSFLASH_TYPE_ATMEL;
+		break;
+	}
+
 	b47s->window = sflash->window;
 	b47s->blocksize = sflash->blocksize;
 	b47s->numblocks = sflash->numblocks;
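
The probe above recovers the enclosing bcma_drv_cc from a pointer to its embedded sflash member via container_of(), then reads the flash type out of the chipcommon capabilities. A simplified standalone illustration of the container_of() step (the kernel macro adds type checking omitted here):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bcma_sflash { unsigned int window, blocksize; };

struct bcma_drv_cc {
	unsigned int capabilities;
	struct bcma_sflash sflash;	/* embedded, not a pointer */
};

int main(void)
{
	struct bcma_drv_cc cc = { .capabilities = 0x700 };
	struct bcma_sflash *sflash = &cc.sflash;

	/* walk back from the member to its container */
	struct bcma_drv_cc *back = container_of(sflash, struct bcma_drv_cc, sflash);

	printf("%d\n", back == &cc);	/* 1 */
	return 0;
}
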
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index ebf6f71..f22f8c4 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -3,7 +3,66 @@
 
 #include <linux/mtd/mtd.h>
 
+/* Used for ST flashes only. */
+#define OPCODE_ST_WREN		0x0006		/* Write Enable */
+#define OPCODE_ST_WRDIS		0x0004		/* Write Disable */
+#define OPCODE_ST_RDSR		0x0105		/* Read Status Register */
+#define OPCODE_ST_WRSR		0x0101		/* Write Status Register */
+#define OPCODE_ST_READ		0x0303		/* Read Data Bytes */
+#define OPCODE_ST_PP		0x0302		/* Page Program */
+#define OPCODE_ST_SE		0x02d8		/* Sector Erase */
+#define OPCODE_ST_BE		0x00c7		/* Bulk Erase */
+#define OPCODE_ST_DP		0x00b9		/* Deep Power-down */
+#define OPCODE_ST_RES		0x03ab		/* Read Electronic Signature */
+#define OPCODE_ST_CSA		0x1000		/* Keep chip select asserted */
+#define OPCODE_ST_SSE		0x0220		/* Sub-sector Erase */
+
+/* Used for Atmel flashes only. */
+#define OPCODE_AT_READ				0x07e8
+#define OPCODE_AT_PAGE_READ			0x07d2
+#define OPCODE_AT_STATUS			0x01d7
+#define OPCODE_AT_BUF1_WRITE			0x0384
+#define OPCODE_AT_BUF2_WRITE			0x0387
+#define OPCODE_AT_BUF1_ERASE_PROGRAM		0x0283
+#define OPCODE_AT_BUF2_ERASE_PROGRAM		0x0286
+#define OPCODE_AT_BUF1_PROGRAM			0x0288
+#define OPCODE_AT_BUF2_PROGRAM			0x0289
+#define OPCODE_AT_PAGE_ERASE			0x0281
+#define OPCODE_AT_BLOCK_ERASE			0x0250
+#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM	0x0382
+#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM	0x0385
+#define OPCODE_AT_BUF1_LOAD			0x0253
+#define OPCODE_AT_BUF2_LOAD			0x0255
+#define OPCODE_AT_BUF1_COMPARE			0x0260
+#define OPCODE_AT_BUF2_COMPARE			0x0261
+#define OPCODE_AT_BUF1_REPROGRAM		0x0258
+#define OPCODE_AT_BUF2_REPROGRAM		0x0259
+
+/* Status register bits for ST flashes */
+#define SR_ST_WIP		0x01		/* Write In Progress */
+#define SR_ST_WEL		0x02		/* Write Enable Latch */
+#define SR_ST_BP_MASK		0x1c		/* Block Protect */
+#define SR_ST_BP_SHIFT		2
+#define SR_ST_SRWD		0x80		/* Status Register Write Disable */
+
+/* Status register bits for Atmel flashes */
+#define SR_AT_READY		0x80
+#define SR_AT_MISMATCH		0x40
+#define SR_AT_ID_MASK		0x38
+#define SR_AT_ID_SHIFT		3
+
+struct bcma_drv_cc;
+
+enum bcm47xxsflash_type {
+	BCM47XXSFLASH_TYPE_ATMEL,
+	BCM47XXSFLASH_TYPE_ST,
+};
+
 struct bcm47xxsflash {
+	struct bcma_drv_cc *bcma_cc;
+
+	enum bcm47xxsflash_type type;
+
 	u32 window;
 	u32 blocksize;
 	u16 numblocks;
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
deleted file mode 100644
index a4eb8b5..0000000
--- a/drivers/mtd/devices/doc2000.c
+++ /dev/null
@@ -1,1178 +0,0 @@
-
-/*
- * Linux driver for Disk-On-Chip 2000 and Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-#define DOC_SUPPORT_2000
-#define DOC_SUPPORT_2000TSOP
-#define DOC_SUPPORT_MILLENNIUM
-
-#ifdef DOC_SUPPORT_2000
-#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k)
-#else
-#define DoC_is_2000(doc) (0)
-#endif
-
-#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
-#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
-#else
-#define DoC_is_Millennium(doc) (0)
-#endif
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcpy_from|to_io().
- * This may be due to the different revisions of the ASIC controller built-in or
- * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment
- * this:
- #undef USE_MEMCPY
-*/
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
-		    size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
-		     size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
-			struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
-			 struct mtd_oob_ops *ops);
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
-			 size_t *retlen, const u_char *buf);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *doc2klist = NULL;
-
-/* Perform the required delay cycles by reading from the appropriate register */
-static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
-{
-	volatile char dummy;
-	int i;
-
-	for (i = 0; i < cycles; i++) {
-		if (DoC_is_Millennium(doc))
-			dummy = ReadDOC(doc->virtadr, NOP);
-		else
-			dummy = ReadDOC(doc->virtadr, DOCStatus);
-	}
-
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(struct DiskOnChip *doc)
-{
-	void __iomem *docptr = doc->virtadr;
-	unsigned long timeo = jiffies + (HZ * 10);
-
-	pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
-	/* Out-of-line routine to wait for chip response */
-	while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
-		/* issue 2 read from NOP register after reading from CDSNControl register
-	   	see Software Requirement 11.4 item 2. */
-		DoC_Delay(doc, 2);
-
-		if (time_after(jiffies, timeo)) {
-			pr_debug("_DoC_WaitReady timed out.\n");
-			return -EIO;
-		}
-		udelay(1);
-		cond_resched();
-	}
-
-	return 0;
-}
-
-static inline int DoC_WaitReady(struct DiskOnChip *doc)
-{
-	void __iomem *docptr = doc->virtadr;
-
-	/* This is inline, to optimise the common case, where it's ready instantly */
-	int ret = 0;
-
-	/* 4 read form NOP register should be issued in prior to the read from CDSNControl
-	   see Software Requirement 11.4 item 2. */
-	DoC_Delay(doc, 4);
-
-	if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
-		/* Call the out-of-line routine to wait */
-		ret = _DoC_WaitReady(doc);
-
-	/* issue 2 reads from the NOP register after reading from the CDSNControl
-	   register, see Software Requirement 11.4 item 2. */
-	DoC_Delay(doc, 2);
-
-	return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to
-   bypass the internal pipeline. 4 delay cycles (reads from the NOP register) are
-   required after writing to the CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Command(struct DiskOnChip *doc, unsigned char command,
-			      unsigned char xtraflags)
-{
-	void __iomem *docptr = doc->virtadr;
-
-	if (DoC_is_2000(doc))
-		xtraflags |= CDSN_CTRL_FLASH_IO;
-
-	/* Assert the CLE (Command Latch Enable) line to the flash chip */
-	WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
-	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */
-
-	if (DoC_is_Millennium(doc))
-		WriteDOC(command, docptr, CDSNSlowIO);
-
-	/* Send the command */
-	WriteDOC_(command, docptr, doc->ioreg);
-	if (DoC_is_Millennium(doc))
-		WriteDOC(command, docptr, WritePipeTerm);
-
-	/* Lower the CLE line */
-	WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
-	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */
-
-	/* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */
-	return DoC_WaitReady(doc);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to
-   bypass the internal pipeline. 4 delay cycles (reads from the NOP register) are
-   required after writing to the CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
-		       unsigned char xtraflags1, unsigned char xtraflags2)
-{
-	int i;
-	void __iomem *docptr = doc->virtadr;
-
-	if (DoC_is_2000(doc))
-		xtraflags1 |= CDSN_CTRL_FLASH_IO;
-
-	/* Assert the ALE (Address Latch Enable) line to the flash chip */
-	WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
-
-	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */
-
-	/* Send the address */
-	/* Devices with 256-byte page are addressed as:
-	   Column (bits 0-7), Page (bits 8-15, 16-23, 24-31)
-	   * there is no device on the market with a 256-byte page
-	   and more than 24 address bits.
-	   Devices with 512-byte page are addressed as:
-	   Column (bits 0-7), Page (bits 9-16, 17-24, 25-31)
-	   * bits 25-31 are sent only if the chip supports them.
-	   * bit 8 selects the read command to be sent
-	   (NAND_CMD_READ0 or NAND_CMD_READ1).
-	 */
-
-	if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) {
-		if (DoC_is_Millennium(doc))
-			WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
-		WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
-	}
-
-	if (doc->page256) {
-		ofs = ofs >> 8;
-	} else {
-		ofs = ofs >> 9;
-	}
-
-	if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) {
-		for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) {
-			if (DoC_is_Millennium(doc))
-				WriteDOC(ofs & 0xff, docptr, CDSNSlowIO);
-			WriteDOC_(ofs & 0xff, docptr, doc->ioreg);
-		}
-	}
-
-	if (DoC_is_Millennium(doc))
-		WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
-
-	DoC_Delay(doc, 2);	/* Needed for some slow flash chips. mf. */
-
-	/* FIXME: The SlowIO's for millennium could be replaced by
-	   a single WritePipeTerm here. mf. */
-
-	/* Lower the ALE line */
-	WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr,
-		 CDSNControl);
-
-	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */
-
-	/* Wait for the chip to respond - Software requirement 11.4.1 */
-	return DoC_WaitReady(doc);
-}
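The column/page split described above reduces to a couple of shifts. A minimal stand-alone sketch of the byte sequence for a 512-byte-page part (a hypothetical model for illustration, not driver code):

#include <stdio.h>

/* Hypothetical model of DoC_Address for a 512-byte-page device:
 * one column byte (bits 0-7), then pageadrlen page bytes, LSB
 * first, taken from bit 9 upward. */
static void model_address_bytes(unsigned long ofs, int pageadrlen)
{
	int i;

	printf("column byte: %02lx\n", ofs & 0xff);
	ofs >>= 9;	/* 512-byte page: drop bits 0-8 */
	for (i = 0; i < pageadrlen; i++, ofs >>= 8)
		printf("page byte %d: %02lx\n", i, ofs & 0xff);
}

int main(void)
{
	model_address_bytes(0x12345, 2);	/* column 0x45, then 0x91, 0x00 */
	return 0;
}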
-
-/* Read a buffer from DoC, taking care of Millennium oddities */
-static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
-{
-	volatile int dummy;
-	int modulus = 0xffff;
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	if (len <= 0)
-		return;
-
-	if (DoC_is_Millennium(doc)) {
-		/* Read the data via the internal pipeline through CDSN IO register,
-		   see Pipelined Read Operations 11.3 */
-		dummy = ReadDOC(docptr, ReadPipeInit);
-
-		/* Millennium should use the LastDataRead register - Pipeline Reads */
-		len--;
-
-		/* This is needed for correct ECC calculation */
-		modulus = 0xff;
-	}
-
-	for (i = 0; i < len; i++)
-		buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus));
-
-	if (DoC_is_Millennium(doc)) {
-		buf[i] = ReadDOC(docptr, LastDataRead);
-	}
-}
-
-/* Write a buffer to DoC, taking care of Millennium oddities */
-static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
-{
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	if (len <= 0)
-		return;
-
-	for (i = 0; i < len; i++)
-		WriteDOC_(buf[i], docptr, doc->ioreg + i);
-
-	if (DoC_is_Millennium(doc)) {
-		WriteDOC(0x00, docptr, WritePipeTerm);
-	}
-}
-
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-
-static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
-{
-	void __iomem *docptr = doc->virtadr;
-
-	/* Software requirement 11.4.4 before writing DeviceSelect */
-	/* Deassert the CE line to eliminate glitches on the FCE# outputs */
-	WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl);
-	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */
-
-	/* Select the individual flash chip requested */
-	WriteDOC(chip, docptr, CDSNDeviceSelect);
-	DoC_Delay(doc, 4);
-
-	/* Reassert the CE line */
-	WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr,
-		 CDSNControl);
-	DoC_Delay(doc, 4);	/* Software requirement 11.4.3 for Millennium */
-
-	/* Wait for it to be ready */
-	return DoC_WaitReady(doc);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-
-static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
-{
-	void __iomem *docptr = doc->virtadr;
-
-	/* Select the floor (bank) of chips required */
-	WriteDOC(floor, docptr, FloorSelect);
-
-	/* Wait for the chip to be ready */
-	return DoC_WaitReady(doc);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
-	int mfr, id, i, j;
-	volatile char dummy;
-
-	/* Page in the required floor/chip */
-	DoC_SelectFloor(doc, floor);
-	DoC_SelectChip(doc, chip);
-
-	/* Reset the chip */
-	if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
-		pr_debug("DoC_Command (reset) for %d,%d returned true\n",
-		      floor, chip);
-		return 0;
-	}
-
-
-	/* Read the NAND chip ID: 1. Send ReadID command */
-	if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
-		pr_debug("DoC_Command (ReadID) for %d,%d returned true\n",
-		      floor, chip);
-		return 0;
-	}
-
-	/* Read the NAND chip ID: 2. Send address byte zero */
-	DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0);
-
-	/* Read the manufacturer and device id codes from the device */
-
-	if (DoC_is_Millennium(doc)) {
-		DoC_Delay(doc, 2);
-		dummy = ReadDOC(doc->virtadr, ReadPipeInit);
-		mfr = ReadDOC(doc->virtadr, LastDataRead);
-
-		DoC_Delay(doc, 2);
-		dummy = ReadDOC(doc->virtadr, ReadPipeInit);
-		id = ReadDOC(doc->virtadr, LastDataRead);
-	} else {
-		/* CDSN Slow IO register see Software Req 11.4 item 5. */
-		dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
-		DoC_Delay(doc, 2);
-		mfr = ReadDOC_(doc->virtadr, doc->ioreg);
-
-		/* CDSN Slow IO register see Software Req 11.4 item 5. */
-		dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
-		DoC_Delay(doc, 2);
-		id = ReadDOC_(doc->virtadr, doc->ioreg);
-	}
-
-	/* No response - return failure */
-	if (mfr == 0xff || mfr == 0)
-		return 0;
-
-	/* Check it's the same as the first chip we identified.
-	 * M-Systems say that any given DiskOnChip device should only
-	 * contain _one_ type of flash part, although that's not a
-	 * hardware restriction. */
-	if (doc->mfr) {
-		if (doc->mfr == mfr && doc->id == id)
-			return 1;	/* This is the same as the first */
-		else
-			printk(KERN_WARNING
-			       "Flash chip at floor %d, chip %d is different:\n",
-			       floor, chip);
-	}
-
-	/* Print and store the manufacturer and ID codes. */
-	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
-		if (id == nand_flash_ids[i].id) {
-			/* Try to identify manufacturer */
-			for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
-				if (nand_manuf_ids[j].id == mfr)
-					break;
-			}
-			printk(KERN_INFO
-			       "Flash chip found: Manufacturer ID: %2.2X, "
-			       "Chip ID: %2.2X (%s:%s)\n", mfr, id,
-			       nand_manuf_ids[j].name, nand_flash_ids[i].name);
-			if (!doc->mfr) {
-				doc->mfr = mfr;
-				doc->id = id;
-				doc->chipshift =
-					ffs((nand_flash_ids[i].chipsize << 20)) - 1;
-				doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
-				doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
-				doc->erasesize =
-				    nand_flash_ids[i].erasesize;
-				return 1;
-			}
-			return 0;
-		}
-	}
-
-
-	/* We haven't fully identified the chip. Print as much as we know. */
-	printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n",
-	       id, mfr);
-
-	printk(KERN_WARNING "Please report to dwmw2@infradead.org\n");
-	return 0;
-}
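The ID lookup above walks two sentinel-terminated tables. A reduced sketch of the same pattern, with made-up entries standing in for nand_flash_ids/nand_manuf_ids:

#include <stdio.h>

/* Sentinel-terminated ID table, mirroring the lookup above
 * (these entries are invented for the example). */
struct id_entry { int id; const char *name; };

static const struct id_entry chips[] = {
	{ 0x73, "16MiB part" },
	{ 0x75, "32MiB part" },
	{ 0, NULL },		/* sentinel terminates the scan */
};

static const char *lookup(const struct id_entry *tbl, int id)
{
	for (; tbl->name != NULL; tbl++)
		if (tbl->id == id)
			return tbl->name;
	return "unknown";
}

int main(void)
{
	printf("0x75 -> %s\n", lookup(chips, 0x75));	/* 32MiB part */
	printf("0x42 -> %s\n", lookup(chips, 0x42));	/* unknown */
	return 0;
}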
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-
-static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
-{
-	int floor, chip;
-	int numchips[MAX_FLOORS];
-	int ret = 1;
-
-	this->numchips = 0;
-	this->mfr = 0;
-	this->id = 0;
-
-	/* For each floor, find the number of valid chips it contains */
-	for (floor = 0; floor < MAX_FLOORS; floor++) {
-		ret = 1;
-		numchips[floor] = 0;
-		for (chip = 0; chip < maxchips && ret != 0; chip++) {
-
-			ret = DoC_IdentChip(this, floor, chip);
-			if (ret) {
-				numchips[floor]++;
-				this->numchips++;
-			}
-		}
-	}
-
-	/* If there are none at all that we recognise, bail */
-	if (!this->numchips) {
-		printk(KERN_NOTICE "No flash chips recognised.\n");
-		return;
-	}
-
-	/* Allocate an array to hold the information for each chip */
-	this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
-	if (!this->chips) {
-		printk(KERN_NOTICE "No memory for allocating chip info structures\n");
-		return;
-	}
-
-	ret = 0;
-
-	/* Fill out the chip array with {floor, chipno} for each
-	 * detected chip in the device. */
-	for (floor = 0; floor < MAX_FLOORS; floor++) {
-		for (chip = 0; chip < numchips[floor]; chip++) {
-			this->chips[ret].floor = floor;
-			this->chips[ret].chip = chip;
-			this->chips[ret].curadr = 0;
-			this->chips[ret].curmode = 0x50;
-			ret++;
-		}
-	}
-
-	/* Calculate and print the total size of the device */
-	this->totlen = this->numchips * (1 << this->chipshift);
-
-	printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
-	       this->numchips, this->totlen >> 20);
-}
-
-static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
-	int tmp1, tmp2, retval;
-	if (doc1->physadr == doc2->physadr)
-		return 1;
-
-	/* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, so we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
-	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
-	tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
-	tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
-	if (tmp1 != tmp2)
-		return 0;
-
-	WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution);
-	tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
-	if (tmp2 == (tmp1 + 1) % 0xff)
-		retval = 1;
-	else
-		retval = 0;
-
-	/* Restore register contents.  May not be necessary, but do it just to
-	 * be safe. */
-	WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
-	return retval;
-}
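The probe above is a general write-one-read-other test for two windows onto the same device. A user-space sketch of the idea, using plain pointers in place of the MMIO mappings (an assumption made purely for illustration):

#include <stdio.h>

/* Returns 1 if a and b appear to address the same storage: the
 * same perturb-and-observe probe as above, including the restore
 * of the original value. */
static int is_alias(volatile unsigned char *a, volatile unsigned char *b)
{
	unsigned char old = *a, probe = (unsigned char)((old + 1) % 0xff);
	int same;

	*a = probe;
	same = (*b == probe);
	*a = old;		/* restore, as the driver does */
	return same;
}

int main(void)
{
	unsigned char cell = 42, other = 42;

	printf("self alias: %d\n", is_alias(&cell, &cell));	/* 1 */
	printf("distinct:   %d\n", is_alias(&cell, &other));	/* 0 */
	return 0;
}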
-
-/* This routine is looked up by the docprobe code via symbol_get(),
- * which will bump the use count of this module. */
-void DoC2k_init(struct mtd_info *mtd)
-{
-	struct DiskOnChip *this = mtd->priv;
-	struct DiskOnChip *old = NULL;
-	int maxchips;
-
-	/* We must avoid being called twice for the same device. */
-
-	if (doc2klist)
-		old = doc2klist->priv;
-
-	while (old) {
-		if (DoC2k_is_alias(old, this)) {
-			printk(KERN_NOTICE
-			       "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
-			       this->physadr);
-			iounmap(this->virtadr);
-			kfree(mtd);
-			return;
-		}
-		if (old->nextdoc)
-			old = old->nextdoc->priv;
-		else
-			old = NULL;
-	}
-
-
-	switch (this->ChipID) {
-	case DOC_ChipID_Doc2kTSOP:
-		mtd->name = "DiskOnChip 2000 TSOP";
-		this->ioreg = DoC_Mil_CDSN_IO;
-		/* Pretend it's a Millennium */
-		this->ChipID = DOC_ChipID_DocMil;
-		maxchips = MAX_CHIPS;
-		break;
-	case DOC_ChipID_Doc2k:
-		mtd->name = "DiskOnChip 2000";
-		this->ioreg = DoC_2k_CDSN_IO;
-		maxchips = MAX_CHIPS;
-		break;
-	case DOC_ChipID_DocMil:
-		mtd->name = "DiskOnChip Millennium";
-		this->ioreg = DoC_Mil_CDSN_IO;
-		maxchips = MAX_CHIPS_MIL;
-		break;
-	default:
-		printk("Unknown ChipID 0x%02x\n", this->ChipID);
-		kfree(mtd);
-		iounmap(this->virtadr);
-		return;
-	}
-
-	printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
-	       this->physadr);
-
-	mtd->type = MTD_NANDFLASH;
-	mtd->flags = MTD_CAP_NANDFLASH;
-	mtd->writebufsize = mtd->writesize = 512;
-	mtd->oobsize = 16;
-	mtd->ecc_strength = 2;
-	mtd->owner = THIS_MODULE;
-	mtd->_erase = doc_erase;
-	mtd->_read = doc_read;
-	mtd->_write = doc_write;
-	mtd->_read_oob = doc_read_oob;
-	mtd->_write_oob = doc_write_oob;
-	this->curfloor = -1;
-	this->curchip = -1;
-	mutex_init(&this->lock);
-
-	/* Ident all the chips present. */
-	DoC_ScanChips(this, maxchips);
-
-	if (!this->totlen) {
-		kfree(mtd);
-		iounmap(this->virtadr);
-	} else {
-		this->nextdoc = doc2klist;
-		doc2klist = mtd;
-		mtd->size = this->totlen;
-		mtd->erasesize = this->erasesize;
-		mtd_device_register(mtd, NULL, 0);
-		return;
-	}
-}
-EXPORT_SYMBOL_GPL(DoC2k_init);
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
-		    size_t * retlen, u_char * buf)
-{
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip;
-	unsigned char syndrome[6], eccbuf[6];
-	volatile char dummy;
-	int i, len256 = 0, ret=0;
-	size_t left = len;
-
-	mutex_lock(&this->lock);
-	while (left) {
-		len = left;
-
-		/* Don't allow a single read to cross a 512-byte block boundary */
-		if (from + len > ((from | 0x1ff) + 1))
-			len = ((from | 0x1ff) + 1) - from;
-
-		/* The ECC will not be calculated correctly if fewer than 512 bytes are read */
-		if (len != 0x200)
-			printk(KERN_WARNING
-			       "ECC needs a full sector read (adr: %lx size %lx)\n",
-			       (long) from, (long) len);
-
-		/* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
-
-
-		/* Find the chip which is to be used and select it */
-		mychip = &this->chips[from >> (this->chipshift)];
-
-		if (this->curfloor != mychip->floor) {
-			DoC_SelectFloor(this, mychip->floor);
-			DoC_SelectChip(this, mychip->chip);
-		} else if (this->curchip != mychip->chip) {
-			DoC_SelectChip(this, mychip->chip);
-		}
-
-		this->curfloor = mychip->floor;
-		this->curchip = mychip->chip;
-
-		DoC_Command(this,
-			    (!this->page256
-			     && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
-			    CDSN_CTRL_WP);
-		DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
-			    CDSN_CTRL_ECC_IO);
-
-		/* Prime the ECC engine */
-		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
-		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
-
-		/* handle a read crossing a 256-byte sector on 2M x 8-bit devices */
-		if (this->page256 && from + len > (from | 0xff) + 1) {
-			len256 = (from | 0xff) + 1 - from;
-			DoC_ReadBuf(this, buf, len256);
-
-			DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
-			DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
-				    CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
-		}
-
-		DoC_ReadBuf(this, &buf[len256], len - len256);
-
-		/* Let the caller know we completed it */
-		*retlen += len;
-
-		/* Read the ECC data through the DiskOnChip ECC logic */
-		/* Note: this will work even with 2M x 8bit devices as   */
-		/*       they have 8 bytes of OOB per 256 page. mf.      */
-		DoC_ReadBuf(this, eccbuf, 6);
-
-		/* Flush the pipeline */
-		if (DoC_is_Millennium(this)) {
-			dummy = ReadDOC(docptr, ECCConf);
-			dummy = ReadDOC(docptr, ECCConf);
-			i = ReadDOC(docptr, ECCConf);
-		} else {
-			dummy = ReadDOC(docptr, 2k_ECCStatus);
-			dummy = ReadDOC(docptr, 2k_ECCStatus);
-			i = ReadDOC(docptr, 2k_ECCStatus);
-		}
-
-		/* Check the ECC Status */
-		if (i & 0x80) {
-			int nb_errors;
-			/* There was an ECC error */
-#ifdef ECC_DEBUG
-			printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
-			/* Read the ECC syndrome through the DiskOnChip ECC
-			   logic.  These syndrome bytes will all be zero when
-			   there is no error */
-			for (i = 0; i < 6; i++) {
-				syndrome[i] =
-					ReadDOC(docptr, ECCSyndrome0 + i);
-			}
-			nb_errors = doc_decode_ecc(buf, syndrome);
-
-#ifdef ECC_DEBUG
-			printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
-#endif
-			if (nb_errors < 0) {
-				/* We return an error, but have actually done
-				   the read. This cannot be reported to
-				   user-space via sys_read(), but at least
-				   MTD-aware code can detect it by
-				   checking *retlen */
-				ret = -EIO;
-			}
-		}
-
-#ifdef PSYCHO_DEBUG
-		printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
-		       (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
-		       eccbuf[3], eccbuf[4], eccbuf[5]);
-#endif
-
-		/* disable the ECC engine */
-		WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
-		/* according to 11.4.1, we need to wait for the busy line
-		 * to drop if we read to the end of the page. */
-		if (((from + len) & 0x1ff) == 0)
-			DoC_WaitReady(this);
-
-		from += len;
-		left -= len;
-		buf += len;
-	}
-
-	mutex_unlock(&this->lock);
-
-	return ret;
-}
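The clamp at the top of the read loop rounds the start offset up to the next 512-byte boundary and limits the transfer to that point. A worked sketch of the arithmetic, with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned long from = 0x1f0, len = 0x40;

	/* (from | 0x1ff) + 1 is the next 512-byte boundary above from */
	if (from + len > (from | 0x1ff) + 1)
		len = ((from | 0x1ff) + 1) - from;

	/* 0x1f0 | 0x1ff = 0x1ff; +1 = 0x200; len becomes 0x10 */
	printf("clamped len: 0x%lx\n", len);
	return 0;
}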
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
-		     size_t * retlen, const u_char * buf)
-{
-	struct DiskOnChip *this = mtd->priv;
-	int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
-	void __iomem *docptr = this->virtadr;
-	unsigned char eccbuf[6];
-	volatile char dummy;
-	int len256 = 0;
-	struct Nand *mychip;
-	size_t left = len;
-	int status;
-
-	mutex_lock(&this->lock);
-	while (left) {
-		len = left;
-
-		/* Don't allow a single write to cross a 512-byte block boundary */
-		if (to + len > ((to | 0x1ff) + 1))
-			len = ((to | 0x1ff) + 1) - to;
-
-		/* The ECC will not be calculated correctly if less than 512 is written */
-/* DBB-
-		if (len != 0x200 && eccbuf)
-			printk(KERN_WARNING
-			       "ECC needs a full sector write (adr: %lx size %lx)\n",
-			       (long) to, (long) len);
-   -DBB */
-
-		/* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
-
-		/* Find the chip which is to be used and select it */
-		mychip = &this->chips[to >> (this->chipshift)];
-
-		if (this->curfloor != mychip->floor) {
-			DoC_SelectFloor(this, mychip->floor);
-			DoC_SelectChip(this, mychip->chip);
-		} else if (this->curchip != mychip->chip) {
-			DoC_SelectChip(this, mychip->chip);
-		}
-
-		this->curfloor = mychip->floor;
-		this->curchip = mychip->chip;
-
-		/* Set device to main plane of flash */
-		DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
-		DoC_Command(this,
-			    (!this->page256
-			     && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
-			    CDSN_CTRL_WP);
-
-		DoC_Command(this, NAND_CMD_SEQIN, 0);
-		DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
-
-		/* Prime the ECC engine */
-		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
-		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
-		/* handle a write crossing a 256-byte sector on 2M x 8-bit devices */
-		if (this->page256 && to + len > (to | 0xff) + 1) {
-			len256 = (to | 0xff) + 1 - to;
-			DoC_WriteBuf(this, buf, len256);
-
-			DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
-			DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
-			/* There's an implicit DoC_WaitReady() in DoC_Command */
-
-			dummy = ReadDOC(docptr, CDSNSlowIO);
-			DoC_Delay(this, 2);
-
-			if (ReadDOC_(docptr, this->ioreg) & 1) {
-				printk(KERN_ERR "Error programming flash\n");
-				/* Error in programming */
-				*retlen = 0;
-				mutex_unlock(&this->lock);
-				return -EIO;
-			}
-
-			DoC_Command(this, NAND_CMD_SEQIN, 0);
-			DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
-				    CDSN_CTRL_ECC_IO);
-		}
-
-		DoC_WriteBuf(this, &buf[len256], len - len256);
-
-		WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl);
-
-		if (DoC_is_Millennium(this)) {
-			WriteDOC(0, docptr, NOP);
-			WriteDOC(0, docptr, NOP);
-			WriteDOC(0, docptr, NOP);
-		} else {
-			WriteDOC_(0, docptr, this->ioreg);
-			WriteDOC_(0, docptr, this->ioreg);
-			WriteDOC_(0, docptr, this->ioreg);
-		}
-
-		WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
-			 CDSNControl);
-
-		/* Read the ECC data through the DiskOnChip ECC logic */
-		for (di = 0; di < 6; di++) {
-			eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
-		}
-
-		/* Reset the ECC engine */
-		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-
-#ifdef PSYCHO_DEBUG
-		printk
-			("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
-			 (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
-			 eccbuf[4], eccbuf[5]);
-#endif
-		DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-
-		DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
-		/* There's an implicit DoC_WaitReady() in DoC_Command */
-
-		if (DoC_is_Millennium(this)) {
-			ReadDOC(docptr, ReadPipeInit);
-			status = ReadDOC(docptr, LastDataRead);
-		} else {
-			dummy = ReadDOC(docptr, CDSNSlowIO);
-			DoC_Delay(this, 2);
-			status = ReadDOC_(docptr, this->ioreg);
-		}
-
-		if (status & 1) {
-			printk(KERN_ERR "Error programming flash\n");
-			/* Error in programming */
-			*retlen = 0;
-			mutex_unlock(&this->lock);
-			return -EIO;
-		}
-
-		/* Let the caller know we completed it */
-		*retlen += len;
-
-		{
-			unsigned char x[8];
-			size_t dummy;
-			int ret;
-
-			/* Write the ECC data to flash */
-			for (di=0; di<6; di++)
-				x[di] = eccbuf[di];
-
-			x[6]=0x55;
-			x[7]=0x55;
-
-			ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
-			if (ret) {
-				mutex_unlock(&this->lock);
-				return ret;
-			}
-		}
-
-		to += len;
-		left -= len;
-		buf += len;
-	}
-
-	mutex_unlock(&this->lock);
-	return 0;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
-			struct mtd_oob_ops *ops)
-{
-	struct DiskOnChip *this = mtd->priv;
-	int len256 = 0, ret;
-	struct Nand *mychip;
-	uint8_t *buf = ops->oobbuf;
-	size_t len = ops->len;
-
-	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
-	ofs += ops->ooboffs;
-
-	mutex_lock(&this->lock);
-
-	mychip = &this->chips[ofs >> this->chipshift];
-
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(this, mychip->floor);
-		DoC_SelectChip(this, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(this, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* update address for 2M x 8bit devices. OOB starts on the second */
-	/* page to maintain compatibility with doc_read_ecc. */
-	if (this->page256) {
-		if (!(ofs & 0x8))
-			ofs += 0x100;
-		else
-			ofs -= 0x8;
-	}
-
-	DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
-	DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0);
-
-	/* handle OOB data crossing an 8-byte boundary on 2M x 8-bit devices */
-	/* Note: the datasheet says it should automatically wrap to the */
-	/*       next OOB block, but it didn't work here. mf.           */
-	if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
-		len256 = (ofs | 0x7) + 1 - ofs;
-		DoC_ReadBuf(this, buf, len256);
-
-		DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
-		DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff),
-			    CDSN_CTRL_WP, 0);
-	}
-
-	DoC_ReadBuf(this, &buf[len256], len - len256);
-
-	ops->retlen = len;
-	/* Reading the full OOB data drops us off the end of the page,
-	 * causing the flash device to go into busy mode, so we need
-	 * to wait until ready; see 11.4.1 and the Toshiba TC58256FT docs */
-
-	ret = DoC_WaitReady(this);
-
-	mutex_unlock(&this->lock);
-	return ret;
-
-}
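The page256 adjustment above remaps OOB offsets so the eight spare bytes line up with the layout doc_read expects. A stand-alone sketch of that remapping (hypothetical helper name):

#include <stdio.h>

/* Hypothetical model of the page256 OOB remap above: offsets with
 * bit 3 clear move to the second 256-byte page; offsets with bit 3
 * set shift down by 8. */
static unsigned long remap_oob_page256(unsigned long ofs)
{
	if (!(ofs & 0x8))
		return ofs + 0x100;
	return ofs - 0x8;
}

int main(void)
{
	printf("0x0 -> 0x%lx\n", remap_oob_page256(0x0));	/* 0x100 */
	printf("0x8 -> 0x%lx\n", remap_oob_page256(0x8));	/* 0x0 */
	return 0;
}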
-
-static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
-				size_t * retlen, const u_char * buf)
-{
-	struct DiskOnChip *this = mtd->priv;
-	int len256 = 0;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-	volatile int dummy;
-	int status;
-
-	//      printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
-	//   buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(this, mychip->floor);
-		DoC_SelectChip(this, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(this, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* disable the ECC engine */
-	WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
-	WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
-
-	/* issue the Read2 command to set the pointer to the Spare Data Area. */
-	DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
-	/* update address for 2M x 8bit devices. OOB starts on the second */
-	/* page to maintain compatibility with doc_read_ecc. */
-	if (this->page256) {
-		if (!(ofs & 0x8))
-			ofs += 0x100;
-		else
-			ofs -= 0x8;
-	}
-
-	/* issue the Serial Data In command to initiate the Page Program process */
-	DoC_Command(this, NAND_CMD_SEQIN, 0);
-	DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0);
-
-	/* handle OOB data crossing an 8-byte boundary on 2M x 8-bit devices */
-	/* Note: the datasheet says it should automatically wrap to the */
-	/*       next OOB block, but it didn't work here. mf.           */
-	if (this->page256 && ofs + len > (ofs | 0x7) + 1) {
-		len256 = (ofs | 0x7) + 1 - ofs;
-		DoC_WriteBuf(this, buf, len256);
-
-		DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-		DoC_Command(this, NAND_CMD_STATUS, 0);
-		/* DoC_WaitReady() is implicit in DoC_Command */
-
-		if (DoC_is_Millennium(this)) {
-			ReadDOC(docptr, ReadPipeInit);
-			status = ReadDOC(docptr, LastDataRead);
-		} else {
-			dummy = ReadDOC(docptr, CDSNSlowIO);
-			DoC_Delay(this, 2);
-			status = ReadDOC_(docptr, this->ioreg);
-		}
-
-		if (status & 1) {
-			printk(KERN_ERR "Error programming oob data\n");
-			/* There was an error */
-			*retlen = 0;
-			return -EIO;
-		}
-		DoC_Command(this, NAND_CMD_SEQIN, 0);
-		DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0);
-	}
-
-	DoC_WriteBuf(this, &buf[len256], len - len256);
-
-	DoC_Command(this, NAND_CMD_PAGEPROG, 0);
-	DoC_Command(this, NAND_CMD_STATUS, 0);
-	/* DoC_WaitReady() is implicit in DoC_Command */
-
-	if (DoC_is_Millennium(this)) {
-		ReadDOC(docptr, ReadPipeInit);
-		status = ReadDOC(docptr, LastDataRead);
-	} else {
-		dummy = ReadDOC(docptr, CDSNSlowIO);
-		DoC_Delay(this, 2);
-		status = ReadDOC_(docptr, this->ioreg);
-	}
-
-	if (status & 1) {
-		printk(KERN_ERR "Error programming oob data\n");
-		/* There was an error */
-		*retlen = 0;
-		return -EIO;
-	}
-
-	*retlen = len;
-	return 0;
-
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
-			 struct mtd_oob_ops *ops)
-{
-	struct DiskOnChip *this = mtd->priv;
-	int ret;
-
-	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
-	mutex_lock(&this->lock);
-	ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
-				   &ops->retlen, ops->oobbuf);
-
-	mutex_unlock(&this->lock);
-	return ret;
-}
-
-static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	struct DiskOnChip *this = mtd->priv;
-	__u32 ofs = instr->addr;
-	__u32 len = instr->len;
-	volatile int dummy;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip;
-	int status;
-
- 	mutex_lock(&this->lock);
-
-	if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
-		mutex_unlock(&this->lock);
-		return -EINVAL;
-	}
-
-	instr->state = MTD_ERASING;
-
-	/* FIXME: Do this in the background. Use timers or schedule_task() */
-	while(len) {
-		mychip = &this->chips[ofs >> this->chipshift];
-
-		if (this->curfloor != mychip->floor) {
-			DoC_SelectFloor(this, mychip->floor);
-			DoC_SelectChip(this, mychip->chip);
-		} else if (this->curchip != mychip->chip) {
-			DoC_SelectChip(this, mychip->chip);
-		}
-		this->curfloor = mychip->floor;
-		this->curchip = mychip->chip;
-
-		DoC_Command(this, NAND_CMD_ERASE1, 0);
-		DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
-		DoC_Command(this, NAND_CMD_ERASE2, 0);
-
-		DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
-
-		if (DoC_is_Millennium(this)) {
-			ReadDOC(docptr, ReadPipeInit);
-			status = ReadDOC(docptr, LastDataRead);
-		} else {
-			dummy = ReadDOC(docptr, CDSNSlowIO);
-			DoC_Delay(this, 2);
-			status = ReadDOC_(docptr, this->ioreg);
-		}
-
-		if (status & 1) {
-			printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
-			/* There was an error */
-			instr->state = MTD_ERASE_FAILED;
-			goto callback;
-		}
-		ofs += mtd->erasesize;
-		len -= mtd->erasesize;
-	}
-	instr->state = MTD_ERASE_DONE;
-
- callback:
-	mtd_erase_callback(instr);
-
-	mutex_unlock(&this->lock);
-	return 0;
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2000(void)
-{
-	struct mtd_info *mtd;
-	struct DiskOnChip *this;
-
-	while ((mtd = doc2klist)) {
-		this = mtd->priv;
-		doc2klist = this->nextdoc;
-
-		mtd_device_unregister(mtd);
-
-		iounmap(this->virtadr);
-		kfree(this->chips);
-		kfree(mtd);
-	}
-}
-
-module_exit(cleanup_doc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
-
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
deleted file mode 100644
index f692795..0000000
--- a/drivers/mtd/devices/doc2001.c
+++ /dev/null
@@ -1,824 +0,0 @@
-
-/*
- * Linux driver for Disk-On-Chip Millennium
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
- * This may be due to different revisions of the built-in ASIC controller, or
- * simply a QA/bug issue. Who knows? If you have trouble, please uncomment
- * this:*/
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
-		    size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
-		     size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
-			struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
-			 struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmillist = NULL;
-
-/* Perform the required delay cycles by reading from the NOP register */
-static void DoC_Delay(void __iomem * docptr, unsigned short cycles)
-{
-	volatile char dummy;
-	int i;
-
-	for (i = 0; i < cycles; i++)
-		dummy = ReadDOC(docptr, NOP);
-}
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
-	unsigned short c = 0xffff;
-
-	pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
-	/* Out-of-line routine to wait for chip response */
-	while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
-		;
-
-	if (c == 0)
-		pr_debug("_DoC_WaitReady timed out.\n");
-
-	return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
-	/* This is inline, to optimise the common case, where it's ready instantly */
-	int ret = 0;
-
-	/* 4 reads from the NOP register should be issued prior to the read from CDSNControl,
-	   see Software Requirement 11.4 item 2. */
-	DoC_Delay(docptr, 4);
-
-	if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
-		/* Call the out-of-line routine to wait */
-		ret = _DoC_WaitReady(docptr);
-
-	/* issue 2 reads from the NOP register after reading from the CDSNControl
-	   register, see Software Requirement 11.4 item 2. */
-	DoC_Delay(docptr, 2);
-
-	return ret;
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register
-   with the internal pipeline. 4 delay cycles (reads from the NOP register) are
-   required after writing to the CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static void DoC_Command(void __iomem * docptr, unsigned char command,
-			       unsigned char xtraflags)
-{
-	/* Assert the CLE (Command Latch Enable) line to the flash chip */
-	WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl);
-	DoC_Delay(docptr, 4);
-
-	/* Send the command */
-	WriteDOC(command, docptr, Mil_CDSN_IO);
-	WriteDOC(0x00, docptr, WritePipeTerm);
-
-	/* Lower the CLE line */
-	WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
-	DoC_Delay(docptr, 4);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the CDSN IO register
-   with the internal pipeline. 4 delay cycles (reads from the NOP register) are
-   required after writing to the CDSN Control register, see Software Requirement 11.4 item 3. */
-
-static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs,
-			       unsigned char xtraflags1, unsigned char xtraflags2)
-{
-	/* Assert the ALE (Address Latch Enable) line to the flash chip */
-	WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl);
-	DoC_Delay(docptr, 4);
-
-	/* Send the address */
-	switch (numbytes) {
-	case 1:
-		/* Send single byte, bits 0-7. */
-		WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
-		WriteDOC(0x00, docptr, WritePipeTerm);
-		break;
-	case 2:
-		/* Send bits 9-16 followed by 17-23 */
-		WriteDOC((ofs >> 9)  & 0xff, docptr, Mil_CDSN_IO);
-		WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
-		WriteDOC(0x00, docptr, WritePipeTerm);
-		break;
-	case 3:
-		/* Send 0-7, 9-16, then 17-23 */
-		WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO);
-		WriteDOC((ofs >> 9)  & 0xff, docptr, Mil_CDSN_IO);
-		WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO);
-		WriteDOC(0x00, docptr, WritePipeTerm);
-		break;
-	default:
-		return;
-	}
-
-	/* Lower the ALE line */
-	WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl);
-	DoC_Delay(docptr, 4);
-}
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
-	/* Select the individual flash chip requested */
-	WriteDOC(chip, docptr, CDSNDeviceSelect);
-	DoC_Delay(docptr, 4);
-
-	/* Wait for it to be ready */
-	return DoC_WaitReady(docptr);
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
-	/* Select the floor (bank) of chips required */
-	WriteDOC(floor, docptr, FloorSelect);
-
-	/* Wait for the chip to be ready */
-	return DoC_WaitReady(docptr);
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
-	int mfr, id, i, j;
-	volatile char dummy;
-
-	/* Page in the required floor/chip
-	   FIXME: is this supported by Millennium ?? */
-	DoC_SelectFloor(doc->virtadr, floor);
-	DoC_SelectChip(doc->virtadr, chip);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP);
-	DoC_WaitReady(doc->virtadr);
-
-	/* Read the NAND chip ID: 1. Send ReadID command */
-	DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP);
-
-	/* Read the NAND chip ID: 2. Send address byte zero */
-	DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00);
-
-	/* Read the manufacturer and device id codes of the flash device through
-	   CDSN IO register see Software Requirement 11.4 item 5.*/
-	dummy = ReadDOC(doc->virtadr, ReadPipeInit);
-	DoC_Delay(doc->virtadr, 2);
-	mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO);
-
-	DoC_Delay(doc->virtadr, 2);
-	id  = ReadDOC(doc->virtadr, Mil_CDSN_IO);
-	dummy = ReadDOC(doc->virtadr, LastDataRead);
-
-	/* No response - return failure */
-	if (mfr == 0xff || mfr == 0)
-		return 0;
-
-	/* FIXME: deal with the multi-flash, multi-Millennium case more carefully */
-	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
-		if ( id == nand_flash_ids[i].id) {
-			/* Try to identify manufacturer */
-			for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
-				if (nand_manuf_ids[j].id == mfr)
-					break;
-			}
-			printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
-			       "Chip ID: %2.2X (%s:%s)\n",
-			       mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
-			doc->mfr = mfr;
-			doc->id = id;
-			doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
-			break;
-		}
-	}
-
-	if (nand_flash_ids[i].name == NULL)
-		return 0;
-	else
-		return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
-	int floor, chip;
-	int numchips[MAX_FLOORS_MIL];
-	int ret;
-
-	this->numchips = 0;
-	this->mfr = 0;
-	this->id = 0;
-
-	/* For each floor, find the number of valid chips it contains */
-	for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) {
-		numchips[floor] = 0;
-		for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) {
-			ret = DoC_IdentChip(this, floor, chip);
-			if (ret) {
-				numchips[floor]++;
-				this->numchips++;
-			}
-		}
-	}
-	/* If there are none at all that we recognise, bail */
-	if (!this->numchips) {
-		printk("No flash chips recognised.\n");
-		return;
-	}
-
-	/* Allocate an array to hold the information for each chip */
-	this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
-	if (!this->chips){
-		printk("No memory for allocating chip info structures\n");
-		return;
-	}
-
-	/* Fill out the chip array with {floor, chipno} for each
-	 * detected chip in the device. */
-	for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) {
-		for (chip = 0 ; chip < numchips[floor] ; chip++) {
-			this->chips[ret].floor = floor;
-			this->chips[ret].chip = chip;
-			this->chips[ret].curadr = 0;
-			this->chips[ret].curmode = 0x50;
-			ret++;
-		}
-	}
-
-	/* Calculate and print the total size of the device */
-	this->totlen = this->numchips * (1 << this->chipshift);
-	printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
-	       this->numchips ,this->totlen >> 20);
-}
-
-static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
-	int tmp1, tmp2, retval;
-
-	if (doc1->physadr == doc2->physadr)
-		return 1;
-
-	/* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, so we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
-	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
-	tmp1 = ReadDOC(doc1->virtadr, AliasResolution);
-	tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
-	if (tmp1 != tmp2)
-		return 0;
-
-	WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution);
-	tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
-	if (tmp2 == (tmp1+1) % 0xff)
-		retval = 1;
-	else
-		retval = 0;
-
-	/* Restore register contents.  May not be necessary, but do it just to
-	 * be safe. */
-	WriteDOC(tmp1, doc1->virtadr, AliasResolution);
-
-	return retval;
-}
-
-/* This routine is looked up by the docprobe code via symbol_get(),
- * which will bump the use count of this module. */
-void DoCMil_init(struct mtd_info *mtd)
-{
-	struct DiskOnChip *this = mtd->priv;
-	struct DiskOnChip *old = NULL;
-
-	/* We must avoid being called twice for the same device. */
-	if (docmillist)
-		old = docmillist->priv;
-
-	while (old) {
-		if (DoCMil_is_alias(this, old)) {
-			printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at "
-			       "0x%lX - already configured\n", this->physadr);
-			iounmap(this->virtadr);
-			kfree(mtd);
-			return;
-		}
-		if (old->nextdoc)
-			old = old->nextdoc->priv;
-		else
-			old = NULL;
-	}
-
-	mtd->name = "DiskOnChip Millennium";
-	printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n",
-	       this->physadr);
-
-	mtd->type = MTD_NANDFLASH;
-	mtd->flags = MTD_CAP_NANDFLASH;
-
-	/* FIXME: erase size is not always 8KiB */
-	mtd->erasesize = 0x2000;
-	mtd->writebufsize = mtd->writesize = 512;
-	mtd->oobsize = 16;
-	mtd->ecc_strength = 2;
-	mtd->owner = THIS_MODULE;
-	mtd->_erase = doc_erase;
-	mtd->_read = doc_read;
-	mtd->_write = doc_write;
-	mtd->_read_oob = doc_read_oob;
-	mtd->_write_oob = doc_write_oob;
-	this->curfloor = -1;
-	this->curchip = -1;
-
-	/* Ident all the chips present. */
-	DoC_ScanChips(this);
-
-	if (!this->totlen) {
-		kfree(mtd);
-		iounmap(this->virtadr);
-	} else {
-		this->nextdoc = docmillist;
-		docmillist = mtd;
-		mtd->size  = this->totlen;
-		mtd_device_register(mtd, NULL, 0);
-		return;
-	}
-}
-EXPORT_SYMBOL_GPL(DoCMil_init);
-
-static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
-		     size_t *retlen, u_char *buf)
-{
-	int i, ret;
-	volatile char dummy;
-	unsigned char syndrome[6], eccbuf[6];
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
-	/* Don't allow a single read to cross a 512-byte block boundary */
-	if (from + len > ((from | 0x1ff) + 1))
-		len = ((from | 0x1ff) + 1) - from;
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* issue the Read0 or Read1 command depending on which half of the page
-	   we are accessing. Poll the Flash Ready bit after issuing the 3
-	   address bytes in Sequence Read Mode, see Software Requirement 11.4 item 1.*/
-	DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP);
-	DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00);
-	DoC_WaitReady(docptr);
-
-	/* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
-	WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
-	WriteDOC (DOC_ECC_EN, docptr, ECCConf);
-
-	/* Read the data via the internal pipeline through CDSN IO register,
-	   see Pipelined Read Operations 11.3 */
-	dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
-	for (i = 0; i < len-1; i++) {
-		/* N.B. you have to increase the source address in this way or the
-		   ECC logic will not work properly */
-		buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
-	}
-#else
-	memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
-	buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
-	/* Let the caller know we completed it */
-	*retlen = len;
-        ret = 0;
-
-	/* Read the ECC data from Spare Data Area,
-	   see Reed-Solomon EDC/ECC 11.1 */
-	dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
-	for (i = 0; i < 5; i++) {
-		/* N.B. you have to increase the source address in this way or the
-		   ECC logic will not work properly */
-		eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
-	}
-#else
-	memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5);
-#endif
-	eccbuf[5] = ReadDOC(docptr, LastDataRead);
-
-	/* Flush the pipeline */
-	dummy = ReadDOC(docptr, ECCConf);
-	dummy = ReadDOC(docptr, ECCConf);
-
-	/* Check the ECC Status */
-	if (ReadDOC(docptr, ECCConf) & 0x80) {
-		int nb_errors;
-		/* There was an ECC error */
-#ifdef ECC_DEBUG
-		printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
-		/* Read the ECC syndrome through the DiskOnChip ECC logic.
-		   These syndrome will be all ZERO when there is no error */
-		for (i = 0; i < 6; i++) {
-			syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
-		}
-		nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
-		printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
-		if (nb_errors < 0) {
-			/* We return an error, but have actually done the read. This
-			   cannot be reported to user-space via sys_read(), but at least
-			   MTD-aware code can detect it by checking *retlen */
-			ret = -EIO;
-		}
-	}
-
-#ifdef PSYCHO_DEBUG
-	printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
-	       (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
-	       eccbuf[4], eccbuf[5]);
-#endif
-
-	/* disable the ECC engine */
-	WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
-	return ret;
-}
-
-static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
-		      size_t *retlen, const u_char *buf)
-{
-	int i,ret = 0;
-	char eccbuf[6];
-	volatile char dummy;
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
-#if 0
-	/* Don't allow a single write to cross a 512-byte block boundary */
-	if (to + len > ( (to | 0x1ff) + 1))
-		len = ((to | 0x1ff) + 1) - to;
-#else
-	/* Don't allow writes which aren't exactly one block */
-	if (to & 0x1ff || len != 0x200)
-		return -EINVAL;
-#endif
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(docptr, NAND_CMD_RESET, 0x00);
-	DoC_WaitReady(docptr);
-	/* Set device to main plane of flash */
-	DoC_Command(docptr, NAND_CMD_READ0, 0x00);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
-	DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
-	DoC_Address(docptr, 3, to, 0x00, 0x00);
-	DoC_WaitReady(docptr);
-
-	/* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
-	WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
-	WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-
-	/* Write the data via the internal pipeline through CDSN IO register,
-	   see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
-	for (i = 0; i < len; i++) {
-		/* N.B. you have to increase the source address in this way or the
-		   ECC logic will not work properly */
-		WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
-	}
-#else
-	memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
-	WriteDOC(0x00, docptr, WritePipeTerm);
-
-	/* Write the ECC data to flash; it is generated by the DiskOnChip ECC logic,
-	   see Reed-Solomon EDC/ECC 11.1 */
-	WriteDOC(0, docptr, NOP);
-	WriteDOC(0, docptr, NOP);
-	WriteDOC(0, docptr, NOP);
-
-	/* Read the ECC data through the DiskOnChip ECC logic */
-	for (i = 0; i < 6; i++) {
-		eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i);
-	}
-
-	/* disable the ECC engine */
-	WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
-
-#ifndef USE_MEMCPY
-	/* Write the ECC data to flash */
-	for (i = 0; i < 6; i++) {
-		/* N.B. you have to increase the source address in this way or the
-		   ECC logic will not work properly */
-		WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i);
-	}
-#else
-	memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6);
-#endif
-
-	/* write the block status BLOCK_USED (0x5555) at the end of the ECC data.
-	   FIXME: this is only a hack for programming the IPL area for LinuxBIOS
-	   and should be replaced with proper code in user-space utilities */
-	WriteDOC(0x55, docptr, Mil_CDSN_IO);
-	WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
-
-	WriteDOC(0x00, docptr, WritePipeTerm);
-
-#ifdef PSYCHO_DEBUG
-	printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
-	       (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
-	       eccbuf[4], eccbuf[5]);
-#endif
-
-	/* Commit the Page Program command and wait for ready
-	   see Software Requirement 11.4 item 1.*/
-	DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
-	DoC_WaitReady(docptr);
-
-	/* Read the status of the flash device through CDSN IO register
-	   see Software Requirement 11.4 item 5.*/
-	DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
-	dummy = ReadDOC(docptr, ReadPipeInit);
-	DoC_Delay(docptr, 2);
-	if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
-		printk("Error programming flash\n");
-		/* Error in programming
-		   FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		ret = -EIO;
-	}
-	dummy = ReadDOC(docptr, LastDataRead);
-
-	/* Let the caller know we completed it */
-	*retlen = len;
-
-	return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
-			struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
-	int i;
-#endif
-	volatile char dummy;
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-	uint8_t *buf = ops->oobbuf;
-	size_t len = ops->len;
-
-	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
-	ofs += ops->ooboffs;
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* disable the ECC engine */
-	WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
-	WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
-	/* issue the Read2 command to set the pointer to the Spare Data Area.
-	   Poll the Flash Ready bit after issuing the 3 address bytes in
-	   Sequence Read Mode, see Software Requirement 11.4 item 1.*/
-	DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
-	DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00);
-	DoC_WaitReady(docptr);
-
-	/* Read the data out via the internal pipeline through CDSN IO register,
-	   see Pipelined Read Operations 11.3 */
-	dummy = ReadDOC(docptr, ReadPipeInit);
-#ifndef USE_MEMCPY
-	for (i = 0; i < len-1; i++) {
-		/* N.B. you have to increase the source address in this way or the
-		   ECC logic will not work properly */
-		buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
-	}
-#else
-	memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1);
-#endif
-	buf[len - 1] = ReadDOC(docptr, LastDataRead);
-
-	ops->retlen = len;
-
-	return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
-			 struct mtd_oob_ops *ops)
-{
-#ifndef USE_MEMCPY
-	int i;
-#endif
-	volatile char dummy;
-	int ret = 0;
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-	uint8_t *buf = ops->oobbuf;
-	size_t len = ops->len;
-
-	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
-	ofs += ops->ooboffs;
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* disable the ECC engine */
-	WriteDOC (DOC_ECC_RESET, docptr, ECCConf);
-	WriteDOC (DOC_ECC_DIS, docptr, ECCConf);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP);
-	DoC_WaitReady(docptr);
-	/* issue the Read2 command to set the pointer to the Spare Data Area. */
-	DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP);
-
-	/* issue the Serial Data In command to initiate the Page Program process */
-	DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
-	DoC_Address(docptr, 3, ofs, 0x00, 0x00);
-
-	/* Write the data via the internal pipeline through CDSN IO register,
-	   see Pipelined Write Operations 11.2 */
-#ifndef USE_MEMCPY
-	for (i = 0; i < len; i++) {
-		/* N.B. you have to increase the source address in this way or the
-		   ECC logic will not work properly */
-		WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
-	}
-#else
-	memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
-	WriteDOC(0x00, docptr, WritePipeTerm);
-
-	/* Commit the Page Program command and wait for ready
-	   see Software Requirement 11.4 item 1.*/
-	DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
-	DoC_WaitReady(docptr);
-
-	/* Read the status of the flash device through CDSN IO register
-	   see Software Requirement 11.4 item 5.*/
-	DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
-	dummy = ReadDOC(docptr, ReadPipeInit);
-	DoC_Delay(docptr, 2);
-	if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
-		printk("Error programming oob data\n");
-		/* FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		ops->retlen = 0;
-		ret = -EIO;
-	}
-	dummy = ReadDOC(docptr, LastDataRead);
-
-	ops->retlen = len;
-
-	return ret;
-}
-
-static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	volatile char dummy;
-	struct DiskOnChip *this = mtd->priv;
-	__u32 ofs = instr->addr;
-	__u32 len = instr->len;
-	void __iomem *docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
-	if (len != mtd->erasesize)
-		printk(KERN_WARNING "Erase not right size (%x != %x)\n",
-		       len, mtd->erasesize);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	instr->state = MTD_ERASE_PENDING;
-
-	/* issue the Erase Setup command */
-	DoC_Command(docptr, NAND_CMD_ERASE1, 0x00);
-	DoC_Address(docptr, 2, ofs, 0x00, 0x00);
-
-	/* Commit the Erase Start command and wait for ready
-	   see Software Requirement 11.4 item 1.*/
-	DoC_Command(docptr, NAND_CMD_ERASE2, 0x00);
-	DoC_WaitReady(docptr);
-
-	instr->state = MTD_ERASING;
-
-	/* Read the status of the flash device through CDSN IO register
-	   see Software Requirement 11.4 item 5.
-	   FIXME: it seems that we do not wait long enough; some blocks are not
-	   fully erased */
-	DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP);
-	dummy = ReadDOC(docptr, ReadPipeInit);
-	DoC_Delay(docptr, 2);
-	if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
-		printk("Error Erasing at 0x%x\n", ofs);
-		/* There was an error
-		   FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		instr->state = MTD_ERASE_FAILED;
-	} else
-		instr->state = MTD_ERASE_DONE;
-	dummy = ReadDOC(docptr, LastDataRead);
-
-	mtd_erase_callback(instr);
-
-	return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001(void)
-{
-	struct mtd_info *mtd;
-	struct DiskOnChip *this;
-
-	while ((mtd=docmillist)) {
-		this = mtd->priv;
-		docmillist = this->nextdoc;
-
-		mtd_device_unregister(mtd);
-
-		iounmap(this->virtadr);
-		kfree(this->chips);
-		kfree(mtd);
-	}
-}
-
-module_exit(cleanup_doc2001);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
-MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium");
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
deleted file mode 100644
index 4f2220a..0000000
--- a/drivers/mtd/devices/doc2001plus.c
+++ /dev/null
@@ -1,1080 +0,0 @@
-/*
- * Linux driver for Disk-On-Chip Millennium Plus
- *
- * (c) 2002-2003 Greg Ungerer <gerg@snapgear.com>
- * (c) 2002-2003 SnapGear Inc
- * (c) 1999 Machine Vision Holdings, Inc.
- * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
- *
- * Released under GPL
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-/* #define ECC_DEBUG */
-
-/* I have no idea why some DoC chips can not use memcpy_from|to_io().
- * This may be due to the different revisions of the built-in ASIC
- * controller, or simply a QA/bug issue. Who knows?? If you have trouble,
- * please uncomment this: */
-#undef USE_MEMCPY
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
-		size_t *retlen, u_char *buf);
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
-		size_t *retlen, const u_char *buf);
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
-			struct mtd_oob_ops *ops);
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
-			 struct mtd_oob_ops *ops);
-static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
-
-static struct mtd_info *docmilpluslist = NULL;
-
-
-/* Perform the required delay cycles by writing to the NOP register */
-static void DoC_Delay(void __iomem * docptr, int cycles)
-{
-	int i;
-
-	for (i = 0; (i < cycles); i++)
-		WriteDOC(0, docptr, Mplus_NOP);
-}
-
-#define	CDSN_CTRL_FR_B_MASK	(CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(void __iomem * docptr)
-{
-	unsigned int c = 0xffff;
-
-	pr_debug("_DoC_WaitReady called for out-of-line wait\n");
-
-	/* Out-of-line routine to wait for chip response */
-	while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
-		;
-
-	if (c == 0)
-		pr_debug("_DoC_WaitReady timed out.\n");
-
-	return (c == 0);
-}
-
-static inline int DoC_WaitReady(void __iomem * docptr)
-{
-	/* This is inline, to optimise the common case, where it's ready instantly */
-	int ret = 0;
-
-	/* A read from the NOP register should be issued prior to the read
-	   from CDSNControl, see Software Requirement 11.4 item 2. */
-	DoC_Delay(docptr, 4);
-
-	if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
-		/* Call the out-of-line routine to wait */
-		ret = _DoC_WaitReady(docptr);
-
-	return ret;
-}
-
-/* For some reason the Millennium Plus seems to occasionally put itself
- * into reset mode. For me this happens randomly, with no pattern that I
- * can detect. M-Systems suggest always checking for this on any block-level
- * operation and switching back to normal mode if the chip is in reset mode.
- */
-static inline void DoC_CheckASIC(void __iomem * docptr)
-{
-	/* Make sure the DoC is in normal mode */
-	if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) {
-		WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl);
-		WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm);
-	}
-}
-
-/* DoC_Command: Send a flash command to the flash chip through the Flash
- * command register. Two Write Pipeline Terminates are needed to complete
- * the send.
- */
-static void DoC_Command(void __iomem * docptr, unsigned char command,
-			       unsigned char xtraflags)
-{
-	WriteDOC(command, docptr, Mplus_FlashCmd);
-	WriteDOC(command, docptr, Mplus_WritePipeTerm);
-	WriteDOC(command, docptr, Mplus_WritePipeTerm);
-}
-
-/* DoC_Address: Set the current address for the flash chip through the Flash
- * Address register. Two Write Pipeline Terminates are needed to complete
- * the send.
- */
-static inline void DoC_Address(struct DiskOnChip *doc, int numbytes,
-			       unsigned long ofs, unsigned char xtraflags1,
-			       unsigned char xtraflags2)
-{
-	void __iomem * docptr = doc->virtadr;
-
-	/* Allow for possible Mill Plus internal flash interleaving */
-	ofs >>= doc->interleave;
-
-	switch (numbytes) {
-	case 1:
-		/* Send single byte, bits 0-7. */
-		WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
-		break;
-	case 2:
-		/* Send bits 9-16 followed by 17-23 */
-		WriteDOC((ofs >> 9)  & 0xff, docptr, Mplus_FlashAddress);
-		WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
-		break;
-	case 3:
-		/* Send 0-7, 9-16, then 17-23 */
-		WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
-		WriteDOC((ofs >> 9)  & 0xff, docptr, Mplus_FlashAddress);
-		WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
-		break;
-	default:
-		return;
-	}
-
-	WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-	WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-}
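-
-/* Worked example (editor's note, not in the original source): with
- * interleave off and numbytes == 3, ofs == 0x20000 is sent as the bytes
- * 0x00 (ofs & 0xff), 0x00 ((ofs >> 9) & 0xff) and 0x01 ((ofs >> 17) & 0xff),
- * followed by the two Write Pipeline Terminates above. */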
-
-/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(void __iomem * docptr, int chip)
-{
-	/* No choice for flash chip on Millennium Plus */
-	return 0;
-}
-
-/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(void __iomem * docptr, int floor)
-{
-	WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect);
-	return 0;
-}
-
-/*
- * Translate the given offset into the appropriate command and offset.
- * This does the mapping using the 16-bit interleave layout defined by
- * M-Systems, and looks like this for a sector pair:
- *  +-----------+-------+-------+-------+--------------+---------+-----------+
- *  | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055|
- *  +-----------+-------+-------+-------+--------------+---------+-----------+
- *  | Data 0    | ECC 0 |Flags0 |Flags1 | Data 1       |ECC 1    | OOB 1 + 2 |
- *  +-----------+-------+-------+-------+--------------+---------+-----------+
- */
-/* FIXME: This lives in INFTL, not here. Other users of flash devices
-   may not want it */
-static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
-{
-	struct DiskOnChip *this = mtd->priv;
-
-	if (this->interleave) {
-		unsigned int ofs = *from & 0x3ff;
-		unsigned int cmd;
-
-		if (ofs < 512) {
-			cmd = NAND_CMD_READ0;
-			ofs &= 0x1ff;
-		} else if (ofs < 1014) {
-			cmd = NAND_CMD_READ1;
-			ofs = (ofs & 0x1ff) + 10;
-		} else {
-			cmd = NAND_CMD_READOOB;
-			ofs = ofs - 1014;
-		}
-
-		*from = (*from & ~0x3ff) | ofs;
-		return cmd;
-	} else {
-		/* No interleave */
-		if ((*from) & 0x100)
-			return NAND_CMD_READ1;
-		return NAND_CMD_READ0;
-	}
-}
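-
-/* Worked example (editor's note, not in the original source): with
- * interleave on, *from == 0x01ff has ofs == 511 < 512, so NAND_CMD_READ0
- * is used and the offset stays in the Data 0 region; *from == 0x0300 has
- * ofs == 768, which falls in 512..1013, so NAND_CMD_READ1 is used and ofs
- * becomes (768 & 0x1ff) + 10 == 266, indexing into the Data 1 region. */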
-
-static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
-{
-	unsigned int ofs, cmd;
-
-	if (*from & 0x200) {
-		cmd = NAND_CMD_READOOB;
-		ofs = 10 + (*from & 0xf);
-	} else {
-		cmd = NAND_CMD_READ1;
-		ofs = (*from & 0xf);
-	}
-
-	*from = (*from & ~0x3ff) | ofs;
-	return cmd;
-}
-
-static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from)
-{
-	unsigned int ofs, cmd;
-
-	cmd = NAND_CMD_READ1;
-	ofs = (*from & 0x200) ? 8 : 6;
-	*from = (*from & ~0x3ff) | ofs;
-	return cmd;
-}
-
-static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from)
-{
-	unsigned int ofs, cmd;
-
-	cmd = NAND_CMD_READOOB;
-	ofs = (*from & 0x200) ? 24 : 16;
-	*from = (*from & ~0x3ff) | ofs;
-	return cmd;
-}
-
-static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
-	int i;
-	for (i = 0; i < len; i++)
-		buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
-#else
-	memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len);
-#endif
-}
-
-static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len)
-{
-#ifndef USE_MEMCPY
-	int i;
-	for (i = 0; i < len; i++)
-		WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
-#else
-	memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
-#endif
-}
-
-/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
-static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
-{
-	int mfr, id, i, j;
-	volatile char dummy;
-	void __iomem * docptr = doc->virtadr;
-
-	/* Page in the required floor/chip */
-	DoC_SelectFloor(docptr, floor);
-	DoC_SelectChip(docptr, chip);
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(docptr, NAND_CMD_RESET, 0);
-	DoC_WaitReady(docptr);
-
-	/* Read the NAND chip ID: 1. Send ReadID command */
-	DoC_Command(docptr, NAND_CMD_READID, 0);
-
-	/* Read the NAND chip ID: 2. Send address byte zero */
-	DoC_Address(doc, 1, 0x00, 0, 0x00);
-
-	WriteDOC(0, docptr, Mplus_FlashControl);
-	DoC_WaitReady(docptr);
-
-	/* Read the manufacturer and device ID codes of the flash device
-	   through the CDSN IO register, see Software Requirement 11.4 item 5. */
-	dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-	dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-
-	mfr = ReadDOC(docptr, Mil_CDSN_IO);
-	if (doc->interleave)
-		dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
-	id  = ReadDOC(docptr, Mil_CDSN_IO);
-	if (doc->interleave)
-		dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
-
-	dummy = ReadDOC(docptr, Mplus_LastDataRead);
-	dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	/* No response - return failure */
-	if (mfr == 0xff || mfr == 0)
-		return 0;
-
-	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
-		if (id == nand_flash_ids[i].id) {
-			/* Try to identify manufacturer */
-			for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
-				if (nand_manuf_ids[j].id == mfr)
-					break;
-			}
-			printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
-			       "Chip ID: %2.2X (%s:%s)\n", mfr, id,
-			       nand_manuf_ids[j].name, nand_flash_ids[i].name);
-			doc->mfr = mfr;
-			doc->id = id;
-			doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
-			doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
-			break;
-		}
-	}
-
-	if (nand_flash_ids[i].name == NULL)
-		return 0;
-	return 1;
-}
-
-/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
-{
-	int floor, chip;
-	int numchips[MAX_FLOORS_MPLUS];
-	int ret;
-
-	this->numchips = 0;
-	this->mfr = 0;
-	this->id = 0;
-
-	/* Work out the intended interleave setting */
-	this->interleave = 0;
-	if (this->ChipID == DOC_ChipID_DocMilPlus32)
-		this->interleave = 1;
-
-	/* Check the ASIC agrees */
-	if ( (this->interleave << 2) !=
-	     (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
-		u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
-		printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
-		       this->interleave?"on (16-bit)":"off (8-bit)");
-		conf ^= 4;
-		WriteDOC(conf, this->virtadr, Mplus_Configuration);
-	}
-
-	/* For each floor, find the number of valid chips it contains */
-	for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
-		numchips[floor] = 0;
-		for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) {
-			ret = DoC_IdentChip(this, floor, chip);
-			if (ret) {
-				numchips[floor]++;
-				this->numchips++;
-			}
-		}
-	}
-	/* If there are none at all that we recognise, bail */
-	if (!this->numchips) {
-		printk("No flash chips recognised.\n");
-		return;
-	}
-
-	/* Allocate an array to hold the information for each chip */
-	this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
-	if (!this->chips){
-		printk("MTD: No memory for allocating chip info structures\n");
-		return;
-	}
-
-	/* Fill out the chip array with {floor, chipno} for each
-	 * detected chip in the device. */
-	for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) {
-		for (chip = 0 ; chip < numchips[floor] ; chip++) {
-			this->chips[ret].floor = floor;
-			this->chips[ret].chip = chip;
-			this->chips[ret].curadr = 0;
-			this->chips[ret].curmode = 0x50;
-			ret++;
-		}
-	}
-
-	/* Calculate and print the total size of the device */
-	this->totlen = this->numchips * (1 << this->chipshift);
-	printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
-	       this->numchips, this->totlen >> 20);
-}
-
-static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
-{
-	int tmp1, tmp2, retval;
-
-	if (doc1->physadr == doc2->physadr)
-		return 1;
-
-	/* Use the alias resolution register which was set aside for this
-	 * purpose. If its value is the same on both chips, they might
-	 * be the same chip, and we write to one and check for a change in
-	 * the other. It's unclear if this register is usable in the
-	 * DoC 2000 (it's in the Millennium docs), but it seems to work. */
-	tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution);
-	tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
-	if (tmp1 != tmp2)
-		return 0;
-
-	WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution);
-	tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
-	if (tmp2 == (tmp1+1) % 0xff)
-		retval = 1;
-	else
-		retval = 0;
-
-	/* Restore register contents.  May not be necessary, but do it just to
-	 * be safe. */
-	WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution);
-
-	return retval;
-}
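-
-/* Illustrative walk-through (editor's note, not in the original source):
- * if both windows read back 0x12 from the alias resolution register, we
- * write (0x12 + 1) % 0xff == 0x13 through the first window; if the second
- * window then also reads 0x13, both windows map the same chip and the new
- * device is rejected as an alias. */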
-
-/* This routine is looked up by the docprobe code via symbol_get(),
- * which will bump the use count of this module. */
-void DoCMilPlus_init(struct mtd_info *mtd)
-{
-	struct DiskOnChip *this = mtd->priv;
-	struct DiskOnChip *old = NULL;
-
-	/* We must avoid being called twice for the same device. */
-	if (docmilpluslist)
-		old = docmilpluslist->priv;
-
-	while (old) {
-		if (DoCMilPlus_is_alias(this, old)) {
-			printk(KERN_NOTICE "Ignoring DiskOnChip Millennium "
-				"Plus at 0x%lX - already configured\n",
-				this->physadr);
-			iounmap(this->virtadr);
-			kfree(mtd);
-			return;
-		}
-		if (old->nextdoc)
-			old = old->nextdoc->priv;
-		else
-			old = NULL;
-	}
-
-	mtd->name = "DiskOnChip Millennium Plus";
-	printk(KERN_NOTICE "DiskOnChip Millennium Plus found at "
-		"address 0x%lX\n", this->physadr);
-
-	mtd->type = MTD_NANDFLASH;
-	mtd->flags = MTD_CAP_NANDFLASH;
-	mtd->writebufsize = mtd->writesize = 512;
-	mtd->oobsize = 16;
-	mtd->ecc_strength = 2;
-	mtd->owner = THIS_MODULE;
-	mtd->_erase = doc_erase;
-	mtd->_read = doc_read;
-	mtd->_write = doc_write;
-	mtd->_read_oob = doc_read_oob;
-	mtd->_write_oob = doc_write_oob;
-	this->curfloor = -1;
-	this->curchip = -1;
-
-	/* Identify all the chips present. */
-	DoC_ScanChips(this);
-
-	if (!this->totlen) {
-		kfree(mtd);
-		iounmap(this->virtadr);
-	} else {
-		this->nextdoc = docmilpluslist;
-		docmilpluslist = mtd;
-		mtd->size  = this->totlen;
-		mtd->erasesize = this->erasesize;
-		mtd_device_register(mtd, NULL, 0);
-		return;
-	}
-}
-EXPORT_SYMBOL_GPL(DoCMilPlus_init);
-
-#if 0
-static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
-{
-	int i;
-	loff_t fofs;
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem * docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-	unsigned char *bp, buf[1056];
-	char c[32];
-
-	from &= ~0x3ff;
-
-	/* Don't allow read past end of device */
-	if (from >= this->totlen)
-		return -EINVAL;
-
-	DoC_CheckASIC(docptr);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(docptr, NAND_CMD_RESET, 0);
-	DoC_WaitReady(docptr);
-
-	fofs = from;
-	DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
-	DoC_Address(this, 3, fofs, 0, 0x00);
-	WriteDOC(0, docptr, Mplus_FlashControl);
-	DoC_WaitReady(docptr);
-
-	/* disable the ECC engine */
-	WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-
-	/* Read the data via the internal pipeline through CDSN IO
-	   register, see Pipelined Read Operations 11.3 */
-	MemReadDOC(docptr, buf, 1054);
-	buf[1054] = ReadDOC(docptr, Mplus_LastDataRead);
-	buf[1055] = ReadDOC(docptr, Mplus_LastDataRead);
-
-	memset(&c[0], 0, sizeof(c));
-	printk("DUMP OFFSET=%x:\n", (int)from);
-
-	for (i = 0, bp = &buf[0]; (i < 1056); i++) {
-		if ((i % 16) == 0)
-			printk("%08x: ", i);
-		printk(" %02x", *bp);
-		c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.';
-		bp++;
-		if (((i + 1) % 16) == 0)
-			printk("    %s\n", c);
-	}
-	printk("\n");
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	return 0;
-}
-#endif
-
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
-		    size_t *retlen, u_char *buf)
-{
-	int ret, i;
-	volatile char dummy;
-	loff_t fofs;
-	unsigned char syndrome[6], eccbuf[6];
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem * docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[from >> (this->chipshift)];
-
-	/* Don't allow a single read to cross a 512-byte block boundary */
-	if (from + len > ((from | 0x1ff) + 1))
-		len = ((from | 0x1ff) + 1) - from;
-
-	DoC_CheckASIC(docptr);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(docptr, NAND_CMD_RESET, 0);
-	DoC_WaitReady(docptr);
-
-	fofs = from;
-	DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
-	DoC_Address(this, 3, fofs, 0, 0x00);
-	WriteDOC(0, docptr, Mplus_FlashControl);
-	DoC_WaitReady(docptr);
-
-	/* Init the ECC engine, see Reed-Solomon EDC/ECC 11.1. */
-	WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-	WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
-
-	/* Let the caller know we completed it */
-	*retlen = len;
-	ret = 0;
-
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-
-	/* Read the data via the internal pipeline through CDSN IO
-	   register, see Pipelined Read Operations 11.3 */
-	MemReadDOC(docptr, buf, len);
-
-	/* Read the ECC data following raw data */
-	MemReadDOC(docptr, eccbuf, 4);
-	eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
-	eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
-
-	/* Flush the pipeline */
-	dummy = ReadDOC(docptr, Mplus_ECCConf);
-	dummy = ReadDOC(docptr, Mplus_ECCConf);
-
-	/* Check the ECC Status */
-	if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
-		int nb_errors;
-		/* There was an ECC error */
-#ifdef ECC_DEBUG
-		printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
-#endif
-		/* Read the ECC syndrome through the DiskOnChip ECC logic.
-		   These syndrome will be all ZERO when there is no error */
-		for (i = 0; i < 6; i++)
-			syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
-		nb_errors = doc_decode_ecc(buf, syndrome);
-#ifdef ECC_DEBUG
-		printk("ECC Errors corrected: %x\n", nb_errors);
-#endif
-		if (nb_errors < 0) {
-			/* We return error, but have actually done the
-			   read. Not that this can be told to user-space, via
-			   sys_read(), but at least MTD-aware stuff can know
-			   about it by checking *retlen */
-#ifdef ECC_DEBUG
-			printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
-				__FILE__, __LINE__, (int)from);
-			printk("        syndrome= %*phC\n", 6, syndrome);
-			printk("        eccbuf= %*phC\n", 6, eccbuf);
-#endif
-			ret = -EIO;
-		}
-	}
-
-#ifdef PSYCHO_DEBUG
-	printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf);
-#endif
-	/* disable the ECC engine */
-	WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	return ret;
-}
-
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
-		     size_t *retlen, const u_char *buf)
-{
-	int i, before, ret = 0;
-	loff_t fto;
-	volatile char dummy;
-	char eccbuf[6];
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem * docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[to >> (this->chipshift)];
-
-	/* Don't allow writes which aren't exactly one block (512 bytes) */
-	if ((to & 0x1ff) || (len != 0x200))
-		return -EINVAL;
-
-	/* Determine position of OOB flags, before or after data */
-	before = (this->interleave && (to & 0x200));
-
-	DoC_CheckASIC(docptr);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
-	/* Reset the chip, see Software Requirement 11.4 item 1. */
-	DoC_Command(docptr, NAND_CMD_RESET, 0);
-	DoC_WaitReady(docptr);
-
-	/* Set device to appropriate plane of flash */
-	fto = to;
-	WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
-
-	/* On interleaved devices the flags for 2nd half 512 are before data */
-	if (before)
-		fto -= 2;
-
-	/* Issue the Serial Data In command to initiate the Page Program process */
-	DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
-	DoC_Address(this, 3, fto, 0x00, 0x00);
-
-	/* Disable the ECC engine */
-	WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
-	if (before) {
-		/* Write the block status BLOCK_USED (0x5555) */
-		WriteDOC(0x55, docptr, Mil_CDSN_IO);
-		WriteDOC(0x55, docptr, Mil_CDSN_IO);
-	}
-
-	/* Init the ECC engine, see Reed-Solomon EDC/ECC 11.1. */
-	WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
-
-	MemWriteDOC(docptr, (unsigned char *) buf, len);
-
-	/* Write ECC data to flash, the ECC info is generated by
-	   the DiskOnChip ECC logic, see Reed-Solomon EDC/ECC 11.1 */
-	DoC_Delay(docptr, 3);
-
-	/* Read the ECC data through the DiskOnChip ECC logic */
-	for (i = 0; i < 6; i++)
-		eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
-
-	/* disable the ECC engine */
-	WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-
-	/* Write the ECC data to flash */
-	MemWriteDOC(docptr, eccbuf, 6);
-
-	if (!before) {
-		/* Write the block status BLOCK_USED (0x5555) */
-		WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
-		WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
-	}
-
-#ifdef PSYCHO_DEBUG
-	printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
-	       (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
-	       eccbuf[4], eccbuf[5]);
-#endif
-
-	WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-	WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
-	/* Commit the Page Program command and wait for ready
-	   see Software Requirement 11.4 item 1.*/
-	DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
-	DoC_WaitReady(docptr);
-
-	/* Read the status of the flash device through CDSN IO register
-	   see Software Requirement 11.4 item 5.*/
-	DoC_Command(docptr, NAND_CMD_STATUS, 0);
-	dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-	dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-	DoC_Delay(docptr, 2);
-	if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
-		printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
-		/* Error in programming
-		   FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		ret = -EIO;
-	}
-	dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	/* Let the caller know we completed it */
-	*retlen = len;
-
-	return ret;
-}
-
-static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
-			struct mtd_oob_ops *ops)
-{
-	loff_t fofs, base;
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem * docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-	size_t i, size, got, want;
-	uint8_t *buf = ops->oobbuf;
-	size_t len = ops->len;
-
-	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
-	ofs += ops->ooboffs;
-
-	DoC_CheckASIC(docptr);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
-
-	/* disable the ECC engine */
-	WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-	DoC_WaitReady(docptr);
-
-	/* Maximum of 16 bytes in the OOB region, so limit read to that */
-	if (len > 16)
-		len = 16;
-	got = 0;
-	want = len;
-
-	for (i = 0; ((i < 3) && (want > 0)); i++) {
-		/* Figure out which region we are accessing... */
-		fofs = ofs;
-		base = ofs & 0xf;
-		if (!this->interleave) {
-			DoC_Command(docptr, NAND_CMD_READOOB, 0);
-			size = 16 - base;
-		} else if (base < 6) {
-			DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
-			size = 6 - base;
-		} else if (base < 8) {
-			DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0);
-			size = 8 - base;
-		} else {
-			DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0);
-			size = 16 - base;
-		}
-		if (size > want)
-			size = want;
-
-		/* Issue read command */
-		DoC_Address(this, 3, fofs, 0, 0x00);
-		WriteDOC(0, docptr, Mplus_FlashControl);
-		DoC_WaitReady(docptr);
-
-		ReadDOC(docptr, Mplus_ReadPipeInit);
-		ReadDOC(docptr, Mplus_ReadPipeInit);
-		MemReadDOC(docptr, &buf[got], size - 2);
-		buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead);
-		buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead);
-
-		ofs += size;
-		got += size;
-		want -= size;
-	}
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	ops->retlen = len;
-	return 0;
-}
-
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
-			 struct mtd_oob_ops *ops)
-{
-	volatile char dummy;
-	loff_t fofs, base;
-	struct DiskOnChip *this = mtd->priv;
-	void __iomem * docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-	size_t i, size, got, want;
-	int ret = 0;
-	uint8_t *buf = ops->oobbuf;
-	size_t len = ops->len;
-
-	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
-
-	ofs += ops->ooboffs;
-
-	DoC_CheckASIC(docptr);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
-	/* Maximum of 16 bytes in the OOB region, so limit write to that */
-	if (len > 16)
-		len = 16;
-	got = 0;
-	want = len;
-
-	for (i = 0; ((i < 3) && (want > 0)); i++) {
-		/* Reset the chip, see Software Requirement 11.4 item 1. */
-		DoC_Command(docptr, NAND_CMD_RESET, 0);
-		DoC_WaitReady(docptr);
-
-		/* Figure out which region we are accessing... */
-		fofs = ofs;
-		base = ofs & 0x0f;
-		if (!this->interleave) {
-			WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
-			size = 16 - base;
-		} else if (base < 6) {
-			WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
-			size = 6 - base;
-		} else if (base < 8) {
-			WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
-			size = 8 - base;
-		} else {
-			WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
-			size = 16 - base;
-		}
-		if (size > want)
-			size = want;
-
-		/* Issue the Serial Data In command to initiate the Page Program process */
-		DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
-		DoC_Address(this, 3, fofs, 0, 0x00);
-
-		/* Disable the ECC engine */
-		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-
-		/* Write the data via the internal pipeline through CDSN IO
-		   register, see Pipelined Write Operations 11.2 */
-		MemWriteDOC(docptr, (unsigned char *) &buf[got], size);
-		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-
-		/* Commit the Page Program command and wait for ready,
-		   see Software Requirement 11.4 item 1. */
-		DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
-		DoC_WaitReady(docptr);
-
-		/* Read the status of the flash device through CDSN IO register
-		   see Software Requirement 11.4 item 5.*/
-		DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
-		dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-		dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-		DoC_Delay(docptr, 2);
-		if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
-			printk("MTD: Error 0x%x programming oob at 0x%x\n",
-				dummy, (int)ofs);
-			/* FIXME: implement Bad Block Replacement */
-			ops->retlen = 0;
-			ret = -EIO;
-		}
-		dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
-		ofs += size;
-		got += size;
-		want -= size;
-	}
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	ops->retlen = len;
-	return ret;
-}
-
-int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	volatile char dummy;
-	struct DiskOnChip *this = mtd->priv;
-	__u32 ofs = instr->addr;
-	__u32 len = instr->len;
-	void __iomem * docptr = this->virtadr;
-	struct Nand *mychip = &this->chips[ofs >> this->chipshift];
-
-	DoC_CheckASIC(docptr);
-
-	if (len != mtd->erasesize)
-		printk(KERN_WARNING "MTD: Erase not right size (%x != %x)\n",
-		       len, mtd->erasesize);
-
-	/* Find the chip which is to be used and select it */
-	if (this->curfloor != mychip->floor) {
-		DoC_SelectFloor(docptr, mychip->floor);
-		DoC_SelectChip(docptr, mychip->chip);
-	} else if (this->curchip != mychip->chip) {
-		DoC_SelectChip(docptr, mychip->chip);
-	}
-	this->curfloor = mychip->floor;
-	this->curchip = mychip->chip;
-
-	instr->state = MTD_ERASE_PENDING;
-
-	/* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
-	WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
-
-	DoC_Command(docptr, NAND_CMD_RESET, 0x00);
-	DoC_WaitReady(docptr);
-
-	DoC_Command(docptr, NAND_CMD_ERASE1, 0);
-	DoC_Address(this, 2, ofs, 0, 0x00);
-	DoC_Command(docptr, NAND_CMD_ERASE2, 0);
-	DoC_WaitReady(docptr);
-	instr->state = MTD_ERASING;
-
-	/* Read the status of the flash device through CDSN IO register
-	   see Software Requirement 11.4 item 5. */
-	DoC_Command(docptr, NAND_CMD_STATUS, 0);
-	dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-	dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
-	if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
-		printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs);
-		/* FIXME: implement Bad Block Replacement (in nftl.c ??) */
-		instr->state = MTD_ERASE_FAILED;
-	} else {
-		instr->state = MTD_ERASE_DONE;
-	}
-	dummy = ReadDOC(docptr, Mplus_LastDataRead);
-
-	/* Disable flash internally */
-	WriteDOC(0, docptr, Mplus_FlashSelect);
-
-	mtd_erase_callback(instr);
-
-	return 0;
-}
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static void __exit cleanup_doc2001plus(void)
-{
-	struct mtd_info *mtd;
-	struct DiskOnChip *this;
-
-	while ((mtd=docmilpluslist)) {
-		this = mtd->priv;
-		docmilpluslist = this->nextdoc;
-
-		mtd_device_unregister(mtd);
-
-		iounmap(this->virtadr);
-		kfree(this->chips);
-		kfree(mtd);
-	}
-}
-
-module_exit(cleanup_doc2001plus);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
-MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus");
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
deleted file mode 100644
index 4a1c39b..0000000
--- a/drivers/mtd/devices/docecc.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * ECC algorithm for M-Systems disk on chip. We use the excellent Reed-
- * Solomon code of Phil Karn (karn@ka9q.ampr.org) available under the
- * GNU GPL License. The rest is simply to convert the disk on chip
- * syndrome into a standard syndrome.
- *
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/doc2000.h>
-
-#define DEBUG_ECC 0
-/* need to undef it (from asm/termbits.h) */
-#undef B0
-
-#define MM 10 /* Symbol size in bits */
-#define KK (1023-4) /* Number of data symbols per block */
-#define B0 510 /* First root of generator polynomial, alpha form */
-#define PRIM 1 /* power of alpha used to generate roots of generator poly */
-#define	NN ((1 << MM) - 1)
-
-typedef unsigned short dtype;
-
-/* 1+x^3+x^10 */
-static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 };
-
-/* This defines the type used to store an element of the Galois Field
- * used by the code. Make sure this is something larger than a char if
- * anything larger than GF(256) is used.
- *
- * Note: unsigned char will work up to GF(256) but int seems to run
- * faster on the Pentium.
- */
-typedef int gf;
-
-/* No legal value in index form represents zero, so
- * we need a special value for this purpose
- */
-#define A0	(NN)
-
-/* Compute x % NN, where NN is 2**MM - 1,
- * without a slow divide
- */
-static inline gf
-modnn(int x)
-{
-  while (x >= NN) {
-    x -= NN;
-    x = (x >> MM) + (x & NN);
-  }
-  return x;
-}
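-
-/* Worked example (editor's note, not in the original source): with MM == 10,
-   NN == 1023. For x == 2050 the loop runs once: x -= 1023 gives 1027, then
-   (1027 >> 10) + (1027 & 1023) == 1 + 3 == 4, and indeed 2050 mod 1023 == 4. */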
-
-#define	CLEAR(a,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = 0;\
-}
-
-#define	COPY(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define	COPYDOWN(a,b,n) {\
-int ci;\
-for(ci=(n)-1;ci >=0;ci--)\
-(a)[ci] = (b)[ci];\
-}
-
-#define Ldec 1
-
-/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m]
-   lookup tables:  index->polynomial form   alpha_to[] contains j=alpha**i;
-                   polynomial form -> index form  index_of[j=alpha**i] = i
-   alpha=2 is the primitive element of GF(2**m)
-   HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows:
-        Let @ represent the primitive element commonly called "alpha" that
-   is the root of the primitive polynomial p(x). Then in GF(2^m), for any
-   0 <= i <= 2^m-2,
-        @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
-   where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation
-   of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for
-   example the polynomial representation of @^5 would be given by the binary
-   representation of the integer "alpha_to[5]".
-                   Similarly, index_of[] can be used as follows:
-        As above, let @ represent the primitive element of GF(2^m) that is
-   the root of the primitive polynomial p(x). In order to find the power
-   of @ (alpha) that has the polynomial representation
-        a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
-   we consider the integer "i" whose binary representation with a(0) being LSB
-   and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
-   "index_of[i]". Now, @^index_of[i] is that element whose polynomial
-    representation is (a(0),a(1),a(2),...,a(m-1)).
-   NOTE:
-        The element alpha_to[2^m-1] = 0 always signifying that the
-   representation of "@^infinity" = 0 is (0,0,0,...,0).
-        Similarly, the element index_of[0] = A0 always signifying
-   that the power of alpha which has the polynomial representation
-   (0,0,...,0) is "infinity".
-
-*/
-
-static void
-generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
-{
-  register int i, mask;
-
-  mask = 1;
-  Alpha_to[MM] = 0;
-  for (i = 0; i < MM; i++) {
-    Alpha_to[i] = mask;
-    Index_of[Alpha_to[i]] = i;
-    /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */
-    if (Pp[i] != 0)
-      Alpha_to[MM] ^= mask;	/* Bit-wise EXOR operation */
-    mask <<= 1;	/* single left-shift */
-  }
-  Index_of[Alpha_to[MM]] = MM;
-  /*
-   * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by
-   * poly-repr of @^i shifted left one-bit and accounting for any @^MM
-   * term that may occur when poly-repr of @^i is shifted.
-   */
-  mask >>= 1;
-  for (i = MM + 1; i < NN; i++) {
-    if (Alpha_to[i - 1] >= mask)
-      Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1);
-    else
-      Alpha_to[i] = Alpha_to[i - 1] << 1;
-    Index_of[Alpha_to[i]] = i;
-  }
-  Index_of[0] = A0;
-  Alpha_to[NN] = 0;
-}
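-
-/* Note (editor's addition, not in the original source): with MM == 10 the
-   field is GF(2^10), generated by the primitive polynomial
-   p(x) = 1 + x^3 + x^10 encoded in Pp[] above; Alpha_to[] and Index_of[]
-   each hold NN + 1 == 1024 entries, and alpha == 2 has multiplicative
-   order NN == 1023. */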
-
-/*
- * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content
- * of the feedback shift register after having processed the data and
- * the ECC.
- *
- * Return number of symbols corrected, or -1 if codeword is illegal
- * or uncorrectable. If eras_pos is non-null, the detected error locations
- * are written back. NOTE! This array must be at least NN-KK elements long.
- * The corrected values are written in eras_val[]. They must be XORed with
- * the data to retrieve the correct data: data[eras_pos[i]] ^= eras_val[i].
- *
- * First "no_eras" erasures are declared by the calling program. Then, the
- * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
- * If the number of channel errors is not greater than "t_after_eras" the
- * transmitted codeword will be recovered. Details of algorithm can be found
- * in R. Blahut's "Theory ... of Error-Correcting Codes".
-
- * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure
- * will result. The decoder *could* check for this condition, but it would involve
- * extra time on every decoding operation.
- * */
-static int
-eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
-            gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
-            int no_eras)
-{
-  int deg_lambda, el, deg_omega;
-  int i, j, r,k;
-  gf u,q,tmp,num1,num2,den,discr_r;
-  gf lambda[NN-KK + 1], s[NN-KK + 1];	/* Err+Eras Locator poly
-					 * and syndrome poly */
-  gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1];
-  gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK];
-  int syn_error, count;
-
-  syn_error = 0;
-  for(i=0;i<NN-KK;i++)
-      syn_error |= bb[i];
-
-  if (!syn_error) {
-    /* if remainder is zero, data[] is a codeword and there are no
-     * errors to correct. So return data[] unmodified
-     */
-    count = 0;
-    goto finish;
-  }
-
-  for(i=1;i<=NN-KK;i++){
-    s[i] = bb[0];
-  }
-  for(j=1;j<NN-KK;j++){
-    if(bb[j] == 0)
-      continue;
-    tmp = Index_of[bb[j]];
-
-    for(i=1;i<=NN-KK;i++)
-      s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
-  }
-
-  /* undo the feedback register implicit multiplication and convert
-     syndromes to index form */
-
-  for(i=1;i<=NN-KK;i++) {
-      tmp = Index_of[s[i]];
-      if (tmp != A0)
-          tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
-      s[i] = tmp;
-  }
-
-  CLEAR(&lambda[1],NN-KK);
-  lambda[0] = 1;
-
-  if (no_eras > 0) {
-    /* Init lambda to be the erasure locator polynomial */
-    lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])];
-    for (i = 1; i < no_eras; i++) {
-      u = modnn(PRIM*eras_pos[i]);
-      for (j = i+1; j > 0; j--) {
-	tmp = Index_of[lambda[j - 1]];
-	if(tmp != A0)
-	  lambda[j] ^= Alpha_to[modnn(u + tmp)];
-      }
-    }
-#if DEBUG_ECC >= 1
-    /* Test code that verifies the erasure locator polynomial just constructed
-       Needed only for decoder debugging. */
-
-    /* find roots of the erasure location polynomial */
-    for(i=1;i<=no_eras;i++)
-      reg[i] = Index_of[lambda[i]];
-    count = 0;
-    for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
-      q = 1;
-      for (j = 1; j <= no_eras; j++)
-	if (reg[j] != A0) {
-	  reg[j] = modnn(reg[j] + j);
-	  q ^= Alpha_to[reg[j]];
-	}
-      if (q != 0)
-	continue;
-      /* store root and error location number indices */
-      root[count] = i;
-      loc[count] = k;
-      count++;
-    }
-    if (count != no_eras) {
-      printf("\n lambda(x) is WRONG\n");
-      count = -1;
-      goto finish;
-    }
-#if DEBUG_ECC >= 2
-    printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
-    for (i = 0; i < count; i++)
-      printf("%d ", loc[i]);
-    printf("\n");
-#endif
-#endif
-  }
-  for(i=0;i<NN-KK+1;i++)
-    b[i] = Index_of[lambda[i]];
-
-  /*
-   * Begin Berlekamp-Massey algorithm to determine error+erasure
-   * locator polynomial
-   */
-  r = no_eras;
-  el = no_eras;
-  while (++r <= NN-KK) {	/* r is the step number */
-    /* Compute discrepancy at the r-th step in poly-form */
-    discr_r = 0;
-    for (i = 0; i < r; i++){
-      if ((lambda[i] != 0) && (s[r - i] != A0)) {
-	discr_r ^= Alpha_to[modnn(Index_of[lambda[i]] + s[r - i])];
-      }
-    }
-    discr_r = Index_of[discr_r];	/* Index form */
-    if (discr_r == A0) {
-      /* 2 lines below: B(x) <-- x*B(x) */
-      COPYDOWN(&b[1],b,NN-KK);
-      b[0] = A0;
-    } else {
-      /* 7 lines below: T(x) <-- lambda(x) - discr_r*x*b(x) */
-      t[0] = lambda[0];
-      for (i = 0 ; i < NN-KK; i++) {
-	if(b[i] != A0)
-	  t[i+1] = lambda[i+1] ^ Alpha_to[modnn(discr_r + b[i])];
-	else
-	  t[i+1] = lambda[i+1];
-      }
-      if (2 * el <= r + no_eras - 1) {
-	el = r + no_eras - el;
-	/*
-	 * 2 lines below: B(x) <-- inv(discr_r) *
-	 * lambda(x)
-	 */
-	for (i = 0; i <= NN-KK; i++)
-	  b[i] = (lambda[i] == 0) ? A0 : modnn(Index_of[lambda[i]] - discr_r + NN);
-      } else {
-	/* 2 lines below: B(x) <-- x*B(x) */
-	COPYDOWN(&b[1],b,NN-KK);
-	b[0] = A0;
-      }
-      COPY(lambda,t,NN-KK+1);
-    }
-  }
-
-  /* Convert lambda to index form and compute deg(lambda(x)) */
-  deg_lambda = 0;
-  for(i=0;i<NN-KK+1;i++){
-    lambda[i] = Index_of[lambda[i]];
-    if(lambda[i] != A0)
-      deg_lambda = i;
-  }
-  /*
-   * Find roots of the error+erasure locator polynomial by Chien
-   * Search
-   */
-  COPY(&reg[1],&lambda[1],NN-KK);
-  count = 0;		/* Number of roots of lambda(x) */
-  for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) {
-    q = 1;
-    for (j = deg_lambda; j > 0; j--){
-      if (reg[j] != A0) {
-	reg[j] = modnn(reg[j] + j);
-	q ^= Alpha_to[reg[j]];
-      }
-    }
-    if (q != 0)
-      continue;
-    /* store root (index-form) and error location number */
-    root[count] = i;
-    loc[count] = k;
-    /* If we've already found max possible roots,
-     * abort the search to save time
-     */
-    if(++count == deg_lambda)
-      break;
-  }
-  if (deg_lambda != count) {
-    /*
-     * deg(lambda) unequal to number of roots => uncorrectable
-     * error detected
-     */
-    count = -1;
-    goto finish;
-  }
-  /*
-   * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
-   * x**(NN-KK)). in index form. Also find deg(omega).
-   */
-  deg_omega = 0;
-  for (i = 0; i < NN-KK;i++){
-    tmp = 0;
-    j = (deg_lambda < i) ? deg_lambda : i;
-    for(;j >= 0; j--){
-      if ((s[i + 1 - j] != A0) && (lambda[j] != A0))
-	tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])];
-    }
-    if(tmp != 0)
-      deg_omega = i;
-    omega[i] = Index_of[tmp];
-  }
-  omega[NN-KK] = A0;
-
-  /*
-   * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
-   * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
-   */
-  for (j = count-1; j >=0; j--) {
-    num1 = 0;
-    for (i = deg_omega; i >= 0; i--) {
-      if (omega[i] != A0)
-	num1  ^= Alpha_to[modnn(omega[i] + i * root[j])];
-    }
-    num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
-    den = 0;
-
-    /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
-    for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
-      if(lambda[i+1] != A0)
-	den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
-    }
-    if (den == 0) {
-#if DEBUG_ECC >= 1
-      printf("\n ERROR: denominator = 0\n");
-#endif
-      /* Convert to dual- basis */
-      count = -1;
-      goto finish;
-    }
-    /* Apply error to data */
-    if (num1 != 0) {
-        eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])];
-    } else {
-        eras_val[j] = 0;
-    }
-  }
- finish:
-  for(i=0;i<count;i++)
-      eras_pos[i] = loc[i];
-  return count;
-}
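-
-/* Capacity check (editor's note, not in the original source): here
-   NN - KK == 1023 - 1019 == 4 parity symbols, so with no declared erasures
-   the decoder can correct at most floor((NN - KK) / 2) == 2 symbol errors
-   per codeword, matching t_after_eras above with no_eras == 0. */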
-
-/***************************************************************************/
-/* The DOC specific code begins here */
-
-#define SECTOR_SIZE 512
-/* The sector bytes are packed into NB_DATA MM-bit words */
-#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)
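-/* Arithmetic check (editor's note): ((512 + 1) * 8 + 6) / 10 == 4110 / 10,
-   so NB_DATA == 411 ten-bit words. */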
-
-/*
- * Correct the errors in 'sector[]' by using 'ecc1[]', which is the
- * content of the feedback shift register applied to the sector and
- * the ECC. Return the number of errors corrected (and correct them in
- * sector), or -1 on error.
- */
-int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
-{
-    int parity, i, nb_errors;
-    gf bb[NN - KK + 1];
-    gf error_val[NN-KK];
-    int error_pos[NN-KK], pos, bitpos, index, val;
-    dtype *Alpha_to, *Index_of;
-
-    /* init log and exp tables here to save memory. However, it is slower */
-    Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
-    if (!Alpha_to)
-        return -1;
-
-    Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
-    if (!Index_of) {
-        kfree(Alpha_to);
-        return -1;
-    }
-
-    generate_gf(Alpha_to, Index_of);
-
-    parity = ecc1[1];
-
-    bb[0] =  (ecc1[4] & 0xff) | ((ecc1[5] & 0x03) << 8);
-    bb[1] = ((ecc1[5] & 0xfc) >> 2) | ((ecc1[2] & 0x0f) << 6);
-    bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
-    bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);
-
-    nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
-                            error_val, error_pos, 0);
-    if (nb_errors <= 0)
-        goto the_end;
-
-    /* correct the errors */
-    for(i=0;i<nb_errors;i++) {
-        pos = error_pos[i];
-        if (pos >= NB_DATA && pos < KK) {
-            nb_errors = -1;
-            goto the_end;
-        }
-        if (pos < NB_DATA) {
-            /* extract bit position (MSB first) */
-            pos = 10 * (NB_DATA - 1 - pos) - 6;
-            /* now correct the following 10 bits. At most two bytes
-               can be modified since pos is even */
-            index = (pos >> 3) ^ 1;
-            bitpos = pos & 7;
-            if ((index >= 0 && index < SECTOR_SIZE) ||
-                index == (SECTOR_SIZE + 1)) {
-                val = error_val[i] >> (2 + bitpos);
-                parity ^= val;
-                if (index < SECTOR_SIZE)
-                    sector[index] ^= val;
-            }
-            index = ((pos >> 3) + 1) ^ 1;
-            bitpos = (bitpos + 10) & 7;
-            if (bitpos == 0)
-                bitpos = 8;
-            if ((index >= 0 && index < SECTOR_SIZE) ||
-                index == (SECTOR_SIZE + 1)) {
-                val = error_val[i] << (8 - bitpos);
-                parity ^= val;
-                if (index < SECTOR_SIZE)
-                    sector[index] ^= val;
-            }
-        }
-    }
-
-    /* use parity to test extra errors */
-    if ((parity & 0xff) != 0)
-        nb_errors = -1;
-
- the_end:
-    kfree(Alpha_to);
-    kfree(Index_of);
-    return nb_errors;
-}
-
-EXPORT_SYMBOL_GPL(doc_decode_ecc);
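-
-/* Typical use (editor's sketch, mirroring the doc_read() caller in
-   doc2001plus.c): pass the 512-byte sector and the six syndrome bytes read
-   from the ECC logic; a negative return means the sector is uncorrectable
-   (callers report -EIO), otherwise the corrected bits have already been
-   applied to sector[] in place. */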
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
-MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 8510ccb..3e1b0a0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -123,7 +123,7 @@
 	doc_writeb(docg3, addr, DOC_FLASHADDRESS);
 }
 
-static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL };
 
 static int doc_register_readb(struct docg3 *docg3, int reg)
 {
@@ -2144,18 +2144,7 @@
 	.remove		= __exit_p(docg3_release),
 };
 
-static int __init docg3_init(void)
-{
-	return platform_driver_probe(&g3_driver, docg3_probe);
-}
-module_init(docg3_init);
-
-
-static void __exit docg3_exit(void)
-{
-	platform_driver_unregister(&g3_driver);
-}
-module_exit(docg3_exit);
+module_platform_driver_probe(g3_driver, docg3_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
deleted file mode 100644
index 88b3fd3..0000000
--- a/drivers/mtd/devices/docprobe.c
+++ /dev/null
@@ -1,325 +0,0 @@
-
-/* Linux driver for Disk-On-Chip devices			*/
-/* Probe routines common to all DoC devices			*/
-/* (C) 1999 Machine Vision Holdings, Inc.			*/
-/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>		*/
-
-
-/* DOC_PASSIVE_PROBE:
-   In order to ensure that the BIOS checksum is correct at boot time, and
-   hence that the onboard BIOS extension gets executed, the DiskOnChip
-   goes into reset mode when it is read sequentially: all registers
-   return 0xff until the chip is woken up again by writing to the
-   DOCControl register.
-
-   Unfortunately, this means that the probe for the DiskOnChip is unsafe,
-   because one of the first things it does is write to where it thinks
-   the DOCControl register should be - which may well be shared memory
-   for another device. I've had machines which lock up when this is
-   attempted. Hence the possibility to do a passive probe, which will fail
-   to detect a chip in reset mode, but is at least guaranteed not to lock
-   the machine.
-
-   If you have this problem, uncomment the following line:
-#define DOC_PASSIVE_PROBE
-*/
-
-
-/* DOC_SINGLE_DRIVER:
-   Millennium driver has been merged into DOC2000 driver.
-
-   The old Millennium-only driver has been retained just in case there
-   are problems with the new code. If the combined driver doesn't work
-   for you, you can try the old one by undefining DOC_SINGLE_DRIVER
-   below and also enabling it in your configuration. If this fixes the
-   problems, please send a report to the MTD mailing list at
-   <linux-mtd@lists.infradead.org>.
-*/
-#define DOC_SINGLE_DRIVER
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/errno.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/doc2000.h>
-
-
-static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
-module_param(doc_config_location, ulong, 0);
-MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
-
-static unsigned long __initdata doc_locations[] = {
-#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_DOCPROBE_HIGH
-	0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
-	0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
-	0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
-	0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
-	0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else /*  CONFIG_MTD_DOCPROBE_HIGH */
-	0xc8000, 0xca000, 0xcc000, 0xce000,
-	0xd0000, 0xd2000, 0xd4000, 0xd6000,
-	0xd8000, 0xda000, 0xdc000, 0xde000,
-	0xe0000, 0xe2000, 0xe4000, 0xe6000,
-	0xe8000, 0xea000, 0xec000, 0xee000,
-#endif /*  CONFIG_MTD_DOCPROBE_HIGH */
-#endif
-	0xffffffff };
-
-/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */
-
-static inline int __init doccheck(void __iomem *potential, unsigned long physadr)
-{
-	void __iomem *window=potential;
-	unsigned char tmp, tmpb, tmpc, ChipID;
-#ifndef DOC_PASSIVE_PROBE
-	unsigned char tmp2;
-#endif
-
-	/* Routine copied from the Linux DOC driver */
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
-	/* Check for the 0x55 0xAA signature at the beginning of the window;
-	   this is no longer true once we remove the IPL (for Millennium). */
-	if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa)
-		return 0;
-#endif /* CONFIG_MTD_DOCPROBE_55AA */
-
-#ifndef DOC_PASSIVE_PROBE
-	/* It's not possible to cleanly detect the DiskOnChip - the
-	 * bootup procedure will put the device into reset mode, and
-	 * it's not possible to talk to it without actually writing
-	 * to the DOCControl register. So we store the current contents
-	 * of the DOCControl register's location, in case we later decide
-	 * that it's not a DiskOnChip, and want to put it back how we
-	 * found it.
-	 */
-	tmp2 = ReadDOC(window, DOCControl);
-
-	/* Reset the DiskOnChip ASIC */
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
-		 window, DOCControl);
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
-		 window, DOCControl);
-
-	/* Enable the DiskOnChip ASIC */
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
-		 window, DOCControl);
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
-		 window, DOCControl);
-#endif /* !DOC_PASSIVE_PROBE */
-
-	/* We need to read the ChipID register four times. For some
-	   newer DiskOnChip 2000 units, the first three reads will
-	   return the DiskOnChip Millennium ident. Don't ask. */
-	ChipID = ReadDOC(window, ChipID);
-
-	switch (ChipID) {
-	case DOC_ChipID_Doc2k:
-		/* Check the TOGGLE bit in the ECC register */
-		tmp  = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
-		tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
-		tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
-		if (tmp != tmpb && tmp == tmpc)
-				return ChipID;
-		break;
-
-	case DOC_ChipID_DocMil:
-		/* Check for the new 2000 with Millennium ASIC */
-		ReadDOC(window, ChipID);
-		ReadDOC(window, ChipID);
-		if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
-			ChipID = DOC_ChipID_Doc2kTSOP;
-
-		/* Check the TOGGLE bit in the ECC register */
-		tmp  = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
-		tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
-		tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
-		if (tmp != tmpb && tmp == tmpc)
-				return ChipID;
-		break;
-
-	case DOC_ChipID_DocMilPlus16:
-	case DOC_ChipID_DocMilPlus32:
-	case 0:
-		/* Possible Millennium+, need to do more checks */
-#ifndef DOC_PASSIVE_PROBE
-		/* Possibly release from power down mode */
-		for (tmp = 0; (tmp < 4); tmp++)
-			ReadDOC(window, Mplus_Power);
-
-		/* Reset the DiskOnChip ASIC */
-		tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
-			DOC_MODE_BDECT;
-		WriteDOC(tmp, window, Mplus_DOCControl);
-		WriteDOC(~tmp, window, Mplus_CtrlConfirm);
-
-		mdelay(1);
-		/* Enable the DiskOnChip ASIC */
-		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
-			DOC_MODE_BDECT;
-		WriteDOC(tmp, window, Mplus_DOCControl);
-		WriteDOC(~tmp, window, Mplus_CtrlConfirm);
-		mdelay(1);
-#endif /* !DOC_PASSIVE_PROBE */
-
-		ChipID = ReadDOC(window, ChipID);
-
-		switch (ChipID) {
-		case DOC_ChipID_DocMilPlus16:
-		case DOC_ChipID_DocMilPlus32:
-			/* Check the TOGGLE bit in the toggle register */
-			tmp  = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
-			tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
-			tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
-			if (tmp != tmpb && tmp == tmpc)
-					return ChipID;
-		default:
-			break;
-		}
-		/* FALL THRU */
-
-	default:
-
-#ifdef CONFIG_MTD_DOCPROBE_55AA
-		printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
-		       ChipID, physadr);
-#endif
-#ifndef DOC_PASSIVE_PROBE
-		/* Put back the contents of the DOCControl register, in case it's not
-		 * actually a DiskOnChip.
-		 */
-		WriteDOC(tmp2, window, DOCControl);
-#endif
-		return 0;
-	}
-
-	printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n");
-
-#ifndef DOC_PASSIVE_PROBE
-	/* Put back the contents of the DOCControl register: it's not a DiskOnChip */
-	WriteDOC(tmp2, window, DOCControl);
-#endif
-	return 0;
-}
-
-static int docfound;
-
-extern void DoC2k_init(struct mtd_info *);
-extern void DoCMil_init(struct mtd_info *);
-extern void DoCMilPlus_init(struct mtd_info *);
-
-static void __init DoC_Probe(unsigned long physadr)
-{
-	void __iomem *docptr;
-	struct DiskOnChip *this;
-	struct mtd_info *mtd;
-	int ChipID;
-	char namebuf[15];
-	char *name = namebuf;
-	void (*initroutine)(struct mtd_info *) = NULL;
-
-	docptr = ioremap(physadr, DOC_IOREMAP_LEN);
-
-	if (!docptr)
-		return;
-
-	if ((ChipID = doccheck(docptr, physadr))) {
-		if (ChipID == DOC_ChipID_Doc2kTSOP) {
-			/* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
-			printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
-			iounmap(docptr);
-			return;
-		}
-		docfound = 1;
-		mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-		if (!mtd) {
-			printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
-			iounmap(docptr);
-			return;
-		}
-
-		this = (struct DiskOnChip *)(&mtd[1]);
-		mtd->priv = this;
-		this->virtadr = docptr;
-		this->physadr = physadr;
-		this->ChipID = ChipID;
-		sprintf(namebuf, "with ChipID %2.2X", ChipID);
-
-		switch(ChipID) {
-		case DOC_ChipID_Doc2kTSOP:
-			name="2000 TSOP";
-			initroutine = symbol_request(DoC2k_init);
-			break;
-
-		case DOC_ChipID_Doc2k:
-			name="2000";
-			initroutine = symbol_request(DoC2k_init);
-			break;
-
-		case DOC_ChipID_DocMil:
-			name="Millennium";
-#ifdef DOC_SINGLE_DRIVER
-			initroutine = symbol_request(DoC2k_init);
-#else
-			initroutine = symbol_request(DoCMil_init);
-#endif /* DOC_SINGLE_DRIVER */
-			break;
-
-		case DOC_ChipID_DocMilPlus16:
-		case DOC_ChipID_DocMilPlus32:
-			name="MillenniumPlus";
-			initroutine = symbol_request(DoCMilPlus_init);
-			break;
-		}
-
-		if (initroutine) {
-			(*initroutine)(mtd);
-			symbol_put_addr(initroutine);
-			return;
-		}
-		printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
-		kfree(mtd);
-	}
-	iounmap(docptr);
-}
-
-
-/****************************************************************************
- *
- * Module stuff
- *
- ****************************************************************************/
-
-static int __init init_doc(void)
-{
-	int i;
-
-	if (doc_config_location) {
-		printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
-		DoC_Probe(doc_config_location);
-	} else {
-		for (i=0; (doc_locations[i] != 0xffffffff); i++) {
-			DoC_Probe(doc_locations[i]);
-		}
-	}
-	/* No banner message any more. Print a message if no DiskOnChip
-	   found, so the user knows we at least tried. */
-	if (!docfound)
-		printk(KERN_INFO "No recognised DiskOnChip devices found\n");
-	return -EAGAIN;
-}
-
-module_init(init_doc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices");
-
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 2ec5da9..dccef9fd 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -81,14 +81,21 @@
  * @dev:	ELM device
  * @bch_type:	Type of BCH ecc
  */
-void elm_config(struct device *dev, enum bch_ecc bch_type)
+int elm_config(struct device *dev, enum bch_ecc bch_type)
 {
 	u32 reg_val;
 	struct elm_info *info = dev_get_drvdata(dev);
 
+	if (!info) {
+		dev_err(dev, "Unable to configure elm - device not probed?\n");
+		return -ENODEV;
+	}
+
 	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
 	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
 	info->bch_type = bch_type;
+
+	return 0;
 }
 EXPORT_SYMBOL(elm_config);
 
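Since elm_config() now reports failure instead of dereferencing a NULL drvdata pointer, callers are expected to propagate the error. A minimal sketch of the calling pattern this implies (the setup function name is illustrative and not part of this patch; BCH8_ECC is one of the enum bch_ecc values):

	/* Hypothetical caller: forward elm_config()'s new error code. */
	static int my_nand_setup_elm(struct device *elm_dev)
	{
		int err;

		err = elm_config(elm_dev, BCH8_ECC);
		if (err < 0)
			return err;	/* -ENODEV until the ELM device has probed */

		return 0;
	}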
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 5b6b072..2f3d2a5 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -681,6 +681,7 @@
 	u16		flags;
 #define	SECT_4K		0x01		/* OPCODE_BE_4K works uniformly */
 #define	M25P_NO_ERASE	0x02		/* No erase command needed */
+#define	SST_WRITE	0x04		/* use SST byte programming */
 };
 
 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
@@ -728,6 +729,7 @@
 	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) },
 	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
 	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
 
 	/* Everspin */
 	{ "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2) },
@@ -740,7 +742,6 @@
 	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
 	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
 	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
-	{ "n25q064",  INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
 
 	/* Macronix */
 	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
@@ -753,8 +754,10 @@
 	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
 	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
 	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) },
 
 	/* Micron */
+	{ "n25q064",  INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
 	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
 	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
 	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
@@ -781,14 +784,15 @@
 	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
 
 	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
-	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K) },
-	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
-	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
-	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
-	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K) },
-	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K) },
-	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K) },
-	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K) },
+	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
+	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
+	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
+	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
+	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
 
 	/* ST Microelectronics -- newer production may have feature updates */
 	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
@@ -838,6 +842,7 @@
 	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
 	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
 	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
 	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
 
 	/* Catalyst / On Semiconductor -- non-JEDEC */
@@ -1000,7 +1005,7 @@
 	}
 
 	/* sst flash chips use AAI word program */
-	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
+	if (info->flags & SST_WRITE)
 		flash->mtd._write = sst_write;
 	else
 		flash->mtd._write = m25p80_write;
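Write-path selection now keys off a per-chip SST_WRITE flag rather than the JEDEC manufacturer ID, because newer SST parts such as the sst25vf064c take ordinary page-program commands instead of AAI word program. In outline, the table entry opts a chip in and the probe path dispatches on the flag:

	/* Table entry opts into AAI programming explicitly... */
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },

	/* ...and probe dispatches on the flag, not on CFI_MFR_SST: */
	if (info->flags & SST_WRITE)
		flash->mtd._write = sst_write;
	else
		flash->mtd._write = m25p80_write;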
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 945c9f76..28779b6 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -105,8 +105,6 @@
 	{ .compatible = "atmel,dataflash", },
 	{ /* sentinel */ }
 };
-#else
-#define dataflash_dt_ids NULL
 #endif
 
 /* ......................................................................... */
@@ -914,7 +912,7 @@
 	.driver = {
 		.name		= "mtd_dataflash",
 		.owner		= THIS_MODULE,
-		.of_match_table = dataflash_dt_ids,
+		.of_match_table = of_match_ptr(dataflash_dt_ids),
 	},
 
 	.probe		= dataflash_probe,
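The of_match_ptr() helper makes the hand-rolled "#define dataflash_dt_ids NULL" fallback redundant: it evaluates to the table when CONFIG_OF is set and to NULL otherwise, so the driver struct can reference the table unconditionally. Simplified from <linux/of.h>:

	#ifdef CONFIG_OF
	#define of_match_ptr(ptr)	(ptr)
	#else
	#define of_match_ptr(ptr)	NULL
	#endif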
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 3ed17c4..bed9d58 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -249,22 +249,6 @@
 	help
 	  Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
 
-config MTD_DILNETPC
-	tristate "CFI Flash device mapped on DIL/Net PC"
-	depends on X86 && MTD_CFI_INTELEXT && BROKEN
-	help
-	  MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
-	  For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
- 	  and <http://www.ssv-embedded.de/ssv/pc104/p170.htm>
-
-config MTD_DILNETPC_BOOTSIZE
-	hex "Size of DIL/Net PC flash boot partition"
-	depends on MTD_DILNETPC
-	default "0x80000"
-	help
-	  The amount of space taken up by the kernel or Etherboot
-	  on the DIL/Net PC flash chips.
-
 config MTD_L440GX
 	tristate "BIOS flash chip on Intel L440GX boards"
 	depends on X86 && MTD_JEDECPROBE
@@ -274,42 +258,6 @@
 
 	  BE VERY CAREFUL.
 
-config MTD_TQM8XXL
-	tristate "CFI Flash device mapped on TQM8XXL"
-	depends on MTD_CFI && TQM8xxL
-	help
-	  The TQM8xxL PowerPC board has up to two banks of CFI-compliant
-	  chips, currently AMD ones. This 'mapping' driver supports
-	  that arrangement, allowing the CFI probe and command set driver
-	  code to communicate with the chips on the TQM8xxL board. More at
-	  <http://www.denx.de/wiki/PPCEmbedded/>.
-
-config MTD_RPXLITE
-	tristate "CFI Flash device mapped on RPX Lite or CLLF"
-	depends on MTD_CFI && (RPXCLASSIC || RPXLITE)
-	help
-	  The RPXLite PowerPC board has CFI-compliant chips mapped in
-	  a strange sparse mapping. This 'mapping' driver supports that
-	  arrangement, allowing the CFI probe and command set driver code
-	  to communicate with the chips on the RPXLite board. More at
-	  <http://www.embeddedplanet.com/>.
-
-config MTD_MBX860
-	tristate "System flash on MBX860 board"
-	depends on MTD_CFI && MBX
-	help
-	  This enables access routines for the flash chips on the Motorola
-	  MBX860 board. If you have one of these boards and would like
-	  to use the flash chips on it, say 'Y'.
-
-config MTD_DBOX2
-	tristate "CFI Flash device mapped on D-Box2"
-	depends on DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD
-	help
-	  This enables access routines for the flash chips on the Nokia/Sagem
-	  D-Box 2 board. If you have one of these boards and would like to use
-	  the flash chips on it, say 'Y'.
-
 config MTD_CFI_FLAGADM
 	tristate "CFI Flash device mapping on FlagaDM"
 	depends on 8xx && MTD_CFI
@@ -349,15 +297,6 @@
 	  IXDP425 and Coyote. If you have an IXP4xx based board and
 	  would like to use the flash chips on it, say 'Y'.
 
-config MTD_IXP2000
-	tristate "CFI Flash device mapped on Intel IXP2000 based systems"
-	depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000
-	help
-	  This enables MTD access to flash devices on platforms based
-	  on Intel's IXP2000 family of network processors. If you have an
-	  IXP2000 based board and would like to use the flash chips on it,
-	  say 'Y'.
-
 config MTD_AUTCPU12
 	bool "NV-RAM mapping AUTCPU12 board"
 	depends on ARCH_AUTCPU12
@@ -372,13 +311,6 @@
 	  This enables access to the NOR Flash on the impA7 board of
 	  implementa GmbH. If you have such a board, say 'Y' here.
 
-config MTD_H720X
-	tristate "Hynix evaluation board mappings"
-	depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
-	help
-	  This enables access to the flash chips on the Hynix evaluation boards.
-	  If you have such a board, say 'Y'.
-
 # This needs CFI or JEDEC, depending on the cards found.
 config MTD_PCI
 	tristate "PCI MTD driver"
@@ -419,7 +351,7 @@
 
 config MTD_GPIO_ADDR
 	tristate "GPIO-assisted Flash Chip Support"
-	depends on GENERIC_GPIO || GPIOLIB
+	depends on GPIOLIB
 	depends on MTD_COMPLEX_MAPPINGS
 	help
 	  Map driver which allows flashes to be partially physically addressed
@@ -433,15 +365,6 @@
 	help
 	  Map driver to support image based filesystems for uClinux.
 
-config MTD_DMV182
-        tristate "Map driver for Dy-4 SVME/DMV-182 board."
-        depends on DMV182
-	select MTD_MAP_BANK_WIDTH_32
-	select MTD_CFI_I8
-	select MTD_CFI_AMDSTD
-        help
-          Map driver for Dy-4 SVME/DMV-182 board.
-
 config MTD_INTEL_VR_NOR
 	tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0"
 	depends on PCI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 4ded287..395a124 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -9,7 +9,6 @@
 # Chip mappings
 obj-$(CONFIG_MTD_CFI_FLAGADM)	+= cfi_flagadm.o
 obj-$(CONFIG_MTD_DC21285)	+= dc21285.o
-obj-$(CONFIG_MTD_DILNETPC)	+= dilnetpc.o
 obj-$(CONFIG_MTD_L440GX)	+= l440gx.o
 obj-$(CONFIG_MTD_AMD76XROM)	+= amd76xrom.o
 obj-$(CONFIG_MTD_ESB2ROM)	+= esb2rom.o
@@ -17,15 +16,12 @@
 obj-$(CONFIG_MTD_CK804XROM)	+= ck804xrom.o
 obj-$(CONFIG_MTD_TSUNAMI)	+= tsunami_flash.o
 obj-$(CONFIG_MTD_PXA2XX)	+= pxa2xx-flash.o
-obj-$(CONFIG_MTD_MBX860)	+= mbx860.o
 obj-$(CONFIG_MTD_OCTAGON)	+= octagon-5066.o
 obj-$(CONFIG_MTD_PHYSMAP)	+= physmap.o
 obj-$(CONFIG_MTD_PHYSMAP_OF)	+= physmap_of.o
 obj-$(CONFIG_MTD_PISMO)		+= pismo.o
 obj-$(CONFIG_MTD_PMC_MSP_EVM)   += pmcmsp-flash.o
 obj-$(CONFIG_MTD_PCMCIA)	+= pcmciamtd.o
-obj-$(CONFIG_MTD_RPXLITE)	+= rpxlite.o
-obj-$(CONFIG_MTD_TQM8XXL)	+= tqm8xxl.o
 obj-$(CONFIG_MTD_SA1100)	+= sa1100-flash.o
 obj-$(CONFIG_MTD_SBC_GXX)	+= sbc_gxx.o
 obj-$(CONFIG_MTD_SC520CDP)	+= sc520cdp.o
@@ -34,7 +30,6 @@
 obj-$(CONFIG_MTD_SUN_UFLASH)	+= sun_uflash.o
 obj-$(CONFIG_MTD_VMAX)		+= vmax301.o
 obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
-obj-$(CONFIG_MTD_DBOX2)		+= dbox2-flash.o
 obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
 obj-$(CONFIG_MTD_PCI)		+= pci.o
 obj-$(CONFIG_MTD_AUTCPU12)	+= autcpu12-nvram.o
@@ -42,10 +37,7 @@
 obj-$(CONFIG_MTD_UCLINUX)	+= uclinux.o
 obj-$(CONFIG_MTD_NETtel)	+= nettel.o
 obj-$(CONFIG_MTD_SCB2_FLASH)	+= scb2_flash.o
-obj-$(CONFIG_MTD_H720X)		+= h720x-flash.o
 obj-$(CONFIG_MTD_IXP4XX)	+= ixp4xx.o
-obj-$(CONFIG_MTD_IXP2000)	+= ixp2000.o
-obj-$(CONFIG_MTD_DMV182)	+= dmv182.o
 obj-$(CONFIG_MTD_PLATRAM)	+= plat-ram.o
 obj-$(CONFIG_MTD_INTEL_VR_NOR)	+= intel_vr_nor.o
 obj-$(CONFIG_MTD_BFIN_ASYNC)	+= bfin-async-flash.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index f833edf..319b04a 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -122,7 +122,8 @@
 	switch_back(state);
 }
 
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+	"cmdlinepart", "RedBoot", NULL };
 
 static int bfin_flash_probe(struct platform_device *pdev)
 {
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 586a1c7..0455166 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -308,8 +308,7 @@
 
  out:
 	/* Free any left over map structures */
-	if (map)
-		kfree(map);
+	kfree(map);
 
 	/* See if I have any map structures */
 	if (list_empty(&window->maps)) {
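The dropped guard relies on kfree() being defined as a no-op for a NULL argument, so the unconditional call is equivalent and shorter:

	kfree(map);	/* safe: kfree(NULL) does nothing */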
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
deleted file mode 100644
index 85bdece..0000000
--- a/drivers/mtd/maps/dbox2-flash.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * D-Box 2 flash driver
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/* partition_info gives details on the logical partitions that the single
- * flash device is split into. If the size is zero we use up to the end of
- * the device. */
-static struct mtd_partition partition_info[]= {
-	{
-	.name		= "BR bootloader",
-	.size		= 128 * 1024,
-	.offset		= 0,
-	.mask_flags	= MTD_WRITEABLE
-	},
-	{
-	.name		= "FLFS (U-Boot)",
-	.size		= 128 * 1024,
-	.offset		= MTDPART_OFS_APPEND,
-	.mask_flags	= 0
-	},
-	{
-	.name		= "Root (SquashFS)",
-	.size		= 7040 * 1024,
-	.offset		= MTDPART_OFS_APPEND,
-	.mask_flags	= 0
-	},
-	{
-	.name		= "var (JFFS2)",
-	.size		= 896 * 1024,
-	.offset		= MTDPART_OFS_APPEND,
-	.mask_flags	= 0
-	},
-	{
-	.name		= "Flash without bootloader",
-	.size		= MTDPART_SIZ_FULL,
-	.offset		= 128 * 1024,
-	.mask_flags	= 0
-	},
-	{
-	.name		= "Complete Flash",
-	.size		= MTDPART_SIZ_FULL,
-	.offset		= 0,
-	.mask_flags	= MTD_WRITEABLE
-	}
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-#define WINDOW_ADDR 0x10000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-
-struct map_info dbox2_flash_map = {
-	.name		= "D-Box 2 flash memory",
-	.size		= WINDOW_SIZE,
-	.bankwidth	= 4,
-	.phys		= WINDOW_ADDR,
-};
-
-static int __init init_dbox2_flash(void)
-{
-       	printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
-	dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
-	if (!dbox2_flash_map.virt) {
-		printk("Failed to ioremap\n");
-		return -EIO;
-	}
-	simple_map_init(&dbox2_flash_map);
-
-	// Probe for dual Intel 28F320 or dual AMD
-	mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
-	if (!mymtd) {
-	    // Probe for single Intel 28F640
-	    dbox2_flash_map.bankwidth = 2;
-
-	    mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
-	}
-
-	if (mymtd) {
-		mymtd->owner = THIS_MODULE;
-
-                /* Create MTD devices for each partition. */
-		mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
-		return 0;
-	}
-
-	iounmap((void *)dbox2_flash_map.virt);
-	return -ENXIO;
-}
-
-static void __exit cleanup_dbox2_flash(void)
-{
-	if (mymtd) {
-		mtd_device_unregister(mymtd);
-		map_destroy(mymtd);
-	}
-	if (dbox2_flash_map.virt) {
-		iounmap((void *)dbox2_flash_map.virt);
-		dbox2_flash_map.virt = 0;
-	}
-}
-
-module_init(init_dbox2_flash);
-module_exit(cleanup_dbox2_flash);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
-MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 080f060..f8a7dd1 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -143,9 +143,8 @@
 	.copy_from = dc21285_copy_from,
 };
 
-
 /* Partition stuff */
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 static int __init init_dc21285(void)
 {
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
deleted file mode 100644
index 3e393f0..0000000
--- a/drivers/mtd/maps/dilnetpc.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP"
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
- * featuring the AMD Elan SC410 processor. There are two variants of this
- * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
- * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs
- * flash and 16 megs of RAM.
- * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm
- * and http://www.ssv-embedded.de/ssv/pc104/p170.htm
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <asm/io.h>
-
-/*
-** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
-** Destroying any of these blocks transforms the DNPC into
-** a paperweight (albeit not a very useful one, considering
-** it only weighs a few grams).
-**
-** Therefore, the BIOS blocks must never be erased or written to
-** except by people who know exactly what they are doing (e.g.
-** to install a BIOS update). These partitions are marked read-only
-** by default, but can be made read/write by undefining
-** DNPC_BIOS_BLOCKS_WRITEPROTECTED:
-*/
-#define DNPC_BIOS_BLOCKS_WRITEPROTECTED
-
-/*
-** The ID string (in ROM) is checked to determine whether we
-** are running on a DNP/1486 or ADNP/1486
-*/
-#define BIOSID_BASE	0x000fe100
-
-#define ID_DNPC	"DNP1486"
-#define ID_ADNP	"ADNP1486"
-
-/*
-** Address where the flash should appear in CPU space
-*/
-#define FLASH_BASE	0x2000000
-
-/*
-** Chip Setup and Control (CSC) indexed register space
-*/
-#define CSC_INDEX	0x22
-#define CSC_DATA	0x23
-
-#define CSC_MMSWAR	0x30	/* MMS window C-F attributes register */
-#define CSC_MMSWDSR	0x31	/* MMS window C-F device select register */
-
-#define CSC_RBWR	0xa7	/* GPIO Read-Back/Write Register B */
-
-#define CSC_CR		0xd0	/* internal I/O device disable/Echo */
-				/* Z-bus/configuration register */
-
-#define CSC_PCCMDCR	0xf1	/* PC card mode and DMA control register */
-
-
-/*
-** PC Card indexed register space:
-*/
-
-#define PCC_INDEX	0x3e0
-#define PCC_DATA	0x3e1
-
-#define PCC_AWER_B		0x46	/* Socket B Address Window enable register */
-#define PCC_MWSAR_1_Lo	0x58	/* memory window 1 start address low register */
-#define PCC_MWSAR_1_Hi	0x59	/* memory window 1 start address high register */
-#define PCC_MWEAR_1_Lo	0x5A	/* memory window 1 stop address low register */
-#define PCC_MWEAR_1_Hi	0x5B	/* memory window 1 stop address high register */
-#define PCC_MWAOR_1_Lo	0x5C	/* memory window 1 address offset low register */
-#define PCC_MWAOR_1_Hi	0x5D	/* memory window 1 address offset high register */
-
-
-/*
-** Access to SC4x0's Chip Setup and Control (CSC)
-** and PC Card (PCC) indexed registers:
-*/
-static inline void setcsc(int reg, unsigned char data)
-{
-	outb(reg, CSC_INDEX);
-	outb(data, CSC_DATA);
-}
-
-static inline unsigned char getcsc(int reg)
-{
-	outb(reg, CSC_INDEX);
-	return(inb(CSC_DATA));
-}
-
-static inline void setpcc(int reg, unsigned char data)
-{
-	outb(reg, PCC_INDEX);
-	outb(data, PCC_DATA);
-}
-
-static inline unsigned char getpcc(int reg)
-{
-	outb(reg, PCC_INDEX);
-	return(inb(PCC_DATA));
-}
-
-
-/*
-************************************************************
-** Enable access to DIL/NetPC's flash by mapping it into
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size)
-{
-	unsigned long flash_end = flash_base + flash_size - 1;
-
-	/*
-	** enable setup of MMS windows C-F:
-	*/
-	/* - enable PC Card indexed register space */
-	setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
-	/* - set PC Card controller to operate in standard mode */
-	setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1);
-
-	/*
-	** Program base address and end address of window
-	** where the flash ROM should appear in CPU address space
-	*/
-	setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff);
-	setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f);
-	setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff);
-	setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f);
-
-	/* program offset of first flash location to appear in this window (0) */
-	setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff);
-	setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f);
-
-	/* set attributes for MMS window C: non-cacheable, write-enabled */
-	setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11);
-
-	/* select physical device ROMCS0 (i.e. flash) for MMS Window C */
-	setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03);
-
-	/* enable memory window 1 */
-	setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02);
-
-	/* now disable PC Card indexed register space again */
-	setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-/*
-************************************************************
-** Disable access to DIL/NetPC's flash by unmapping it from
-** the SC4x0's MMS Window C.
-************************************************************
-*/
-static void dnpc_unmap_flash(void)
-{
-	/* - enable PC Card indexed register space */
-	setcsc(CSC_CR, getcsc(CSC_CR) | 0x2);
-
-	/* disable memory window 1 */
-	setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02);
-
-	/* now disable PC Card indexed register space again */
-	setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2);
-}
-
-
-
-/*
-************************************************************
-** Enable/Disable VPP to write to flash
-************************************************************
-*/
-
-static DEFINE_SPINLOCK(dnpc_spin);
-static int        vpp_counter = 0;
-/*
-** This is what has to be done for the DNP board ..
-*/
-static void dnp_set_vpp(struct map_info *not_used, int on)
-{
-	spin_lock_irq(&dnpc_spin);
-
-	if (on)
-	{
-		if(++vpp_counter == 1)
-			setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4);
-	}
-	else
-	{
-		if(--vpp_counter == 0)
-			setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4);
-		else
-			BUG_ON(vpp_counter < 0);
-	}
-	spin_unlock_irq(&dnpc_spin);
-}
-
-/*
-** .. and this is the ADNP version:
-*/
-static void adnp_set_vpp(struct map_info *not_used, int on)
-{
-	spin_lock_irq(&dnpc_spin);
-
-	if (on)
-	{
-		if(++vpp_counter == 1)
-			setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8);
-	}
-	else
-	{
-		if(--vpp_counter == 0)
-			setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8);
-		else
-			BUG_ON(vpp_counter < 0);
-	}
-	spin_unlock_irq(&dnpc_spin);
-}
-
-
-
-#define DNP_WINDOW_SIZE		0x00200000	/*  DNP flash size is 2MiB  */
-#define ADNP_WINDOW_SIZE	0x00400000	/* ADNP flash size is 4MiB */
-#define WINDOW_ADDR		FLASH_BASE
-
-static struct map_info dnpc_map = {
-	.name = "ADNP Flash Bank",
-	.size = ADNP_WINDOW_SIZE,
-	.bankwidth = 1,
-	.set_vpp = adnp_set_vpp,
-	.phys = WINDOW_ADDR
-};
-
-/*
-** The layout of the flash is somewhat "strange":
-**
-** 1.  960 KiB (15 blocks) : Space for ROM Bootloader and user data
-** 2.   64 KiB (1 block)   : System BIOS
-** 3.  960 KiB (15 blocks) : User Data (DNP model) or
-** 3. 3008 KiB (47 blocks) : User Data (ADNP model)
-** 4.   64 KiB (1 block)   : System BIOS Entry
-*/
-
-static struct mtd_partition partition_info[]=
-{
-	{
-		.name =		"ADNP boot",
-		.offset =	0,
-		.size =		0xf0000,
-	},
-	{
-		.name =		"ADNP system BIOS",
-		.offset =	MTDPART_OFS_NXTBLK,
-		.size =		0x10000,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
-		.mask_flags =	MTD_WRITEABLE,
-#endif
-	},
-	{
-		.name =		"ADNP file system",
-		.offset =	MTDPART_OFS_NXTBLK,
-		.size =		0x2f0000,
-	},
-	{
-		.name =		"ADNP system BIOS entry",
-		.offset =	MTDPART_OFS_NXTBLK,
-		.size =		MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
-		.mask_flags =	MTD_WRITEABLE,
-#endif
-	},
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
-
-static struct mtd_info *mymtd;
-static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
-static struct mtd_info *merged_mtd;
-
-/*
-** "Highlevel" partition info:
-**
-** Using the MTD concat layer, we can re-arrange partitions to our
-** liking: we construct a virtual MTD device by concatenating the
-** partitions, specifying the sequence such that the boot block
-** is immediately followed by the filesystem block (i.e. the stupid
-** system BIOS block is mapped to a different place). When re-partitioning
-** this concatenated MTD device, we can set the boot block size to
-** an arbitrary (though erase block aligned) value i.e. not one that
-** is dictated by the flash's physical layout. We can thus set the
-** boot block to be e.g. 64 KB (which is fully sufficient if we want
-** to boot an etherboot image) or to -say- 1.5 MB if we want to boot
-** a large kernel image. In all cases, the remainder of the flash
-** is available as file system space.
-*/
-
-static struct mtd_partition higlvl_partition_info[]=
-{
-	{
-		.name =		"ADNP boot block",
-		.offset =	0,
-		.size =		CONFIG_MTD_DILNETPC_BOOTSIZE,
-	},
-	{
-		.name =		"ADNP file system space",
-		.offset =	MTDPART_OFS_NXTBLK,
-		.size =		ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
-	},
-	{
-		.name =		"ADNP system BIOS + BIOS Entry",
-		.offset =	MTDPART_OFS_NXTBLK,
-		.size =		MTDPART_SIZ_FULL,
-#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
-		.mask_flags =	MTD_WRITEABLE,
-#endif
-	},
-};
-
-#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
-
-
-static int dnp_adnp_probe(void)
-{
-	char *biosid, rc = -1;
-
-	biosid = (char*)ioremap(BIOSID_BASE, 16);
-	if(biosid)
-	{
-		if(!strcmp(biosid, ID_DNPC))
-			rc = 1;		/* this is a DNPC  */
-		else if(!strcmp(biosid, ID_ADNP))
-			rc = 0;		/* this is a ADNPC */
-	}
-	iounmap((void *)biosid);
-	return(rc);
-}
-
-
-static int __init init_dnpc(void)
-{
-	int is_dnp;
-
-	/*
-	** determine hardware (DNP/ADNP/invalid)
-	*/
-	if((is_dnp = dnp_adnp_probe()) < 0)
-		return -ENXIO;
-
-	/*
-	** Things are set up for ADNP by default
-	** -> modify all that needs to be different for DNP
-	*/
-	if(is_dnp)
-	{	/*
-		** Adjust window size, select correct set_vpp function.
-		** The partitioning scheme is identical on both DNP
-		** and ADNP except for the size of the third partition.
-		*/
-		int i;
-		dnpc_map.size          = DNP_WINDOW_SIZE;
-		dnpc_map.set_vpp       = dnp_set_vpp;
-		partition_info[2].size = 0xf0000;
-
-		/*
-		** increment all string pointers so the leading 'A' gets skipped,
-		** thus turning all occurrences of "ADNP ..." into "DNP ..."
-		*/
-		++dnpc_map.name;
-		for(i = 0; i < NUM_PARTITIONS; i++)
-			++partition_info[i].name;
-		higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
-			CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
-		for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
-			++higlvl_partition_info[i].name;
-	}
-
-	printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
-		is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys);
-
-	dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
-
-	dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
-
-	if (!dnpc_map.virt) {
-		printk("Failed to ioremap_nocache\n");
-		return -EIO;
-	}
-	simple_map_init(&dnpc_map);
-
-	printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
-
-	mymtd = do_map_probe("jedec_probe", &dnpc_map);
-
-	if (!mymtd)
-		mymtd = do_map_probe("cfi_probe", &dnpc_map);
-
-	/*
-	** If flash probes fail, try to make flashes accessible
-	** at least as ROM. Adjust erasesize in this case since
-	** the default one (128M) will break our partitioning.
-	*/
-	if (!mymtd)
-		if((mymtd = do_map_probe("map_rom", &dnpc_map)))
-			mymtd->erasesize = 0x10000;
-
-	if (!mymtd) {
-		iounmap(dnpc_map.virt);
-		return -ENXIO;
-	}
-
-	mymtd->owner = THIS_MODULE;
-
-	/*
-	** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
-	** -> add_mtd_partitions() will _not_ register MTD devices for
-	** the partitions, but will instead store pointers to the MTD
-	** objects it creates into our lowlvl_parts[] array.
-	** NOTE: we arrange the pointers such that the sequence of the
-	**       partitions gets re-arranged: partition #2 follows
-	**       partition #0.
-	*/
-	partition_info[0].mtdp = &lowlvl_parts[0];
-	partition_info[1].mtdp = &lowlvl_parts[2];
-	partition_info[2].mtdp = &lowlvl_parts[1];
-	partition_info[3].mtdp = &lowlvl_parts[3];
-
-	mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-
-	/*
-	** now create a virtual MTD device by concatenating the four partitions
-	** (in the sequence given by the lowlvl_parts[] array).
-	*/
-	merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
-	if(merged_mtd)
-	{	/*
-		** now partition the new device the way we want it. This time,
-		** we do not supply mtd pointers in higlvl_partition_info, so
-		** add_mtd_partitions() will register the devices.
-		*/
-		mtd_device_register(merged_mtd, higlvl_partition_info,
-				    NUM_HIGHLVL_PARTITIONS);
-	}
-
-	return 0;
-}
-
-static void __exit cleanup_dnpc(void)
-{
-	if(merged_mtd) {
-		mtd_device_unregister(merged_mtd);
-		mtd_concat_destroy(merged_mtd);
-	}
-
-	if (mymtd) {
-		mtd_device_unregister(mymtd);
-		map_destroy(mymtd);
-	}
-	if (dnpc_map.virt) {
-		iounmap(dnpc_map.virt);
-		dnpc_unmap_flash();
-		dnpc_map.virt = NULL;
-	}
-}
-
-module_init(init_dnpc);
-module_exit(cleanup_dnpc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
-MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP");
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
deleted file mode 100644
index 6538ac6..0000000
--- a/drivers/mtd/maps/dmv182.c
+++ /dev/null
@@ -1,146 +0,0 @@
-
-/*
- * drivers/mtd/maps/dmv182.c
- *
- * Flash map driver for the Dy4 SVME182 board
- *
- * Copyright 2003-2004, TimeSys Corporation
- *
- * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/errno.h>
-
-/*
- * This driver currently handles only the 16MiB user flash bank 1 on the
- * board.  It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
- * (VxWorks boot), or the optional 48MiB expansion flash.
- *
- * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
- * now supports the first 96MiB (the boot flash bank containing FFW
- * is excluded).  The VxWorks loader is in partition 1.
- */
-
-#define FLASH_BASE_ADDR 0xf0000000
-#define FLASH_BANK_SIZE (128*1024*1024)
-
-MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
-MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
-MODULE_LICENSE("GPL");
-
-static struct map_info svme182_map = {
-	.name		= "Dy4 SVME182",
-	.bankwidth	= 32,
-	.size		=  128 * 1024 * 1024
-};
-
-#define BOOTIMAGE_PART_SIZE		((6*1024*1024)-RESERVED_PART_SIZE)
-
-// Allow 6MiB for the kernel
-#define NEW_BOOTIMAGE_PART_SIZE  (6 * 1024 * 1024)
-// Allow 1MiB for the bootloader
-#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
-// Use the remaining 9MiB at the end of flash for the RFS
-#define NEW_RFS_PART_SIZE        (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
-                                  NEW_BOOTIMAGE_PART_SIZE)
-
-static struct mtd_partition svme182_partitions[] = {
-	// The Lower PABS is only 128KiB, but the partition code doesn't
-	// like partitions that don't end on the largest erase block
-	// size of the device, even if all of the erase blocks in the
-	// partition are small ones.  The hardware should prevent
-	// writes to the actual PABS areas.
-	{
-		name:       "Lower PABS and CPU 0 bootloader or kernel",
-		size:       6*1024*1024,
-		offset:     0,
-	},
-	{
-		name:       "Root Filesystem",
-		size:       10*1024*1024,
-		offset:     MTDPART_OFS_NXTBLK
-	},
-	{
-		name:       "CPU1 Bootloader",
-		size:       1024*1024,
-		offset:     MTDPART_OFS_NXTBLK,
-	},
-	{
-		name:       "Extra",
-		size:       110*1024*1024,
-		offset:     MTDPART_OFS_NXTBLK
-	},
-	{
-		name:       "Foundation Firmware and Upper PABS",
-		size:       1024*1024,
-		offset:     MTDPART_OFS_NXTBLK,
-		mask_flags: MTD_WRITEABLE // read-only
-	}
-};
-
-static struct mtd_info *this_mtd;
-
-static int __init init_svme182(void)
-{
-	struct mtd_partition *partitions;
-	int num_parts = ARRAY_SIZE(svme182_partitions);
-
-	partitions = svme182_partitions;
-
-	svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size);
-
-	if (svme182_map.virt == 0) {
-		printk("Failed to ioremap FLASH memory area.\n");
-		return -EIO;
-	}
-
-	simple_map_init(&svme182_map);
-
-	this_mtd = do_map_probe("cfi_probe", &svme182_map);
-	if (!this_mtd)
-	{
-		iounmap((void *)svme182_map.virt);
-		return -ENXIO;
-	}
-
-	printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
-		   this_mtd->size >> 20, FLASH_BASE_ADDR);
-
-	this_mtd->owner = THIS_MODULE;
-	mtd_device_register(this_mtd, partitions, num_parts);
-
-	return 0;
-}
-
-static void __exit cleanup_svme182(void)
-{
-	if (this_mtd)
-	{
-		mtd_device_unregister(this_mtd);
-		map_destroy(this_mtd);
-	}
-
-	if (svme182_map.virt)
-	{
-		iounmap((void *)svme182_map.virt);
-		svme182_map.virt = 0;
-	}
-
-	return;
-}
-
-module_init(init_svme182);
-module_exit(cleanup_svme182);
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7b643de..5ede282 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -157,7 +157,8 @@
 	memcpy_toio(map->virt + (to % state->win_size), from, len);
 }
 
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probe_types[] = {
+	"cmdlinepart", "RedBoot", NULL };
 
 /**
  * gpio_flash_probe() - setup a mapping for a GPIO assisted flash
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
deleted file mode 100644
index 8ed6cb4..0000000
--- a/drivers/mtd/maps/h720x-flash.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
- * evaluation boards
- *
- * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
- *     2003 Thomas Gleixner <tglx@linutronix.de>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-static struct mtd_info *mymtd;
-
-static struct map_info h720x_map = {
-	.name =		"H720X",
-	.bankwidth =	4,
-	.size =		H720X_FLASH_SIZE,
-	.phys =		H720X_FLASH_PHYS,
-};
-
-static struct mtd_partition h720x_partitions[] = {
-        {
-                .name = "ArMon",
-                .size = 0x00080000,
-                .offset = 0,
-                .mask_flags = MTD_WRITEABLE
-        },{
-                .name = "Env",
-                .size = 0x00040000,
-                .offset = 0x00080000,
-                .mask_flags = MTD_WRITEABLE
-        },{
-                .name = "Kernel",
-                .size = 0x00180000,
-                .offset = 0x000c0000,
-                .mask_flags = MTD_WRITEABLE
-        },{
-                .name = "Ramdisk",
-                .size = 0x00400000,
-                .offset = 0x00240000,
-                .mask_flags = MTD_WRITEABLE
-        },{
-                .name = "jffs2",
-                .size = MTDPART_SIZ_FULL,
-                .offset = MTDPART_OFS_APPEND
-        }
-};
-
-#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
-
-/*
- * Initialize FLASH support
- */
-static int __init h720x_mtd_init(void)
-{
-	h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
-
-	if (!h720x_map.virt) {
-		printk(KERN_ERR "H720x-MTD: ioremap failed\n");
-		return -EIO;
-	}
-
-	simple_map_init(&h720x_map);
-
-	// Probe for flash bankwidth 4
-	printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
-	mymtd = do_map_probe("cfi_probe", &h720x_map);
-	if (!mymtd) {
-		printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
-	    // Probe for bankwidth 2
-	    h720x_map.bankwidth = 2;
-	    mymtd = do_map_probe("cfi_probe", &h720x_map);
-	}
-
-	if (mymtd) {
-		mymtd->owner = THIS_MODULE;
-
-		mtd_device_parse_register(mymtd, NULL, NULL,
-					  h720x_partitions, NUM_PARTITIONS);
-		return 0;
-	}
-
-	iounmap((void *)h720x_map.virt);
-	return -ENXIO;
-}
-
-/*
- * Cleanup
- */
-static void __exit h720x_mtd_cleanup(void)
-{
-
-	if (mymtd) {
-		mtd_device_unregister(mymtd);
-		map_destroy(mymtd);
-	}
-
-	if (h720x_map.virt) {
-		iounmap((void *)h720x_map.virt);
-		h720x_map.virt = 0;
-	}
-}
-
-
-module_init(h720x_mtd_init);
-module_exit(h720x_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 834a06c..4968674 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -24,14 +24,12 @@
 #define NUM_FLASHBANKS 2
 #define BUSWIDTH     4
 
-/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
-#define PROBETYPES { "jedec_probe", NULL }
-
 #define MSG_PREFIX "impA7:"   /* prefix for our printk()'s */
 #define MTDID      "impa7-%d"  /* for mtdparts= partitioning */
 
 static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
 
+static const char * const rom_probe_types[] = { "jedec_probe", NULL };
 
 static struct map_info impa7_map[NUM_FLASHBANKS] = {
 	{
@@ -60,8 +58,7 @@
 
 static int __init init_impa7(void)
 {
-	static const char *rom_probe_types[] = PROBETYPES;
-	const char **type;
+	const char * const *type;
 	int i;
 	static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
 	  { WINDOW_ADDR0, WINDOW_SIZE0 },
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index b14053b..f581ac1 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -82,9 +82,9 @@
 
 static int vr_nor_mtd_setup(struct vr_nor_mtd *p)
 {
-	static const char *probe_types[] =
+	static const char * const probe_types[] =
 	    { "cfi_probe", "jedec_probe", NULL };
-	const char **type;
+	const char * const *type;
 
 	for (type = probe_types; !p->info && *type; type++)
 		p->info = do_map_probe(*type, &p->map);
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
deleted file mode 100644
index 4a41ced..0000000
--- a/drivers/mtd/maps/ixp2000.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * drivers/mtd/maps/ixp2000.c
- *
- * Mapping for the Intel XScale IXP2000 based systems
- *
- * Copyright (C) 2002 Intel Corp.
- * Copyright (C) 2003-2004 MontaVista Software, Inc.
- *
- * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com>
- * Maintainer: Deepak Saxena <dsaxena@plexity.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach/flash.h>
-
-#include <linux/reboot.h>
-
-struct ixp2000_flash_info {
-	struct		mtd_info *mtd;
-	struct		map_info map;
-	struct		resource *res;
-};
-
-static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
-{
-	unsigned long (*set_bank)(unsigned long) =
-		(unsigned long(*)(unsigned long))map->map_priv_2;
-
-	return (set_bank ? set_bank(ofs) : ofs);
-}
-
-#ifdef __ARMEB__
-/*
- * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which
- * causes the lower address bits to be XORed with 0x11 on 8 bit accesses
- * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44.
- */
-static int erratum44_workaround = 0;
-
-static inline unsigned long address_fix8_write(unsigned long addr)
-{
-	if (erratum44_workaround) {
-		return (addr ^ 3);
-	}
-	return addr;
-}
-#else
-
-#define address_fix8_write(x)	(x)
-#endif
-
-static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs)
-{
-	map_word val;
-
-	val.x[0] =  *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs)));
-	return val;
-}
-
-/*
- * We can't use the standard memcpy due to the broken SlowPort
- * address translation on rev A0 and A1 silicon and the fact that
- * we have banked flash.
- */
-static void ixp2000_flash_copy_from(struct map_info *map, void *to,
-			      unsigned long from, ssize_t len)
-{
-	from = flash_bank_setup(map, from);
-	while(len--)
-		*(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++);
-}
-
-static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs)
-{
-	*(__u8 *) (address_fix8_write(map->map_priv_1 +
-				      flash_bank_setup(map, ofs))) = d.x[0];
-}
-
-static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to,
-			    const void *from, ssize_t len)
-{
-	to = flash_bank_setup(map, to);
-	while(len--) {
-		unsigned long tmp = address_fix8_write(map->map_priv_1 + to++);
-		*(__u8 *)(tmp) = *(__u8 *)(from++);
-	}
-}
-
-
-static int ixp2000_flash_remove(struct platform_device *dev)
-{
-	struct flash_platform_data *plat = dev->dev.platform_data;
-	struct ixp2000_flash_info *info = platform_get_drvdata(dev);
-
-	platform_set_drvdata(dev, NULL);
-
-	if(!info)
-		return 0;
-
-	if (info->mtd) {
-		mtd_device_unregister(info->mtd);
-		map_destroy(info->mtd);
-	}
-	if (info->map.map_priv_1)
-		iounmap((void *) info->map.map_priv_1);
-
-	if (info->res) {
-		release_resource(info->res);
-		kfree(info->res);
-	}
-
-	if (plat->exit)
-		plat->exit();
-
-	return 0;
-}
-
-
-static int ixp2000_flash_probe(struct platform_device *dev)
-{
-	static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-	struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
-	struct flash_platform_data *plat;
-	struct ixp2000_flash_info *info;
-	unsigned long window_size;
-	int err = -1;
-
-	if (!ixp_data)
-		return -ENODEV;
-
-	plat = ixp_data->platform_data;
-	if (!plat)
-		return -ENODEV;
-
-	window_size = resource_size(dev->resource);
-	dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
-		 ixp_data->nr_banks, ((u32)window_size >> 20));
-
-	if (plat->width != 1) {
-		dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n",
-			plat->width * 8);
-		return -EIO;
-	}
-
-	info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
-	if(!info) {
-		err = -ENOMEM;
-		goto Error;
-	}
-
-	platform_set_drvdata(dev, info);
-
-	/*
-	 * Tell the MTD layer we're not 1:1 mapped so that it does
-	 * not attempt to do a direct access on us.
-	 */
-	info->map.phys = NO_XIP;
-
-	info->map.size = ixp_data->nr_banks * window_size;
-	info->map.bankwidth = 1;
-
-	/*
- 	 * map_priv_2 is used to store a ptr to the bank_setup routine
- 	 */
-	info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
-
-	info->map.name = dev_name(&dev->dev);
-	info->map.read = ixp2000_flash_read8;
-	info->map.write = ixp2000_flash_write8;
-	info->map.copy_from = ixp2000_flash_copy_from;
-	info->map.copy_to = ixp2000_flash_copy_to;
-
-	info->res = request_mem_region(dev->resource->start,
-				       resource_size(dev->resource),
-				       dev_name(&dev->dev));
-	if (!info->res) {
-		dev_err(&dev->dev, "Could not reserve memory region\n");
-		err = -ENOMEM;
-		goto Error;
-	}
-
-	info->map.map_priv_1 =
-		(unsigned long)ioremap(dev->resource->start,
-				       resource_size(dev->resource));
-	if (!info->map.map_priv_1) {
-		dev_err(&dev->dev, "Failed to ioremap flash region\n");
-		err = -EIO;
-		goto Error;
-	}
-
-#if defined(__ARMEB__)
-	/*
-	 * Enable erratum 44 workaround for NPUs with broken slowport
-	 */
-
-	erratum44_workaround = ixp2000_has_broken_slowport();
-	dev_info(&dev->dev, "Erratum 44 workaround %s\n",
-	       erratum44_workaround ? "enabled" : "disabled");
-#endif
-
-	info->mtd = do_map_probe(plat->map_name, &info->map);
-	if (!info->mtd) {
-		dev_err(&dev->dev, "map_probe failed\n");
-		err = -ENXIO;
-		goto Error;
-	}
-	info->mtd->owner = THIS_MODULE;
-
-	err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
-	if (err)
-		goto Error;
-
-	return 0;
-
-Error:
-	ixp2000_flash_remove(dev);
-	return err;
-}
-
-static struct platform_driver ixp2000_flash_driver = {
-	.probe		= ixp2000_flash_probe,
-	.remove		= ixp2000_flash_remove,
-	.driver		= {
-		.name	= "IXP2000-Flash",
-		.owner	= THIS_MODULE,
-	},
-};
-
-module_platform_driver(ixp2000_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index e864fc6..52b3410 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -148,7 +148,7 @@
 	struct resource *res;
 };
 
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 static int ixp4xx_flash_remove(struct platform_device *dev)
 {
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index d1da6ed..d7ac65d 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -46,8 +46,7 @@
 };
 
 static const char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] = {
-					"cmdlinepart", "ofpart", NULL };
+static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL };
 
 static map_word
 ltq_read16(struct map_info *map, unsigned long adr)
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
deleted file mode 100644
index 93fa56c..0000000
--- a/drivers/mtd/maps/mbx860.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Handle mapping of the flash on MBX860 boards
- *
- * Author:	Anton Todorov
- * Copyright:	(C) 2001 Emness Technology
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x00200000
-
-/* Flash / Partition sizing */
-#define MAX_SIZE_KiB              8192
-#define BOOT_PARTITION_SIZE_KiB    512
-#define KERNEL_PARTITION_SIZE_KiB 5632
-#define APP_PARTITION_SIZE_KiB    2048
-
-#define NUM_PARTITIONS 3
-
-/* partition_info gives details on the logical partitions that the single
- * flash device is split into. If the size is zero we use up to the end of
- * the device. */
-static struct mtd_partition partition_info[]={
-	{ .name = "MBX flash BOOT partition",
-	.offset = 0,
-	.size =   BOOT_PARTITION_SIZE_KiB*1024 },
-	{ .name = "MBX flash DATA partition",
-	.offset = BOOT_PARTITION_SIZE_KiB*1024,
-	.size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
-	{ .name = "MBX flash APPLICATION partition",
-	.offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
-};
-
-
-static struct mtd_info *mymtd;
-
-struct map_info mbx_map = {
-	.name = "MBX flash",
-	.size = WINDOW_SIZE,
-	.phys = WINDOW_ADDR,
-	.bankwidth = 4,
-};
-
-static int __init init_mbx(void)
-{
-	printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
-	mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
-	if (!mbx_map.virt) {
-		printk("Failed to ioremap\n");
-		return -EIO;
-	}
-	simple_map_init(&mbx_map);
-
-	mymtd = do_map_probe("jedec_probe", &mbx_map);
-	if (mymtd) {
-		mymtd->owner = THIS_MODULE;
-		mtd_device_register(mymtd, NULL, 0);
-		mtd_device_register(mymtd, partition_info, NUM_PARTITIONS);
-		return 0;
-	}
-
-	iounmap((void *)mbx_map.virt);
-	return -ENXIO;
-}
-
-static void __exit cleanup_mbx(void)
-{
-	if (mymtd) {
-		mtd_device_unregister(mymtd);
-		map_destroy(mymtd);
-	}
-	if (mbx_map.virt) {
-		iounmap((void *)mbx_map.virt);
-		mbx_map.virt = 0;
-	}
-}
-
-module_init(init_mbx);
-module_exit(cleanup_mbx);
-
-MODULE_AUTHOR("Anton Todorov <a.todorov@emness.com>");
-MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index c3aebd5..c2604f8 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -283,8 +283,7 @@
 	if (err)
 		goto release;
 
-	/* tsk - do_map_probe should take const char * */
-	mtd = do_map_probe((char *)info->map_name, &map->map);
+	mtd = do_map_probe(info->map_name, &map->map);
 	err = -ENODEV;
 	if (!mtd)
 		goto release;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 21b0b71..e7a592c 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -87,21 +87,18 @@
 	spin_unlock_irqrestore(&info->vpp_lock, flags);
 }
 
-static const char *rom_probe_types[] = {
-					"cfi_probe",
-					"jedec_probe",
-					"qinfo_probe",
-					"map_rom",
-					NULL };
-static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs",
-					  NULL };
+static const char * const rom_probe_types[] = {
+	"cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL };
+
+static const char * const part_probe_types[] = {
+	"cmdlinepart", "RedBoot", "afs", NULL };
 
 static int physmap_flash_probe(struct platform_device *dev)
 {
 	struct physmap_flash_data *physmap_data;
 	struct physmap_flash_info *info;
-	const char **probe_type;
-	const char **part_types;
+	const char * const *probe_type;
+	const char * const *part_types;
 	int err = 0;
 	int i;
 	int devices_found = 0;
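The recurring type change across these map drivers is from "const char *[]" to "const char * const []": both the strings and the array of pointers become immutable, so the tables can live in read-only data and match the const-qualified parser prototypes. The corresponding cursor type, sketched:

	/* Immutable pointers to immutable strings: */
	static const char * const probes[] = { "cmdlinepart", "RedBoot", NULL };

	/* A cursor over such a table may advance but not modify: */
	const char * const *type;

	for (type = probes; *type; type++)
		pr_debug("trying parser %s\n", *type);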
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 363939d..d111097 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -71,6 +71,9 @@
 	return 0;
 }
 
+static const char * const rom_probe_types[] = {
+	"cfi_probe", "jedec_probe", "map_rom" };
+
 /* Helper function to handle probing of the obsolete "direct-mapped"
  * compatible binding, which has an extra "probe-type" property
  * describing the type of flash probe necessary. */
@@ -80,8 +83,6 @@
 	struct device_node *dp = dev->dev.of_node;
 	const char *of_probe;
 	struct mtd_info *mtd;
-	static const char *rom_probe_types[]
-		= { "cfi_probe", "jedec_probe", "map_rom"};
 	int i;
 
 	dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" "
@@ -111,9 +112,10 @@
    specifies the list of partition probers to use. If none is given then the
   default is used. These take precedence over other device tree
    information. */
-static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
-					"ofpart", "ofoldpart", NULL };
-static const char **of_get_probes(struct device_node *dp)
+static const char * const part_probe_types_def[] = {
+	"cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
+
+static const char * const *of_get_probes(struct device_node *dp)
 {
 	const char *cp;
 	int cplen;
@@ -142,7 +144,7 @@
 	return res;
 }
 
-static void of_free_probes(const char **probes)
+static void of_free_probes(const char * const *probes)
 {
 	if (probes != part_probe_types_def)
 		kfree(probes);
@@ -151,7 +153,7 @@
 static struct of_device_id of_flash_match[];
 static int of_flash_probe(struct platform_device *dev)
 {
-	const char **part_probe_types;
+	const char * const *part_probe_types;
 	const struct of_device_id *match;
 	struct device_node *dp = dev->dev.of_node;
 	struct resource res;
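of_get_probes() returns either the static default table or a kmalloc'd one built from a device-tree property, which is why of_free_probes() frees only when the pointer differs from part_probe_types_def. The pairing a caller follows, sketched with error handling elided:

	part_probe_types = of_get_probes(dp);
	/* ...mtd_device_parse_register(mtd, part_probe_types, ...)... */
	of_free_probes(part_probe_types);	/* no-op for the default table */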
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 2de66b0..71fdda2 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -199,7 +199,7 @@
 	 * supplied by the platform_data struct */
 
 	if (pdata->map_probes) {
-		const char **map_probes = pdata->map_probes;
+		const char * const *map_probes = pdata->map_probes;
 
 		for ( ; !info->mtd && *map_probes; map_probes++)
 			info->mtd = do_map_probe(*map_probes , &info->map);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 43e3dbb..acb1dbc 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -45,9 +45,7 @@
 	struct map_info		map;
 };
 
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 static int pxa2xx_flash_probe(struct platform_device *pdev)
 {
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 49c3fe7..ac02fbf 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -45,14 +45,15 @@
 	return 0;
 }
 
-static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+static const char * const rom_probe_types[] = {
+	"cfi_probe", "jedec_probe", NULL };
 
 static int rbtx4939_flash_probe(struct platform_device *dev)
 {
 	struct rbtx4939_flash_data *pdata;
 	struct rbtx4939_flash_info *info;
 	struct resource *res;
-	const char **probe_type;
+	const char * const *probe_type;
 	int err = 0;
 	unsigned long size;
 
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
deleted file mode 100644
index ed88225..0000000
--- a/drivers/mtd/maps/rpxlite.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Handle mapping of the flash on the RPX Lite and CLLF boards
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-
-#define WINDOW_ADDR 0xfe000000
-#define WINDOW_SIZE 0x800000
-
-static struct mtd_info *mymtd;
-
-static struct map_info rpxlite_map = {
-	.name = "RPX",
-	.size = WINDOW_SIZE,
-	.bankwidth = 4,
-	.phys = WINDOW_ADDR,
-};
-
-static int __init init_rpxlite(void)
-{
-	printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
-	rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
-
-	if (!rpxlite_map.virt) {
-		printk("Failed to ioremap\n");
-		return -EIO;
-	}
-	simple_map_init(&rpxlite_map);
-	mymtd = do_map_probe("cfi_probe", &rpxlite_map);
-	if (mymtd) {
-		mymtd->owner = THIS_MODULE;
-		mtd_device_register(mymtd, NULL, 0);
-		return 0;
-	}
-
-	iounmap((void *)rpxlite_map.virt);
-	return -ENXIO;
-}
-
-static void __exit cleanup_rpxlite(void)
-{
-	if (mymtd) {
-		mtd_device_unregister(mymtd);
-		map_destroy(mymtd);
-	}
-	if (rpxlite_map.virt) {
-		iounmap((void *)rpxlite_map.virt);
-		rpxlite_map.virt = 0;
-	}
-}
-
-module_init(init_rpxlite);
-module_exit(cleanup_rpxlite);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
-MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index f694417..29e3dca 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -244,7 +244,7 @@
 	return ERR_PTR(ret);
 }
 
-static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 
 static int sa1100_mtd_probe(struct platform_device *pdev)
 {
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 9d900ad..83a7a70 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -31,7 +31,7 @@
 	.bankwidth = 4,
 };
 
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 #ifdef CONFIG_MTD_SUPERH_RESERVE
 static struct mtd_partition superh_se_partitions[] = {
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
deleted file mode 100644
index d785879..0000000
--- a/drivers/mtd/maps/tqm8xxl.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Handle mapping of the flash memory access routines
- * on TQM8xxL based devices.
- *
- * based on rpxlite.c
- *
- * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
- *
- * This code is GPLed
- *
- */
-
-/*
- * According to TQM8xxL hardware manual, TQM8xxL series have
- * following flash memory organisations:
- *	| capacity |	| chip type |	| bank0 |	| bank1 |
- *	    2MiB	   512Kx16	  2MiB		   0
- *	    4MiB	   1Mx16	  4MiB		   0
- *	    8MiB	   1Mx16	  4MiB		   4MiB
- * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
- * kernel configuration.
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#define FLASH_ADDR 0x40000000
-#define FLASH_SIZE 0x00800000
-#define FLASH_BANK_MAX 4
-
-// trivial struct to describe partition information
-struct mtd_part_def
-{
-	int nums;
-	unsigned char *type;
-	struct mtd_partition* mtd_part;
-};
-
-//static struct mtd_info *mymtd;
-static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
-static struct map_info* map_banks[FLASH_BANK_MAX];
-static struct mtd_part_def part_banks[FLASH_BANK_MAX];
-static unsigned long num_banks;
-static void __iomem *start_scan_addr;
-
-/*
- * Here are partition information for all known TQM8xxL series devices.
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size.  It must correspond to the
- * value specified in the mapping definition defined by the
- * "struct map_desc *_io_desc" for the corresponding machine.
- */
-
-/* Currently, TQM8xxL has up to 8MiB flash */
-static unsigned long tqm8xxl_max_flash_size = 0x00800000;
-
-/* partition definition for first flash bank
- * (cf. "drivers/char/flash_config.c")
- */
-static struct mtd_partition tqm8xxl_partitions[] = {
-	{
-	  .name = "ppcboot",
-	  .offset = 0x00000000,
-	  .size = 0x00020000,           /* 128KB           */
-	  .mask_flags = MTD_WRITEABLE,  /* force read-only */
-	},
-	{
-	  .name = "kernel",             /* default kernel image */
-	  .offset = 0x00020000,
-	  .size = 0x000e0000,
-	  .mask_flags = MTD_WRITEABLE,  /* force read-only */
-	},
-	{
-	  .name = "user",
-	  .offset = 0x00100000,
-	  .size = 0x00100000,
-	},
-	{
-	  .name = "initrd",
-	  .offset = 0x00200000,
-	  .size = 0x00200000,
-	}
-};
-/* partition definition for second flash bank */
-static struct mtd_partition tqm8xxl_fs_partitions[] = {
-	{
-	  .name = "cramfs",
-	  .offset = 0x00000000,
-	  .size = 0x00200000,
-	},
-	{
-	  .name = "jffs",
-	  .offset = 0x00200000,
-	  .size = 0x00200000,
-	  //.size = MTDPART_SIZ_FULL,
-	}
-};
-
-static int __init init_tqm_mtd(void)
-{
-	int idx = 0, ret = 0;
-	unsigned long flash_addr, flash_size, mtd_size = 0;
-	/* pointer to TQM8xxL board info data */
-	bd_t *bd = (bd_t *)__res;
-
-	flash_addr = bd->bi_flashstart;
-	flash_size = bd->bi_flashsize;
-
-	//request maximum flash size address space
-	start_scan_addr = ioremap(flash_addr, flash_size);
-	if (!start_scan_addr) {
-		printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr);
-		return -EIO;
-	}
-
-	for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
-		if(mtd_size >= flash_size)
-			break;
-
-		printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx);
-
-		map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL);
-		if(map_banks[idx] == NULL) {
-			ret = -ENOMEM;
-			/* FIXME: What if some MTD devices were probed already? */
-			goto error_mem;
-		}
-
-		map_banks[idx]->name = kmalloc(16, GFP_KERNEL);
-
-		if (!map_banks[idx]->name) {
-			ret = -ENOMEM;
-			/* FIXME: What if some MTD devices were probed already? */
-			goto error_mem;
-		}
-		sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
-
-		map_banks[idx]->size = flash_size;
-		map_banks[idx]->bankwidth = 4;
-
-		simple_map_init(map_banks[idx]);
-
-		map_banks[idx]->virt = start_scan_addr;
-		map_banks[idx]->phys = flash_addr;
-		/* FIXME: This looks utterly bogus, but I'm trying to
-		   preserve the behaviour of the original (shown here)...
-
-		map_banks[idx]->map_priv_1 =
-		start_scan_addr + ((idx > 0) ?
-		(mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
-		*/
-
-		if (idx && mtd_banks[idx-1]) {
-			map_banks[idx]->virt += mtd_banks[idx-1]->size;
-			map_banks[idx]->phys += mtd_banks[idx-1]->size;
-		}
-
-		//start to probe flash chips
-		mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
-
-		if (mtd_banks[idx]) {
-			mtd_banks[idx]->owner = THIS_MODULE;
-			mtd_size += mtd_banks[idx]->size;
-			num_banks++;
-
-			printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks,
-			mtd_banks[idx]->name, mtd_banks[idx]->size);
-		}
-	}
-
-	/* no supported flash chips found */
-	if (!num_banks) {
-		printk(KERN_NOTICE "TQM8xxL: No support flash chips found!\n");
-		ret = -ENXIO;
-		goto error_mem;
-	}
-
-	/*
-	 * Select Static partition definitions
-	 */
-	part_banks[0].mtd_part = tqm8xxl_partitions;
-	part_banks[0].type = "Static image";
-	part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions);
-
-	part_banks[1].mtd_part = tqm8xxl_fs_partitions;
-	part_banks[1].type = "Static file system";
-	part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
-
-	for(idx = 0; idx < num_banks ; idx++) {
-		if (part_banks[idx].nums == 0)
-			printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
-		else
-			printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
-					idx, part_banks[idx].type);
-		mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part,
-		part_banks[idx].nums);
-	}
-	return 0;
-error_mem:
-	for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
-		if(map_banks[idx] != NULL) {
-			kfree(map_banks[idx]->name);
-			map_banks[idx]->name = NULL;
-			kfree(map_banks[idx]);
-			map_banks[idx] = NULL;
-		}
-	}
-error:
-	iounmap(start_scan_addr);
-	return ret;
-}
-
-static void __exit cleanup_tqm_mtd(void)
-{
-	unsigned int idx = 0;
-	for(idx = 0 ; idx < num_banks ; idx++) {
-		/* destroy mtd_info previously allocated */
-		if (mtd_banks[idx]) {
-			mtd_device_unregister(mtd_banks[idx]);
-			map_destroy(mtd_banks[idx]);
-		}
-		/* release map_info not used anymore */
-		kfree(map_banks[idx]->name);
-		kfree(map_banks[idx]);
-	}
-
-	if (start_scan_addr) {
-		iounmap(start_scan_addr);
-		start_scan_addr = 0;
-	}
-}
-
-module_init(init_tqm_mtd);
-module_exit(cleanup_tqm_mtd);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kirk Lee <kirk@hpc.ee.ntu.edu.tw>");
-MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards");
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 1de390e..da2cdb5 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -82,11 +82,12 @@
 	tsunami_flash_mtd = 0;
 }
 
+static const char * const rom_probe_types[] = {
+	"cfi_probe", "jedec_probe", "map_rom", NULL };
 
 static int __init init_tsunami_flash(void)
 {
-	static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
-	char **type;
+	const char * const *type;
 
 	tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
 
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 5ad39bb..5073cbc 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -237,13 +237,12 @@
 	return ret;
 }
 
-static int blktrans_release(struct gendisk *disk, fmode_t mode)
+static void blktrans_release(struct gendisk *disk, fmode_t mode)
 {
 	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
-	int ret = 0;
 
 	if (!dev)
-		return ret;
+		return;
 
 	mutex_lock(&dev->lock);
 
@@ -254,13 +253,13 @@
 	module_put(dev->tr->owner);
 
 	if (dev->mtd) {
-		ret = dev->tr->release ? dev->tr->release(dev) : 0;
+		if (dev->tr->release)
+			dev->tr->release(dev);
 		__put_mtd_device(dev->mtd);
 	}
 unlock:
 	mutex_unlock(&dev->lock);
 	blktrans_dev_put(dev);
-	return ret;
 }
 
 static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 6c6d807..2aef5dd 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -308,7 +308,7 @@
 	return 0;
 }
 
-static int mtdblock_release(struct mtd_blktrans_dev *mbd)
+static void mtdblock_release(struct mtd_blktrans_dev *mbd)
 {
 	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
 
@@ -333,8 +333,6 @@
 	mutex_unlock(&mtdblks_lock);
 
 	pr_debug("ok\n");
-
-	return 0;
 }
 
 static int mtdblock_flush(struct mtd_blktrans_dev *dev)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index dc571eb..c719879 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -38,6 +38,8 @@
 
 #include <asm/uaccess.h>
 
+#include "mtdcore.h"
+
 static DEFINE_MUTEX(mtd_mutex);
 
 /*
@@ -365,37 +367,35 @@
 	wake_up((wait_queue_head_t *)instr->priv);
 }
 
-#ifdef CONFIG_HAVE_MTD_OTP
 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 {
 	struct mtd_info *mtd = mfi->mtd;
 	size_t retlen;
-	int ret = 0;
-
-	/*
-	 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
-	 * operations are supported.
-	 */
-	if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
-		return -EOPNOTSUPP;
 
 	switch (mode) {
 	case MTD_OTP_FACTORY:
+		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+				-EOPNOTSUPP)
+			return -EOPNOTSUPP;
+
 		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
 		break;
 	case MTD_OTP_USER:
+		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
+				-EOPNOTSUPP)
+			return -EOPNOTSUPP;
+
 		mfi->mode = MTD_FILE_MODE_OTP_USER;
 		break;
-	default:
-		ret = -EINVAL;
 	case MTD_OTP_OFF:
+		mfi->mode = MTD_FILE_MODE_NORMAL;
 		break;
+	default:
+		return -EINVAL;
 	}
-	return ret;
+
+	return 0;
 }
-#else
-# define otp_select_filemode(f,m)	-EOPNOTSUPP
-#endif
 
 static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
 	uint64_t start, uint32_t length, void __user *ptr,
@@ -888,7 +888,6 @@
 		break;
 	}
 
-#ifdef CONFIG_HAVE_MTD_OTP
 	case OTPSELECT:
 	{
 		int mode;
@@ -944,7 +943,6 @@
 		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
 		break;
 	}
-#endif
 
 	/* This ioctl is being deprecated - it truncates the ECC layout */
 	case ECCGETLAYOUT:
@@ -1185,23 +1183,25 @@
 };
 MODULE_ALIAS_FS("mtd_inodefs");
 
-static int __init init_mtdchar(void)
+int __init init_mtdchar(void)
 {
 	int ret;
 
 	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
 				   "mtd", &mtd_fops);
 	if (ret < 0) {
-		pr_notice("Can't allocate major number %d for "
-				"Memory Technology Devices.\n", MTD_CHAR_MAJOR);
+		pr_err("Can't allocate major number %d for MTD\n",
+		       MTD_CHAR_MAJOR);
 		return ret;
 	}
 
 	ret = register_filesystem(&mtd_inodefs_type);
 	if (ret) {
-		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
+		pr_err("Can't register mtd_inodefs filesystem, error %d\n",
+		       ret);
 		goto err_unregister_chdev;
 	}
+
 	return ret;
 
 err_unregister_chdev:
@@ -1209,18 +1209,10 @@
 	return ret;
 }
 
-static void __exit cleanup_mtdchar(void)
+void __exit cleanup_mtdchar(void)
 {
 	unregister_filesystem(&mtd_inodefs_type);
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
 
-module_init(init_mtdchar);
-module_exit(cleanup_mtdchar);
-
-MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Direct character-device access to MTD devices");
 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 322ca65..c400c57 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -42,6 +42,7 @@
 #include <linux/mtd/partitions.h>
 
 #include "mtdcore.h"
+
 /*
  * backing device capabilities for non-mappable devices (such as NAND flash)
  * - permits private mappings, copies are taken of the data
@@ -97,11 +98,7 @@
 static LIST_HEAD(mtd_notifiers);
 
 
-#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE)
 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
-#else
-#define MTD_DEVT(index) 0
-#endif
 
 /* REVISIT once MTD uses the driver model better, whoever allocates
  * the mtd_info will probably want to use the release() hook...
@@ -493,7 +490,7 @@
  *
  * Returns zero in case of success and a negative error code in case of failure.
  */
-int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
 			      struct mtd_part_parser_data *parser_data,
 			      const struct mtd_partition *parts,
 			      int nr_parts)
@@ -1117,8 +1114,6 @@
 /*====================================================================*/
 /* Support for /proc/mtd */
 
-static struct proc_dir_entry *proc_mtd;
-
 static int mtd_proc_show(struct seq_file *m, void *v)
 {
 	struct mtd_info *mtd;
@@ -1164,6 +1159,8 @@
 	return ret;
 }
 
+static struct proc_dir_entry *proc_mtd;
+
 static int __init init_mtd(void)
 {
 	int ret;
@@ -1184,11 +1181,17 @@
 	if (ret)
 		goto err_bdi3;
 
-#ifdef CONFIG_PROC_FS
 	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
-#endif /* CONFIG_PROC_FS */
+
+	ret = init_mtdchar();
+	if (ret)
+		goto out_procfs;
+
 	return 0;
 
+out_procfs:
+	if (proc_mtd)
+		remove_proc_entry("mtd", NULL);
 err_bdi3:
 	bdi_destroy(&mtd_bdi_ro_mappable);
 err_bdi2:
@@ -1202,10 +1205,9 @@
 
 static void __exit cleanup_mtd(void)
 {
-#ifdef CONFIG_PROC_FS
+	cleanup_mtdchar();
 	if (proc_mtd)
-		remove_proc_entry( "mtd", NULL);
-#endif /* CONFIG_PROC_FS */
+		remove_proc_entry("mtd", NULL);
 	class_unregister(&mtd_class);
 	bdi_destroy(&mtd_bdi_unmappable);
 	bdi_destroy(&mtd_bdi_ro_mappable);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 961a384..7b03533 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -1,23 +1,21 @@
-/* linux/drivers/mtd/mtdcore.h
- *
- * Header file for driver private mtdcore exports
- *
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
  */
 
-/* These are exported solely for the purpose of mtd_blkdevs.c. You
-   should not use them for _anything_ else */
-
 extern struct mutex mtd_table_mutex;
-extern struct mtd_info *__mtd_next_device(int i);
 
-extern int add_mtd_device(struct mtd_info *mtd);
-extern int del_mtd_device(struct mtd_info *mtd);
-extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
-			      int);
-extern int del_mtd_partitions(struct mtd_info *);
-extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
-				struct mtd_partition **pparts,
-				struct mtd_part_parser_data *data);
+struct mtd_info *__mtd_next_device(int i);
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+			 struct mtd_partition **pparts,
+			 struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
 
 #define mtd_for_each_device(mtd)			\
 	for ((mtd) = __mtd_next_device(0);		\
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 70fa70a..3014933 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -694,7 +694,7 @@
  * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
  * are changing this array!
  */
-static const char *default_mtd_part_types[] = {
+static const char * const default_mtd_part_types[] = {
 	"cmdlinepart",
 	"ofpart",
 	NULL
@@ -720,7 +720,7 @@
  * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
  */
-int parse_mtd_partitions(struct mtd_info *master, const char **types,
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
 			 struct mtd_partition **pparts,
 			 struct mtd_part_parser_data *data)
 {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 81bf5e5..a60f6c1 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -41,14 +41,6 @@
 	tristate
 	default n
 
-config MTD_NAND_MUSEUM_IDS
-	bool "Enable chip ids for obsolete ancient NAND devices"
-	default n
-	help
-	  Enable this option only when your board has first generation
-	  NAND chips (page size 256 byte, erase size 4-8KiB). The IDs
-	  of these chips were reused by later, larger chips.
-
 config MTD_NAND_DENALI
         tristate "Support Denali NAND controller"
         help
@@ -81,15 +73,9 @@
           scratch register here to enable this feature. On Intel Moorestown
           boards, the scratch register is at 0xFF108018.
 
-config MTD_NAND_H1900
-	tristate "iPAQ H1900 flash"
-	depends on ARCH_PXA && BROKEN
-	help
-	  This enables the driver for the iPAQ h1900 flash.
-
 config MTD_NAND_GPIO
 	tristate "GPIO NAND Flash driver"
-	depends on GENERIC_GPIO && ARM
+	depends on GPIOLIB && ARM
 	help
 	  This enables a GPIO based NAND flash driver.
 
@@ -201,22 +187,6 @@
 
 	  If unsure, say N.
 
-config MTD_NAND_RTC_FROM4
-	tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)"
-	depends on SH_SOLUTION_ENGINE
-	select REED_SOLOMON
-	select REED_SOLOMON_DEC8
-	select BITREVERSE
-	help
-	  This enables the driver for the Renesas Technology AG-AND
-	  flash interface board (FROM_BOARD4)
-
-config MTD_NAND_PPCHAMELEONEVB
-	tristate "NAND Flash device on PPChameleonEVB board"
-	depends on PPCHAMELEONEVB && BROKEN
-	help
-	  This enables the NAND flash driver on the PPChameleon EVB Board.
-
 config MTD_NAND_S3C2410
 	tristate "NAND Flash support for Samsung S3C SoCs"
 	depends on ARCH_S3C24XX || ARCH_S3C64XX
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d76d912..bb81891 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -15,14 +15,11 @@
 obj-$(CONFIG_MTD_NAND_DENALI_DT)	+= denali_dt.o
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
 obj-$(CONFIG_MTD_NAND_BF5XX)		+= bf5xx_nand.o
-obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB)	+= ppchameleonevb.o
 obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
 obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
 obj-$(CONFIG_MTD_NAND_DISKONCHIP)	+= diskonchip.o
 obj-$(CONFIG_MTD_NAND_DOCG4)		+= docg4.o
 obj-$(CONFIG_MTD_NAND_FSMC)		+= fsmc_nand.o
-obj-$(CONFIG_MTD_NAND_H1900)		+= h1910.o
-obj-$(CONFIG_MTD_NAND_RTC_FROM4)	+= rtc_from4.o
 obj-$(CONFIG_MTD_NAND_SHARPSL)		+= sharpsl.o
 obj-$(CONFIG_MTD_NAND_NANDSIM)		+= nandsim.o
 obj-$(CONFIG_MTD_NAND_CS553X)		+= cs553x_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ffcbcca..2d23d29 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1737,20 +1737,7 @@
 	},
 };
 
-static int __init atmel_nand_init(void)
-{
-	return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
-}
-
-
-static void __exit atmel_nand_exit(void)
-{
-	platform_driver_unregister(&atmel_nand_driver);
-}
-
-
-module_init(atmel_nand_init);
-module_exit(atmel_nand_exit);
+module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rick Bronson");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4271e94..776df36 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -874,21 +874,7 @@
 	},
 };
 
-static int __init bf5xx_nand_init(void)
-{
-	printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n",
-		DRV_DESC, DRV_VERSION);
-
-	return platform_driver_register(&bf5xx_nand_driver);
-}
-
-static void __exit bf5xx_nand_exit(void)
-{
-	platform_driver_unregister(&bf5xx_nand_driver);
-}
-
-module_init(bf5xx_nand_init);
-module_exit(bf5xx_nand_exit);
+module_platform_driver(bf5xx_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRV_AUTHOR);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 010d612..c34985a5 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -303,13 +303,7 @@
 	case NAND_CMD_SEQIN:
 	case NAND_CMD_RNDIN:
 	case NAND_CMD_STATUS:
-	case NAND_CMD_DEPLETE1:
 	case NAND_CMD_RNDOUT:
-	case NAND_CMD_STATUS_ERROR:
-	case NAND_CMD_STATUS_ERROR0:
-	case NAND_CMD_STATUS_ERROR1:
-	case NAND_CMD_STATUS_ERROR2:
-	case NAND_CMD_STATUS_ERROR3:
 		cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
 		return;
 	}
@@ -536,8 +530,8 @@
 }
 
 static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf, int oob_required, int page,
-				int cached, int raw)
+			uint32_t offset, int data_len, const uint8_t *buf,
+			int oob_required, int page, int cached, int raw)
 {
 	int status;
 
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 94e17af..c3e15a5 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -34,6 +34,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/slab.h>
 #include <linux/of_device.h>
+#include <linux/of.h>
 
 #include <linux/platform_data/mtd-davinci.h>
 #include <linux/platform_data/mtd-davinci-aemif.h>
@@ -577,7 +578,6 @@
 	return pdev->dev.platform_data;
 }
 #else
-#define davinci_nand_of_match NULL
 static struct davinci_nand_pdata
 	*nand_davinci_get_pdata(struct platform_device *pdev)
 {
@@ -878,22 +878,12 @@
 	.driver		= {
 		.name	= "davinci_nand",
 		.owner	= THIS_MODULE,
-		.of_match_table = davinci_nand_of_match,
+		.of_match_table = of_match_ptr(davinci_nand_of_match),
 	},
 };
 MODULE_ALIAS("platform:davinci_nand");
 
-static int __init nand_davinci_init(void)
-{
-	return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe);
-}
-module_init(nand_davinci_init);
-
-static void __exit nand_davinci_exit(void)
-{
-	platform_driver_unregister(&nand_davinci_driver);
-}
-module_exit(nand_davinci_exit);
+module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 546f8cb..9253024 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -42,7 +42,7 @@
 	}
 
 	ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
-	if (!res)
+	if (!ptr)
 		dev_err(dev, "ioremap_nocache of %s failed!", res->name);
 
 	return ptr;
@@ -90,7 +90,7 @@
 	denali->irq = platform_get_irq(ofdev, 0);
 	if (denali->irq < 0) {
 		dev_err(&ofdev->dev, "no irq defined\n");
-		return -ENXIO;
+		return denali->irq;
 	}
 
 	denali->flash_reg = request_and_map(&ofdev->dev, denali_reg);
@@ -146,21 +146,11 @@
 	.driver		= {
 		.name	= "denali-nand-dt",
 		.owner	= THIS_MODULE,
-		.of_match_table	= of_match_ptr(denali_nand_dt_ids),
+		.of_match_table	= denali_nand_dt_ids,
 	},
 };
 
-static int __init denali_init_dt(void)
-{
-	return platform_driver_register(&denali_dt_driver);
-}
-module_init(denali_init_dt);
-
-static void __exit denali_exit_dt(void)
-{
-	platform_driver_unregister(&denali_dt_driver);
-}
-module_exit(denali_exit_dt);
+module_platform_driver(denali_dt_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 18fa448..fa25e7a 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1397,18 +1397,7 @@
 	.remove		= __exit_p(cleanup_docg4),
 };
 
-static int __init docg4_init(void)
-{
-	return platform_driver_probe(&docg4_driver, probe_docg4);
-}
-
-static void __exit docg4_exit(void)
-{
-	platform_driver_unregister(&docg4_driver);
-}
-
-module_init(docg4_init);
-module_exit(docg4_exit);
+module_platform_driver_probe(docg4_driver, probe_docg4);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mike Dunn");
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 05ba3f0..911e243 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -1235,18 +1235,7 @@
 	},
 };
 
-static int __init fsmc_nand_init(void)
-{
-	return platform_driver_probe(&fsmc_nand_driver,
-				     fsmc_nand_probe);
-}
-module_init(fsmc_nand_init);
-
-static void __exit fsmc_nand_exit(void)
-{
-	platform_driver_unregister(&fsmc_nand_driver);
-}
-module_exit(fsmc_nand_exit);
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index e789e3f..89065dd 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -190,7 +190,6 @@
 	return r;
 }
 #else /* CONFIG_OF */
-#define gpio_nand_id_table NULL
 static inline int gpio_nand_get_config_of(const struct device *dev,
 					  struct gpio_nand_platdata *plat)
 {
@@ -259,8 +258,6 @@
 	if (gpio_is_valid(gpiomtd->plat.gpio_rdy))
 		gpio_free(gpiomtd->plat.gpio_rdy);
 
-	kfree(gpiomtd);
-
 	return 0;
 }
 
@@ -297,7 +294,7 @@
 	if (!res0)
 		return -EINVAL;
 
-	gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
+	gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL);
 	if (gpiomtd == NULL) {
 		dev_err(&dev->dev, "failed to create NAND MTD\n");
 		return -ENOMEM;
@@ -412,7 +409,6 @@
 	iounmap(gpiomtd->nand_chip.IO_ADDR_R);
 	release_mem_region(res0->start, resource_size(res0));
 err_map:
-	kfree(gpiomtd);
 	return ret;
 }
 
@@ -421,7 +417,7 @@
 	.remove		= gpio_nand_remove,
 	.driver		= {
 		.name	= "gpio-nand",
-		.of_match_table = gpio_nand_id_table,
+		.of_match_table = of_match_ptr(gpio_nand_id_table),
 	},
 };
 
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
deleted file mode 100644
index 50166e9..0000000
--- a/drivers/mtd/nand/h1910.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *  drivers/mtd/nand/h1910.c
- *
- *  Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
- *
- *  Derived from drivers/mtd/nand/edb7312.c
- *       Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
- *   a 128Mibit (16MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/sizes.h>
-#include <mach/h1900-gpio.h>
-#include <mach/ipaq.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *h1910_nand_mtd = NULL;
-
-/*
- * Module stuff
- */
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
-      {name:"h1910 NAND Flash",
-	      offset:0,
-      size:16 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- *	hardware specific access to control-lines
- *
- *	NAND_NCE: bit 0 - don't care
- *	NAND_CLE: bit 1 - address bit 2
- *	NAND_ALE: bit 2 - address bit 3
- */
-static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
-			    unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd->priv;
-
-	if (cmd != NAND_CMD_NONE)
-		writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
-}
-
-/*
- *	read device ready pin
- */
-#if 0
-static int h1910_device_ready(struct mtd_info *mtd)
-{
-	return (GPLR(55) & GPIO_bit(55));
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init h1910_init(void)
-{
-	struct nand_chip *this;
-	void __iomem *nandaddr;
-
-	if (!machine_is_h1900())
-		return -ENODEV;
-
-	nandaddr = ioremap(0x08000000, 0x1000);
-	if (!nandaddr) {
-		printk("Failed to ioremap nand flash.\n");
-		return -ENOMEM;
-	}
-
-	/* Allocate memory for MTD device structure and private data */
-	h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!h1910_nand_mtd) {
-		printk("Unable to allocate h1910 NAND MTD device structure.\n");
-		iounmap((void *)nandaddr);
-		return -ENOMEM;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&h1910_nand_mtd[1]);
-
-	/* Initialize structures */
-	memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
-	/* Link the private data with the MTD structure */
-	h1910_nand_mtd->priv = this;
-	h1910_nand_mtd->owner = THIS_MODULE;
-
-	/*
-	 * Enable VPEN
-	 */
-	GPSR(37) = GPIO_bit(37);
-
-	/* insert callbacks */
-	this->IO_ADDR_R = nandaddr;
-	this->IO_ADDR_W = nandaddr;
-	this->cmd_ctrl = h1910_hwcontrol;
-	this->dev_ready = NULL;	/* unknown whether that was correct or not so we will just do it like this */
-	/* 15 us command delay time */
-	this->chip_delay = 50;
-	this->ecc.mode = NAND_ECC_SOFT;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(h1910_nand_mtd, 1)) {
-		printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
-		kfree(h1910_nand_mtd);
-		iounmap((void *)nandaddr);
-		return -ENXIO;
-	}
-
-	/* Register the partitions */
-	mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
-				  NUM_PARTITIONS);
-
-	/* Return happy */
-	return 0;
-}
-
-module_init(h1910_init);
-
-/*
- * Clean up routine
- */
-static void __exit h1910_cleanup(void)
-{
-	struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];
-
-	/* Release resources, unregister device */
-	nand_release(h1910_nand_mtd);
-
-	/* Release io resource */
-	iounmap((void *)this->IO_ADDR_W);
-
-	/* Free the MTD device structure */
-	kfree(h1910_nand_mtd);
-}
-
-module_exit(h1910_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
-MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 0ca22ae..a94facb 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -540,8 +540,8 @@
 }
 
 static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			      const uint8_t *buf, int oob_required, int page,
-			      int cached, int raw)
+			uint32_t offset, int data_len, const uint8_t *buf,
+			int oob_required, int page, int cached, int raw)
 {
 	int res;
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 42c6392..dfcd0a5 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4,7 +4,6 @@
  *  Overview:
  *   This is the generic MTD driver for NAND flash devices. It should be
  *   capable of working with almost all NAND chips currently available.
- *   Basic support for AG-AND chips is provided.
  *
  *	Additional technical information is available on
  *	http://www.linux-mtd.infradead.org/doc/nand.html
@@ -22,8 +21,6 @@
  *	Enable cached programming for 2k page size chips
  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
  *	if we have HW ECC support.
- *	The AG-AND chips have nice features for speed improvement,
- *	which are not supported yet. Read / program 4 pages in one go.
  *	BBT table is not serialized, has to be fixed
  *
  * This program is free software; you can redistribute it and/or modify
@@ -515,7 +512,7 @@
  * @page_addr: the page address for this command, -1 if none
  *
  * Send command to NAND device. This function is used for small page devices
- * (256/512 Bytes per page).
+ * (512 Bytes per page).
  */
 static void nand_command(struct mtd_info *mtd, unsigned int command,
 			 int column, int page_addr)
@@ -631,8 +628,7 @@
 	}
 
 	/* Command latch cycle */
-	chip->cmd_ctrl(mtd, command & 0xff,
-		       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 
 	if (column != -1 || page_addr != -1) {
 		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -671,16 +667,6 @@
 	case NAND_CMD_SEQIN:
 	case NAND_CMD_RNDIN:
 	case NAND_CMD_STATUS:
-	case NAND_CMD_DEPLETE1:
-		return;
-
-	case NAND_CMD_STATUS_ERROR:
-	case NAND_CMD_STATUS_ERROR0:
-	case NAND_CMD_STATUS_ERROR1:
-	case NAND_CMD_STATUS_ERROR2:
-	case NAND_CMD_STATUS_ERROR3:
-		/* Read error status commands require only a short delay */
-		udelay(chip->chip_delay);
 		return;
 
 	case NAND_CMD_RESET:
@@ -836,10 +822,7 @@
 	 */
 	ndelay(100);
 
-	if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
-		chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
-	else
-		chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 
 	if (in_interrupt() || oops_in_progress)
 		panic_nand_wait(mtd, chip, timeo);
@@ -1127,7 +1110,7 @@
 }
 
 /**
- * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
  * @mtd: mtd info structure
  * @chip: nand chip info structure
  * @data_offs: offset of requested data within the page
@@ -1995,6 +1978,67 @@
 	return 0;
 }
 
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @offset:	column address of subpage within the page
+ * @data_len:	data length
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+				struct nand_chip *chip, uint32_t offset,
+				uint32_t data_len, const uint8_t *data_buf,
+				int oob_required)
+{
+	uint8_t *oob_buf  = chip->oob_poi;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	int ecc_size      = chip->ecc.size;
+	int ecc_bytes     = chip->ecc.bytes;
+	int ecc_steps     = chip->ecc.steps;
+	uint32_t *eccpos  = chip->ecc.layout->eccpos;
+	uint32_t start_step = offset / ecc_size;
+	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
+	int oob_bytes       = mtd->oobsize / ecc_steps;
+	int step, i;
+
+	for (step = 0; step < ecc_steps; step++) {
+		/* configure controller for WRITE access */
+		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+		/* write data (untouched subpages already masked by 0xFF) */
+		chip->write_buf(mtd, data_buf, ecc_size);
+
+		/* mask ECC of un-touched subpages by padding 0xFF */
+		if ((step < start_step) || (step > end_step))
+			memset(ecc_calc, 0xff, ecc_bytes);
+		else
+			chip->ecc.calculate(mtd, data_buf, ecc_calc);
+
+		/* mask OOB of un-touched subpages by padding 0xFF */
+		/* if oob_required, preserve OOB metadata of written subpage */
+		if (!oob_required || (step < start_step) || (step > end_step))
+			memset(oob_buf, 0xff, oob_bytes);
+
+		data_buf += ecc_size;
+		ecc_calc += ecc_bytes;
+		oob_buf  += oob_bytes;
+	}
+
+	/* copy calculated ECC for the whole page into the OOB buffer; */
+	/* this includes the masked value (0xFF) for unwritten subpages */
+	ecc_calc = chip->buffers->ecccalc;
+	for (i = 0; i < chip->ecc.total; i++)
+		chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+	/* write OOB buffer to NAND device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+
 /**
  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
  * @mtd: mtd info structure
@@ -2047,6 +2091,8 @@
  * nand_write_page - [REPLACEABLE] write one page
  * @mtd: MTD device structure
  * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
  * @buf: the data to write
  * @oob_required: must write chip->oob_poi to OOB
  * @page: page number to write
@@ -2054,15 +2100,25 @@
  * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			   const uint8_t *buf, int oob_required, int page,
-			   int cached, int raw)
+		uint32_t offset, int data_len, const uint8_t *buf,
+		int oob_required, int page, int cached, int raw)
 {
-	int status;
+	int status, subpage;
+
+	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+		chip->ecc.write_subpage)
+		subpage = offset || (data_len < mtd->writesize);
+	else
+		subpage = 0;
 
 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
-		status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
+		status = chip->ecc.write_page_raw(mtd, chip, buf,
+							oob_required);
+	else if (subpage)
+		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+							 buf, oob_required);
 	else
 		status = chip->ecc.write_page(mtd, chip, buf, oob_required);
 
@@ -2075,7 +2131,7 @@
 	 */
 	cached = 0;
 
-	if (!cached || !(chip->options & NAND_CACHEPRG)) {
+	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
 
 		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 		status = chip->waitfunc(mtd, chip);
@@ -2176,7 +2232,7 @@
 
 	uint8_t *oob = ops->oobbuf;
 	uint8_t *buf = ops->datbuf;
-	int ret, subpage;
+	int ret;
 	int oob_required = oob ? 1 : 0;
 
 	ops->retlen = 0;
@@ -2191,10 +2247,6 @@
 	}
 
 	column = to & (mtd->writesize - 1);
-	subpage = column || (writelen & (mtd->writesize - 1));
-
-	if (subpage && oob)
-		return -EINVAL;
 
 	chipnr = (int)(to >> chip->chip_shift);
 	chip->select_chip(mtd, chipnr);
@@ -2243,9 +2295,9 @@
 			/* We still need to erase leftover OOB data */
 			memset(chip->oob_poi, 0xff, mtd->oobsize);
 		}
-
-		ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
-				       cached, (ops->mode == MTD_OPS_RAW));
+		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+					oob_required, page, cached,
+					(ops->mode == MTD_OPS_RAW));
 		if (ret)
 			break;
 
@@ -2481,24 +2533,6 @@
 }
 
 /**
- * multi_erase_cmd - [GENERIC] AND specific block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * AND multi block erase command function. Erase 4 consecutive blocks.
- */
-static void multi_erase_cmd(struct mtd_info *mtd, int page)
-{
-	struct nand_chip *chip = mtd->priv;
-	/* Send commands to erase a block */
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-}
-
-/**
  * nand_erase - [MTD Interface] erase block(s)
  * @mtd: MTD device structure
  * @instr: erase instruction
@@ -2510,7 +2544,6 @@
 	return nand_erase_nand(mtd, instr, 0);
 }
 
-#define BBT_PAGE_MASK	0xffffff3f
 /**
  * nand_erase_nand - [INTERN] erase block(s)
  * @mtd: MTD device structure
@@ -2524,8 +2557,6 @@
 {
 	int page, status, pages_per_block, ret, chipnr;
 	struct nand_chip *chip = mtd->priv;
-	loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0};
-	unsigned int bbt_masked_page = 0xffffffff;
 	loff_t len;
 
 	pr_debug("%s: start = 0x%012llx, len = %llu\n",
@@ -2556,15 +2587,6 @@
 		goto erase_exit;
 	}
 
-	/*
-	 * If BBT requires refresh, set the BBT page mask to see if the BBT
-	 * should be rewritten. Otherwise the mask is set to 0xffffffff which
-	 * can not be matched. This is also done when the bbt is actually
-	 * erased to avoid recursive updates.
-	 */
-	if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
-		bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
-
 	/* Loop through the pages */
 	len = instr->len;
 
@@ -2610,15 +2632,6 @@
 			goto erase_exit;
 		}
 
-		/*
-		 * If BBT requires refresh, set the BBT rewrite flag to the
-		 * page being erased.
-		 */
-		if (bbt_masked_page != 0xffffffff &&
-		    (page & BBT_PAGE_MASK) == bbt_masked_page)
-			    rewrite_bbt[chipnr] =
-					((loff_t)page << chip->page_shift);
-
 		/* Increment page address and decrement length */
 		len -= (1 << chip->phys_erase_shift);
 		page += pages_per_block;
@@ -2628,15 +2641,6 @@
 			chipnr++;
 			chip->select_chip(mtd, -1);
 			chip->select_chip(mtd, chipnr);
-
-			/*
-			 * If BBT requires refresh and BBT-PERCHIP, set the BBT
-			 * page mask to see if this BBT should be rewritten.
-			 */
-			if (bbt_masked_page != 0xffffffff &&
-			    (chip->bbt_td->options & NAND_BBT_PERCHIP))
-				bbt_masked_page = chip->bbt_td->pages[chipnr] &
-					BBT_PAGE_MASK;
 		}
 	}
 	instr->state = MTD_ERASE_DONE;
@@ -2653,23 +2657,6 @@
 	if (!ret)
 		mtd_erase_callback(instr);
 
-	/*
-	 * If BBT requires refresh and erase was successful, rewrite any
-	 * selected bad block tables.
-	 */
-	if (bbt_masked_page == 0xffffffff || ret)
-		return ret;
-
-	for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
-		if (!rewrite_bbt[chipnr])
-			continue;
-		/* Update the BBT for chip */
-		pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
-				__func__, chipnr, rewrite_bbt[chipnr],
-				chip->bbt_td->pages[chipnr]);
-		nand_update_bbt(mtd, rewrite_bbt[chipnr]);
-	}
-
 	/* Return more or less happy */
 	return ret;
 }
@@ -2905,8 +2892,6 @@
 		chip->onfi_version = 20;
 	else if (val & (1 << 1))
 		chip->onfi_version = 10;
-	else
-		chip->onfi_version = 0;
 
 	if (!chip->onfi_version) {
 		pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
@@ -3171,6 +3156,30 @@
 		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 }
 
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+	return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+		   struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+	if (!strncmp(type->id, id_data, type->id_len)) {
+		mtd->writesize = type->pagesize;
+		mtd->erasesize = type->erasesize;
+		mtd->oobsize = type->oobsize;
+
+		chip->cellinfo = id_data[2];
+		chip->chipsize = (uint64_t)type->chipsize << 20;
+		chip->options |= type->options;
+
+		*busw = type->options & NAND_BUSWIDTH_16;
+
+		return true;
+	}
+	return false;
+}
+
 /*
  * Get the flash and manufacturer id and lookup if the type is supported.
  */
@@ -3222,9 +3231,14 @@
 	if (!type)
 		type = nand_flash_ids;
 
-	for (; type->name != NULL; type++)
-		if (*dev_id == type->id)
-			break;
+	for (; type->name != NULL; type++) {
+		if (is_full_id_nand(type)) {
+			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+				goto ident_done;
+		} else if (*dev_id == type->dev_id) {
+			break;
+		}
+	}
 
 	chip->onfi_version = 0;
 	if (!type->name || !type->pagesize) {
@@ -3302,12 +3316,7 @@
 	}
 
 	chip->badblockbits = 8;
-
-	/* Check for AND chips with 4 page planes */
-	if (chip->options & NAND_4PAGE_ARRAY)
-		chip->erase_cmd = multi_erase_cmd;
-	else
-		chip->erase_cmd = single_erase_cmd;
+	chip->erase_cmd = single_erase_cmd;
 
 	/* Do not replace user supplied command function! */
 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
@@ -3474,6 +3483,10 @@
 			chip->ecc.read_oob = nand_read_oob_std;
 		if (!chip->ecc.write_oob)
 			chip->ecc.write_oob = nand_write_oob_std;
+		if (!chip->ecc.read_subpage)
+			chip->ecc.read_subpage = nand_read_subpage;
+		if (!chip->ecc.write_subpage)
+			chip->ecc.write_subpage = nand_write_subpage_hwecc;
 
 	case NAND_ECC_HW_SYNDROME:
 		if ((!chip->ecc.calculate || !chip->ecc.correct ||
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 916d6e9..2672643 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1240,15 +1240,6 @@
  */
 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
 
-static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
-
-static struct nand_bbt_descr agand_flashbased = {
-	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
-	.offs = 0x20,
-	.len = 6,
-	.pattern = scan_agand_pattern
-};
-
 /* Generic flash bbt descriptors */
 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
 static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
@@ -1333,22 +1324,6 @@
 {
 	struct nand_chip *this = mtd->priv;
 
-	/*
-	 * Default for AG-AND. We must use a flash based bad block table as the
-	 * devices have factory marked _good_ blocks. Erasing those blocks
-	 * leads to loss of the good / bad information, so we _must_ store this
-	 * information in a good / bad table during startup.
-	 */
-	if (this->options & NAND_IS_AND) {
-		/* Use the default pattern descriptors */
-		if (!this->bbt_td) {
-			this->bbt_td = &bbt_main_descr;
-			this->bbt_md = &bbt_mirror_descr;
-		}
-		this->bbt_options |= NAND_BBT_USE_FLASH;
-		return nand_scan_bbt(mtd, &agand_flashbased);
-	}
-
 	/* Is a flash based bad block table requested? */
 	if (this->bbt_options & NAND_BBT_USE_FLASH) {
 		/* Use the default pattern descriptors */
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 9c61238..683813a 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -10,163 +10,153 @@
  */
 #include <linux/module.h>
 #include <linux/mtd/nand.h>
-/*
-*	Chip ID list
-*
-*	Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
-*	options
-*
-*	Pagesize; 0, 256, 512
-*	0	get this information from the extended chip ID
-+	256	256 Byte page size
-*	512	512 Byte page size
-*/
-struct nand_flash_dev nand_flash_ids[] = {
-#define SP_OPTIONS NAND_NEED_READRDY
-#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+#include <linux/sizes.h>
 
-#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
-	{"NAND 1MiB 5V 8-bit",		0x6e, 256, 1, 0x1000, SP_OPTIONS},
-	{"NAND 2MiB 5V 8-bit",		0x64, 256, 2, 0x1000, SP_OPTIONS},
-	{"NAND 4MiB 5V 8-bit",		0x6b, 512, 4, 0x2000, SP_OPTIONS},
-	{"NAND 1MiB 3,3V 8-bit",	0xe8, 256, 1, 0x1000, SP_OPTIONS},
-	{"NAND 1MiB 3,3V 8-bit",	0xec, 256, 1, 0x1000, SP_OPTIONS},
-	{"NAND 2MiB 3,3V 8-bit",	0xea, 256, 2, 0x1000, SP_OPTIONS},
-	{"NAND 4MiB 3,3V 8-bit",	0xd5, 512, 4, 0x2000, SP_OPTIONS},
-	{"NAND 4MiB 3,3V 8-bit",	0xe3, 512, 4, 0x2000, SP_OPTIONS},
-	{"NAND 4MiB 3,3V 8-bit",	0xe5, 512, 4, 0x2000, SP_OPTIONS},
-	{"NAND 8MiB 3,3V 8-bit",	0xd6, 512, 8, 0x2000, SP_OPTIONS},
-
-	{"NAND 8MiB 1,8V 8-bit",	0x39, 512, 8, 0x2000, SP_OPTIONS},
-	{"NAND 8MiB 3,3V 8-bit",	0xe6, 512, 8, 0x2000, SP_OPTIONS},
-	{"NAND 8MiB 1,8V 16-bit",	0x49, 512, 8, 0x2000, SP_OPTIONS16},
-	{"NAND 8MiB 3,3V 16-bit",	0x59, 512, 8, 0x2000, SP_OPTIONS16},
-#endif
-
-	{"NAND 16MiB 1,8V 8-bit",	0x33, 512, 16, 0x4000, SP_OPTIONS},
-	{"NAND 16MiB 3,3V 8-bit",	0x73, 512, 16, 0x4000, SP_OPTIONS},
-	{"NAND 16MiB 1,8V 16-bit",	0x43, 512, 16, 0x4000, SP_OPTIONS16},
-	{"NAND 16MiB 3,3V 16-bit",	0x53, 512, 16, 0x4000, SP_OPTIONS16},
-
-	{"NAND 32MiB 1,8V 8-bit",	0x35, 512, 32, 0x4000, SP_OPTIONS},
-	{"NAND 32MiB 3,3V 8-bit",	0x75, 512, 32, 0x4000, SP_OPTIONS},
-	{"NAND 32MiB 1,8V 16-bit",	0x45, 512, 32, 0x4000, SP_OPTIONS16},
-	{"NAND 32MiB 3,3V 16-bit",	0x55, 512, 32, 0x4000, SP_OPTIONS16},
-
-	{"NAND 64MiB 1,8V 8-bit",	0x36, 512, 64, 0x4000, SP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0x76, 512, 64, 0x4000, SP_OPTIONS},
-	{"NAND 64MiB 1,8V 16-bit",	0x46, 512, 64, 0x4000, SP_OPTIONS16},
-	{"NAND 64MiB 3,3V 16-bit",	0x56, 512, 64, 0x4000, SP_OPTIONS16},
-
-	{"NAND 128MiB 1,8V 8-bit",	0x78, 512, 128, 0x4000, SP_OPTIONS},
-	{"NAND 128MiB 1,8V 8-bit",	0x39, 512, 128, 0x4000, SP_OPTIONS},
-	{"NAND 128MiB 3,3V 8-bit",	0x79, 512, 128, 0x4000, SP_OPTIONS},
-	{"NAND 128MiB 1,8V 16-bit",	0x72, 512, 128, 0x4000, SP_OPTIONS16},
-	{"NAND 128MiB 1,8V 16-bit",	0x49, 512, 128, 0x4000, SP_OPTIONS16},
-	{"NAND 128MiB 3,3V 16-bit",	0x74, 512, 128, 0x4000, SP_OPTIONS16},
-	{"NAND 128MiB 3,3V 16-bit",	0x59, 512, 128, 0x4000, SP_OPTIONS16},
-
-	{"NAND 256MiB 3,3V 8-bit",	0x71, 512, 256, 0x4000, SP_OPTIONS},
-
-	/*
-	 * These are the new chips with large page size. The pagesize and the
-	 * erasesize is determined from the extended id bytes
-	 */
 #define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
 #define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
 
-	/* 512 Megabit */
-	{"NAND 64MiB 1,8V 8-bit",	0xA2, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 1,8V 8-bit",	0xA0, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0xF2, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0xD0, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 3,3V 8-bit",	0xF0, 0,  64, 0, LP_OPTIONS},
-	{"NAND 64MiB 1,8V 16-bit",	0xB2, 0,  64, 0, LP_OPTIONS16},
-	{"NAND 64MiB 1,8V 16-bit",	0xB0, 0,  64, 0, LP_OPTIONS16},
-	{"NAND 64MiB 3,3V 16-bit",	0xC2, 0,  64, 0, LP_OPTIONS16},
-	{"NAND 64MiB 3,3V 16-bit",	0xC0, 0,  64, 0, LP_OPTIONS16},
-
-	/* 1 Gigabit */
-	{"NAND 128MiB 1,8V 8-bit",	0xA1, 0, 128, 0, LP_OPTIONS},
-	{"NAND 128MiB 3,3V 8-bit",	0xF1, 0, 128, 0, LP_OPTIONS},
-	{"NAND 128MiB 3,3V 8-bit",	0xD1, 0, 128, 0, LP_OPTIONS},
-	{"NAND 128MiB 1,8V 16-bit",	0xB1, 0, 128, 0, LP_OPTIONS16},
-	{"NAND 128MiB 3,3V 16-bit",	0xC1, 0, 128, 0, LP_OPTIONS16},
-	{"NAND 128MiB 1,8V 16-bit",     0xAD, 0, 128, 0, LP_OPTIONS16},
-
-	/* 2 Gigabit */
-	{"NAND 256MiB 1,8V 8-bit",	0xAA, 0, 256, 0, LP_OPTIONS},
-	{"NAND 256MiB 3,3V 8-bit",	0xDA, 0, 256, 0, LP_OPTIONS},
-	{"NAND 256MiB 1,8V 16-bit",	0xBA, 0, 256, 0, LP_OPTIONS16},
-	{"NAND 256MiB 3,3V 16-bit",	0xCA, 0, 256, 0, LP_OPTIONS16},
-
-	/* 4 Gigabit */
-	{"NAND 512MiB 1,8V 8-bit",	0xAC, 0, 512, 0, LP_OPTIONS},
-	{"NAND 512MiB 3,3V 8-bit",	0xDC, 0, 512, 0, LP_OPTIONS},
-	{"NAND 512MiB 1,8V 16-bit",	0xBC, 0, 512, 0, LP_OPTIONS16},
-	{"NAND 512MiB 3,3V 16-bit",	0xCC, 0, 512, 0, LP_OPTIONS16},
-
-	/* 8 Gigabit */
-	{"NAND 1GiB 1,8V 8-bit",	0xA3, 0, 1024, 0, LP_OPTIONS},
-	{"NAND 1GiB 3,3V 8-bit",	0xD3, 0, 1024, 0, LP_OPTIONS},
-	{"NAND 1GiB 1,8V 16-bit",	0xB3, 0, 1024, 0, LP_OPTIONS16},
-	{"NAND 1GiB 3,3V 16-bit",	0xC3, 0, 1024, 0, LP_OPTIONS16},
-
-	/* 16 Gigabit */
-	{"NAND 2GiB 1,8V 8-bit",	0xA5, 0, 2048, 0, LP_OPTIONS},
-	{"NAND 2GiB 3,3V 8-bit",	0xD5, 0, 2048, 0, LP_OPTIONS},
-	{"NAND 2GiB 1,8V 16-bit",	0xB5, 0, 2048, 0, LP_OPTIONS16},
-	{"NAND 2GiB 3,3V 16-bit",	0xC5, 0, 2048, 0, LP_OPTIONS16},
-
-	/* 32 Gigabit */
-	{"NAND 4GiB 1,8V 8-bit",	0xA7, 0, 4096, 0, LP_OPTIONS},
-	{"NAND 4GiB 3,3V 8-bit",	0xD7, 0, 4096, 0, LP_OPTIONS},
-	{"NAND 4GiB 1,8V 16-bit",	0xB7, 0, 4096, 0, LP_OPTIONS16},
-	{"NAND 4GiB 3,3V 16-bit",	0xC7, 0, 4096, 0, LP_OPTIONS16},
-
-	/* 64 Gigabit */
-	{"NAND 8GiB 1,8V 8-bit",	0xAE, 0, 8192, 0, LP_OPTIONS},
-	{"NAND 8GiB 3,3V 8-bit",	0xDE, 0, 8192, 0, LP_OPTIONS},
-	{"NAND 8GiB 1,8V 16-bit",	0xBE, 0, 8192, 0, LP_OPTIONS16},
-	{"NAND 8GiB 3,3V 16-bit",	0xCE, 0, 8192, 0, LP_OPTIONS16},
-
-	/* 128 Gigabit */
-	{"NAND 16GiB 1,8V 8-bit",	0x1A, 0, 16384, 0, LP_OPTIONS},
-	{"NAND 16GiB 3,3V 8-bit",	0x3A, 0, 16384, 0, LP_OPTIONS},
-	{"NAND 16GiB 1,8V 16-bit",	0x2A, 0, 16384, 0, LP_OPTIONS16},
-	{"NAND 16GiB 3,3V 16-bit",	0x4A, 0, 16384, 0, LP_OPTIONS16},
-
-	/* 256 Gigabit */
-	{"NAND 32GiB 1,8V 8-bit",	0x1C, 0, 32768, 0, LP_OPTIONS},
-	{"NAND 32GiB 3,3V 8-bit",	0x3C, 0, 32768, 0, LP_OPTIONS},
-	{"NAND 32GiB 1,8V 16-bit",	0x2C, 0, 32768, 0, LP_OPTIONS16},
-	{"NAND 32GiB 3,3V 16-bit",	0x4C, 0, 32768, 0, LP_OPTIONS16},
-
-	/* 512 Gigabit */
-	{"NAND 64GiB 1,8V 8-bit",	0x1E, 0, 65536, 0, LP_OPTIONS},
-	{"NAND 64GiB 3,3V 8-bit",	0x3E, 0, 65536, 0, LP_OPTIONS},
-	{"NAND 64GiB 1,8V 16-bit",	0x2E, 0, 65536, 0, LP_OPTIONS16},
-	{"NAND 64GiB 3,3V 16-bit",	0x4E, 0, 65536, 0, LP_OPTIONS16},
-
-	/*
-	 * Renesas AND 1 Gigabit. Those chips do not support extended id and
-	 * have a strange page/block layout !  The chosen minimum erasesize is
-	 * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
-	 * planes 1 block = 2 pages, but due to plane arrangement the blocks
-	 * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
-	 * increase the eraseblock size so we chose a combined one which can be
-	 * erased in one go There are more speed improvements for reads and
-	 * writes possible, but not implemented now
-	 */
-	{"AND 128MiB 3,3V 8-bit",	0x01, 2048, 128, 0x4000,
-	 NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
-
-	{NULL,}
-};
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
 
 /*
-*	Manufacturer ID list
-*/
+ * The chip ID list:
+ *    name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+	/*
+	 * Some incompatible NAND chips share device IDs and so must be
+	 * listed by full ID. We list them first so that we can easily identify
+	 * the most specific match.
+	 */
+	{"TC58NVG2S0F 4G 3.3V 8-bit",
+		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
+	{"TC58NVG3S0F 8G 3.3V 8-bit",
+		{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+		  SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
+	{"TC58NVG5D2 32G 3.3V 8-bit",
+		{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+		  SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
+	{"TC58NVG6D2 64G 3.3V 8-bit",
+		{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+		  SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
+
+	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+	LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit",  0x33, 16, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit",  0x73, 16, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit",  0x35, 32, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit",  0x75, 32, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit",  0x36, 64, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit",  0x76, 64, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x78, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x39, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit",  0x79, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+	/*
+	 * These are the new chips with large page size. Their page size and
+	 * eraseblock size are determined from the extended ID bytes.
+	 */
+
+	/* 512 Megabit */
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA2,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF2,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xD0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0,  64, LP_OPTIONS16),
+
+	/* 1 Gigabit */
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit",  0xA1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xF1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xD1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+	/* 2 Gigabit */
+	EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit",  0xAA, 256, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit",  0xDA, 256, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+	/* 4 Gigabit */
+	EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit",  0xAC, 512, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit",  0xDC, 512, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+	/* 8 Gigabit */
+	EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit",  0xA3, 1024, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit",  0xD3, 1024, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+	/* 16 Gigabit */
+	EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit",  0xA5, 2048, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit",  0xD5, 2048, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+	/* 32 Gigabit */
+	EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit",  0xA7, 4096, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit",  0xD7, 4096, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+	/* 64 Gigabit */
+	EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit",  0xAE, 8192, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit",  0xDE, 8192, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+	/* 128 Gigabit */
+	EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit",  0x1A, 16384, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit",  0x3A, 16384, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+	/* 256 Gigabit */
+	EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit",  0x1C, 32768, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit",  0x3C, 32768, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+	/* 512 Gigabit */
+	EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit",  0x1E, 65536, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit",  0x3E, 65536, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+	{NULL}
+};
+
+/* Manufacturer IDs */
 struct nand_manufacturers nand_manuf_ids[] = {
 	{NAND_MFR_TOSHIBA, "Toshiba"},
 	{NAND_MFR_SAMSUNG, "Samsung"},
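The comment at the head of nand_flash_ids promises that full-ID entries precede the legacy one-byte entries so the most specific match wins; in the full-ID rows above, the trailing 8 is the ID length and the final column the OOB size. A minimal sketch of a lookup that honors that ordering, assuming the dev_id/id/id_len fields this series introduces (sketch_find_type() and its exact field spellings are illustrative, not the kernel's detect routine):

	/*
	 * Illustrative only: walk nand_flash_ids[] in table order, so a
	 * full 8-byte ID match is found before any legacy entry that
	 * happens to share the second ID byte.
	 */
	static struct nand_flash_dev *sketch_find_type(const u8 *id_data)
	{
		struct nand_flash_dev *type;

		for (type = nand_flash_ids; type->name; type++) {
			if (type->id_len) {
				/* Full-ID entry: all id_len bytes must match. */
				if (!memcmp(type->id, id_data, type->id_len))
					return type;
			} else if (id_data[1] == type->dev_id) {
				/* Legacy entry: match the device ID byte alone. */
				return type;
			}
		}
		return NULL;	/* fall back to extended-ID/ONFI decoding */
	}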
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 891c52a..cb38f3d 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -218,7 +218,6 @@
 #define STATE_CMD_READOOB      0x00000005 /* read OOB area */
 #define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
 #define STATE_CMD_STATUS       0x00000007 /* read status */
-#define STATE_CMD_STATUS_M     0x00000008 /* read multi-plane status (isn't implemented) */
 #define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
 #define STATE_CMD_READID       0x0000000A /* read ID */
 #define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
@@ -263,14 +262,13 @@
 #define NS_OPER_STATES   6  /* Maximum number of states in operation */
 
 #define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
-#define OPT_PAGE256      0x00000001 /* 256-byte  page chips */
 #define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
 #define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
 #define OPT_SMARTMEDIA   0x00000010 /* SmartMedia technology chips */
 #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
 #define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
 #define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
-#define OPT_SMALLPAGE    (OPT_PAGE256  | OPT_PAGE512)  /* 256 and 512-byte page chips */
+#define OPT_SMALLPAGE    (OPT_PAGE512) /* 512-byte page chips */
 
 /* Remove action bits from state */
 #define NS_STATE(x) ((x) & ~ACTION_MASK)
@@ -406,8 +404,6 @@
 	{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
 	/* Read status */
 	{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
-	/* Read multi-plane status */
-	{OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
 	/* Read ID */
 	{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
 	/* Large page devices read page */
@@ -699,10 +695,7 @@
 	ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
 	ns->options = 0;
 
-	if (ns->geom.pgsz == 256) {
-		ns->options |= OPT_PAGE256;
-	}
-	else if (ns->geom.pgsz == 512) {
+	if (ns->geom.pgsz == 512) {
 		ns->options |= OPT_PAGE512;
 		if (ns->busw == 8)
 			ns->options |= OPT_PAGE512_8BIT;
@@ -769,9 +762,9 @@
 	}
 
 	/* Detect how many ID bytes the NAND chip outputs */
-        for (i = 0; nand_flash_ids[i].name != NULL; i++) {
-                if (second_id_byte != nand_flash_ids[i].id)
-                        continue;
+	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+		if (second_id_byte != nand_flash_ids[i].dev_id)
+			continue;
 	}
 
 	if (ns->busw == 16)
@@ -1079,8 +1072,6 @@
 			return "STATE_CMD_ERASE1";
 		case STATE_CMD_STATUS:
 			return "STATE_CMD_STATUS";
-		case STATE_CMD_STATUS_M:
-			return "STATE_CMD_STATUS_M";
 		case STATE_CMD_SEQIN:
 			return "STATE_CMD_SEQIN";
 		case STATE_CMD_READID:
@@ -1145,7 +1136,6 @@
 	case NAND_CMD_RNDOUTSTART:
 		return 0;
 
-	case NAND_CMD_STATUS_MULTI:
 	default:
 		return 1;
 	}
@@ -1171,8 +1161,6 @@
 			return STATE_CMD_ERASE1;
 		case NAND_CMD_STATUS:
 			return STATE_CMD_STATUS;
-		case NAND_CMD_STATUS_MULTI:
-			return STATE_CMD_STATUS_M;
 		case NAND_CMD_SEQIN:
 			return STATE_CMD_SEQIN;
 		case NAND_CMD_READID:
@@ -2306,7 +2294,7 @@
 		nand->geom.idbytes = 2;
 	nand->regs.status = NS_STATUS_OK(nand);
 	nand->nxstate = STATE_UNKNOWN;
-	nand->options |= OPT_PAGE256; /* temporary value */
+	nand->options |= OPT_PAGE512; /* temporary value */
 	nand->ids[0] = first_id_byte;
 	nand->ids[1] = second_id_byte;
 	nand->ids[2] = third_id_byte;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index a619119..cd6be2e 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -177,15 +177,6 @@
 	case NAND_CMD_SEQIN:
 	case NAND_CMD_RNDIN:
 	case NAND_CMD_STATUS:
-	case NAND_CMD_DEPLETE1:
-		return;
-
-	case NAND_CMD_STATUS_ERROR:
-	case NAND_CMD_STATUS_ERROR0:
-	case NAND_CMD_STATUS_ERROR1:
-	case NAND_CMD_STATUS_ERROR2:
-	case NAND_CMD_STATUS_ERROR3:
-		udelay(chip->chip_delay);
 		return;
 
 	case NAND_CMD_RESET:
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 8e820dd..81b80af 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1023,9 +1023,9 @@
 	int status, state = this->state;
 
 	if (state == FL_ERASING)
-		timeo += (HZ * 400) / 1000;
+		timeo += msecs_to_jiffies(400);
 	else
-		timeo += (HZ * 20) / 1000;
+		timeo += msecs_to_jiffies(20);
 
 	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
 	while (time_before(jiffies, timeo)) {
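msecs_to_jiffies() expresses the same 400 ms and 20 ms budgets without open-coding (HZ * msec) / 1000, so the conversion stays correct for any HZ and rounds up instead of truncating. A minimal sketch of the resulting polling pattern (chip_ready() is a hypothetical status check, not a driver function):

	#include <linux/jiffies.h>

	unsigned long timeo = jiffies + msecs_to_jiffies(400);

	/* Poll until ready or until the 400 ms budget expires. */
	while (time_before(jiffies, timeo)) {
		if (chip_ready())	/* hypothetical */
			break;
		cond_resched();
	}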
@@ -1701,8 +1701,9 @@
 		elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		pdev = of_find_device_by_node(elm_node);
 		info->elm_dev = &pdev->dev;
-		elm_config(info->elm_dev, bch_type);
-		info->is_elm_used = true;
+
+		if (elm_config(info->elm_dev, bch_type) == 0)
+			info->is_elm_used = true;
 	}
 
 	if (info->is_elm_used && (mtd->writesize <= 4096)) {
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index cd72b92..8fbd002 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -231,18 +231,7 @@
 	},
 };
 
-static int __init orion_nand_init(void)
-{
-	return platform_driver_probe(&orion_nand_driver, orion_nand_probe);
-}
-
-static void __exit orion_nand_exit(void)
-{
-	platform_driver_unregister(&orion_nand_driver);
-}
-
-module_init(orion_nand_init);
-module_exit(orion_nand_exit);
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tzachi Perelstein");
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
deleted file mode 100644
index 0ddd90e..0000000
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- *  drivers/mtd/nand/ppchameleonevb.c
- *
- *  Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
- *
- *  Derived from drivers/mtd/nand/edb7312.c
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash devices found on the
- *   PPChameleon/PPChameleonEVB system.
- *   PPChameleon options (autodetected):
- *   - BA model: no NAND
- *   - ME model: 32MB (Samsung K9F5608U0B)
- *   - HI model: 128MB (Samsung K9F1G08UOM)
- *   PPChameleonEVB options:
- *   - 32MB (Samsung K9F5608U0B)
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <platforms/PPChameleonEVB.h>
-
-#undef USE_READY_BUSY_PIN
-#define USE_READY_BUSY_PIN
-/* see datasheets (tR) */
-#define NAND_BIG_DELAY_US		25
-#define NAND_SMALL_DELAY_US		10
-
-/* handy sizes */
-#define SZ_4M                           0x00400000
-#define NAND_SMALL_SIZE                 0x02000000
-#define NAND_MTD_NAME		"ppchameleon-nand"
-#define NAND_EVB_MTD_NAME	"ppchameleonevb-nand"
-
-/* GPIO pins used to drive NAND chip mounted on processor module */
-#define NAND_nCE_GPIO_PIN 		(0x80000000 >> 1)
-#define NAND_CLE_GPIO_PIN 		(0x80000000 >> 2)
-#define NAND_ALE_GPIO_PIN 		(0x80000000 >> 3)
-#define NAND_RB_GPIO_PIN 		(0x80000000 >> 4)
-/* GPIO pins used to drive NAND chip mounted on EVB */
-#define NAND_EVB_nCE_GPIO_PIN 	(0x80000000 >> 14)
-#define NAND_EVB_CLE_GPIO_PIN 	(0x80000000 >> 15)
-#define NAND_EVB_ALE_GPIO_PIN 	(0x80000000 >> 16)
-#define NAND_EVB_RB_GPIO_PIN 	(0x80000000 >> 31)
-
-/*
- * MTD structure for PPChameleonEVB board
- */
-static struct mtd_info *ppchameleon_mtd = NULL;
-static struct mtd_info *ppchameleonevb_mtd = NULL;
-
-/*
- * Module stuff
- */
-static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
-static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
-
-#ifdef MODULE
-module_param(ppchameleon_fio_pbase, ulong, 0);
-module_param(ppchameleonevb_fio_pbase, ulong, 0);
-#else
-__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
-__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
-#endif
-
-/*
- * Define static partitions for flash devices
- */
-static struct mtd_partition partition_info_hi[] = {
-      { .name = "PPChameleon HI Nand Flash",
-	.offset = 0,
-	.size = 128 * 1024 * 1024
-      }
-};
-
-static struct mtd_partition partition_info_me[] = {
-      { .name = "PPChameleon ME Nand Flash",
-	.offset = 0,
-	.size = 32 * 1024 * 1024
-      }
-};
-
-static struct mtd_partition partition_info_evb[] = {
-      { .name = "PPChameleonEVB Nand Flash",
-	.offset = 0,
-	.size = 32 * 1024 * 1024
-      }
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- *	hardware specific access to control-lines
- */
-static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd,
-				  unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd->priv;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
-		switch (cmd) {
-		case NAND_CTL_SETCLE:
-			MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
-			break;
-		case NAND_CTL_CLRCLE:
-			MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
-			break;
-		case NAND_CTL_SETALE:
-			MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
-			break;
-		case NAND_CTL_CLRALE:
-			MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
-			break;
-		case NAND_CTL_SETNCE:
-			MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
-			break;
-		case NAND_CTL_CLRNCE:
-			MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
-			break;
-		}
-	}
-	if (cmd != NAND_CMD_NONE)
-		writeb(cmd, chip->IO_ADDR_W);
-}
-
-static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd,
-				     unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd->priv;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-#error Missing headerfiles. No way to fix this. -tglx
-		switch (cmd) {
-		case NAND_CTL_SETCLE:
-			MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
-			break;
-		case NAND_CTL_CLRCLE:
-			MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
-			break;
-		case NAND_CTL_SETALE:
-			MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
-			break;
-		case NAND_CTL_CLRALE:
-			MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
-			break;
-		case NAND_CTL_SETNCE:
-			MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
-			break;
-		case NAND_CTL_CLRNCE:
-			MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
-			break;
-		}
-	}
-	if (cmd != NAND_CMD_NONE)
-		writeb(cmd, chip->IO_ADDR_W);
-}
-
-#ifdef USE_READY_BUSY_PIN
-/*
- *	read device ready pin
- */
-static int ppchameleon_device_ready(struct mtd_info *minfo)
-{
-	if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN)
-		return 1;
-	return 0;
-}
-
-static int ppchameleonevb_device_ready(struct mtd_info *minfo)
-{
-	if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
-		return 1;
-	return 0;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init ppchameleonevb_init(void)
-{
-	struct nand_chip *this;
-	void __iomem *ppchameleon_fio_base;
-	void __iomem *ppchameleonevb_fio_base;
-
-	/*********************************
-	* Processor module NAND (if any) *
-	*********************************/
-	/* Allocate memory for MTD device structure and private data */
-	ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!ppchameleon_mtd) {
-		printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
-		return -ENOMEM;
-	}
-
-	/* map physical address */
-	ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
-	if (!ppchameleon_fio_base) {
-		printk("ioremap PPChameleon NAND flash failed\n");
-		kfree(ppchameleon_mtd);
-		return -EIO;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&ppchameleon_mtd[1]);
-
-	/* Initialize structures */
-	memset(ppchameleon_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
-	/* Link the private data with the MTD structure */
-	ppchameleon_mtd->priv = this;
-	ppchameleon_mtd->owner = THIS_MODULE;
-
-	/* Initialize GPIOs */
-	/* Pin mapping for NAND chip */
-	/*
-	   CE   GPIO_01
-	   CLE  GPIO_02
-	   ALE  GPIO_03
-	   R/B  GPIO_04
-	 */
-	/* output select */
-	out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF);
-	/* three-state select */
-	out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF);
-	/* enable output driver */
-	out_be32((volatile unsigned *)GPIO0_TCR,
-		 in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
-	/* three-state select */
-	out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF);
-	/* high-impedecence */
-	out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
-	/* input select */
-	out_be32((volatile unsigned *)GPIO0_ISR1H,
-		 (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
-#endif
-
-	/* insert callbacks */
-	this->IO_ADDR_R = ppchameleon_fio_base;
-	this->IO_ADDR_W = ppchameleon_fio_base;
-	this->cmd_ctrl = ppchameleon_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
-	this->dev_ready = ppchameleon_device_ready;
-#endif
-	this->chip_delay = NAND_BIG_DELAY_US;
-	/* ECC mode */
-	this->ecc.mode = NAND_ECC_SOFT;
-
-	/* Scan to find existence of the device (it could not be mounted) */
-	if (nand_scan(ppchameleon_mtd, 1)) {
-		iounmap((void *)ppchameleon_fio_base);
-		ppchameleon_fio_base = NULL;
-		kfree(ppchameleon_mtd);
-		goto nand_evb_init;
-	}
-#ifndef USE_READY_BUSY_PIN
-	/* Adjust delay if necessary */
-	if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
-		this->chip_delay = NAND_SMALL_DELAY_US;
-#endif
-
-	ppchameleon_mtd->name = "ppchameleon-nand";
-
-	/* Register the partitions */
-	mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
-				  ppchameleon_mtd->size == NAND_SMALL_SIZE ?
-					partition_info_me : partition_info_hi,
-				  NUM_PARTITIONS);
-
- nand_evb_init:
-	/****************************
-	* EVB NAND (always present) *
-	****************************/
-	/* Allocate memory for MTD device structure and private data */
-	ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!ppchameleonevb_mtd) {
-		printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
-		if (ppchameleon_fio_base)
-			iounmap(ppchameleon_fio_base);
-		return -ENOMEM;
-	}
-
-	/* map physical address */
-	ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
-	if (!ppchameleonevb_fio_base) {
-		printk("ioremap PPChameleonEVB NAND flash failed\n");
-		kfree(ppchameleonevb_mtd);
-		if (ppchameleon_fio_base)
-			iounmap(ppchameleon_fio_base);
-		return -EIO;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&ppchameleonevb_mtd[1]);
-
-	/* Initialize structures */
-	memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
-	/* Link the private data with the MTD structure */
-	ppchameleonevb_mtd->priv = this;
-
-	/* Initialize GPIOs */
-	/* Pin mapping for NAND chip */
-	/*
-	   CE   GPIO_14
-	   CLE  GPIO_15
-	   ALE  GPIO_16
-	   R/B  GPIO_31
-	 */
-	/* output select */
-	out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0);
-	out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF);
-	/* three-state select */
-	out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0);
-	out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF);
-	/* enable output driver */
-	out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
-		 NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
-#ifdef USE_READY_BUSY_PIN
-	/* three-state select */
-	out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC);
-	/* high-impedecence */
-	out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
-	/* input select */
-	out_be32((volatile unsigned *)GPIO0_ISR1L,
-		 (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
-#endif
-
-	/* insert callbacks */
-	this->IO_ADDR_R = ppchameleonevb_fio_base;
-	this->IO_ADDR_W = ppchameleonevb_fio_base;
-	this->cmd_ctrl = ppchameleonevb_hwcontrol;
-#ifdef USE_READY_BUSY_PIN
-	this->dev_ready = ppchameleonevb_device_ready;
-#endif
-	this->chip_delay = NAND_SMALL_DELAY_US;
-
-	/* ECC mode */
-	this->ecc.mode = NAND_ECC_SOFT;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(ppchameleonevb_mtd, 1)) {
-		iounmap((void *)ppchameleonevb_fio_base);
-		kfree(ppchameleonevb_mtd);
-		if (ppchameleon_fio_base)
-			iounmap(ppchameleon_fio_base);
-		return -ENXIO;
-	}
-
-	ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
-
-	/* Register the partitions */
-	mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
-				  ppchameleon_mtd->size == NAND_SMALL_SIZE ?
-				  partition_info_me : partition_info_hi,
-				  NUM_PARTITIONS);
-
-	/* Return happy */
-	return 0;
-}
-
-module_init(ppchameleonevb_init);
-
-/*
- * Clean up routine
- */
-static void __exit ppchameleonevb_cleanup(void)
-{
-	struct nand_chip *this;
-
-	/* Release resources, unregister device(s) */
-	nand_release(ppchameleon_mtd);
-	nand_release(ppchameleonevb_mtd);
-
-	/* Release iomaps */
-	this = (struct nand_chip *) &ppchameleon_mtd[1];
-	iounmap((void *) this->IO_ADDR_R);
-	this = (struct nand_chip *) &ppchameleonevb_mtd[1];
-	iounmap((void *) this->IO_ADDR_R);
-
-	/* Free the MTD device structure */
-	kfree (ppchameleon_mtd);
-	kfree (ppchameleonevb_mtd);
-}
-module_exit(ppchameleonevb_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
-MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 37ee75c..dec80ca 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -989,7 +989,7 @@
 	}
 
 	pxa3xx_flash_ids[0].name = f->name;
-	pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
+	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
 	pxa3xx_flash_ids[0].pagesize = f->page_size;
 	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
 	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
deleted file mode 100644
index e55b5cf..0000000
--- a/drivers/mtd/nand/rtc_from4.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- *  drivers/mtd/nand/rtc_from4.c
- *
- *  Copyright (C) 2004  Red Hat, Inc.
- *
- *  Derived from drivers/mtd/nand/spia.c
- *       Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- *   This is a device driver for the AG-AND flash device found on the
- *   Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
- *   which utilizes the Renesas HN29V1G91T-30 part.
- *   This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device.
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/rslib.h>
-#include <linux/bitrev.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-
-/*
- * MTD structure for Renesas board
- */
-static struct mtd_info *rtc_from4_mtd = NULL;
-
-#define RTC_FROM4_MAX_CHIPS	2
-
-/* HS77x9 processor register defines */
-#define SH77X9_BCR1	((volatile unsigned short *)(0xFFFFFF60))
-#define SH77X9_BCR2	((volatile unsigned short *)(0xFFFFFF62))
-#define SH77X9_WCR1	((volatile unsigned short *)(0xFFFFFF64))
-#define SH77X9_WCR2	((volatile unsigned short *)(0xFFFFFF66))
-#define SH77X9_MCR	((volatile unsigned short *)(0xFFFFFF68))
-#define SH77X9_PCR	((volatile unsigned short *)(0xFFFFFF6C))
-#define SH77X9_FRQCR	((volatile unsigned short *)(0xFFFFFF80))
-
-/*
- * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
- */
-/* Address where flash is mapped */
-#define RTC_FROM4_FIO_BASE	0x14000000
-
-/* CLE and ALE are tied to address lines 5 & 4, respectively */
-#define RTC_FROM4_CLE		(1 << 5)
-#define RTC_FROM4_ALE		(1 << 4)
-
-/* address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_SLOT3	(0x00800000)
-#define RTC_FROM4_NAND_ADDR_SLOT4	(0x00C00000)
-#define RTC_FROM4_NAND_ADDR_FPGA	(0x01000000)
-/* mask address lines A24-A22 used for chip selection */
-#define RTC_FROM4_NAND_ADDR_MASK	(RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
-
-/* FPGA status register for checking device ready (bit zero) */
-#define RTC_FROM4_FPGA_SR		(RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
-#define RTC_FROM4_DEVICE_READY		0x0001
-
-/* FPGA Reed-Solomon ECC Control register */
-
-#define RTC_FROM4_RS_ECC_CTL		(RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
-#define RTC_FROM4_RS_ECC_CTL_CLR	(1 << 7)
-#define RTC_FROM4_RS_ECC_CTL_GEN	(1 << 6)
-#define RTC_FROM4_RS_ECC_CTL_FD_E	(1 << 5)
-
-/* FPGA Reed-Solomon ECC code base */
-#define RTC_FROM4_RS_ECC		(RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
-#define RTC_FROM4_RS_ECCN		(RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
-
-/* FPGA Reed-Solomon ECC check register */
-#define RTC_FROM4_RS_ECC_CHK		(RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
-#define RTC_FROM4_RS_ECC_CHK_ERROR	(1 << 7)
-
-#define ERR_STAT_ECC_AVAILABLE		0x20
-
-/* Undefine for software ECC */
-#define RTC_FROM4_HWECC	1
-
-/* Define as 1 for no virtual erase blocks (in JFFS2) */
-#define RTC_FROM4_NO_VIRTBLOCKS	0
-
-/*
- * Module stuff
- */
-static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
-
-static const struct mtd_partition partition_info[] = {
-	{
-	 .name = "Renesas flash partition 1",
-	 .offset = 0,
-	 .size = MTDPART_SIZ_FULL},
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- *	hardware specific flash bbt decriptors
- *	Note: this is to allow debugging by disabling
- *		NAND_BBT_CREATE and/or NAND_BBT_WRITE
- *
- */
-static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs = 40,
-	.len = 4,
-	.veroffs = 44,
-	.maxblocks = 4,
-	.pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs = 40,
-	.len = 4,
-	.veroffs = 44,
-	.maxblocks = 4,
-	.pattern = mirror_pattern
-};
-
-#ifdef RTC_FROM4_HWECC
-
-/* the Reed Solomon control structure */
-static struct rs_control *rs_decoder;
-
-/*
- *      hardware specific Out Of Band information
- */
-static struct nand_ecclayout rtc_from4_nand_oobinfo = {
-	.eccbytes = 32,
-	.eccpos = {
-		   0, 1, 2, 3, 4, 5, 6, 7,
-		   8, 9, 10, 11, 12, 13, 14, 15,
-		   16, 17, 18, 19, 20, 21, 22, 23,
-		   24, 25, 26, 27, 28, 29, 30, 31},
-	.oobfree = {{32, 32}}
-};
-
-#endif
-
-/*
- * rtc_from4_hwcontrol - hardware specific access to control-lines
- * @mtd:	MTD device structure
- * @cmd:	hardware control command
- *
- * Address lines (A5 and A4) are used to control Command and Address Latch
- * Enable on this board, so set the read/write address appropriately.
- *
- * Chip Enable is also controlled by the Chip Select (CS5) and
- * Address lines (A24-A22), so no action is required here.
- *
- */
-static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd,
-				unsigned int ctrl)
-{
-	struct nand_chip *chip = (mtd->priv);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE);
-	else
-		writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE);
-}
-
-/*
- * rtc_from4_nand_select_chip - hardware specific chip select
- * @mtd:	MTD device structure
- * @chip:	Chip to select (0 == slot 3, 1 == slot 4)
- *
- * The chip select is based on address lines A24-A22.
- * This driver uses flash slots 3 and 4 (A23-A22).
- *
- */
-static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *this = mtd->priv;
-
-	this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
-	this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
-
-	switch (chip) {
-
-	case 0:		/* select slot 3 chip */
-		this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
-		this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
-		break;
-	case 1:		/* select slot 4 chip */
-		this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
-		this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
-		break;
-
-	}
-}
-
-/*
- * rtc_from4_nand_device_ready - hardware specific ready/busy check
- * @mtd:	MTD device structure
- *
- * This board provides the Ready/Busy state in the status register
- * of the FPGA.  Bit zero indicates the RDY(1)/BSY(0) signal.
- *
- */
-static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
-{
-	unsigned short status;
-
-	status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
-
-	return (status & RTC_FROM4_DEVICE_READY);
-
-}
-
-/*
- * deplete - code to perform device recovery in case there was a power loss
- * @mtd:	MTD device structure
- * @chip:	Chip to select (0 == slot 3, 1 == slot 4)
- *
- * If there was a sudden loss of power during an erase operation, a
- * "device recovery" operation must be performed when power is restored
- * to ensure correct operation.  This routine performs the required steps
- * for the requested chip.
- *
- * See page 86 of the data sheet for details.
- *
- */
-static void deplete(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *this = mtd->priv;
-
-	/* wait until device is ready */
-	while (!this->dev_ready(mtd)) ;
-
-	this->select_chip(mtd, chip);
-
-	/* Send the commands for device recovery, phase 1 */
-	this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
-	this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
-	/* Send the commands for device recovery, phase 2 */
-	this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
-	this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
-
-}
-
-#ifdef RTC_FROM4_HWECC
-/*
- * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
- * @mtd:	MTD device structure
- * @mode:	I/O mode; read or write
- *
- * enable hardware ECC for data read or write
- *
- */
-static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
-	unsigned short status;
-
-	switch (mode) {
-	case NAND_ECC_READ:
-		status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E;
-
-		*rs_ecc_ctl = status;
-		break;
-
-	case NAND_ECC_READSYN:
-		status = 0x00;
-
-		*rs_ecc_ctl = status;
-		break;
-
-	case NAND_ECC_WRITE:
-		status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E;
-
-		*rs_ecc_ctl = status;
-		break;
-
-	default:
-		BUG();
-		break;
-	}
-
-}
-
-/*
- * rtc_from4_calculate_ecc - hardware specific code to read ECC code
- * @mtd:	MTD device structure
- * @dat:	buffer containing the data to generate ECC codes
- * @ecc_code	ECC codes calculated
- *
- * The ECC code is calculated by the FPGA.  All we have to do is read the values
- * from the FPGA registers.
- *
- * Note: We read from the inverted registers, since data is inverted before
- * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code
- *
- */
-static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
-{
-	volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
-	unsigned short value;
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		value = *rs_eccn;
-		ecc_code[i] = (unsigned char)value;
-		rs_eccn++;
-	}
-	ecc_code[7] |= 0x0f;	/* set the last four bits (not used) */
-}
-
-/*
- * rtc_from4_correct_data - hardware specific code to correct data using ECC code
- * @mtd:	MTD device structure
- * @buf:	buffer containing the data to generate ECC codes
- * @ecc1	ECC codes read
- * @ecc2	ECC codes calculated
- *
- * The FPGA tells us fast, if there's an error or not. If no, we go back happy
- * else we read the ecc results from the fpga and call the rs library to decode
- * and hopefully correct the error.
- *
- */
-static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
-{
-	int i, j, res;
-	unsigned short status;
-	uint16_t par[6], syn[6];
-	uint8_t ecc[8];
-	volatile unsigned short *rs_ecc;
-
-	status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
-
-	if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
-		return 0;
-	}
-
-	/* Read the syndrome pattern from the FPGA and correct the bitorder */
-	rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
-	for (i = 0; i < 8; i++) {
-		ecc[i] = bitrev8(*rs_ecc);
-		rs_ecc++;
-	}
-
-	/* convert into 6 10bit syndrome fields */
-	par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)];
-	par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)];
-	par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)];
-	par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)];
-	par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)];
-	par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0);
-
-	/* Convert to computable syndrome */
-	for (i = 0; i < 6; i++) {
-		syn[i] = par[0];
-		for (j = 1; j < 6; j++)
-			if (par[j] != rs_decoder->nn)
-				syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
-
-		/* Convert to index form */
-		syn[i] = rs_decoder->index_of[syn[i]];
-	}
-
-	/* Let the library code do its magic. */
-	res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
-	if (res > 0) {
-		pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
-	}
-	return res;
-}
-
-/**
- * rtc_from4_errstat - perform additional error status checks
- * @mtd:	MTD device structure
- * @this:	NAND chip structure
- * @state:	state or the operation
- * @status:	status code returned from read status
- * @page:	startpage inside the chip, must be called with (page & this->pagemask)
- *
- * Perform additional error status checks on erase and write failures
- * to determine if errors are correctable.  For this device, correctable
- * 1-bit errors on erase and write are considered acceptable.
- *
- * note: see pages 34..37 of data sheet for details.
- *
- */
-static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
-			     int state, int status, int page)
-{
-	int er_stat = 0;
-	int rtn, retlen;
-	size_t len;
-	uint8_t *buf;
-	int i;
-
-	this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
-
-	if (state == FL_ERASING) {
-
-		for (i = 0; i < 4; i++) {
-			if (!(status & 1 << (i + 1)))
-				continue;
-			this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
-				      -1, -1);
-			rtn = this->read_byte(mtd);
-			this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
-			/* err_ecc_not_avail */
-			if (!(rtn & ERR_STAT_ECC_AVAILABLE))
-				er_stat |= 1 << (i + 1);
-		}
-
-	} else if (state == FL_WRITING) {
-
-		unsigned long corrected = mtd->ecc_stats.corrected;
-
-		/* single bank write logic */
-		this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
-		rtn = this->read_byte(mtd);
-		this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
-
-		if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
-			/* err_ecc_not_avail */
-			er_stat |= 1 << 1;
-			goto out;
-		}
-
-		len = mtd->writesize;
-		buf = kmalloc(len, GFP_KERNEL);
-		if (!buf) {
-			er_stat = 1;
-			goto out;
-		}
-
-		/* recovery read */
-		rtn = nand_do_read(mtd, page, len, &retlen, buf);
-
-		/* if read failed or > 1-bit error corrected */
-		if (rtn || (mtd->ecc_stats.corrected - corrected) > 1)
-			er_stat |= 1 << 1;
-		kfree(buf);
-	}
-out:
-	rtn = status;
-	if (er_stat == 0) {	/* if ECC is available   */
-		rtn = (status & ~NAND_STATUS_FAIL);	/*   clear the error bit */
-	}
-
-	return rtn;
-}
-#endif
-
-/*
- * Main initialization routine
- */
-static int __init rtc_from4_init(void)
-{
-	struct nand_chip *this;
-	unsigned short bcr1, bcr2, wcr2;
-	int i;
-	int ret;
-
-	/* Allocate memory for MTD device structure and private data */
-	rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!rtc_from4_mtd) {
-		printk("Unable to allocate Renesas NAND MTD device structure.\n");
-		return -ENOMEM;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&rtc_from4_mtd[1]);
-
-	/* Initialize structures */
-	memset(rtc_from4_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
-	/* Link the private data with the MTD structure */
-	rtc_from4_mtd->priv = this;
-	rtc_from4_mtd->owner = THIS_MODULE;
-
-	/* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */
-	bcr1 = *SH77X9_BCR1 & ~0x0002;
-	bcr1 |= 0x0002;
-	*SH77X9_BCR1 = bcr1;
-
-	/* set */
-	bcr2 = *SH77X9_BCR2 & ~0x0c00;
-	bcr2 |= 0x0800;
-	*SH77X9_BCR2 = bcr2;
-
-	/* set area 5 wait states */
-	wcr2 = *SH77X9_WCR2 & ~0x1c00;
-	wcr2 |= 0x1c00;
-	*SH77X9_WCR2 = wcr2;
-
-	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = rtc_from4_fio_base;
-	this->IO_ADDR_W = rtc_from4_fio_base;
-	/* Set address of hardware control function */
-	this->cmd_ctrl = rtc_from4_hwcontrol;
-	/* Set address of chip select function */
-	this->select_chip = rtc_from4_nand_select_chip;
-	/* command delay time (in us) */
-	this->chip_delay = 100;
-	/* return the status of the Ready/Busy line */
-	this->dev_ready = rtc_from4_nand_device_ready;
-
-#ifdef RTC_FROM4_HWECC
-	printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
-
-	this->ecc.mode = NAND_ECC_HW_SYNDROME;
-	this->ecc.size = 512;
-	this->ecc.bytes = 8;
-	this->ecc.strength = 3;
-	/* return the status of extra status and ECC checks */
-	this->errstat = rtc_from4_errstat;
-	/* set the nand_oobinfo to support FPGA H/W error detection */
-	this->ecc.layout = &rtc_from4_nand_oobinfo;
-	this->ecc.hwctl = rtc_from4_enable_hwecc;
-	this->ecc.calculate = rtc_from4_calculate_ecc;
-	this->ecc.correct = rtc_from4_correct_data;
-
-	/* We could create the decoder on demand, if memory is a concern.
-	 * This way we have it handy, if an error happens
-	 *
-	 * Symbolsize is 10 (bits)
-	 * Primitve polynomial is x^10+x^3+1
-	 * first consecutive root is 0
-	 * primitve element to generate roots = 1
-	 * generator polinomial degree = 6
-	 */
-	rs_decoder = init_rs(10, 0x409, 0, 1, 6);
-	if (!rs_decoder) {
-		printk(KERN_ERR "Could not create a RS decoder\n");
-		ret = -ENOMEM;
-		goto err_1;
-	}
-#else
-	printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
-
-	this->ecc.mode = NAND_ECC_SOFT;
-#endif
-
-	/* set the bad block tables to support debugging */
-	this->bbt_td = &rtc_from4_bbt_main_descr;
-	this->bbt_md = &rtc_from4_bbt_mirror_descr;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
-		ret = -ENXIO;
-		goto err_2;
-	}
-
-	/* Perform 'device recovery' for each chip in case there was a power loss. */
-	for (i = 0; i < this->numchips; i++) {
-		deplete(rtc_from4_mtd, i);
-	}
-
-#if RTC_FROM4_NO_VIRTBLOCKS
-	/* use a smaller erase block to minimize wasted space when a block is bad */
-	/* note: this uses eight times as much RAM as using the default and makes */
-	/*       mounts take four times as long. */
-	rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS;
-#endif
-
-	/* Register the partitions */
-	ret = mtd_device_register(rtc_from4_mtd, partition_info,
-				  NUM_PARTITIONS);
-	if (ret)
-		goto err_3;
-
-	/* Return happy */
-	return 0;
-err_3:
-	nand_release(rtc_from4_mtd);
-err_2:
-	free_rs(rs_decoder);
-err_1:
-	kfree(rtc_from4_mtd);
-	return ret;
-}
-
-module_init(rtc_from4_init);
-
-/*
- * Clean up routine
- */
-static void __exit rtc_from4_cleanup(void)
-{
-	/* Release resource, unregister partitions */
-	nand_release(rtc_from4_mtd);
-
-	/* Free the MTD device structure */
-	kfree(rtc_from4_mtd);
-
-#ifdef RTC_FROM4_HWECC
-	/* Free the reed solomon resources */
-	if (rs_decoder) {
-		free_rs(rs_decoder);
-	}
-#endif
-}
-
-module_exit(rtc_from4_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
-MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 57b3971..e57e18e 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1081,7 +1081,6 @@
 	return pdata;
 }
 #else /* CONFIG_OF */
-#define of_flctl_match NULL
 static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
 {
 	return NULL;
@@ -1219,22 +1218,11 @@
 	.driver = {
 		.name	= "sh_flctl",
 		.owner	= THIS_MODULE,
-		.of_match_table = of_flctl_match,
+		.of_match_table = of_match_ptr(of_flctl_match),
 	},
 };
 
-static int __init flctl_nand_init(void)
-{
-	return platform_driver_probe(&flctl_driver, flctl_probe);
-}
-
-static void __exit flctl_nand_cleanup(void)
-{
-	platform_driver_unregister(&flctl_driver);
-}
-
-module_init(flctl_nand_init);
-module_exit(flctl_nand_cleanup);
+module_platform_driver_probe(flctl_driver, flctl_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Yoshihiro Shimoda");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 082bcdc..e8181ed 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mtd/nand.h>
 #include <linux/module.h>
+#include <linux/sizes.h>
 #include "sm_common.h"
 
 static struct nand_ecclayout nand_oob_sm = {
@@ -67,44 +68,37 @@
 	return error;
 }
 
-
 static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
-	{"SmartMedia 1MiB 5V",          0x6e, 256, 1, 0x1000, 0},
-	{"SmartMedia 1MiB 3,3V",        0xe8, 256, 1, 0x1000, 0},
-	{"SmartMedia 1MiB 3,3V",        0xec, 256, 1, 0x1000, 0},
-	{"SmartMedia 2MiB 3,3V",        0xea, 256, 2, 0x1000, 0},
-	{"SmartMedia 2MiB 5V",          0x64, 256, 2, 0x1000, 0},
-	{"SmartMedia 2MiB 3,3V ROM",    0x5d, 512, 2, 0x2000, NAND_ROM},
-	{"SmartMedia 4MiB 3,3V",        0xe3, 512, 4, 0x2000, 0},
-	{"SmartMedia 4MiB 3,3/5V",      0xe5, 512, 4, 0x2000, 0},
-	{"SmartMedia 4MiB 5V",          0x6b, 512, 4, 0x2000, 0},
-	{"SmartMedia 4MiB 3,3V ROM",    0xd5, 512, 4, 0x2000, NAND_ROM},
-	{"SmartMedia 8MiB 3,3V",        0xe6, 512, 8, 0x2000, 0},
-	{"SmartMedia 8MiB 3,3V ROM",    0xd6, 512, 8, 0x2000, NAND_ROM},
-	{"SmartMedia 16MiB 3,3V",       0x73, 512, 16, 0x4000, 0},
-	{"SmartMedia 16MiB 3,3V ROM",   0x57, 512, 16, 0x4000, NAND_ROM},
-	{"SmartMedia 32MiB 3,3V",       0x75, 512, 32, 0x4000, 0},
-	{"SmartMedia 32MiB 3,3V ROM",   0x58, 512, 32, 0x4000, NAND_ROM},
-	{"SmartMedia 64MiB 3,3V",       0x76, 512, 64, 0x4000, 0},
-	{"SmartMedia 64MiB 3,3V ROM",   0xd9, 512, 64, 0x4000, NAND_ROM},
-	{"SmartMedia 128MiB 3,3V",      0x79, 512, 128, 0x4000, 0},
-	{"SmartMedia 128MiB 3,3V ROM",  0xda, 512, 128, 0x4000, NAND_ROM},
-	{"SmartMedia 256MiB 3,3V",      0x71, 512, 256, 0x4000 },
-	{"SmartMedia 256MiB 3,3V ROM",  0x5b, 512, 256, 0x4000, NAND_ROM},
-	{NULL,}
+	LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM",   0x5d, 2,   SZ_8K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 4MiB 3,3V",       0xe3, 4,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V",     0xe5, 4,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 4MiB 5V",         0x6b, 4,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM",   0xd5, 4,   SZ_8K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 8MiB 3,3V",       0xe6, 8,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM",   0xd6, 8,   SZ_8K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 16MiB 3,3V",      0x73, 16,  SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM",  0x57, 16,  SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 32MiB 3,3V",      0x75, 32,  SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM",  0x58, 32,  SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 64MiB 3,3V",      0x76, 64,  SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM",  0xd9, 64,  SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 128MiB 3,3V",     0x79, 128, SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V",    0x71, 256, SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+	{NULL}
 };
 
 static struct nand_flash_dev nand_xd_flash_ids[] = {
-
-	{"xD 16MiB 3,3V",    0x73, 512, 16, 0x4000, 0},
-	{"xD 32MiB 3,3V",    0x75, 512, 32, 0x4000, 0},
-	{"xD 64MiB 3,3V",    0x76, 512, 64, 0x4000, 0},
-	{"xD 128MiB 3,3V",   0x79, 512, 128, 0x4000, 0},
-	{"xD 256MiB 3,3V",   0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
-	{"xD 512MiB 3,3V",   0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
-	{"xD 1GiB 3,3V",     0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
-	{"xD 2GiB 3,3V",     0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
-	{NULL,}
+	LEGACY_ID_NAND("xD 16MiB 3,3V",  0x73, 16,   SZ_16K, 0),
+	LEGACY_ID_NAND("xD 32MiB 3,3V",  0x75, 32,   SZ_16K, 0),
+	LEGACY_ID_NAND("xD 64MiB 3,3V",  0x76, 64,   SZ_16K, 0),
+	LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128,  SZ_16K, 0),
+	LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256,  SZ_16K, NAND_BROKEN_XD),
+	LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512,  SZ_16K, NAND_BROKEN_XD),
+	LEGACY_ID_NAND("xD 1GiB 3,3V",   0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+	LEGACY_ID_NAND("xD 2GiB 3,3V",   0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+	{NULL}
 };
 
 int sm_register_device(struct mtd_info *mtd, int smartmedia)
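The rewritten tables lean on LEGACY_ID_NAND() to drop the repeated 512-byte page size and to spell eraseblock sizes as SZ_* constants (0x2000 = SZ_8K, 0x4000 = SZ_16K). A plausible sketch of the helper, assuming the nand_flash_dev layout used in this series (the authoritative macro lives in include/linux/mtd/nand.h):

	/* Sketch: legacy chips always have 512-byte pages, so only the
	 * device ID, chip size (MiB), eraseblock size and options vary.
	 */
	#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)	\
		{ .name = (nm), .dev_id = (devid), .pagesize = 512,	\
		  .chipsize = (chipsz), .erasesize = (erasesz),		\
		  .options = (opts) }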
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index e1e8748..7ed654c 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -427,18 +427,7 @@
 	},
 };
 
-static int __init txx9ndfmc_init(void)
-{
-	return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe);
-}
-
-static void __exit txx9ndfmc_exit(void)
-{
-	platform_driver_unregister(&txx9ndfmc_driver);
-}
-
-module_init(txx9ndfmc_init);
-module_exit(txx9ndfmc_exit);
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 30bd907..553d6d6 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -55,6 +55,7 @@
 	while ((pp = of_get_next_child(node, pp))) {
 		const __be32 *reg;
 		int len;
+		int a_cells, s_cells;
 
 		reg = of_get_property(pp, "reg", &len);
 		if (!reg) {
@@ -62,8 +63,10 @@
 			continue;
 		}
 
-		(*pparts)[i].offset = be32_to_cpu(reg[0]);
-		(*pparts)[i].size = be32_to_cpu(reg[1]);
+		a_cells = of_n_addr_cells(pp);
+		s_cells = of_n_size_cells(pp);
+		(*pparts)[i].offset = of_read_number(reg, a_cells);
+		(*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
 
 		partname = of_get_property(pp, "label", &len);
 		if (!partname)
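Reading the reg property through of_n_addr_cells()/of_n_size_cells() makes the parser honor the partition node's #address-cells and #size-cells, so 64-bit offsets and sizes (two cells each) work where the old be32_to_cpu(reg[0]) / be32_to_cpu(reg[1]) code assumed one cell apiece. of_read_number() folds the cells together; it is essentially:

	/* From <linux/of.h>, essentially: combine `size` big-endian
	 * 32-bit cells into one u64, most significant cell first.
	 */
	static inline u64 of_read_number(const __be32 *cell, int size)
	{
		u64 r = 0;

		while (size--)
			r = (r << 32) | be32_to_cpu(*cell++);
		return r;
	}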
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 91467bb..ab26072 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -40,7 +40,6 @@
 
 config MTD_ONENAND_OTP
 	bool "OneNAND OTP Support"
-	select HAVE_MTD_OTP
 	help
 	  One Block of the NAND Flash Array memory is reserved as
 	  a One-Time Programmable Block memory area.
@@ -68,10 +67,4 @@
 
 	  And more recent chips
 
-config MTD_ONENAND_SIM
-	tristate "OneNAND simulator support"
-	help
-	  The simulator may simulate various OneNAND flash chips for the
-	  OneNAND MTD layer.
-
 endif # MTD_ONENAND
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 2b7884c..9d6540e 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -10,7 +10,4 @@
 obj-$(CONFIG_MTD_ONENAND_OMAP2)		+= omap2.o
 obj-$(CONFIG_MTD_ONENAND_SAMSUNG)       += samsung.o
 
-# Simulator
-obj-$(CONFIG_MTD_ONENAND_SIM)		+= onenand_sim.o
-
 onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index eec2aed..d98b198 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -832,19 +832,7 @@
 	},
 };
 
-static int __init omap2_onenand_init(void)
-{
-	printk(KERN_INFO "OneNAND driver initializing\n");
-	return platform_driver_register(&omap2_onenand_driver);
-}
-
-static void __exit omap2_onenand_exit(void)
-{
-	platform_driver_unregister(&omap2_onenand_driver);
-}
-
-module_init(omap2_onenand_init);
-module_exit(omap2_onenand_exit);
+module_platform_driver(omap2_onenand_driver);
 
 MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
deleted file mode 100644
index 85399e3..0000000
--- a/drivers/mtd/onenand/onenand_sim.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- *  linux/drivers/mtd/onenand/onenand_sim.c
- *
- *  The OneNAND simulator
- *
- *  Copyright © 2005-2007 Samsung Electronics
- *  Kyungmin Park <kyungmin.park@samsung.com>
- *
- *  Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
- *  Flex-OneNAND simulator support
- *  Copyright (C) Samsung Electronics, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/onenand.h>
-
-#include <linux/io.h>
-
-#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
-#define CONFIG_ONENAND_SIM_MANUFACTURER         0xec
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
-#define CONFIG_ONENAND_SIM_DEVICE_ID            0x04
-#endif
-
-#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
-
-#ifndef CONFIG_ONENAND_SIM_VERSION_ID
-#define CONFIG_ONENAND_SIM_VERSION_ID           0x1e
-#endif
-
-#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
-#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
-#endif
-
-/* Initial boundary values for Flex-OneNAND Simulator */
-#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY	0x01
-#endif
-
-#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
-#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY	0x01
-#endif
-
-static int manuf_id	= CONFIG_ONENAND_SIM_MANUFACTURER;
-static int device_id	= CONFIG_ONENAND_SIM_DEVICE_ID;
-static int version_id	= CONFIG_ONENAND_SIM_VERSION_ID;
-static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
-static int boundary[] = {
-	CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
-	CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
-};
-
-struct onenand_flash {
-	void __iomem *base;
-	void __iomem *data;
-};
-
-#define ONENAND_CORE(flash)		(flash->data)
-#define ONENAND_CORE_SPARE(flash, this, offset)				\
-	((flash->data) + (this->chipsize) + (offset >> 5))
-
-#define ONENAND_MAIN_AREA(this, offset)					\
-	(this->base + ONENAND_DATARAM + offset)
-
-#define ONENAND_SPARE_AREA(this, offset)				\
-	(this->base + ONENAND_SPARERAM + offset)
-
-#define ONENAND_GET_WP_STATUS(this)					\
-	(readw(this->base + ONENAND_REG_WP_STATUS))
-
-#define ONENAND_SET_WP_STATUS(v, this)					\
-	(writew(v, this->base + ONENAND_REG_WP_STATUS))
-
-/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE		(4096 + 128)
-static unsigned char *ffchars;
-
-#if CONFIG_FLEXONENAND
-#define PARTITION_NAME "Flex-OneNAND simulator partition"
-#else
-#define PARTITION_NAME "OneNAND simulator partition"
-#endif
-
-static struct mtd_partition os_partitions[] = {
-	{
-		.name		= PARTITION_NAME,
-		.offset		= 0,
-		.size		= MTDPART_SIZ_FULL,
-	},
-};
-
-/*
- * OneNAND simulator mtd
- */
-struct onenand_info {
-	struct mtd_info		mtd;
-	struct mtd_partition	*parts;
-	struct onenand_chip	onenand;
-	struct onenand_flash	flash;
-};
-
-static struct onenand_info *info;
-
-#define DPRINTK(format, args...)					\
-do {									\
-	printk(KERN_DEBUG "%s[%d]: " format "\n", __func__,		\
-			   __LINE__, ##args);				\
-} while (0)
-
-/**
- * onenand_lock_handle - Handle Lock scheme
- * @this:		OneNAND device structure
- * @cmd:		The command to be sent
- *
- * Send lock command to OneNAND device.
- * The lock scheme depends on chip type.
- */
-static void onenand_lock_handle(struct onenand_chip *this, int cmd)
-{
-	int block_lock_scheme;
-	int status;
-
-	status = ONENAND_GET_WP_STATUS(this);
-	block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK);
-
-	switch (cmd) {
-	case ONENAND_CMD_UNLOCK:
-	case ONENAND_CMD_UNLOCK_ALL:
-		if (block_lock_scheme)
-			ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
-		else
-			ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this);
-		break;
-
-	case ONENAND_CMD_LOCK:
-		if (block_lock_scheme)
-			ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this);
-		else
-			ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this);
-		break;
-
-	case ONENAND_CMD_LOCK_TIGHT:
-		if (block_lock_scheme)
-			ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this);
-		else
-			ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this);
-		break;
-
-	default:
-		break;
-	}
-}
-
-/**
- * onenand_bootram_handle - Handle BootRAM area
- * @this:		OneNAND device structure
- * @cmd:		The command to be sent
- *
- * Emulate BootRAM area. It is possible to do basic operation using BootRAM.
- */
-static void onenand_bootram_handle(struct onenand_chip *this, int cmd)
-{
-	switch (cmd) {
-	case ONENAND_CMD_READID:
-		writew(manuf_id, this->base);
-		writew(device_id, this->base + 2);
-		writew(version_id, this->base + 4);
-		break;
-
-	default:
-		/* REVIST: Handle other commands */
-		break;
-	}
-}
-
-/**
- * onenand_update_interrupt - Set interrupt register
- * @this:         OneNAND device structure
- * @cmd:          The command to be sent
- *
- * Update interrupt register. The status depends on command.
- */
-static void onenand_update_interrupt(struct onenand_chip *this, int cmd)
-{
-	int interrupt = ONENAND_INT_MASTER;
-
-	switch (cmd) {
-	case ONENAND_CMD_READ:
-	case ONENAND_CMD_READOOB:
-		interrupt |= ONENAND_INT_READ;
-		break;
-
-	case ONENAND_CMD_PROG:
-	case ONENAND_CMD_PROGOOB:
-		interrupt |= ONENAND_INT_WRITE;
-		break;
-
-	case ONENAND_CMD_ERASE:
-		interrupt |= ONENAND_INT_ERASE;
-		break;
-
-	case ONENAND_CMD_RESET:
-		interrupt |= ONENAND_INT_RESET;
-		break;
-
-	default:
-		break;
-	}
-
-	writew(interrupt, this->base + ONENAND_REG_INTERRUPT);
-}
-
-/**
- * onenand_check_overwrite - Check if over-write happened
- * @dest:		The destination pointer
- * @src:		The source pointer
- * @count:		The length to be check
- *
- * Returns:		0 on same, otherwise 1
- *
- * Compare the source with destination
- */
-static int onenand_check_overwrite(void *dest, void *src, size_t count)
-{
-	unsigned int *s = (unsigned int *) src;
-	unsigned int *d = (unsigned int *) dest;
-	int i;
-
-	count >>= 2;
-	for (i = 0; i < count; i++)
-		if ((*s++ ^ *d++) != 0)
-			return 1;
-
-	return 0;
-}
-
-/**
- * onenand_data_handle - Handle OneNAND Core and DataRAM
- * @this:		OneNAND device structure
- * @cmd:		The command to be sent
- * @dataram:		Which dataram used
- * @offset:		The offset to OneNAND Core
- *
- * Copy data from OneNAND Core to DataRAM (read)
- * Copy data from DataRAM to OneNAND Core (write)
- * Erase the OneNAND Core (erase)
- */
-static void onenand_data_handle(struct onenand_chip *this, int cmd,
-				int dataram, unsigned int offset)
-{
-	struct mtd_info *mtd = &info->mtd;
-	struct onenand_flash *flash = this->priv;
-	int main_offset, spare_offset, die = 0;
-	void __iomem *src;
-	void __iomem *dest;
-	unsigned int i;
-	static int pi_operation;
-	int erasesize, rgn;
-
-	if (dataram) {
-		main_offset = mtd->writesize;
-		spare_offset = mtd->oobsize;
-	} else {
-		main_offset = 0;
-		spare_offset = 0;
-	}
-
-	if (pi_operation) {
-		die = readw(this->base + ONENAND_REG_START_ADDRESS2);
-		die >>= ONENAND_DDP_SHIFT;
-	}
-
-	switch (cmd) {
-	case FLEXONENAND_CMD_PI_ACCESS:
-		pi_operation = 1;
-		break;
-
-	case ONENAND_CMD_RESET:
-		pi_operation = 0;
-		break;
-
-	case ONENAND_CMD_READ:
-		src = ONENAND_CORE(flash) + offset;
-		dest = ONENAND_MAIN_AREA(this, main_offset);
-		if (pi_operation) {
-			writew(boundary[die], this->base + ONENAND_DATARAM);
-			break;
-		}
-		memcpy(dest, src, mtd->writesize);
-		/* Fall through */
-
-	case ONENAND_CMD_READOOB:
-		src = ONENAND_CORE_SPARE(flash, this, offset);
-		dest = ONENAND_SPARE_AREA(this, spare_offset);
-		memcpy(dest, src, mtd->oobsize);
-		break;
-
-	case ONENAND_CMD_PROG:
-		src = ONENAND_MAIN_AREA(this, main_offset);
-		dest = ONENAND_CORE(flash) + offset;
-		if (pi_operation) {
-			boundary[die] = readw(this->base + ONENAND_DATARAM);
-			break;
-		}
-		/* To handle partial write */
-		for (i = 0; i < (1 << mtd->subpage_sft); i++) {
-			int off = i * this->subpagesize;
-			if (!memcmp(src + off, ffchars, this->subpagesize))
-				continue;
-			if (memcmp(dest + off, ffchars, this->subpagesize) &&
-			    onenand_check_overwrite(dest + off, src + off, this->subpagesize))
-				printk(KERN_ERR "over-write happened at 0x%08x\n", offset);
-			memcpy(dest + off, src + off, this->subpagesize);
-		}
-		/* Fall through */
-
-	case ONENAND_CMD_PROGOOB:
-		src = ONENAND_SPARE_AREA(this, spare_offset);
-		/* Check all data is 0xff chars */
-		if (!memcmp(src, ffchars, mtd->oobsize))
-			break;
-
-		dest = ONENAND_CORE_SPARE(flash, this, offset);
-		if (memcmp(dest, ffchars, mtd->oobsize) &&
-		    onenand_check_overwrite(dest, src, mtd->oobsize))
-			printk(KERN_ERR "OOB: over-write happened at 0x%08x\n",
-			       offset);
-		memcpy(dest, src, mtd->oobsize);
-		break;
-
-	case ONENAND_CMD_ERASE:
-		if (pi_operation)
-			break;
-
-		if (FLEXONENAND(this)) {
-			rgn = flexonenand_region(mtd, offset);
-			erasesize = mtd->eraseregions[rgn].erasesize;
-		} else
-			erasesize = mtd->erasesize;
-
-		memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
-		memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
-		       (erasesize >> 5));
-		break;
-
-	default:
-		break;
-	}
-}
-
-/**
- * onenand_command_handle - Handle command
- * @this:		OneNAND device structure
- * @cmd:		The command to be sent
- *
- * Emulate OneNAND command.
- */
-static void onenand_command_handle(struct onenand_chip *this, int cmd)
-{
-	unsigned long offset = 0;
-	int block = -1, page = -1, bufferram = -1;
-	int dataram = 0;
-
-	switch (cmd) {
-	case ONENAND_CMD_UNLOCK:
-	case ONENAND_CMD_LOCK:
-	case ONENAND_CMD_LOCK_TIGHT:
-	case ONENAND_CMD_UNLOCK_ALL:
-		onenand_lock_handle(this, cmd);
-		break;
-
-	case ONENAND_CMD_BUFFERRAM:
-		/* Do nothing */
-		return;
-
-	default:
-		block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1);
-		if (block & (1 << ONENAND_DDP_SHIFT)) {
-			block &= ~(1 << ONENAND_DDP_SHIFT);
-			/* The second half of the chip's blocks */
-			block += this->chipsize >> (this->erase_shift + 1);
-		}
-		if (cmd == ONENAND_CMD_ERASE)
-			break;
-
-		page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8);
-		page = (page >> ONENAND_FPA_SHIFT);
-		bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER);
-		bufferram >>= ONENAND_BSA_SHIFT;
-		bufferram &= ONENAND_BSA_DATARAM1;
-		dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 1 : 0;
-		break;
-	}
-
-	if (block != -1)
-		offset = onenand_addr(this, block);
-
-	if (page != -1)
-		offset += page << this->page_shift;
-
-	onenand_data_handle(this, cmd, dataram, offset);
-
-	onenand_update_interrupt(this, cmd);
-}
-
-/**
- * onenand_writew - [OneNAND Interface] Emulate write operation
- * @value:		value to write
- * @addr:		address to write
- *
- * Write OneNAND register with value
- */
-static void onenand_writew(unsigned short value, void __iomem * addr)
-{
-	struct onenand_chip *this = info->mtd.priv;
-
-	/* BootRAM handling */
-	if (addr < this->base + ONENAND_DATARAM) {
-		onenand_bootram_handle(this, value);
-		return;
-	}
-	/* Command handling */
-	if (addr == this->base + ONENAND_REG_COMMAND)
-		onenand_command_handle(this, value);
-
-	writew(value, addr);
-}
-
-/**
- * flash_init - Initialize OneNAND simulator
- * @flash:		OneNAND simulator data structures
- *
- * Initialize OneNAND simulator.
- */
-static int __init flash_init(struct onenand_flash *flash)
-{
-	int density, size;
-	int buffer_size;
-
-	flash->base = kzalloc(131072, GFP_KERNEL);
-	if (!flash->base) {
-		printk(KERN_ERR "Unable to allocate base address.\n");
-		return -ENOMEM;
-	}
-
-	density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
-	density &= ONENAND_DEVICE_DENSITY_MASK;
-	size = ((16 << 20) << density);
-
-	ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
-	if (!ONENAND_CORE(flash)) {
-		printk(KERN_ERR "Unable to allocate nand core address.\n");
-		kfree(flash->base);
-		return -ENOMEM;
-	}
-
-	memset(ONENAND_CORE(flash), 0xff, size + (size >> 5));
-
-	/* Setup registers */
-	writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
-	writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
-	writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
-	writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
-
-	if (density < 2 && (!CONFIG_FLEXONENAND))
-		buffer_size = 0x0400;	/* 1KiB page */
-	else
-		buffer_size = 0x0800;	/* 2KiB page */
-	writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE);
-
-	return 0;
-}
-
-/**
- * flash_exit - Clean up OneNAND simulator
- * @flash:		OneNAND simulator data structures
- *
- * Clean up OneNAND simulator.
- */
-static void flash_exit(struct onenand_flash *flash)
-{
-	vfree(ONENAND_CORE(flash));
-	kfree(flash->base);
-}
-
-static int __init onenand_sim_init(void)
-{
-	/* Allocate all 0xff chars pointer */
-	ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL);
-	if (!ffchars) {
-		printk(KERN_ERR "Unable to allocate ff chars.\n");
-		return -ENOMEM;
-	}
-	memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE);
-
-	/* Allocate OneNAND simulator mtd pointer */
-	info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL);
-	if (!info) {
-		printk(KERN_ERR "Unable to allocate core structures.\n");
-		kfree(ffchars);
-		return -ENOMEM;
-	}
-
-	/* Override write_word function */
-	info->onenand.write_word = onenand_writew;
-
-	if (flash_init(&info->flash)) {
-		printk(KERN_ERR "Unable to allocate flash.\n");
-		kfree(ffchars);
-		kfree(info);
-		return -ENOMEM;
-	}
-
-	info->parts = os_partitions;
-
-	info->onenand.base = info->flash.base;
-	info->onenand.priv = &info->flash;
-
-	info->mtd.name = "OneNAND simulator";
-	info->mtd.priv = &info->onenand;
-	info->mtd.owner = THIS_MODULE;
-
-	if (onenand_scan(&info->mtd, 1)) {
-		flash_exit(&info->flash);
-		kfree(ffchars);
-		kfree(info);
-		return -ENXIO;
-	}
-
-	mtd_device_register(&info->mtd, info->parts,
-			    ARRAY_SIZE(os_partitions));
-
-	return 0;
-}
-
-static void __exit onenand_sim_exit(void)
-{
-	struct onenand_chip *this = info->mtd.priv;
-	struct onenand_flash *flash = this->priv;
-
-	onenand_release(&info->mtd);
-	flash_exit(flash);
-	kfree(ffchars);
-	kfree(info);
-}
-
-module_init(onenand_sim_init);
-module_exit(onenand_sim_exit);
-
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_DESCRIPTION("The OneNAND flash simulator");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 8dd6ba5..f9d5615 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1107,7 +1107,7 @@
 }
 
 /* outside interface: device is released */
-static int sm_release(struct mtd_blktrans_dev *dev)
+static void sm_release(struct mtd_blktrans_dev *dev)
 {
 	struct sm_ftl *ftl = dev->priv;
 
@@ -1116,7 +1116,6 @@
 	cancel_work_sync(&ftl->flush_work);
 	sm_cache_flush(ftl);
 	mutex_unlock(&ftl->mutex);
-	return 0;
 }
 
 /* outside interface: get geometry */
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ee70577..dada66b 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1700,7 +1700,8 @@
 	}
 
 	bfin_mac_hwtstamp_init(ndev);
-	if (bfin_phc_init(ndev, &pdev->dev)) {
+	rc = bfin_phc_init(ndev, &pdev->dev);
+	if (rc) {
 		dev_err(&pdev->dev, "Cannot register PHC device!\n");
 		goto out_err_phc;
 	}
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 40649a8..6b0dc13 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4085,7 +4085,7 @@
 	if (!cp->csk_tbl)
 		return -ENOMEM;
 
-	port_id = random32();
+	port_id = prandom_u32();
 	port_id %= CNIC_LOCAL_PORT_RANGE;
 	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
 			     CNIC_LOCAL_PORT_MIN, port_id)) {
@@ -4145,7 +4145,7 @@
 {
 	u32 seed;
 
-	seed = random32();
+	seed = prandom_u32();
 	cnic_ctx_wr(dev, 45, 0, seed);
 	return 0;
 }
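
The random32() calls above are part of a tree-wide rename: prandom_u32() is
the same generator under a name that makes explicit the values come from the
kernel's non-cryptographic pseudo-random pool.  A minimal userspace sketch of
the bounded-pick idiom the first cnic hunk uses (names here are illustrative,
not kernel API):

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for prandom_u32(): any fast non-crypto PRNG will do. */
static uint32_t example_prandom_u32(void)
{
	return (uint32_t)rand() ^ ((uint32_t)rand() << 16);
}

/* Pick a starting port in [min, min + range), as the hunk above does
 * with CNIC_LOCAL_PORT_MIN and CNIC_LOCAL_PORT_RANGE. */
static uint32_t pick_local_port(uint32_t min, uint32_t range)
{
	return min + example_prandom_u32() % range;
}
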
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e1e5bb9..fd7b547 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2640,9 +2640,8 @@
 	req = get_mac_list_cmd.va;
 
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
-				wrb, &get_mac_list_cmd);
-
+			       OPCODE_COMMON_GET_MAC_LIST,
+			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
 	req->hdr.domain = domain;
 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
 	req->perm_override = 1;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6c52a60..a444110 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1827,7 +1827,7 @@
 			mdelay(1);
 		} else {
 			be_rx_compl_discard(rxo, rxcp);
-			be_cq_notify(adapter, rx_cq->id, true, 1);
+			be_cq_notify(adapter, rx_cq->id, false, 1);
 			if (rxcp->num_rcvd == 0)
 				break;
 		}
@@ -2533,11 +2533,6 @@
 		q = &rxo->q;
 		if (q->created) {
 			be_cmd_rxq_destroy(adapter, q);
-			/* After the rxq is invalidated, wait for a grace time
-			 * of 1ms for all dma to end and the flush compl to
-			 * arrive
-			 */
-			mdelay(1);
 			be_rx_cq_clean(rxo);
 		}
 		be_queue_free(adapter, q);
@@ -2564,6 +2559,7 @@
 	 * all tx skbs are freed.
 	 */
 	be_tx_compl_clean(adapter);
+	netif_tx_disable(netdev);
 
 	be_rx_qs_destroy(adapter);
 
@@ -2672,6 +2668,7 @@
 	if (!status)
 		be_link_status_update(adapter, link_status);
 
+	netif_tx_start_all_queues(netdev);
 	be_roce_dev_open(adapter);
 	return 0;
 err:
@@ -2783,6 +2780,8 @@
 		goto done;
 	}
 
+	pci_disable_sriov(adapter->pdev);
+
 	for_all_vfs(adapter, vf_cfg, vf) {
 		if (lancer_chip(adapter))
 			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
@@ -2792,7 +2791,6 @@
 
 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
 	}
-	pci_disable_sriov(adapter->pdev);
 done:
 	kfree(adapter->vf_cfg);
 	adapter->num_vfs = 0;
@@ -2889,13 +2887,8 @@
 			dev_info(dev, "Device supports %d VFs and not %d\n",
 				 adapter->dev_num_vfs, num_vfs);
 		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
-
-		status = pci_enable_sriov(adapter->pdev, num_vfs);
-		if (status) {
-			dev_err(dev, "SRIOV enable failed\n");
-			adapter->num_vfs = 0;
+		if (!adapter->num_vfs)
 			return 0;
-		}
 	}
 
 	status = be_vf_setup_init(adapter);
@@ -2944,6 +2937,15 @@
 
 		be_cmd_enable_vf(adapter, vf + 1);
 	}
+
+	if (!old_vfs) {
+		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+		if (status) {
+			dev_err(dev, "SRIOV enable failed\n");
+			adapter->num_vfs = 0;
+			goto err;
+		}
+	}
 	return 0;
 err:
 	dev_err(dev, "VF setup failed\n");
@@ -3198,7 +3200,7 @@
 		be_cmd_set_flow_control(adapter, adapter->tx_fc,
 					adapter->rx_fc);
 
-	if (be_physfn(adapter) && num_vfs) {
+	if (be_physfn(adapter)) {
 		if (adapter->dev_num_vfs)
 			be_vf_setup(adapter);
 		else
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ceb4d43..9ce5b71 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -198,6 +198,11 @@
 #define FLAG_RX_CSUM_ENABLED	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 #define FLAG_RX_CSUM_ERROR	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 
+struct fec_enet_delayed_work {
+	struct delayed_work delay_work;
+	bool timeout;
+};
+
 /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors.  The
  * cur_rx and cur_tx point to the currently available buffer.
@@ -232,9 +237,6 @@
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
-	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
-	spinlock_t hw_lock;
-
 	struct	platform_device *pdev;
 
 	int	opened;
@@ -269,7 +271,7 @@
 	int hwts_rx_en;
 	int hwts_tx_en;
 	struct timer_list time_keep;
-
+	struct fec_enet_delayed_work delay_work;
 };
 
 void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e25bf83..aff0310 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -445,6 +445,13 @@
 	u32 rcntl = OPT_FRAME_SIZE | 0x04;
 	u32 ecntl = 0x2; /* ETHEREN */
 
+	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
+		napi_disable(&fep->napi);
+		netif_stop_queue(ndev);
+		netif_tx_lock(ndev);
+	}
+
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
@@ -605,6 +612,13 @@
 
 	/* Enable interrupts we wish to service */
 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+	if (netif_running(ndev)) {
+		netif_device_attach(ndev);
+		napi_enable(&fep->napi);
+		netif_wake_queue(ndev);
+		netif_tx_unlock(ndev);
+	}
 }
 
 static void
@@ -644,8 +658,22 @@
 
 	ndev->stats.tx_errors++;
 
-	fec_restart(ndev, fep->full_duplex);
-	netif_wake_queue(ndev);
+	fep->delay_work.timeout = true;
+	schedule_delayed_work(&(fep->delay_work.delay_work), 0);
+}
+
+static void fec_enet_work(struct work_struct *work)
+{
+	struct fec_enet_private *fep =
+		container_of(work,
+			     struct fec_enet_private,
+			     delay_work.delay_work.work);
+
+	if (fep->delay_work.timeout) {
+		fep->delay_work.timeout = false;
+		fec_restart(fep->netdev, fep->full_duplex);
+		netif_wake_queue(fep->netdev);
+	}
 }
 
 static void
@@ -1024,16 +1052,12 @@
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phy_dev = fep->phy_dev;
-	unsigned long flags;
-
 	int status_change = 0;
 
-	spin_lock_irqsave(&fep->hw_lock, flags);
-
 	/* Prevent a state halted on mii error */
 	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
 		phy_dev->state = PHY_RESUMING;
-		goto spin_unlock;
+		return;
 	}
 
 	if (phy_dev->link) {
@@ -1061,9 +1085,6 @@
 		}
 	}
 
-spin_unlock:
-	spin_unlock_irqrestore(&fep->hw_lock, flags);
-
 	if (status_change)
 		phy_print_status(phy_dev);
 }
@@ -1732,7 +1753,6 @@
 		return -ENOMEM;
 
 	memset(cbd_base, 0, PAGE_SIZE);
-	spin_lock_init(&fep->hw_lock);
 
 	fep->netdev = ndev;
 
@@ -1952,6 +1972,7 @@
 	if (fep->bufdesc_ex && fep->ptp_clock)
 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
+	INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
 	return 0;
 
 failed_register:
@@ -1984,6 +2005,7 @@
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 
+	cancel_delayed_work_sync(&(fep->delay_work.delay_work));
 	unregister_netdev(ndev);
 	fec_enet_mii_remove(fep);
 	del_timer_sync(&fep->time_keep);
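
The fec change above moves the MAC restart out of the ndo_tx_timeout handler,
which runs in atomic (watchdog/softirq) context, into a delayed work item that
runs in process context, where fec_restart() can safely sleep and take
netif_tx_lock().  A minimal sketch of the same deferral pattern, with
hypothetical names (the work item is assumed to be set up with
INIT_DELAYED_WORK() at probe time, as the patch does):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device *ndev;
	struct delayed_work restart_work;
	bool restart_pending;
};

/* Process context: safe to sleep while resetting the hardware. */
static void my_restart_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(to_delayed_work(work),
					    struct my_priv, restart_work);

	if (priv->restart_pending) {
		priv->restart_pending = false;
		/* ... reset the MAC here, as fec_restart() does ... */
		netif_wake_queue(priv->ndev);
	}
}

/* ndo_tx_timeout callback: atomic context, so only schedule the work. */
static void my_tx_timeout(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	priv->restart_pending = true;
	schedule_delayed_work(&priv->restart_work, 0);
}
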
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bcf4d11..c9e6b62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -889,7 +889,7 @@
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 	};
 
 	rule.port = priv->port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a69a908..b35f947 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -127,7 +127,7 @@
 		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
 		.exclusive = 1,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.port = priv->port,
 		.priority = MLX4_DOMAIN_RFS,
 	};
@@ -448,7 +448,7 @@
 			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 			.exclusive = 0,
 			.allow_loopback = 1,
-			.promisc_mode = MLX4_FS_PROMISC_NONE,
+			.promisc_mode = MLX4_FS_REGULAR,
 			.priority = MLX4_DOMAIN_NIC,
 		};
 
@@ -795,7 +795,7 @@
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_UPLINK);
+							  MLX4_FS_ALL_DEFAULT);
 			if (err)
 				en_err(priv, "Failed enabling promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@ -858,7 +858,7 @@
 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
 		err = mlx4_flow_steer_promisc_remove(mdev->dev,
 						     priv->port,
-						     MLX4_FS_PROMISC_UPLINK);
+						     MLX4_FS_ALL_DEFAULT);
 		if (err)
 			en_err(priv, "Failed disabling promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -919,7 +919,7 @@
 				err = mlx4_flow_steer_promisc_add(mdev->dev,
 								  priv->port,
 								  priv->base_qpn,
-								  MLX4_FS_PROMISC_ALL_MULTI);
+								  MLX4_FS_MC_DEFAULT);
 				break;
 
 			case MLX4_STEERING_MODE_B0:
@@ -942,7 +942,7 @@
 			case MLX4_STEERING_MODE_DEVICE_MANAGED:
 				err = mlx4_flow_steer_promisc_remove(mdev->dev,
 								     priv->port,
-								     MLX4_FS_PROMISC_ALL_MULTI);
+								     MLX4_FS_MC_DEFAULT);
 				break;
 
 			case MLX4_STEERING_MODE_B0:
@@ -1621,10 +1621,10 @@
 				 MLX4_EN_FLAG_MC_PROMISC);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_UPLINK);
+					       MLX4_FS_ALL_DEFAULT);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_ALL_MULTI);
+					       MLX4_FS_MC_DEFAULT);
 	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e3123a..6000342 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -497,8 +497,8 @@
 			break;
 
 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				  __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+				 __func__);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ffc78d2c..f3e804f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -645,25 +645,37 @@
 	return err;
 }
 
+static const u8 __promisc_mode[] = {
+	[MLX4_FS_REGULAR]   = 0x0,
+	[MLX4_FS_ALL_DEFAULT] = 0x1,
+	[MLX4_FS_MC_DEFAULT] = 0x3,
+	[MLX4_FS_UC_SNIFFER] = 0x4,
+	[MLX4_FS_MC_SNIFFER] = 0x5,
+};
+
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type)
+{
+	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
+		return -EINVAL;
+	}
+	return __promisc_mode[flow_type];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
+
 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 				  struct mlx4_net_trans_rule_hw_ctrl *hw)
 {
-	static const u8 __promisc_mode[] = {
-		[MLX4_FS_PROMISC_NONE]   = 0x0,
-		[MLX4_FS_PROMISC_UPLINK] = 0x1,
-		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
-		[MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
-	};
+	u8 flags = 0;
 
-	u32 dw = 0;
+	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+	flags |= ctrl->exclusive ? (1 << 2) : 0;
+	flags |= ctrl->allow_loopback ? (1 << 3) : 0;
 
-	dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
-	dw |= ctrl->exclusive ? (1 << 2) : 0;
-	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
-	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
-	dw |= ctrl->priority << 16;
-
-	hw->ctrl = cpu_to_be32(dw);
+	hw->flags = flags;
+	hw->type = __promisc_mode[ctrl->promisc_mode];
+	hw->prio = cpu_to_be16(ctrl->priority);
 	hw->port = ctrl->port;
 	hw->qpn = cpu_to_be32(ctrl->qpn);
 }
@@ -677,29 +689,51 @@
 	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
 };
 
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+	return __sw_id_hw[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
+
+static const int __rule_hw_sz[] = {
+	[MLX4_NET_TRANS_RULE_ID_ETH] =
+		sizeof(struct mlx4_net_trans_rule_hw_eth),
+	[MLX4_NET_TRANS_RULE_ID_IB] =
+		sizeof(struct mlx4_net_trans_rule_hw_ib),
+	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+	[MLX4_NET_TRANS_RULE_ID_IPV4] =
+		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+	[MLX4_NET_TRANS_RULE_ID_TCP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+	[MLX4_NET_TRANS_RULE_ID_UDP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+};
+
+int mlx4_hw_rule_sz(struct mlx4_dev *dev,
+	       enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+
+	return __rule_hw_sz[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
+
 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			    struct _rule_hw *rule_hw)
 {
-	static const size_t __rule_hw_sz[] = {
-		[MLX4_NET_TRANS_RULE_ID_ETH] =
-			sizeof(struct mlx4_net_trans_rule_hw_eth),
-		[MLX4_NET_TRANS_RULE_ID_IB] =
-			sizeof(struct mlx4_net_trans_rule_hw_ib),
-		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
-		[MLX4_NET_TRANS_RULE_ID_IPV4] =
-			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
-		[MLX4_NET_TRANS_RULE_ID_TCP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
-		[MLX4_NET_TRANS_RULE_ID_UDP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
-	};
-	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
-		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
 		return -EINVAL;
-	}
-	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
 	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
-	rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
 
 	switch (spec->id) {
 	case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -713,12 +747,12 @@
 			rule_hw->eth.ether_type_enable = 1;
 			rule_hw->eth.ether_type = spec->eth.ether_type;
 		}
-		rule_hw->eth.vlan_id = spec->eth.vlan_id;
-		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
+		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
 		break;
 
 	case MLX4_NET_TRANS_RULE_ID_IB:
-		rule_hw->ib.qpn = spec->ib.r_qpn;
+		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
 		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
 		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
 		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@ -1136,7 +1170,7 @@
 		struct mlx4_net_trans_rule rule = {
 			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 			.exclusive = 0,
-			.promisc_mode = MLX4_FS_PROMISC_NONE,
+			.promisc_mode = MLX4_FS_REGULAR,
 			.priority = MLX4_DOMAIN_NIC,
 		};
 
@@ -1229,11 +1263,10 @@
 	u64 *regid_p;
 
 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
@@ -1260,11 +1293,10 @@
 	u64 *regid_p;
 
 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
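
The mcg.c refactoring above lifts the promisc-mode and rule-id translation
tables out of static function scope and exports them with range checking,
presumably so other mlx4 consumers (the InfiniBand side, for instance) can
build hardware steering rules.  A hedged sketch of a caller, assuming the
mlx4 driver headers are available:

/* Translate a software promisc mode before building a hardware rule;
 * the helper returns the hardware encoding, or -EINVAL on a bad enum. */
static int build_rule_type(struct mlx4_dev *dev,
			   enum mlx4_net_trans_promisc_mode mode, u8 *hw_type)
{
	int ret = mlx4_map_sw_to_hw_steering_mode(dev, mode);

	if (ret < 0)
		return ret;
	*hw_type = ret;		/* e.g. MLX4_FS_MC_DEFAULT maps to 0x3 */
	return 0;
}
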
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index eac3dae..df15bb6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -730,85 +730,6 @@
 	struct list_head steer_entries[MLX4_NUM_STEERS];
 };
 
-struct mlx4_net_trans_rule_hw_ctrl {
-	__be32 ctrl;
-	u8 rsvd1;
-	u8 funcid;
-	u8 vep;
-	u8 port;
-	__be32 qpn;
-	__be32 rsvd2;
-};
-
-struct mlx4_net_trans_rule_hw_ib {
-	u8 size;
-	u8 rsvd1;
-	__be16 id;
-	u32 rsvd2;
-	__be32 qpn;
-	__be32 qpn_mask;
-	u8 dst_gid[16];
-	u8 dst_gid_msk[16];
-} __packed;
-
-struct mlx4_net_trans_rule_hw_eth {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	u8	rsvd1[6];
-	u8	dst_mac[6];
-	u16	rsvd2;
-	u8	dst_mac_msk[6];
-	u16	rsvd3;
-	u8	src_mac[6];
-	u16	rsvd4;
-	u8	src_mac_msk[6];
-	u8      rsvd5;
-	u8      ether_type_enable;
-	__be16  ether_type;
-	__be16  vlan_id_msk;
-	__be16  vlan_id;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_tcp_udp {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be16	rsvd1[3];
-	__be16	dst_port;
-	__be16	rsvd2;
-	__be16	dst_port_msk;
-	__be16	rsvd3;
-	__be16	src_port;
-	__be16	rsvd4;
-	__be16	src_port_msk;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_ipv4 {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be32	rsvd1;
-	__be32	dst_ip;
-	__be32	dst_ip_msk;
-	__be32	src_ip;
-	__be32	src_ip_msk;
-} __packed;
-
-struct _rule_hw {
-	union {
-		struct {
-			u8 size;
-			u8 rsvd;
-			__be16 id;
-		};
-		struct mlx4_net_trans_rule_hw_eth eth;
-		struct mlx4_net_trans_rule_hw_ib ib;
-		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
-		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
-	};
-};
-
 enum {
 	MLX4_PCI_DEV_IS_VF		= 1 << 0,
 	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index e329fe1..79fd269 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -298,3 +298,18 @@
 		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
+
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	struct mlx4_srq *srq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&srq_table->lock, flags);
+	srq = radix_tree_lookup(&srq_table->tree,
+				srqn & (dev->caps.num_srqs - 1));
+	spin_unlock_irqrestore(&srq_table->lock, flags);
+
+	return srq;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 07f6baa..9a95abf 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -912,8 +912,10 @@
 
 	ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
 					    &efx->pci_dev->dev);
-	if (!ptp->phc_clock)
+	if (IS_ERR(ptp->phc_clock)) {
+		rc = PTR_ERR(ptp->phc_clock);
 		goto fail3;
+	}
 
 	INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
 	ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
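
The sfc fix above hinges on a kernel convention: ptp_clock_register() reports
failure as an ERR_PTR()-encoded pointer rather than NULL, so the old NULL test
could never fire and the error was silently dropped.  A minimal sketch of the
correct pattern, assuming PTP clock support is built in:

#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>

/* Hypothetical helper: register a PHC and propagate the encoded errno. */
static int register_phc(struct ptp_clock_info *info, struct device *parent,
			struct ptp_clock **clk)
{
	*clk = ptp_clock_register(info, parent);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);	/* encoded errno, not NULL */
	return 0;
}
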
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 66e025a..f3c2d03 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -930,7 +930,7 @@
 		if (info->has_iqueue) {
 			gxio_mpipe_request_notif_ring_interrupt(
 				&context, cpu_x(cpu), cpu_y(cpu),
-				1, ingress_irq, info->iqueue.ring);
+				KERNEL_PL, ingress_irq, info->iqueue.ring);
 		}
 	}
 
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index c655fe6..5734480c1 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1990,7 +1990,8 @@
 		goto alloc_rx_failed;
 
 	/* Allocate rx skbs */
-	if (spider_net_alloc_rx_skbs(card))
+	result = spider_net_alloc_rx_skbs(card);
+	if (result)
 		goto alloc_skbs_failed;
 
 	spider_net_set_multi(netdev);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 49b8b58..484f77e 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -449,7 +449,7 @@
 			if ((--bc->hdlctx.slotcnt) > 0)
 				return 0;
 			bc->hdlctx.slotcnt = bc->ch_params.slottime;
-			if ((random32() % 256) > bc->ch_params.ppersist)
+			if ((prandom_u32() % 256) > bc->ch_params.ppersist)
 				return 0;
 		}
 	}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index a4a3516..3169252 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -389,7 +389,7 @@
 	if ((--s->hdlctx.slotcnt) > 0)
 		return;
 	s->hdlctx.slotcnt = s->ch_params.slottime;
-	if ((random32() % 256) > s->ch_params.ppersist)
+	if ((prandom_u32() % 256) > s->ch_params.ppersist)
 		return;
 	start_tx(dev, s);
 }
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index b2d863f..0721e72 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -638,7 +638,7 @@
 	yp->slotcnt = yp->slot / 10;
 
 	/* is random > persist ? */
-	if ((random32() % 256) > yp->pers)
+	if ((prandom_u32() % 256) > yp->pers)
 		return;
 
 	yam_start_tx(dev, yp);
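
All three hamradio hunks above are the same p-persistent CSMA decision: once
the slot timer expires, the driver transmits with probability roughly
ppersist/256 and otherwise waits another slot time.  A minimal userspace
sketch of that single decision (rand() standing in for prandom_u32()):

#include <stdint.h>
#include <stdlib.h>

/* One slot of p-persistent CSMA: nonzero means transmit now. */
static int may_transmit_this_slot(uint8_t ppersist)
{
	return ((uint32_t)rand() % 256) <= ppersist;
}
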
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a06fca6..22b4527 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -609,7 +609,7 @@
 {
 	struct bfin_sir_self *self = netdev_priv(dev);
 	struct bfin_sir_port *port = self->sir_port;
-	int err = -ENOMEM;
+	int err;
 
 	self->newspeed = 0;
 	self->speed = 9600;
@@ -623,8 +623,10 @@
 	bfin_sir_set_speed(port, 9600);
 
 	self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
-	if (!self->irlap)
+	if (!self->irlap) {
+		err = -ENOMEM;
 		goto err_irlap;
+	}
 
 	INIT_WORK(&self->work, bfin_sir_send_work);
 
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 4503452..1e11f2b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -126,7 +126,7 @@
 
 config MDIO_GPIO
 	tristate "Support for GPIO lib-based bitbanged MDIO buses"
-	depends on MDIO_BITBANG && GENERIC_GPIO
+	depends on MDIO_BITBANG && GPIOLIB
 	---help---
 	  Supports GPIO lib-based MDIO busses.
 
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 9eabfaa..5ca14d4 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -18,7 +18,7 @@
 
 static u32 random_N(unsigned int N)
 {
-	return reciprocal_divide(random32(), N);
+	return reciprocal_divide(prandom_u32(), N);
 }
 
 static bool rnd_transmit(struct team *team, struct sk_buff *skb)
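
random_N() above relies on reciprocal_divide(), which in this kernel computes
(u32)(((u64)A * R) >> 32); passing N directly therefore yields
floor(x * N / 2^32), a value spread over [0, N) with one multiply and no
division or modulo instruction.  A standalone sketch of the same mapping:

#include <stdint.h>

/* Multiply-shift bound: floor(x * n / 2^32) lies in [0, n) for any
 * 32-bit x; cheaper than x % n and free of the division instruction. */
static uint32_t bounded_random(uint32_t x, uint32_t n)
{
	return (uint32_t)(((uint64_t)x * n) >> 32);
}
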
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 24fbec2..078795f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -613,6 +613,13 @@
 	.driver_info = 0,
 },
 
+/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* AnyDATA ADU960S - handled by qmi_wwan */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 834e405..cf887c2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -501,6 +501,13 @@
 					      USB_CDC_PROTO_NONE),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* Dell Wireless 5804 (Novatel E371) */
+		USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b,
+					      USB_CLASS_COMM,
+					      USB_CDC_SUBCLASS_ETHERNET,
+					      USB_CDC_PROTO_NONE),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 	{	/* ADU960S */
 		USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a,
 					      USB_CLASS_COMM,
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a923d61..a79e9d3 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -426,6 +426,13 @@
 
 	dev_dbg(&dev->udev->dev, "%s", __func__);
 
+	/* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the
+	 * firmware restart itself.  After restarting, the modem will respond
+	 * with the SIERRA_NET_HIP_RESTART_ID indication.  The driver continues
+	 * sending MSYNC commands every few seconds until it receives the
+	 * RESTART event from the firmware.
+	 */
+
 	/* tell modem we are ready */
 	status = sierra_net_send_sync(dev);
 	if (status < 0)
@@ -704,6 +711,9 @@
 	/* set context index initially to 0 - prepares tx hdr template */
 	sierra_net_set_ctx_index(priv, 0);
 
+	/* prepare sync message template */
+	memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
+
 	/* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
 	dev->rx_urb_size  = SIERRA_NET_RX_URB_SIZE;
 	if (dev->udev->speed != USB_SPEED_HIGH)
@@ -739,11 +749,6 @@
 		kfree(priv);
 		return -ENODEV;
 	}
-	/* prepare sync message from template */
-	memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
-
-	/* initiate the sync sequence */
-	sierra_net_dosync(dev);
 
 	return 0;
 }
@@ -766,8 +771,9 @@
 		netdev_err(dev->net,
 			"usb_control_msg failed, status %d\n", status);
 
-	sierra_net_set_private(dev, NULL);
+	usbnet_status_stop(dev);
 
+	sierra_net_set_private(dev, NULL);
 	kfree(priv);
 }
 
@@ -908,6 +914,24 @@
 	.tx_fixup = sierra_net_tx_fixup,
 };
 
+static int
+sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+	int ret;
+
+	ret = usbnet_probe(udev, prod);
+	if (ret == 0) {
+		struct usbnet *dev = usb_get_intfdata(udev);
+
+		ret = usbnet_status_start(dev, GFP_KERNEL);
+		if (ret == 0) {
+			/* Interrupt URB now set up; initiate sync sequence */
+			sierra_net_dosync(dev);
+		}
+	}
+	return ret;
+}
+
 #define DIRECT_IP_DEVICE(vend, prod) \
 	{USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \
 	.driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
@@ -930,7 +954,7 @@
 static struct usb_driver sierra_net_driver = {
 	.name = "sierra_net",
 	.id_table = products,
-	.probe = usbnet_probe,
+	.probe = sierra_net_probe,
 	.disconnect = usbnet_disconnect,
 	.suspend = usbnet_suspend,
 	.resume = usbnet_resume,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1e5a9b7..f95cb03 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -252,6 +252,70 @@
 	return 0;
 }
 
+/* Submit the interrupt URB if not previously submitted, increasing refcount */
+int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
+{
+	int ret = 0;
+
+	WARN_ON_ONCE(dev->interrupt == NULL);
+	if (dev->interrupt) {
+		mutex_lock(&dev->interrupt_mutex);
+
+		if (++dev->interrupt_count == 1)
+			ret = usb_submit_urb(dev->interrupt, mem_flags);
+
+		dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
+			dev->interrupt_count);
+		mutex_unlock(&dev->interrupt_mutex);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usbnet_status_start);
+
+/* For resume; submit interrupt URB if previously submitted */
+static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
+{
+	int ret = 0;
+
+	mutex_lock(&dev->interrupt_mutex);
+	if (dev->interrupt_count) {
+		ret = usb_submit_urb(dev->interrupt, mem_flags);
+		dev_dbg(&dev->udev->dev,
+			"submitted interrupt URB for resume\n");
+	}
+	mutex_unlock(&dev->interrupt_mutex);
+	return ret;
+}
+
+/* Kill the interrupt URB if all submitters want it killed */
+void usbnet_status_stop(struct usbnet *dev)
+{
+	if (dev->interrupt) {
+		mutex_lock(&dev->interrupt_mutex);
+		WARN_ON(dev->interrupt_count == 0);
+
+		if (dev->interrupt_count && --dev->interrupt_count == 0)
+			usb_kill_urb(dev->interrupt);
+
+		dev_dbg(&dev->udev->dev,
+			"decremented interrupt URB count to %d\n",
+			dev->interrupt_count);
+		mutex_unlock(&dev->interrupt_mutex);
+	}
+}
+EXPORT_SYMBOL_GPL(usbnet_status_stop);
+
+/* For suspend; always kill interrupt URB */
+static void __usbnet_status_stop_force(struct usbnet *dev)
+{
+	if (dev->interrupt) {
+		mutex_lock(&dev->interrupt_mutex);
+		usb_kill_urb(dev->interrupt);
+		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
+		mutex_unlock(&dev->interrupt_mutex);
+	}
+}
+
 /* Passes this packet up the stack, updating its accounting.
  * Some link protocols batch packets, so their rx_fixup paths
  * can return clones as well as just modify the original skb.
@@ -725,7 +789,7 @@
 	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
 		usbnet_terminate_urbs(dev);
 
-	usb_kill_urb(dev->interrupt);
+	usbnet_status_stop(dev);
 
 	usbnet_purge_paused_rxq(dev);
 
@@ -787,7 +851,7 @@
 
 	/* start any status interrupt transfer */
 	if (dev->interrupt) {
-		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
+		retval = usbnet_status_start(dev, GFP_KERNEL);
 		if (retval < 0) {
 			netif_err(dev, ifup, dev->net,
 				  "intr submit %d\n", retval);
@@ -1458,6 +1522,8 @@
 	dev->delay.data = (unsigned long) dev;
 	init_timer (&dev->delay);
 	mutex_init (&dev->phy_mutex);
+	mutex_init(&dev->interrupt_mutex);
+	dev->interrupt_count = 0;
 
 	dev->net = net;
 	strcpy (net->name, "usb%d");
@@ -1593,7 +1659,7 @@
 		 */
 		netif_device_detach (dev->net);
 		usbnet_terminate_urbs(dev);
-		usb_kill_urb(dev->interrupt);
+		__usbnet_status_stop_force(dev);
 
 		/*
 		 * reattach so runtime management can use and
@@ -1613,9 +1679,8 @@
 	int                     retval;
 
 	if (!--dev->suspend_count) {
-		/* resume interrupt URBs */
-		if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
-			usb_submit_urb(dev->interrupt, GFP_NOIO);
+		/* resume interrupt URB if it was previously submitted */
+		__usbnet_status_start_force(dev, GFP_NOIO);
 
 		spin_lock_irq(&dev->txq.lock);
 		while ((res = usb_get_from_anchor(&dev->deferred))) {
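
The usbnet core now reference-counts the single shared interrupt URB:
usbnet_status_start() submits it on the first reference and
usbnet_status_stop() kills it when the last reference is dropped, so a
minidriver (as sierra_net does above) can keep status notifications alive
even while the netdev itself is closed.  A hedged sketch of a minidriver
using the pair, with hypothetical bind/unbind names:

#include <linux/usb/usbnet.h>

static int my_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret = usbnet_status_start(dev, GFP_KERNEL);

	if (ret)
		return ret;
	/* ... device setup that depends on status interrupts ... */
	return 0;
}

static void my_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	/* Drops our reference; the URB is killed once the count hits zero. */
	usbnet_status_stop(dev);
}
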
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 2b90da0..e7a1a47 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1117,7 +1117,7 @@
 	if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
 		/* 100ms ~ 300ms */
 		err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
-						100 * (1 + (random32() % 3)));
+						100 * (1 + prandom_u32() % 3));
 	else
 		err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
 
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a0cb077..d3c8ece 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -216,7 +216,7 @@
 	mwifiex_form_mgmt_frame(skb, buf, len);
 	mwifiex_queue_tx_pkt(priv, skb);
 
-	*cookie = random32() | 1;
+	*cookie = prandom_u32() | 1;
 	cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
 
 	wiphy_dbg(wiphy, "info: management frame transmitted\n");
@@ -271,7 +271,7 @@
 					 duration);
 
 	if (!ret) {
-		*cookie = random32() | 1;
+		*cookie = prandom_u32() | 1;
 		priv->roc_cfg.cookie = *cookie;
 		priv->roc_cfg.chan = *chan;
 
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 23049ae..d5a57a9 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -84,13 +84,10 @@
 		phy = get_phy_device(mdio, addr, is_c45);
 
 		if (!phy || IS_ERR(phy)) {
-			phy = phy_device_create(mdio, addr, 0, false, NULL);
-			if (!phy || IS_ERR(phy)) {
-				dev_err(&mdio->dev,
-					"error creating PHY at address %i\n",
-					addr);
-				continue;
-			}
+			dev_err(&mdio->dev,
+				"cannot get PHY at address %i\n",
+				addr);
+			continue;
 		}
 
 		/* Associate the OF node with the device structure so it
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 42cfcd9..1ff1b67 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -575,7 +575,7 @@
 
 	mtsp(sid,1);
 	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
-	pa |= (ci >> 12) & 0xff;  /* move CI (8 bits) into lowest byte */
+	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */
 
 	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
 	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */
@@ -1376,7 +1376,7 @@
 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 {
 	u32 iova_space_size, iova_space_mask;
-	unsigned int pdir_size, iov_order;
+	unsigned int pdir_size, iov_order, tcnfg;
 
 	/*
 	** Determine IOVA Space size from memory size.
@@ -1468,8 +1468,19 @@
 	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
 	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
 
-	/* Set I/O PDIR Page size to 4K */
-	WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
+	/* Set I/O PDIR Page size to system page size */
+	switch (PAGE_SHIFT) {
+		case 12: tcnfg = 0; break;	/*  4K */
+		case 13: tcnfg = 1; break;	/*  8K */
+		case 14: tcnfg = 2; break;	/* 16K */
+		case 16: tcnfg = 3; break;	/* 64K */
+		default:
+			panic(__FILE__ ": Unsupported system page size %d",
+				1 << PAGE_SHIFT);
+			break;
+	}
+	/* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
+	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
 
 	/*
 	** Clear I/O TLB of any possible entries.
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 748f8f3..32e66a6 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -174,6 +174,7 @@
 	 * Can not put in pci_device_add yet because resources
 	 * are not assigned yet for some devices.
 	 */
+	pci_fixup_device(pci_fixup_final, dev);
 	pci_create_sysfs_dev_files(dev);
 
 	dev->match_driver = true;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d40bed7..2c10752 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -563,8 +563,10 @@
 	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
 	entry->msi_attrib.pos		= dev->msi_cap;
 
-	entry->mask_pos = dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT) ?
-		PCI_MSI_MASK_64 : PCI_MSI_MASK_32;
+	if (control & PCI_MSI_FLAGS_64BIT)
+		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+	else
+		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
 	/* All MSIs are unmasked by default, Mask them all */
 	if (entry->msi_attrib.maskbit)
 		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
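
The msi.c hunk fixes a classic C precedence bug: '+' binds tighter than '?:',
so the old expression parsed as (dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT))
? PCI_MSI_MASK_64 : PCI_MSI_MASK_32, and since msi_cap is nonzero, mask_pos was
always the bare PCI_MSI_MASK_64 constant with the capability base lost.  A
small standalone demonstration:

#include <stdio.h>

int main(void)
{
	int base = 0x50, flag = 0;

	/* Parses as (base + flag) ? 4 : 0, i.e. 4 -- not base + 0. */
	int wrong = base + flag ? 4 : 0;
	int right = base + (flag ? 4 : 0);

	printf("wrong=%d right=%d\n", wrong, right);	/* wrong=4 right=80 */
	return 0;
}
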
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 631aeb7..70f10fa 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1341,7 +1341,6 @@
 	list_add_tail(&dev->bus_list, &bus->devices);
 	up_write(&pci_bus_sem);
 
-	pci_fixup_device(pci_fixup_final, dev);
 	ret = pcibios_add_device(dev);
 	WARN_ON(ret < 0);
 
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index a3a851e..18c0d8d 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -68,12 +68,6 @@
 
 #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
 
-/* The RPX series use SLOT_B */
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-#define CONFIG_PCMCIA_SLOT_B
-#define CONFIG_BD_IS_MHZ
-#endif
-
 /* The ADS board use SLOT_A */
 #ifdef CONFIG_ADS
 #define CONFIG_PCMCIA_SLOT_A
@@ -253,81 +247,6 @@
 
 #define PCMCIA_BMT_LIMIT (15*4)	/* Bus Monitor Timeout value */
 
-/* ------------------------------------------------------------------------- */
-/* board specific stuff:                                                     */
-/* voltage_set(), hardware_enable() and hardware_disable()                   */
-/* ------------------------------------------------------------------------- */
-/* RPX Boards from Embedded Planet                                           */
-
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-
-/* The RPX boards seem to have their bus monitor timeout set to 6*8 clocks.
- * SYPCR is write once only, therefore the slowest memory must be faster
- * than the bus monitor or we will get a machine check due to the bus timeout.
- */
-
-#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
-
-#undef PCMCIA_BMT_LIMIT
-#define PCMCIA_BMT_LIMIT (6*8)
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-	u32 reg = 0;
-
-	switch (vcc) {
-	case 0:
-		break;
-	case 33:
-		reg |= BCSR1_PCVCTL4;
-		break;
-	case 50:
-		reg |= BCSR1_PCVCTL5;
-		break;
-	default:
-		return 1;
-	}
-
-	switch (vpp) {
-	case 0:
-		break;
-	case 33:
-	case 50:
-		if (vcc == vpp)
-			reg |= BCSR1_PCVCTL6;
-		else
-			return 1;
-		break;
-	case 120:
-		reg |= BCSR1_PCVCTL7;
-	default:
-		return 1;
-	}
-
-	if (!((vcc == 50) || (vcc == 0)))
-		return 1;
-
-	/* first, turn off all power */
-
-	out_be32(((u32 *) RPX_CSR_ADDR),
-		 in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
-						     BCSR1_PCVCTL5 |
-						     BCSR1_PCVCTL6 |
-						     BCSR1_PCVCTL7));
-
-	/* enable new powersettings */
-
-	out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
-
-	return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)	/* No hardware to enable */
-#define hardware_disable(_slot_)	/* No hardware to disable */
-
-#endif				/* CONFIG_RPXCLASSIC */
-
 /* FADS Boards from Motorola                                               */
 
 #if defined(CONFIG_FADS)
@@ -419,65 +338,6 @@
 
 #endif
 
-/* ------------------------------------------------------------------------- */
-/* Motorola MBX860                                                           */
-
-#if defined(CONFIG_MBX)
-
-#define PCMCIA_BOARD_MSG "MBX"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-	u8 reg = 0;
-
-	switch (vcc) {
-	case 0:
-		break;
-	case 33:
-		reg |= CSR2_VCC_33;
-		break;
-	case 50:
-		reg |= CSR2_VCC_50;
-		break;
-	default:
-		return 1;
-	}
-
-	switch (vpp) {
-	case 0:
-		break;
-	case 33:
-	case 50:
-		if (vcc == vpp)
-			reg |= CSR2_VPP_VCC;
-		else
-			return 1;
-		break;
-	case 120:
-		if ((vcc == 33) || (vcc == 50))
-			reg |= CSR2_VPP_12;
-		else
-			return 1;
-	default:
-		return 1;
-	}
-
-	/* first, turn off all power */
-	out_8((u8 *) MBX_CSR2_ADDR,
-	      in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
-
-	/* enable new powersettings */
-	out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
-
-	return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)	/* No hardware to enable */
-#define hardware_disable(_slot_)	/* No hardware to disable */
-
-#endif				/* CONFIG_MBX */
-
 #if defined(CONFIG_PRxK)
 #include <asm/cpld.h>
 extern volatile fpga_pc_regs *fpga_pc;
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 0e1f99c..f8a2ae4 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -6,7 +6,7 @@
 
 config PINCTRL_SH_PFC
 	# XXX move off the gpio dependency
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
 	select PINMUX
 	select PINCONF
@@ -40,19 +40,19 @@
 config PINCTRL_PFC_SH7203
 	def_bool y
 	depends on CPU_SUBTYPE_SH7203
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7264
 	def_bool y
 	depends on CPU_SUBTYPE_SH7264
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7269
 	def_bool y
 	depends on CPU_SUBTYPE_SH7269
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7372
@@ -68,55 +68,55 @@
 config PINCTRL_PFC_SH7720
 	def_bool y
 	depends on CPU_SUBTYPE_SH7720
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7722
 	def_bool y
 	depends on CPU_SUBTYPE_SH7722
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7723
 	def_bool y
 	depends on CPU_SUBTYPE_SH7723
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7724
 	def_bool y
 	depends on CPU_SUBTYPE_SH7724
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7734
 	def_bool y
 	depends on CPU_SUBTYPE_SH7734
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7757
 	def_bool y
 	depends on CPU_SUBTYPE_SH7757
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7785
 	def_bool y
 	depends on CPU_SUBTYPE_SH7785
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SH7786
 	def_bool y
 	depends on CPU_SUBTYPE_SH7786
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 config PINCTRL_PFC_SHX3
 	def_bool y
 	depends on CPU_SUBTYPE_SHX3
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select PINCTRL_SH_PFC
 
 endif
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 3338437..8577261 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -781,4 +781,12 @@
 	  graphics as well as the backlight. Currently only backlight
 	  control is supported by the driver.
 
+config PVPANIC
+	tristate "pvpanic device support"
+	depends on ACPI
+	---help---
+	  This driver provides support for the pvpanic device.  pvpanic is
+	  a paravirtualized device provided by QEMU; it lets a virtual machine
+	  (guest) communicate panic events to the host.
+
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index ace2b38..ef0ec74 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -51,3 +51,5 @@
 obj-$(CONFIG_SAMSUNG_Q10)	+= samsung-q10.o
 obj-$(CONFIG_APPLE_GMUX)	+= apple-gmux.o
 obj-$(CONFIG_CHROMEOS_LAPTOP)	+= chromeos_laptop.o
+
+obj-$(CONFIG_PVPANIC)           += pvpanic.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 210b5b8..8fcb41e 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -171,6 +171,15 @@
 		},
 		.driver_data = &quirk_asus_x401u,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. X75A",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
+		},
+		.driver_data = &quirk_asus_x401u,
+	},
 	{},
 };
 
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index fa3ee62..1134119 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -284,6 +284,7 @@
 {
 	/* Final token is a terminator, so we don't want to copy it */
 	int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+	struct calling_interface_token *new_da_tokens;
 	struct calling_interface_structure *table =
 		container_of(dm, struct calling_interface_structure, header);
 
@@ -296,12 +297,13 @@
 	da_command_address = table->cmdIOAddress;
 	da_command_code = table->cmdIOCode;
 
-	da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
-			     sizeof(struct calling_interface_token),
-			     GFP_KERNEL);
+	new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+				 sizeof(struct calling_interface_token),
+				 GFP_KERNEL);
 
-	if (!da_tokens)
+	if (!new_da_tokens)
 		return;
+	da_tokens = new_da_tokens;
 
 	memcpy(da_tokens+da_num_tokens, table->tokens,
 	       sizeof(struct calling_interface_token) * tokens);
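
The dell-laptop fix is the standard remedy for the krealloc() anti-pattern:
assigning the result straight back to the only pointer loses (and leaks) the
original buffer when krealloc() returns NULL.  A minimal sketch of the safe
grow pattern, with a hypothetical helper name:

#include <linux/slab.h>

/* Keep the old pointer until krealloc() is known to have succeeded. */
static int grow_array(void **arr, size_t new_size)
{
	void *tmp = krealloc(*arr, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *arr is still valid and still owned */
	*arr = tmp;
	return 0;
}
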
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index 3f94545..bcf8cc6 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -34,6 +34,14 @@
 #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
 #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
 
+struct dell_wmi_event {
+	u16	length;
+	/* 0x000: A hot key was pressed or an event occurred
+	 * 0x00F: A sequence of hot keys was pressed */
+	u16	type;
+	u16	event[];
+};
+
 static const char *dell_wmi_aio_guids[] = {
 	EVENT_GUID1,
 	EVENT_GUID2,
@@ -46,15 +54,41 @@
 static const struct key_entry dell_wmi_aio_keymap[] = {
 	{ KE_KEY, 0xc0, { KEY_VOLUMEUP } },
 	{ KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
+	{ KE_KEY, 0xe030, { KEY_VOLUMEUP } },
+	{ KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
+	{ KE_KEY, 0xe020, { KEY_MUTE } },
+	{ KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+	{ KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+	{ KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+	{ KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
 	{ KE_END, 0 }
 };
 
 static struct input_dev *dell_wmi_aio_input_dev;
 
+/*
+ * The new WMI event data format follows the dell_wmi_event structure, so
+ * check whether the buffer matches that format.
+ */
+static bool dell_wmi_aio_event_check(u8 *buffer, int length)
+{
+	struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;
+
+	if (event == NULL || length < 6)
+		return false;
+
+	if ((event->type == 0 || event->type == 0xf) &&
+			event->length >= 2)
+		return true;
+
+	return false;
+}
+
 static void dell_wmi_aio_notify(u32 value, void *context)
 {
 	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
+	struct dell_wmi_event *event;
 	acpi_status status;
 
 	status = wmi_get_event_data(value, &response);
@@ -65,7 +99,7 @@
 
 	obj = (union acpi_object *)response.pointer;
 	if (obj) {
-		unsigned int scancode;
+		unsigned int scancode = 0;
 
 		switch (obj->type) {
 		case ACPI_TYPE_INTEGER:
@@ -75,13 +109,22 @@
 				scancode, 1, true);
 			break;
 		case ACPI_TYPE_BUFFER:
-			/* Broken machines return the scancode in a buffer */
-			if (obj->buffer.pointer && obj->buffer.length > 0) {
-				scancode = obj->buffer.pointer[0];
+			if (dell_wmi_aio_event_check(obj->buffer.pointer,
+						obj->buffer.length)) {
+				event = (struct dell_wmi_event *)
+					obj->buffer.pointer;
+				scancode = event->event[0];
+			} else {
+				/* Broken machines return the scancode in a
+				   buffer */
+				if (obj->buffer.pointer &&
+						obj->buffer.length > 0)
+					scancode = obj->buffer.pointer[0];
+			}
+			if (scancode)
 				sparse_keymap_report_event(
 					dell_wmi_aio_input_dev,
 					scancode, 1, true);
-			}
 			break;
 		}
 	}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1a779bb..8df0c5a 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -71,6 +71,14 @@
 	HPWMI_WIRELESS = 5,
 	HPWMI_CPU_BATTERY_THROTTLE = 6,
 	HPWMI_LOCK_SWITCH = 7,
+	HPWMI_LID_SWITCH = 8,
+	HPWMI_SCREEN_ROTATION = 9,
+	HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
+	HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
+	HPWMI_PROXIMITY_SENSOR = 0x0C,
+	HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
+	HPWMI_PEAKSHIFT_PERIOD = 0x0F,
+	HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
 };
 
 struct bios_args {
@@ -536,6 +544,22 @@
 		break;
 	case HPWMI_LOCK_SWITCH:
 		break;
+	case HPWMI_LID_SWITCH:
+		break;
+	case HPWMI_SCREEN_ROTATION:
+		break;
+	case HPWMI_COOLSENSE_SYSTEM_MOBILE:
+		break;
+	case HPWMI_COOLSENSE_SYSTEM_HOT:
+		break;
+	case HPWMI_PROXIMITY_SENSOR:
+		break;
+	case HPWMI_BACKLIT_KB_BRIGHTNESS:
+		break;
+	case HPWMI_PEAKSHIFT_PERIOD:
+		break;
+	case HPWMI_BATTERY_CHARGE_PERIOD:
+		break;
 	default:
 		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
 		break;
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index e64a7a8..a8e43cf 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -362,7 +362,8 @@
 
 static int lis3lv02d_resume(struct device *dev)
 {
-	return lis3lv02d_poweron(&lis3_dev);
+	lis3lv02d_poweron(&lis3_dev);
+	return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 17f00b8..89c4519 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -640,7 +640,8 @@
 	for (bit = 0; bit < 16; bit++) {
 		if (test_bit(bit, &value)) {
 			switch (bit) {
-			case 6:
+			case 0:	/* Z580 */
+			case 6:	/* Z570 */
 				/* Thermal Management button */
 				ideapad_input_report(priv, 65);
 				break;
@@ -648,6 +649,9 @@
 				/* OneKey Theater button */
 				ideapad_input_report(priv, 64);
 				break;
+			default:
+				pr_info("Unknown special button: %lu\n", bit);
+				break;
 			}
 		}
 	}
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
new file mode 100644
index 0000000..47ae0c4
--- /dev/null
+++ b/drivers/platform/x86/pvpanic.c
@@ -0,0 +1,124 @@
+/*
+ *  pvpanic.c - pvpanic Device Support
+ *
+ *  Copyright (C) 2013 Fujitsu.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic device driver");
+MODULE_LICENSE("GPL");
+
+static int pvpanic_add(struct acpi_device *device);
+static int pvpanic_remove(struct acpi_device *device);
+
+static const struct acpi_device_id pvpanic_device_ids[] = {
+	{ "QEMU0001", 0 },
+	{ "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
+
+#define PVPANIC_PANICKED	(1 << 0)
+
+static u16 port;
+
+static struct acpi_driver pvpanic_driver = {
+	.name =		"pvpanic",
+	.class =	"QEMU",
+	.ids =		pvpanic_device_ids,
+	.ops =		{
+				.add =		pvpanic_add,
+				.remove =	pvpanic_remove,
+			},
+	.owner =	THIS_MODULE,
+};
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+	outb(event, port);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+		     void *unused)
+{
+	pvpanic_send_event(PVPANIC_PANICKED);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+	.notifier_call = pvpanic_panic_notify,
+};
+
+
+static acpi_status
+pvpanic_walk_resources(struct acpi_resource *res, void *context)
+{
+	switch (res->type) {
+	case ACPI_RESOURCE_TYPE_END_TAG:
+		return AE_OK;
+
+	case ACPI_RESOURCE_TYPE_IO:
+		port = res->data.io.minimum;
+		return AE_OK;
+
+	default:
+		return AE_ERROR;
+	}
+}
+
+static int pvpanic_add(struct acpi_device *device)
+{
+	acpi_status status;
+	u64 ret;
+
+	status = acpi_evaluate_integer(device->handle, "_STA", NULL,
+				       &ret);
+
+	if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B)
+		return -ENODEV;
+
+	acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+			    pvpanic_walk_resources, NULL);
+
+	if (!port)
+		return -ENODEV;
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &pvpanic_panic_nb);
+
+	return 0;
+}
+
+static int pvpanic_remove(struct acpi_device *device)
+{
+
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &pvpanic_panic_nb);
+	return 0;
+}
+
+module_acpi_driver(pvpanic_driver);
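
pvpanic's entire runtime behaviour is the notifier registered in
pvpanic_add(): when the guest panics, the core kernel walks
panic_notifier_list and the crash becomes a single port write that the
hypervisor traps. Paraphrasing the calling side in kernel/panic.c (not
part of this patch):

	void panic(const char *fmt, ...)
	{
		/* ... console output, stop other CPUs ... */

		/* Runs every registered block, including pvpanic_panic_nb,
		 * which in turn does outb(PVPANIC_PANICKED, port). */
		atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

		/* ... */
	}
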
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 5f77005..1a90b62 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -176,10 +176,7 @@
 						   samsungq10_probe,
 						   NULL, 0, NULL, 0);
 
-	if (IS_ERR(samsungq10_device))
-		return PTR_ERR(samsungq10_device);
-
-	return 0;
+	return PTR_RET(samsungq10_device);
 }
 
 static void __exit samsungq10_exit(void)
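
PTR_RET() collapses the open-coded IS_ERR/PTR_ERR sequence deleted above.
For reference, the helper in include/linux/err.h of this era is essentially:

	static inline int PTR_RET(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);	/* propagate the encoded errno */
		else
			return 0;		/* valid pointer: success */
	}

(Later kernels rename this helper PTR_ERR_OR_ZERO; the semantics are
unchanged.)
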
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index d544e3a..2ac045f 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1255,6 +1255,11 @@
 			real_ev = __sony_nc_gfx_switch_status_get();
 			break;
 
+		case 0x015B:
+			/* Hybrid GFX switching SVS151290S */
+			ev_type = GFX_SWITCH;
+			real_ev = __sony_nc_gfx_switch_status_get();
+			break;
 		default:
 			dprintk("Unknown event 0x%x for handle 0x%x\n",
 					event, handle);
@@ -1353,6 +1358,7 @@
 			break;
 		case 0x0128:
 		case 0x0146:
+		case 0x015B:
 			result = sony_nc_gfx_switch_setup(pf_device, handle);
 			if (result)
 				pr_err("couldn't set up GFX Switch status (%d)\n",
@@ -1375,6 +1381,7 @@
 		case 0x0143:
 		case 0x014b:
 		case 0x014c:
+		case 0x0163:
 			result = sony_nc_kbd_backlight_setup(pf_device, handle);
 			if (result)
 				pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1426,6 +1433,7 @@
 			break;
 		case 0x0128:
 		case 0x0146:
+		case 0x015B:
 			sony_nc_gfx_switch_cleanup(pd);
 			break;
 		case 0x0131:
@@ -1439,6 +1447,7 @@
 		case 0x0143:
 		case 0x014b:
 		case 0x014c:
+		case 0x0163:
 			sony_nc_kbd_backlight_cleanup(pd);
 			break;
 		default:
@@ -1485,6 +1494,7 @@
 		case 0x0143:
 		case 0x014b:
 		case 0x014c:
+		case 0x0163:
 			sony_nc_kbd_backlight_resume();
 			break;
 		default:
@@ -2390,7 +2400,9 @@
 {
 	unsigned int result;
 
-	if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+	if (sony_call_snc_handle(gfxs_ctl->handle,
+				gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
+				&result))
 		return -EIO;
 
 	switch (gfxs_ctl->handle) {
@@ -2400,6 +2412,12 @@
 		 */
 		return result & 0x1 ? SPEED : STAMINA;
 		break;
+	case 0x015B:
+		/* 0: discrete GFX (speed)
+		 * 1: integrated GFX (stamina)
+		 */
+		return result & 0x1 ? STAMINA : SPEED;
+		break;
 	case 0x0128:
 		/* it's a more elaborated bitmask, for now:
 		 * 2: integrated GFX (stamina)
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index a5d97ea..8bb2644 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -66,7 +66,7 @@
 
 config REGULATOR_GPIO
 	tristate "GPIO regulator support"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  This driver provides support for regulators that can be
 	  controlled via gpios.
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 224d634..ccf54f0 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -68,6 +68,7 @@
 enum rtc_type {
 	rtc_undef = 0,
 	rtc_r2025sd,
+	rtc_r2221tl,
 	rtc_rs5c372a,
 	rtc_rs5c372b,
 	rtc_rv5c386,
@@ -76,6 +77,7 @@
 
 static const struct i2c_device_id rs5c372_id[] = {
 	{ "r2025sd", rtc_r2025sd },
+	{ "r2221tl", rtc_r2221tl },
 	{ "rs5c372a", rtc_rs5c372a },
 	{ "rs5c372b", rtc_rs5c372b },
 	{ "rv5c386", rtc_rv5c386 },
@@ -529,6 +531,7 @@
 		rs5c372->time24 = 1;
 		break;
 	case rtc_r2025sd:
+	case rtc_r2221tl:
 	case rtc_rv5c386:
 	case rtc_rv5c387a:
 		buf[0] |= RV5C387_CTRL1_24;
@@ -609,6 +612,7 @@
 			rs5c372->time24 = 1;
 		break;
 	case rtc_r2025sd:
+	case rtc_r2221tl:
 	case rtc_rv5c386:
 	case rtc_rv5c387a:
 		if (rs5c372->regs[RS5C_REG_CTRL1] & RV5C387_CTRL1_24)
@@ -640,6 +644,7 @@
 	dev_info(&client->dev, "%s found, %s, driver version " DRV_VERSION "\n",
 			({ char *s; switch (rs5c372->type) {
 			case rtc_r2025sd:	s = "r2025sd"; break;
+			case rtc_r2221tl:	s = "r2221tl"; break;
 			case rtc_rs5c372a:	s = "rs5c372a"; break;
 			case rtc_rs5c372b:	s = "rs5c372b"; break;
 			case rtc_rv5c386:	s = "rv5c386"; break;
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c
index 249b653..fc3dee9 100644
--- a/drivers/rtc/rtc-tile.c
+++ b/drivers/rtc/rtc-tile.c
@@ -146,6 +146,7 @@
  */
 static void __exit tile_rtc_driver_exit(void)
 {
+	platform_device_unregister(tile_rtc_platform_device);
 	platform_driver_unregister(&tile_rtc_platform_driver);
 }
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 82758cb..4361d97 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2997,18 +2997,14 @@
 	return rc;
 }
 
-static int dasd_release(struct gendisk *disk, fmode_t mode)
+static void dasd_release(struct gendisk *disk, fmode_t mode)
 {
-	struct dasd_device *base;
-
-	base = dasd_device_from_gendisk(disk);
-	if (!base)
-		return -ENODEV;
-
-	atomic_dec(&base->block->open_count);
-	module_put(base->discipline->owner);
-	dasd_put_device(base);
-	return 0;
+	struct dasd_device *base = dasd_device_from_gendisk(disk);
+	if (base) {
+		atomic_dec(&base->block->open_count);
+		module_put(base->discipline->owner);
+		dasd_put_device(base);
+	}
 }
 
 /*
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b6ad0de..6eca019 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -26,7 +26,7 @@
 #define DCSS_BUS_ID_SIZE 20
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
-static int dcssblk_release(struct gendisk *disk, fmode_t mode);
+static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
 				 void **kaddr, unsigned long *pfn);
@@ -781,16 +781,15 @@
 	return rc;
 }
 
-static int
+static void
 dcssblk_release(struct gendisk *disk, fmode_t mode)
 {
 	struct dcssblk_dev_info *dev_info = disk->private_data;
 	struct segment_info *entry;
-	int rc;
 
 	if (!dev_info) {
-		rc = -ENODEV;
-		goto out;
+		WARN_ON(1);
+		return;
 	}
 	down_write(&dcssblk_devices_sem);
 	if (atomic_dec_and_test(&dev_info->use_count)
@@ -803,9 +802,6 @@
 		dev_info->save_pending = 0;
 	}
 	up_write(&dcssblk_devices_sem);
-	rc = 0;
-out:
-	return rc;
 }
 
 static void
@@ -826,8 +822,7 @@
 	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
 		/* Request is not page-aligned. */
 		goto fail;
-	if (((bio->bi_size >> 9) + bio->bi_sector)
-			> get_capacity(bio->bi_bdev->bd_disk)) {
+	if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
 		/* Request beyond end of DCSS segment. */
 		goto fail;
 	}
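
bio_end_sector() packages exactly the arithmetic the hunk above deletes.
To the best of my recollection, the helpers in this era's
include/linux/bio.h read:

	#define bio_sectors(bio)	((bio)->bi_size >> 9)	/* bytes -> 512B sectors */
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors(bio))
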
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index b303cab..5d73e6e 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -123,10 +123,9 @@
 	return scm_get_ref();
 }
 
-static int scm_release(struct gendisk *gendisk, fmode_t mode)
+static void scm_release(struct gendisk *gendisk, fmode_t mode)
 {
 	scm_put_ref();
-	return 0;
 }
 
 static const struct block_device_operations scm_blk_devops = {
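
The dasd, dcssblk and scm_blk hunks above are all fallout from the
block-layer change that made the release method return void: no caller
ever consumed the error, so returning one only produced dead code such as
the removed -ENODEV paths. Abridged, the ops structure now reads:

	struct block_device_operations {
		int	(*open)(struct block_device *bdev, fmode_t mode);
		void	(*release)(struct gendisk *disk, fmode_t mode);	/* was int */
		/* remaining members unchanged */
	};
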
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index db95c54..86af29f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,6 +1353,8 @@
 	tristate "Emulex LightPulse Fibre Channel Support"
 	depends on PCI && SCSI
 	select SCSI_FC_ATTRS
+	select GENERIC_CSUM
+	select CRC_T10DIF
 	help
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 64136c56..3307238 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -84,7 +84,7 @@
 	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
 	int ddb = (int) (unsigned long) dev->lldd_dev;
 
-	if (dev->dev_type == SATA_PM_PORT)
+	if (dev->dev_type == SAS_SATA_PM_PORT)
 		asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
 	else if (dev->tproto)
 		asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@
 	int ddb = (int) (unsigned long) dev->lldd_dev;
 	u32 qdepth = 0;
 
-	if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+	if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
 		if (ata_id_has_ncq(ata_dev->id))
 			qdepth = ata_id_queue_depth(ata_dev->id);
 		asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@
 	int ddb = (int) (unsigned long) dev->lldd_dev;
 
 	asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-	if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-	    dev->dev_type == SATA_PM_PORT) {
+	if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+	    dev->dev_type == SAS_SATA_PM_PORT) {
 		struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
 			dev->frame_rcvd;
 		asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@
 	asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
 	if (dev->port->oob_mode != SATA_OOB_MODE) {
 		flags |= OPEN_REQUIRED;
-		if ((dev->dev_type == SATA_DEV) ||
+		if ((dev->dev_type == SAS_SATA_DEV) ||
 		    (dev->tproto & SAS_PROTOCOL_STP)) {
 			struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
 			if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@
 		} else {
 			flags |= CONCURRENT_CONN_SUPP;
 			if (!dev->parent &&
-			    (dev->dev_type == EDGE_DEV ||
-			     dev->dev_type == FANOUT_DEV))
+			    (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+			     dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
 				asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
 						       4);
 			else
@@ -198,7 +198,7 @@
 			asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
 		}
 	}
-	if (dev->dev_type == SATA_PM)
+	if (dev->dev_type == SAS_SATA_PM)
 		flags |= SATA_MULTIPORT;
 	asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
 
@@ -211,7 +211,7 @@
 	asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
 	asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
 
-	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
 		i = asd_init_sata(dev);
 		if (i < 0) {
 			asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@
 		}
 	}
 
-	if (dev->dev_type == SAS_END_DEV) {
+	if (dev->dev_type == SAS_END_DEVICE) {
 		struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
 		if (rdev->I_T_nexus_loss_timeout > 0)
 			asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@
 
 	spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
 	switch (dev->dev_type) {
-	case SATA_PM:
+	case SAS_SATA_PM:
 		res = asd_init_sata_pm_ddb(dev);
 		break;
-	case SATA_PM_PORT:
+	case SAS_SATA_PM_PORT:
 		res = asd_init_sata_pm_port_ddb(dev);
 		break;
 	default:
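
The aic94xx churn in this and the next two files is purely mechanical:
the duplicate SAS device-type enums were unified under SAS_-prefixed
names. The renames, as evidenced by the hunks themselves:

	/* old name (enum sas_dev_type)    unified name
	 * SAS_END_DEV               ->    SAS_END_DEVICE
	 * EDGE_DEV                  ->    SAS_EDGE_EXPANDER_DEVICE
	 * FANOUT_DEV                ->    SAS_FANOUT_EXPANDER_DEVICE
	 * SATA_DEV                  ->    SAS_SATA_DEV
	 * SATA_PM                   ->    SAS_SATA_PM
	 * SATA_PM_PORT              ->    SAS_SATA_PM_PORT
	 */
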
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 81b736c..4df867e 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -74,7 +74,7 @@
 
 	memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
 
-	phy->identify_frame->dev_type = SAS_END_DEV;
+	phy->identify_frame->dev_type = SAS_END_DEVICE;
 	if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
 		phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
 	if (phy->sas_phy.role & PHY_ROLE_TARGET)
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index cf90409..d4c35df 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -184,7 +184,7 @@
 	struct sas_phy *phy = sas_get_local_phy(dev);
 	/* Standard mandates link reset for ATA  (type 0) and
 	 * hard reset for SSP (type 1) */
-	int reset_type = (dev->dev_type == SATA_DEV ||
+	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
 			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 
 	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index f1733df..777e7c0 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 5c87768..e66aa7c 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@
 	uint16_t status = 0, addl_status = 0, wrb_num = 0;
 	struct be_mcc_wrb *temp_wrb;
 	struct be_cmd_req_hdr *ioctl_hdr;
+	struct be_cmd_resp_hdr *ioctl_resp_hdr;
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
 	if (beiscsi_error(phba))
@@ -204,6 +205,12 @@
 			    ioctl_hdr->subsystem,
 			    ioctl_hdr->opcode,
 			    status, addl_status);
+
+		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+			if (ioctl_resp_hdr->response_length)
+				goto release_mcc_tag;
+		}
 		rc = -EAGAIN;
 	}
 
@@ -267,6 +274,7 @@
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+	struct be_cmd_resp_hdr *resp_hdr;
 
 	be_dws_le_to_cpu(compl, 4);
 
@@ -284,6 +292,11 @@
 			    hdr->subsystem, hdr->opcode,
 			    compl_status, extd_status);
 
+		if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+			resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+			if (resp_hdr->response_length)
+				return 0;
+		}
 		return -EBUSY;
 	}
 	return 0;
@@ -335,30 +348,26 @@
 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 		struct be_async_event_link_state *evt)
 {
-	switch (evt->port_link_status) {
-	case ASYNC_EVENT_LINK_DOWN:
+	if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+	    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+	     (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+		phba->state = BE_ADAPTER_LINK_DOWN;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-			    "BC_%d : Link Down on Physical Port %d\n",
+			    "BC_%d : Link Down on Port %d\n",
 			    evt->physical_port);
 
-		phba->state |= BE_ADAPTER_LINK_DOWN;
 		iscsi_host_for_each_session(phba->shost,
 					    be2iscsi_fail_session);
-		break;
-	case ASYNC_EVENT_LINK_UP:
+	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
 		phba->state = BE_ADAPTER_UP;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-			    "BC_%d : Link UP on Physical Port %d\n",
-			    evt->physical_port);
-		break;
-	default:
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-			    "BC_%d : Unexpected Async Notification %d on"
-			    "Physical Port %d\n",
-			    evt->port_link_status,
+			    "BC_%d : Link UP on Port %d\n",
 			    evt->physical_port);
 	}
 }
@@ -479,7 +488,7 @@
 {
 	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-	int wait = 0;
+	uint32_t wait = 0;
 	u32 ready;
 
 	do {
@@ -527,6 +536,10 @@
 	struct be_mcc_compl *compl = &mbox->compl;
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
+	status = be_mbox_db_ready_wait(ctrl);
+	if (status)
+		return status;
+
 	val &= ~MPU_MAILBOX_DB_RDY_MASK;
 	val |= MPU_MAILBOX_DB_HI_MASK;
 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +593,10 @@
 	struct be_mcc_compl *compl = &mbox->compl;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 
+	status = be_mbox_db_ready_wait(ctrl);
+	if (status)
+		return status;
+
 	val |= MPU_MAILBOX_DB_HI_MASK;
 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@
 	return status;
 }
 
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@
 	return status;
 }
 
+/**
+ * be_cmd_fw_uninit()- Uninitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+	int status;
+	u8 *endian_check;
+
+	spin_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+
+	endian_check = (u8 *) wrb;
+	*endian_check++ = 0xFF;
+	*endian_check++ = 0xAA;
+	*endian_check++ = 0xBB;
+	*endian_check++ = 0xFF;
+	*endian_check++ = 0xFF;
+	*endian_check++ = 0xCC;
+	*endian_check++ = 0xDD;
+	*endian_check = 0xFF;
+
+	be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+	status = be_mbox_notify(ctrl);
+	if (status)
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BC_%d : be_cmd_fw_uninit Failed\n");
+
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *cq, struct be_queue_info *eq,
 			  bool sol_evts, bool no_delay, int coalesce_wm)
@@ -783,20 +851,7 @@
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-	if (chip_skh_r(ctrl->pdev)) {
-		req->hdr.version = MBX_CMD_VER2;
-		req->page_size = 1;
-		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
-			      ctxt, coalesce_wm);
-		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
-			      ctxt, no_delay);
-		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-			      __ilog2_u32(cq->len / 256));
-		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
-		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
-	} else {
+	if (is_chip_be2_be3r(phba)) {
 		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
 			      ctxt, coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@
 		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
 			      PCI_FUNC(ctrl->pdev->devfn));
+	} else {
+		req->hdr.version = MBX_CMD_VER2;
+		req->page_size = 1;
+		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+			      ctxt, coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+			      ctxt, no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+			      __ilog2_u32(cq->len / 256));
+		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
 	}
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_defq_create_req *req = embedded_payload(wrb);
 	struct be_dma_mem *q_mem = &dq->dma_mem;
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	void *ctxt = &req->context;
 	int status;
 
@@ -961,17 +1030,36 @@
 			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
-		      1);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
-		      PCI_FUNC(ctrl->pdev->devfn));
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
-		      be_encoded_q_len(length / sizeof(struct phys_addr)));
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
-		      ctxt, entry_size);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
-		      cq->id);
+
+	if (is_chip_be2_be3r(phba)) {
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      rx_pdid, ctxt, 0);
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      rx_pdid_valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      ring_size, ctxt,
+			      be_encoded_q_len(length /
+			      sizeof(struct phys_addr)));
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      default_buffer_size, ctxt, entry_size);
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      cq_id_recv, ctxt,	cq->id);
+	} else {
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      rx_pdid, ctxt, 0);
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      rx_pdid_valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      ring_size, ctxt,
+			      be_encoded_q_len(length /
+			      sizeof(struct phys_addr)));
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      default_buffer_size, ctxt, entry_size);
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      cq_id_recv, ctxt, cq->id);
+	}
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 23397d5..9907308 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@
 
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 
 #define CQE_STATUS_COMPL_MASK 0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0	/* bits 0 - 15 */
@@ -118,7 +122,8 @@
 
 enum {
 	ASYNC_EVENT_LINK_DOWN = 0x0,
-	ASYNC_EVENT_LINK_UP = 0x1
+	ASYNC_EVENT_LINK_UP = 0x1,
+	ASYNC_EVENT_LOGICAL = 0x2
 };
 
 /**
@@ -130,6 +135,9 @@
 	u8 port_link_status;
 	u8 port_duplex;
 	u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE	0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL	0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE	0x02
 	u8 port_fault;
 	u8 rsvd0[7];
 	struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@
 			uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@
 	u8 rsvd4[32];		/* dword 3 */
 } __packed;
 
+struct amap_default_pdu_context_ext {
+	u8 rsvd0[16];   /* dword 0 */
+	u8 ring_size[4];    /* dword 0 */
+	u8 rsvd1[12];   /* dword 0 */
+	u8 rsvd2[22];   /* dword 1 */
+	u8 rx_pdid[9];  /* dword 1 */
+	u8 rx_pdid_valid;   /* dword 1 */
+	u8 default_buffer_size[16]; /* dword 2 */
+	u8 cq_id_recv[16];  /* dword 2 */
+	u8 rsvd3[32];   /* dword 3 */
+} __packed;
+
 struct be_defq_create_req {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
@@ -896,7 +917,7 @@
  * stack to notify the
  * controller of a posted Work Request Block
  */
-#define DB_WRB_POST_CID_MASK		0x3FF	/* bits 0 - 9 */
+#define DB_WRB_POST_CID_MASK		0xFFFF	/* bits 0 - 15 */
 #define DB_DEF_PDU_WRB_INDEX_MASK	0xFF	/* bits 0 - 9 */
 
 #define DB_DEF_PDU_WRB_INDEX_SHIFT	16
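
Widening DB_WRB_POST_CID_MASK matters because the CID is OR'd straight
into the WRB-post doorbell, so the old 10-bit mask silently truncated any
CID above 0x3FF (which newer adapters can presumably hand out). The
composition, as used later in be_main.c:

	doorbell |= cid & DB_WRB_POST_CID_MASK;		/* bits 0 - 15: CID */
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
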
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 9014690..ef36be00 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@
 				struct beiscsi_conn *beiscsi_conn,
 				unsigned int cid)
 {
-	if (phba->conn_table[cid]) {
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+	if (phba->conn_table[cri_index]) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 			    "BS_%d : Connection table already occupied. Detected clash\n");
 
@@ -169,9 +171,9 @@
 	} else {
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 			    "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-			    cid, beiscsi_conn);
+			    cri_index, beiscsi_conn);
 
-		phba->conn_table[cid] = beiscsi_conn;
+		phba->conn_table[cri_index] = beiscsi_conn;
 	}
 	return 0;
 }
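
BE_GET_CRI_FROM_CID(), used here and throughout the remaining be2iscsi
hunks, replaces the old cid - phba->fw_config.iscsi_cid_start arithmetic
with a table lookup, since the CIDs handed out by newer firmware are no
longer a contiguous range. The definitions live in be_main.h; a sketch
consistent with the BE_SET_CID_TO_CRI(i, pwrb_context->cid) call site in
be_main.c below (assumed, not copied from the header):

	#define BE_GET_CRI_FROM_CID(cid)	(phba->cid_to_cri_map[cid])
	#define BE_SET_CID_TO_CRI(cri_index, cid) \
			(phba->cid_to_cri_map[cid] = cri_index)
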
@@ -990,9 +992,27 @@
 static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 {
 	struct beiscsi_hba *phba = beiscsi_ep->phba;
+	struct beiscsi_conn *beiscsi_conn;
 
 	beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
 	beiscsi_ep->phba = NULL;
+	phba->ep_array[BE_GET_CRI_FROM_CID
+		       (beiscsi_ep->ep_cid)] = NULL;
+
+	/*
+	 * Check if any connection resource allocated by the driver
+	 * is to be freed. This case occurs when target redirection
+	 * or connection retry is done.
+	 */
+	if (!beiscsi_ep->conn)
+		return;
+
+	beiscsi_conn = beiscsi_ep->conn;
+	if (beiscsi_conn->login_in_progress) {
+		beiscsi_free_mgmt_task_handles(beiscsi_conn,
+					       beiscsi_conn->task);
+		beiscsi_conn->login_in_progress = 0;
+	}
 }
 
 /**
@@ -1009,7 +1029,6 @@
 {
 	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 	struct beiscsi_hba *phba = beiscsi_ep->phba;
-	struct be_mcc_wrb *wrb;
 	struct tcp_connect_and_offload_out *ptcpcnct_out;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int tag;
@@ -1029,15 +1048,8 @@
 		    "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
 		    beiscsi_ep->ep_cid);
 
-	phba->ep_array[beiscsi_ep->ep_cid -
-		       phba->fw_config.iscsi_cid_start] = ep;
-	if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
-				  phba->params.cxns_per_ctrl * 2)) {
-
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Failed in allocate iscsi cid\n");
-		goto free_ep;
-	}
+	phba->ep_array[BE_GET_CRI_FROM_CID
+		       (beiscsi_ep->ep_cid)] = ep;
 
 	beiscsi_ep->cid_vld = 0;
 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@
 			    "BS_%d : Failed to allocate memory for"
 			    " mgmt_open_connection\n");
 
-		beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+		beiscsi_free_ep(beiscsi_ep);
 		return -ENOMEM;
 	}
 	nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
 	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
 	tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
-	if (!tag) {
+	if (tag <= 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 			    "BS_%d : mgmt_open_connection Failed for cid=%d\n",
 			    beiscsi_ep->ep_cid);
 
-		beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
+		beiscsi_free_ep(beiscsi_ep);
 		return -EAGAIN;
 	}
 
-	ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@
 
 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
-		goto free_ep;
+		beiscsi_free_ep(beiscsi_ep);
+		return -EBUSY;
 	}
 
-	ptcpcnct_out = embedded_payload(wrb);
+	ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
 	beiscsi_ep = ep->dd_data;
 	beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
 	beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@
 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
 	return 0;
-
-free_ep:
-	beiscsi_free_ep(beiscsi_ep);
-	return -EBUSY;
 }
 
 /**
@@ -1119,6 +1128,13 @@
 		return ERR_PTR(ret);
 	}
 
+	if (beiscsi_error(phba)) {
+		ret = -EIO;
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+			    "BS_%d : FW state is not stable\n");
+		return ERR_PTR(ret);
+	}
+
 	if (phba->state != BE_ADAPTER_UP) {
 		ret = -EBUSY;
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@
 static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
 				      unsigned int cid)
 {
-	if (phba->conn_table[cid])
-		phba->conn_table[cid] = NULL;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+	if (phba->conn_table[cri_index])
+		phba->conn_table[cri_index] = NULL;
 	else {
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 			    "BS_%d : Connection table Not occupied.\n");
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 38eab72..31ddc84 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4e2733d..d24a286 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
 	&dev_attr_beiscsi_log_enable,
 	&dev_attr_beiscsi_drvr_ver,
 	&dev_attr_beiscsi_adapter_family,
+	&dev_attr_beiscsi_fw_ver,
+	&dev_attr_beiscsi_active_cid_count,
 	NULL,
 };
 
@@ -702,7 +706,7 @@
 				    + BE2_TMFS
 				    + BE2_NOPOUT_REQ));
 	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
+	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
 	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@
 static unsigned int
 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
 			  struct beiscsi_hba *phba,
-			  unsigned short cid,
 			  struct pdu_base *ppdu,
 			  unsigned long pdu_len,
 			  void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@
 	struct hwi_wrb_context *pwrb_context;
 	struct hwi_controller *phwi_ctrlr;
 	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[cid];
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 	if (pwrb_context->wrb_handles_available >= 2) {
 		pwrb_handle = pwrb_context->pwrb_handle_base[
 					    pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@
 	hdr->t2retain = 0;
 	hdr->flags = csol_cqe->i_flags;
 	hdr->response = csol_cqe->i_resp;
-	hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-	hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+				     csol_cqe->cmd_wnd - 1);
 
 	hdr->dlength[0] = 0;
 	hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@
 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
 	hdr->flags = csol_cqe->i_flags;
 	hdr->response = csol_cqe->i_resp;
-	hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-	hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
-			  csol_cqe->cmd_wnd - 1);
+	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+				     csol_cqe->cmd_wnd - 1);
 
 	hdr->itt = io_task->libiscsi_itt;
 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1363,35 +1368,29 @@
 	struct hwi_controller *phwi_ctrlr;
 	struct iscsi_task *task;
 	struct beiscsi_io_task *io_task;
-	struct iscsi_conn *conn = beiscsi_conn->conn;
-	struct iscsi_session *session = conn->session;
-	uint16_t wrb_index, cid;
+	uint16_t wrb_index, cid, cri_index;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	if (chip_skh_r(phba->pcidev)) {
-		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
-					  wrb_idx, psol);
-		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
-				    cid, psol);
-	} else {
+	if (is_chip_be2_be3r(phba)) {
 		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
 					  wrb_idx, psol);
 		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
 				    cid, psol);
+	} else {
+		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+					  wrb_idx, psol);
+		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+				    cid, psol);
 	}
 
-	pwrb_context = &phwi_ctrlr->wrb_context[
-			cid - phba->fw_config.iscsi_cid_start];
+	cri_index = BE_GET_CRI_FROM_CID(cid);
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
 	task = pwrb_handle->pio_handle;
 
 	io_task = task->dd_data;
-	spin_lock_bh(&phba->mgmt_sgl_lock);
-	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-	spin_unlock_bh(&phba->mgmt_sgl_lock);
-	spin_lock_bh(&session->lock);
-	free_wrb_handle(phba, pwrb_context, pwrb_handle);
-	spin_unlock_bh(&session->lock);
+	memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
+	iscsi_put_task(task);
 }
 
 static void
@@ -1406,8 +1405,8 @@
 	hdr = (struct iscsi_nopin *)task->hdr;
 	hdr->flags = csol_cqe->i_flags;
 	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
-	hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
-			 csol_cqe->cmd_wnd - 1);
+	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+				     csol_cqe->cmd_wnd - 1);
 
 	hdr->opcode = ISCSI_OP_NOOP_IN;
 	hdr->itt = io_task->libiscsi_itt;
@@ -1418,34 +1417,7 @@
 		struct sol_cqe *psol,
 		struct common_sol_cqe *csol_cqe)
 {
-	if (chip_skh_r(phba->pcidev)) {
-		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-						    i_exp_cmd_sn, psol);
-		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-						  i_res_cnt, psol);
-		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-						    wrb_index, psol);
-		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-					      cid, psol);
-		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-						 hw_sts, psol);
-		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
-						  i_cmd_wnd, psol);
-		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
-				  cmd_cmpl, psol))
-			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-							i_sts, psol);
-		else
-			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
-							 i_sts, psol);
-		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
-				  u, psol))
-			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
-
-		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
-				  o, psol))
-			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
-	} else {
+	if (is_chip_be2_be3r(phba)) {
 		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
 						    i_exp_cmd_sn, psol);
 		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
@@ -1464,6 +1436,33 @@
 						i_sts, psol);
 		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
 						  i_flags, psol);
+	} else {
+		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+						    i_exp_cmd_sn, psol);
+		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+						  i_res_cnt, psol);
+		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+						    wrb_index, psol);
+		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+					      cid, psol);
+		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+						 hw_sts, psol);
+		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+						  i_cmd_wnd, psol);
+		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+				  cmd_cmpl, psol))
+			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+							i_sts, psol);
+		else
+			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+							 i_sts, psol);
+		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+				  u, psol))
+			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
+
+		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+				  o, psol))
+			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
 	}
 }
 
@@ -1480,14 +1479,15 @@
 	struct iscsi_conn *conn = beiscsi_conn->conn;
 	struct iscsi_session *session = conn->session;
 	struct common_sol_cqe csol_cqe = {0};
+	uint16_t cri_index = 0;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 
 	/* Copy the elements to a common structure */
 	adapter_get_sol_cqe(phba, psol, &csol_cqe);
 
-	pwrb_context = &phwi_ctrlr->wrb_context[
-			csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
 	pwrb_handle = pwrb_context->pwrb_handle_basestd[
 		      csol_cqe.wrb_index];
@@ -1561,16 +1561,16 @@
 	unsigned char is_header = 0;
 	unsigned int index, dpl;
 
-	if (chip_skh_r(phba->pcidev)) {
-		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
-				    dpl, pdpdu_cqe);
-		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
-				      index, pdpdu_cqe);
-	} else {
+	if (is_chip_be2_be3r(phba)) {
 		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
 				    dpl, pdpdu_cqe);
 		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
 				      index, pdpdu_cqe);
+	} else {
+		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+				    dpl, pdpdu_cqe);
+		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+				      index, pdpdu_cqe);
 	}
 
 	phys_addr.u.a32.address_lo =
@@ -1613,8 +1613,8 @@
 
 	WARN_ON(!pasync_handle);
 
-	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
-					     phba->fw_config.iscsi_cid_start;
+	pasync_handle->cri =
+			BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
 	pasync_handle->is_header = is_header;
 	pasync_handle->buffer_len = dpl;
 	*pcq_index = index;
@@ -1856,8 +1856,6 @@
 	}
 
 	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-					   (beiscsi_conn->beiscsi_conn_cid -
-					    phba->fw_config.iscsi_cid_start),
 					    phdr, hdr_len, pfirst_buffer,
 					    offset);
 
@@ -2011,6 +2009,7 @@
 	unsigned int num_processed = 0;
 	unsigned int tot_nump = 0;
 	unsigned short code = 0, cid = 0;
+	uint16_t cri_index = 0;
 	struct beiscsi_conn *beiscsi_conn;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@
 			 32] & CQE_CODE_MASK);
 
 		 /* Get the CID */
-		if (chip_skh_r(phba->pcidev)) {
+		if (is_chip_be2_be3r(phba)) {
+			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+		} else {
 			if ((code == DRIVERMSG_NOTIFY) ||
 			    (code == UNSOL_HDR_NOTIFY) ||
 			    (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@
 			 else
 				 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
 						     cid, sol);
-		   } else
-			 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+		}
 
-		ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+		cri_index = BE_GET_CRI_FROM_CID(cid);
+		ep = phba->ep_array[cri_index];
 		beiscsi_ep = ep->dd_data;
 		beiscsi_conn = beiscsi_ep->conn;
 
@@ -2191,7 +2192,7 @@
 
 static int be_iopoll(struct blk_iopoll *iop, int budget)
 {
-	static unsigned int ret;
+	unsigned int ret;
 	struct beiscsi_hba *phba;
 	struct be_eq_obj *pbe_eq;
 
@@ -2416,11 +2417,11 @@
 		/* Check for the data_count */
 		dsp_value = (task->data_count) ? 1 : 0;
 
-		if (chip_skh_r(phba->pcidev))
-			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+		if (is_chip_be2_be3r(phba))
+			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
 				      pwrb, dsp_value);
 		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
 				      pwrb, dsp_value);
 
 		/* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 {
-	struct be_mem_descriptor *mem_descr;
 	dma_addr_t bus_add;
+	struct hwi_controller *phwi_ctrlr;
+	struct be_mem_descriptor *mem_descr;
 	struct mem_array *mem_arr, *mem_arr_orig;
 	unsigned int i, j, alloc_size, curr_alloc_size;
 
@@ -2547,9 +2549,18 @@
 	if (!phba->phwi_ctrlr)
 		return -ENOMEM;
 
+	/* Allocate memory for wrb_context */
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
+					  phba->params.cxns_per_ctrl,
+					  GFP_KERNEL);
+	if (!phwi_ctrlr->wrb_context)
+		return -ENOMEM;
+
 	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
 				 GFP_KERNEL);
 	if (!phba->init_mem) {
+		kfree(phwi_ctrlr->wrb_context);
 		kfree(phba->phwi_ctrlr);
 		return -ENOMEM;
 	}
@@ -2558,6 +2569,7 @@
 			       GFP_KERNEL);
 	if (!mem_arr_orig) {
 		kfree(phba->init_mem);
+		kfree(phwi_ctrlr->wrb_context);
 		kfree(phba->phwi_ctrlr);
 		return -ENOMEM;
 	}
@@ -2628,6 +2640,7 @@
 	}
 	kfree(mem_arr_orig);
 	kfree(phba->init_mem);
+	kfree(phba->phwi_ctrlr->wrb_context);
 	kfree(phba->phwi_ctrlr);
 	return -ENOMEM;
 }
@@ -2666,6 +2679,7 @@
 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+	struct hwi_context_memory *phwi_ctxt;
 	struct wrb_handle *pwrb_handle = NULL;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@
 	mem_descr_wrb += HWI_MEM_WRB;
 	phwi_ctrlr = phba->phwi_ctrlr;
 
-	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+	/* Allocate memory for WRBQ */
+	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
+	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
+				     phba->fw_config.iscsi_cid_count,
+				     GFP_KERNEL);
+	if (!phwi_ctxt->be_wrbq) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : WRBQ Mem Alloc Failed\n");
+		return -ENOMEM;
+	}
+
+	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		pwrb_context->pwrb_handle_base =
 				kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@
 		}
 	}
 	idx = 0;
-	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		if (!num_cxn_wrb) {
 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@
 	return -ENOMEM;
 }
 
-static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@
 	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
 	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
+	pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
+					  phba->fw_config.iscsi_cid_count,
+					  GFP_KERNEL);
+	if (!pasync_ctx->async_entry) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
+		return -ENOMEM;
+	}
+
 	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
 	pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
@@ -2934,6 +2968,8 @@
 	pasync_ctx->async_header.ep_read_ptr = -1;
 	pasync_ctx->async_data.host_write_ptr = 0;
 	pasync_ctx->async_data.ep_read_ptr = -1;
+
+	return 0;
 }
 
 static int
@@ -3293,6 +3329,7 @@
 	void *wrb_vaddr;
 	struct be_dma_mem sgl;
 	struct be_mem_descriptor *mem_descr;
+	struct hwi_wrb_context *pwrb_context;
 	int status;
 
 	idx = 0;
@@ -3351,8 +3388,9 @@
 			kfree(pwrb_arr);
 			return status;
 		}
-		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
-								   id;
+		pwrb_context = &phwi_ctrlr->wrb_context[i];
+		pwrb_context->cid = phwi_context->be_wrbq[i].id;
+		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
 	}
 	kfree(pwrb_arr);
 	return 0;
@@ -3365,7 +3403,7 @@
 	struct hwi_wrb_context *pwrb_context;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		kfree(pwrb_context->pwrb_handle_base);
 		kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
+	struct hwi_async_pdu_context *pasync_ctx;
 	int i, eq_num;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@
 		if (q->created)
 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
 	}
+	kfree(phwi_context->be_wrbq);
 	free_wrb_handles(phba);
 
 	q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@
 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
 	}
 	be_mcc_queues_destroy(phba);
+
+	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
+	kfree(pasync_ctx->async_entry);
+	be_cmd_fw_uninit(ctrl);
 }
 
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@
 	if (beiscsi_init_wrb_handle(phba))
 		return -ENOMEM;
 
-	hwi_init_async_pdu_ctx(phba);
+	if (hwi_init_async_pdu_ctx(phba)) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
+		return -ENOMEM;
+	}
+
 	if (hwi_init_port(phba) != 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@
 		mem_descr++;
 	}
 	kfree(phba->init_mem);
+	kfree(phba->phwi_ctrlr->wrb_context);
 	kfree(phba->phwi_ctrlr);
 }
 
@@ -3769,7 +3819,7 @@
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-	int i, new_cid;
+	int i;
 
 	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
 				  GFP_KERNEL);
@@ -3780,19 +3830,33 @@
 		return -ENOMEM;
 	}
 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
-				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
+				 phba->params.cxns_per_ctrl, GFP_KERNEL);
 	if (!phba->ep_array) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : Failed to allocate memory in "
 			    "hba_setup_cid_tbls\n");
 		kfree(phba->cid_array);
+		phba->cid_array = NULL;
 		return -ENOMEM;
 	}
-	new_cid = phba->fw_config.iscsi_cid_start;
-	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-		phba->cid_array[i] = new_cid;
-		new_cid += 2;
+
+	phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
+				   phba->params.cxns_per_ctrl, GFP_KERNEL);
+	if (!phba->conn_table) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Failed to allocate memory in "
+			    "hba_setup_cid_tbls\n");
+
+		kfree(phba->cid_array);
+		kfree(phba->ep_array);
+		phba->cid_array = NULL;
+		phba->ep_array = NULL;
+		return -ENOMEM;
 	}
+
+	for (i = 0; i < phba->params.cxns_per_ctrl; i++)
+		phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+
 	phba->avlbl_cids = phba->params.cxns_per_ctrl;
 	return 0;
 }
@@ -4062,6 +4126,53 @@
 	kfree(phba->eh_sgl_hndl_base);
 	kfree(phba->cid_array);
 	kfree(phba->ep_array);
+	kfree(phba->conn_table);
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources bound to the CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+				struct iscsi_task *task)
+{
+	struct beiscsi_io_task *io_task;
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct hwi_wrb_context *pwrb_context;
+	struct hwi_controller *phwi_ctrlr;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+				beiscsi_conn->beiscsi_conn_cid);
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+	io_task = task->dd_data;
+
+	if (io_task->pwrb_handle) {
+		memset(io_task->pwrb_handle->pwrb, 0,
+		       sizeof(struct iscsi_wrb));
+		free_wrb_handle(phba, pwrb_context,
+				io_task->pwrb_handle);
+		io_task->pwrb_handle = NULL;
+	}
+
+	if (io_task->psgl_handle) {
+		spin_lock_bh(&phba->mgmt_sgl_lock);
+		free_mgmt_sgl_handle(phba,
+				     io_task->psgl_handle);
+		io_task->psgl_handle = NULL;
+		spin_unlock_bh(&phba->mgmt_sgl_lock);
+	}
+
+	if (io_task->mtask_addr)
+		pci_unmap_single(phba->pcidev,
+				 io_task->mtask_addr,
+				 io_task->mtask_data_count,
+				 PCI_DMA_TODEVICE);
 }
 
 /**
@@ -4078,10 +4189,11 @@
 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
 	struct hwi_wrb_context *pwrb_context;
 	struct hwi_controller *phwi_ctrlr;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-			- phba->fw_config.iscsi_cid_start];
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
 	if (io_task->cmd_bhs) {
 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@
 			io_task->psgl_handle = NULL;
 		}
 	} else {
-		if (!beiscsi_conn->login_in_progress) {
-			if (io_task->pwrb_handle) {
-				free_wrb_handle(phba, pwrb_context,
-						io_task->pwrb_handle);
-				io_task->pwrb_handle = NULL;
-			}
-			if (io_task->psgl_handle) {
-				spin_lock(&phba->mgmt_sgl_lock);
-				free_mgmt_sgl_handle(phba,
-						     io_task->psgl_handle);
-				spin_unlock(&phba->mgmt_sgl_lock);
-				io_task->psgl_handle = NULL;
-			}
-			if (io_task->mtask_addr) {
-				pci_unmap_single(phba->pcidev,
-						 io_task->mtask_addr,
-						 io_task->mtask_data_count,
-						 PCI_DMA_TODEVICE);
-				io_task->mtask_addr = 0;
-			}
-		}
+		if (!beiscsi_conn->login_in_progress)
+			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
 	}
 }
 
@@ -4146,15 +4239,14 @@
 	beiscsi_cleanup_task(task);
 	spin_unlock_bh(&session->lock);
 
-	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
-				       phba->fw_config.iscsi_cid_start));
+	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
 	/* Check for the adapter family */
-	if (chip_skh_r(phba->pcidev))
-		beiscsi_offload_cxn_v2(params, pwrb_handle);
-	else
+	if (is_chip_be2_be3r(phba))
 		beiscsi_offload_cxn_v0(params, pwrb_handle,
 				       phba->init_mem);
+	else
+		beiscsi_offload_cxn_v2(params, pwrb_handle);
 
 	be_dws_le_to_cpu(pwrb_handle->pwrb,
 			 sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@
 	struct hwi_wrb_context *pwrb_context;
 	struct hwi_controller *phwi_ctrlr;
 	itt_t itt;
+	uint16_t cri_index = 0;
 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
 	dma_addr_t paddr;
 
@@ -4223,8 +4316,7 @@
 			goto free_hndls;
 		}
 		io_task->pwrb_handle = alloc_wrb_handle(phba,
-					beiscsi_conn->beiscsi_conn_cid -
-					phba->fw_config.iscsi_cid_start);
+					beiscsi_conn->beiscsi_conn_cid);
 		if (!io_task->pwrb_handle) {
 			beiscsi_log(phba, KERN_ERR,
 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@
 	} else {
 		io_task->scsi_cmnd = NULL;
 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+			beiscsi_conn->task = task;
 			if (!beiscsi_conn->login_in_progress) {
 				spin_lock(&phba->mgmt_sgl_lock);
 				io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@
 							io_task->psgl_handle;
 				io_task->pwrb_handle =
 					alloc_wrb_handle(phba,
-					beiscsi_conn->beiscsi_conn_cid -
-					phba->fw_config.iscsi_cid_start);
+					beiscsi_conn->beiscsi_conn_cid);
 				if (!io_task->pwrb_handle) {
 					beiscsi_log(phba, KERN_ERR,
 						    BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@
 				io_task->pwrb_handle =
 						beiscsi_conn->plogin_wrb_handle;
 			}
-			beiscsi_conn->task = task;
 		} else {
 			spin_lock(&phba->mgmt_sgl_lock);
 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@
 			}
 			io_task->pwrb_handle =
 					alloc_wrb_handle(phba,
-					beiscsi_conn->beiscsi_conn_cid -
-					phba->fw_config.iscsi_cid_start);
+					beiscsi_conn->beiscsi_conn_cid);
 			if (!io_task->pwrb_handle) {
 				beiscsi_log(phba, KERN_ERR,
 					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@
 free_mgmt_hndls:
 	spin_lock(&phba->mgmt_sgl_lock);
 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+	io_task->psgl_handle = NULL;
 	spin_unlock(&phba->mgmt_sgl_lock);
 free_hndls:
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[
-			beiscsi_conn->beiscsi_conn_cid -
-			phba->fw_config.iscsi_cid_start];
+	cri_index = BE_GET_CRI_FROM_CID(
+			beiscsi_conn->beiscsi_conn_cid);
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 	if (io_task->pwrb_handle)
 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 	io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@
 	unsigned int doorbell = 0;
 
 	pwrb = io_task->pwrb_handle->pwrb;
-	memset(pwrb, 0, sizeof(*pwrb));
 
 	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@
 	pwrb = io_task->pwrb_handle->pwrb;
 	memset(pwrb, 0, sizeof(*pwrb));
 
-	if (chip_skh_r(phba->pcidev)) {
-		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
-			      be32_to_cpu(task->cmdsn));
-		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
-			      io_task->pwrb_handle->wrb_index);
-		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
-			      io_task->psgl_handle->sgl_index);
-		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
-			      task->data_count);
-		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
-			      io_task->pwrb_handle->nxt_wrb_index);
-		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
-	} else {
+	if (is_chip_be2_be3r(phba)) {
 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
 			      be32_to_cpu(task->cmdsn));
 		AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@
 		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
 			      io_task->pwrb_handle->nxt_wrb_index);
 		pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+	} else {
+		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+			      be32_to_cpu(task->cmdsn));
+		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+			      io_task->pwrb_handle->wrb_index);
+		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+			      io_task->psgl_handle->sgl_index);
+		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+			      task->data_count);
+		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+			      io_task->pwrb_handle->nxt_wrb_index);
+		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
 	}
 
 
@@ -4501,19 +4591,19 @@
 	case ISCSI_OP_NOOP_OUT:
 		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
 			ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-			if (chip_skh_r(phba->pcidev))
-				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+			if (is_chip_be2_be3r(phba))
+				AMAP_SET_BITS(struct amap_iscsi_wrb,
 					      dmsg, pwrb, 1);
 			else
-				AMAP_SET_BITS(struct amap_iscsi_wrb,
+				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
 					      dmsg, pwrb, 1);
 		} else {
 			ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
-			if (chip_skh_r(phba->pcidev))
-				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+			if (is_chip_be2_be3r(phba))
+				AMAP_SET_BITS(struct amap_iscsi_wrb,
 					      dmsg, pwrb, 0);
 			else
-				AMAP_SET_BITS(struct amap_iscsi_wrb,
+				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
 					      dmsg, pwrb, 0);
 		}
 		hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@
 	}
 
 	/* Set the task type */
-	io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
-		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
-		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
+	io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
+		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
+		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
 
 	doorbell |= cid & DB_WRB_POST_CID_MASK;
 	doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@
 	case OC_SKH_ID1:
 		phba->generation = BE_GEN4;
 		phba->iotask_fn = beiscsi_iotask_v2;
+		break;
 	default:
 		phba->generation = 0;
 	}
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5946577..2c06ef3 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
 
 #include "be.h"
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"10.0.272.0"
+#define BUILD_STR		"10.0.467.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
 
 #define MAX_CPUS		64
 #define BEISCSI_MAX_NUM_CPUS	7
-#define OC_SKH_MAX_NUM_CPUS	63
+#define OC_SKH_MAX_NUM_CPUS	31
 
+#define BEISCSI_VER_STRLEN 32
 
 #define BEISCSI_SGLIST_ELEMENTS	30
 
@@ -265,7 +266,9 @@
 	unsigned short cid;
 } __packed;
 
-#define chip_skh_r(pdev)	(pdev->device == OC_SKH_ID1)
+#define chip_be2(phba)      (phba->generation == BE_GEN2)
+#define chip_be3_r(phba)    (phba->generation == BE_GEN3)
+#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
 struct beiscsi_hba {
 	struct hba_parameters params;
 	struct hwi_controller *phwi_ctrlr;
@@ -304,10 +307,15 @@
 	unsigned short avlbl_cids;
 	unsigned short cid_alloc;
 	unsigned short cid_free;
-	struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
 	struct list_head hba_queue;
+#define BE_MAX_SESSION 2048
+#define BE_SET_CID_TO_CRI(cri_index, cid) \
+			  (phba->cid_to_cri_map[cid] = cri_index)
+#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
+	unsigned short cid_to_cri_map[BE_MAX_SESSION];
 	unsigned short *cid_array;
 	struct iscsi_endpoint **ep_array;
+	struct beiscsi_conn **conn_table;
 	struct iscsi_boot_kset *boot_kset;
 	struct Scsi_Host *shost;
 	struct iscsi_iface *ipv4_iface;
@@ -339,6 +347,7 @@
 	struct delayed_work beiscsi_hw_check_task;
 
 	u8 mac_address[ETH_ALEN];
+	char fw_ver_str[BEISCSI_VER_STRLEN];
 	char wq_name[20];
 	struct workqueue_struct *wq;	/* The actual work queue */
 	struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@
 	 * This is a varying size list! Do not add anything
 	 * after this entry!!
 	 */
-	struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
+	struct hwi_async_entry *async_entry;
 };
 
 #define PDUCQE_CODE_MASK	0x0000003F
@@ -749,6 +758,8 @@
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
 void beiscsi_process_all_cqs(struct work_struct *work);
+void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+				     struct iscsi_task *task);
 
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
@@ -933,7 +944,7 @@
 	struct sgl_handle *psgl_handle_base;
 	unsigned int wrb_mem_index;
 
-	struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
+	struct hwi_wrb_context *wrb_context;
 	struct mcc_wrb *pmcc_wrb_base;
 	struct be_ring default_pdu_hdr;
 	struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@
 	struct be_queue_info be_def_hdrq;
 	struct be_queue_info be_def_dataq;
 
-	struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
-	struct be_mcc_wrb_context *pbe_mcc_context;
-
+	struct be_queue_info *be_wrbq;
 	struct hwi_async_pdu_context *pasync_ctx;
 };
 
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 55cc990..245a959 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
 			    phba->fw_config.iscsi_features);
+		memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+		       firmware_version_string, BEISCSI_VER_STRLEN);
 	} else
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BG_%d :  Failed in mgmt_check_supported_fw\n");
@@ -1260,6 +1262,45 @@
 }
 
 /**
+ * beiscsi_fw_ver_disp()- Display Firmware Version
+ * @dev: ptr to device, not used.
+ * @attr: device attribute, not used.
+ * @buf: buffer that receives the formatted firmware version string.
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
+}
+
+/**
+ * beiscsi_active_cid_disp()- Display number of active sessions
+ * @dev: ptr to device, not used.
+ * @attr: device attribute, not used.
+ * @buf: buffer that receives the formatted active session count.
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		       (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+}
+
+/**
  * beiscsi_adap_family_disp()- Display adapter family.
  * @dev: ptr to device to get priv structure
  * @attr: device attribute, not used.
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 2e4968a..04af7e7 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@
 } __packed;
 
 struct mgmt_hba_attributes {
-	u8 flashrom_version_string[32];
-	u8 manufacturer_name[32];
+	u8 flashrom_version_string[BEISCSI_VER_STRLEN];
+	u8 manufacturer_name[BEISCSI_VER_STRLEN];
 	u32 supported_modes;
 	u8 seeprom_version_lo;
 	u8 seeprom_version_hi;
 	u8 rsvd0[2];
 	u32 fw_cmd_data_struct_version;
 	u32 ep_fw_data_struct_version;
-	u32 future_reserved[12];
+	u8 ncsi_version_string[12];
 	u32 default_extended_timeout;
-	u8 controller_model_number[32];
+	u8 controller_model_number[BEISCSI_VER_STRLEN];
 	u8 controller_description[64];
-	u8 controller_serial_number[32];
-	u8 ip_version_string[32];
-	u8 firmware_version_string[32];
-	u8 bios_version_string[32];
-	u8 redboot_version_string[32];
-	u8 driver_version_string[32];
-	u8 fw_on_flash_version_string[32];
+	u8 controller_serial_number[BEISCSI_VER_STRLEN];
+	u8 ip_version_string[BEISCSI_VER_STRLEN];
+	u8 firmware_version_string[BEISCSI_VER_STRLEN];
+	u8 bios_version_string[BEISCSI_VER_STRLEN];
+	u8 redboot_version_string[BEISCSI_VER_STRLEN];
+	u8 driver_version_string[BEISCSI_VER_STRLEN];
+	u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
 	u32 functionalities_supported;
 	u16 max_cdblength;
 	u8 asic_revision;
@@ -190,7 +190,8 @@
 	u32 firmware_post_status;
 	u32 hba_mtu[8];
 	u8 iscsi_features;
-	u8 future_u8[3];
+	u8 asic_generation;
+	u8 future_u8[2];
 	u32 future_u32[3];
 } __packed;
 
@@ -207,7 +208,7 @@
 	u64 unique_identifier;
 	u8 netfilters;
 	u8 rsvd0[3];
-	u8 future_u32[4];
+	u32 future_u32[4];
 } __packed;
 
 struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
 			       struct device_attribute *attr, char *buf);
 
+ssize_t beiscsi_fw_ver_disp(struct device *dev,
+			     struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_active_cid_disp(struct device *dev,
+				 struct device_attribute *attr, char *buf);
+
 ssize_t beiscsi_adap_family_disp(struct device *dev,
 				  struct device_attribute *attr, char *buf);
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 11596b2..08b22a90 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
 #define _BNX2FC_H_
 /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -64,10 +64,12 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.13"
+#define BNX2FC_VERSION		"1.0.14"
 
 #define PFX			"bnx2fc: "
 
+#define BCM_CHIP_LEN		16
+
 #define BNX2X_DOORBELL_PCI_BAR		2
 
 #define BNX2FC_MAX_BD_LEN		0xffff
@@ -241,6 +243,8 @@
 	int wait_for_link_down;
 	int num_ofld_sess;
 	struct list_head vports;
+
+	char chip_num[BCM_CHIP_LEN];
 };
 
 struct bnx2fc_interface {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index bdbbb13..b1c9a4f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
  * This file contains helper routines that handle ELS requests
  * and responses.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dffec1..69ac554 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
  * cnic modules to create FCoE instances, send/receive non-offloaded
  * FIP/FCoE packets, listen to link events etc.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Dec 21, 2012"
+#define DRV_MODULE_RELDATE	"Mar 08, 2013"
 
 
 static char version[] =
@@ -679,6 +679,7 @@
 {
 	struct fcoe_port *port = lport_priv(lport);
 	struct bnx2fc_interface *interface = port->priv;
+	struct bnx2fc_hba *hba = interface->hba;
 	struct Scsi_Host *shost = lport->host;
 	int rc = 0;
 
@@ -699,8 +700,9 @@
 	}
 	if (!lport->vport)
 		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
-	sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
-		BNX2FC_NAME, BNX2FC_VERSION,
+	snprintf(fc_host_symbolic_name(lport->host), 256,
+		 "%s (Broadcom %s) v%s over %s",
+		BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
 		interface->netdev->name);
 
 	return 0;
@@ -1656,23 +1658,60 @@
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
 {
 	struct cnic_dev *cnic;
+	struct pci_dev *pdev;
 
 	if (!hba->cnic) {
 		printk(KERN_ERR PFX "cnic is NULL\n");
 		return -ENODEV;
 	}
 	cnic = hba->cnic;
-	hba->pcidev = cnic->pcidev;
-	if (hba->pcidev)
-		pci_dev_get(hba->pcidev);
+	pdev = hba->pcidev = cnic->pcidev;
+	if (!hba->pcidev)
+		return -ENODEV;
 
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_NX2_57710:
+		strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+		break;
+	case PCI_DEVICE_ID_NX2_57711:
+		strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+		break;
+	case PCI_DEVICE_ID_NX2_57712:
+	case PCI_DEVICE_ID_NX2_57712_MF:
+	case PCI_DEVICE_ID_NX2_57712_VF:
+		strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+		break;
+	case PCI_DEVICE_ID_NX2_57800:
+	case PCI_DEVICE_ID_NX2_57800_MF:
+	case PCI_DEVICE_ID_NX2_57800_VF:
+		strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+		break;
+	case PCI_DEVICE_ID_NX2_57810:
+	case PCI_DEVICE_ID_NX2_57810_MF:
+	case PCI_DEVICE_ID_NX2_57810_VF:
+		strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+		break;
+	case PCI_DEVICE_ID_NX2_57840:
+	case PCI_DEVICE_ID_NX2_57840_MF:
+	case PCI_DEVICE_ID_NX2_57840_VF:
+	case PCI_DEVICE_ID_NX2_57840_2_20:
+	case PCI_DEVICE_ID_NX2_57840_4_10:
+		strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+		break;
+	default:
+		pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
+		break;
+	}
+	pci_dev_get(hba->pcidev);
 	return 0;
 }
 
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 {
-	if (hba->pcidev)
+	if (hba->pcidev) {
+		hba->chip_num[0] = '\0';
 		pci_dev_put(hba->pcidev);
+	}
 	hba->pcidev = NULL;
 }
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 50510ff..c0d035a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
 * This file contains the low level functions that interact
  * with 57712 FCoE firmware.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@
 	fcoe_init3.error_bit_map_lo = 0xffffffff;
 	fcoe_init3.error_bit_map_hi = 0xffffffff;
 
-	fcoe_init3.perf_config = 1;
+	/*
+	 * enable both cached connection and cached tasks
+	 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
+	 */
+	fcoe_init3.perf_config = 3;
 
 	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
 	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 723a9a8..575142e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
  * IO manager and SCSI IO processing.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@
 
 	spin_lock_bh(&tgt->tgt_lock);
 	io_req->wait_for_comp = 0;
-	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
-				    &io_req->req_flags))) {
+	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+		rc = SUCCESS;
+	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+				      &io_req->req_flags))) {
 		/* Let the scsi-ml try to recover this command */
 		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
 		       io_req->xid);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c57a3bb..4d93177 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
  * Handles operations such as session offload/upload etc, and manages
  * session resources such as connection id and qp resources.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 0f9c041..372a67d 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@
 	uint32_t	n_rnode_match;  /* matched rnode */
 	uint32_t	n_dev_loss_tmo; /* Device loss timeout */
 	uint32_t	n_fdmi_err;	/* fdmi err */
-	uint32_t	n_evt_fw[PROTO_ERR_IMPL_LOGO];	/* fw events */
+	uint32_t	n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];	/* fw events */
 	enum csio_ln_ev	n_evt_sm[CSIO_LNE_MAX_EVENT];	/* State m/c events */
 	uint32_t	n_rnode_alloc;	/* rnode allocated */
 	uint32_t	n_rnode_free;	/* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index 6594009..4334342 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@
 	uint32_t	n_err_nomem;	/* error nomem */
 	uint32_t	n_evt_unexp;	/* unexpected event */
 	uint32_t	n_evt_drop;	/* unexpected event */
-	uint32_t	n_evt_fw[PROTO_ERR_IMPL_LOGO];	/* fw events */
+	uint32_t	n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];	/* fw events */
 	enum csio_rn_ev	n_evt_sm[CSIO_RNFE_MAX_EVENT];	/* State m/c events */
 	uint32_t	n_lun_rst;	/* Number of resets
 					 * of LUNs under this
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 98436c3..b6d1f92 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.5.0.2"
+#define DRV_VERSION		"1.5.0.22"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -192,6 +192,18 @@
 
 struct mempool;
 
+enum fnic_evt {
+	FNIC_EVT_START_VLAN_DISC = 1,
+	FNIC_EVT_START_FCF_DISC = 2,
+	FNIC_EVT_MAX,
+};
+
+struct fnic_event {
+	struct list_head list;
+	struct fnic *fnic;
+	enum fnic_evt event;
+};
+
 /* Per-instance private data structure */
 struct fnic {
 	struct fc_lport *lport;
@@ -254,6 +266,18 @@
 	struct sk_buff_head frame_queue;
 	struct sk_buff_head tx_queue;
 
+	/*** FIP related data members  -- start ***/
+	void (*set_vlan)(struct fnic *, u16 vlan);
+	struct work_struct      fip_frame_work;
+	struct sk_buff_head     fip_frame_queue;
+	struct timer_list       fip_timer;
+	struct list_head        vlans;
+	spinlock_t              vlans_lock;
+
+	struct work_struct      event_work;
+	struct list_head        evlist;
+	/*** FIP related data members  -- end ***/
+
 	/* copy work queue cache line section */
 	____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
 	/* completion queue cache line section */
@@ -278,6 +302,7 @@
 }
 
 extern struct workqueue_struct *fnic_event_queue;
+extern struct workqueue_struct *fnic_fip_queue;
 extern struct device_attribute *fnic_attrs[];
 
 void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@
 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
 void fnic_handle_frame(struct work_struct *work);
 void fnic_handle_link(struct work_struct *work);
+void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@
 
 int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
 
+void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_handle_fip_event(struct fnic *fnic);
+void fnic_fcoe_reset_vlans(struct fnic *fnic);
+void fnic_fcoe_evlist_free(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct fnic *fnic);
+
 static inline int
 fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 {
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 483eb9d..006fa92 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -31,12 +31,20 @@
 #include <scsi/libfc.h>
 #include "fnic_io.h"
 #include "fnic.h"
+#include "fnic_fip.h"
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
 
+static u8 fcoe_all_fcfs[ETH_ALEN];
+struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
 static void fnic_set_eth_mode(struct fnic *);
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
 
 void fnic_handle_link(struct work_struct *work)
 {
@@ -69,6 +77,11 @@
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 					     "link down\n");
 				fcoe_ctlr_link_down(&fnic->ctlr);
+				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+					/* start FCoE VLAN discovery */
+					fnic_fcoe_send_vlan_req(fnic);
+					return;
+				}
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 					     "link up\n");
 				fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@
 	} else if (fnic->link_status) {
 		/* DOWN -> UP */
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+			/* start FCoE VLAN discovery */
+			fnic_fcoe_send_vlan_req(fnic);
+			return;
+		}
 		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
 		fcoe_ctlr_link_up(&fnic->ctlr);
 	} else {
@@ -128,6 +146,441 @@
 	}
 }
 
+void fnic_fcoe_evlist_free(struct fnic *fnic)
+{
+	struct fnic_event *fevt = NULL;
+	struct fnic_event *next = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (list_empty(&fnic->evlist)) {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+		list_del(&fevt->list);
+		kfree(fevt);
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fnic_handle_event(struct work_struct *work)
+{
+	struct fnic *fnic = container_of(work, struct fnic, event_work);
+	struct fnic_event *fevt = NULL;
+	struct fnic_event *next = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (list_empty(&fnic->evlist)) {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+		if (fnic->stop_rx_link_events) {
+			list_del(&fevt->list);
+			kfree(fevt);
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			return;
+		}
+		/*
+		 * If we're in a transitional state, leave the list as it is
+		 * and return; it will be serviced once we reach a stable state.
+		 */
+		if (fnic->state != FNIC_IN_FC_MODE &&
+		    fnic->state != FNIC_IN_ETH_MODE) {
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			return;
+		}
+
+		list_del(&fevt->list);
+		switch (fevt->event) {
+		case FNIC_EVT_START_VLAN_DISC:
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			fnic_fcoe_send_vlan_req(fnic);
+			spin_lock_irqsave(&fnic->fnic_lock, flags);
+			break;
+		case FNIC_EVT_START_FCF_DISC:
+			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+				  "Start FCF Discovery\n");
+			fnic_fcoe_start_fcf_disc(fnic);
+			break;
+		default:
+			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+				  "Unknown event 0x%x\n", fevt->event);
+			break;
+		}
+		kfree(fevt);
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/**
+ * Check if the received FIP FLOGI frame is rejected
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is rejected with an unsupported command
+ * or an insufficient-resources ELS explanation.
+ */
+static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
+					 struct sk_buff *skb)
+{
+	struct fc_lport *lport = fip->lp;
+	struct fip_header *fiph;
+	struct fc_frame_header *fh = NULL;
+	struct fip_desc *desc;
+	struct fip_encaps *els;
+	enum fip_desc_type els_dtype = 0;
+	u16 op;
+	u8 els_op;
+	u8 sub;
+
+	size_t els_len = 0;
+	size_t rlen;
+	size_t dlen = 0;
+
+	if (skb_linearize(skb))
+		return 0;
+
+	if (skb->len < sizeof(*fiph))
+		return 0;
+
+	fiph = (struct fip_header *)skb->data;
+	op = ntohs(fiph->fip_op);
+	sub = fiph->fip_subcode;
+
+	if (op != FIP_OP_LS)
+		return 0;
+
+	if (sub != FIP_SC_REP)
+		return 0;
+
+	rlen = ntohs(fiph->fip_dl_len) * 4;
+	if (rlen + sizeof(*fiph) > skb->len)
+		return 0;
+
+	desc = (struct fip_desc *)(fiph + 1);
+	dlen = desc->fip_dlen * FIP_BPW;
+
+	if (desc->fip_dtype == FIP_DT_FLOGI) {
+
+		shost_printk(KERN_DEBUG, lport->host,
+			  " FIP TYPE FLOGI: fab name:%llx "
+			  "vfid:%d map:%x\n",
+			  fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
+			  fip->sel_fcf->fc_map);
+		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+			return 0;
+
+		els_len = dlen - sizeof(*els);
+		els = (struct fip_encaps *)desc;
+		fh = (struct fc_frame_header *)(els + 1);
+		els_dtype = desc->fip_dtype;
+
+		if (!fh)
+			return 0;
+
+		/*
+		 * The ELS command code, reason and explanation should be:
+		 * reject, unsupported command, insufficient resources
+		 */
+		els_op = *(u8 *)(fh + 1);
+		if (els_op == ELS_LS_RJT) {
+			shost_printk(KERN_INFO, lport->host,
+				  "Flogi Request Rejected by Switch\n");
+			return 1;
+		}
+		shost_printk(KERN_INFO, lport->host,
+				"Flogi Request Accepted by Switch\n");
+	}
+	return 0;
+}
+
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+	struct fcoe_ctlr *fip = &fnic->ctlr;
+	struct sk_buff *skb;
+	char *eth_fr;
+	int fr_len;
+	struct fip_vlan *vlan;
+	u64 vlan_tov;
+
+	fnic_fcoe_reset_vlans(fnic);
+	fnic->set_vlan(fnic, 0);
+	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+		  "Sending VLAN request...\n");
+	skb = dev_alloc_skb(sizeof(struct fip_vlan));
+	if (!skb)
+		return;
+
+	fr_len = sizeof(*vlan);
+	eth_fr = (char *)skb->data;
+	vlan = (struct fip_vlan *)eth_fr;
+
+	memset(vlan, 0, sizeof(*vlan));
+	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
+	vlan->eth.h_proto = htons(ETH_P_FIP);
+
+	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+	vlan->fip.fip_op = htons(FIP_OP_VLAN);
+	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+
+	skb_put(skb, sizeof(*vlan));
+	skb->protocol = htons(ETH_P_FIP);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+	fip->send(fip, skb);
+
+	/* set a timer so that we can retry if there is no response */
+	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
+}
+
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
+{
+	struct fcoe_ctlr *fip = &fnic->ctlr;
+	struct fip_header *fiph;
+	struct fip_desc *desc;
+	u16 vid;
+	size_t rlen;
+	size_t dlen;
+	struct fcoe_vlan *vlan;
+	u64 sol_time;
+	unsigned long flags;
+
+	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+		  "Received VLAN response...\n");
+
+	fiph = (struct fip_header *) skb->data;
+
+	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+		  "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
+		  ntohs(fiph->fip_op), fiph->fip_subcode);
+
+	rlen = ntohs(fiph->fip_dl_len) * 4;
+	fnic_fcoe_reset_vlans(fnic);
+	spin_lock_irqsave(&fnic->vlans_lock, flags);
+	desc = (struct fip_desc *)(fiph + 1);
+	while (rlen > 0) {
+		dlen = desc->fip_dlen * FIP_BPW;
+		switch (desc->fip_dtype) {
+		case FIP_DT_VLAN:
+			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+			shost_printk(KERN_INFO, fnic->lport->host,
+				  "process_vlan_resp: FIP VLAN %d\n", vid);
+			vlan = kmalloc(sizeof(*vlan),
+							GFP_ATOMIC);
+			if (!vlan) {
+				/* retry from timer */
+				spin_unlock_irqrestore(&fnic->vlans_lock,
+							flags);
+				goto out;
+			}
+			memset(vlan, 0, sizeof(struct fcoe_vlan));
+			vlan->vid = vid & 0x0fff;
+			vlan->state = FIP_VLAN_AVAIL;
+			list_add_tail(&vlan->list, &fnic->vlans);
+			break;
+		}
+		desc = (struct fip_desc *)((char *)desc + dlen);
+		rlen -= dlen;
+	}
+
+	/* any VLAN descriptors present ? */
+	if (list_empty(&fnic->vlans)) {
+		/* retry from timer */
+		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+			  "No VLAN descriptors in FIP VLAN response\n");
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		goto out;
+	}
+
+	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+	fnic->set_vlan(fnic, vlan->vid);
+	vlan->state = FIP_VLAN_SENT; /* sent now */
+	vlan->sol_count++;
+	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+	/* start the solicitation */
+	fcoe_ctlr_link_up(fip);
+
+	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+out:
+	return;
+}
+
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
+{
+	unsigned long flags;
+	struct fcoe_vlan *vlan;
+	u64 sol_time;
+
+	spin_lock_irqsave(&fnic->vlans_lock, flags);
+	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+	fnic->set_vlan(fnic, vlan->vid);
+	vlan->state = FIP_VLAN_SENT; /* sent now */
+	vlan->sol_count = 1;
+	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+	/* start the solicitation */
+	fcoe_ctlr_link_up(&fnic->ctlr);
+
+	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+}
+
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
+{
+	unsigned long flags;
+	struct fcoe_vlan *fvlan;
+
+	spin_lock_irqsave(&fnic->vlans_lock, flags);
+	if (list_empty(&fnic->vlans)) {
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		return -EINVAL;
+	}
+
+	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+	if (fvlan->state == FIP_VLAN_USED) {
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		return 0;
+	}
+
+	if (fvlan->state == FIP_VLAN_SENT) {
+		fvlan->state = FIP_VLAN_USED;
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+	return -EINVAL;
+}
+
+static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
+{
+	struct fnic_event *fevt;
+	unsigned long flags;
+
+	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
+	if (!fevt)
+		return;
+
+	fevt->fnic = fnic;
+	fevt->event = ev;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	list_add_tail(&fevt->list, &fnic->evlist);
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	schedule_work(&fnic->event_work);
+}
+
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+{
+	struct fip_header *fiph;
+	int ret = 1;
+	u16 op;
+	u8 sub;
+
+	if (!skb || !(skb->data))
+		return -1;
+
+	if (skb_linearize(skb))
+		goto drop;
+
+	fiph = (struct fip_header *)skb->data;
+	op = ntohs(fiph->fip_op);
+	sub = fiph->fip_subcode;
+
+	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+		goto drop;
+
+	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+		goto drop;
+
+	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
+		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
+			goto drop;
+		/* pass it on to fcoe */
+		ret = 1;
+	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+		/* set the vlan as used */
+		fnic_fcoe_process_vlan_resp(fnic, skb);
+		ret = 0;
+	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+		/* received CVL request, restart vlan disc */
+		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+		/* pass it on to fcoe */
+		ret = 1;
+	}
+drop:
+	return ret;
+}
+
+void fnic_handle_fip_frame(struct work_struct *work)
+{
+	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+	unsigned long flags;
+	struct sk_buff *skb;
+	struct ethhdr *eh;
+
+	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->stop_rx_link_events) {
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			dev_kfree_skb(skb);
+			return;
+		}
+		/*
+		 * If we're in a transitional state, just re-queue and return.
+		 * The queue will be serviced when we get to a stable state.
+		 */
+		if (fnic->state != FNIC_IN_FC_MODE &&
+		    fnic->state != FNIC_IN_ETH_MODE) {
+			skb_queue_head(&fnic->fip_frame_queue, skb);
+			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		eh = (struct ethhdr *)skb->data;
+		if (eh->h_proto == htons(ETH_P_FIP)) {
+			skb_pull(skb, sizeof(*eh));
+			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
+				dev_kfree_skb(skb);
+				continue;
+			}
+			/*
+			 * If there's FLOGI rejects - clear all
+			 * fcf's & restart from scratch
+			 */
+			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+				shost_printk(KERN_INFO, fnic->lport->host,
+					  "Trigger a Link down - VLAN Disc\n");
+				fcoe_ctlr_link_down(&fnic->ctlr);
+				/* start FCoE VLAN discovery */
+				fnic_fcoe_send_vlan_req(fnic);
+				dev_kfree_skb(skb);
+				continue;
+			}
+			fcoe_ctlr_recv(&fnic->ctlr, skb);
+			continue;
+		}
+	}
+}
+
 /**
  * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
  * @fnic:	fnic instance.
@@ -150,8 +603,14 @@
 		skb_reset_mac_header(skb);
 	}
 	if (eh->h_proto == htons(ETH_P_FIP)) {
-		skb_pull(skb, sizeof(*eh));
-		fcoe_ctlr_recv(&fnic->ctlr, skb);
+		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
+			printk(KERN_ERR "Dropped FIP frame, as firmware "
+					"uses non-FIP mode, Enable FIP "
+					"using UCSM\n");
+			goto drop;
+		}
+		skb_queue_tail(&fnic->fip_frame_queue, skb);
+		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
 		return 1;		/* let caller know packet was used */
 	}
 	if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@
 	dev_kfree_skb(fp_skb(fp));
 	buf->os_buf = NULL;
 }
+
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+	unsigned long flags;
+	struct fcoe_vlan *vlan;
+	struct fcoe_vlan *next;
+
+	/*
+	 * Indicate a link down to fcoe so that all fcfs are freed.
+	 * This might not be required, since we already did it before
+	 * sending the vlan discovery request.
+	 */
+	spin_lock_irqsave(&fnic->vlans_lock, flags);
+	if (!list_empty(&fnic->vlans)) {
+		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
+			list_del(&vlan->list);
+			kfree(vlan);
+		}
+	}
+	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+}
+
+void fnic_handle_fip_timer(struct fnic *fnic)
+{
+	unsigned long flags;
+	struct fcoe_vlan *vlan;
+	u64 sol_time;
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (fnic->stop_rx_link_events) {
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+		return;
+
+	spin_lock_irqsave(&fnic->vlans_lock, flags);
+	if (list_empty(&fnic->vlans)) {
+		/* no vlans available, try again */
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+			  "Start VLAN Discovery\n");
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+		return;
+	}
+
+	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+	shost_printk(KERN_DEBUG, fnic->lport->host,
+		  "fip_timer: vlan %d state %d sol_count %d\n",
+		  vlan->vid, vlan->state, vlan->sol_count);
+	switch (vlan->state) {
+	case FIP_VLAN_USED:
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+			  "FIP VLAN is selected for FC transaction\n");
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		break;
+	case FIP_VLAN_FAILED:
+		/* if all vlans are in failed state, restart vlan disc */
+		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+			  "Start VLAN Discovery\n");
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+		break;
+	case FIP_VLAN_SENT:
+		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+			/*
+			 * no response on this vlan, remove it from the list.
+			 * Try the next vlan
+			 */
+			shost_printk(KERN_INFO, fnic->lport->host,
+				  "Dequeue this VLAN ID %d from list\n",
+				  vlan->vid);
+			list_del(&vlan->list);
+			kfree(vlan);
+			vlan = NULL;
+			if (list_empty(&fnic->vlans)) {
+				/* we exhausted all vlans, restart vlan disc */
+				spin_unlock_irqrestore(&fnic->vlans_lock,
+							flags);
+				shost_printk(KERN_INFO, fnic->lport->host,
+					  "fip_timer: vlan list empty, "
+					  "trigger vlan disc\n");
+				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+				return;
+			}
+			/* check the next vlan */
+			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
+							list);
+			fnic->set_vlan(fnic, vlan->vid);
+			vlan->state = FIP_VLAN_SENT; /* sent now */
+		}
+		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+		vlan->sol_count++;
+		sol_time = jiffies + msecs_to_jiffies
+					(FCOE_CTLR_START_DELAY);
+		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+		break;
+	}
+}
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644
index 0000000..87e74c2
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC_FIP_H_
+#define _FNIC_FIP_H_
+
+
+#define FCOE_CTLR_START_DELAY    2000    /* ms after first adv. to choose FCF */
+#define FCOE_CTLR_FIPVLAN_TOV    2000    /* ms after FIP VLAN disc */
+#define FCOE_CTLR_MAX_SOL        8
+
+#define FINC_MAX_FLOGI_REJECTS   8
+
+/*
+ * FIP_DT_VLAN descriptor.
+ */
+struct fip_vlan_desc {
+	struct fip_desc fd_desc;
+	__be16 fd_vlan;
+} __attribute__((packed));
+
+struct vlan {
+	__be16 vid;
+	__be16 type;
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+	struct list_head list;
+	u16 vid;		/* vlan ID */
+	u16 sol_count;		/* no. of sols sent */
+	u16 state;		/* state */
+};
+
+enum fip_vlan_state {
+	FIP_VLAN_AVAIL  = 0,	/* don't do anything */
+	FIP_VLAN_SENT   = 1,	/* sent */
+	FIP_VLAN_USED   = 2,	/* succeeded, in use */
+	FIP_VLAN_FAILED = 3,	/* failed, no response */
+};
+
+struct fip_vlan {
+	struct ethhdr eth;
+	struct fip_header fip;
+	struct {
+		struct fip_mac_desc mac;
+		struct fip_wwn_desc wwnn;
+	} desc;
+};
+
+#endif  /* _FNIC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index d601ac5..5f09d18 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -39,6 +39,7 @@
 #include "vnic_intr.h"
 #include "vnic_stats.h"
 #include "fnic_io.h"
+#include "fnic_fip.h"
 #include "fnic.h"
 
 #define PCI_DEVICE_ID_CISCO_FNIC	0x0045
@@ -292,6 +293,13 @@
 		  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
+static void fnic_fip_notify_timer(unsigned long data)
+{
+	struct fnic *fnic = (struct fnic *)data;
+
+	fnic_handle_fip_timer(fnic);
+}
+
 static void fnic_notify_timer_start(struct fnic *fnic)
 {
 	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@
 	return fnic->data_src_addr;
 }
 
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+{
+	u16 old_vlan;
+	old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct Scsi_Host *host;
@@ -620,7 +634,29 @@
 		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
 		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
 		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+		fnic->set_vlan = fnic_set_vlan;
 		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
+		setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
+							(unsigned long)fnic);
+		spin_lock_init(&fnic->vlans_lock);
+		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
+		INIT_WORK(&fnic->event_work, fnic_handle_event);
+		skb_queue_head_init(&fnic->fip_frame_queue);
+		spin_lock_irqsave(&fnic_list_lock, flags);
+		if (!fnic_fip_queue) {
+			fnic_fip_queue =
+				create_singlethread_workqueue("fnic_fip_q");
+			if (!fnic_fip_queue) {
+				spin_unlock_irqrestore(&fnic_list_lock, flags);
+				printk(KERN_ERR PFX "fnic FIP work queue "
+						 "create failed\n");
+				err = -ENOMEM;
+				goto err_out_free_max_pool;
+			}
+		}
+		spin_unlock_irqrestore(&fnic_list_lock, flags);
+		INIT_LIST_HEAD(&fnic->evlist);
+		INIT_LIST_HEAD(&fnic->vlans);
 	} else {
 		shost_printk(KERN_INFO, fnic->lport->host,
 			     "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@
 	skb_queue_purge(&fnic->frame_queue);
 	skb_queue_purge(&fnic->tx_queue);
 
+	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+		del_timer_sync(&fnic->fip_timer);
+		skb_queue_purge(&fnic->fip_frame_queue);
+		fnic_fcoe_reset_vlans(fnic);
+		fnic_fcoe_evlist_free(fnic);
+	}
+
 	/*
 	 * Log off the fabric. This stops all remote ports, dns port,
 	 * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@
 	len = sizeof(struct fnic_sgl_list);
 	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
 		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-		 SLAB_HWCACHE_ALIGN,
-		 NULL);
+		  SLAB_HWCACHE_ALIGN,
+		  NULL);
 	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
 		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
 		err = -ENOMEM;
@@ -951,6 +994,10 @@
 {
 	pci_unregister_driver(&fnic_driver);
 	destroy_workqueue(fnic_event_queue);
+	if (fnic_fip_queue) {
+		flush_workqueue(fnic_fip_queue);
+		destroy_workqueue(fnic_fip_queue);
+	}
 	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
 	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 	kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index b576be7..9795d6f 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -584,6 +584,16 @@
 	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 }
 
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+	u64 a0 = new_default_vlan, a1 = 0;
+	int wait = 1000;
+	int old_vlan = 0;
+
+	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+	return (u16)old_vlan;
+}
+
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
 	if (vdev->linkstatus)
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index f9935a8..40d4195 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -148,6 +148,8 @@
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
 int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+				u16 new_default_vlan);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index 7c9ccbd..3e2fcbd 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -196,6 +196,73 @@
 
 	/* undo initialize of virtual link */
 	CMD_DEINIT		= _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+	/* check fw capability of a cmd:
+	 * in:  (u32)a0=cmd
+	 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+	CMD_CAPABILITY      = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+	/* persistent binding info
+	 * in:  (u64)a0=paddr of arg
+	 *      (u32)a1=CMD_PERBI_XXX */
+	CMD_PERBI       = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+	/* Interrupt Assert Register functionality
+	 * in: (u16)a0=interrupt number to assert
+	 */
+	CMD_IAR         = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+	/* initiate hangreset, like softreset after hang detected */
+	CMD_HANG_RESET      = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+	/* hangreset status:
+	 *    out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+	/*
+	 * Set hw ingress packet vlan rewrite mode:
+	 * in:  (u32)a0=new vlan rewrite mode
+	 * out: (u32)a0=old vlan rewrite mode */
+	CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+	/*
+	 * in:  (u16)a0=bdf of target vnic
+	 *      (u32)a1=cmd to proxy
+	 *      a2-a15=args to cmd in a1
+	 * out: (u32)a0=status of proxied cmd
+	 *      a1-a15=out args of proxied cmd */
+	CMD_PROXY_BY_BDF =  _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+	/*
+	 * Same as BY_BDF, except a0 is the index of an hvnlink subordinate vnic
+	 * or SR-IOV virtual vnic
+	 */
+	CMD_PROXY_BY_INDEX =    _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+	/*
+	 * For HPP toggle:
+	 * adapter-info-get
+	 * in:  (u64)a0=physical address of buffer passed in from caller.
+	 *      (u16)a1=size of buffer specified in a0.
+	 * out: (u64)a0=physical address of buffer passed in from caller.
+	 *      (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+	 *              0 if no VIF-CONFIG-INFO TLV was ever received. */
+	CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+	/*
+	 * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+	 *            (u32)a1=INT13_CMD_xxx
+	 */
+	CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+	/*
+	 * Set default vlan:
+	 * in: (u16)a0=new default vlan
+	 *     (u16)a1=zero for overriding vlan with param a0,
+	 *             non-zero for resetting vlan to the default
+	 * out: (u16)a0=old default vlan
+	 */
+	CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
 };
 
 /* flags for CMD_OPEN */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index cc82d0f..4e31caa 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2179,7 +2179,7 @@
 		return 0;
 	}
 
-	if (vhost->state == IBMVFC_ACTIVE) {
+	if (vhost->logged_in) {
 		evt = ibmvfc_get_event(vhost);
 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
@@ -2190,7 +2190,12 @@
 		tmf->common.length = sizeof(*tmf);
 		tmf->scsi_id = rport->port_id;
 		int_to_scsilun(sdev->lun, &tmf->lun);
-		tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+		if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+			type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+		if (vhost->state == IBMVFC_ACTIVE)
+			tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+		else
+			tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
 		tmf->cancel_key = (unsigned long)sdev->hostdata;
 		tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
@@ -2327,7 +2332,7 @@
 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
 
 	if (!timeout) {
-		rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+		rc = ibmvfc_cancel_all(sdev, 0);
 		if (!rc) {
 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
 			if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@
  * @cmd:	scsi command to abort
  *
  * Returns:
- *	SUCCESS / FAILED
+ *	SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
-	int cancel_rc, abort_rc;
+	int cancel_rc, block_rc;
 	int rc = FAILED;
 
 	ENTER;
-	fc_block_scsi_eh(cmd);
+	block_rc = fc_block_scsi_eh(cmd);
 	ibmvfc_wait_while_resetting(vhost);
-	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-	abort_rc = ibmvfc_abort_task_set(sdev);
+	if (block_rc != FAST_IO_FAIL) {
+		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+		ibmvfc_abort_task_set(sdev);
+	} else
+		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
-	if (!cancel_rc && !abort_rc)
+	if (!cancel_rc)
 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+	if (block_rc == FAST_IO_FAIL && rc != FAILED)
+		rc = FAST_IO_FAIL;
+
 	LEAVE;
 	return rc;
 }
@@ -2410,29 +2421,47 @@
  * @cmd:	scsi command struct
  *
  * Returns:
- *	SUCCESS / FAILED
+ *	SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
-	int cancel_rc, reset_rc;
+	int cancel_rc, block_rc, reset_rc = 0;
 	int rc = FAILED;
 
 	ENTER;
-	fc_block_scsi_eh(cmd);
+	block_rc = fc_block_scsi_eh(cmd);
 	ibmvfc_wait_while_resetting(vhost);
-	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
-	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+	if (block_rc != FAST_IO_FAIL) {
+		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+	} else
+		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
 	if (!cancel_rc && !reset_rc)
 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+	if (block_rc == FAST_IO_FAIL && rc != FAILED)
+		rc = FAST_IO_FAIL;
+
 	LEAVE;
 	return rc;
 }
 
 /**
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
+ * @sdev:	scsi device struct
+ * @data:	return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
+{
+	unsigned long *rc = data;
+	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+}
+
+/**
  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
  * @sdev:	scsi device struct
  * @data:	return code
@@ -2449,26 +2478,33 @@
  * @cmd:	scsi command struct
  *
  * Returns:
- *	SUCCESS / FAILED
+ *	SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
 	struct scsi_target *starget = scsi_target(sdev);
-	int reset_rc;
+	int block_rc;
+	int reset_rc = 0;
 	int rc = FAILED;
 	unsigned long cancel_rc = 0;
 
 	ENTER;
-	fc_block_scsi_eh(cmd);
+	block_rc = fc_block_scsi_eh(cmd);
 	ibmvfc_wait_while_resetting(vhost);
-	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
-	reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+	if (block_rc != FAST_IO_FAIL) {
+		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+	} else
+		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
 
 	if (!cancel_rc && !reset_rc)
 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
+	if (block_rc == FAST_IO_FAIL && rc != FAILED)
+		rc = FAST_IO_FAIL;
+
 	LEAVE;
 	return rc;
 }
@@ -2480,12 +2516,16 @@
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-	int rc;
+	int rc, block_rc;
 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-	fc_block_scsi_eh(cmd);
+	block_rc = fc_block_scsi_eh(cmd);
 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+	if (block_rc == FAST_IO_FAIL)
+		return FAST_IO_FAIL;
+
 	return rc ? FAILED : SUCCESS;
 }
 
@@ -2509,8 +2549,7 @@
 		dev_rport = starget_to_rport(scsi_target(sdev));
 		if (dev_rport != rport)
 			continue;
-		ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-		ibmvfc_abort_task_set(sdev);
+		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 	}
 
 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3be8af6..017a529 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME	"ibmvfc"
-#define IBMVFC_DRIVER_VERSION		"1.0.10"
-#define IBMVFC_DRIVER_DATE		"(August 24, 2012)"
+#define IBMVFC_DRIVER_VERSION		"1.0.11"
+#define IBMVFC_DRIVER_DATE		"(April 12, 2013)"
 
 #define IBMVFC_DEFAULT_TIMEOUT	60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT	45
@@ -208,10 +208,10 @@
 	u16 error;
 	u32 flags;
 #define IBMVFC_NATIVE_FC		0x01
-#define IBMVFC_CAN_FLUSH_ON_HALT	0x08
 	u32 reserved;
 	u64 capabilities;
 #define IBMVFC_CAN_FLUSH_ON_HALT	0x08
+#define IBMVFC_CAN_SUPPRESS_ABTS	0x10
 	u32 max_cmds;
 	u32 scsi_id_sz;
 	u64 max_dma_len;
@@ -351,6 +351,7 @@
 #define IBMVFC_TMF_LUN_RESET		0x10
 #define IBMVFC_TMF_TGT_RESET		0x20
 #define IBMVFC_TMF_LUA_VALID		0x40
+#define IBMVFC_TMF_SUPPRESS_ABTS	0x80
 	u32 cancel_key;
 	u32 my_cancel_key;
 	u32 pad;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2197b57..82a3c1e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4777,7 +4777,7 @@
 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-	if (!ioa_cfg->in_reset_reload) {
+	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
 		dev_err(&ioa_cfg->pdev->dev,
 			"Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@
 {
 	u32 ioadl_flags = 0;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
 	int len = qc->nbytes;
 	struct scatterlist *sg;
@@ -6441,7 +6441,7 @@
 	ioarcb->ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
-		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
 
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@
 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	int i;
 
 	ENTER;
 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@
 
 	ioa_cfg->in_reset_reload = 0;
 	ioa_cfg->reset_retries = 0;
+	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+		spin_lock(&ioa_cfg->hrrq[i]._lock);
+		ioa_cfg->hrrq[i].ioa_is_dead = 1;
+		spin_unlock(&ioa_cfg->hrrq[i]._lock);
+	}
+	wmb();
+
 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	wake_up_all(&ioa_cfg->reset_wait_q);
 	LEAVE;
@@ -8651,7 +8659,7 @@
 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
 		ioa_cfg->sdt_state = ABORT_DUMP;
-	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
 	ioa_cfg->in_ioa_bringdown = 1;
 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
 		spin_lock(&ioa_cfg->hrrq[i]._lock);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 21a6ff1..a1fb840 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -552,7 +552,7 @@
 	u8 hob_lbam;
 	u8 hob_lbah;
 	u8 ctl;
-}__attribute__ ((packed, aligned(4)));
+}__attribute__ ((packed, aligned(2)));
 
 struct ipr_ioadl_desc {
 	__be32 flags_and_data_len;
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index c3aa6c5..96a26f4 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1085,7 +1085,7 @@
 	struct isci_host *ihost = idev->owning_port->owning_controller;
 	struct domain_device *dev = idev->domain_dev;
 
-	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+	if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
 		sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
 	} else if (dev_is_expander(dev)) {
 		sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@
 	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
 	struct domain_device *dev = idev->domain_dev;
 
-	if (dev->dev_type == SAS_END_DEV) {
+	if (dev->dev_type == SAS_END_DEVICE) {
 		struct isci_host *ihost = idev->owning_port->owning_controller;
 
 		isci_remote_device_not_ready(ihost, idev,
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 7674caa..47a013f 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -297,7 +297,7 @@
 
 static inline bool dev_is_expander(struct domain_device *dev)
 {
-	return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+	return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
 }
 
 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 9594ab6..e3e3bcb 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2978,7 +2978,7 @@
 	/* all unaccelerated request types (non ssp or ncq) handled with
 	 * substates
 	 */
-	if (!task && dev->dev_type == SAS_END_DEV) {
+	if (!task && dev->dev_type == SAS_END_DEVICE) {
 		state = SCI_REQ_TASK_WAIT_TC_COMP;
 	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
 		state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@
 	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
 		return SCI_FAILURE_INVALID_REMOTE_DEVICE;
 
-	if (dev->dev_type == SAS_END_DEV)
+	if (dev->dev_type == SAS_END_DEVICE)
 		/* pass */;
 	else if (dev_is_sata(dev))
 		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@
 	/* Build the common part of the request */
 	sci_general_request_construct(ihost, idev, ireq);
 
-	if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
+	if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
 		set_bit(IREQ_TMF, &ireq->flags);
 		memset(ireq->tc, 0, sizeof(struct scu_task_context));
 
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index b6f19a1..9bb020a 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -250,7 +250,7 @@
 	}
 
 	/* XXX convert to get this from task->tproto like other drivers */
-	if (dev->dev_type == SAS_END_DEV) {
+	if (dev->dev_type == SAS_END_DEVICE) {
 		isci_tmf->proto = SAS_PROTOCOL_SSP;
 		status = sci_task_request_construct_ssp(ireq);
 		if (status != SCI_SUCCESS)
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bdb81cd..161c98e 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -285,14 +285,14 @@
 	if (phy->attached_tproto & SAS_PROTOCOL_STP)
 		dev->tproto = phy->attached_tproto;
 	if (phy->attached_sata_dev)
-		dev->tproto |= SATA_DEV;
+		dev->tproto |= SAS_SATA_DEV;
 
-	if (phy->attached_dev_type == SATA_PENDING)
-		dev->dev_type = SATA_PENDING;
+	if (phy->attached_dev_type == SAS_SATA_PENDING)
+		dev->dev_type = SAS_SATA_PENDING;
 	else {
 		int res;
 
-		dev->dev_type = SATA_DEV;
+		dev->dev_type = SAS_SATA_DEV;
 		res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
 					      &dev->sata_dev.rps_resp);
 		if (res) {
@@ -314,7 +314,7 @@
 	int res;
 
 	/* we weren't pending, so successfully end the reset sequence now */
-	if (dev->dev_type != SATA_PENDING)
+	if (dev->dev_type != SAS_SATA_PENDING)
 		return 1;
 
 	/* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@
 		return 0;
 
 	switch (ex_phy->attached_dev_type) {
-	case SATA_PENDING:
+	case SAS_SATA_PENDING:
 		return 0;
-	case SAS_END_DEV:
+	case SAS_END_DEVICE:
 		if (ex_phy->attached_sata_dev)
 			return sas_ata_clear_pending(dev, ex_phy);
 	default:
@@ -631,7 +631,7 @@
 	struct dev_to_host_fis *fis =
 		(struct dev_to_host_fis *) dev->frame_rcvd;
 
-	if (dev->dev_type == SATA_PENDING)
+	if (dev->dev_type == SAS_SATA_PENDING)
 		return;
 
 	if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@
 {
 	int res;
 
-	if (dev->dev_type == SATA_PM)
+	if (dev->dev_type == SAS_SATA_PM)
 		return -ENODEV;
 
 	sas_get_ata_command_set(dev);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a0c3003..62b58d3 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -39,11 +39,11 @@
 void sas_init_dev(struct domain_device *dev)
 {
 	switch (dev->dev_type) {
-	case SAS_END_DEV:
+	case SAS_END_DEVICE:
 		INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
 		break;
-	case EDGE_DEV:
-	case FANOUT_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		INIT_LIST_HEAD(&dev->ex_dev.children);
 		mutex_init(&dev->ex_dev.cmd_mutex);
 		break;
@@ -93,9 +93,9 @@
 		if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
 		    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
 		    && (fis->device & ~0x10) == 0)
-			dev->dev_type = SATA_PM;
+			dev->dev_type = SAS_SATA_PM;
 		else
-			dev->dev_type = SATA_DEV;
+			dev->dev_type = SAS_SATA_DEV;
 		dev->tproto = SAS_PROTOCOL_SATA;
 	} else {
 		struct sas_identify_frame *id =
@@ -109,21 +109,21 @@
 
 	dev->port = port;
 	switch (dev->dev_type) {
-	case SATA_DEV:
+	case SAS_SATA_DEV:
 		rc = sas_ata_init(dev);
 		if (rc) {
 			rphy = NULL;
 			break;
 		}
 		/* fall through */
-	case SAS_END_DEV:
+	case SAS_END_DEVICE:
 		rphy = sas_end_device_alloc(port->port);
 		break;
-	case EDGE_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
 		rphy = sas_expander_alloc(port->port,
 					  SAS_EDGE_EXPANDER_DEVICE);
 		break;
-	case FANOUT_DEV:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		rphy = sas_expander_alloc(port->port,
 					  SAS_FANOUT_EXPANDER_DEVICE);
 		break;
@@ -156,7 +156,7 @@
 	dev->rphy = rphy;
 	get_device(&dev->rphy->dev);
 
-	if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+	if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
 		list_add_tail(&dev->disco_list_node, &port->disco_list);
 	else {
 		spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@
 	dev->phy = NULL;
 
 	/* remove the phys and ports, everything else should be gone */
-	if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+	if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 		kfree(dev->ex_dev.ex_phy);
 
 	if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@
 	spin_unlock_irq(&port->dev_list_lock);
 
 	spin_lock_irq(&ha->lock);
-	if (dev->dev_type == SAS_END_DEV &&
+	if (dev->dev_type == SAS_END_DEVICE &&
 	    !list_empty(&dev->ssp_dev.eh_list_node)) {
 		list_del_init(&dev->ssp_dev.eh_list_node);
 		ha->eh_active--;
@@ -457,15 +457,15 @@
 		    task_pid_nr(current));
 
 	switch (dev->dev_type) {
-	case SAS_END_DEV:
+	case SAS_END_DEVICE:
 		error = sas_discover_end_dev(dev);
 		break;
-	case EDGE_DEV:
-	case FANOUT_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		error = sas_discover_root_expander(dev);
 		break;
-	case SATA_DEV:
-	case SATA_PM:
+	case SAS_SATA_DEV:
+	case SAS_SATA_PM:
 #ifdef CONFIG_SCSI_SAS_ATA
 		error = sas_discover_sata(dev);
 		break;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 55cbd01..446b851 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -183,21 +183,21 @@
 	}
 }
 
-static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+static enum sas_device_type to_dev_type(struct discover_resp *dr)
 {
 	/* This is detecting a failure to transmit initial dev to host
 	 * FIS as described in section J.5 of sas-2 r16
 	 */
-	if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+	if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
 	    dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
-		return SATA_PENDING;
+		return SAS_SATA_PENDING;
 	else
 		return dr->attached_dev_type;
 }
 
 static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 {
-	enum sas_dev_type dev_type;
+	enum sas_device_type dev_type;
 	enum sas_linkrate linkrate;
 	u8 sas_addr[SAS_ADDR_SIZE];
 	struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@
 	/* Handle vacant phy - rest of dr data is not valid so skip it */
 	if (phy->phy_state == PHY_VACANT) {
 		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-		phy->attached_dev_type = NO_DEVICE;
+		phy->attached_dev_type = SAS_PHY_UNUSED;
 		if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
 			phy->phy_id = phy_id;
 			goto skip;
@@ -259,7 +259,7 @@
 	/* help some expanders that fail to zero sas_address in the 'no
 	 * device' case
 	 */
-	if (phy->attached_dev_type == NO_DEVICE ||
+	if (phy->attached_dev_type == SAS_PHY_UNUSED ||
 	    phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
 		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
 	else
@@ -292,13 +292,13 @@
 
  out:
 	switch (phy->attached_dev_type) {
-	case SATA_PENDING:
+	case SAS_SATA_PENDING:
 		type = "stp pending";
 		break;
-	case NO_DEVICE:
+	case SAS_PHY_UNUSED:
 		type = "no device";
 		break;
-	case SAS_END_DEV:
+	case SAS_END_DEVICE:
 		if (phy->attached_iproto) {
 			if (phy->attached_tproto)
 				type = "host+target";
@@ -311,8 +311,8 @@
 				type = "ssp";
 		}
 		break;
-	case EDGE_DEV:
-	case FANOUT_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		type = "smp";
 		break;
 	default:
@@ -833,7 +833,7 @@
 	} else
 #endif
 	  if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
-		child->dev_type = SAS_END_DEV;
+		child->dev_type = SAS_END_DEVICE;
 		rphy = sas_end_device_alloc(phy->port);
 		/* FIXME: error handling */
 		if (unlikely(!rphy))
@@ -932,11 +932,11 @@
 
 
 	switch (phy->attached_dev_type) {
-	case EDGE_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
 		rphy = sas_expander_alloc(phy->port,
 					  SAS_EDGE_EXPANDER_DEVICE);
 		break;
-	case FANOUT_DEV:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		rphy = sas_expander_alloc(phy->port,
 					  SAS_FANOUT_EXPANDER_DEVICE);
 		break;
@@ -1013,7 +1013,7 @@
 	if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
 		sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
 
-	if (ex_phy->attached_dev_type == NO_DEVICE) {
+	if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
 		if (ex_phy->routing_attr == DIRECT_ROUTING) {
 			memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
 			sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@
 	} else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
 		return 0;
 
-	if (ex_phy->attached_dev_type != SAS_END_DEV &&
-	    ex_phy->attached_dev_type != FANOUT_DEV &&
-	    ex_phy->attached_dev_type != EDGE_DEV &&
-	    ex_phy->attached_dev_type != SATA_PENDING) {
+	if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
+	    ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
+	    ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+	    ex_phy->attached_dev_type != SAS_SATA_PENDING) {
 		SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
 			    "phy 0x%x\n", ex_phy->attached_dev_type,
 			    SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@
 	}
 
 	switch (ex_phy->attached_dev_type) {
-	case SAS_END_DEV:
-	case SATA_PENDING:
+	case SAS_END_DEVICE:
+	case SAS_SATA_PENDING:
 		child = sas_ex_discover_end_dev(dev, phy_id);
 		break;
-	case FANOUT_DEV:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
 			SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
 				    "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@
 			memcpy(dev->port->disc.fanout_sas_addr,
 			       ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
 		/* fallthrough */
-	case EDGE_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
 		child = sas_ex_discover_expander(dev, phy_id);
 		break;
 	default:
@@ -1111,8 +1111,8 @@
 		    phy->phy_state == PHY_NOT_PRESENT)
 			continue;
 
-		if ((phy->attached_dev_type == EDGE_DEV ||
-		     phy->attached_dev_type == FANOUT_DEV) &&
+		if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+		     phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
 		    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
 			memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@
 	u8 sub_addr[8] = {0, };
 
 	list_for_each_entry(child, &ex->children, siblings) {
-		if (child->dev_type != EDGE_DEV &&
-		    child->dev_type != FANOUT_DEV)
+		if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+		    child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
 			continue;
 		if (sub_addr[0] == 0) {
 			sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@
 	int i;
 	u8  *sub_sas_addr = NULL;
 
-	if (dev->dev_type != EDGE_DEV)
+	if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
 		return 0;
 
 	for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@
 		    phy->phy_state == PHY_NOT_PRESENT)
 			continue;
 
-		if ((phy->attached_dev_type == FANOUT_DEV ||
-		     phy->attached_dev_type == EDGE_DEV) &&
+		if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+		     phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
 		    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
 			if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@
 						 struct ex_phy *child_phy)
 {
 	static const char *ex_type[] = {
-		[EDGE_DEV] = "edge",
-		[FANOUT_DEV] = "fanout",
+		[SAS_EDGE_EXPANDER_DEVICE] = "edge",
+		[SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
 	};
 	struct domain_device *parent = child->parent;
 
@@ -1321,8 +1321,8 @@
 	if (!child->parent)
 		return 0;
 
-	if (child->parent->dev_type != EDGE_DEV &&
-	    child->parent->dev_type != FANOUT_DEV)
+	if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+	    child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
 		return 0;
 
 	parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@
 		child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
 
 		switch (child->parent->dev_type) {
-		case EDGE_DEV:
-			if (child->dev_type == FANOUT_DEV) {
+		case SAS_EDGE_EXPANDER_DEVICE:
+			if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
 				if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
 				    child_phy->routing_attr != TABLE_ROUTING) {
 					sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@
 				}
 			}
 			break;
-		case FANOUT_DEV:
+		case SAS_FANOUT_EXPANDER_DEVICE:
 			if (parent_phy->routing_attr != TABLE_ROUTING ||
 			    child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
 				sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@
 	struct domain_device *dev;
 
 	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-		if (dev->dev_type == EDGE_DEV ||
-		    dev->dev_type == FANOUT_DEV) {
+		if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+		    dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
 			struct sas_expander_device *ex =
 				rphy_to_expander_device(dev->rphy);
 
@@ -1720,7 +1720,7 @@
 }
 
 static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
-				    u8 *sas_addr, enum sas_dev_type *type)
+				    u8 *sas_addr, enum sas_device_type *type)
 {
 	int res;
 	struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@
 			SAS_DPRINTK("Expander phys DID NOT change\n");
 	}
 	list_for_each_entry(ch, &ex->children, siblings) {
-		if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
+		if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
 			res = sas_find_bcast_dev(ch, src_dev);
 			if (*src_dev)
 				return res;
@@ -1866,8 +1866,8 @@
 
 	list_for_each_entry_safe(child, n, &ex->children, siblings) {
 		set_bit(SAS_DEV_GONE, &child->state);
-		if (child->dev_type == EDGE_DEV ||
-		    child->dev_type == FANOUT_DEV)
+		if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+		    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 			sas_unregister_ex_tree(port, child);
 		else
 			sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@
 			if (SAS_ADDR(child->sas_addr) ==
 			    SAS_ADDR(phy->attached_sas_addr)) {
 				set_bit(SAS_DEV_GONE, &child->state);
-				if (child->dev_type == EDGE_DEV ||
-				    child->dev_type == FANOUT_DEV)
+				if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+				    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 					sas_unregister_ex_tree(parent->port, child);
 				else
 					sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@
 	int res = 0;
 
 	list_for_each_entry(child, &ex_root->children, siblings) {
-		if (child->dev_type == EDGE_DEV ||
-		    child->dev_type == FANOUT_DEV) {
+		if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+		    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
 			struct sas_expander_device *ex =
 				rphy_to_expander_device(child->rphy);
 
@@ -1970,8 +1970,8 @@
 	list_for_each_entry(child, &dev->ex_dev.children, siblings) {
 		if (SAS_ADDR(child->sas_addr) ==
 		    SAS_ADDR(ex_phy->attached_sas_addr)) {
-			if (child->dev_type == EDGE_DEV ||
-			    child->dev_type == FANOUT_DEV)
+			if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+			    child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 				res = sas_discover_bfs_by_root(child);
 			break;
 		}
@@ -1979,16 +1979,16 @@
 	return res;
 }
 
-static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
 {
 	if (old == new)
 		return true;
 
 	/* treat device directed resets as flutter, if we went
-	 * SAS_END_DEV to SATA_PENDING the link needs recovery
+	 * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
 	 */
-	if ((old == SATA_PENDING && new == SAS_END_DEV) ||
-	    (old == SAS_END_DEV && new == SATA_PENDING))
+	if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
+	    (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
 		return true;
 
 	return false;
@@ -1998,7 +1998,7 @@
 {
 	struct expander_device *ex = &dev->ex_dev;
 	struct ex_phy *phy = &ex->ex_phy[phy_id];
-	enum sas_dev_type type = NO_DEVICE;
+	enum sas_device_type type = SAS_PHY_UNUSED;
 	u8 sas_addr[8];
 	int res;
 
@@ -2032,7 +2032,7 @@
 
 		sas_ex_phy_discover(dev, phy_id);
 
-		if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+		if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
 			action = ", needs recovery";
 		SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
 			    SAS_ADDR(dev->sas_addr), phy_id, action);
@@ -2163,10 +2163,10 @@
 	}
 
 	/* do we need to support multiple segments? */
-	if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
+	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
 		printk("%s: multiple segments req %u %u, rsp %u %u\n",
-		       __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
-		       rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
+		       __func__, bio_segments(req->bio), blk_rq_bytes(req),
+		       bio_segments(rsp->bio), blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 1de6796..7e7ba83 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -131,16 +131,16 @@
 	rphy->identify.initiator_port_protocols = dev->iproto;
 	rphy->identify.target_port_protocols = dev->tproto;
 	switch (dev->dev_type) {
-	case SATA_DEV:
+	case SAS_SATA_DEV:
 		/* FIXME: need sata device type */
-	case SAS_END_DEV:
-	case SATA_PENDING:
+	case SAS_END_DEVICE:
+	case SAS_SATA_PENDING:
 		rphy->identify.device_type = SAS_END_DEVICE;
 		break;
-	case EDGE_DEV:
+	case SAS_EDGE_EXPANDER_DEVICE:
 		rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
 		break;
-	case FANOUT_DEV:
+	case SAS_FANOUT_EXPANDER_DEVICE:
 		rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
 		break;
 	default:
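
The libsas and isci hunks in this series are one mechanical rename: the driver-side enum sas_dev_type (SAS_END_DEV, EDGE_DEV, FANOUT_DEV, SATA_DEV, ...) is folded into the transport class's enum sas_device_type, so switches like the one above now see identical enumerator names on both sides. A hedged sketch of what that mapping reduces to (helper name and header location assumed):

	#include <scsi/sas.h>

	static enum sas_device_type rphy_identify_type(enum sas_device_type t)
	{
		switch (t) {
		case SAS_SATA_DEV:		/* still reported as an end device */
		case SAS_SATA_PENDING:
		case SAS_END_DEVICE:
			return SAS_END_DEVICE;
		case SAS_EDGE_EXPANDER_DEVICE:
		case SAS_FANOUT_EXPANDER_DEVICE:
			return t;		/* same enumerator on both sides now */
		default:
			return SAS_PHY_UNUSED;
		}
	}
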
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 1398b71..d3c5297 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -69,7 +69,7 @@
 			continue;
 		}
 
-		if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+		if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
 			dev->ex_dev.ex_change_count = -1;
 			for (i = 0; i < dev->ex_dev.num_phys; i++) {
 				struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 7706c99..bcc56ca 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -46,10 +46,15 @@
 #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
 		cmnd for menlo needs nearly twice as for firmware
 		downloads using bsg */
-#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
+
 #define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
-#define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN		100	/* vport symbolic name length */
@@ -66,8 +71,10 @@
  * queue depths when there are driver resource error or Firmware
  * resource error.
  */
-#define QUEUE_RAMP_DOWN_INTERVAL	(1 * HZ)   /* 1 Second */
-#define QUEUE_RAMP_UP_INTERVAL		(300 * HZ) /* 5 minutes */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
+/* 5 minutes */
+#define QUEUE_RAMP_UP_INTERVAL		(msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -671,6 +678,7 @@
 	uint32_t lmt;
 
 	uint32_t fc_topology;	/* link topology, from LINK INIT */
+	uint32_t fc_topology_changed;	/* link topology changed since link up */
 
 	struct lpfc_stats fc_stat;
 
@@ -701,9 +709,11 @@
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_use_msi;
 	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_cpu_map;
 	uint32_t cfg_fcp_wq_count;
 	uint32_t cfg_fcp_eq_count;
 	uint32_t cfg_fcp_io_channel;
+	uint32_t cfg_total_seg_cnt;
 	uint32_t cfg_sg_seg_cnt;
 	uint32_t cfg_prot_sg_seg_cnt;
 	uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@
 	uint64_t bg_reftag_err_cnt;
 
 	/* fastpath list. */
-	spinlock_t scsi_buf_list_lock;
-	struct list_head lpfc_scsi_buf_list;
+	spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+	spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+	struct list_head lpfc_scsi_buf_list_get;
+	struct list_head lpfc_scsi_buf_list_put;
 	uint32_t total_scsi_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
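
The get/put split above is the classic two-lock free list: allocation drains a "get" list while frees land on a separate "put" list, and the lists are swapped only when the get side runs dry, so the two hot paths rarely contend on one spinlock (the matching splice logic appears in the lpfc_init.c hunks later in this patch). A minimal sketch of the pattern with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct buf_pool {
		spinlock_t get_lock, put_lock;
		struct list_head get_list, put_list;
	};

	static struct list_head *buf_pool_get(struct buf_pool *p)
	{
		struct list_head *node = NULL;

		spin_lock(&p->get_lock);
		if (list_empty(&p->get_list)) {
			/* Refill: steal everything the free path queued. */
			spin_lock(&p->put_lock);
			list_splice_init(&p->put_list, &p->get_list);
			spin_unlock(&p->put_lock);
		}
		if (!list_empty(&p->get_list)) {
			node = p->get_list.next;
			list_del(node);
		}
		spin_unlock(&p->get_lock);
		return node;
	}

	static void buf_pool_put(struct buf_pool *p, struct list_head *node)
	{
		spin_lock(&p->put_lock);
		list_add_tail(node, &p->put_list);
		spin_unlock(&p->put_lock);
	}
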
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 9290713..3c5625b 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,6 +674,9 @@
 	int i;
 	int rc;
 
+	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+		return 0;
+
 	init_completion(&online_compl);
 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
 			      LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@
 	int status = 0;
 	int rc;
 
-	if (!phba->cfg_enable_hba_reset)
+	if ((!phba->cfg_enable_hba_reset) ||
+	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
 		return -EACCES;
 
 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@
 		pci_disable_sriov(pdev);
 		phba->cfg_sriov_nr_virtfn = 0;
 	}
+
 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
 	if (status != 0)
@@ -2801,6 +2806,8 @@
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 			"3054 lpfc_topology changed from %d to %d\n",
 			prev_val, val);
+		if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+			phba->fc_topology_changed = 1;
 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
 		if (err) {
 			phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
 		   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_fcp_cpu_map_show - Display the driver's CPU to IRQ vector mapping
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the CPU to vector mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_vector_map_info *cpup;
+	int  idx, len = 0;
+
+	if ((phba->sli_rev != LPFC_SLI_REV4) ||
+	    (phba->intr_type != MSIX))
+		return len;
+
+	switch (phba->cfg_fcp_cpu_map) {
+	case 0:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: No mapping (%d)\n",
+				phba->cfg_fcp_cpu_map);
+		return len;
+	case 1:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: HBA centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	case 2:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: Driver centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	}
+
+	cpup = phba->sli4_hba.cpu_map;
+	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d\n",
+					idx, cpup->channel_id, cpup->phys_id,
+					cpup->core_id);
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d IRQ %d\n",
+					idx, cpup->channel_id, cpup->phys_id,
+					cpup->core_id, cpup->irq);
+
+		cpup++;
+	}
+	return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: data buffer, not used.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	int status = -EINVAL;
+	return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#	0 - Do not affinitize IRQ vectors
+#	1 - Affinitize HBA vectors with respect to each HBA
+#	    (start with CPU0 for each HBA)
+#	2 - Affinitize HBA vectors with respect to the entire driver
+#	    (round robin thru all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+		 "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: CPU mapping mode value.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitize the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		phba->cfg_fcp_cpu_map = 0;
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+		phba->cfg_fcp_cpu_map = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3326 fcp_cpu_map: %d out of range, using default\n",
+			val);
+	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+	return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+		   lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
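
The lpfc_fcp_cpu_map attribute added above is readable at runtime; its store handler deliberately rejects writes for now, so the mode is only settable through the module parameter at load time. A small userspace sketch of reading it back (sysfs path assumed, host number varies):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_fcp_cpu_map", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* one line per present CPU */
		fclose(f);
		return 0;
	}
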
@@ -4009,12 +4151,11 @@
 #       0  = disabled (default)
 #       1  = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 #	- Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-		LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-		"Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be deprecated; the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+	    LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+	    "Max Protection Scatter Gather Segment Count");
 
 struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_bg_info,
@@ -4141,6 +4289,7 @@
 	&dev_attr_lpfc_poll_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_cpu_map,
 	&dev_attr_lpfc_fcp_wq_count,
 	&dev_attr_lpfc_fcp_eq_count,
 	&dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@
 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
 	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
 	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 8886668..094be2c 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -219,26 +219,35 @@
 	unsigned int transfer_bytes, bytes_copied = 0;
 	unsigned int sg_offset, dma_offset;
 	unsigned char *dma_address, *sg_address;
-	struct scatterlist *sgel;
 	LIST_HEAD(temp_list);
-
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	bool sg_valid;
 
 	list_splice_init(&dma_buffers->list, &temp_list);
 	list_add(&dma_buffers->list, &temp_list);
 	sg_offset = 0;
-	sgel = bsg_buffers->sg_list;
+	if (to_buffers)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
+	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+		       sg_flags);
+	local_irq_save(flags);
+	sg_valid = sg_miter_next(&miter);
 	list_for_each_entry(mp, &temp_list, list) {
 		dma_offset = 0;
-		while (bytes_to_transfer && sgel &&
+		while (bytes_to_transfer && sg_valid &&
 		       (dma_offset < LPFC_BPL_SIZE)) {
 			dma_address = mp->virt + dma_offset;
 			if (sg_offset) {
 				/* Continue previous partial transfer of sg */
-				sg_address = sg_virt(sgel) + sg_offset;
-				transfer_bytes = sgel->length - sg_offset;
+				sg_address = miter.addr + sg_offset;
+				transfer_bytes = miter.length - sg_offset;
 			} else {
-				sg_address = sg_virt(sgel);
-				transfer_bytes = sgel->length;
+				sg_address = miter.addr;
+				transfer_bytes = miter.length;
 			}
 			if (bytes_to_transfer < transfer_bytes)
 				transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@
 			sg_offset += transfer_bytes;
 			bytes_to_transfer -= transfer_bytes;
 			bytes_copied += transfer_bytes;
-			if (sg_offset >= sgel->length) {
+			if (sg_offset >= miter.length) {
 				sg_offset = 0;
-				sgel = sg_next(sgel);
+				sg_valid = sg_miter_next(&miter);
 			}
 		}
 	}
+	sg_miter_stop(&miter);
+	local_irq_restore(flags);
 	list_del_init(&dma_buffers->list);
 	list_splice(&temp_list, &dma_buffers->list);
 	return bytes_copied;
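
These lpfc_bsg.c hunks replace a bare sg_virt()/sg_next() walk with the sg_mapping_iter API, which maps each entry as it is visited and so stays correct for chained lists and highmem pages. A minimal sketch of the pattern (hypothetical helper, not driver code):

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static size_t copy_all_from_sg(struct scatterlist *sgl, unsigned int nents,
				       u8 *dst, size_t len)
	{
		struct sg_mapping_iter miter;
		size_t copied = 0;

		/* SG_MITER_FROM_SG: the s/g list is the source. SG_MITER_ATOMIC
		 * maps pages with kmap_atomic, so no sleeping between next()
		 * and stop(); the driver also disables IRQs around its walk. */
		sg_miter_start(&miter, sgl, nents,
			       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
		while (copied < len && sg_miter_next(&miter)) {
			size_t n = min(miter.length, len - copied);

			memcpy(dst + copied, miter.addr, n);
			copied += n;
		}
		sg_miter_stop(&miter);
		return copied;
	}
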
@@ -471,6 +482,7 @@
 	cmdiocbq->context1 = dd_data;
 	cmdiocbq->context2 = cmp;
 	cmdiocbq->context3 = bmp;
+	cmdiocbq->context_un.ndlp = ndlp;
 	dd_data->type = TYPE_IOCB;
 	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@
 	ctiocb->context1 = dd_data;
 	ctiocb->context2 = cmp;
 	ctiocb->context3 = bmp;
+	ctiocb->context_un.ndlp = ndlp;
 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
 
 	dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@
 	evt->wait_time_stamp = jiffies;
 	time_left = wait_event_interruptible_timeout(
 		evt->wq, !list_empty(&evt->events_to_see),
-		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+		msecs_to_jiffies(1000 *
+			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
 	if (list_empty(&evt->events_to_see))
 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
 	else {
@@ -3151,7 +3165,8 @@
 	evt->waiting = 1;
 	time_left = wait_event_interruptible_timeout(
 		evt->wq, !list_empty(&evt->events_to_see),
-		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+		msecs_to_jiffies(1000 *
+			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
 	evt->waiting = 0;
 	if (list_empty(&evt->events_to_see)) {
 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7631893..d41456e 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -470,3 +470,4 @@
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7bff3a1..ae1a07c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1811,7 +1811,8 @@
 		if (init_utsname()->nodename[0] != '\0')
 			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
 		else
-			mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+			mod_timer(&vport->fc_fdmitmo, jiffies +
+				  msecs_to_jiffies(1000 * 60));
 	}
 	return;
 }
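
The recurring mod_timer() changes in the lpfc files convert HZ arithmetic to msecs_to_jiffies(). For whole-second delays the two forms are equivalent (HZ is ticks per second), so this is a consistency pass that also expresses sub-second cases, like the 100 ms fabric-block timer further down, without HZ/10-style arithmetic. A sketch of the equivalence:

	#include <linux/jiffies.h>

	static inline unsigned long expiry_old(unsigned int secs)
	{
		return jiffies + HZ * secs;	/* whole seconds only */
	}

	static inline unsigned long expiry_new(unsigned int secs)
	{
		return jiffies + msecs_to_jiffies(1000 * secs);
	}
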
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index bbed847..3cae0a9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -29,6 +29,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -238,7 +239,10 @@
 
 		icmd->un.elsreq64.remoteID = did;		/* DID */
 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-		icmd->ulpTimeout = phba->fc_ratov * 2;
+		if (elscmd == ELS_CMD_FLOGI)
+			icmd->ulpTimeout = FF_DEF_RATOV * 2;
+		else
+			icmd->ulpTimeout = phba->fc_ratov * 2;
 	} else {
 		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
 		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@
 		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "0116 Xmit ELS command x%x to remote "
-				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
+				 "NPORT x%x I/O tag: x%x, port state:x%x"
+				 " fc_flag:x%x\n",
 				 elscmd, did, elsiocb->iotag,
-				 vport->port_state);
+				 vport->port_state,
+				 vport->fc_flag);
 	} else {
 		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "0117 Xmit ELS response x%x to remote "
-				 "NPORT x%x I/O tag: x%x, size: x%x\n",
+				 "NPORT x%x I/O tag: x%x, size: x%x "
+				 "port_state x%x fc_flag x%x\n",
 				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
-				 cmdSize);
+				 cmdSize, vport->port_state,
+				 vport->fc_flag);
 	}
 	return elsiocb;
 
@@ -909,6 +917,23 @@
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag |= FC_PT2PT;
 	spin_unlock_irq(shost->host_lock);
+	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+		lpfc_unregister_fcf_prep(phba);
+
+		/* The FC_VFI_REGISTERED flag will get cleared in the cmpl
+		 * handler for unreg_vfi, but if we don't force the
+		 * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+		 * built with the update bit set instead of just the vp bit to
+		 * change the Nport ID.  We need to have the vp set and the
+		 * Upd cleared on topology changes.
+		 */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_VFI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+		phba->fc_topology_changed = 0;
+		lpfc_issue_reg_vfi(vport);
+	}
 
 	/* Start discovery - this should just do CLEAR_LA */
 	lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@
 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
 		if ((phba->sli_rev == LPFC_SLI_REV4) &&
 		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-		     (vport->fc_prevDID != vport->fc_myDID))) {
-			if (vport->fc_flag & FC_VFI_REGISTERED)
-				lpfc_sli4_unreg_all_rpis(vport);
+		     (vport->fc_prevDID != vport->fc_myDID) ||
+			phba->fc_topology_changed)) {
+			if (vport->fc_flag & FC_VFI_REGISTERED) {
+				if (phba->fc_topology_changed) {
+					lpfc_unregister_fcf_prep(phba);
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_VFI_REGISTERED;
+					spin_unlock_irq(shost->host_lock);
+					phba->fc_topology_changed = 0;
+				} else {
+					lpfc_sli4_unreg_all_rpis(vport);
+				}
+			}
 			lpfc_issue_reg_vfi(vport);
 			lpfc_nlp_put(ndlp);
 			goto out;
@@ -1054,10 +1089,11 @@
 
 	/* FLOGI completes successfully */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0101 FLOGI completes successfully "
-			 "Data: x%x x%x x%x x%x\n",
+			 "0101 FLOGI completes successfully, I/O tag:x%x, "
+			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
 			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+			 vport->port_state, vport->fc_flag);
 
 	if (vport->port_state == LPFC_FLOGI) {
 		/*
@@ -5047,6 +5083,8 @@
 	struct ls_rjt stat;
 	uint32_t cmd, did;
 	int rc;
+	uint32_t fc_flag = 0;
+	uint32_t port_state = 0;
 
 	cmd = *lp++;
 	sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@
 			 * will be.
 			 */
 			vport->fc_myDID = PT2PT_LocalID;
-		}
+		} else
+			vport->fc_myDID = PT2PT_RemoteID;
 
 		/*
 		 * The vport state should go to LPFC_FLOGI only
 		 * AFTER we issue a FLOGI, not receive one.
 		 */
 		spin_lock_irq(shost->host_lock);
+		fc_flag = vport->fc_flag;
+		port_state = vport->port_state;
 		vport->fc_flag |= FC_PT2PT;
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		vport->port_state = LPFC_FLOGI;
 		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3311 Rcv Flogi PS x%x new PS x%x "
+				 "fc_flag x%x new fc_flag x%x\n",
+				 port_state, vport->port_state,
+				 fc_flag, vport->fc_flag);
 
 		/*
 		 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@
 	}
 
 	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+		mod_timer(&vport->els_tmofunc,
+			  jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6612,7 +6660,9 @@
 	/* ELS command <elsCmd> received from NPORT <did> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "0112 ELS command x%x received from NPORT x%x "
-			 "Data: x%x\n", cmd, did, vport->port_state);
+			 "Data: x%x x%x x%x x%x\n",
+			cmd, did, vport->port_state, vport->fc_flag,
+			vport->fc_myDID, vport->fc_prevDID);
 	switch (cmd) {
 	case ELS_CMD_PLOGI:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@
 
 		phba->fc_stat.elsRcvPLOGI++;
 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    (phba->pport->fc_flag & FC_PT2PT)) {
+			vport->fc_prevDID = vport->fc_myDID;
+			/* Our DID needs to be updated before registering
+			 * the vfi. This is done in lpfc_rcv_plogi but
+			 * that is called after the reg_vfi.
+			 */
+			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					 "3312 Remote port assigned DID x%x "
+					 "%x\n", vport->fc_myDID,
+					 vport->fc_prevDID);
+		}
 
 		lpfc_send_els_event(vport, ndlp, payload);
 
@@ -6630,6 +6693,7 @@
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
 		}
+		shost = lpfc_shost_from_vport(vport);
 		if (vport->port_state < LPFC_DISC_AUTH) {
 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
 				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@
 			 * another NPort and the other side has initiated
 			 * the PLOGI before responding to our FLOGI.
 			 */
+			if (phba->sli_rev == LPFC_SLI_REV4 &&
+			    (phba->fc_topology_changed ||
+			     vport->fc_myDID != vport->fc_prevDID)) {
+				lpfc_unregister_fcf_prep(phba);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_VFI_REGISTERED;
+				spin_unlock_irq(shost->host_lock);
+				phba->fc_topology_changed = 0;
+				lpfc_issue_reg_vfi(vport);
+			}
 		}
 
-		shost = lpfc_shost_from_vport(vport);
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
 		spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@
 	spin_lock_irq(shost->host_lock);
 	if (vport->fc_flag & FC_DISC_DELAYED) {
 		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"3334 Delay fc port discovery for %d seconds\n",
+				phba->fc_ratov);
 		mod_timer(&vport->delayed_disc_tmo,
-			jiffies + HZ * phba->fc_ratov);
+			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
 		return;
 	}
 	spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@
 		return;
 
 	shost = lpfc_shost_from_vport(phba->pport);
-	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@
 	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 	/* Start a timer to unblock fabric iocbs after 100ms */
 	if (!blocked)
-		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+		mod_timer(&phba->fabric_block_timer,
+			  jiffies + msecs_to_jiffies(100));
 
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 326e05a..0f6e254 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -160,11 +160,12 @@
 	if (!list_empty(&evtp->evt_listp))
 		return;
 
+	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+
 	spin_lock_irq(&phba->hbalock);
 	/* We need to hold the node by incrementing the reference
 	 * count until this queued work is done
 	 */
-	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
 	if (evtp->evt_arg1) {
 		evtp->evt = LPFC_EVT_DEV_LOSS;
 		list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 			lpfc_linkup_port(vports[i]);
 	lpfc_destroy_vport_work_array(phba, vports);
-	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-	    (phba->sli_rev < LPFC_SLI_REV4))
-		lpfc_issue_clear_la(phba, phba->pport);
 
 	return 0;
 }
@@ -1436,7 +1434,8 @@
 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
 		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
 		phba->hba_flag &= ~FCF_TS_INPROG;
-		if (phba->pport->port_state != LPFC_FLOGI) {
+		if (phba->pport->port_state != LPFC_FLOGI &&
+		    phba->pport->fc_flag & FC_FABRIC) {
 			phba->hba_flag |= FCF_RR_INPROG;
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@
 				spin_unlock_irq(&phba->hbalock);
 				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 						"2836 New FCF matches in-use "
-						"FCF (x%x)\n",
-						phba->fcf.current_rec.fcf_indx);
+						"FCF (x%x), port_state:x%x, "
+						"fc_flag:x%x\n",
+						phba->fcf.current_rec.fcf_indx,
+						phba->pport->port_state,
+						phba->pport->fc_flag);
 				goto out;
 			} else
 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@
 lpfc_issue_init_vpi(struct lpfc_vport *vport)
 {
 	LPFC_MBOXQ_t *mboxq;
-	int rc;
+	int rc, vpi;
+
+	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+		vpi = lpfc_alloc_vpi(vport->phba);
+		if (!vpi) {
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_MBOX,
+					 "3303 Failed to obtain vport vpi\n");
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			return;
+		}
+		vport->vpi = vpi;
+	}
 
 	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq) {
@@ -2894,9 +2908,14 @@
 		goto out_free_mem;
 	}
 
-	/* If the VFI is already registered, there is nothing else to do */
+	/* If the VFI is already registered, there is nothing else to do,
+	 * unless this was a VFI update and we are in PT2PT mode; then
+	 * we should drop through to set the port state to ready.
+	 */
 	if (vport->fc_flag & FC_VFI_REGISTERED)
-		goto out_free_mem;
+		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+		      vport->fc_flag & FC_PT2PT))
+			goto out_free_mem;
 
 	/* The VPI is implicitly registered when the VFI is registered */
 	spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@
 		goto out_free_mem;
 	}
 
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
+			 "alpacnt:%d LinkState:%x topology:%x\n",
+			 vport->port_state, vport->fc_flag, vport->fc_myDID,
+			 vport->phba->alpa_map[0],
+			 phba->link_state, phba->fc_topology);
+
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 		/*
 		 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@
 			/* Use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
 			/* Start discovery */
-			lpfc_disc_start(vport);
+			if (vport->fc_flag & FC_PT2PT)
+				vport->port_state = LPFC_VPORT_READY;
+			else
+				lpfc_disc_start(vport);
 		} else {
 			lpfc_start_fdiscs(phba);
 			lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@
 		break;
 	}
 
+	if (phba->fc_topology &&
+	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3314 Toplogy changed was 0x%x is 0x%x\n",
+				phba->fc_topology,
+				bf_get(lpfc_mbx_read_top_topology, la));
+		phba->fc_topology_changed = 1;
+	}
+
 	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
@@ -4235,7 +4273,7 @@
 			tmo, vport->port_state, vport->fc_flag);
 	}
 
-	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
 	spin_lock_irq(shost->host_lock);
 	vport->fc_flag |= FC_DISC_TMO;
 	spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@
 	uint32_t clear_la_pending;
 	int did_changed;
 
-	if (!lpfc_is_link_up(phba))
+	if (!lpfc_is_link_up(phba)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+				 "3315 Link is not up %x\n",
+				 phba->link_state);
 		return;
+	}
 
 	if (phba->link_state == LPFC_CLEAR_LA)
 		clear_la_pending = 1;
@@ -4983,11 +5025,13 @@
 	if (num_sent)
 		return;
 
-	/* Register the VPI for SLI3, NON-NPIV only. */
+	/* Register the VPI for SLI3, NPIV only. */
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
 	    !(vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_RSCN_MODE) &&
 	    (phba->sli_rev < LPFC_SLI_REV4)) {
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_issue_clear_la(phba, vport);
 		lpfc_issue_reg_vpi(phba, vport);
 		return;
 	}
@@ -5410,7 +5454,8 @@
 	if (vport->cfg_fdmi_on == 1)
 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
 	else
-		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+		mod_timer(&vport->fc_fdmitmo,
+			  jiffies + msecs_to_jiffies(1000 * 60));
 
 	/* decrement the node reference count held for this callback
 	 * function.
@@ -5855,7 +5900,7 @@
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host *shost;
-	int i, rc;
+	int i = 0, rc;
 
 	/* Unregister RPIs */
 	if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@
 			spin_unlock_irq(shost->host_lock);
 		}
 	lpfc_destroy_vport_work_array(phba, vports);
+	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+		if (ndlp)
+			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+		lpfc_cleanup_pending_mbox(phba->pport);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(phba->pport);
+		lpfc_mbx_unreg_vpi(phba->pport);
+		shost = lpfc_shost_from_vport(phba->pport);
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+	}
 
 	/* Cleanup any outstanding ELS commands */
 	lpfc_els_flush_all_cmd(phba);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index e8c4760..83700c1 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1667,6 +1667,7 @@
 #define	BG_OP_IN_CSUM_OUT_CSUM		0x5
 #define	BG_OP_IN_CRC_OUT_CSUM		0x6
 #define	BG_OP_IN_CSUM_OUT_CRC		0x7
+#define	BG_OP_RAW_MODE			0x8
 
 struct lpfc_pde5 {
 	uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1dd2f6f..713a461 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -200,6 +200,11 @@
 #define LPFC_MAX_IMAX          5000000
 #define LPFC_DEF_IMAX          50000
 
+#define LPFC_MIN_CPU_MAP       0
+#define LPFC_MAX_CPU_MAP       2
+#define LPFC_HBA_CPU_MAP       1
+#define LPFC_DRIVER_CPU_MAP    2  /* Default */
+
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES	8
 
@@ -621,7 +626,7 @@
 #define lpfc_sliport_status_rdy_SHIFT	23
 #define lpfc_sliport_status_rdy_MASK	0x1
 #define lpfc_sliport_status_rdy_WORD	word0
-#define MAX_IF_TYPE_2_RESETS	1000
+#define MAX_IF_TYPE_2_RESETS		6
 
 #define LPFC_CTL_PORT_CTL_OFFSET	0x408
 #define lpfc_sliport_ctrl_end_SHIFT	30
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90b8b05..cb465b2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@
 
 	/* Set up ring-0 (ELS) timer */
 	timeout = phba->fc_ratov * 2;
-	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+	mod_timer(&vport->els_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * timeout));
 	/* Set up heart beat (HB) timer */
-	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+	mod_timer(&phba->hb_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 	phba->hb_outstanding = 0;
 	phba->last_completion_time = jiffies;
 	/* Set up error attention (ERATT) polling timer */
-	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+	mod_timer(&phba->eratt_poll,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
 	if (phba->hba_flag & LINK_DISABLED) {
 		lpfc_printf_log(phba,
@@ -908,9 +915,9 @@
 		psb->pCmd = NULL;
 		psb->status = IOSTAT_SUCCESS;
 	}
-	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 	return 0;
 }
 
@@ -1021,7 +1028,8 @@
 		!(phba->link_state == LPFC_HBA_ERROR) &&
 		!(phba->pport->load_flag & FC_UNLOADING))
 		mod_timer(&phba->hb_tmofunc,
-			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+			  jiffies +
+			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 	return;
 }
 
@@ -1064,15 +1072,18 @@
 
 	spin_lock_irq(&phba->pport->work_port_lock);
 
-	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-		jiffies)) {
+	if (time_after(phba->last_completion_time +
+			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+			jiffies)) {
 		spin_unlock_irq(&phba->pport->work_port_lock);
 		if (!phba->hb_outstanding)
 			mod_timer(&phba->hb_tmofunc,
-				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+				jiffies +
+				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 		else
 			mod_timer(&phba->hb_tmofunc,
-				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+				jiffies +
+				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
 		return;
 	}
 	spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@
 				if (!pmboxq) {
 					mod_timer(&phba->hb_tmofunc,
 						 jiffies +
-						 HZ * LPFC_HB_MBOX_INTERVAL);
+						 msecs_to_jiffies(1000 *
+						 LPFC_HB_MBOX_INTERVAL));
 					return;
 				}
 
@@ -1120,7 +1132,8 @@
 							phba->mbox_mem_pool);
 					mod_timer(&phba->hb_tmofunc,
 						jiffies +
-						HZ * LPFC_HB_MBOX_INTERVAL);
+						msecs_to_jiffies(1000 *
+						LPFC_HB_MBOX_INTERVAL));
 					return;
 				}
 				phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@
 				phba->skipped_hb = jiffies;
 
 			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+				 jiffies +
+				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
 			return;
 		} else {
 			/*
@@ -1150,7 +1164,8 @@
 					jiffies_to_msecs(jiffies
 						 - phba->last_completion_time));
 			mod_timer(&phba->hb_tmofunc,
-				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+				jiffies +
+				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
 		}
 	}
 }
@@ -1191,7 +1206,7 @@
  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  * other than Port Error 6 has been detected.
  **/
-static void
+void
 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
 	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@
 	struct lpfc_vport *vport;
 	struct lpfc_vport **vports;
 	int i;
+	bool vpis_cleared = false;
 
 	if (!phba)
 		return 0;
@@ -2656,6 +2672,10 @@
 			lpfc_unblock_mgmt_io(phba);
 			return 1;
 		}
+		spin_lock_irq(&phba->hbalock);
+		if (!phba->sli4_hba.max_cfg_param.vpi_used)
+			vpis_cleared = true;
+		spin_unlock_irq(&phba->hbalock);
 	} else {
 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
 			lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@
 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-			if (phba->sli_rev == LPFC_SLI_REV4)
+			if (phba->sli_rev == LPFC_SLI_REV4) {
 				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+				if ((vpis_cleared) &&
+				    (vports[i]->port_type !=
+					LPFC_PHYSICAL_PORT))
+					vports[i]->vpi = 0;
+			}
 			spin_unlock_irq(shost->host_lock);
 		}
 		lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@
 	struct lpfc_iocbq *io, *io_next;
 
 	spin_lock_irq(&phba->hbalock);
+
 	/* Release all the lpfc_scsi_bufs maintained by this host. */
-	spin_lock(&phba->scsi_buf_list_lock);
-	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+
+	spin_lock(&phba->scsi_buf_list_put_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+				 list) {
 		list_del(&sb->list);
 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
 			      sb->dma_handle);
 		kfree(sb);
 		phba->total_scsi_bufs--;
 	}
-	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
+
+	spin_lock(&phba->scsi_buf_list_get_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+				 list) {
+		list_del(&sb->list);
+		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+			      sb->dma_handle);
+		kfree(sb);
+		phba->total_scsi_bufs--;
+	}
+	spin_unlock(&phba->scsi_buf_list_get_lock);
 
 	/* Release all the lpfc_iocbq entries maintained by this host. */
 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@
 			phba->sli4_hba.scsi_xri_cnt,
 			phba->sli4_hba.scsi_xri_max);
 
-	spin_lock_irq(&phba->scsi_buf_list_lock);
-	list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
-	spin_unlock_irq(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
 		/* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@
 				      psb->dma_handle);
 			kfree(psb);
 		}
-		spin_lock_irq(&phba->scsi_buf_list_lock);
+		spin_lock_irq(&phba->scsi_buf_list_get_lock);
 		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-		spin_unlock_irq(&phba->scsi_buf_list_lock);
+		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 	}
 
 	/* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@
 		psb->cur_iocbq.sli4_lxritag = lxri;
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
-	spin_lock_irq(&phba->scsi_buf_list_lock);
-	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-	spin_unlock_irq(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	return 0;
 
@@ -3197,14 +3242,15 @@
 		stat = 1;
 		goto finished;
 	}
-	if (time >= 30 * HZ) {
+	if (time >= msecs_to_jiffies(30 * 1000)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0461 Scanning longer than 30 "
 				"seconds.  Continuing initialization\n");
 		stat = 1;
 		goto finished;
 	}
-	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+	if (time >= msecs_to_jiffies(15 * 1000) &&
+	    phba->link_state <= LPFC_LINK_DOWN) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0465 Link down longer than 15 "
 				"seconds.  Continuing initialization\n");
@@ -3216,7 +3262,7 @@
 		goto finished;
 	if (vport->num_disc_nodes || vport->fc_prli_sent)
 		goto finished;
-	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
 		goto finished;
 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
 		goto finished;
@@ -4215,7 +4261,8 @@
 			 * If there are other active VLinks present,
 			 * re-instantiate the Vlink using FDISC.
 			 */
-			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			mod_timer(&ndlp->nlp_delayfunc,
+				  jiffies + msecs_to_jiffies(1000));
 			shost = lpfc_shost_from_vport(vport);
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,24 +4754,53 @@
 		return -ENOMEM;
 
 	/*
-	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
 	 * used to create the sg_dma_buf_pool must be dynamically calculated.
-	 * 2 segments are added since the IOCB needs a command and response bde.
 	 */
-	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-		sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
 
-	if (phba->cfg_enable_bg) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-		phba->cfg_sg_dma_buf_size +=
-			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-	}
-
-	/* Also reinitialize the host templates with new values. */
+	/* Initialize the host templates with the configured values. */
 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
+	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
+	if (phba->cfg_enable_bg) {
+		/*
+		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+		 * the FCP rsp, and a BDE for each. Since we have no control
+		 * over how many protection data segments the SCSI Layer
+		 * will hand us (ie: there could be one for every block
+		 * in the IO), we just allocate enough BDEs to accommodate
+		 * our max amount and we need to limit lpfc_sg_seg_cnt to
+		 * minimize the risk of running out.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) +
+			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+	} else {
+		/*
+		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
+		 * the FCP rsp, a BDE for each, and a BDE for up to
+		 * cfg_sg_seg_cnt data segments.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) +
+			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+		/* Total BDEs in BPL for scsi_sg_list */
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+			phba->cfg_total_seg_cnt);
+
 	phba->max_vpi = LPFC_MAX_VPI;
 	/* This will be set to correct value after config_port mbox */
 	phba->max_vports = 0;
@@ -4789,13 +4865,13 @@
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+	struct lpfc_vector_map_info *cpup;
 	struct lpfc_sli *psli;
 	LPFC_MBOXQ_t *mboxq;
-	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+	int rc, i, hbq_count, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
-	int longs, sli_family;
-	int sges_per_segment;
+	int longs;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@
 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-	/* With BlockGuard we can have multiple SGEs per Data Segemnt */
-	sges_per_segment = 1;
-	if (phba->cfg_enable_bg)
-		sges_per_segment = 2;
-
 	/*
 	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
 	 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@
 			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
 	if (!phba->sli.ring)
 		return -ENOMEM;
-	/*
-	 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-	 * used to create the sg_dma_buf_pool must be dynamically calculated.
-	 * 2 segments are added since the IOCB needs a command and response bde.
-	 * To insure that the scsi sgl does not cross a 4k page boundary only
-	 * sgl sizes of must be a power of 2.
-	 */
-	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-		    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
-		    sizeof(struct sli4_sge)));
 
-	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
-	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
-	switch (sli_family) {
-	case LPFC_SLI_INTF_FAMILY_BE2:
-	case LPFC_SLI_INTF_FAMILY_BE3:
-		/* There is a single hint for BE - 2 pages per BPL. */
-		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
-		    LPFC_SLI_INTF_SLI_HINT1_1)
-			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-		break;
-	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
-	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
-	default:
-		break;
+	/*
+	 * It doesn't matter what family our adapter is in, we are
+	 * limited to 2 pages (512 SGEs) for our SGL.
+	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+	 */
+	max_buf_size = (2 * SLI4_PAGE_SIZE);
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+
+	/*
+	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 */
+
+	if (phba->cfg_enable_bg) {
+		/*
+		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+		 * the FCP rsp, and a SGE for each. Since we have no control
+		 * over how many protection data segments the SCSI Layer
+		 * will hand us (ie: there could be one for every block
+		 * in the IO), we just allocate enough SGEs to accommodate
+		 * our max amount and we need to limit lpfc_sg_seg_cnt to
+		 * minimize the risk of running out.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) + max_buf_size;
+
+		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+	} else {
+		/*
+		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
+		 * the FCP rsp, a SGE for each, and a SGE for up to
+		 * cfg_sg_seg_cnt data segments.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) +
+			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+		/* Total SGEs for scsi_sg_list */
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+		/*
+		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
+		 * to post 1 page for the SGL.
+		 */
 	}
 
-	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
-	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
-	     dma_buf_size = dma_buf_size << 1)
-		;
-	if (dma_buf_size == max_buf_size)
-		phba->cfg_sg_seg_cnt = (dma_buf_size -
-			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
-			(2 * sizeof(struct sli4_sge))) /
-				sizeof(struct sli4_sge);
-	phba->cfg_sg_dma_buf_size = dma_buf_size;
+	/* Initialize the host templates with the updated values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+	else
+		phba->cfg_sg_dma_buf_size =
+			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+			phba->cfg_total_seg_cnt);
 
 	/* Initialize buffer queue management fields */
 	hbq_count = lpfc_sli_hbq_count();
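
The block above sizes the per-command DMA buffer as FCP cmnd + FCP rsp + one SGE per segment (two SGEs being reserved for the cmnd and rsp), then rounds the SLI4 result up to a 4K page multiple. A sketch of that arithmetic with stand-in struct sizes (the sizes below are assumptions for illustration, not the driver's real layouts):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SZ		4096	/* SLI4_PAGE_SIZE */
#define FCP_CMND_SZ	32	/* assumed sizeof(struct fcp_cmnd) */
#define FCP_RSP_SZ	96	/* assumed sizeof(struct fcp_rsp) */
#define SGE_SZ		16	/* assumed sizeof(struct sli4_sge) */

static size_t page_align(size_t n)
{
	return (n + PAGE_SZ - 1) & ~((size_t)PAGE_SZ - 1);
}

int main(void)
{
	int sg_seg_cnt = 64;	/* the lpfc_sg_seg_cnt module parameter */
	/* +2: one SGE each for the FCP cmnd and the FCP rsp */
	size_t buf = FCP_CMND_SZ + FCP_RSP_SZ +
		     (size_t)(sg_seg_cnt + 2) * SGE_SZ;

	printf("raw %zu -> page aligned %zu\n", buf, page_align(buf));
	return 0;
}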
@@ -5104,6 +5203,26 @@
 		goto out_free_fcp_eq_hdl;
 	}
 
+	phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
+					 phba->sli4_hba.num_present_cpu),
+					 GFP_KERNEL);
+	if (!phba->sli4_hba.cpu_map) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3327 Failed allocate memory for msi-x "
+				"interrupt vector mapping\n");
+		rc = -ENOMEM;
+		goto out_free_msix;
+	}
+	/* Initialize io channels for round robin */
+	cpup = phba->sli4_hba.cpu_map;
+	rc = 0;
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		cpup->channel_id = rc;
+		rc++;
+		if (rc >= phba->cfg_fcp_io_channel)
+			rc = 0;
+	}
+
 	/*
 	 * Enable sr-iov virtual functions if supported and configured
 	 * through the module parameter.
@@ -5123,6 +5242,8 @@
 
 	return 0;
 
+out_free_msix:
+	kfree(phba->sli4_hba.msix_entries);
 out_free_fcp_eq_hdl:
 	kfree(phba->sli4_hba.fcp_eq_hdl);
 out_free_fcf_rr_bmask:
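
The cpu_map initialization in this hunk pre-assigns IO channels to CPUs round-robin, so every present CPU has a channel even before MSI-X affinity runs. A minimal sketch of that wraparound (counts are illustrative):

#include <stdio.h>

int main(void)
{
	int num_cpus = 8;	/* num_present_cpu, assumed */
	int num_channels = 4;	/* cfg_fcp_io_channel, assumed */
	int chan = 0;

	for (int cpu = 0; cpu < num_cpus; cpu++) {
		printf("cpu %d -> channel %d\n", cpu, chan);
		if (++chan >= num_channels)	/* wrap, like the loop above */
			chan = 0;
	}
	return 0;
}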
@@ -5152,6 +5273,11 @@
 {
 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
+	kfree(phba->sli4_hba.cpu_map);
+	phba->sli4_hba.num_present_cpu = 0;
+	phba->sli4_hba.num_online_cpu = 0;
+
 	/* Free memory allocated for msi-x interrupt vector entries */
 	kfree(phba->sli4_hba.msix_entries);
 
@@ -5260,8 +5386,10 @@
 	init_waitqueue_head(&phba->work_waitq);
 
 	/* Initialize the scsi buffer list used by driver for scsi IO */
-	spin_lock_init(&phba->scsi_buf_list_lock);
-	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+	spin_lock_init(&phba->scsi_buf_list_get_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+	spin_lock_init(&phba->scsi_buf_list_put_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
 	/* Initialize the fabric iocb list */
 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@
 	int cfg_fcp_io_channel;
 	uint32_t cpu;
 	uint32_t i = 0;
+	uint32_t j = 0;
 
 
 	/*
@@ -6706,15 +6835,21 @@
 	/* Sanity check on HBA EQ parameters */
 	cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
 
-	/* It doesn't make sense to have more io channels then CPUs */
-	for_each_online_cpu(cpu) {
-		i++;
+	/* It doesn't make sense to have more io channels than online CPUs */
+	for_each_present_cpu(cpu) {
+		if (cpu_online(cpu))
+			i++;
+		j++;
 	}
+	phba->sli4_hba.num_online_cpu = i;
+	phba->sli4_hba.num_present_cpu = j;
+
 	if (i < cfg_fcp_io_channel) {
 		lpfc_printf_log(phba,
 				KERN_ERR, LOG_INIT,
 				"3188 Reducing IO channels to match number of "
-				"CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+				"online CPUs: from %d to %d\n",
+				cfg_fcp_io_channel, i);
 		cfg_fcp_io_channel = i;
 	}
 
@@ -7743,8 +7878,13 @@
 
 out:
 	/* Catch the not-ready port failure after a port reset. */
-	if (num_resets >= MAX_IF_TYPE_2_RESETS)
+	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3317 HBA not functional: IP Reset Failed "
+				"after (%d) retries, try: "
+				"echo fw_reset > board_mode\n", num_resets);
 		rc = -ENODEV;
+	}
 
 	return rc;
 }
@@ -8209,6 +8349,269 @@
 }
 
 /**
+ * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Find next available CPU to use for IRQ to CPU affinity.
+ */
+static int
+lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+{
+	struct lpfc_vector_map_info *cpup;
+	int cpu;
+
+	cpup = phba->sli4_hba.cpu_map;
+	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+		/* CPU must be online */
+		if (cpu_online(cpu)) {
+			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+			    (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
+			    (cpup->phys_id == phys_id)) {
+				return cpu;
+			}
+		}
+		cpup++;
+	}
+
+	/*
+	 * If we get here, we have used ALL CPUs for the specific
+	 * phys_id. Now we need to clear out lpfc_used_cpu and start
+	 * reusing CPUs.
+	 */
+
+	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+		if (lpfc_used_cpu[cpu] == phys_id)
+			lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+	}
+
+	cpup = phba->sli4_hba.cpu_map;
+	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+		/* CPU must be online */
+		if (cpu_online(cpu)) {
+			if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+			    (cpup->phys_id == phys_id)) {
+				return cpu;
+			}
+		}
+		cpup++;
+	}
+	return LPFC_VECTOR_MAP_EMPTY;
+}
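
lpfc_find_next_cpu() is a two-pass search: prefer an online CPU on the requested package that no vector has claimed yet, and once the package is exhausted, forget previous claims for that package and reuse its CPUs. A userspace sketch of that shape (the used and phys arrays and their contents are illustrative assumptions):

#include <stdio.h>
#include <string.h>

#define NCPU	8
#define EMPTY	0xffff		/* stands in for LPFC_VECTOR_MAP_EMPTY */

static unsigned short used[NCPU];	/* phys_id that claimed each cpu */
static const unsigned short phys[NCPU] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int find_next_cpu(unsigned short phys_id)
{
	for (int pass = 0; pass < 2; pass++) {
		for (int cpu = 0; cpu < NCPU; cpu++)
			if (phys[cpu] == phys_id && used[cpu] == EMPTY)
				return cpu;
		/* package exhausted: release its CPUs and search again */
		for (int cpu = 0; cpu < NCPU; cpu++)
			if (used[cpu] == phys_id)
				used[cpu] = EMPTY;
	}
	return -1;
}

int main(void)
{
	memset(used, 0xff, sizeof(used));	/* every slot starts EMPTY */
	for (int vec = 0; vec < 6; vec++) {
		int cpu = find_next_cpu(0);
		used[cpu] = 0;			/* claim it for package 0 */
		printf("vector %d -> cpu %d\n", vec, cpu);
	}
	return 0;
}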
+
+/**
+ * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
+ * @phba:	pointer to lpfc hba data structure.
+ * @vectors:	number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
+ * affinitization across multiple physical CPUs (NUMA nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+	int i, idx, saved_chann, used_chann, cpu, phys_id;
+	int max_phys_id, num_io_channel, first_cpu;
+	struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+	struct cpuinfo_x86 *cpuinfo;
+#endif
+	struct cpumask *mask;
+	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+	/* If there is no mapping, just return */
+	if (!phba->cfg_fcp_cpu_map)
+		return 1;
+
+	/* Init cpu_map array */
+	memset(phba->sli4_hba.cpu_map, 0xff,
+	       (sizeof(struct lpfc_vector_map_info) *
+		phba->sli4_hba.num_present_cpu));
+
+	max_phys_id = 0;
+	phys_id = 0;
+	num_io_channel = 0;
+	first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+	/* Update CPU map with physical id and core id of each CPU */
+	cpup = phba->sli4_hba.cpu_map;
+	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+		cpuinfo = &cpu_data(cpu);
+		cpup->phys_id = cpuinfo->phys_proc_id;
+		cpup->core_id = cpuinfo->cpu_core_id;
+#else
+		/* No distinction between CPUs for other platforms */
+		cpup->phys_id = 0;
+		cpup->core_id = 0;
+#endif
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3328 CPU physid %d coreid %d\n",
+				cpup->phys_id, cpup->core_id);
+
+		if (cpup->phys_id > max_phys_id)
+			max_phys_id = cpup->phys_id;
+		cpup++;
+	}
+
+	/* Now associate the HBA vectors with specific CPUs */
+	for (idx = 0; idx < vectors; idx++) {
+		cpup = phba->sli4_hba.cpu_map;
+		cpu = lpfc_find_next_cpu(phba, phys_id);
+		if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+			/* Try for all phys_id's */
+			for (i = 1; i < max_phys_id; i++) {
+				phys_id++;
+				if (phys_id > max_phys_id)
+					phys_id = 0;
+				cpu = lpfc_find_next_cpu(phba, phys_id);
+				if (cpu == LPFC_VECTOR_MAP_EMPTY)
+					continue;
+				goto found;
+			}
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3329 Cannot set affinity: "
+					"Error mapping vector %d (%d)\n",
+					idx, vectors);
+			return 0;
+		}
+found:
+		cpup += cpu;
+		if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+			lpfc_used_cpu[cpu] = phys_id;
+
+		/* Associate vector with selected CPU */
+		cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+		/* Associate IO channel with selected CPU */
+		cpup->channel_id = idx;
+		num_io_channel++;
+
+		if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+			first_cpu = cpu;
+
+		/* Now affinitize to the selected CPU */
+		mask = &cpup->maskbits;
+		cpumask_clear(mask);
+		cpumask_set_cpu(cpu, mask);
+		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+					  vector, mask);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3330 Set Affinity: CPU %d channel %d "
+				"irq %d (%x)\n",
+				cpu, cpup->channel_id,
+				phba->sli4_hba.msix_entries[idx].vector, i);
+
+		/* Spread vector mapping across multiple physical CPU nodes */
+		phys_id++;
+		if (phys_id > max_phys_id)
+			phys_id = 0;
+	}
+
+	/*
+	 * Finally fill in the IO channel for any remaining CPUs.
+	 * At this point, all IO channels have been assigned to a specific
+	 * MSIx vector, mapped to a specific CPU.
+	 * Base the remaining IO channel assignments on the IO channels
+	 * already assigned to other CPUs on the same phys_id.
+	 */
+	for (i = 0; i <= max_phys_id; i++) {
+		/*
+		 * If there are no io channels already mapped to
+		 * this phys_id, just round robin thru the io_channels.
+		 * Setup chann[] for round robin.
+		 */
+		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+			chann[idx] = idx;
+
+		saved_chann = 0;
+		used_chann = 0;
+
+		/*
+		 * First build a list of IO channels already assigned
+		 * to this phys_id before reassigning the same IO
+		 * channels to the remaining CPUs.
+		 */
+		cpup = phba->sli4_hba.cpu_map;
+		cpu = first_cpu;
+		cpup += cpu;
+		for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+		     idx++) {
+			if (cpup->phys_id == i) {
+				/*
+				 * Save any IO channels that are
+				 * already mapped to this phys_id.
+				 */
+				if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+					chann[saved_chann] =
+						cpup->channel_id;
+					saved_chann++;
+					goto out;
+				}
+
+				/* See if we are using round-robin */
+				if (saved_chann == 0)
+					saved_chann =
+						phba->cfg_fcp_io_channel;
+
+				/* Associate next IO channel with CPU */
+				cpup->channel_id = chann[used_chann];
+				num_io_channel++;
+				used_chann++;
+				if (used_chann == saved_chann)
+					used_chann = 0;
+
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+						"3331 Set IO_CHANN "
+						"CPU %d channel %d\n",
+						idx, cpup->channel_id);
+			}
+out:
+			cpu++;
+			if (cpu >= phba->sli4_hba.num_present_cpu) {
+				cpup = phba->sli4_hba.cpu_map;
+				cpu = 0;
+			} else {
+				cpup++;
+			}
+		}
+	}
+
+	if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+		cpup = phba->sli4_hba.cpu_map;
+		for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+			if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+				cpup->channel_id = 0;
+				num_io_channel++;
+
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+						"3332 Assign IO_CHANN "
+						"CPU %d channel %d\n",
+						idx, cpup->channel_id);
+			}
+			cpup++;
+		}
+	}
+
+	/* Sanity check */
+	if (num_io_channel != phba->sli4_hba.num_present_cpu)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3333 Set affinity mismatch: "
+				"%d chann != %d cpus: %d vectors\n",
+				num_io_channel, phba->sli4_hba.num_present_cpu,
+				vectors);
+
+	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+	return 1;
+}
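
irq_set_affinity_hint() above steers each MSI-X vector toward a mask containing exactly one CPU. A rough userspace analog of building and applying such a mask (sched_setaffinity() pins a task rather than an interrupt; it is shown only for the mask handling):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;
	int cpu = 2;	/* the CPU the mapping logic selected, assumed */

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);	/* one bit set, like cpumask_set_cpu() */
	if (sched_setaffinity(0, sizeof(mask), &mask))
		perror("sched_setaffinity");
	else
		printf("pinned to CPU %d\n", cpu);
	return 0;
}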
+
+/**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
  *
@@ -8259,9 +8662,7 @@
 				phba->sli4_hba.msix_entries[index].vector,
 				phba->sli4_hba.msix_entries[index].entry);
 
-	/*
-	 * Assign MSI-X vectors to interrupt handlers
-	 */
+	/* Assign MSI-X vectors to interrupt handlers */
 	for (index = 0; index < vectors; index++) {
 		memset(&phba->sli4_hba.handler_name[index], 0, 16);
 		sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@
 				phba->cfg_fcp_io_channel, vectors);
 		phba->cfg_fcp_io_channel = vectors;
 	}
+
+	lpfc_sli4_set_affinity(phba, vectors);
 	return rc;
 
 cfg_fail_out:
@@ -9213,15 +9616,15 @@
 	/* Block all SCSI devices' I/Os on the host */
 	lpfc_scsi_dev_block(phba);
 
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
 
 	/* Disable interrupt and pci device */
 	lpfc_sli_disable_intr(phba);
 	pci_disable_device(phba->pcidev);
-
-	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
-	lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -9966,6 +10369,9 @@
 	/* Block all SCSI devices' I/Os on the host */
 	lpfc_scsi_dev_block(phba);
 
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
 
@@ -9973,9 +10379,6 @@
 	lpfc_sli4_disable_intr(phba);
 	lpfc_sli4_queue_destroy(phba);
 	pci_disable_device(phba->pcidev);
-
-	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
-	lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -10535,6 +10938,7 @@
 static int __init
 lpfc_init(void)
 {
+	int cpu;
 	int error = 0;
 
 	printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@
 			return -ENOMEM;
 		}
 	}
+
+	/* Initialize in case vector mapping is needed */
+	for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
+		lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+
 	error = pci_register_driver(&lpfc_driver);
 	if (error) {
 		fc_release_transport(lpfc_transport_template);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index baf53e6..2a4e5d2 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -37,6 +37,7 @@
 #define LOG_EVENT	0x00010000	/* CT,TEMP,DUMP, logging */
 #define LOG_FIP		0x00020000	/* FIP events */
 #define LOG_FCP_UNDER	0x00040000	/* FCP underruns errors */
+#define LOG_SCSI_CMD	0x00080000	/* ALL SCSI commands */
 #define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a7a9fa4..41363db 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2149,18 +2149,21 @@
 
 	/* Only FC supports upd bit */
 	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
-	    (vport->fc_flag & FC_VFI_REGISTERED)) {
+	    (vport->fc_flag & FC_VFI_REGISTERED) &&
+	    (!phba->fc_topology_changed)) {
 		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
 		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
 	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
 			"3134 Register VFI, mydid:x%x, fcfi:%d, "
-			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
+			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
+			" port_state:x%x topology chg:%d\n",
 			vport->fc_myDID,
 			phba->fcf.fcfi,
 			phba->sli4_hba.vfi_ids[vport->vfi],
 			phba->vpi_ids[vport->vpi],
-			reg_vfi->wwn[0], reg_vfi->wwn[1]);
+			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+			vport->port_state, phba->fc_topology_changed);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index cd86069..812d0cd 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -64,18 +64,26 @@
 	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 	int i;
 
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Calculate alignment */
+		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+			i = phba->cfg_sg_dma_buf_size;
+		else
+			i = SLI4_PAGE_SIZE;
+
 		phba->lpfc_scsi_dma_buf_pool =
 			pci_pool_create("lpfc_scsi_dma_buf_pool",
 				phba->pcidev,
 				phba->cfg_sg_dma_buf_size,
-				phba->cfg_sg_dma_buf_size,
+				i,
 				0);
-	else
+	} else {
 		phba->lpfc_scsi_dma_buf_pool =
 			pci_pool_create("lpfc_scsi_dma_buf_pool",
 				phba->pcidev, phba->cfg_sg_dma_buf_size,
 				align, 0);
+	}
+
 	if (!phba->lpfc_scsi_dma_buf_pool)
 		goto fail;
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 82f4d35..31e9b92 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -332,9 +332,11 @@
 
 	/* PLOGI chkparm OK */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+			 "x%x x%x x%x\n",
 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
-			 ndlp->nlp_rpi);
+			 ndlp->nlp_rpi, vport->port_state,
+			 vport->fc_flag);
 
 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
 		ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@
 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 
 	/* 1 sec timeout */
-	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@
 			 * If there are other active VLinks present,
 			 * re-instantiate the Vlink using FDISC.
 			 */
-			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			mod_timer(&ndlp->nlp_delayfunc,
+				  jiffies + msecs_to_jiffies(1000));
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_DELAY_TMO;
 			spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@
 		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 		/* Only try to re-login if this is NOT a Fabric Node */
-		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000 * 1));
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_DELAY_TMO;
 		spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@
 	}
 
 	/* Put ndlp in npr state set plogi timer for 1 sec */
-	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@
 	if ((irsp->ulpStatus) ||
 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
 		/* 1 sec timeout */
-		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000));
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_DELAY_TMO;
 		spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@
 		}
 
 		/* Put ndlp in npr state set plogi timer for 1 sec */
-		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000 * 1));
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_DELAY_TMO;
 		spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@
 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
-		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000 * 1));
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_DELAY_TMO;
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 74b8710..8523b278e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -24,6 +24,8 @@
 #include <linux/export.h>
 #include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
-int _dump_buf_done;
+int _dump_buf_done = 1;
 
 static char *dif_op_str[] = {
 	"PROT_NORMAL",
@@ -66,6 +68,10 @@
 	__be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
+#define scsi_prot_flagged(sc, flg)	sc
+#endif
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -534,7 +540,16 @@
 	dma_addr_t pdma_phys_fcp_rsp;
 	dma_addr_t pdma_phys_bpl;
 	uint16_t iotag;
-	int bcnt;
+	int bcnt, bpl_size;
+
+	bpl_size = phba->cfg_sg_dma_buf_size -
+		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+			 num_to_alloc, phba->cfg_sg_dma_buf_size,
+			 (int)sizeof(struct fcp_cmnd),
+			 (int)sizeof(struct fcp_rsp), bpl_size);
 
 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@
 			     struct list_head *post_sblist, int sb_count)
 {
 	struct lpfc_scsi_buf *psb, *psb_next;
-	int status;
+	int status, sgl_size;
 	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
 	dma_addr_t pdma_phys_bpl1;
 	int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@
 	if (sb_count <= 0)
 		return -EINVAL;
 
+	sgl_size = phba->cfg_sg_dma_buf_size -
+		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
 	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
 		list_del_init(&psb->list);
 		block_cnt++;
@@ -803,7 +821,7 @@
 				post_cnt = block_cnt;
 			} else if (block_cnt == 1) {
 				/* last single sgl with non-contiguous xri */
-				if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+				if (sgl_size > SGL_PAGE_SIZE)
 					pdma_phys_bpl1 = psb->dma_phys_bpl +
 								SGL_PAGE_SIZE;
 				else
@@ -885,9 +903,12 @@
 	int num_posted, rc = 0;
 
 	/* get all SCSI buffers need to repost to a local list */
-	spin_lock_irq(&phba->scsi_buf_list_lock);
-	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-	spin_unlock_irq(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	/* post the list of scsi buffer sgls to port if available */
 	if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@
 	IOCB_t *iocb;
 	dma_addr_t pdma_phys_fcp_cmd;
 	dma_addr_t pdma_phys_fcp_rsp;
-	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+	dma_addr_t pdma_phys_bpl;
 	uint16_t iotag, lxri = 0;
-	int bcnt, num_posted;
+	int bcnt, num_posted, sgl_size;
 	LIST_HEAD(prep_sblist);
 	LIST_HEAD(post_sblist);
 	LIST_HEAD(scsi_sblist);
 
+	sgl_size = phba->cfg_sg_dma_buf_size -
+		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+			 (int)sizeof(struct fcp_cmnd),
+			 (int)sizeof(struct fcp_rsp));
+
 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
 		if (!psb)
@@ -948,6 +978,15 @@
 		}
 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
+		/* Page alignment is CRITICAL, double check to be sure */
+		if (((unsigned long)(psb->data) &
+		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+				      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
@@ -968,17 +1007,14 @@
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
 		psb->fcp_bpl = psb->data;
-		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
-			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		psb->fcp_cmnd = (psb->data + sgl_size);
 		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
 					sizeof(struct fcp_cmnd));
 
 		/* Initialize local short-hand pointers. */
 		sgl = (struct sli4_sge *)psb->fcp_bpl;
 		pdma_phys_bpl = psb->dma_handle;
-		pdma_phys_fcp_cmd =
-			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
-			 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
 		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 
 		/*
@@ -1020,17 +1056,13 @@
 		iocb->ulpLe = 1;
 		iocb->ulpClass = CLASS3;
 		psb->cur_iocbq.context1 = psb;
-		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
-		else
-			pdma_phys_bpl1 = 0;
 		psb->dma_phys_bpl = pdma_phys_bpl;
 
 		/* add the scsi buffer to a post list */
 		list_add_tail(&psb->list, &post_sblist);
-		spin_lock_irq(&phba->scsi_buf_list_lock);
+		spin_lock_irq(&phba->scsi_buf_list_get_lock);
 		phba->sli4_hba.scsi_xri_cnt++;
-		spin_unlock_irq(&phba->scsi_buf_list_lock);
+		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
 			"3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@
 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
-	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-	unsigned long iflag = 0;
+	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+	unsigned long gflag = 0;
+	unsigned long pflag = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
-	if (lpfc_cmd) {
-		lpfc_cmd->seg_cnt = 0;
-		lpfc_cmd->nonsg_phys = 0;
-		lpfc_cmd->prot_seg_cnt = 0;
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+			 list);
+	if (!lpfc_cmd) {
+		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		list_splice(&phba->lpfc_scsi_buf_list_put,
+			    &phba->lpfc_scsi_buf_list_get);
+		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+		list_remove_head(scsi_buf_list_get, lpfc_cmd,
+				 struct lpfc_scsi_buf, list);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
 	return  lpfc_cmd;
 }
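
The rewritten allocator is the classic two-list pool: gets pop from one list under its own lock, frees push onto a second list under another, and only when the get list runs dry are the two spliced, taking the get lock before the put lock as every site in this patch does. A minimal pthread sketch of the shape (names illustrative, not the driver's):

#include <pthread.h>
#include <stddef.h>

struct buf { struct buf *next; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *pool_get(void)
{
	struct buf *b;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {
		/* get list empty: steal everything the putters queued */
		pthread_mutex_lock(&put_lock);
		get_list = put_list;
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	b = get_list;
	if (b)
		get_list = b->next;
	pthread_mutex_unlock(&get_lock);
	return b;
}

static void pool_put(struct buf *b)
{
	pthread_mutex_lock(&put_lock);
	b->next = put_list;
	put_list = b;
	pthread_mutex_unlock(&put_lock);
}

int main(void)
{
	static struct buf b;

	pool_put(&b);
	return pool_get() == &b ? 0 : 1;
}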
 /**
@@ -1107,28 +1145,39 @@
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf *lpfc_cmd ;
-	unsigned long iflag = 0;
+	unsigned long gflag = 0;
+	unsigned long pflag = 0;
 	int found = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
-							list) {
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
 			continue;
 		list_del(&lpfc_cmd->list);
 		found = 1;
-		lpfc_cmd->seg_cnt = 0;
-		lpfc_cmd->nonsg_phys = 0;
-		lpfc_cmd->prot_seg_cnt = 0;
 		break;
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
-						 iflag);
+	if (!found) {
+		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		list_splice(&phba->lpfc_scsi_buf_list_put,
+			    &phba->lpfc_scsi_buf_list_get);
+		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
+				    list) {
+			if (lpfc_test_rrq_active(
+				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+				continue;
+			list_del(&lpfc_cmd->list);
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
 	if (!found)
 		return NULL;
-	else
-		return  lpfc_cmd;
+	return  lpfc_cmd;
 }
 /**
  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@
 {
 	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	psb->seg_cnt = 0;
+	psb->nonsg_phys = 0;
+	psb->prot_seg_cnt = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
 	psb->pCmd = NULL;
-	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 }
 
 /**
@@ -1181,6 +1235,10 @@
 {
 	unsigned long iflag = 0;
 
+	psb->seg_cnt = 0;
+	psb->nonsg_phys = 0;
+	psb->prot_seg_cnt = 0;
+
 	if (psb->exch_busy) {
 		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
 					iflag);
@@ -1190,11 +1248,11 @@
 		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
 					iflag);
 	} else {
-
-		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 		psb->pCmd = NULL;
-		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 	}
 }
 
@@ -1268,6 +1326,7 @@
 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
 			       __func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
+			lpfc_cmd->seg_cnt = 0;
 			scsi_dma_unmap(scsi_cmnd);
 			return 1;
 		}
@@ -2013,9 +2072,21 @@
 	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
 	bf_set(pde6_optx, pde6, txop);
 	bf_set(pde6_oprx, pde6, rxop);
+
+	/*
+	 * We only need to check the data on READs, for WRITEs
+	 * protection data is automatically generated, not checked.
+	 */
 	if (datadir == DMA_FROM_DEVICE) {
-		bf_set(pde6_ce, pde6, checking);
-		bf_set(pde6_re, pde6, checking);
+		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+			bf_set(pde6_ce, pde6, checking);
+		else
+			bf_set(pde6_ce, pde6, 0);
+
+		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+			bf_set(pde6_re, pde6, checking);
+		else
+			bf_set(pde6_re, pde6, 0);
 	}
 	bf_set(pde6_ai, pde6, 1);
 	bf_set(pde6_ae, pde6, 0);
@@ -2145,6 +2216,10 @@
 
 	split_offset = 0;
 	do {
+		/* Check to see if we ran out of space */
+		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+			return num_bde + 3;
+
 		/* setup PDE5 with what we have */
 		pde5 = (struct lpfc_pde5 *) bpl;
 		memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@
 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
 		bf_set(pde6_optx, pde6, txop);
 		bf_set(pde6_oprx, pde6, rxop);
-		bf_set(pde6_ce, pde6, checking);
-		bf_set(pde6_re, pde6, checking);
+
+		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+			bf_set(pde6_ce, pde6, checking);
+		else
+			bf_set(pde6_ce, pde6, 0);
+
+		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+			bf_set(pde6_re, pde6, checking);
+		else
+			bf_set(pde6_re, pde6, 0);
+
 		bf_set(pde6_ai, pde6, 1);
 		bf_set(pde6_ae, pde6, 0);
 		bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@
 		pgdone = 0;
 		subtotal = 0; /* total bytes processed for current prot grp */
 		while (!pgdone) {
+			/* Check to see if we ran out of space */
+			if (num_bde >= phba->cfg_total_seg_cnt)
+				return num_bde + 1;
+
 			if (!sgde) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@
 	struct sli4_sge_diseed *diseed = NULL;
 	dma_addr_t physaddr;
 	int i = 0, num_sge = 0, status;
-	int datadir = sc->sc_data_direction;
 	uint32_t reftag;
 	unsigned blksize;
 	uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@
 	diseed->ref_tag = cpu_to_le32(reftag);
 	diseed->ref_tag_tran = diseed->ref_tag;
 
+	/*
+	 * We only need to check the data on READs, for WRITEs
+	 * protection data is automatically generated, not checked.
+	 */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		else
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		else
+			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+	}
+
 	/* setup DISEED with the rest of the info */
 	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
 	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-	if (datadir == DMA_FROM_DEVICE) {
-		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
-	}
+
 	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
 	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2497,6 +2597,10 @@
 
 	split_offset = 0;
 	do {
+		/* Check to see if we ran out of space */
+		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+			return num_sge + 3;
+
 		/* setup DISEED with what we have */
 		diseed = (struct sli4_sge_diseed *) sgl;
 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@
 		diseed->ref_tag = cpu_to_le32(reftag);
 		diseed->ref_tag_tran = diseed->ref_tag;
 
+		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+		} else {
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+			/*
+			 * When in this mode, the hardware will replace
+			 * the guard tag from the host with a
+			 * newly generated good CRC for the wire.
+			 * Switch to raw mode here to avoid this
+			 * behavior. What the host sends gets put on the wire.
+			 */
+			if (txop == BG_OP_IN_CRC_OUT_CRC) {
+				txop = BG_OP_RAW_MODE;
+				rxop = BG_OP_RAW_MODE;
+			}
+		}
+
+		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		else
+			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
 		/* setup DISEED with the rest of the info */
 		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
 		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+
 		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
 		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2556,6 +2683,10 @@
 		pgdone = 0;
 		subtotal = 0; /* total bytes processed for current prot grp */
 		while (!pgdone) {
+			/* Check to see if we ran out of space */
+			if (num_sge >= phba->cfg_total_seg_cnt)
+				return num_sge + 1;
+
 			if (!sgde) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 					"9086 BLKGRD:%s Invalid data segment\n",
@@ -2670,6 +2801,47 @@
 }
 
 /**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+		       struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+	int fcpdl;
+
+	fcpdl = scsi_bufflen(sc);
+
+	/* Check if there is protection data on the wire */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		/* Read */
+		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+			return fcpdl;
+
+	} else {
+		/* Write */
+		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+			return fcpdl;
+	}
+
+	/*
+	 * If we are in DIF Type 1 mode every data block has an 8 byte
+	 * DIF (trailer) attached to it. Must adjust FCP data length.
+	 */
+	if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
+		fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+
+	return fcpdl;
+}
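
lpfc_bg_scsi_adjust_dl() grows the FCP data length by one 8-byte DIF tuple per logical block whenever protection data actually travels on the wire. A numeric sketch (512-byte blocks assumed):

#include <stdio.h>

int main(void)
{
	int fcpdl = 4096;	/* bytes requested by the SCSI layer */
	int blksize = 512;	/* logical block size, assumed */

	fcpdl += (fcpdl / blksize) * 8;	/* 8 blocks -> 64 extra bytes */
	printf("wire length = %d\n", fcpdl);	/* prints 4160 */
	return 0;
}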
+
+/**
  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
@@ -2689,8 +2861,7 @@
 	uint32_t num_bde = 0;
 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
 	int prot_group_type = 0;
-	int diflen, fcpdl;
-	unsigned blksize;
+	int fcpdl;
 
 	/*
 	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@
 			return 1;
 
 		lpfc_cmd->seg_cnt = datasegcnt;
-		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9067 BLKGRD: %s: Too many sg segments"
-					" from dma_map_sg.  Config %d, seg_cnt"
-					" %d\n",
-					__func__, phba->cfg_sg_seg_cnt,
-					lpfc_cmd->seg_cnt);
-			scsi_dma_unmap(scsi_cmnd);
-			return 1;
-		}
+
+		/* First check if data segment count from SCSI Layer is good */
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+			goto err;
 
 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
 		switch (prot_group_type) {
 		case LPFC_PG_TYPE_NO_DIF:
+
+			/* Here we need to add a PDE5 and PDE6 to the count */
+			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+				goto err;
+
 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
 					datasegcnt);
 			/* we should have 2 or more entries in buffer list */
 			if (num_bde < 2)
 				goto err;
 			break;
-		case LPFC_PG_TYPE_DIF_BUF:{
+
+		case LPFC_PG_TYPE_DIF_BUF:
 			/*
 			 * This type indicates that protection buffers are
 			 * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@
 			}
 
 			lpfc_cmd->prot_seg_cnt = protsegcnt;
-			if (lpfc_cmd->prot_seg_cnt
-			    > phba->cfg_prot_sg_seg_cnt) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9068 BLKGRD: %s: Too many prot sg "
-					"segments from dma_map_sg.  Config %d,"
-						"prot_seg_cnt %d\n", __func__,
-						phba->cfg_prot_sg_seg_cnt,
-						lpfc_cmd->prot_seg_cnt);
-				dma_unmap_sg(&phba->pcidev->dev,
-					     scsi_prot_sglist(scsi_cmnd),
-					     scsi_prot_sg_count(scsi_cmnd),
-					     datadir);
-				scsi_dma_unmap(scsi_cmnd);
-				return 1;
-			}
+
+			/*
+			 * There is a minimum of 4 BPLs used for every
+			 * protection data segment.
+			 */
+			if ((lpfc_cmd->prot_seg_cnt * 4) >
+			    (phba->cfg_total_seg_cnt - 2))
+				goto err;
 
 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
 					datasegcnt, protsegcnt);
 			/* we should have 3 or more entries in buffer list */
-			if (num_bde < 3)
+			if ((num_bde < 3) ||
+			    (num_bde > phba->cfg_total_seg_cnt))
 				goto err;
 			break;
-		}
+
 		case LPFC_PG_TYPE_INVALID:
 		default:
+			scsi_dma_unmap(scsi_cmnd);
+			lpfc_cmd->seg_cnt = 0;
+
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 					"9022 Unexpected protection group %i\n",
 					prot_group_type);
@@ -2790,18 +2958,7 @@
 	iocb_cmd->ulpBdeCount = 1;
 	iocb_cmd->ulpLe = 1;
 
-	fcpdl = scsi_bufflen(scsi_cmnd);
-
-	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
-		/*
-		 * We are in DIF Type 1 mode
-		 * Every data block has a 8 byte DIF (trailer)
-		 * attached to it.  Must ajust FCP data length
-		 */
-		blksize = lpfc_cmd_blksize(scsi_cmnd);
-		diflen = (fcpdl / blksize) * 8;
-		fcpdl += diflen;
-	}
+	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
 	/*
@@ -2812,14 +2969,234 @@
 
 	return 0;
 err:
+	if (lpfc_cmd->seg_cnt)
+		scsi_dma_unmap(scsi_cmnd);
+	if (lpfc_cmd->prot_seg_cnt)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+			     scsi_prot_sg_count(scsi_cmnd),
+			     scsi_cmnd->sc_data_direction);
+
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"9023 Could not setup all needed BDE's"
-			"prot_group_type=%d, num_bde=%d\n",
+			"9023 Cannot setup S/G List for HBA "
+			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
 			prot_group_type, num_bde);
+
+	lpfc_cmd->seg_cnt = 0;
+	lpfc_cmd->prot_seg_cnt = 0;
 	return 1;
 }
 
 /*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using a CRC algorithm
+ * via crc_t10dif.
+ */
+uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+	uint16_t crc = 0;
+	uint16_t x;
+
+	crc = crc_t10dif(data, count);
+	x = cpu_to_be16(crc);
+	return x;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using a checksum algorithm
+ * via ip_compute_csum.
+ */
+uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+	uint16_t ret;
+
+	ret = ip_compute_csum(data, count);
+	return ret;
+}
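
crc_t10dif() evaluates the 16-bit CRC defined by T10 (polynomial 0x8BB7, zero initial value, no reflection). A bit-at-a-time sketch that should agree with the kernel's table-driven implementation, shown for clarity rather than speed:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t crc_t10dif_sketch(const uint8_t *data, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		crc ^= (uint16_t)(*data++) << 8;	/* MSB-first */
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ?
				(uint16_t)((crc << 1) ^ 0x8BB7) :
				(uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	const uint8_t blk[4] = { 0xde, 0xad, 0xbe, 0xef };

	printf("guard = 0x%04x\n", crc_t10dif_sketch(blk, sizeof(blk)));
	return 0;
}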
+
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
+void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scatterlist *sgpe; /* s/g prot entry */
+	struct scatterlist *sgde; /* s/g data entry */
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	struct scsi_dif_tuple *src = NULL;
+	uint8_t *data_src = NULL;
+	uint16_t guard_tag, guard_type;
+	uint16_t start_app_tag, app_tag;
+	uint32_t start_ref_tag, ref_tag;
+	int prot, protsegcnt;
+	int err_type, len, data_len;
+	int chk_ref, chk_app, chk_guard;
+	uint16_t sum;
+	unsigned blksize;
+
+	err_type = BGS_GUARD_ERR_MASK;
+	sum = 0;
+	guard_tag = 0;
+
+	/* First check to see if there is protection data to examine */
+	prot = scsi_get_prot_op(cmd);
+	if ((prot == SCSI_PROT_READ_STRIP) ||
+	    (prot == SCSI_PROT_WRITE_INSERT) ||
+	    (prot == SCSI_PROT_NORMAL))
+		goto out;
+
+	/* Currently the driver just supports ref_tag and guard_tag checking */
+	chk_ref = 1;
+	chk_app = 0;
+	chk_guard = 0;
+
+	/* Setup a ptr to the protection data provided by the SCSI host */
+	sgpe = scsi_prot_sglist(cmd);
+	protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+	if (sgpe && protsegcnt) {
+
+		/*
+		 * We will only try to verify guard tag if the segment
+		 * data length is a multiple of the blksize.
+		 */
+		sgde = scsi_sglist(cmd);
+		blksize = lpfc_cmd_blksize(cmd);
+		data_src = (uint8_t *)sg_virt(sgde);
+		data_len = sgde->length;
+		if ((data_len & (blksize - 1)) == 0)
+			chk_guard = 1;
+		guard_type = scsi_host_get_guard(cmd->device->host);
+
+		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+		start_app_tag = src->app_tag;
+		len = sgpe->length;
+		while (src && protsegcnt) {
+			while (len) {
+
+				/*
+				 * First check to see if a protection data
+				 * check is valid
+				 */
+				if ((src->ref_tag == 0xffffffff) ||
+				    (src->app_tag == 0xffff)) {
+					start_ref_tag++;
+					goto skipit;
+				}
+
+				/* App Tag checking */
+				app_tag = src->app_tag;
+				if (chk_app && (app_tag != start_app_tag)) {
+					err_type = BGS_APPTAG_ERR_MASK;
+					goto out;
+				}
+
+				/* Reference Tag checking */
+				ref_tag = be32_to_cpu(src->ref_tag);
+				if (chk_ref && (ref_tag != start_ref_tag)) {
+					err_type = BGS_REFTAG_ERR_MASK;
+					goto out;
+				}
+				start_ref_tag++;
+
+				/* Guard Tag checking */
+				if (chk_guard) {
+					guard_tag = src->guard_tag;
+					if (guard_type == SHOST_DIX_GUARD_IP)
+						sum = lpfc_bg_csum(data_src,
+								   blksize);
+					else
+						sum = lpfc_bg_crc(data_src,
+								  blksize);
+					if ((guard_tag != sum)) {
+						err_type = BGS_GUARD_ERR_MASK;
+						goto out;
+					}
+				}
+skipit:
+				len -= sizeof(struct scsi_dif_tuple);
+				if (len < 0)
+					len = 0;
+				src++;
+
+				data_src += blksize;
+				data_len -= blksize;
+
+				/*
+				 * Are we at the end of the Data segment?
+				 * The data segment is only used for Guard
+				 * tag checking.
+				 */
+				if (chk_guard && (data_len == 0)) {
+					chk_guard = 0;
+					sgde = sg_next(sgde);
+					if (!sgde)
+						goto out;
+
+					data_src = (uint8_t *)sg_virt(sgde);
+					data_len = sgde->length;
+					if ((data_len & (blksize - 1)) == 0)
+						chk_guard = 1;
+				}
+			}
+
+			/* Goto the next Protection data segment */
+			sgpe = sg_next(sgpe);
+			if (sgpe) {
+				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+				len = sgpe->length;
+			} else {
+				src = NULL;
+			}
+			protsegcnt--;
+		}
+	}
+out:
+	if (err_type == BGS_GUARD_ERR_MASK) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+					0x10, 0x1);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+		phba->bg_guard_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+				(unsigned long)scsi_get_lba(cmd),
+				sum, guard_tag);
+
+	} else if (err_type == BGS_REFTAG_ERR_MASK) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+					0x10, 0x3);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_reftag_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+				(unsigned long)scsi_get_lba(cmd),
+				ref_tag, start_ref_tag);
+
+	} else if (err_type == BGS_APPTAG_ERR_MASK) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+					0x10, 0x2);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_apptag_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+				(unsigned long)scsi_get_lba(cmd),
+				app_tag, start_app_tag);
+	}
+}
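
The tuple walk above honors the T10-DIF escape values: a block whose app tag is 0xffff (or, as this driver also treats it, whose ref tag is 0xffffffff) carries protection data that must not be checked. A sketch of that predicate over the 8-byte tuple (field names illustrative):

#include <stdint.h>
#include <stdbool.h>

struct tuple_sketch {
	uint16_t guard_tag;	/* CRC or IP checksum of the data block */
	uint16_t app_tag;	/* 0xffff: skip checking this block */
	uint32_t ref_tag;	/* low 32 bits of the block's LBA */
};

static bool tuple_checkable(const struct tuple_sketch *t)
{
	return t->app_tag != 0xffff && t->ref_tag != 0xffffffff;
}

int main(void)
{
	struct tuple_sketch esc = { 0, 0xffff, 7 };

	return tuple_checkable(&esc);	/* 0: escape tuple, skipped */
}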
+
+/*
  * This function checks for BlockGuard errors detected by
  * the HBA.  In case of errors, the ASC/ASCQ fields in the
  * sense buffer will be set accordingly, paired with
@@ -2842,12 +3219,6 @@
 	uint32_t bgstat = bgf->bgstat;
 	uint64_t failing_sector = 0;
 
-	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
-			" 0x%x lba 0x%llx blk cnt 0x%x "
-			"bgstat=0x%x bghm=0x%x\n",
-			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-			blk_rq_sectors(cmd->request), bgstat, bghm);
-
 	spin_lock(&_dump_buf_lock);
 	if (!_dump_buf_done) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@
 
 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
 		cmd->result = ScsiResult(DID_ERROR, 0);
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
-			" BlockGuard profile. bgstat:0x%x\n",
-			bgstat);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9072 BLKGRD: Invalid BG Profile in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
 		ret = (-1);
 		goto out;
 	}
 
 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
 		cmd->result = ScsiResult(DID_ERROR, 0);
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
-				"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
-				bgstat);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
 		ret = (-1);
 		goto out;
 	}
@@ -2894,8 +3271,12 @@
 		cmd->result = DRIVER_SENSE << 24
 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 		phba->bg_guard_err_cnt++;
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9055 BLKGRD: guard_tag error\n");
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9055 BLKGRD: Guard Tag error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
 	}
 
 	if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@
 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
 		phba->bg_reftag_err_cnt++;
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9056 BLKGRD: ref_tag error\n");
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9056 BLKGRD: Ref Tag error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
 	}
 
 	if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@
 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
 		phba->bg_apptag_err_cnt++;
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9061 BLKGRD: app_tag error\n");
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9061 BLKGRD: App Tag error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
 	}
 
 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@
 
 	if (!ret) {
 		/* No error was reported - problem in FW? */
-		cmd->result = ScsiResult(DID_ERROR, 0);
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9057 BLKGRD: Unknown error reported!\n");
-	}
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9057 BLKGRD: Unknown error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
 
+		/* Calculate what type of error it was */
+		lpfc_calc_bg_err(phba, lpfc_cmd);
+	}
 out:
 	return ret;
 }
@@ -3028,6 +3422,7 @@
 				"dma_map_sg.  Config %d, seg_cnt %d\n",
 				__func__, phba->cfg_sg_seg_cnt,
 			       lpfc_cmd->seg_cnt);
+			lpfc_cmd->seg_cnt = 0;
 			scsi_dma_unmap(scsi_cmnd);
 			return 1;
 		}
@@ -3094,45 +3489,6 @@
 }
 
 /**
- * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
- * @phba: The Hba for which this call is being executed.
- * @lpfc_cmd: The scsi buffer which is going to be adjusted.
- *
- * Adjust the data length to account for how much data
- * is actually on the wire.
- *
- * returns the adjusted data length
- **/
-static int
-lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
-		struct lpfc_scsi_buf *lpfc_cmd)
-{
-	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
-	int diflen, fcpdl;
-	unsigned blksize;
-
-	fcpdl = scsi_bufflen(sc);
-
-	/* Check if there is protection data on the wire */
-	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-		/* Read */
-		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
-			return fcpdl;
-
-	} else {
-		/* Write */
-		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
-			return fcpdl;
-	}
-
-	/* If protection data on the wire, adjust the count accordingly */
-	blksize = lpfc_cmd_blksize(sc);
-	diflen = (fcpdl / blksize) * 8;
-	fcpdl += diflen;
-	return fcpdl;
-}
-
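
Although lpfc_bg_scsi_adjust_dl() is removed here, it is still called near the end of this file's BlockGuard DMA-prep path (see the fcpdl assignment below), so it has presumably been relocated earlier in the file by another hunk of this patch. Its adjustment is simple arithmetic; a worked example with hypothetical values:

/* Worked example of the wire-length adjustment: a 32 KiB transfer of
 * 512-byte blocks is 64 blocks; with protection data on the wire each
 * block carries an extra 8-byte DIF tuple.
 */
int fcpdl = 32768;                  /* scsi_bufflen() */
unsigned blksize = 512;             /* lpfc_cmd_blksize() */
int diflen = (fcpdl / blksize) * 8; /* 64 * 8 = 512 bytes of PI */
fcpdl += diflen;                    /* 33280 bytes on the wire */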
-/**
  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -3149,14 +3505,14 @@
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
-	uint32_t num_bde = 0;
+	uint32_t num_sge = 0;
 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
 	int prot_group_type = 0;
 	int fcpdl;
 
 	/*
 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
-	 *  fcp_rsp regions to the first data bde entry
+	 *  fcp_rsp regions to the first data sge entry
 	 */
 	if (scsi_sg_count(scsi_cmnd)) {
 		/*
@@ -3179,28 +3535,28 @@
 
 		sgl += 1;
 		lpfc_cmd->seg_cnt = datasegcnt;
-		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9087 BLKGRD: %s: Too many sg segments"
-					" from dma_map_sg.  Config %d, seg_cnt"
-					" %d\n",
-					__func__, phba->cfg_sg_seg_cnt,
-					lpfc_cmd->seg_cnt);
-			scsi_dma_unmap(scsi_cmnd);
-			return 1;
-		}
+
+		/* First check if data segment count from SCSI Layer is good */
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+			goto err;
 
 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
 		switch (prot_group_type) {
 		case LPFC_PG_TYPE_NO_DIF:
-			num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+			/* Here we need to add a DISEED to the count */
+			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+				goto err;
+
+			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
 					datasegcnt);
+
 			/* we should have 2 or more entries in buffer list */
-			if (num_bde < 2)
+			if (num_sge < 2)
 				goto err;
 			break;
-		case LPFC_PG_TYPE_DIF_BUF:{
+
+		case LPFC_PG_TYPE_DIF_BUF:
 			/*
 			 * This type indicates that protection buffers are
 			 * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@
 			}
 
 			lpfc_cmd->prot_seg_cnt = protsegcnt;
-			if (lpfc_cmd->prot_seg_cnt
-			    > phba->cfg_prot_sg_seg_cnt) {
-				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-					"9088 BLKGRD: %s: Too many prot sg "
-					"segments from dma_map_sg.  Config %d,"
-						"prot_seg_cnt %d\n", __func__,
-						phba->cfg_prot_sg_seg_cnt,
-						lpfc_cmd->prot_seg_cnt);
-				dma_unmap_sg(&phba->pcidev->dev,
-					     scsi_prot_sglist(scsi_cmnd),
-					     scsi_prot_sg_count(scsi_cmnd),
-					     datadir);
-				scsi_dma_unmap(scsi_cmnd);
-				return 1;
-			}
+			/*
+			 * There is a minimum of 3 SGEs used for every
+			 * protection data segment.
+			 */
+			if ((lpfc_cmd->prot_seg_cnt * 3) >
+			    (phba->cfg_total_seg_cnt - 2))
+				goto err;
 
-			num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
 					datasegcnt, protsegcnt);
+
 			/* we should have 3 or more entries in buffer list */
-			if (num_bde < 3)
+			if ((num_sge < 3) ||
+			    (num_sge > phba->cfg_total_seg_cnt))
 				goto err;
 			break;
-		}
+
 		case LPFC_PG_TYPE_INVALID:
 		default:
+			scsi_dma_unmap(scsi_cmnd);
+			lpfc_cmd->seg_cnt = 0;
+
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 					"9083 Unexpected protection group %i\n",
 					prot_group_type);
@@ -3263,7 +3616,6 @@
 	}
 
 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-
 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
 	/*
@@ -3274,10 +3626,22 @@
 
 	return 0;
 err:
+	if (lpfc_cmd->seg_cnt)
+		scsi_dma_unmap(scsi_cmnd);
+	if (lpfc_cmd->prot_seg_cnt)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+			     scsi_prot_sg_count(scsi_cmnd),
+			     scsi_cmnd->sc_data_direction);
+
 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"9084 Could not setup all needed BDE's"
-			"prot_group_type=%d, num_bde=%d\n",
-			prot_group_type, num_bde);
+			"9084 Cannot setup S/G List for HBA "
+			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+			prot_group_type, num_sge);
+
+	lpfc_cmd->seg_cnt = 0;
+	lpfc_cmd->prot_seg_cnt = 0;
 	return 1;
 }
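
The two new capacity checks above bound the SGL against cfg_total_seg_cnt instead of logging and unwinding per list: a DIF-less request needs one extra SGE for the DISEED, and a request carrying protection buffers can consume up to three SGEs per protection segment on top of two reserved entries. A condensed sketch of the two conditions (helper name hypothetical):

/* Sketch of the SGE budget introduced above.  With a hypothetical
 * cfg_total_seg_cnt of 66: plain I/O may use 65 data segments
 * (65 + 1 DISEED), protected I/O at most (66 - 2) / 3 = 21
 * protection segments.
 */
static int lpfc_bg_sge_budget_ok(int seg_cnt, int prot_seg_cnt,
				 int total_seg_cnt, int prot_group_type)
{
	if (prot_group_type == LPFC_PG_TYPE_NO_DIF)
		return (seg_cnt + 1) <= total_seg_cnt;
	return (prot_seg_cnt * 3) <= (total_seg_cnt - 2);
}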
 
@@ -4357,7 +4721,8 @@
 
 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
 		if (vport->phba->cfg_enable_bg) {
-			lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+			lpfc_printf_vlog(vport,
+					 KERN_INFO, LOG_SCSI_CMD,
 					 "9033 BLKGRD: rcvd %s cmd:x%x "
 					 "sector x%llx cnt %u pt %x\n",
 					 dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 	} else {
 		if (vport->phba->cfg_enable_bg) {
-			lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+			lpfc_printf_vlog(vport,
+					 KERN_INFO, LOG_SCSI_CMD,
 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
 					 "x%x sector x%llx cnt %u pt %x\n",
 					 cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@
 	/* Wait for abort to complete */
 	wait_event_timeout(waitq,
 			  (lpfc_cmd->pCmd != cmnd),
-			   (2*vport->cfg_devloss_tmo*HZ));
+			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
 	lpfc_cmd->waitq = NULL;
 
 	if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@
 	struct lpfc_hba *phba = vport->phba;
 	int rc, ret = SUCCESS;
 
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "3172 SCSI layer issued Host Reset\n");
+
 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 	lpfc_offline(phba);
 	rc = lpfc_sli_brdrestart(phba);
 	if (rc)
 		ret = FAILED;
-	lpfc_online(phba);
+	rc = lpfc_online(phba);
+	if (rc)
+		ret = FAILED;
 	lpfc_unblock_mgmt_io(phba);
 
-	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-			"3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+	if (ret == FAILED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "3323 Failed host reset, bringing it offline\n");
+		lpfc_sli4_offline_eratt(phba);
+	}
 	return ret;
 }
 
@@ -5088,11 +5462,11 @@
 	}
 	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
 	if (num_to_alloc != num_allocated) {
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-				 "0708 Allocation request of %d "
-				 "command buffers did not succeed.  "
-				 "Allocated %d buffers.\n",
-				 num_to_alloc, num_allocated);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+					 "0708 Allocation request of %d "
+					 "command buffers did not succeed.  "
+					 "Allocated %d buffers.\n",
+					 num_to_alloc, num_allocated);
 	}
 	if (num_allocated > 0)
 		phba->total_scsi_bufs += num_allocated;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35dd17e..572579f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -667,7 +667,7 @@
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-	next_time = jiffies + HZ * (phba->fc_ratov + 1);
+	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
 	list_for_each_entry_safe(rrq, nextrrq,
 				 &phba->active_rrq_list, list) {
 		if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@
 		return;
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-	next_time = jiffies + HZ * (phba->fc_ratov * 2);
+	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
 	list_splice_init(&phba->active_rrq_list, &rrq_list);
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
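
This and the following timer hunks all trade open-coded HZ arithmetic for msecs_to_jiffies(). For whole-second intervals the two forms yield identical jiffies counts; the helper simply makes the unit explicit. Side by side:

/* The recurring conversion pattern in these hunks; for whole-second
 * intervals both expressions produce the same jiffies value.
 */
unsigned long t_old = jiffies + HZ * (phba->fc_ratov + 1);
unsigned long t_new = jiffies +
		      msecs_to_jiffies(1000 * (phba->fc_ratov + 1));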
 
@@ -878,7 +878,8 @@
 	else
 		rrq->send_rrq = 0;
 	rrq->xritag = xritag;
-	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+	rrq->rrq_stop_time = jiffies +
+				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
 	rrq->ndlp = ndlp;
 	rrq->nlp_DID = ndlp->nlp_DID;
 	rrq->vport = ndlp->vport;
@@ -926,8 +927,7 @@
 	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
 			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
 		ndlp = piocbq->context_un.ndlp;
-	else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
-			(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+	else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
 		ndlp = piocbq->context_un.ndlp;
 	else
 		ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@
 			BUG();
 		else
 			mod_timer(&piocb->vport->els_tmofunc,
-				  jiffies + HZ * (phba->fc_ratov << 1));
+				jiffies +
+				msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
 	}
 
 
@@ -2340,7 +2341,8 @@
 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
 				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
-				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+				"x%x x%x x%x\n",
 				pmb->vport ? pmb->vport->vpi : 0,
 				pmbox->mbxCommand,
 				lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@
 				pmbox->un.varWords[4],
 				pmbox->un.varWords[5],
 				pmbox->un.varWords[6],
-				pmbox->un.varWords[7]);
+				pmbox->un.varWords[7],
+				pmbox->un.varWords[8],
+				pmbox->un.varWords[9],
+				pmbox->un.varWords[10]);
 
 		if (pmb->mbox_cmpl)
 			pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@
 		lpfc_worker_wake_up(phba);
 	else
 		/* Restart the timer for next eratt poll */
-		mod_timer(&phba->eratt_poll, jiffies +
-					HZ * LPFC_ERATT_POLL_INTERVAL);
+		mod_timer(&phba->eratt_poll,
+			  jiffies +
+			  msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 	return;
 }
 
@@ -5511,6 +5517,7 @@
 			list_del_init(&rsrc_blk->list);
 			kfree(rsrc_blk);
 		}
+		phba->sli4_hba.max_cfg_param.vpi_used = 0;
 		break;
 	case LPFC_RSC_TYPE_FCOE_XRI:
 		kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@
 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 	} else {
 		kfree(phba->vpi_bmask);
+		phba->sli4_hba.max_cfg_param.vpi_used = 0;
 		kfree(phba->vpi_ids);
 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 		kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@
 	struct lpfc_sglq *sglq_entry = NULL;
 	struct lpfc_sglq *sglq_entry_next = NULL;
 	struct lpfc_sglq *sglq_entry_first = NULL;
-	int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+	int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
 	int last_xritag = NO_XRI;
 	LIST_HEAD(prep_sgl_list);
 	LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@
 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
 	spin_unlock_irq(&phba->hbalock);
 
+	total_cnt = phba->sli4_hba.els_xri_cnt;
 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
 				 &allc_sgl_list, list) {
 		list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@
 						sglq_entry->sli4_xritag);
 					list_add_tail(&sglq_entry->list,
 						      &free_sgl_list);
-					spin_lock_irq(&phba->hbalock);
-					phba->sli4_hba.els_xri_cnt--;
-					spin_unlock_irq(&phba->hbalock);
+					total_cnt--;
 				}
 			}
 		}
@@ -6085,9 +6092,7 @@
 					(sglq_entry_first->sli4_xritag +
 					 post_cnt - 1));
 			list_splice_init(&blck_sgl_list, &free_sgl_list);
-			spin_lock_irq(&phba->hbalock);
-			phba->sli4_hba.els_xri_cnt -= post_cnt;
-			spin_unlock_irq(&phba->hbalock);
+			total_cnt -= post_cnt;
 		}
 
 		/* don't reset xritag due to hole in xri block */
@@ -6097,6 +6102,8 @@
 		/* reset els sgl post count for next round of posting */
 		post_cnt = 0;
 	}
+	/* update the number of XRIs posted for ELS */
+	phba->sli4_hba.els_xri_cnt = total_cnt;
 
 	/* free the els sgls failed to post */
 	lpfc_free_sgl_list(phba, &free_sgl_list);
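
The repost loop now tracks the surviving XRI count in a local total_cnt and publishes it once after the loop, instead of taking hbalock to decrement els_xri_cnt for every failed post. The pattern in isolation:

/* Accumulate-then-publish: adjust a local copy inside the loop and
 * store the shared counter once at the end, avoiding a lock/unlock
 * cycle per failed SGL post.
 */
int total_cnt = phba->sli4_hba.els_xri_cnt;
/* ... loop: total_cnt-- or total_cnt -= post_cnt on post failures ... */
phba->sli4_hba.els_xri_cnt = total_cnt;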
@@ -6446,16 +6453,17 @@
 
 	/* Start the ELS watchdog timer */
 	mod_timer(&vport->els_tmofunc,
-		  jiffies + HZ * (phba->fc_ratov * 2));
+		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
 
 	/* Start heart beat timer */
 	mod_timer(&phba->hb_tmofunc,
-		  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
 	phba->hb_outstanding = 0;
 	phba->last_completion_time = jiffies;
 
 	/* Start error attention (ERATT) polling timer */
-	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+	mod_timer(&phba->eratt_poll,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
 	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
 	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@
 			goto out_not_finished;
 		}
 		/* timeout active mbox command */
-		mod_timer(&psli->mbox_tmo, (jiffies +
-			       (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+					   1000);
+		mod_timer(&psli->mbox_tmo, jiffies + timeout);
 	}
 
 	/* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@
 
 	/* Start timer for the mbox_tmo and log some mailbox post messages */
 	mod_timer(&psli->mbox_tmo, (jiffies +
-		  (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
 			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@
 static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-	int i;
+	struct lpfc_vector_map_info *cpup;
+	int chann, cpu;
 
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
-		i = smp_processor_id();
-	else
-		i = atomic_add_return(1, &phba->fcp_qidx);
-
-	i = (i % phba->cfg_fcp_io_channel);
-	return i;
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+		cpu = smp_processor_id();
+		if (cpu < phba->sli4_hba.num_present_cpu) {
+			cpup = phba->sli4_hba.cpu_map;
+			cpup += cpu;
+			return cpup->channel_id;
+		}
+		chann = cpu;
+	}
+	chann = atomic_add_return(1, &phba->fcp_qidx);
+	chann = (chann % phba->cfg_fcp_io_channel);
+	return chann;
 }
 
 /**
@@ -8444,10 +8459,14 @@
 
 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
 		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+		if (unlikely(!phba->sli4_hba.fcp_wq))
+			return IOCB_ERROR;
 		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
 				     &wqe))
 			return IOCB_ERROR;
 	} else {
+		if (unlikely(!phba->sli4_hba.els_wq))
+			return IOCB_ERROR;
 		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
 			return IOCB_ERROR;
 	}
@@ -10003,7 +10022,7 @@
 	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
 				     SLI_IOCB_RET_IOCB);
 	if (retval == IOCB_SUCCESS) {
-		timeout_req = timeout * HZ;
+		timeout_req = msecs_to_jiffies(timeout * 1000);
 		timeleft = wait_event_timeout(done_q,
 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 				timeout_req);
@@ -10108,7 +10127,7 @@
 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
 		wait_event_interruptible_timeout(done_q,
 				pmboxq->mbox_flag & LPFC_MBX_WAKE,
-				timeout * HZ);
+				msecs_to_jiffies(timeout * 1000));
 
 		spin_lock_irqsave(&phba->hbalock, flag);
 		pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@
 		}
 		wq->db_regaddr = bar_memmap_p + db_offset;
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3264 WQ[%d]: barset:x%x, offset:x%x\n",
-				wq->queue_id, pci_barset, db_offset);
+				"3264 WQ[%d]: barset:x%x, offset:x%x, "
+				"format:x%x\n", wq->queue_id, pci_barset,
+				db_offset, wq->db_format);
 	} else {
 		wq->db_format = LPFC_DB_LIST_FORMAT;
 		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@
 		}
 		hrq->db_regaddr = bar_memmap_p + db_offset;
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
-				hrq->queue_id, pci_barset, db_offset);
+				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+				"format:x%x\n", hrq->queue_id, pci_barset,
+				db_offset, hrq->db_format);
 	} else {
 		hrq->db_format = LPFC_DB_RING_FORMAT;
 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@
 	}
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"2538 Received frame rctl:%s type:%s "
-			"Frame Data:%08x %08x %08x %08x %08x %08x\n",
-			rctl_names[fc_hdr->fh_r_ctl],
-			type_names[fc_hdr->fh_type],
+			"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+			rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+			type_names[fc_hdr->fh_type], fc_hdr->fh_type,
 			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
 			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-			be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+			be32_to_cpu(header[6]));
 	return 0;
 drop:
 	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index be02b59..67af460 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -346,11 +346,6 @@
 #define SLI4_CT_VFI 2
 #define SLI4_CT_FCFI 3
 
-#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE	0x10000
-#define LPFC_SLI4_FL1_MAX_BUF_SIZE	0X2000
-#define LPFC_SLI4_MIN_BUF_SIZE		0x400
-#define LPFC_SLI4_MAX_BUF_SIZE		0x20000
-
 /*
  * SLI4 specific data structures
  */
@@ -440,6 +435,17 @@
 
 #define LPFC_SLI4_HANDLER_NAME_SZ	16
 
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+	uint16_t	phys_id;
+	uint16_t	core_id;
+	uint16_t	irq;
+	uint16_t	channel_id;
+	struct cpumask	maskbits;
+};
+#define LPFC_VECTOR_MAP_EMPTY	0xffff
+#define LPFC_MAX_CPU		256
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
 	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@
 	struct lpfc_iov iov;
 	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+
+	/* CPU to vector mapping information */
+	struct lpfc_vector_map_info *cpu_map;
+	uint16_t num_online_cpu;
+	uint16_t num_present_cpu;
 };
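
The cpu_map added here is what the reworked lpfc_sli4_scmd_to_wqidx_distr() in lpfc_sli.c indexes by smp_processor_id() to pick a CPU-affine FCP channel. A hypothetical initialization sketch (the real allocation and IRQ-affinity fill-in live elsewhere in this patch series):

/* One entry per present CPU, marked empty until the affinity code
 * fills it in; values here are illustrative only.
 */
phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
				 sizeof(struct lpfc_vector_map_info),
				 GFP_KERNEL);
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
	phba->sli4_hba.cpu_map[cpu].phys_id = LPFC_VECTOR_MAP_EMPTY;
	phba->sli4_hba.cpu_map[cpu].core_id = LPFC_VECTOR_MAP_EMPTY;
	phba->sli4_hba.cpu_map[cpu].irq = LPFC_VECTOR_MAP_EMPTY;
	phba->sli4_hba.cpu_map[cpu].channel_id = 0;
}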
 
 enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 664cd04..a38dc3b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.38"
+#define LPFC_DRIVER_VERSION "8.3.39"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 0fe188e6..e28e431 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -80,7 +80,7 @@
 	}
 }
 
-static int
+int
 lpfc_alloc_vpi(struct lpfc_hba *phba)
 {
 	unsigned long vpi;
@@ -568,6 +568,7 @@
 	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
 	struct lpfc_hba   *phba = vport->phba;
 	long timeout;
+	bool ns_ndlp_referenced = false;
 
 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@
 
 	lpfc_debugfs_terminate(vport);
 
+	/*
+	 * The call to fc_remove_host might release the NameServer ndlp. Since
+	 * we might need to use the ndlp to send the DA_ID CT command,
+	 * increment the reference for the NameServer ndlp to prevent it from
+	 * being released.
+	 */
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_nlp_get(ndlp);
+		ns_ndlp_referenced = true;
+	}
+
 	/* Remove FC host and then SCSI host with the vport */
 	fc_remove_host(lpfc_shost_from_vport(vport));
 	scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@
 		lpfc_discovery_wait(vport);
 
 skip_logo:
+
+	/*
+	 * If the NameServer ndlp reference count was incremented to allow
+	 * the DA_ID CT command to be sent, drop that reference now.
+	 */
+	if (ns_ndlp_referenced) {
+		ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		lpfc_nlp_put(ndlp);
+	}
+
 	lpfc_cleanup(vport);
 	lpfc_sli_host_down(vport);
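
The two vport hunks bracket fc_remove_host() with an explicit reference on the NameServer node so the DA_ID CT command can still be sent afterwards. Reduced to the bare pattern:

/* Reference bracketing: pin the node before the call that may drop
 * it, use it, then release the extra reference taken here.
 */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
	lpfc_nlp_get(ndlp);		/* pin across fc_remove_host() */
	ns_ndlp_referenced = true;
}
/* ... fc_remove_host(), logo/DA_ID processing ... */
if (ns_ndlp_referenced)
	lpfc_nlp_put(ndlp);		/* balance the lpfc_nlp_get() */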
 
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 9082834..6b2c94e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -90,6 +90,7 @@
 int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
 struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
 void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
 
 /*
  *  queuecommand  VPORT-specific return codes. Specified in  the host byte code.
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7c90d57..3a9ddae 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4931,11 +4931,12 @@
 		printk(KERN_ERR "megaraid_sas: timed out while "
 			"waiting for HBA to recover\n");
 		error = -ENODEV;
-		goto out_kfree_ioc;
+		goto out_up;
 	}
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+      out_up:
 	up(&instance->ioctl_sem);
 
       out_kfree_ioc:
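
The megaraid change is purely about unwind order: the timeout path used to jump straight to out_kfree_ioc, skipping the up() that balances the semaphore taken before waiting for HBA recovery, so repeated timeouts would leak ioctl_sem counts. The corrected shape, condensed (the out_kfree_ioc body is not shown in this hunk):

	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
	up(&instance->ioctl_sem);	/* balances the earlier down() */
out_kfree_ioc:
	/* ... free the kernel copy of the ioc packet, return error ... */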
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 8c2ffbe..193e7ae 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1939,7 +1939,7 @@
 	ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
 	/* Check if the request is split across multiple segments */
-	if (req->bio->bi_vcnt > 1) {
+	if (bio_segments(req->bio) > 1) {
 		u32 offset = 0;
 
 		/* Allocate memory and copy the request */
@@ -1971,7 +1971,7 @@
 
 	/* Check if the response needs to be populated across
 	 * multiple segments */
-	if (rsp->bio->bi_vcnt > 1) {
+	if (bio_segments(rsp->bio) > 1) {
 		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
 		    &pci_dma_in);
 		if (!pci_addr_in) {
@@ -2038,7 +2038,7 @@
 	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	if (req->bio->bi_vcnt > 1) {
+	if (bio_segments(req->bio) > 1) {
 		ioc->base_add_sg_single(psge, sgl_flags |
 		    (blk_rq_bytes(req) - 4), pci_dma_out);
 	} else {
@@ -2054,7 +2054,7 @@
 	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
 	    MPI2_SGE_FLAGS_END_OF_LIST);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	if (rsp->bio->bi_vcnt > 1) {
+	if (bio_segments(rsp->bio) > 1) {
 		ioc->base_add_sg_single(psge, sgl_flags |
 		    (blk_rq_bytes(rsp) + 4), pci_dma_in);
 	} else {
@@ -2099,7 +2099,7 @@
 		    le16_to_cpu(mpi_reply->ResponseDataLength);
 		/* check if the resp needs to be copied from the allocated
 		 * pci mem */
-		if (rsp->bio->bi_vcnt > 1) {
+		if (bio_segments(rsp->bio) > 1) {
 			u32 offset = 0;
 			u32 bytes_to_copy =
 			    le16_to_cpu(mpi_reply->ResponseDataLength);
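
All five mpt2sas hunks replace a raw bi_vcnt test with bio_segments(). bi_vcnt is the total vector count of a single bio and ignores how far the bio has been consumed, so a partially advanced or split bio can look multi-segment when only one vector remains. In this kernel generation the accessor is roughly:

/* bio_segments() accounts for the current index, unlike raw bi_vcnt:
 *	#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 */
if (bio_segments(req->bio) > 1) {
	/* bounce the request through one contiguous DMA buffer */
} else {
	/* map the single remaining segment directly */
}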
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7455092..7b7381d 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -254,7 +254,7 @@
 	}
 	for (i = 0; i < MVS_MAX_DEVICES; i++) {
 		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
-		mvi->devices[i].dev_type = NO_DEVICE;
+		mvi->devices[i].dev_type = SAS_PHY_UNUSED;
 		mvi->devices[i].device_id = i;
 		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
 		init_timer(&mvi->devices[i].timer);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 532110f..c9e2449 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -706,7 +706,7 @@
 	return 0;
 }
 
-#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
 				struct mvs_tmf_task *tmf, int *pass)
 {
@@ -726,7 +726,7 @@
 		 * libsas will use dev->port, should
 		 * not call task_done for sata
 		 */
-		if (dev->dev_type != SATA_DEV)
+		if (dev->dev_type != SAS_SATA_DEV)
 			task->task_done(task);
 		return rc;
 	}
@@ -1159,10 +1159,10 @@
 			phy->identify.device_type =
 				phy->att_dev_info & PORT_DEV_TYPE_MASK;
 
-			if (phy->identify.device_type == SAS_END_DEV)
+			if (phy->identify.device_type == SAS_END_DEVICE)
 				phy->identify.target_port_protocols =
 							SAS_PROTOCOL_SSP;
-			else if (phy->identify.device_type != NO_DEVICE)
+			else if (phy->identify.device_type != SAS_PHY_UNUSED)
 				phy->identify.target_port_protocols =
 							SAS_PROTOCOL_SMP;
 			if (oob_done)
@@ -1260,7 +1260,7 @@
 {
 	u32 dev;
 	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
-		if (mvi->devices[dev].dev_type == NO_DEVICE) {
+		if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
 			mvi->devices[dev].device_id = dev;
 			return &mvi->devices[dev];
 		}
@@ -1278,7 +1278,7 @@
 	u32 id = mvi_dev->device_id;
 	memset(mvi_dev, 0, sizeof(*mvi_dev));
 	mvi_dev->device_id = id;
-	mvi_dev->dev_type = NO_DEVICE;
+	mvi_dev->dev_type = SAS_PHY_UNUSED;
 	mvi_dev->dev_status = MVS_DEV_NORMAL;
 	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
 }
@@ -1480,7 +1480,7 @@
 {
 	int rc;
 	struct sas_phy *phy = sas_get_local_phy(dev);
-	int reset_type = (dev->dev_type == SATA_DEV ||
+	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
 			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 	rc = sas_phy_reset(phy, reset_type);
 	sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@
 
 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
 		task->task_proto & SAS_PROTOCOL_STP) {
-		if (SATA_DEV == dev->dev_type) {
+		if (SAS_SATA_DEV == dev->dev_type) {
 			struct mvs_slot_info *slot = task->lldd_task;
 			u32 slot_idx = (u32)(slot - mvi->slot_info);
 			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 9f3cc13..60e2fb7 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -67,7 +67,7 @@
 extern struct kmem_cache *mvs_task_list_cache;
 
 #define DEV_IS_EXPANDER(type)	\
-	((type == EDGE_DEV) || (type == FANOUT_DEV))
+	((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define bit(n) ((u64)1 << n)
 
@@ -241,7 +241,7 @@
 
 struct mvs_device {
 	struct list_head		dev_entry;
-	enum sas_dev_type dev_type;
+	enum sas_device_type dev_type;
 	struct mvs_info *mvi_info;
 	struct domain_device *sas_device;
 	struct timer_list timer;
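
The mvsas changes are a mechanical rename to the consolidated libsas device-type enum; the mapping applied throughout this driver is:

/* old libsas name	->	consolidated name
 * enum sas_dev_type	->	enum sas_device_type
 * NO_DEVICE		->	SAS_PHY_UNUSED
 * SAS_END_DEV		->	SAS_END_DEVICE
 * SATA_DEV		->	SAS_SATA_DEV
 * EDGE_DEV		->	SAS_EDGE_EXPANDER_DEVICE
 * FANOUT_DEV		->	SAS_FANOUT_EXPANDER_DEVICE
 */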
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 52f0429..ce4cd87 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -4,9 +4,10 @@
 # Copyright (C) 2008-2009  USI Co., Ltd.
 
 
-obj-$(CONFIG_SCSI_PM8001) += pm8001.o
-pm8001-y += pm8001_init.o \
+obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+pm80xx-y += pm8001_init.o \
 		pm8001_sas.o  \
 		pm8001_ctl.o  \
-		pm8001_hwi.o
+		pm8001_hwi.o  \
+		pm80xx_hwi.o
 
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 45bc197b..d99f41c 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -58,8 +58,13 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-		pm8001_ha->main_cfg_tbl.interface_rev);
+	if (pm8001_ha->chip_id == chip_8001) {
+		return snprintf(buf, PAGE_SIZE, "%d\n",
+			pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
+	} else {
+		return snprintf(buf, PAGE_SIZE, "%d\n",
+			pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
+	}
 }
 static
 DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
-		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
-		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
-		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
-		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
+	if (pm8001_ha->chip_id == chip_8001) {
+		return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+		(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
+		(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
+		(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
+		(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
+	} else {
+		return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+		(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
+		(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
+		(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
+		(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+	}
 }
 static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
 /**
@@ -99,8 +112,13 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			pm8001_ha->main_cfg_tbl.max_out_io);
+	if (pm8001_ha->chip_id == chip_8001) {
+		return snprintf(buf, PAGE_SIZE, "%d\n",
+			pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
+	} else {
+		return snprintf(buf, PAGE_SIZE, "%d\n",
+			pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
+	}
 }
 static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
 /**
@@ -117,8 +135,15 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-	return snprintf(buf, PAGE_SIZE, "%04d\n",
-			(u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
+	if (pm8001_ha->chip_id == chip_8001) {
+		return snprintf(buf, PAGE_SIZE, "%04d\n",
+			(u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
+			);
+	} else {
+		return snprintf(buf, PAGE_SIZE, "%04d\n",
+			(u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
+			);
+	}
 }
 static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
 /**
@@ -136,8 +161,15 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-	return snprintf(buf, PAGE_SIZE, "%04d\n",
-			pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
+	if (pm8001_ha->chip_id == chip_8001) {
+		return snprintf(buf, PAGE_SIZE, "%04d\n",
+			pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
+			);
+	} else {
+		return snprintf(buf, PAGE_SIZE, "%04d\n",
+			pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
+			);
+	}
 }
 static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
 
@@ -173,7 +205,14 @@
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-	mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
+	/* fe000000 means supports SAS2.1 */
+	if (pm8001_ha->chip_id == chip_8001)
+		mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
+							0xfe000000)>>25;
+	else
+		mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
+							0xfe000000)>>25;
 	return show_sas_spec_support_status(mode, buf);
 }
 static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@
 		goto out;
 	}
 	payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
-	memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+	memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
 				pm8001_ha->fw_image->size);
 	payload->length = pm8001_ha->fw_image->size;
 	payload->id = 0;
+	payload->minor_function = 0x1;
 	pm8001_ha->nvmd_completion = &completion;
 	ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
 	wait_for_completion(&completion);
@@ -411,7 +451,7 @@
 			payload->length = 1024*16;
 			payload->id = 0;
 			fwControl =
-			      (struct fw_control_info *)payload->func_specific;
+			      (struct fw_control_info *)&payload->func_specific;
 			fwControl->len = IOCTL_BUF_SIZE;   /* IN */
 			fwControl->size = partitionSize + HEADER_LEN;/* IN */
 			fwControl->retcode = 0;/* OUT */
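
Every attribute-show hunk in this file now selects between two members of main_cfg_tbl keyed on chip_id, which implies the configuration table became a union so the SPC (8001) and SPCv/e (80xx) register layouts can share one pm8001_hba_info field. Assumed shape (struct type names inferred, not shown in this diff):

union main_cfg_table {
	struct pm8001_main_cfg_table pm8001_tbl;  /* SPC, chip_8001 */
	struct pm80xx_main_cfg_table pm80xx_tbl;  /* SPCv/e, 80xx   */
};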
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index c3d20c8..479c5a7 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -43,9 +43,12 @@
 
 enum chip_flavors {
 	chip_8001,
+	chip_8008,
+	chip_8009,
+	chip_8018,
+	chip_8019
 };
-#define USI_MAX_MEMCNT			9
-#define PM8001_MAX_DMA_SG		SG_ALL
+
 enum phy_speed {
 	PHY_SPEED_15 = 0x01,
 	PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@
 #define PM8001_MPI_QUEUE         1024   /* maximum mpi queue entries */
 #define	PM8001_MAX_INB_NUM	 1
 #define	PM8001_MAX_OUTB_NUM	 1
+#define	PM8001_MAX_SPCV_INB_NUM		1
+#define	PM8001_MAX_SPCV_OUTB_NUM	4
 #define	PM8001_CAN_QUEUE	 508	/* SCSI Queue depth */
 
-/* unchangeable hardware details */
-#define	PM8001_MAX_PHYS		 8	/* max. possible phys */
-#define	PM8001_MAX_PORTS	 8	/* max. possible ports */
-#define	PM8001_MAX_DEVICES	 1024	/* max supported device */
+/* Inbound/Outbound queue size */
+#define IOMB_SIZE_SPC		64
+#define IOMB_SIZE_SPCV		128
 
+/* unchangeable hardware details */
+#define	PM8001_MAX_PHYS		 16	/* max. possible phys */
+#define	PM8001_MAX_PORTS	 16	/* max. possible ports */
+#define	PM8001_MAX_DEVICES	 2048	/* max supported devices */
+#define	PM8001_MAX_MSIX_VEC	 64	/* max msi-x int for spcv/ve */
+
+#define USI_MAX_MEMCNT_BASE	5
+#define IB			(USI_MAX_MEMCNT_BASE + 1)
+#define CI			(IB + PM8001_MAX_SPCV_INB_NUM)
+#define OB			(CI + PM8001_MAX_SPCV_INB_NUM)
+#define PI			(OB + PM8001_MAX_SPCV_OUTB_NUM)
+#define USI_MAX_MEMCNT		(PI + PM8001_MAX_SPCV_OUTB_NUM)
+#define PM8001_MAX_DMA_SG	SG_ALL
 enum memory_region_num {
 	AAP1 = 0x0, /* application acceleration processor */
 	IOP,	    /* IO processor */
-	CI,	    /* consumer index */
-	PI,	    /* producer index */
-	IB,	    /* inbound queue */
-	OB,	    /* outbound queue */
 	NVMD,	    /* NVM device */
 	DEV_MEM,    /* memory for devices */
 	CCB_MEM,    /* memory for command control block */
+	FW_FLASH    /* memory for fw flash update */
 };
 #define	PM8001_EVENT_LOG_SIZE	 (128 * 1024)
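
The reshuffled memory_region_num plus the new IB/CI/OB/PI macros turn the queue regions into index ranges sized for the SPCv maximums. Working the constants through:

/* With USI_MAX_MEMCNT_BASE = 5 and 1 inbound / 4 outbound queues:
 *   AAP1..CCB_MEM	indexes 0..4
 *   FW_FLASH		index 5
 *   IB  = 6		inbound-queue region
 *   CI  = 7		consumer-index region
 *   OB  = 8..11	four outbound-queue regions
 *   PI  = 12..15	four producer-index regions
 *   USI_MAX_MEMCNT = 16 regions in memoryMap overall
 */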
 
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index b8dd050..69dd49c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -50,32 +50,39 @@
 static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
 	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
-	pm8001_ha->main_cfg_tbl.signature	= pm8001_mr32(address, 0x00);
-	pm8001_ha->main_cfg_tbl.interface_rev	= pm8001_mr32(address, 0x04);
-	pm8001_ha->main_cfg_tbl.firmware_rev	= pm8001_mr32(address, 0x08);
-	pm8001_ha->main_cfg_tbl.max_out_io	= pm8001_mr32(address, 0x0C);
-	pm8001_ha->main_cfg_tbl.max_sgl		= pm8001_mr32(address, 0x10);
-	pm8001_ha->main_cfg_tbl.ctrl_cap_flag	= pm8001_mr32(address, 0x14);
-	pm8001_ha->main_cfg_tbl.gst_offset	= pm8001_mr32(address, 0x18);
-	pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.signature	=
+				pm8001_mr32(address, 0x00);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
+				pm8001_mr32(address, 0x04);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev	=
+				pm8001_mr32(address, 0x08);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io	=
+				pm8001_mr32(address, 0x0C);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl	=
+				pm8001_mr32(address, 0x10);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
+				pm8001_mr32(address, 0x14);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset	=
+				pm8001_mr32(address, 0x18);
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
 		pm8001_mr32(address, MAIN_IBQ_OFFSET);
-	pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
 		pm8001_mr32(address, MAIN_OBQ_OFFSET);
-	pm8001_ha->main_cfg_tbl.hda_mode_flag	=
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag	=
 		pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
 
 	/* read analog Setting offset from the configuration table */
-	pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
 		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
 
 	/* read Error Dump Offset and Length */
-	pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
 		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
-	pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
 		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
-	pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
 		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
-	pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
 		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
 }
 
@@ -86,31 +93,56 @@
 static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
 {
 	void __iomem *address = pm8001_ha->general_stat_tbl_addr;
-	pm8001_ha->gs_tbl.gst_len_mpistate	= pm8001_mr32(address, 0x00);
-	pm8001_ha->gs_tbl.iq_freeze_state0	= pm8001_mr32(address, 0x04);
-	pm8001_ha->gs_tbl.iq_freeze_state1	= pm8001_mr32(address, 0x08);
-	pm8001_ha->gs_tbl.msgu_tcnt		= pm8001_mr32(address, 0x0C);
-	pm8001_ha->gs_tbl.iop_tcnt		= pm8001_mr32(address, 0x10);
-	pm8001_ha->gs_tbl.reserved		= pm8001_mr32(address, 0x14);
-	pm8001_ha->gs_tbl.phy_state[0]	= pm8001_mr32(address, 0x18);
-	pm8001_ha->gs_tbl.phy_state[1]	= pm8001_mr32(address, 0x1C);
-	pm8001_ha->gs_tbl.phy_state[2]	= pm8001_mr32(address, 0x20);
-	pm8001_ha->gs_tbl.phy_state[3]	= pm8001_mr32(address, 0x24);
-	pm8001_ha->gs_tbl.phy_state[4]	= pm8001_mr32(address, 0x28);
-	pm8001_ha->gs_tbl.phy_state[5]	= pm8001_mr32(address, 0x2C);
-	pm8001_ha->gs_tbl.phy_state[6]	= pm8001_mr32(address, 0x30);
-	pm8001_ha->gs_tbl.phy_state[7]	= pm8001_mr32(address, 0x34);
-	pm8001_ha->gs_tbl.reserved1		= pm8001_mr32(address, 0x38);
-	pm8001_ha->gs_tbl.reserved2		= pm8001_mr32(address, 0x3C);
-	pm8001_ha->gs_tbl.reserved3		= pm8001_mr32(address, 0x40);
-	pm8001_ha->gs_tbl.recover_err_info[0]	= pm8001_mr32(address, 0x44);
-	pm8001_ha->gs_tbl.recover_err_info[1]	= pm8001_mr32(address, 0x48);
-	pm8001_ha->gs_tbl.recover_err_info[2]	= pm8001_mr32(address, 0x4C);
-	pm8001_ha->gs_tbl.recover_err_info[3]	= pm8001_mr32(address, 0x50);
-	pm8001_ha->gs_tbl.recover_err_info[4]	= pm8001_mr32(address, 0x54);
-	pm8001_ha->gs_tbl.recover_err_info[5]	= pm8001_mr32(address, 0x58);
-	pm8001_ha->gs_tbl.recover_err_info[6]	= pm8001_mr32(address, 0x5C);
-	pm8001_ha->gs_tbl.recover_err_info[7]	= pm8001_mr32(address, 0x60);
+	pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate	=
+				pm8001_mr32(address, 0x00);
+	pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0	=
+				pm8001_mr32(address, 0x04);
+	pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1	=
+				pm8001_mr32(address, 0x08);
+	pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt		=
+				pm8001_mr32(address, 0x0C);
+	pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt		=
+				pm8001_mr32(address, 0x10);
+	pm8001_ha->gs_tbl.pm8001_tbl.rsvd		=
+				pm8001_mr32(address, 0x14);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0]	=
+				pm8001_mr32(address, 0x18);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1]	=
+				pm8001_mr32(address, 0x1C);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2]	=
+				pm8001_mr32(address, 0x20);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3]	=
+				pm8001_mr32(address, 0x24);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4]	=
+				pm8001_mr32(address, 0x28);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5]	=
+				pm8001_mr32(address, 0x2C);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6]	=
+				pm8001_mr32(address, 0x30);
+	pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7]	=
+				pm8001_mr32(address, 0x34);
+	pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val	=
+				pm8001_mr32(address, 0x38);
+	pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0]		=
+				pm8001_mr32(address, 0x3C);
+	pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1]		=
+				pm8001_mr32(address, 0x40);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0]	=
+				pm8001_mr32(address, 0x44);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1]	=
+				pm8001_mr32(address, 0x48);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2]	=
+				pm8001_mr32(address, 0x4C);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3]	=
+				pm8001_mr32(address, 0x50);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4]	=
+				pm8001_mr32(address, 0x54);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5]	=
+				pm8001_mr32(address, 0x58);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6]	=
+				pm8001_mr32(address, 0x5C);
+	pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7]	=
+				pm8001_mr32(address, 0x60);
 }
 
 /**
@@ -119,10 +151,9 @@
  */
 static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-	int inbQ_num = 1;
 	int i;
 	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
-	for (i = 0; i < inbQ_num; i++) {
+	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
 		u32 offset = i * 0x20;
 		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
 		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@
  */
 static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-	int outbQ_num = 1;
 	int i;
 	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
-	for (i = 0; i < outbQ_num; i++) {
+	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
 		u32 offset = i * 0x24;
 		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
 		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@
  */
 static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 {
-	int qn = 1;
 	int i;
 	u32 offsetib, offsetob;
 	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
 	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
 
-	pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd			= 0;
-	pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 		= 0;
-	pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7		= 0;
-	pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3		= 0;
-	pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7		= 0;
-	pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3	= 0;
-	pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7	= 0;
-	pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3	= 0;
-	pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7	= 0;
-	pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3	= 0;
-	pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7	= 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd		= 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3	= 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7	= 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3	= 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7	= 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
+									 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
+									 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
 
-	pm8001_ha->main_cfg_tbl.upper_event_log_addr		=
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr		=
 		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
-	pm8001_ha->main_cfg_tbl.lower_event_log_addr		=
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr		=
 		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
-	pm8001_ha->main_cfg_tbl.event_log_size	= PM8001_EVENT_LOG_SIZE;
-	pm8001_ha->main_cfg_tbl.event_log_option		= 0x01;
-	pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr	=
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size		=
+		PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option		= 0x01;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr	=
 		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
-	pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr	=
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr	=
 		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
-	pm8001_ha->main_cfg_tbl.iop_event_log_size	= PM8001_EVENT_LOG_SIZE;
-	pm8001_ha->main_cfg_tbl.iop_event_log_option		= 0x01;
-	pm8001_ha->main_cfg_tbl.fatal_err_interrupt		= 0x01;
-	for (i = 0; i < qn; i++) {
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size		=
+		PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option		= 0x01;
+	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt		= 0x01;
+	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
 			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
-			pm8001_ha->memoryMap.region[IB].phys_addr_hi;
+			pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
-		pm8001_ha->memoryMap.region[IB].phys_addr_lo;
+		pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
 		pm8001_ha->inbnd_q_tbl[i].base_virt		=
-			(u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
+			(u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
 		pm8001_ha->inbnd_q_tbl[i].total_length		=
-			pm8001_ha->memoryMap.region[IB].total_len;
+			pm8001_ha->memoryMap.region[IB + i].total_len;
 		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr	=
-			pm8001_ha->memoryMap.region[CI].phys_addr_hi;
+			pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr	=
-			pm8001_ha->memoryMap.region[CI].phys_addr_lo;
+			pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
 		pm8001_ha->inbnd_q_tbl[i].ci_virt		=
-			pm8001_ha->memoryMap.region[CI].virt_ptr;
+			pm8001_ha->memoryMap.region[CI + i].virt_ptr;
 		offsetib = i * 0x20;
 		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar		=
 			get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@
 		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
 		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
 	}
-	for (i = 0; i < qn; i++) {
+	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
 			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
-			pm8001_ha->memoryMap.region[OB].phys_addr_hi;
+			pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
-			pm8001_ha->memoryMap.region[OB].phys_addr_lo;
+			pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
 		pm8001_ha->outbnd_q_tbl[i].base_virt		=
-			(u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
+			(u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
 		pm8001_ha->outbnd_q_tbl[i].total_length		=
-			pm8001_ha->memoryMap.region[OB].total_len;
+			pm8001_ha->memoryMap.region[OB + i].total_len;
 		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr	=
-			pm8001_ha->memoryMap.region[PI].phys_addr_hi;
+			pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr	=
-			pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+			pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
 		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay	=
-			0 | (10 << 16) | (0 << 24);
+			0 | (10 << 16) | (i << 24);
 		pm8001_ha->outbnd_q_tbl[i].pi_virt		=
-			pm8001_ha->memoryMap.region[PI].virt_ptr;
+			pm8001_ha->memoryMap.region[PI + i].virt_ptr;
 		offsetob = i * 0x24;
 		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar		=
 			get_pci_bar_index(pm8001_mr32(addressob,
@@ -250,42 +283,51 @@
 {
 	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
 	pm8001_mw32(address, 0x24,
-		pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
 	pm8001_mw32(address, 0x28,
-		pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
 	pm8001_mw32(address, 0x2C,
-		pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
 	pm8001_mw32(address, 0x30,
-		pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
 	pm8001_mw32(address, 0x34,
-		pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
 	pm8001_mw32(address, 0x38,
-		pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.
+					outbound_tgt_ITNexus_event_pid0_3);
 	pm8001_mw32(address, 0x3C,
-		pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.
+					outbound_tgt_ITNexus_event_pid4_7);
 	pm8001_mw32(address, 0x40,
-		pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.
+					outbound_tgt_ssp_event_pid0_3);
 	pm8001_mw32(address, 0x44,
-		pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.
+					outbound_tgt_ssp_event_pid4_7);
 	pm8001_mw32(address, 0x48,
-		pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.
+					outbound_tgt_smp_event_pid0_3);
 	pm8001_mw32(address, 0x4C,
-		pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.
+					outbound_tgt_smp_event_pid4_7);
 	pm8001_mw32(address, 0x50,
-		pm8001_ha->main_cfg_tbl.upper_event_log_addr);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
 	pm8001_mw32(address, 0x54,
-		pm8001_ha->main_cfg_tbl.lower_event_log_addr);
-	pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
-	pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
+	pm8001_mw32(address, 0x58,
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
+	pm8001_mw32(address, 0x5C,
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
 	pm8001_mw32(address, 0x60,
-		pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
 	pm8001_mw32(address, 0x64,
-		pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
-	pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
+	pm8001_mw32(address, 0x68,
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
 	pm8001_mw32(address, 0x6C,
-		pm8001_ha->main_cfg_tbl.iop_event_log_option);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
 	pm8001_mw32(address, 0x70,
-		pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
+		pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
 }
 
 /**
@@ -597,6 +639,19 @@
  */
 static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 {
+	u8 i = 0;
+	u16 deviceid;
+	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+	/* 8081 controllers need BAR shift to access MPI space
+	 * as this is shared with BIOS data */
+	if (deviceid == 0x8081) {
+		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Shift Bar4 to 0x%x failed\n",
+					GSM_SM_BASE));
+			return -1;
+		}
+	}
 	/* check the firmware status */
 	if (-1 == check_fw_ready(pm8001_ha)) {
 		PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@
 	read_outbnd_queue_table(pm8001_ha);
 	/* update main config table ,inbound table and outbound table */
 	update_main_config_table(pm8001_ha);
-	update_inbnd_queue_table(pm8001_ha, 0);
-	update_outbnd_queue_table(pm8001_ha, 0);
-	mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-	/* 7->130ms, 34->500ms, 119->1.5s */
-	mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+	for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+		update_inbnd_queue_table(pm8001_ha, i);
+	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+		update_outbnd_queue_table(pm8001_ha, i);
+	/* 8081 controllers do not require these operations */
+	if (deviceid != 0x8081) {
+		mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+		/* 7->130ms, 34->500ms, 119->1.5s */
+		mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+	}
 	/* notify firmware update finished and check initialization status */
 	if (0 == mpi_init_check(pm8001_ha)) {
 		PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@
 	u32 max_wait_count;
 	u32 value;
 	u32 gst_len_mpistate;
+	u16 deviceid;
+	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+	if (deviceid == 0x8081) {
+		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+			PM8001_FAIL_DBG(pm8001_ha,
+				pm8001_printk("Shift Bar4 to 0x%x failed\n",
+					GSM_SM_BASE));
+			return -1;
+		}
+	}
 	init_pci_device_addresses(pm8001_ha);
 	/* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
 	table is stopped */
@@ -740,14 +810,14 @@
  * pm8001_chip_soft_rst - soft reset the PM8001 chip so that all FW
  * register status is cleared back to its original state.
  * @pm8001_ha: our hba card information
- * @signature: signature in host scratch pad0 register.
  */
 static int
-pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 {
 	u32	regVal, toggleVal;
 	u32	max_wait_count;
 	u32	regVal1, regVal2, regVal3;
+	u32	signature = 0x252acbcd; /* for host scratch pad0 */
 	unsigned long flags;
 
 	/* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@
  * pm8001_chip_iounmap - which maped when initialized.
  * @pm8001_ha: our hba card information
  */
-static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 {
 	s8 bar, logical = 0;
 	for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
 	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
 	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@
 }
 
 /**
- * mpi_msg_free_get- get the free message buffer for transfer inbound queue.
+ * pm8001_mpi_msg_free_get - get the free message buffer for transfer
+ * inbound queue.
  * @circularQ: the inbound queue  we want to transfer to HBA.
  * @messageSize: the message size of this transfer, normally it is 64 bytes
  * @messagePtr: the pointer to message.
  */
-static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
 			    u16 messageSize, void **messagePtr)
 {
 	u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@
 	u8 bcCount = 1; /* only support single buffer */
 
 	/* Check if the requested message size can be allocated in this queue */
-	if (messageSize > 64) {
+	if (messageSize > IOMB_SIZE_SPCV) {
 		*messagePtr = NULL;
 		return -1;
 	}
@@ -1245,7 +1316,7 @@
 		return -1;
 	}
 	/* get memory IOMB buffer address */
-	offset = circularQ->producer_idx * 64;
+	offset = circularQ->producer_idx * messageSize;
 	/* increment to next bcCount element */
 	circularQ->producer_idx = (circularQ->producer_idx + bcCount)
 				% PM8001_MPI_QUEUE;
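
With the hard-coded 64 replaced by messageSize, the same slot arithmetic
now serves both the 64-byte SPC and 128-byte SPCv IOMB formats. A minimal
userspace sketch of the ring math, with simplified types and the usual
one-slot-gap full test (the full test itself is outside this hunk, so
treat it as an assumption):

#include <stddef.h>

/* Simplified stand-in for the driver's inbound_queue_table. */
struct example_inbq {
	unsigned int producer_idx;	/* next slot the host fills  */
	unsigned int consumer_index;	/* last slot the FW consumed */
	unsigned int iomb_size;		/* 64 on SPC, 128 on SPCv    */
	unsigned char *base_virt;	/* ring base address         */
};

static void *example_alloc_slot(struct example_inbq *q, unsigned int depth)
{
	unsigned int offset;

	/* Full when advancing the producer would hit the consumer. */
	if ((q->producer_idx + 1) % depth == q->consumer_index)
		return NULL;
	offset = q->producer_idx * q->iomb_size;   /* byte offset of slot */
	q->producer_idx = (q->producer_idx + 1) % depth;
	return q->base_virt + offset;
}
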
@@ -1257,29 +1328,30 @@
 }
 
 /**
- * mpi_build_cmd- build the message queue for transfer, update the PI to FW
- * to tell the fw to get this message from IOMB.
+ * pm8001_mpi_build_cmd - build the message queue for transfer, update the PI
+ * to FW to tell the fw to get this message from IOMB.
  * @pm8001_ha: our hba card information
  * @circularQ: the inbound queue we want to transfer to HBA.
  * @opCode: the operation code represents commands which LLDD and fw recognized.
  * @payload: the command payload of each operation command.
  */
-static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
 			 struct inbound_queue_table *circularQ,
-			 u32 opCode, void *payload)
+			 u32 opCode, void *payload, u32 responseQueue)
 {
 	u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
-	u32 responseQueue = 0;
 	void *pMessage;
 
-	if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
+	if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+		&pMessage) < 0) {
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("No free mpi buffer\n"));
 		return -1;
 	}
 	BUG_ON(!payload);
 	/*Copy to the payload*/
-	memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+	memcpy(pMessage, payload, (pm8001_ha->iomb_size -
+				sizeof(struct mpi_msg_hdr)));
 
 	/*Build the header*/
 	Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@
 	pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
 		circularQ->pi_offset, circularQ->producer_idx);
 	PM8001_IO_DBG(pm8001_ha,
-		pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
-		circularQ->consumer_index));
+		pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+			responseQueue, opCode, circularQ->producer_idx,
+			circularQ->consumer_index));
 	return 0;
 }
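
For reference, a small sketch decoding the header fields that are actually
visible in the hunk above (valid bit, high-priority bit, buffer count); the
remaining header fields are cut off by the hunk and deliberately not
reconstructed here:

#include <stdint.h>
#include <stdio.h>

static void example_decode_iomb_header(uint32_t header)
{
	unsigned valid     = (header >> 31) & 0x1;	/* message valid */
	unsigned hpriority = (header >> 30) & 0x1;	/* high priority */
	unsigned bc        = (header >> 24) & 0x1f;	/* buffer count  */

	printf("valid=%u hpriority=%u bc=%u\n", valid, hpriority, bc);
}
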
 
-static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 			    struct outbound_queue_table *circularQ, u8 bc)
 {
 	u32 producer_index;
@@ -1305,7 +1378,7 @@
 
 	msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
 	pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
-				circularQ->consumer_idx * 64);
+				circularQ->consumer_idx * pm8001_ha->iomb_size);
 	if (pOutBoundMsgHeader != msgHeader) {
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@
 }
 
 /**
- * mpi_msg_consume- get the MPI message from  outbound queue message table.
+ * pm8001_mpi_msg_consume - get the MPI message from the outbound queue
+ * message table.
  * @pm8001_ha: our hba card information
  * @circularQ: the outbound queue  table.
  * @messagePtr1: the message contents of this outbound message.
  * @pBC: the message size.
  */
-static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			   struct outbound_queue_table *circularQ,
 			   void **messagePtr1, u8 *pBC)
 {
@@ -1356,7 +1430,7 @@
 			/*Get the pointer to the circular queue buffer element*/
 			msgHeader = (struct mpi_msg_hdr *)
 				(circularQ->base_virt +
-				circularQ->consumer_idx * 64);
+				circularQ->consumer_idx * pm8001_ha->iomb_size);
 			/* read header */
 			header_tmp = pm8001_read_32(msgHeader);
 			msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@
 	return MPI_IO_STATUS_BUSY;
 }
 
-static void pm8001_work_fn(struct work_struct *work)
+void pm8001_work_fn(struct work_struct *work)
 {
 	struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
 	struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@
 	pm8001_dev = pw->data; /* Most stash device structure */
 	if ((pm8001_dev == NULL)
 	 || ((pw->handler != IO_XFER_ERROR_BREAK)
-	  && (pm8001_dev->dev_type == NO_DEVICE))) {
+	  && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
 		kfree(pw);
 		return;
 	}
@@ -1596,7 +1670,7 @@
 	}	break;
 	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
 		dev = pm8001_dev->sas_device;
-		pm8001_I_T_nexus_reset(dev);
+		pm8001_I_T_nexus_event_handler(dev);
 		break;
 	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
 		dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@
 	kfree(pw);
 }
 
-static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
 			       int handler)
 {
 	struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@
 	return ret;
 }
 
+static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_ha_dev)
+{
+	int res;
+	u32 ccb_tag;
+	struct pm8001_ccb_info *ccb;
+	struct sas_task *task = NULL;
+	struct task_abort_req task_abort;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SATA_ABORT;
+	int ret;
+
+	if (!pm8001_ha_dev) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+		return;
+	}
+
+	task = sas_alloc_slow_task(GFP_ATOMIC);
+
+	if (!task) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+						"allocate task\n"));
+		return;
+	}
+
+	task->task_done = pm8001_task_done;
+
+	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+	if (res)
+		return;
+
+	ccb = &pm8001_ha->ccb_info[ccb_tag];
+	ccb->device = pm8001_ha_dev;
+	ccb->ccb_tag = ccb_tag;
+	ccb->task = task;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	memset(&task_abort, 0, sizeof(task_abort));
+	task_abort.abort_all = cpu_to_le32(1);
+	task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+	task_abort.tag = cpu_to_le32(ccb_tag);
+
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_ha_dev)
+{
+	struct sata_start_req sata_cmd;
+	int res;
+	u32 ccb_tag;
+	struct pm8001_ccb_info *ccb;
+	struct sas_task *task = NULL;
+	struct host_to_dev_fis fis;
+	struct domain_device *dev;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+	task = sas_alloc_slow_task(GFP_ATOMIC);
+
+	if (!task) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("cannot allocate task !!!\n"));
+		return;
+	}
+	task->task_done = pm8001_task_done;
+
+	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+	if (res) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("cannot allocate tag !!!\n"));
+		return;
+	}
+
+	/* allocate domain device by ourselves as libsas
+	 * is not going to provide one
+	 */
+	dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+	if (!dev) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Domain device cannot be allocated\n"));
+		sas_free_task(task);
+		return;
+	} else {
+		task->dev = dev;
+		task->dev->lldd_dev = pm8001_ha_dev;
+	}
+
+	ccb = &pm8001_ha->ccb_info[ccb_tag];
+	ccb->device = pm8001_ha_dev;
+	ccb->ccb_tag = ccb_tag;
+	ccb->task = task;
+	pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+	pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+	memset(&sata_cmd, 0, sizeof(sata_cmd));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	/* construct read log FIS */
+	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+	fis.fis_type = 0x27;
+	fis.flags = 0x80;
+	fis.command = ATA_CMD_READ_LOG_EXT;
+	fis.lbal = 0x10;
+	fis.sector_count = 0x1;
+
+	sata_cmd.tag = cpu_to_le32(ccb_tag);
+	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+	sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+	memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+	res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
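The FIS built above is a standard ATA READ LOG EXT for log page 10h, the
NCQ command error log a host reads after a queued command fails; 0x10
matches the kernel's ATA_LOG_SATA_NCQ. The constants restated with
symbolic names, as a sketch of the same five fields rather than the
driver's actual struct:

#include <stdint.h>

#define FIS_TYPE_REG_H2D	0x27	/* Register FIS, host to device   */
#define FIS_FLAG_CMD		0x80	/* C bit: command register update */
#define NCQ_ERROR_LOG_PAGE	0x10	/* log page 10h (ATA_LOG_SATA_NCQ)*/

struct example_h2d_fis {
	uint8_t fis_type;	/* FIS_TYPE_REG_H2D         */
	uint8_t flags;		/* FIS_FLAG_CMD             */
	uint8_t command;	/* ATA_CMD_READ_LOG_EXT     */
	uint8_t lbal;		/* NCQ_ERROR_LOG_PAGE       */
	uint8_t sector_count;	/* 1 sector (512-byte page) */
};
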
 /**
  * mpi_ssp_completion- process the event that FW response to the SSP request.
  * @pm8001_ha: our hba card information
@@ -1867,7 +2058,7 @@
 		break;
 	}
 	PM8001_IO_DBG(pm8001_ha,
-		pm8001_printk("scsi_status = %x \n ",
+		pm8001_printk("scsi_status = %x\n ",
 		psspPayload->ssp_resp_iu.status));
 	spin_lock_irqsave(&t->task_state_lock, flags);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@
 	status = le32_to_cpu(psataPayload->status);
 	tag = le32_to_cpu(psataPayload->tag);
 
+	if (!tag) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("tag null\n"));
+		return;
+	}
 	ccb = &pm8001_ha->ccb_info[tag];
 	param = le32_to_cpu(psataPayload->param);
-	t = ccb->task;
-	ts = &t->task_status;
-	pm8001_dev = ccb->device;
-	if (status)
+	if (ccb) {
+		t = ccb->task;
+		pm8001_dev = ccb->device;
+	} else {
 		PM8001_FAIL_DBG(pm8001_ha,
-			pm8001_printk("sata IO status 0x%x\n", status));
-	if (unlikely(!t || !t->lldd_task || !t->dev))
+			pm8001_printk("ccb null\n"));
 		return;
+	}
+
+	if (t) {
+		if (t->dev && (t->dev->lldd_dev))
+			pm8001_dev = t->dev->lldd_dev;
+	} else {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task null\n"));
+		return;
+	}
+
+	if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+		&& unlikely(!t || !t->lldd_task || !t->dev)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task or dev null\n"));
+		return;
+	}
+
+	ts = &t->task_status;
+	if (!ts) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("ts null\n"));
+		return;
+	}
 
 	switch (status) {
 	case IO_SUCCESS:
@@ -2113,6 +2332,19 @@
 		if (param == 0) {
 			ts->resp = SAS_TASK_COMPLETE;
 			ts->stat = SAM_STAT_GOOD;
+			/* check if response is for SEND READ LOG */
+			if (pm8001_dev &&
+				(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+				/* set new bit for abort_all */
+				pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+				/* clear bit for read log */
+				pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+				pm8001_send_abort_all(pm8001_ha, pm8001_dev);
+				/* Free the tag */
+				pm8001_tag_free(pm8001_ha, tag);
+				sas_free_task(t);
+				return;
+			}
 		} else {
 			u8 len;
 			ts->resp = SAS_TASK_COMPLETE;
@@ -2424,6 +2656,29 @@
 	unsigned long flags;
 
 	ccb = &pm8001_ha->ccb_info[tag];
+
+	if (ccb) {
+		t = ccb->task;
+		pm8001_dev = ccb->device;
+	} else {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("No CCB !!!. returning\n"));
+	}
+	if (event)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("SATA EVENT 0x%x\n", event));
+
+	/* Check if this is NCQ error */
+	if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+		/* find device using device id */
+		pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+		/* send read log extension */
+		if (pm8001_dev)
+			pm8001_send_read_log(pm8001_ha, pm8001_dev);
+		return;
+	}
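
Together with pm8001_send_read_log() and pm8001_send_abort_all() above,
this completes the SATA NCQ error recovery sequence; a condensed outline,
read from the code in this patch rather than from a spec:

/*
 * 1. FW reports IO_XFER_ERROR_ABORTED_NCQ_MODE for a device.
 * 2. The driver looks the device up by id and issues READ LOG EXT
 *    page 10h (pm8001_send_read_log), setting NCQ_READ_LOG_FLAG and
 *    NCQ_2ND_RLE_FLAG on it.
 * 3. When the read-log command completes, mpi_sata_completion swaps
 *    NCQ_READ_LOG_FLAG for NCQ_ABORT_ALL_FLAG and sends an abort-all
 *    for the device (pm8001_send_abort_all).
 * 4. pm8001_mpi_task_abort_resp then clears NCQ_ABORT_ALL_FLAG and
 *    frees the internally allocated tag and task.
 */
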
+
+	ccb = &pm8001_ha->ccb_info[tag];
 	t = ccb->task;
 	pm8001_dev = ccb->device;
 	if (event)
@@ -2432,9 +2687,9 @@
 	if (unlikely(!t || !t->lldd_task || !t->dev))
 		return;
 	ts = &t->task_status;
-	PM8001_IO_DBG(pm8001_ha,
-		pm8001_printk("port_id = %x,device_id = %x\n",
-		port_id, dev_id));
+	PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+		"port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+		port_id, dev_id, tag, event));
 	switch (event) {
 	case IO_OVERFLOW:
 		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@
 	}
 }
 
-static void
-mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+		void *piomb)
 {
 	struct set_dev_state_resp *pPayload =
 		(struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@
 	pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct get_nvm_data_resp *pPayload =
 		(struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@
 	pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void
+pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct fw_control_ex	*fw_control_context;
 	struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@
 	pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct local_phy_ctl_resp *pPayload =
 		(struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@
  * while receive a broadcast(change) primitive just tell the sas
  * layer to discover the changed domain rather than the whole domain.
  */
-static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 {
 	struct pm8001_phy *phy = &pm8001_ha->phy[i];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@
 }
 
 /* Get the link rate speed  */
-static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
 {
 	struct sas_phy *sas_phy = phy->sas_phy.phy;
 
@@ -3025,7 +3279,7 @@
  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
  * buffer.
  */
-static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
 	u8 *sas_addr)
 {
 	if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@
 		((phyId & 0x0F) << 4) | (port_id & 0x0F));
 	payload.param0 = cpu_to_le32(param0);
 	payload.param1 = cpu_to_le32(param1);
-	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@
 		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
 			PHY_NOTIFY_ENABLE_SPINUP);
 		port->port_attached = 1;
-		get_lrate_mode(phy, link_rate);
+		pm8001_get_lrate_mode(phy, link_rate);
 		break;
 	case SAS_EDGE_EXPANDER_DEVICE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("expander device.\n"));
 		port->port_attached = 1;
-		get_lrate_mode(phy, link_rate);
+		pm8001_get_lrate_mode(phy, link_rate);
 		break;
 	case SAS_FANOUT_EXPANDER_DEVICE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("fanout expander device.\n"));
 		port->port_attached = 1;
-		get_lrate_mode(phy, link_rate);
+		pm8001_get_lrate_mode(phy, link_rate);
 		break;
 	default:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@
 		" phy id = %d\n", port_id, phy_id));
 	port->port_state =  portstate;
 	port->port_attached = 1;
-	get_lrate_mode(phy, link_rate);
+	pm8001_get_lrate_mode(phy, link_rate);
 	phy->phy_type |= PORT_TYPE_SATA;
 	phy->phy_attached = 1;
 	phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@
 		sizeof(struct dev_to_host_fis));
 	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
 	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
-	phy->identify.device_type = SATA_DEV;
+	phy->identify.device_type = SAS_SATA_DEV;
 	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
 	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
 	pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@
 }
 
 /**
- * mpi_reg_resp -process register device ID response.
+ * pm8001_mpi_reg_resp - process register device ID response.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  *
@@ -3269,7 +3523,7 @@
  * has assigned, from now,inter-communication with FW is no longer using the
  * SAS address, use device ID which FW assigned.
  */
-static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	u32 status;
 	u32 device_id;
@@ -3331,7 +3585,7 @@
 	return 0;
 }
 
-static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	u32 status;
 	u32 device_id;
@@ -3347,8 +3601,13 @@
 	return 0;
 }
 
-static int
-mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+/**
+ * pm8001_mpi_fw_flash_update_resp - response from FW for flash update
+ * command.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+		void *piomb)
 {
 	u32 status;
 	struct fw_control_ex	fw_control_context;
@@ -3403,10 +3662,6 @@
 		break;
 	}
 	ccb->fw_control_context->fw_control->retcode = status;
-	pci_free_consistent(pm8001_ha->pdev,
-			fw_control_context.len,
-			fw_control_context.virtAddr,
-			fw_control_context.phys_addr);
 	complete(pm8001_ha->nvmd_completion);
 	ccb->task = NULL;
 	ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@
 	return 0;
 }
 
-static int
-mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	u32 status;
 	int i;
@@ -3431,8 +3685,7 @@
 	return 0;
 }
 
-static int
-mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct sas_task *t;
 	struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@
 	u32 status ;
 	u32 tag, scp;
 	struct task_status_struct *ts;
+	struct pm8001_device *pm8001_dev;
 
 	struct task_abort_resp *pPayload =
 		(struct task_abort_resp *)(piomb + 4);
 
 	status = le32_to_cpu(pPayload->status);
 	tag = le32_to_cpu(pPayload->tag);
+	if (!tag) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk(" TAG NULL. RETURNING !!!"));
+		return -1;
+	}
+
 	scp = le32_to_cpu(pPayload->scp);
 	ccb = &pm8001_ha->ccb_info[tag];
 	t = ccb->task;
-	PM8001_IO_DBG(pm8001_ha,
-		pm8001_printk(" status = 0x%x\n", status));
-	if (t == NULL)
+	pm8001_dev = ccb->device; /* retrieve device */
+
+	if (!t) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk(" TASK NULL. RETURNING !!!\n"));
 		return -1;
+	}
 	ts = &t->task_status;
 	if (status != 0)
 		PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@
 	spin_unlock_irqrestore(&t->task_state_lock, flags);
 	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	mb();
-	t->task_done(t);
+
+	if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t)	{
+		pm8001_tag_free(pm8001_ha, tag);
+		sas_free_task(t);
+		/* clear the flag */
+		pm8001_dev->id &= 0xBFFFFFFF;
+	} else
+		t->task_done(t);
+
 	return 0;
 }
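
The literal masks used here and in the SATA start path (0x7FFFFFFF,
0xBFFFFFFF, 0xDFFFFFFF) each clear one high bit of the device id field,
which implies the flag values sketched below; these are inferred from the
masks, not quoted from the driver's header:

#include <stdint.h>

#define EX_NCQ_READ_LOG_FLAG	0x80000000u	/* cleared by & 0x7FFFFFFF */
#define EX_NCQ_ABORT_ALL_FLAG	0x40000000u	/* cleared by & 0xBFFFFFFF */
#define EX_NCQ_2ND_RLE_FLAG	0x20000000u	/* cleared by & 0xDFFFFFFF */

static inline uint32_t ex_clear_flag(uint32_t id, uint32_t flag)
{
	return id & ~flag;	/* equivalent to the literal masks */
}
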
 
@@ -3727,17 +3998,17 @@
 	case OPC_OUB_LOCAL_PHY_CNTRL:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
-		mpi_local_phy_ctl(pm8001_ha, piomb);
+		pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_DEV_REGIST:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_DEV_REGIST\n"));
-		mpi_reg_resp(pm8001_ha, piomb);
+		pm8001_mpi_reg_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_DEREG_DEV:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("unregister the device\n"));
-		mpi_dereg_resp(pm8001_ha, piomb);
+		pm8001_mpi_dereg_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_GET_DEV_HANDLE:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@
 	case OPC_OUB_FW_FLASH_UPDATE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
-		mpi_fw_flash_update_resp(pm8001_ha, piomb);
+		pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_GPIO_RESPONSE:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@
 	case OPC_OUB_GENERAL_EVENT:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
-		mpi_general_event(pm8001_ha, piomb);
+		pm8001_mpi_general_event(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_SSP_ABORT_RSP:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
-		mpi_task_abort_resp(pm8001_ha, piomb);
+		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_SATA_ABORT_RSP:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
-		mpi_task_abort_resp(pm8001_ha, piomb);
+		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_SAS_DIAG_MODE_START_END:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@
 	case OPC_OUB_SMP_ABORT_RSP:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
-		mpi_task_abort_resp(pm8001_ha, piomb);
+		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_GET_NVMD_DATA:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
-		mpi_get_nvmd_resp(pm8001_ha, piomb);
+		pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_SET_NVMD_DATA:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
-		mpi_set_nvmd_resp(pm8001_ha, piomb);
+		pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_DEVICE_HANDLE_REMOVAL:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@
 	case OPC_OUB_SET_DEVICE_STATE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
-		mpi_set_dev_state_resp(pm8001_ha, piomb);
+		pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
 		break;
 	case OPC_OUB_GET_DEVICE_STATE:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@
 	}
 }
 
-static int process_oq(struct pm8001_hba_info *pm8001_ha)
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 	struct outbound_queue_table *circularQ;
 	void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
-	circularQ = &pm8001_ha->outbnd_q_tbl[0];
+	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
 	do {
-		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
 		if (MPI_IO_STATUS_SUCCESS == ret) {
 			/* process the outbound message */
 			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
 			/* free the message from the outbound circular buffer */
-			mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+			pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+							circularQ, bc);
 		}
 		if (MPI_IO_STATUS_BUSY == ret) {
 			/* Update the producer index from SPC */
@@ -3903,7 +4175,7 @@
 	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
 	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
 };
-static void
+void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 {
 	int i;
@@ -3978,7 +4250,7 @@
 	smp_cmd.long_smp_req.long_resp_size =
 		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
 	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
-	mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
 	return 0;
 
 err_out_2:
@@ -4042,7 +4314,7 @@
 		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
 		ssp_cmd.esgl = 0;
 	}
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
 	return ret;
 }
 
@@ -4060,6 +4332,7 @@
 	u32 ATAP = 0x0;
 	u32 dir;
 	struct inbound_queue_table *circularQ;
+	unsigned long flags;
 	u32  opc = OPC_INB_SATA_HOST_OPSTART;
 	memset(&sata_cmd, 0, sizeof(sata_cmd));
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@
 			PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
 		}
 	}
-	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
 		ncg_tag = hdr_tag;
+	}
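
The hdr_tag << 3 above follows the SATA FPDMA layout, where the NCQ tag
occupies bits 7:3 of the FIS count field. A tiny encode/decode sketch:

#include <stdint.h>

static inline uint8_t ex_count_set_ncq_tag(uint8_t count, uint8_t tag)
{
	return (uint8_t)((count & 0x07) | ((tag & 0x1f) << 3));
}

static inline uint8_t ex_count_get_ncq_tag(uint8_t count)
{
	return count >> 3;	/* bits 7:3 hold the tag */
}
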
 	dir = data_dir_flags[task->data_dir] << 8;
 	sata_cmd.tag = cpu_to_le32(tag);
 	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -4112,7 +4387,55 @@
 		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
 		sata_cmd.esgl = 0;
 	}
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+
+	/* Check for read log for failed drive and return */
+	if (sata_cmd.sata_fis.command == 0x2f) {
+		if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+			(pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+			(pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+			struct task_status_struct *ts;
+
+			pm8001_ha_dev->id &= 0xDFFFFFFF;
+			ts = &task->task_status;
+
+			spin_lock_irqsave(&task->task_state_lock, flags);
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_STAT_GOOD;
+			task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+			task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+			task->task_state_flags |= SAS_TASK_STATE_DONE;
+			if (unlikely((task->task_state_flags &
+					SAS_TASK_STATE_ABORTED))) {
+				spin_unlock_irqrestore(&task->task_state_lock,
+							flags);
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("task 0x%p resp 0x%x "
+					" stat 0x%x but aborted by upper layer "
+					"\n", task, ts->resp, ts->stat));
+				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+			} else if (task->uldd_task) {
+				spin_unlock_irqrestore(&task->task_state_lock,
+							flags);
+				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+				mb();/* ditto */
+				spin_unlock_irq(&pm8001_ha->lock);
+				task->task_done(task);
+				spin_lock_irq(&pm8001_ha->lock);
+				return 0;
+			} else if (!task->uldd_task) {
+				spin_unlock_irqrestore(&task->task_state_lock,
+							flags);
+				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+				mb();/*ditto*/
+				spin_unlock_irq(&pm8001_ha->lock);
+				task->task_done(task);
+				spin_lock_irq(&pm8001_ha->lock);
+				return 0;
+			}
+		}
+	}
+
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
 	return ret;
 }
 
@@ -4142,12 +4465,12 @@
 	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
 		LINKMODE_AUTO |	LINKRATE_15 |
 		LINKRATE_30 | LINKRATE_60 | phy_id);
-	payload.sas_identify.dev_type = SAS_END_DEV;
+	payload.sas_identify.dev_type = SAS_END_DEVICE;
 	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
 	memcpy(payload.sas_identify.sas_addr,
 		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
 	payload.sas_identify.phy_id = phy_id;
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
 	return ret;
 }
 
@@ -4157,7 +4480,7 @@
  * @num: the inbound queue number
  * @phy_id: the phy id which we wanted to start up.
  */
-static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
 	u8 phy_id)
 {
 	struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@
 	memset(&payload, 0, sizeof(payload));
 	payload.tag = cpu_to_le32(tag);
 	payload.phy_id = cpu_to_le32(phy_id);
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
 	return ret;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
 static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
 	struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@
 	if (flag == 1)
 		stp_sspsmp_sata = 0x02; /*direct attached sata */
 	else {
-		if (pm8001_dev->dev_type == SATA_DEV)
+		if (pm8001_dev->dev_type == SAS_SATA_DEV)
 			stp_sspsmp_sata = 0x00; /* stp*/
-		else if (pm8001_dev->dev_type == SAS_END_DEV ||
-			pm8001_dev->dev_type == EDGE_DEV ||
-			pm8001_dev->dev_type == FANOUT_DEV)
+		else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+			pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+			pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
 			stp_sspsmp_sata = 0x01; /*ssp or smp*/
 	}
 	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@
 		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
 	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
 		SAS_ADDR_SIZE);
-	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 	return rc;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
-static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
 	u32 device_id)
 {
 	struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@
 	payload.device_id = cpu_to_le32(device_id);
 	PM8001_MSG_DBG(pm8001_ha,
 		pm8001_printk("unregister device device_id = %d\n", device_id));
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 	return ret;
 }
 
@@ -4272,7 +4595,7 @@
 	payload.tag = cpu_to_le32(1);
 	payload.phyop_phyid =
 		cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 	return ret;
 }
 
@@ -4296,11 +4619,11 @@
  * @stat: stat.
  */
 static irqreturn_t
-pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-	pm8001_chip_interrupt_disable(pm8001_ha);
-	process_oq(pm8001_ha);
-	pm8001_chip_interrupt_enable(pm8001_ha);
+	pm8001_chip_interrupt_disable(pm8001_ha, vec);
+	process_oq(pm8001_ha, vec);
+	pm8001_chip_interrupt_enable(pm8001_ha, vec);
 	return IRQ_HANDLED;
 }
 
@@ -4322,7 +4645,7 @@
 		task_abort.device_id = cpu_to_le32(dev_id);
 		task_abort.tag = cpu_to_le32(cmd_tag);
 	}
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
 	return ret;
 }
 
@@ -4331,16 +4654,17 @@
  * @task: the task we wanted to aborted.
  * @flag: the abort flag.
  */
-static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
 	struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
 {
 	u32 opc, device_id;
 	int rc = TMF_RESP_FUNC_FAILED;
-	PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
-		" = %x", cmd_tag, task_tag));
-	if (pm8001_dev->dev_type == SAS_END_DEV)
+	PM8001_EH_DBG(pm8001_ha,
+		pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
+			cmd_tag, task_tag));
+	if (pm8001_dev->dev_type == SAS_END_DEVICE)
 		opc = OPC_INB_SSP_ABORT;
-	else if (pm8001_dev->dev_type == SATA_DEV)
+	else if (pm8001_dev->dev_type == SAS_SATA_DEV)
 		opc = OPC_INB_SATA_ABORT;
 	else
 		opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@
  * @ccb: the ccb information.
  * @tmf: task management function.
  */
-static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
 	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
 {
 	struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@
 	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
 	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
 	return ret;
 }
 
-static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
 	void *payload)
 {
 	u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@
 	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
 	if (!fw_control_context)
 		return -ENOMEM;
-	fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+	fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
 	fw_control_context->len = ioctl_payload->length;
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@
 	default:
 		break;
 	}
-	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
 	return rc;
 }
 
-static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
 	void *payload)
 {
 	u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@
 		return -ENOMEM;
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
-		ioctl_payload->func_specific,
+		&ioctl_payload->func_specific,
 		ioctl_payload->length);
 	memset(&nvmd_req, 0, sizeof(nvmd_req));
 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@
 	default:
 		break;
 	}
-	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
 	return rc;
 }
 
@@ -4545,7 +4869,7 @@
  * @pm8001_ha: our hba card information.
  * @fw_flash_updata_info: firmware flash update param
  */
-static int
+int
 pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
 	void *fw_flash_updata_info, u32 tag)
 {
@@ -4567,11 +4891,11 @@
 		cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
 	payload.sgl_addr_hi =
 		cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
-	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 	return ret;
 }
 
-static int
+int
 pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
 	void *payload)
 {
@@ -4581,29 +4905,14 @@
 	int rc;
 	u32 tag;
 	struct pm8001_ccb_info *ccb;
-	void *buffer = NULL;
-	dma_addr_t phys_addr;
-	u32 phys_addr_hi;
-	u32 phys_addr_lo;
+	void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
+	dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
 	struct pm8001_ioctl_payload *ioctl_payload = payload;
 
 	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
 	if (!fw_control_context)
 		return -ENOMEM;
-	fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
-	if (fw_control->len != 0) {
-		if (pm8001_mem_alloc(pm8001_ha->pdev,
-			(void **)&buffer,
-			&phys_addr,
-			&phys_addr_hi,
-			&phys_addr_lo,
-			fw_control->len, 0) != 0) {
-				PM8001_FAIL_DBG(pm8001_ha,
-					pm8001_printk("Mem alloc failure\n"));
-				kfree(fw_control_context);
-				return -ENOMEM;
-		}
-	}
+	fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
 	memcpy(buffer, fw_control->buffer, fw_control->len);
 	flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
 	flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -4613,6 +4922,7 @@
 	flash_update_info.total_image_len = fw_control->size;
 	fw_control_context->fw_control = fw_control;
 	fw_control_context->virtAddr = buffer;
+	fw_control_context->phys_addr = phys_addr;
 	fw_control_context->len = fw_control->len;
 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
 	if (rc) {
@@ -4627,7 +4937,7 @@
 	return rc;
 }
 
-static int
+int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
 	struct pm8001_device *pm8001_dev, u32 state)
 {
@@ -4648,7 +4958,7 @@
 	payload.tag = cpu_to_le32(tag);
 	payload.device_id = cpu_to_le32(pm8001_dev->device_id);
 	payload.nds = cpu_to_le32(state);
-	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 	return rc;
 
 }
@@ -4673,7 +4983,7 @@
 	payload.SSAHOLT = cpu_to_le32(0xd << 25);
 	payload.sata_hol_tmo = cpu_to_le32(80);
 	payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
-	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 	return rc;
 
 }
@@ -4706,4 +5016,3 @@
 	.set_dev_state_req	= pm8001_chip_set_dev_state_req,
 	.sas_re_init_req	= pm8001_chip_sas_re_initialization,
 };
-
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d437309..d7c1e20 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,6 +131,8 @@
 #define LINKRATE_30			(0x02 << 8)
 #define LINKRATE_60			(0x04 << 8)
 
+/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
+#define GSM_SM_BASE			0x4F0000
 struct mpi_msg_hdr{
 	__le32	header;	/* Bits [11:0]  - Message operation code */
 	/* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@
 
 
 #define OP_BITS 0x0000FF00
-#define ID_BITS 0x0000000F
+#define ID_BITS 0x000000FF
 
 /*
  * brief the data structure of PORT Control Command
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3d5e522..e4b9bc7 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -44,8 +44,16 @@
 
 static struct scsi_transport_template *pm8001_stt;
 
+/**
+ * chip info structure identifying key chip functionality:
+ * whether encryption is available, number of ports, and the
+ * HW-specific dispatch function reference
+ */
 static const struct pm8001_chip_info pm8001_chips[] = {
-	[chip_8001] = {  8, &pm8001_8001_dispatch,},
+	[chip_8001] = {0,  8, &pm8001_8001_dispatch,},
+	[chip_8008] = {0,  8, &pm8001_80xx_dispatch,},
+	[chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
+	[chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
+	[chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
 
@@ -155,37 +163,75 @@
 }
 
 #ifdef PM8001_USE_TASKLET
+
+/**
+ * tasklet for the (up to 64) msi-x interrupt handlers
+ * @opaque: the passed general host adapter struct
+ * Note: pm8001_tasklet is common for pm8001 & pm80xx
+ */
 static void pm8001_tasklet(unsigned long opaque)
 {
 	struct pm8001_hba_info *pm8001_ha;
+	u32 vec;
 	pm8001_ha = (struct pm8001_hba_info *)opaque;
 	if (unlikely(!pm8001_ha))
 		BUG_ON(1);
-	PM8001_CHIP_DISP->isr(pm8001_ha);
+	vec = pm8001_ha->int_vector;
+	PM8001_CHIP_DISP->isr(pm8001_ha, vec);
 }
 #endif
 
+static struct  pm8001_hba_info *outq_to_hba(u8 *outq)
+{
+	return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
+}
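
The pointer arithmetic in outq_to_hba() deserves a note: each outq[i] is
initialized to its own index i (see the MSI-X setup below), so outq - *outq
steps back to &outq[0] and container_of() recovers the enclosing
pm8001_hba_info. A standalone sketch of the same trick, using a
hypothetical struct:

#include <stddef.h>
#include <stdio.h>

#define ex_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ex_hba {
	int id;
	unsigned char outq[4];	/* outq[i] == i, set at init time */
};

static struct ex_hba *ex_outq_to_hba(unsigned char *outq)
{
	/* Step back *outq elements to outq[0], then to the struct. */
	return ex_container_of(outq - *outq, struct ex_hba, outq[0]);
}

int main(void)
{
	struct ex_hba hba = { .id = 7, .outq = { 0, 1, 2, 3 } };

	printf("%d\n", ex_outq_to_hba(&hba.outq[2])->id);	/* prints 7 */
	return 0;
}
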
 
- /**
-  * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
-  * dispatcher to handle each case.
-  * @irq: irq number.
-  * @opaque: the passed general host adapter struct
-  */
-static irqreturn_t pm8001_interrupt(int irq, void *opaque)
+/**
+ * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
+ * It obtains the vector number and calls the equivalent bottom
+ * half or services directly.
+ * @opaque: the passed outbound queue/vector. Host structure is
+ * retrieved from the same.
+ */
+static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
+{
+	struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
+	u8 outq = *(u8 *)opaque;
+	irqreturn_t ret = IRQ_HANDLED;
+	if (unlikely(!pm8001_ha))
+		return IRQ_NONE;
+	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+		return IRQ_NONE;
+	pm8001_ha->int_vector = outq;
+#ifdef PM8001_USE_TASKLET
+	tasklet_schedule(&pm8001_ha->tasklet);
+#else
+	ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
+#endif
+	return ret;
+}
+
+/**
+ * pm8001_interrupt_handler_intx - main INTx interrupt handler.
+ * @dev_id: sas_ha structure. The HBA is retrieved from the sas_ha structure.
+ */
+
+static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 {
 	struct pm8001_hba_info *pm8001_ha;
 	irqreturn_t ret = IRQ_HANDLED;
-	struct sas_ha_struct *sha = opaque;
+	struct sas_ha_struct *sha = dev_id;
 	pm8001_ha = sha->lldd_ha;
 	if (unlikely(!pm8001_ha))
 		return IRQ_NONE;
 	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
 		return IRQ_NONE;
+
+	pm8001_ha->int_vector = 0;
 #ifdef PM8001_USE_TASKLET
 	tasklet_schedule(&pm8001_ha->tasklet);
 #else
-	ret = PM8001_CHIP_DISP->isr(pm8001_ha);
+	ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
 #endif
 	return ret;
 }
@@ -195,10 +241,14 @@
  * @pm8001_ha:our hba structure.
  *
  */
-static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+			const struct pci_device_id *ent)
 {
 	int i;
 	spin_lock_init(&pm8001_ha->lock);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("pm8001_alloc: PHY:%x\n",
+				pm8001_ha->chip->n_phy));
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		pm8001_phy_init(pm8001_ha, i);
 		pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@
 	pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
 	pm8001_ha->memoryMap.region[IOP].alignment = 32;
 
-	/* MPI Memory region 3 for consumer Index of inbound queues */
-	pm8001_ha->memoryMap.region[CI].num_elements = 1;
-	pm8001_ha->memoryMap.region[CI].element_size = 4;
-	pm8001_ha->memoryMap.region[CI].total_len = 4;
-	pm8001_ha->memoryMap.region[CI].alignment = 4;
+	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+		/* MPI Memory region 3 for consumer Index of inbound queues */
+		pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
+		pm8001_ha->memoryMap.region[CI+i].element_size = 4;
+		pm8001_ha->memoryMap.region[CI+i].total_len = 4;
+		pm8001_ha->memoryMap.region[CI+i].alignment = 4;
 
-	/* MPI Memory region 4 for producer Index of outbound queues */
-	pm8001_ha->memoryMap.region[PI].num_elements = 1;
-	pm8001_ha->memoryMap.region[PI].element_size = 4;
-	pm8001_ha->memoryMap.region[PI].total_len = 4;
-	pm8001_ha->memoryMap.region[PI].alignment = 4;
+		if ((ent->driver_data) != chip_8001) {
+			/* MPI Memory region 5 inbound queues */
+			pm8001_ha->memoryMap.region[IB+i].num_elements =
+						PM8001_MPI_QUEUE;
+			pm8001_ha->memoryMap.region[IB+i].element_size = 128;
+			pm8001_ha->memoryMap.region[IB+i].total_len =
+						PM8001_MPI_QUEUE * 128;
+			pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+		} else {
+			pm8001_ha->memoryMap.region[IB+i].num_elements =
+						PM8001_MPI_QUEUE;
+			pm8001_ha->memoryMap.region[IB+i].element_size = 64;
+			pm8001_ha->memoryMap.region[IB+i].total_len =
+						PM8001_MPI_QUEUE * 64;
+			pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+		}
+	}
 
-	/* MPI Memory region 5 inbound queues */
-	pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
-	pm8001_ha->memoryMap.region[IB].element_size = 64;
-	pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
-	pm8001_ha->memoryMap.region[IB].alignment = 64;
+	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+		/* MPI Memory region 4 for producer Index of outbound queues */
+		pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
+		pm8001_ha->memoryMap.region[PI+i].element_size = 4;
+		pm8001_ha->memoryMap.region[PI+i].total_len = 4;
+		pm8001_ha->memoryMap.region[PI+i].alignment = 4;
 
-	/* MPI Memory region 6 outbound queues */
-	pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
-	pm8001_ha->memoryMap.region[OB].element_size = 64;
-	pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
-	pm8001_ha->memoryMap.region[OB].alignment = 64;
+		if (ent->driver_data != chip_8001) {
+			/* MPI Memory region 6 Outbound queues */
+			pm8001_ha->memoryMap.region[OB+i].num_elements =
+						PM8001_MPI_QUEUE;
+			pm8001_ha->memoryMap.region[OB+i].element_size = 128;
+			pm8001_ha->memoryMap.region[OB+i].total_len =
+						PM8001_MPI_QUEUE * 128;
+			pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+		} else {
+			/* MPI Memory region 6 Outbound queues */
+			pm8001_ha->memoryMap.region[OB+i].num_elements =
+						PM8001_MPI_QUEUE;
+			pm8001_ha->memoryMap.region[OB+i].element_size = 64;
+			pm8001_ha->memoryMap.region[OB+i].total_len =
+						PM8001_MPI_QUEUE * 64;
+			pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+		}
 
+	}
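
Because the element size doubles on the newer parts, each queue region
grows from PM8001_MPI_QUEUE * 64 to PM8001_MPI_QUEUE * 128 bytes. A small
helper sketch capturing the sizing rule, assuming the chip test mirrors the
ent->driver_data check above:

#include <stddef.h>

#define EX_IOMB_SIZE_SPC	64
#define EX_IOMB_SIZE_SPCV	128

/* Bytes needed for one MPI queue of 'depth' elements; the region's
 * alignment equals the element size in the code above. */
static size_t ex_queue_region_len(int is_spc, size_t depth)
{
	return depth * (is_spc ? EX_IOMB_SIZE_SPC : EX_IOMB_SIZE_SPCV);
}
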
 	/* Memory region write DMA*/
 	pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
 	pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
@@ -264,6 +341,9 @@
 	pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
 		sizeof(struct pm8001_ccb_info);
 
+	/* Memory region for fw flash */
+	pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+
 	for (i = 0; i < USI_MAX_MEMCNT; i++) {
 		if (pm8001_mem_alloc(pm8001_ha->pdev,
 			&pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@
 
 	pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
 	for (i = 0; i < PM8001_MAX_DEVICES; i++) {
-		pm8001_ha->devices[i].dev_type = NO_DEVICE;
+		pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
 		pm8001_ha->devices[i].id = i;
 		pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
 		pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@
 				ioremap(pm8001_ha->io_mem[logicalBar].membase,
 				pm8001_ha->io_mem[logicalBar].memsize);
 			PM8001_INIT_DBG(pm8001_ha,
-				pm8001_printk("PCI: bar %d, logicalBar %d "
-				"virt_addr=%lx,len=%d\n", bar, logicalBar,
-				(unsigned long)
-				pm8001_ha->io_mem[logicalBar].memvirtaddr,
+				pm8001_printk("PCI: bar %d, logicalBar %d ",
+				bar, logicalBar));
+			PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+				"base addr %llx virt_addr=%llx len=%d\n",
+				(u64)pm8001_ha->io_mem[logicalBar].membase,
+				(u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
 				pm8001_ha->io_mem[logicalBar].memsize));
 		} else {
 			pm8001_ha->io_mem[logicalBar].membase	= 0;
@@ -361,8 +443,9 @@
  * @shost: scsi host struct which has been initialized before.
  */
 static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
-						u32 chip_id,
-						struct Scsi_Host *shost)
+				 const struct pci_device_id *ent,
+				struct Scsi_Host *shost)
+
 {
 	struct pm8001_hba_info *pm8001_ha;
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@
 
 	pm8001_ha->pdev = pdev;
 	pm8001_ha->dev = &pdev->dev;
-	pm8001_ha->chip_id = chip_id;
+	pm8001_ha->chip_id = ent->driver_data;
 	pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
 	pm8001_ha->irq = pdev->irq;
 	pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@
 	pm8001_ha->id = pm8001_id++;
 	pm8001_ha->logging_level = 0x01;
 	sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+	/* IOMB size is 128 for 8088/89 controllers */
+	if (pm8001_ha->chip_id != chip_8001)
+		pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
+	else
+		pm8001_ha->iomb_size = IOMB_SIZE_SPC;
+
 #ifdef PM8001_USE_TASKLET
+	/*
+	 * default tasklet for non msi-x interrupt handler/first msi-x
+	 * interrupt handler
+	 */
 	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-		(unsigned long)pm8001_ha);
+			(unsigned long)pm8001_ha);
 #endif
 	pm8001_ioremap(pm8001_ha);
-	if (!pm8001_alloc(pm8001_ha))
+	if (!pm8001_alloc(pm8001_ha, ent))
 		return pm8001_ha;
 	pm8001_free(pm8001_ha);
 	return NULL;
@@ -512,21 +605,50 @@
  */
 static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 {
-	u8 i;
+	u8 i, j;
 #ifdef PM8001_READ_VPD
+	/* For new SPC controllers WWN is stored in flash vpd.
+	 * For SPC/SPCve controllers WWN is stored in EEPROM.
+	 * For older SPC WWN is stored in NVMD.
+	 */
 	DECLARE_COMPLETION_ONSTACK(completion);
 	struct pm8001_ioctl_payload payload;
+	u16 deviceid;
+	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
 	pm8001_ha->nvmd_completion = &completion;
-	payload.minor_function = 0;
-	payload.length = 128;
-	payload.func_specific = kzalloc(128, GFP_KERNEL);
+
+	if (pm8001_ha->chip_id == chip_8001) {
+		if (deviceid == 0x8081) {
+			payload.minor_function = 4;
+			payload.length = 4096;
+		} else {
+			payload.minor_function = 0;
+			payload.length = 128;
+		}
+	} else {
+		payload.minor_function = 1;
+		payload.length = 4096;
+	}
+	payload.offset = 0;
+	payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
 	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
 	wait_for_completion(&completion);
+
+	for (i = 0, j = 0; i <= 7; i++, j++) {
+		if (pm8001_ha->chip_id == chip_8001) {
+			if (deviceid == 0x8081)
+				pm8001_ha->sas_addr[j] =
+					payload.func_specific[0x704 + i];
+		} else
+			pm8001_ha->sas_addr[j] =
+					payload.func_specific[0x804 + i];
+	}
+
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
-			SAS_ADDR_SIZE);
+		memcpy(&pm8001_ha->phy[i].dev_sas_addr,
+			pm8001_ha->sas_addr, SAS_ADDR_SIZE);
 		PM8001_INIT_DBG(pm8001_ha,
-			pm8001_printk("phy %d sas_addr = %016llx \n", i,
+			pm8001_printk("phy %d sas_addr = %016llx\n", i,
 			pm8001_ha->phy[i].dev_sas_addr));
 	}
 #else
@@ -547,31 +669,50 @@
  * @chip_info: our ha struct.
  * @irq_handler: irq_handler
  */
-static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
-	irq_handler_t irq_handler)
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 {
 	u32 i = 0, j = 0;
-	u32 number_of_intr = 1;
+	u32 number_of_intr;
 	int flag = 0;
 	u32 max_entry;
 	int rc;
+	static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
+
+	/* SPCv controllers support 64 msi-x */
+	if (pm8001_ha->chip_id == chip_8001) {
+		number_of_intr = 1;
+		flag |= IRQF_DISABLED;
+	} else {
+		number_of_intr = PM8001_MAX_MSIX_VEC;
+		flag &= ~IRQF_SHARED;
+		flag |= IRQF_DISABLED;
+	}
+
 	max_entry = sizeof(pm8001_ha->msix_entries) /
 		sizeof(pm8001_ha->msix_entries[0]);
-	flag |= IRQF_DISABLED;
 	for (i = 0; i < max_entry ; i++)
 		pm8001_ha->msix_entries[i].entry = i;
 	rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
 		number_of_intr);
 	pm8001_ha->number_of_intr = number_of_intr;
 	if (!rc) {
+		PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+			"pci_enable_msix request ret:%d no of intr %d\n",
+					rc, pm8001_ha->number_of_intr));
+
+		for (i = 0; i < number_of_intr; i++)
+			pm8001_ha->outq[i] = i;
+
 		for (i = 0; i < number_of_intr; i++) {
+			snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+					DRV_NAME"%d", i);
 			if (request_irq(pm8001_ha->msix_entries[i].vector,
-				irq_handler, flag, DRV_NAME,
-				SHOST_TO_SAS_HA(pm8001_ha->shost))) {
+				pm8001_interrupt_handler_msix, flag,
+				intr_drvname[i], &pm8001_ha->outq[i])) {
 				for (j = 0; j < i; j++)
 					free_irq(
 					pm8001_ha->msix_entries[j].vector,
-					SHOST_TO_SAS_HA(pm8001_ha->shost));
+					&pm8001_ha->outq[j]);
 				pci_disable_msix(pm8001_ha->pdev);
 				break;
 			}
@@ -588,22 +729,24 @@
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
 	struct pci_dev *pdev;
-	irq_handler_t irq_handler = pm8001_interrupt;
 	int rc;
 
 	pdev = pm8001_ha->pdev;
 
 #ifdef PM8001_USE_MSIX
 	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-		return pm8001_setup_msix(pm8001_ha, irq_handler);
-	else
+		return pm8001_setup_msix(pm8001_ha);
+	else {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("MSIX not supported!!!\n"));
 		goto intx;
+	}
 #endif
 
 intx:
 	/* initialize the INT-X interrupt */
-	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
-		SHOST_TO_SAS_HA(pm8001_ha->shost));
+	rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+		DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
 	return rc;
 }
 
@@ -621,12 +764,13 @@
 {
 	unsigned int rc;
 	u32	pci_reg;
+	u8	i = 0;
 	struct pm8001_hba_info *pm8001_ha;
 	struct Scsi_Host *shost = NULL;
 	const struct pm8001_chip_info *chip;
 
 	dev_printk(KERN_INFO, &pdev->dev,
-		"pm8001: driver version %s\n", DRV_VERSION);
+		"pm80xx: driver version %s\n", DRV_VERSION);
 	rc = pci_enable_device(pdev);
 	if (rc)
 		goto err_out_enable;
@@ -665,25 +809,39 @@
 		goto err_out_free;
 	}
 	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
-	pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
+	/* ent->driver_data is used to differentiate between controllers */
+	pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
 	if (!pm8001_ha) {
 		rc = -ENOMEM;
 		goto err_out_free;
 	}
 	list_add_tail(&pm8001_ha->list, &hba_list);
-	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
-	if (rc)
+	if (rc) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"chip_init failed [ret: %d]\n", rc));
 		goto err_out_ha_free;
+	}
 
 	rc = scsi_add_host(shost, &pdev->dev);
 	if (rc)
 		goto err_out_ha_free;
 	rc = pm8001_request_irq(pm8001_ha);
-	if (rc)
+	if (rc)	{
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"pm8001_request_irq failed [ret: %d]\n", rc));
 		goto err_out_shost;
+	}
 
-	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+	if (pm8001_ha->chip_id != chip_8001) {
+		for (i = 1; i < pm8001_ha->number_of_intr; i++)
+			PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+		/* setup thermal configuration. */
+		pm80xx_set_thermal_config(pm8001_ha);
+	}
+
 	pm8001_init_sas_add(pm8001_ha);
 	pm8001_post_sas_ha_init(shost, chip);
 	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@
 	sas_remove_host(pm8001_ha->shost);
 	list_del(&pm8001_ha->list);
 	scsi_remove_host(pm8001_ha->shost);
-	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 
 #ifdef PM8001_USE_MSIX
 	for (i = 0; i < pm8001_ha->number_of_intr; i++)
 		synchronize_irq(pm8001_ha->msix_entries[i].vector);
 	for (i = 0; i < pm8001_ha->number_of_intr; i++)
-		free_irq(pm8001_ha->msix_entries[i].vector, sha);
+		free_irq(pm8001_ha->msix_entries[i].vector,
+				&pm8001_ha->outq[i]);
 	pci_disable_msix(pdev);
 #else
 	free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@
 		printk(KERN_ERR " PCI PM not supported\n");
 		return -ENODEV;
 	}
-	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 #ifdef PM8001_USE_MSIX
 	for (i = 0; i < pm8001_ha->number_of_intr; i++)
 		synchronize_irq(pm8001_ha->msix_entries[i].vector);
 	for (i = 0; i < pm8001_ha->number_of_intr; i++)
-		free_irq(pm8001_ha->msix_entries[i].vector, sha);
+		free_irq(pm8001_ha->msix_entries[i].vector,
+				&pm8001_ha->outq[i]);
 	pci_disable_msix(pdev);
 #else
 	free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@
 	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
 	struct pm8001_hba_info *pm8001_ha;
 	int rc;
+	u8 i = 0;
 	u32 device_state;
 	pm8001_ha = sha->lldd_ha;
 	device_state = pdev->current_state;
@@ -820,19 +981,33 @@
 	if (rc)
 		goto err_out_disable;
 
-	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+	/* chip soft reset only for SPC */
+	if (pm8001_ha->chip_id == chip_8001) {
+		PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("chip soft reset successful\n"));
+	}
 	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
 	if (rc)
 		goto err_out_disable;
-	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+
+	/* disable all the interrupt bits */
+	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+
 	rc = pm8001_request_irq(pm8001_ha);
 	if (rc)
 		goto err_out_disable;
-	#ifdef PM8001_USE_TASKLET
+#ifdef PM8001_USE_TASKLET
+	/* default tasklet for non msi-x interrupt handler/first msi-x
+	 * interrupt handler */
 	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-		    (unsigned long)pm8001_ha);
-	#endif
-	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+			(unsigned long)pm8001_ha);
+#endif
+	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+	if (pm8001_ha->chip_id != chip_8001) {
+		for (i = 1; i < pm8001_ha->number_of_intr; i++)
+			PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+	}
 	scsi_unblock_requests(pm8001_ha->shost);
 	return 0;
 
@@ -843,14 +1018,45 @@
 	return rc;
 }
 
+/* PCI device table: device and vendor ids plus driver data carry a
+ * unique value for each supported controller
+ */
 static struct pci_device_id pm8001_pci_table[] = {
-	{
-		PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
-	},
+	{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
 	{
 		PCI_DEVICE(0x117c, 0x0042),
 		.driver_data = chip_8001
 	},
+	/* Support for SPC/SPCv/SPCve controllers */
+	{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
+	{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
 	{} /* terminate list */
 };
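
The long-form entries at the end also match on subsystem vendor/device
ids, which is how boards sharing PCI device id 0x8088 or 0x8089 are
steered to different chip_* values. A minimal sketch of the kind of
first-match lookup pci_match_id() performs, with hypothetical ids:

#include <stdint.h>
#include <stddef.h>

#define EX_ANY	0xFFFFFFFFu	/* stand-in for PCI_ANY_ID */

struct ex_pci_id {
	uint32_t vendor, device, subvendor, subdevice;
	unsigned long driver_data;	/* chip_8001, chip_8008, ... */
};

static const struct ex_pci_id *ex_match(const struct ex_pci_id *tbl,
		uint32_t v, uint32_t d, uint32_t sv, uint32_t sd)
{
	for (; tbl->vendor; tbl++)
		if (tbl->vendor == v && tbl->device == d &&
		    (tbl->subvendor == EX_ANY || tbl->subvendor == sv) &&
		    (tbl->subdevice == EX_ANY || tbl->subdevice == sd))
			return tbl;	/* first match wins */
	return NULL;
}
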
 
@@ -870,7 +1076,7 @@
 {
 	int rc = -ENOMEM;
 
-	pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+	pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
 	if (!pm8001_wq)
 		goto err;
 
@@ -902,7 +1108,8 @@
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
-MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
+MODULE_DESCRIPTION(
+		"PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b961112..a85d73d 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -68,7 +68,7 @@
 	clear_bit(tag, bitmap);
 }
 
-static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
 {
 	pm8001_tag_clear(pm8001_ha, tag);
 }
@@ -212,10 +212,12 @@
 		break;
 	case PHY_FUNC_GET_EVENTS:
 		spin_lock_irqsave(&pm8001_ha->lock, flags);
-		if (-1 == pm8001_bar4_shift(pm8001_ha,
+		if (pm8001_ha->chip_id == chip_8001) {
+			if (-1 == pm8001_bar4_shift(pm8001_ha,
 					(phy_id < 4) ? 0x30000 : 0x40000)) {
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
-			return -EINVAL;
+				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+				return -EINVAL;
+			}
 		}
 		{
 			struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@
 			phy->loss_of_dword_sync_count = qp[3];
 			phy->phy_reset_problem_count = qp[4];
 		}
-		pm8001_bar4_shift(pm8001_ha, 0);
+		if (pm8001_ha->chip_id == chip_8001)
+			pm8001_bar4_shift(pm8001_ha, 0);
 		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return 0;
 	default:
@@ -249,7 +252,9 @@
 	struct pm8001_hba_info *pm8001_ha;
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	pm8001_ha = sha->lldd_ha;
-	PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
+	if (pm8001_ha->chip_id == chip_8001)
+		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
 	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
 }
@@ -352,7 +357,7 @@
   * @tmf: the task management IU
   */
 #define DEV_IS_GONE(pm8001_dev)	\
-	((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
+	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
 static int pm8001_task_exec(struct sas_task *task, const int num,
 	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
 {
@@ -370,7 +375,7 @@
 		struct task_status_struct *tsm = &t->task_status;
 		tsm->resp = SAS_TASK_UNDELIVERED;
 		tsm->stat = SAS_PHY_DOWN;
-		if (dev->dev_type != SATA_DEV)
+		if (dev->dev_type != SAS_SATA_DEV)
 			t->task_done(t);
 		return 0;
 	}
@@ -548,7 +553,7 @@
 {
 	u32 dev;
 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
-		if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
+		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
 			pm8001_ha->devices[dev].id = dev;
 			return &pm8001_ha->devices[dev];
 		}
@@ -560,13 +565,31 @@
 	}
 	return NULL;
 }
+
+/**
+ * pm8001_find_dev - find a matching pm8001_device
+ * @pm8001_ha: our hba card information
+ * @device_id: device id of the device to find
+ */
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+					u32 device_id)
+{
+	u32 dev;
+	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+		if (pm8001_ha->devices[dev].device_id == device_id)
+			return &pm8001_ha->devices[dev];
+	}
+	if (dev == PM8001_MAX_DEVICES) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("NO MATCHING DEVICE FOUND !!!\n"));
+	}
+	return NULL;
+}
 
 static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
 {
 	u32 id = pm8001_dev->id;
 	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
 	pm8001_dev->id = id;
-	pm8001_dev->dev_type = NO_DEVICE;
+	pm8001_dev->dev_type = SAS_PHY_UNUSED;
 	pm8001_dev->device_id = PM8001_MAX_DEVICES;
 	pm8001_dev->sas_device = NULL;
 }
@@ -624,7 +647,7 @@
 			res = -1;
 		}
 	} else {
-		if (dev->dev_type == SATA_DEV) {
+		if (dev->dev_type == SAS_SATA_DEV) {
 			pm8001_device->attached_phy =
 				dev->rphy->identify.phy_identifier;
 				flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@
 	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	wait_for_completion(&completion);
-	if (dev->dev_type == SAS_END_DEV)
+	if (dev->dev_type == SAS_END_DEVICE)
 		msleep(50);
 	pm8001_ha->flags = PM8001F_RUN_TIME;
 	return 0;
@@ -648,7 +671,7 @@
 	return pm8001_dev_found_notify(dev);
 }
 
-static void pm8001_task_done(struct sas_task *task)
+void pm8001_task_done(struct sas_task *task)
 {
 	if (!del_timer(&task->slow_task->timer))
 		return;
@@ -904,7 +927,7 @@
 		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
 
 		pm8001_dev = ccb->device;
-		if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
 			continue;
 		if (!device_to_close) {
 			uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@
 	return rc;
 }
 
+/*
+ * This function handles the IT_NEXUS_XXX event or completion
+ * status code for SSP/SATA/SMP I/O requests.
+ */
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct pm8001_device *pm8001_dev;
+	struct pm8001_hba_info *pm8001_ha;
+	struct sas_phy *phy;
+	u32 device_id = 0;
+
+	if (!dev || !dev->lldd_dev)
+		return -1;
+
+	pm8001_dev = dev->lldd_dev;
+	device_id = pm8001_dev->device_id;
+	pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+	PM8001_EH_DBG(pm8001_ha,
+			pm8001_printk("I_T_Nexus handler invoked !!"));
+
+	phy = sas_get_local_phy(dev);
+
+	if (dev_is_sata(dev)) {
+		DECLARE_COMPLETION_ONSTACK(completion_setstate);
+		if (scsi_is_sas_phy_local(phy)) {
+			rc = 0;
+			goto out;
+		}
+		/* send internal ssp/sata/smp abort command to FW */
+		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+							dev, 1, 0);
+		msleep(100);
+
+		/* deregister the target device */
+		pm8001_dev_gone_notify(dev);
+		msleep(200);
+
+		/* send phy reset to hard-reset the target */
+		rc = sas_phy_reset(phy, 1);
+		msleep(2000);
+		pm8001_dev->setds_completion = &completion_setstate;
+
+		wait_for_completion(&completion_setstate);
+	} else {
+		/* send internal ssp/sata/smp abort command to FW */
+		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+							dev, 1, 0);
+		msleep(100);
+
+		/* deregister the target device */
+		pm8001_dev_gone_notify(dev);
+		msleep(200);
+
+		/* send phy reset to hard-reset the target */
+		rc = sas_phy_reset(phy, 1);
+		msleep(2000);
+	}
+	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+		pm8001_dev->device_id, rc));
+out:
+	sas_put_local_phy(phy);
+
+	return rc;
+}
 /* mandatory SAM-3, the task reset the specified LUN*/
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
 {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 1100820..5708194 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -57,8 +57,8 @@
 #include <linux/atomic.h>
 #include "pm8001_defs.h"
 
-#define DRV_NAME		"pm8001"
-#define DRV_VERSION		"0.1.36"
+#define DRV_NAME		"pm80xx"
+#define DRV_VERSION		"0.1.37"
 #define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING	0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
 #define PM8001_EH_LOGGING	0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING	0x40 /* misc message logging */
-#define pm8001_printk(format, arg...)	printk(KERN_INFO "%s %d:" format,\
-				__func__, __LINE__, ## arg)
+#define pm8001_printk(format, arg...)	printk(KERN_INFO "pm80xx %s %d:" \
+			format, __func__, __LINE__, ## arg)
 #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)	\
 do {						\
 	if (unlikely(HBA->logging_level & LEVEL))	\
@@ -103,11 +103,12 @@
 #define PM8001_READ_VPD
 
 
-#define DEV_IS_EXPANDER(type)	((type == EDGE_DEV) || (type == FANOUT_DEV))
+#define DEV_IS_EXPANDER(type)	((type == SAS_EDGE_EXPANDER_DEVICE) || \
+				(type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define PM8001_NAME_LENGTH		32/* generic length of strings */
 extern struct list_head hba_list;
 extern const struct pm8001_dispatch pm8001_8001_dispatch;
+extern const struct pm8001_dispatch pm8001_80xx_dispatch;
 
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
@@ -131,15 +132,15 @@
 struct pm8001_dispatch {
 	char *name;
 	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
-	int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
+	int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
 	void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
 	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
 	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
-	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
 	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
-	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
-	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
-	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
+	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
 	void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
 	int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
 		struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@
 };
 
 struct pm8001_chip_info {
+	u32     encrypt;
 	u32	n_phy;
 	const struct pm8001_dispatch	*dispatch;
 };
@@ -204,7 +206,7 @@
 };
 
 struct pm8001_device {
-	enum sas_dev_type	dev_type;
+	enum sas_device_type	dev_type;
 	struct domain_device	*sas_device;
 	u32			attached_phy;
 	u32			id;
@@ -256,7 +258,20 @@
 	struct mpi_mem		region[USI_MAX_MEMCNT];
 };
 
-struct main_cfg_table {
+struct encrypt {
+	u32	cipher_mode;
+	u32	sec_mode;
+	u32	status;
+	u32	flag;
+};
+
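+/* per-phy start state and outbound HW event port id, mirrored from
+ * the SPCv SAS PHY attribute table (see read_phy_attr_table()) */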
+struct sas_phy_attribute_table {
+	u32	phystart1_16[16];
+	u32	outbound_hw_event_pid1_16[16];
+};
+
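+/* the main configuration table layout differs between the SPC
+ * (pm8001_tbl) and the SPCv/ve (pm80xx_tbl); a union lets common
+ * code carry either variant */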
+union main_cfg_table {
+	struct {
 	u32			signature;
 	u32			interface_rev;
 	u32			firmware_rev;
@@ -292,19 +307,69 @@
 	u32			fatal_err_dump_length1;
 	u32			hda_mode_flag;
 	u32			anolog_setup_table_offset;
+	u32			rsvd[4];
+	} pm8001_tbl;
+
+	struct {
+	u32			signature;
+	u32			interface_rev;
+	u32			firmware_rev;
+	u32			max_out_io;
+	u32			max_sgl;
+	u32			ctrl_cap_flag;
+	u32			gst_offset;
+	u32			inbound_queue_offset;
+	u32			outbound_queue_offset;
+	u32			inbound_q_nppd_hppd;
+	u32			rsvd[8];
+	u32			crc_core_dump;
+	u32			rsvd1;
+	u32			upper_event_log_addr;
+	u32			lower_event_log_addr;
+	u32			event_log_size;
+	u32			event_log_severity;
+	u32			upper_pcs_event_log_addr;
+	u32			lower_pcs_event_log_addr;
+	u32			pcs_event_log_size;
+	u32			pcs_event_log_severity;
+	u32			fatal_err_interrupt;
+	u32			fatal_err_dump_offset0;
+	u32			fatal_err_dump_length0;
+	u32			fatal_err_dump_offset1;
+	u32			fatal_err_dump_length1;
+	u32			gpio_led_mapping;
+	u32			analog_setup_table_offset;
+	u32			int_vec_table_offset;
+	u32			phy_attr_table_offset;
+	u32			port_recovery_timer;
+	u32			interrupt_reassertion_delay;
+	} pm80xx_tbl;
 };
-struct general_status_table {
+
+union general_status_table {
+	struct {
 	u32			gst_len_mpistate;
 	u32			iq_freeze_state0;
 	u32			iq_freeze_state1;
 	u32			msgu_tcnt;
 	u32			iop_tcnt;
-	u32			reserved;
+	u32			rsvd;
 	u32			phy_state[8];
-	u32			reserved1;
-	u32			reserved2;
-	u32			reserved3;
+	u32			gpio_input_val;
+	u32			rsvd1[2];
 	u32			recover_err_info[8];
+	} pm8001_tbl;
+	struct {
+	u32			gst_len_mpistate;
+	u32			iq_freeze_state0;
+	u32			iq_freeze_state1;
+	u32			msgu_tcnt;
+	u32			iop_tcnt;
+	u32			rsvd[9];
+	u32			gpio_input_val;
+	u32			rsvd1[2];
+	u32			recover_err_info[8];
+	} pm80xx_tbl;
 };
 struct inbound_queue_table {
 	u32			element_pri_size_cnt;
@@ -351,15 +416,21 @@
 	struct device		*dev;
 	struct pm8001_hba_memspace io_mem[6];
 	struct mpi_mem_req	memoryMap;
+	struct encrypt		encrypt_info; /* support encryption */
 	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/
 	void __iomem	*main_cfg_tbl_addr;/*Main Config Table Addr*/
 	void __iomem	*general_stat_tbl_addr;/*General Status Table Addr*/
 	void __iomem	*inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
 	void __iomem	*outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
-	struct main_cfg_table	main_cfg_tbl;
-	struct general_status_table	gs_tbl;
-	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_INB_NUM];
-	struct outbound_queue_table	outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
+	void __iomem	*pspa_q_tbl_addr;
+			/*MPI SAS PHY attributes Queue Config Table Addr*/
+	void __iomem	*ivt_tbl_addr; /*MPI IVT Table Addr */
+	union main_cfg_table	main_cfg_tbl;
+	union general_status_table	gs_tbl;
+	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
+	struct outbound_queue_table	outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+	struct sas_phy_attribute_table	phy_attr_table;
+					/* MPI SAS PHY attributes */
 	u8			sas_addr[SAS_ADDR_SIZE];
 	struct sas_ha_struct	*sas;/* SCSI/SAS glue */
 	struct Scsi_Host	*shost;
@@ -372,10 +443,12 @@
 	struct pm8001_port	port[PM8001_MAX_PHYS];
 	u32			id;
 	u32			irq;
+	u32			iomb_size; /* SPC and SPCV IOMB size */
 	struct pm8001_device	*devices;
 	struct pm8001_ccb_info	*ccb_info;
 #ifdef PM8001_USE_MSIX
-	struct msix_entry	msix_entries[16];/*for msi-x interrupt*/
+	struct msix_entry	msix_entries[PM8001_MAX_MSIX_VEC];
+					/*for msi-x interrupt*/
 	int			number_of_intr;/*will be used in remove()*/
 #endif
 #ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@
 #endif
 	u32			logging_level;
 	u32			fw_status;
+	u32			smp_exp_mode;
+	u32			int_vector;
 	const struct firmware 	*fw_image;
+	u8			outq[PM8001_MAX_MSIX_VEC];
+					/* per-vector outbound queue id,
+					 * also the free_irq() dev_id cookie */
 };
 
 struct pm8001_work {
@@ -419,6 +495,9 @@
 #define FLASH_UPDATE_DNLD_NOT_SUPPORTED		0x10
 #define FLASH_UPDATE_DISABLED			0x11
 
+#define	NCQ_READ_LOG_FLAG			0x80000000
+#define	NCQ_ABORT_ALL_FLAG			0x40000000
+#define	NCQ_2ND_RLE_FLAG			0x20000000
 /**
  * brief param structure for firmware flash update.
  */
@@ -484,6 +563,7 @@
 void pm8001_dev_gone(struct domain_device *dev);
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
 int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
 int pm8001_query_task(struct sas_task *task);
 void pm8001_open_reject_retry(
 	struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@
 	dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
 	u32 mem_size, u32 align);
 
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+			struct inbound_queue_table *circularQ,
+			u32 opCode, void *payload, u32 responseQueue);
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+				u16 messageSize, void **messagePtr);
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+			struct outbound_queue_table *circularQ, u8 bc);
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+			struct outbound_queue_table *circularQ,
+			void **messagePtr1, u8 *pBC);
+int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+			struct pm8001_device *pm8001_dev, u32 state);
+int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+					void *payload);
+int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+					void *fw_flash_updata_info, u32 tag);
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+				struct pm8001_ccb_info *ccb,
+				struct pm8001_tmf_task *tmf);
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+				struct pm8001_device *pm8001_dev,
+				u8 flag, u32 task_tag, u32 cmd_tag);
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
+void pm8001_work_fn(struct work_struct *work);
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
+					void *data, int handler);
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+							void *piomb);
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+							void *piomb);
+void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+							void *piomb);
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
+							void *piomb);
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+							void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+struct sas_task *pm8001_alloc_task(void);
+void pm8001_task_done(struct sas_task *task);
+void pm8001_free_task(struct sas_task *task);
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+					u32 device_id);
+int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
+
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
 
 /* ctl shared API */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644
index 0000000..302514d
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -0,0 +1,4130 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#include <linux/slab.h>
+#include "pm8001_sas.h"
+#include "pm80xx_hwi.h"
+#include "pm8001_chips.h"
+#include "pm8001_ctl.h"
+
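+/* SMP request payload modes (direct vs. indirect data); assumed to
+ * be consumed by the SPCv SMP request path */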
+#define SMP_DIRECT 1
+#define SMP_INDIRECT 2
+/**
+ * read_main_config_table - read the main configuration table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature	=
+		pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
+		pm8001_mr32(address, MAIN_INTERFACE_REVISION);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev	=
+		pm8001_mr32(address, MAIN_FW_REVISION);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io	=
+		pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl	=
+		pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
+		pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset	=
+		pm8001_mr32(address, MAIN_GST_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
+		pm8001_mr32(address, MAIN_IBQ_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
+		pm8001_mr32(address, MAIN_OBQ_OFFSET);
+
+	/* read Error Dump Offset and Length */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
+		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+
+	/* read GPIO LED settings from the configuration table */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
+		pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
+
+	/* read analog Setting offset from the configuration table */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
+		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
+		pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
+		pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+	pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate	=
+			pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
+	pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0	=
+			pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
+	pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1	=
+			pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
+	pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt		=
+			pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
+	pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt		=
+			pm8001_mr32(address, GST_IOPTCNT_OFFSET);
+	pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val	=
+			pm8001_mr32(address, GST_GPIO_INPUT_VAL);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET0);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET1);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET2);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET3);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET4);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET5);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
+			pm8001_mr32(address, GST_RERRINFO_OFFSET6);
+	pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
+			 pm8001_mr32(address, GST_RERRINFO_OFFSET7);
+}
+
+/**
+ * read_phy_attr_table - read the phy attribute table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
+	pm8001_ha->phy_attr_table.phystart1_16[0] =
+			pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[1] =
+			pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[2] =
+			pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[3] =
+			pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[4] =
+			pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[5] =
+			pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[6] =
+			pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[7] =
+			pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[8] =
+			pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[9] =
+			pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[10] =
+			pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[11] =
+			pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[12] =
+			pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[13] =
+			pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[14] =
+			pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
+	pm8001_ha->phy_attr_table.phystart1_16[15] =
+			pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
+
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
+	pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
+			pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+	int i;
+	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+		u32 offset = i * 0x20;
+		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+			get_pci_bar_index(pm8001_mr32(address,
+				(offset + IB_PIPCI_BAR)));
+		pm8001_ha->inbnd_q_tbl[i].pi_offset =
+			pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
+	}
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+	int i;
+	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+		u32 offset = i * 0x24;
+		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+			get_pci_bar_index(pm8001_mr32(address,
+				(offset + OB_CIPCI_BAR)));
+		pm8001_ha->outbnd_q_tbl[i].ci_offset =
+			pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
+	}
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+	int i;
+	u32 offsetib, offsetob;
+	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr		=
+		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr		=
+		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size		=
+							PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity		= 0x01;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr	=
+		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr	=
+		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size		=
+							PM8001_EVENT_LOG_SIZE;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity	= 0x01;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt		= 0x01;
+
+	/* Disable end to end CRC checking */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+
+	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
+			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
+			pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
+		pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+		pm8001_ha->inbnd_q_tbl[i].base_virt		=
+			(u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+		pm8001_ha->inbnd_q_tbl[i].total_length		=
+			pm8001_ha->memoryMap.region[IB + i].total_len;
+		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr	=
+			pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr	=
+			pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+		pm8001_ha->inbnd_q_tbl[i].ci_virt		=
+			pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+		offsetib = i * 0x20;
+		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar		=
+			get_pci_bar_index(pm8001_mr32(addressib,
+				(offsetib + 0x14)));
+		pm8001_ha->inbnd_q_tbl[i].pi_offset		=
+			pm8001_mr32(addressib, (offsetib + 0x18));
+		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
+		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
+	}
+	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
+			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
+			pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
+			pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+		pm8001_ha->outbnd_q_tbl[i].base_virt		=
+			(u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+		pm8001_ha->outbnd_q_tbl[i].total_length		=
+			pm8001_ha->memoryMap.region[OB + i].total_len;
+		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr	=
+			pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr	=
+			pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+		/* interrupt vector based on oq */
+		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
+		pm8001_ha->outbnd_q_tbl[i].pi_virt		=
+			pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+		offsetob = i * 0x24;
+		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar		=
+			get_pci_bar_index(pm8001_mr32(addressob,
+			offsetob + 0x14));
+		pm8001_ha->outbnd_q_tbl[i].ci_offset		=
+			pm8001_mr32(addressob, (offsetob + 0x18));
+		pm8001_ha->outbnd_q_tbl[i].consumer_idx		= 0;
+		pm8001_ha->outbnd_q_tbl[i].producer_index	= 0;
+	}
+}
+
+/**
+ * update_main_config_table - update the main default table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+	pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
+	pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
+	pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
+	pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+	pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
+	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
+	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
+	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
+	pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+	pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+	pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
+
+	/* SPCv specific */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
+	/* Set GPIOLED to 0x2 for LED indicator */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
+	pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+
+	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+	pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+}
+
+/**
+ * update_inbnd_queue_table - update the inbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+					 int number)
+{
+	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+	u16 offset = number * 0x20;
+	pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
+		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+	pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
+		pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+	pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
+		pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
+		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+	pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
+		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - update the outbound queue table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+						 int number)
+{
+	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+	u16 offset = number * 0x24;
+	pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
+		pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+	pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
+		pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+	pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
+		pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+	pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
+		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+	pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
+		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+	pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
+		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 max_wait_count;
+	u32 value;
+	u32 gst_len_mpistate;
+
+	/* Write bit0=1 to the Inbound DoorBell Register to tell the SPCv FW
+	 * that the table is updated */
+	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
+	/* wait until Inbound DoorBell Clear Register toggled */
+	max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+		value &= SPCv_MSGU_CFG_TABLE_UPDATE;
+	} while ((value != 0) && (--max_wait_count));
+
+	if (!max_wait_count)
+		return -1;
+	/* check the MPI-State for initialization, up to 100 ms */
+	max_wait_count = 100 * 1000;/* 100 msec */
+	do {
+		udelay(1);
+		gst_len_mpistate =
+			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+					GST_GSTLEN_MPIS_OFFSET);
+	} while ((GST_MPI_STATE_INIT !=
+		(gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
+	if (!max_wait_count)
+		return -1;
+
+	/* check MPI Initialization error */
+	gst_len_mpistate = gst_len_mpistate >> 16;
+	if (0x0000 != gst_len_mpistate)
+		return -1;
+
+	return 0;
+}
+
+/**
+ * check_fw_ready - The LLDD checks if the FW is ready; if not, return error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 value;
+	u32 max_wait_count;
+	u32 max_wait_time;
+	int ret = 0;
+
+	/* reset / PCIe ready */
+	max_wait_time = max_wait_count = 100 * 1000;	/* 100 milli sec */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+	} while ((value == 0xFFFFFFFF) && (--max_wait_count));
+
+	/* check ila status */
+	max_wait_time = max_wait_count = 1000 * 1000;	/* 1000 milli sec */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+	} while (((value & SCRATCH_PAD_ILA_READY) !=
+			SCRATCH_PAD_ILA_READY) && (--max_wait_count));
+	if (!max_wait_count)
+		ret = -1;
+	else {
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" ila ready status in %d usec\n",
+				(max_wait_time - max_wait_count)));
+	}
+
+	/* check RAAE status */
+	max_wait_time = max_wait_count = 1800 * 1000;	/* 1800 milli sec */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+	} while (((value & SCRATCH_PAD_RAAE_READY) !=
+				SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
+	if (!max_wait_count)
+		ret = -1;
+	else {
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" raae ready status in %d usec\n",
+					(max_wait_time - max_wait_count)));
+	}
+
+	/* check iop0 status */
+	max_wait_time = max_wait_count = 600 * 1000;	/* 600 milli sec */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+	} while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
+			(--max_wait_count));
+	if (!max_wait_count)
+		ret = -1;
+	else {
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" iop0 ready status in %d usec\n",
+				(max_wait_time - max_wait_count)));
+	}
+
+	/* check iop1 status only for 16 port controllers */
+	if ((pm8001_ha->chip_id != chip_8008) &&
+			(pm8001_ha->chip_id != chip_8009)) {
+		/* 200 milli sec */
+		max_wait_time = max_wait_count = 200 * 1000;
+		do {
+			udelay(1);
+			value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+		} while (((value & SCRATCH_PAD_IOP1_READY) !=
+				SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
+		if (!max_wait_count)
+			ret = -1;
+		else {
+			PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+				"iop1 ready status in %d usec\n",
+				(max_wait_time - max_wait_count)));
+		}
+	}
+
+	return ret;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+	void __iomem *base_addr;
+	u32	value;
+	u32	offset;
+	u32	pcibar;
+	u32	pcilogic;
+
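+	/* Scratchpad 0 encodes where the main config table lives:
+	 * bits 31:26 hold the logical PCI BAR, bits 25:0 hold the table
+	 * offset within that BAR */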
+	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+	offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
+				offset, value));
+	pcilogic = (value & 0xFC000000) >> 26;
+	pcibar = get_pci_bar_index(pcilogic);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+	pm8001_ha->main_cfg_tbl_addr = base_addr =
+		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+	pm8001_ha->general_stat_tbl_addr =
+		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
+					0xFFFFFF);
+	pm8001_ha->inbnd_q_tbl_addr =
+		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
+					0xFFFFFF);
+	pm8001_ha->outbnd_q_tbl_addr =
+		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
+					0xFFFFFF);
+	pm8001_ha->ivt_tbl_addr =
+		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
+					0xFFFFFF);
+	pm8001_ha->pspa_q_tbl_addr =
+		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
+					0xFFFFFF);
+
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("GST OFFSET 0x%x\n",
+			pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("INBND OFFSET 0x%x\n",
+			pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("OBND OFFSET 0x%x\n",
+			pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("IVT OFFSET 0x%x\n",
+			pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("PSPA OFFSET 0x%x\n",
+			pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("addr - main cfg %p general status %p\n",
+			pm8001_ha->main_cfg_tbl_addr,
+			pm8001_ha->general_stat_tbl_addr));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("addr - inbnd %p obnd %p\n",
+			pm8001_ha->inbnd_q_tbl_addr,
+			pm8001_ha->outbnd_q_tbl_addr));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("addr - pspa %p ivt %p\n",
+			pm8001_ha->pspa_q_tbl_addr,
+			pm8001_ha->ivt_tbl_addr));
+}
+
+/**
+ * pm80xx_set_thermal_config - support the thermal configuration
+ * @pm8001_ha: our hba card information.
+ */
+int
+pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
+{
+	struct set_ctrl_cfg_req payload;
+	struct inbound_queue_table *circularQ;
+	int rc;
+	u32 tag;
+	u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+	memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return -1;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
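+	/* cfg_pg[0]: thermal page code in bits 7:0, thermal enable in
+	 * bit 8, thermal-event logging enable in bit 9;
+	 * cfg_pg[1]: local temperature threshold in bits 31:24, remote
+	 * threshold in bits 15:8 (LTEMPHIL/RTEMPHIL assumed to be the
+	 * high-limit values) */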
+	payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
+			(THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+	payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	return rc;
+}
+
+/**
+ * pm80xx_set_sas_protocol_timer_config - support the SAS Protocol
+ * Timer configuration page
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
+{
+	struct set_ctrl_cfg_req payload;
+	struct inbound_queue_table *circularQ;
+	SASProtocolTimerConfig_t SASConfigPage;
+	int rc;
+	u32 tag;
+	u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+	memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+	memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
+
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+
+	if (rc)
+		return -1;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
+
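+	/* build the SAS protocol timer configuration page; most fields
+	 * pack two or more timer values into one 32-bit word */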
+	SASConfigPage.pageCode        =  SAS_PROTOCOL_TIMER_CONFIG_PAGE;
+	SASConfigPage.MST_MSI         =  3 << 15;
+	SASConfigPage.STP_SSP_MCT_TMO =  (STP_MCT_TMO << 16) | SSP_MCT_TMO;
+	SASConfigPage.STP_FRM_TMO     = (SAS_MAX_OPEN_TIME << 24) |
+				(SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
+	SASConfigPage.STP_IDLE_TMO    =  STP_IDLE_TIME;
+
+	if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
+		SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
+
+	SASConfigPage.OPNRJT_RTRY_INTVL =         (SAS_MFD << 16) |
+						SAS_OPNRJT_RTRY_INTVL;
+	SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =  (SAS_DOPNRJT_RTRY_TMO << 16)
+						| SAS_COPNRJT_RTRY_TMO;
+	SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =  (SAS_DOPNRJT_RTRY_THR << 16)
+						| SAS_COPNRJT_RTRY_THR;
+	SASConfigPage.MAX_AIP =  SAS_MAX_AIP;
+
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.pageCode "
+			"0x%08x\n", SASConfigPage.pageCode));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.MST_MSI "
+			" 0x%08x\n", SASConfigPage.MST_MSI));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
+			" 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.STP_FRM_TMO "
+			" 0x%08x\n", SASConfigPage.STP_FRM_TMO));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.STP_IDLE_TMO "
+			" 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
+			" 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
+			" 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+	PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
+			" 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+	PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
+			" 0x%08x\n", SASConfigPage.MAX_AIP));
+
+	memcpy(&payload.cfg_pg, &SASConfigPage,
+			 sizeof(SASProtocolTimerConfig_t));
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+	return rc;
+}
+
+/**
+ * pm80xx_get_encrypt_info - Check for encryption
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 scratch3_value;
+	int ret = 0;
+
+	/* Read encryption status from SCRATCH PAD 3 */
+	scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
+	if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+					SCRATCH_PAD3_ENC_READY) {
+		if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+			pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+						SCRATCH_PAD3_SMF_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+						SCRATCH_PAD3_SMA_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+						SCRATCH_PAD3_SMB_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+		pm8001_ha->encrypt_info.status = 0;
+		PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+			"Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
+			"Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+			scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+			pm8001_ha->encrypt_info.sec_mode,
+			pm8001_ha->encrypt_info.status));
+		ret = 0;
+	} else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
+					SCRATCH_PAD3_ENC_DISABLED) {
+		PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+			"Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+			scratch3_value));
+		pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
+		pm8001_ha->encrypt_info.cipher_mode = 0;
+		pm8001_ha->encrypt_info.sec_mode = 0;
+		return 0;
+	} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+				SCRATCH_PAD3_ENC_DIS_ERR) {
+		pm8001_ha->encrypt_info.status =
+			(scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+		if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+			pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+					SCRATCH_PAD3_SMF_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+					SCRATCH_PAD3_SMA_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+					SCRATCH_PAD3_SMB_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+		PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+			"Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
+			"Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+			scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+			pm8001_ha->encrypt_info.sec_mode,
+			pm8001_ha->encrypt_info.status));
+		ret = -1;
+	} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+				 SCRATCH_PAD3_ENC_ENA_ERR) {
+		pm8001_ha->encrypt_info.status =
+			(scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+		if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+			pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+					SCRATCH_PAD3_SMF_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+					SCRATCH_PAD3_SMA_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+		if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+					SCRATCH_PAD3_SMB_ENABLED)
+			pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+
+		PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+			"Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
+			"Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+			scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+			pm8001_ha->encrypt_info.sec_mode,
+			pm8001_ha->encrypt_info.status));
+		ret = -1;
+	}
+	return ret;
+}
+
+/**
+ * pm80xx_encrypt_update - update flash with encryption information
+ * @pm8001_ha: our hba card information.
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+	struct kek_mgmt_req payload;
+	struct inbound_queue_table *circularQ;
+	int rc;
+	u32 tag;
+	u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+	memset(&payload, 0, sizeof(struct kek_mgmt_req));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return -1;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
+	/* Currently only one key is used. New KEK index is 1.
+	 * Current KEK index is 1. Store KEK to NVRAM is 1.
+	 */
+	payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+					KEK_MGMT_SUBOP_KEYCARDUPDATE);
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+	return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the
+ * whole PM80xx chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+	int ret;
+	u8 i = 0;
+
+	/* check the firmware status */
+	if (-1 == check_fw_ready(pm8001_ha)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Firmware is not ready!\n"));
+		return -EBUSY;
+	}
+
+	/* Initialize pci space address eg: mpi offset */
+	init_pci_device_addresses(pm8001_ha);
+	init_default_table_values(pm8001_ha);
+	read_main_config_table(pm8001_ha);
+	read_general_status_table(pm8001_ha);
+	read_inbnd_queue_table(pm8001_ha);
+	read_outbnd_queue_table(pm8001_ha);
+	read_phy_attr_table(pm8001_ha);
+
+	/* update the main config table, inbound table and outbound table */
+	update_main_config_table(pm8001_ha);
+	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+		update_inbnd_queue_table(pm8001_ha, i);
+	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+		update_outbnd_queue_table(pm8001_ha, i);
+
+	/* notify firmware update finished and check initialization status */
+	if (0 == mpi_init_check(pm8001_ha)) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("MPI initialize successful!\n"));
+	} else {
+		return -EBUSY;
+	}
+
+	/* send SAS protocol timer configuration page to FW */
+	ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+	/* Check for encryption */
+	if (pm8001_ha->chip->encrypt) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("Checking for encryption\n"));
+		ret = pm80xx_get_encrypt_info(pm8001_ha);
+		if (ret == -1) {
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("Encryption error !!\n"));
+			if (pm8001_ha->encrypt_info.status == 0x81) {
+				PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+					"Encryption enabled with error."
+					"Saving encryption key to flash\n"));
+				pm80xx_encrypt_update(pm8001_ha);
+			}
+		}
+	}
+	return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 max_wait_count;
+	u32 value;
+	u32 gst_len_mpistate;
+	init_pci_device_addresses(pm8001_ha);
+	/* Write bit1=1 to the Inbound DoorBell Register to tell the SPCv FW
+	 * to stop the table */
+	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
+
+	/* wait until Inbound DoorBell Clear Register toggled */
+	max_wait_count = 2 * 1000 * 1000;	/* 2 sec for spcv/ve */
+	do {
+		udelay(1);
+		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+		value &= SPCv_MSGU_CFG_TABLE_RESET;
+	} while ((value != 0) && (--max_wait_count));
+
+	if (!max_wait_count) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+		return -1;
+	}
+
+	/* check the MPI-State for termination in progress */
+	/* wait until Inbound DoorBell Clear Register toggled */
+	max_wait_count = 2 * 1000 * 1000;	/* 2 sec for spcv/ve */
+	do {
+		udelay(1);
+		gst_len_mpistate =
+			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+			GST_GSTLEN_MPIS_OFFSET);
+		if (GST_MPI_STATE_UNINIT ==
+			(gst_len_mpistate & GST_MPI_STATE_MASK))
+			break;
+	} while (--max_wait_count);
+	if (!max_wait_count) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+				gst_len_mpistate & GST_MPI_STATE_MASK));
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * pm80xx_chip_soft_rst - soft reset the PM80xx chip, so that all
+ * FW register state is restored to its original status.
+ * @pm8001_ha: our hba card information
+ */
+static int
+pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 regval;
+	u32 bootloader_state;
+
+	/* Check if MPI is in ready state to reset */
+	if (mpi_uninit_check(pm8001_ha) != 0) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("MPI state is not ready\n"));
+		return -1;
+	}
+
+	/* check that the reset register is in its normal state: 0x0 */
+	regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("reset register before write : 0x%x\n", regval));
+
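+	/* trigger a normal SPCv soft reset, then give the firmware
+	 * 500 ms to come back up */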
+	pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
+	mdelay(500);
+
+	regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("reset register after write: 0x%x\n", regval));
+
+	if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
+			SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" soft reset successful [regval: 0x%x]\n",
+					regval));
+	} else {
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" soft reset failed [regval: 0x%x]\n",
+					regval));
+
+		/* check bootloader is successfully executed or in HDA mode */
+		bootloader_state =
+			pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+			SCRATCH_PAD1_BOOTSTATE_MASK;
+
+		if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
+			PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+				"Bootloader state - HDA mode SEEPROM\n"));
+		} else if (bootloader_state ==
+				SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
+			PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+				"Bootloader state - HDA mode Bootstrap Pin\n"));
+		} else if (bootloader_state ==
+				SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
+			PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+				"Bootloader state - HDA mode soft reset\n"));
+		} else if (bootloader_state ==
+					SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
+			PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+				"Bootloader state-HDA mode critical error\n"));
+		}
+		return -EBUSY;
+	}
+
+	/* check the firmware status after reset */
+	if (-1 == check_fw_ready(pm8001_ha)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Firmware is not ready!\n"));
+		return -EBUSY;
+	}
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("SPCv soft reset Complete\n"));
+	return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 i;
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("chip reset start\n"));
+
+	/* do SPCv chip reset. */
+	pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("SPCv soft reset complete\n"));
+
+	/* check whether this delay is required or not */
+	/* delay 10 usec */
+	udelay(10);
+
+	/* wait for 20 msec until the firmware gets reloaded */
+	i = 20;
+	do {
+		mdelay(1);
+	} while ((--i) != 0);
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM80xx chip INT-X interrupts
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM80xx chip INT-X interrupts
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector number
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+	u32 mask;
+	mask = (u32)(1 << vec);
+
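+	/* writing 1 to a bit in ODMR_CLR clears that bit in the outbound
+	 * doorbell mask, i.e. unmasks the interrupt for this MSI-X vector
+	 */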
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+	return;
+#endif
+	pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector number (0xFF masks all vectors)
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+	u32 mask;
+	if (vec == 0xFF)
+		mask = 0xFFFFFFFF;
+	else
+		mask = (u32)(1 << vec);
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+	return;
+#endif
+	pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_ha_dev)
+{
+	int res;
+	u32 ccb_tag;
+	struct pm8001_ccb_info *ccb;
+	struct sas_task *task = NULL;
+	struct task_abort_req task_abort;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SATA_ABORT;
+	int ret;
+
+	if (!pm8001_ha_dev) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+		return;
+	}
+
+	task = sas_alloc_slow_task(GFP_ATOMIC);
+
+	if (!task) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+						"allocate task\n"));
+		return;
+	}
+
+	task->task_done = pm8001_task_done;
+
+	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+	if (res) {
+		sas_free_task(task);
+		return;
+	}
+
+	ccb = &pm8001_ha->ccb_info[ccb_tag];
+	ccb->device = pm8001_ha_dev;
+	ccb->ccb_tag = ccb_tag;
+	ccb->task = task;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
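+	/* build an ABORT_ALL command: abort_all = 1 asks the FW to abort
+	 * every outstanding command for this device, not a single tag
+	 */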
+	memset(&task_abort, 0, sizeof(task_abort));
+	task_abort.abort_all = cpu_to_le32(1);
+	task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+	task_abort.tag = cpu_to_le32(ccb_tag);
+
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+}
+
+static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
+		struct pm8001_device *pm8001_ha_dev)
+{
+	struct sata_start_req sata_cmd;
+	int res;
+	u32 ccb_tag;
+	struct pm8001_ccb_info *ccb;
+	struct sas_task *task = NULL;
+	struct host_to_dev_fis fis;
+	struct domain_device *dev;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+	task = sas_alloc_slow_task(GFP_ATOMIC);
+
+	if (!task) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("cannot allocate task !!!\n"));
+		return;
+	}
+	task->task_done = pm8001_task_done;
+
+	res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+	if (res) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("cannot allocate tag !!!\n"));
+		sas_free_task(task);
+		return;
+	}
+
+	/* allocate the domain device by ourselves, as libsas
+	 * is not going to provide one
+	 */
+	dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+	if (!dev) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Domain device cannot be allocated\n"));
+		sas_free_task(task);
+		pm8001_tag_free(pm8001_ha, ccb_tag);
+		return;
+	}
+	task->dev = dev;
+	task->dev->lldd_dev = pm8001_ha_dev;
+
+	ccb = &pm8001_ha->ccb_info[ccb_tag];
+	ccb->device = pm8001_ha_dev;
+	ccb->ccb_tag = ccb_tag;
+	ccb->task = task;
+	pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+	pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+	memset(&sata_cmd, 0, sizeof(sata_cmd));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	/* construct read log FIS */
+	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+	fis.fis_type = 0x27;
+	fis.flags = 0x80;
+	fis.command = ATA_CMD_READ_LOG_EXT;
+	fis.lbal = 0x10;
+	fis.sector_count = 0x1;
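+	/* 0x27/0x80 mark a host-to-device command FIS; lbal = 0x10 selects
+	 * the NCQ Command Error log, from which the failed tag is recovered
+	 */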
+
+	sata_cmd.tag = cpu_to_le32(ccb_tag);
+	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+	sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+	memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+	res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+}
+
+/**
+ * mpi_ssp_completion - process the FW response to an SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request (for example an IO request) and
+ * has filled the SG buffers with the data, it triggers this event to signal
+ * that it has finished the job; check the corresponding buffer.
+ * We then notify the caller, which may be waiting for the result, so it can
+ * tell the upper layer that the task has finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 status;
+	u32 param;
+	u32 tag;
+	struct ssp_completion_resp *psspPayload;
+	struct task_status_struct *ts;
+	struct ssp_response_iu *iu;
+	struct pm8001_device *pm8001_dev;
+	psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psspPayload->status);
+	tag = le32_to_cpu(psspPayload->tag);
+	ccb = &pm8001_ha->ccb_info[tag];
+	if ((status == IO_ABORTED) && ccb->open_retry) {
+		/* Being completed by another */
+		ccb->open_retry = 0;
+		return;
+	}
+	pm8001_dev = ccb->device;
+	param = le32_to_cpu(psspPayload->param);
+	t = ccb->task;
+
+	if (status && status != IO_UNDERFLOW)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sas IO status 0x%x\n", status));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+	ts = &t->task_status;
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
+				param));
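+		/* param carries the residual byte count: zero means a clean
+		 * SAM GOOD completion, nonzero means the SSP response IU
+		 * must be parsed for the real status
+		 */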
+		if (param == 0) {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_STAT_GOOD;
+		} else {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_PROTO_RESPONSE;
+			ts->residual = param;
+			iu = &psspPayload->ssp_resp_iu;
+			sas_ssp_task_response(pm8001_ha->dev, t, iu);
+		}
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB Tag\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		break;
+	case IO_UNDERFLOW:
+		/* SSP Completion with error */
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
+				param));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		ts->residual = param;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		/* Force the midlayer to retry */
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		if (!t->uldd_task)
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_DMA:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_XFER_ERROR_DMA\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_OFFSET_MISMATCH:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_PORT_IN_RESET:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_PORT_IN_RESET\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		if (!t->uldd_task)
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_DS_NON_OPERATIONAL);
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_TM_TAG_NOT_FOUND:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	}
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("scsi_status = 0x%x\n ",
+		psspPayload->ssp_resp_iu.status));
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"task 0x%p done with io_status 0x%x resp 0x%x "
+			"stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	unsigned long flags;
+	struct task_status_struct *ts;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct ssp_event_resp *psspPayload =
+		(struct ssp_event_resp *)(piomb + 4);
+	u32 event = le32_to_cpu(psspPayload->event);
+	u32 tag = le32_to_cpu(psspPayload->tag);
+	u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
+	pm8001_dev = ccb->device;
+	if (event)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sas IO status 0x%x\n", event));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+	ts = &t->task_status;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+				port_id, tag, event));
+	switch (event) {
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+		return;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		if (!t->uldd_task)
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+		return;
+	case IO_XFER_ERROR_UNEXPECTED_PHASE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_OFFSET_MISMATCH:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+		/* TBC: used default set values */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	case IO_XFER_CMD_FRAME_ISSUED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+		return;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", event));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"task 0x%p done with event 0x%x resp 0x%x "
+			"stat 0x%x but aborted by upper layer!\n",
+			t, event, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	u32 param;
+	u32 status;
+	u32 tag;
+	struct sata_completion_resp *psataPayload;
+	struct task_status_struct *ts;
+	struct ata_task_resp *resp;
+	u32 *sata_resp;
+	struct pm8001_device *pm8001_dev;
+	unsigned long flags;
+
+	psataPayload = (struct sata_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psataPayload->status);
+	tag = le32_to_cpu(psataPayload->tag);
+
+	if (!tag) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("tag null\n"));
+		return;
+	}
+	ccb = &pm8001_ha->ccb_info[tag];
+	param = le32_to_cpu(psataPayload->param);
+	if (ccb) {
+		t = ccb->task;
+		pm8001_dev = ccb->device;
+	} else {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("ccb null\n"));
+		return;
+	}
+
+	if (t) {
+		if (t->dev && (t->dev->lldd_dev))
+			pm8001_dev = t->dev->lldd_dev;
+	} else {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task null\n"));
+		return;
+	}
+
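+	/* the driver's internal READ LOG EXT command carries no upper-layer
+	 * task, so the sanity check below is bypassed while that flag is set
+	 */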
+	if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+		&& unlikely(!t || !t->lldd_task || !t->dev)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task or dev null\n"));
+		return;
+	}
+
+	ts = &t->task_status;
+	if (!ts) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("ts null\n"));
+		return;
+	}
+
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		if (param == 0) {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_STAT_GOOD;
+			/* check if response is for SEND READ LOG */
+			if (pm8001_dev &&
+				(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+				/* set new bit for abort_all */
+				pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+				/* clear bit for read log */
+				pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+				pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+				/* Free the tag */
+				pm8001_tag_free(pm8001_ha, tag);
+				sas_free_task(t);
+				return;
+			}
+		} else {
+			u8 len;
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_PROTO_RESPONSE;
+			ts->residual = param;
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+				param));
+			sata_resp = &psataPayload->sata_resp[0];
+			resp = (struct ata_task_resp *)ts->buf;
+			if (t->ata_task.dma_xfer == 0 &&
+			t->data_dir == PCI_DMA_FROMDEVICE) {
+				len = sizeof(struct pio_setup_fis);
+				PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("PIO read len = %d\n", len));
+			} else if (t->ata_task.use_ncq) {
+				len = sizeof(struct set_dev_bits_fis);
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("FPDMA len = %d\n", len));
+			} else {
+				len = sizeof(struct dev_to_host_fis);
+				PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("other len = %d\n", len));
+			}
+			if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+				resp->frame_len = len;
+				memcpy(&resp->ending_fis[0], sata_resp, len);
+				ts->buf_valid_size = sizeof(*resp);
+			} else
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("response to large\n"));
+		}
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB Tag\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+		/* the following cases still need to be handled */
+	case IO_UNDERFLOW:
+		/* SATA Completion with error */
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		ts->residual = param;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_INTERRUPTED;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*in order to force CPU ordering*/
+			spin_unlock_irq(&pm8001_ha->lock);
+			t->task_done(t);
+			spin_lock_irq(&pm8001_ha->lock);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			spin_unlock_irq(&pm8001_ha->lock);
+			t->task_done(t);
+			spin_lock_irq(&pm8001_ha->lock);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/* ditto*/
+			spin_unlock_irq(&pm8001_ha->lock);
+			t->task_done(t);
+			spin_lock_irq(&pm8001_ha->lock);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_DMA:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_DMA\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		break;
+	case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_PORT_IN_RESET:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_PORT_IN_RESET\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha, pm8001_dev,
+					IO_DS_NON_OPERATIONAL);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			spin_unlock_irq(&pm8001_ha->lock);
+			t->task_done(t);
+			spin_lock_irq(&pm8001_ha->lock);
+			return;
+		}
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_IN_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha, pm8001_dev,
+					IO_DS_IN_ERROR);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			spin_unlock_irq(&pm8001_ha->lock);
+			t->task_done(t);
+			spin_lock_irq(&pm8001_ha->lock);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task 0x%p done with io_status 0x%x"
+			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else if (t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* ditto */
+		spin_unlock_irq(&pm8001_ha->lock);
+		t->task_done(t);
+		spin_lock_irq(&pm8001_ha->lock);
+	} else if (!t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/*ditto*/
+		spin_unlock_irq(&pm8001_ha->lock);
+		t->task_done(t);
+		spin_lock_irq(&pm8001_ha->lock);
+	}
+}
+
+/*See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct task_status_struct *ts;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct sata_event_resp *psataPayload =
+		(struct sata_event_resp *)(piomb + 4);
+	u32 event = le32_to_cpu(psataPayload->event);
+	u32 tag = le32_to_cpu(psataPayload->tag);
+	u32 port_id = le32_to_cpu(psataPayload->port_id);
+	u32 dev_id = le32_to_cpu(psataPayload->device_id);
+	unsigned long flags;
+
+	ccb = &pm8001_ha->ccb_info[tag];
+
+	if (ccb) {
+		t = ccb->task;
+		pm8001_dev = ccb->device;
+	} else {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("No CCB !!!. returning\n"));
+		return;
+	}
+	if (event)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("SATA EVENT 0x%x\n", event));
+
+	/* Check if this is NCQ error */
+	if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+		/* find device using device id */
+		pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+		/* send read log extension */
+		if (pm8001_dev)
+			pm80xx_send_read_log(pm8001_ha, pm8001_dev);
+		return;
+	}
+
+	if (unlikely(!t || !t->lldd_task || !t->dev)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task or dev null\n"));
+		return;
+	}
+
+	ts = &t->task_status;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+				port_id, tag, event));
+	switch (event) {
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_INTERRUPTED;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/*ditto*/
+			spin_unlock_irq(&pm8001_ha->lock);
+			t->task_done(t);
+			spin_lock_irq(&pm8001_ha->lock);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_NAK_RECEIVED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_PEER_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_NAK_R_ERR;
+		break;
+	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_UNEXPECTED_PHASE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_OFFSET_MISMATCH:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_CMD_FRAME_ISSUED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+		break;
+	case IO_XFER_PIO_SETUP_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+		/* TBC: used default set values */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	case IO_XFER_DMA_ACTIVATE_TIMEOUT:
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n"));
+		/* TBC: used default set values */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", event));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task 0x%p done with io_status 0x%x"
+			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+			t, event, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else if (t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* ditto */
+		spin_unlock_irq(&pm8001_ha->lock);
+		t->task_done(t);
+		spin_lock_irq(&pm8001_ha->lock);
+	} else if (!t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/*ditto*/
+		spin_unlock_irq(&pm8001_ha->lock);
+		t->task_done(t);
+		spin_lock_irq(&pm8001_ha->lock);
+	}
+}
+
+/*See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	u32 param, i;
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 status;
+	u32 tag;
+	struct smp_completion_resp *psmpPayload;
+	struct task_status_struct *ts;
+	struct pm8001_device *pm8001_dev;
+	char *pdma_respaddr = NULL;
+
+	psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psmpPayload->status);
+	tag = le32_to_cpu(psmpPayload->tag);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	param = le32_to_cpu(psmpPayload->param);
+	t = ccb->task;
+	ts = &t->task_status;
+	pm8001_dev = ccb->device;
+	if (status)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("smp IO status 0x%x\n", status));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+
+	switch (status) {
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_GOOD;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("DIRECT RESPONSE Length:%d\n",
+						param));
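+			/* in SMP direct mode the response bytes live in the
+			 * IOMB itself; copy them into the DMA response buffer
+			 * where libsas expects to find them
+			 */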
+			pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+						((u64)sg_dma_address
+						(&t->smp_task.smp_resp))));
+			for (i = 0; i < param; i++) {
+				*(pdma_respaddr+i) = psmpPayload->_r_a[i];
+				PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+					"SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+					i, *(pdma_respaddr+i),
+					psmpPayload->_r_a[i]));
+			}
+		}
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_ERROR_HW_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_BUSY;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_BUSY;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_BUSY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		pm8001_handle_event(pm8001_ha,
+				pm8001_dev,
+				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+		break;
+	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+		break;
+	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+		break;
+	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+		break;
+	case IO_XFER_ERROR_RX_FRAME:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_ERROR_INTERNAL_SMP_RESOURCE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_QUEUE_FULL;
+		break;
+	case IO_PORT_IN_RESET:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_PORT_IN_RESET\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_DS_NON_OPERATIONAL:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		/* not allowed case. Therefore, return failed status */
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"task 0x%p done with io_status 0x%x resp 0x%x"
+			"stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/**
+ * pm80xx_hw_event_ack_req - for PM80xx, some events need to be acknowledged
+ * to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the inbound queue number
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+	struct hw_event_ack_req	 payload;
+	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+	struct inbound_queue_table *circularQ;
+
+	memset((u8 *)&payload, 0, sizeof(payload));
+	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+	payload.tag = cpu_to_le32(1);
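+	/* event acks are fire-and-forget, so a fixed tag of 1 is used and no
+	 * completion is tracked for this IOMB (assumption)
+	 */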
+	payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+		((phyId & 0xFF) << 24) | (port_id & 0xFF));
+	payload.param0 = cpu_to_le32(param0);
+	payload.param1 = cpu_to_le32(param1);
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+	u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - FW tells us a SAS phy is up.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
+	u8 link_rate =
+		(u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	u8 phy_id =
+		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	u8 deviceType = pPayload->sas_identify.dev_type;
+	port->port_state = portstate;
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+		"portid:%d; phyid:%d; linkrate:%d; "
+		"portstate:%x; devicetype:%x\n",
+		port_id, phy_id, link_rate, portstate, deviceType));
+
+	switch (deviceType) {
+	case SAS_PHY_UNUSED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("device type no device.\n"));
+		break;
+	case SAS_END_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+		pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+			PHY_NOTIFY_ENABLE_SPINUP);
+		port->port_attached = 1;
+		pm8001_get_lrate_mode(phy, link_rate);
+		break;
+	case SAS_EDGE_EXPANDER_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("expander device.\n"));
+		port->port_attached = 1;
+		pm8001_get_lrate_mode(phy, link_rate);
+		break;
+	case SAS_FANOUT_EXPANDER_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("fanout expander device.\n"));
+		port->port_attached = 1;
+		pm8001_get_lrate_mode(phy, link_rate);
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("unknown device type(%x)\n", deviceType));
+		break;
+	}
+	phy->phy_type |= PORT_TYPE_SAS;
+	phy->identify.device_type = deviceType;
+	phy->phy_attached = 1;
+	if (phy->identify.device_type == SAS_END_DEVICE)
+		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+	else if (phy->identify.device_type != SAS_PHY_UNUSED)
+		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+		sizeof(struct sas_identify_frame)-4);
+	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	if (pm8001_ha->flags == PM8001F_RUN_TIME)
+		mdelay(200);/*delay a moment to wait disk to spinup*/
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - FW tells us a SATA phy is up.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u8 link_rate =
+		(u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	u8 phy_id =
+		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+		"port id %d, phy id %d link_rate %d portstate 0x%x\n",
+				port_id, phy_id, link_rate, portstate));
+
+	port->port_state = portstate;
+	port->port_attached = 1;
+	pm8001_get_lrate_mode(phy, link_rate);
+	phy->phy_type |= PORT_TYPE_SATA;
+	phy->phy_attached = 1;
+	phy->sas_phy.oob_mode = SATA_OOB_MODE;
+	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+		sizeof(struct dev_to_host_fis));
+	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+	phy->identify.device_type = SAS_SATA_DEV;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that the phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+	u8 phy_id =
+		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	port->port_state = portstate;
+	phy->phy_type = 0;
+	phy->identify.device_type = 0;
+	phy->phy_attached = 0;
+	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
+	switch (portstate) {
+	case PORT_VALID:
+		break;
+	case PORT_INVALID:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" PortInvalid portID %d\n", port_id));
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Last phy Down and port invalid\n"));
+		port->port_attached = 0;
+		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	case PORT_IN_RESET:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Port In Reset portID %d\n", port_id));
+		break;
+	case PORT_NOT_ESTABLISHED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+		port->port_attached = 0;
+		break;
+	case PORT_LOSTCOMM:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" Last phy Down and port invalid\n"));
+		port->port_attached = 0;
+		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+			port_id, phy_id, 0, 0);
+		break;
+	default:
+		port->port_attached = 0;
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" phy Down and(default) = 0x%x\n",
+			portstate));
+		break;
+	}
+}
+
+static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct phy_start_resp *pPayload =
+		(struct phy_start_resp *)(piomb + 4);
+	u32 status =
+		le32_to_cpu(pPayload->status);
+	u32 phy_id =
+		le32_to_cpu(pPayload->phyid);
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
+				status, phy_id));
+	if (status == 0) {
+		phy->phy_state = 1;
+		if (pm8001_ha->flags == PM8001F_RUN_TIME)
+			complete(phy->enable_completion);
+	}
+	return 0;
+}
+
+/**
+ * mpi_thermal_hw_event - a thermal hw event has arrived.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct thermal_hw_event *pPayload =
+		(struct thermal_hw_event *)(piomb + 4);
+
+	u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
+	u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
+
+	if (thermal_event & 0x40) {
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Thermal Event: Local high temperature violated!\n"));
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Thermal Event: Measured local high temperature %d\n",
+				((rht_lht & 0xFF00) >> 8)));
+	}
+	if (thermal_event & 0x10) {
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Thermal Event: Remote high temperature violated!\n"));
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Thermal Event: Measured remote high temperature %d\n",
+				((rht_lht & 0xFF000000) >> 24)));
+	}
+	return 0;
+}
+
+/**
+ * mpi_hw_event - a hw event has arrived.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	unsigned long flags;
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
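+	/* lr_status_evt_portid packs port id, event type and status into one
+	 * dword; phyid_npip_portstate carries the phy id and port state
+	 */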
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	u8 phy_id =
+		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+	u16 eventType =
+		(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
+	u8 status =
+		(u8)((lr_status_evt_portid & 0x0F000000) >> 24);
+
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
+				port_id, phy_id, eventType, status));
+
+	switch (eventType) {
+
+	case HW_EVENT_SAS_PHY_UP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
+		hw_event_sas_phy_up(pm8001_ha, piomb);
+		break;
+	case HW_EVENT_SATA_PHY_UP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+		hw_event_sata_phy_up(pm8001_ha, piomb);
+		break;
+	case HW_EVENT_SATA_SPINUP_HOLD:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+		break;
+	case HW_EVENT_PHY_DOWN:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+		phy->phy_attached = 0;
+		phy->phy_state = 0;
+		hw_event_phy_down(pm8001_ha, piomb);
+		break;
+	case HW_EVENT_PORT_INVALID:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	/* A broadcast change primitive was received; tell libsas about
+	 * this event so it revalidates the SAS domain. */
+	case HW_EVENT_BROADCAST_CHANGE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+			port_id, phy_id, 1, 0);
+		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+		break;
+	case HW_EVENT_PHY_ERROR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+		sas_phy_disconnected(&phy->sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+		break;
+	case HW_EVENT_BROADCAST_EXP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+		break;
+	case HW_EVENT_LINK_ERR_INVALID_DWORD:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_CODE_VIOLATION,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+				"HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_MALFUNCTION:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+		break;
+	case HW_EVENT_BROADCAST_SES:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+		break;
+	case HW_EVENT_INBOUND_CRC_ERROR:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_INBOUND_CRC_ERROR,
+			port_id, phy_id, 0, 0);
+		break;
+	case HW_EVENT_HARD_RESET_RECEIVED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+		sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+		break;
+	case HW_EVENT_ID_FRAME_TIMEOUT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_PORT_RESET_TIMER_TMO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+		pm80xx_hw_event_ack_req(pm8001_ha, 0,
+			HW_EVENT_PORT_RECOVERY_TIMER_TMO,
+			port_id, phy_id, 0, 0);
+		sas_phy_disconnected(sas_phy);
+		phy->phy_attached = 0;
+		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		break;
+	case HW_EVENT_PORT_RECOVER:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+		break;
+	case HW_EVENT_PORT_RESET_COMPLETE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+		break;
+	case EVENT_BROADCAST_ASYNCH_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("Unknown event type 0x%x\n", eventType));
+		break;
+	}
+	return 0;
+}
+
+/**
+ * mpi_phy_stop_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct phy_stop_resp *pPayload =
+		(struct phy_stop_resp *)(piomb + 4);
+	u32 status =
+		le32_to_cpu(pPayload->status);
+	u32 phyid =
+		le32_to_cpu(pPayload->phyid);
+	struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("phy:0x%x status:0x%x\n",
+					phyid, status));
+	if (status == 0)
+		phy->phy_state = 0;
+	return 0;
+}
+
+/**
+ * mpi_set_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	struct set_ctrl_cfg_resp *pPayload =
+			(struct set_ctrl_cfg_resp *)(piomb + 4);
+	u32 status = le32_to_cpu(pPayload->status);
+	u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+			status, err_qlfr_pgcd));
+
+	return 0;
+}
+
+/**
+ * mpi_get_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" pm80xx_addition_functionality\n"));
+
+	return 0;
+}
+
+/**
+ * mpi_get_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" pm80xx_addition_functionality\n"));
+
+	return 0;
+}
+
+/**
+ * mpi_flash_op_ext_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" pm80xx_addition_functionality\n"));
+
+	return 0;
+}
+
+/**
+ * mpi_set_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" pm80xx_addition_functionality\n"));
+
+	return 0;
+}
+
+/**
+ * mpi_kek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
+
+	u32 status = le32_to_cpu(pPayload->status);
+	u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
+	u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
+
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+		"KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+		status, kidx_new_curr_ksop, err_qlfr));
+
+	return 0;
+}
+
+/**
+ * mpi_dek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" pm80xx_addition_functionality\n"));
+
+	return 0;
+}
+
+/**
+ * ssp_coalesced_comp_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
+			void *piomb)
+{
+	PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk(" pm80xx_addition_functionality\n"));
+
+	return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	__le32 pHeader = *(__le32 *)piomb;
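+	/*
+	 * Bits [11:0] of the IOMB header carry the message operation
+	 * code (see struct mpi_msg_hdr in pm80xx_hwi.h); the other
+	 * header fields (category, OQ id, buffer count, valid bit)
+	 * are not needed to dispatch the message here.
+	 */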
+	u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
+
+	switch (opc) {
+	case OPC_OUB_ECHO:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+		break;
+	case OPC_OUB_HW_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_HW_EVENT\n"));
+		mpi_hw_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_THERM_HW_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_THERM_HW_EVENT\n"));
+		mpi_thermal_hw_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_COMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_COMP\n"));
+		mpi_ssp_completion(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SMP_COMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SMP_COMP\n"));
+		mpi_smp_completion(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_LOCAL_PHY_CNTRL:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+		pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEV_REGIST:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+		pm8001_mpi_reg_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEREG_DEV:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("unregister the device\n"));
+		pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_DEV_HANDLE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+		break;
+	case OPC_OUB_SATA_COMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SATA_COMP\n"));
+		mpi_sata_completion(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SATA_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+		mpi_sata_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+		mpi_ssp_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEV_HANDLE_ARRIV:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+		/*This is for target*/
+		break;
+	case OPC_OUB_SSP_RECV_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+		/*This is for target*/
+		break;
+	case OPC_OUB_FW_FLASH_UPDATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+		pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GPIO_RESPONSE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+		break;
+	case OPC_OUB_GPIO_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+		break;
+	case OPC_OUB_GENERAL_EVENT:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+		pm8001_mpi_general_event(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_ABORT_RSP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SATA_ABORT_RSP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SAS_DIAG_MODE_START_END:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+		break;
+	case OPC_OUB_SAS_DIAG_EXECUTE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+		break;
+	case OPC_OUB_GET_TIME_STAMP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+		break;
+	case OPC_OUB_SAS_HW_EVENT_ACK:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+		break;
+	case OPC_OUB_PORT_CONTROL:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+		break;
+	case OPC_OUB_SMP_ABORT_RSP:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+		pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_NVMD_DATA:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+		pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SET_NVMD_DATA:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+		pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+		break;
+	case OPC_OUB_SET_DEVICE_STATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+		pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_DEVICE_STATE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+		break;
+	case OPC_OUB_SET_DEV_INFO:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+		break;
+	/* spcv specifc commands */
+	case OPC_OUB_PHY_START_RESP:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+		mpi_phy_start_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_PHY_STOP_RESP:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+		mpi_phy_stop_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SET_CONTROLLER_CONFIG:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+		mpi_set_controller_config_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_CONTROLLER_CONFIG:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+		mpi_get_controller_config_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_GET_PHY_PROFILE:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+		mpi_get_phy_profile_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_FLASH_OP_EXT:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+		mpi_flash_op_ext_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SET_PHY_PROFILE:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+		mpi_set_phy_profile_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_KEK_MANAGEMENT_RESP:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+		mpi_kek_management_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_DEK_MANAGEMENT_RESP:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+		mpi_dek_management_resp(pm8001_ha, piomb);
+		break;
+	case OPC_OUB_SSP_COALESCED_COMP_RESP:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+		ssp_coalesced_comp_resp(pm8001_ha, piomb);
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+			"Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+		break;
+	}
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+	struct outbound_queue_table *circularQ;
+	void *pMsg1 = NULL;
+	u8 uninitialized_var(bc);
+	u32 ret = MPI_IO_STATUS_FAIL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+	do {
+		ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+		if (MPI_IO_STATUS_SUCCESS == ret) {
+			/* process the outbound message */
+			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+			/* free the message from the outbound circular buffer */
+			pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+							circularQ, bc);
+		}
+		if (MPI_IO_STATUS_BUSY == ret) {
+			/* Update the producer index from SPC */
+			circularQ->producer_index =
+				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+			if (le32_to_cpu(circularQ->producer_index) ==
+				circularQ->consumer_idx)
+				/* OQ is empty */
+				break;
+		}
+	} while (1);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+	return ret;
+}
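+
+/*
+ * Note on the consume loop above: pm8001_mpi_msg_consume() returns
+ * MPI_IO_STATUS_SUCCESS while messages are pending and
+ * MPI_IO_STATUS_BUSY once the cached queue state looks empty; the
+ * driver then re-reads the producer index the controller DMAs into
+ * host memory (pi_virt) and exits only when it matches the consumer
+ * index, i.e. the outbound queue is fully drained.
+ */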
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
+	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
+	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
+};
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+			struct smp_req *psmp_cmd, int mode, int length)
+{
+	psmp_cmd->tag = hTag;
+	psmp_cmd->device_id = cpu_to_le32(deviceID);
+	if (mode == SMP_DIRECT) {
+		length = length - 4; /* subtract crc */
+		psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+	} else {
+		psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
+	}
+}
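+
+/*
+ * len_ip_ir encoding used above (see struct smp_req): in direct mode
+ * bits [23:16] hold the direct payload length, with the 4 CRC bytes
+ * subtracted from the frame length; in indirect mode bit 0 (indirect
+ * response) and bit 1 (indirect payload) are set instead and the
+ * payload is described by the long_smp_req DMA descriptors.
+ */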
+
+/**
+ * pm80xx_chip_smp_req - send an SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	int elem, rc;
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	struct scatterlist *sg_req, *sg_resp;
+	u32 req_len, resp_len;
+	struct smp_req smp_cmd;
+	u32 opc;
+	struct inbound_queue_table *circularQ;
+	char *preq_dma_addr = NULL;
+	__le64 tmp_addr;
+	u32 i, length;
+
+	memset(&smp_cmd, 0, sizeof(smp_cmd));
+	/*
+	 * DMA-map SMP request, response buffers
+	 */
+	sg_req = &task->smp_task.smp_req;
+	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+	if (!elem)
+		return -ENOMEM;
+	req_len = sg_dma_len(sg_req);
+
+	sg_resp = &task->smp_task.smp_resp;
+	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+	if (!elem) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	resp_len = sg_dma_len(sg_resp);
+	/* must be in dwords */
+	if ((req_len & 0x3) || (resp_len & 0x3)) {
+		rc = -EINVAL;
+		goto err_out_2;
+	}
+
+	opc = OPC_INB_SMP_REQUEST;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+
+	length = sg_req->length;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("SMP Frame Length %d\n", sg_req->length));
+	if (!(length - 8))
+		pm8001_ha->smp_exp_mode = SMP_DIRECT;
+	else
+		pm8001_ha->smp_exp_mode = SMP_INDIRECT;
+
+	/* DIRECT MODE support only in spcv/ve */
+	pm8001_ha->smp_exp_mode = SMP_DIRECT;
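+	/*
+	 * smp_exp_mode was just forced to SMP_DIRECT (only direct mode
+	 * is supported on SPCv/ve), so the SMP_INDIRECT branch below is
+	 * effectively dead code here.
+	 */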
+
+	tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+	preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+
+	/* INDIRECT MODE command settings. Use DMA */
+	if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+		/* for SPCv indirect mode. Place the top 4 bytes of
+		 * SMP Request header here. */
+		for (i = 0; i < 4; i++)
+			smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+		/* exclude top 4 bytes for SMP req header */
+		smp_cmd.long_smp_req.long_req_addr =
+			cpu_to_le64((u64)sg_dma_address
+				(&task->smp_task.smp_req) - 4);
+		/* exclude 4 bytes for SMP req header and CRC */
+		smp_cmd.long_smp_req.long_req_size =
+			cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
+		smp_cmd.long_smp_req.long_resp_addr =
+				cpu_to_le64((u64)sg_dma_address
+					(&task->smp_task.smp_resp));
+		smp_cmd.long_smp_req.long_resp_size =
+				cpu_to_le32((u32)sg_dma_len
+					(&task->smp_task.smp_resp)-4);
+	} else { /* DIRECT MODE */
+		smp_cmd.long_smp_req.long_req_addr =
+			cpu_to_le64((u64)sg_dma_address
+					(&task->smp_task.smp_req));
+		smp_cmd.long_smp_req.long_req_size =
+			cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+		smp_cmd.long_smp_req.long_resp_addr =
+			cpu_to_le64((u64)sg_dma_address
+				(&task->smp_task.smp_resp));
+		smp_cmd.long_smp_req.long_resp_size =
+			cpu_to_le32
+			((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+	}
+	if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+		for (i = 0; i < length; i++)
+			if (i < 16) {
+				smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+				PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+					"Byte[%d]:%x (DMA data:%x)\n",
+					i, smp_cmd.smp_req16[i],
+					*(preq_dma_addr)));
+			} else {
+				smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+				PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+					"Byte[%d]:%x (DMA data:%x)\n",
+					i, smp_cmd.smp_req[i],
+					*(preq_dma_addr)));
+			}
+	}
+
+	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
+				&smp_cmd, pm8001_ha->smp_exp_mode, length);
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+	return 0;
+
+err_out_2:
+	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+			PCI_DMA_FROMDEVICE);
+err_out:
+	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+			PCI_DMA_TODEVICE);
+	return rc;
+}
+
+static int check_enc_sas_cmd(struct sas_task *task)
+{
+	if ((task->ssp_task.cdb[0] == READ_10)
+		|| (task->ssp_task.cdb[0] == WRITE_10)
+		|| (task->ssp_task.cdb[0] == WRITE_VERIFY))
+		return 1;
+	else
+		return 0;
+}
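+
+/*
+ * The CDB opcodes accepted above are all 10-byte commands whose
+ * logical block address sits in CDB bytes 2..5 (big endian);
+ * pm80xx_chip_ssp_io_req() relies on that layout when it builds the
+ * XTS tweak value (twk_val0) from cdb[2]..cdb[5].
+ */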
+
+static int check_enc_sat_cmd(struct sas_task *task)
+{
+	int ret = 0;
+	switch (task->ata_task.fis.command) {
+	case ATA_CMD_FPDMA_READ:
+	case ATA_CMD_READ_EXT:
+	case ATA_CMD_READ:
+	case ATA_CMD_FPDMA_WRITE:
+	case ATA_CMD_WRITE_EXT:
+	case ATA_CMD_WRITE:
+	case ATA_CMD_PIO_READ:
+	case ATA_CMD_PIO_READ_EXT:
+	case ATA_CMD_PIO_WRITE:
+	case ATA_CMD_PIO_WRITE_EXT:
+		ret = 1;
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
+/**
+ * pm80xx_chip_ssp_io_req - send an SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+	struct ssp_ini_io_start_req ssp_cmd;
+	u32 tag = ccb->ccb_tag;
+	int ret;
+	u64 phys_addr;
+	struct inbound_queue_table *circularQ;
+	static u32 inb;
+	static u32 outb;
+	u32 opc = OPC_INB_SSPINIIOSTART;
+	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+	/* data address domain added for spcv; set to 0 by host,
+	 * used internally by controller
+	 * 0 for SAS 1.1 and SAS 2.0 compatible TLR
+	 */
+	ssp_cmd.dad_dir_m_tlr =
+		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
+	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+	ssp_cmd.tag = cpu_to_le32(tag);
+	if (task->ssp_task.enable_first_burst)
+		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	/* Check if encryption is set */
+	if (pm8001_ha->chip->encrypt &&
+		!(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Encryption enabled. Sending Encrypt SAS command 0x%x\n",
+			task->ssp_task.cdb[0]));
+		opc = OPC_INB_SSP_INI_DIF_ENC_IO;
+		/* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
+		ssp_cmd.dad_dir_m_tlr =	cpu_to_le32
+			((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
+
+		/* fill in PRD (scatter/gather) table, if any */
+		if (task->num_scatter > 1) {
+			pm8001_chip_make_sg(task->scatter,
+						ccb->n_elem, ccb->buf_prd);
+			phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+			ssp_cmd.enc_addr_low =
+				cpu_to_le32(lower_32_bits(phys_addr));
+			ssp_cmd.enc_addr_high =
+				cpu_to_le32(upper_32_bits(phys_addr));
+			ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+		} else if (task->num_scatter == 1) {
+			u64 dma_addr = sg_dma_address(task->scatter);
+			ssp_cmd.enc_addr_low =
+				cpu_to_le32(lower_32_bits(dma_addr));
+			ssp_cmd.enc_addr_high =
+				cpu_to_le32(upper_32_bits(dma_addr));
+			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+			ssp_cmd.enc_esgl = 0;
+		} else if (task->num_scatter == 0) {
+			ssp_cmd.enc_addr_low = 0;
+			ssp_cmd.enc_addr_high = 0;
+			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+			ssp_cmd.enc_esgl = 0;
+		}
+		/* XTS mode. All other fields are 0 */
+		ssp_cmd.key_cmode = 0x6 << 4;
+		/* set tweak values. Should be the start lba */
+		ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
+						(task->ssp_task.cdb[3] << 16) |
+						(task->ssp_task.cdb[4] << 8) |
+						(task->ssp_task.cdb[5]));
+	} else {
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Sending Normal SAS command 0x%x inb q %x\n",
+			task->ssp_task.cdb[0], inb));
+		/* fill in PRD (scatter/gather) table, if any */
+		if (task->num_scatter > 1) {
+			pm8001_chip_make_sg(task->scatter, ccb->n_elem,
+					ccb->buf_prd);
+			phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+			ssp_cmd.addr_low =
+				cpu_to_le32(lower_32_bits(phys_addr));
+			ssp_cmd.addr_high =
+				cpu_to_le32(upper_32_bits(phys_addr));
+			ssp_cmd.esgl = cpu_to_le32(1<<31);
+		} else if (task->num_scatter == 1) {
+			u64 dma_addr = sg_dma_address(task->scatter);
+			ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+			ssp_cmd.addr_high =
+				cpu_to_le32(upper_32_bits(dma_addr));
+			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+			ssp_cmd.esgl = 0;
+		} else if (task->num_scatter == 0) {
+			ssp_cmd.addr_low = 0;
+			ssp_cmd.addr_high = 0;
+			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+			ssp_cmd.esgl = 0;
+		}
+	}
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
+
+	/* rotate the outb queue */
+	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+
+	return ret;
+}
+
+static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_ccb_info *ccb)
+{
+	struct sas_task *task = ccb->task;
+	struct domain_device *dev = task->dev;
+	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+	u32 tag = ccb->ccb_tag;
+	int ret;
+	static u32 inb;
+	static u32 outb;
+	struct sata_start_req sata_cmd;
+	u32 hdr_tag, ncg_tag = 0;
+	u64 phys_addr;
+	u32 ATAP = 0x0;
+	u32 dir;
+	struct inbound_queue_table *circularQ;
+	unsigned long flags;
+	u32 opc = OPC_INB_SATA_HOST_OPSTART;
+	memset(&sata_cmd, 0, sizeof(sata_cmd));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	if (task->data_dir == PCI_DMA_NONE) {
+		ATAP = 0x04; /* no data*/
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+	} else if (likely(!task->ata_task.device_control_reg_update)) {
+		if (task->ata_task.dma_xfer) {
+			ATAP = 0x06; /* DMA */
+			PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+		} else {
+			ATAP = 0x05; /* PIO*/
+			PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+		}
+		if (task->ata_task.use_ncq &&
+			dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+			ATAP = 0x07; /* FPDMA */
+			PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+		}
+	}
+	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+		ncg_tag = hdr_tag;
+	}
+	dir = data_dir_flags[task->data_dir] << 8;
+	sata_cmd.tag = cpu_to_le32(tag);
+	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+	sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+
+	sata_cmd.sata_fis = task->ata_task.fis;
+	if (likely(!task->ata_task.device_control_reg_update))
+		sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+	sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+
+	/* Check if encryption is set */
+	if (pm8001_ha->chip->encrypt &&
+		!(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Encryption enabled. Sending Encrypt SATA cmd 0x%x\n",
+			sata_cmd.sata_fis.command));
+		opc = OPC_INB_SATA_DIF_ENC_IO;
+
+		/* set encryption bit */
+		sata_cmd.ncqtag_atap_dir_m_dad =
+			cpu_to_le32(((ncg_tag & 0xff)<<16)|
+				((ATAP & 0x3f) << 10) | 0x20 | dir);
+							/* dad (bit 0-1) is 0 */
+		/* fill in PRD (scatter/gather) table, if any */
+		if (task->num_scatter > 1) {
+			pm8001_chip_make_sg(task->scatter,
+						ccb->n_elem, ccb->buf_prd);
+			phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+			sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
+			sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+			sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
+		} else if (task->num_scatter == 1) {
+			u64 dma_addr = sg_dma_address(task->scatter);
+			sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
+			sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+			sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+			sata_cmd.enc_esgl = 0;
+		} else if (task->num_scatter == 0) {
+			sata_cmd.enc_addr_low = 0;
+			sata_cmd.enc_addr_high = 0;
+			sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+			sata_cmd.enc_esgl = 0;
+		}
+		/* XTS mode. All other fields are 0 */
+		sata_cmd.key_index_mode = 0x6 << 4;
+		/* set tweak values. Should be the start lba */
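+		/*
+		 * twk_val0 below carries LBA bits [31:0]
+		 * (lbal_exp:lbah:lbam:lbal) and twk_val1 LBA bits
+		 * [47:32] (lbah_exp:lbam_exp), i.e. together they form
+		 * the full 48-bit starting LBA from the FIS.
+		 */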
+		sata_cmd.twk_val0 =
+			cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
+					(sata_cmd.sata_fis.lbah << 16) |
+					(sata_cmd.sata_fis.lbam << 8) |
+					(sata_cmd.sata_fis.lbal));
+		sata_cmd.twk_val1 =
+			cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
+					 (sata_cmd.sata_fis.lbam_exp));
+	} else {
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"Sending Normal SATA command 0x%x inb %x\n",
+			sata_cmd.sata_fis.command, inb));
+		/* dad (bit 0-1) is 0 */
+		sata_cmd.ncqtag_atap_dir_m_dad =
+			cpu_to_le32(((ncg_tag & 0xff)<<16) |
+					((ATAP & 0x3f) << 10) | dir);
+
+		/* fill in PRD (scatter/gather) table, if any */
+		if (task->num_scatter > 1) {
+			pm8001_chip_make_sg(task->scatter,
+					ccb->n_elem, ccb->buf_prd);
+			phys_addr = ccb->ccb_dma_handle +
+				offsetof(struct pm8001_ccb_info, buf_prd[0]);
+			sata_cmd.addr_low = lower_32_bits(phys_addr);
+			sata_cmd.addr_high = upper_32_bits(phys_addr);
+			sata_cmd.esgl = cpu_to_le32(1 << 31);
+		} else if (task->num_scatter == 1) {
+			u64 dma_addr = sg_dma_address(task->scatter);
+			sata_cmd.addr_low = lower_32_bits(dma_addr);
+			sata_cmd.addr_high = upper_32_bits(dma_addr);
+			sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+			sata_cmd.esgl = 0;
+		} else if (task->num_scatter == 0) {
+			sata_cmd.addr_low = 0;
+			sata_cmd.addr_high = 0;
+			sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+			sata_cmd.esgl = 0;
+		}
+		/* scsi cdb */
+		sata_cmd.atapi_scsi_cdb[0] =
+			cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+			(task->ata_task.atapi_packet[1] << 8) |
+			(task->ata_task.atapi_packet[2] << 16) |
+			(task->ata_task.atapi_packet[3] << 24)));
+		sata_cmd.atapi_scsi_cdb[1] =
+			cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+			(task->ata_task.atapi_packet[5] << 8) |
+			(task->ata_task.atapi_packet[6] << 16) |
+			(task->ata_task.atapi_packet[7] << 24)));
+		sata_cmd.atapi_scsi_cdb[2] =
+			cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+			(task->ata_task.atapi_packet[9] << 8) |
+			(task->ata_task.atapi_packet[10] << 16) |
+			(task->ata_task.atapi_packet[11] << 24)));
+		sata_cmd.atapi_scsi_cdb[3] =
+			cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+			(task->ata_task.atapi_packet[13] << 8) |
+			(task->ata_task.atapi_packet[14] << 16) |
+			(task->ata_task.atapi_packet[15] << 24)));
+	}
+
+	/* Check for read log for failed drive and return */
+	if (sata_cmd.sata_fis.command == 0x2f) {
+		if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+			(pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+			(pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+			struct task_status_struct *ts;
+
+			pm8001_ha_dev->id &= 0xDFFFFFFF;
+			ts = &task->task_status;
+
+			spin_lock_irqsave(&task->task_state_lock, flags);
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_STAT_GOOD;
+			task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+			task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+			task->task_state_flags |= SAS_TASK_STATE_DONE;
+			if (unlikely((task->task_state_flags &
+					SAS_TASK_STATE_ABORTED))) {
+				spin_unlock_irqrestore(&task->task_state_lock,
+							flags);
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("task 0x%p resp 0x%x "
+					"stat 0x%x but aborted by upper "
+					"layer\n", task, ts->resp, ts->stat));
+				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+				return 0;
+			} else if (task->uldd_task) {
+				spin_unlock_irqrestore(&task->task_state_lock,
+							flags);
+				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+				mb();/* ditto */
+				spin_unlock_irq(&pm8001_ha->lock);
+				task->task_done(task);
+				spin_lock_irq(&pm8001_ha->lock);
+				return 0;
+			} else if (!task->uldd_task) {
+				spin_unlock_irqrestore(&task->task_state_lock,
+							flags);
+				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+				mb();/*ditto*/
+				spin_unlock_irq(&pm8001_ha->lock);
+				task->task_done(task);
+				spin_lock_irq(&pm8001_ha->lock);
+				return 0;
+			}
+		}
+	}
+
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+						&sata_cmd, outb++);
+
+	/* rotate the outb queue */
+	outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+	return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id we want to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+	struct phy_start_req payload;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 tag = 0x01;
+	u32 opcode = OPC_INB_PHYSTART;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(tag);
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+	/*
+	 ** [0:7]	PHY Identifier
+	 ** [8:11]	link rate 1.5G, 3G, 6G
+	 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+	 ** [14]	0b disable spin up hold; 1b enable spin up hold
+	 ** [15]	0b no change in current PHY analog setup; 1b enable using SPAST
+	 */
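+	/*
+	 * With the constants defined in pm80xx_hwi.h the value built
+	 * below evaluates to 0x3700 | phy_id: auto link mode (bits
+	 * [13:12] = 11b), all of the 1.5/3/6G rates advertised (bits
+	 * [11:8] = 0111b) and spin-up hold disabled (bit 14 = 0).
+	 */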
+	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+			LINKMODE_AUTO | LINKRATE_15 |
+			LINKRATE_30 | LINKRATE_60 | phy_id);
+	/* SSC Disable and SAS Analog ST configuration */
+	/**
+	payload.ase_sh_lm_slr_phyid =
+		cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+		LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+		phy_id);
+	Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
+	**/
+
+	payload.sas_identify.dev_type = SAS_END_DEVICE;
+	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+	memcpy(payload.sas_identify.sas_addr,
+		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+	payload.sas_identify.phy_id = phy_id;
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+	return ret;
+}
+
+/**
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id we want to stop.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+	u8 phy_id)
+{
+	struct phy_stop_req payload;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 tag = 0x01;
+	u32 opcode = OPC_INB_PHYSTOP;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	memset(&payload, 0, sizeof(payload));
+	payload.tag = cpu_to_le32(tag);
+	payload.phy_id = cpu_to_le32(phy_id);
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+	return ret;
+}
+
+/**
+ * see comments on pm8001_mpi_reg_resp.
+ */
+static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_dev, u32 flag)
+{
+	struct reg_dev_req payload;
+	u32	opc;
+	u32 stp_sspsmp_sata = 0x4;
+	struct inbound_queue_table *circularQ;
+	u32 linkrate, phy_id;
+	int rc, tag = 0xdeadbeef;
+	struct pm8001_ccb_info *ccb;
+	u8 retryFlag = 0x1;
+	u16 firstBurstSize = 0;
+	u16 ITNT = 2000;
+	struct domain_device *dev = pm8001_dev->sas_device;
+	struct domain_device *parent_dev = dev->parent;
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+	memset(&payload, 0, sizeof(payload));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return rc;
+	ccb = &pm8001_ha->ccb_info[tag];
+	ccb->device = pm8001_dev;
+	ccb->ccb_tag = tag;
+	payload.tag = cpu_to_le32(tag);
+
+	if (flag == 1) {
+		stp_sspsmp_sata = 0x02; /*direct attached sata */
+	} else {
+		if (pm8001_dev->dev_type == SAS_SATA_DEV)
+			stp_sspsmp_sata = 0x00; /* stp*/
+		else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+			pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+			pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+			stp_sspsmp_sata = 0x01; /*ssp or smp*/
+	}
+	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+	else
+		phy_id = pm8001_dev->attached_phy;
+
+	opc = OPC_INB_REG_DEV;
+
+	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+			pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+
+	payload.phyid_portid =
+		cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+		((phy_id & 0xFF) << 8));
+
+	payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+		((linkrate & 0x0F) << 24) |
+		((stp_sspsmp_sata & 0x03) << 28));
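+	/*
+	 * ITNT (2000, the I_T nexus loss timeout, presumably in ms to
+	 * match the 2 s IT_NEXUS_TIMEOUT) goes in the low 16 bits of
+	 * the next word; firstBurstSize (0 here) is shifted into the
+	 * upper 16 bits by the multiply with 0x10000.
+	 */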
+	payload.firstburstsize_ITNexustimeout =
+		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+
+	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+		SAS_ADDR_SIZE);
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+	return rc;
+}
+
+/**
+ * pm80xx_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @phyId: the phy id we want to operate on
+ * @phy_op: the local phy operation to perform
+ */
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+	u32 phyId, u32 phy_op)
+{
+	struct local_phy_ctl_req payload;
+	struct inbound_queue_table *circularQ;
+	int ret;
+	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+	memset(&payload, 0, sizeof(payload));
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(1);
+	payload.phyop_phyid =
+		cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+	return ret;
+}
+
+static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 value;
+#ifdef PM8001_USE_MSIX
+	return 1;
+#endif
+	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+	if (value)
+		return 1;
+	return 0;
+
+}
+
+/**
+ * pm80xx_chip_isr - PM80xx isr handler.
+ * @pm8001_ha: our hba card information.
+ * @vec: irq vector number.
+ */
+static irqreturn_t
+pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+	pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+	process_oq(pm8001_ha, vec);
+	pm80xx_chip_interrupt_enable(pm8001_ha, vec);
+	return IRQ_HANDLED;
+}
+
+const struct pm8001_dispatch pm8001_80xx_dispatch = {
+	.name			= "pmc80xx",
+	.chip_init		= pm80xx_chip_init,
+	.chip_soft_rst		= pm80xx_chip_soft_rst,
+	.chip_rst		= pm80xx_hw_chip_rst,
+	.chip_iounmap		= pm8001_chip_iounmap,
+	.isr			= pm80xx_chip_isr,
+	.is_our_interupt	= pm80xx_chip_is_our_interupt,
+	.isr_process_oq		= process_oq,
+	.interrupt_enable	= pm80xx_chip_interrupt_enable,
+	.interrupt_disable	= pm80xx_chip_interrupt_disable,
+	.make_prd		= pm8001_chip_make_sg,
+	.smp_req		= pm80xx_chip_smp_req,
+	.ssp_io_req		= pm80xx_chip_ssp_io_req,
+	.sata_req		= pm80xx_chip_sata_req,
+	.phy_start_req		= pm80xx_chip_phy_start_req,
+	.phy_stop_req		= pm80xx_chip_phy_stop_req,
+	.reg_dev_req		= pm80xx_chip_reg_dev_req,
+	.dereg_dev_req		= pm8001_chip_dereg_dev_req,
+	.phy_ctl_req		= pm80xx_chip_phy_ctl_req,
+	.task_abort		= pm8001_chip_abort_task,
+	.ssp_tm_req		= pm8001_chip_ssp_tm_req,
+	.get_nvmd_req		= pm8001_chip_get_nvmd_req,
+	.set_nvmd_req		= pm8001_chip_set_nvmd_req,
+	.fw_flash_update_req	= pm8001_chip_fw_flash_update_req,
+	.set_dev_state_req	= pm8001_chip_set_dev_state_req,
+};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644
index 0000000..2b760ba
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -0,0 +1,1523 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *	notice, this list of conditions, and the following disclaimer,
+ *	without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *	substantially similar to the "NO WARRANTY" disclaimer below
+ *	("Disclaimer") and any redistribution must be conditioned upon
+ *	including a substantially similar Disclaimer requirement for further
+ *	binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *	of any contributors may be used to endorse or promote products derived
+ *	from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO				1	/* 0x000 */
+#define OPC_INB_PHYSTART			4	/* 0x004 */
+#define OPC_INB_PHYSTOP				5	/* 0x005 */
+#define OPC_INB_SSPINIIOSTART			6	/* 0x006 */
+#define OPC_INB_SSPINITMSTART			7	/* 0x007 */
+/* 0x8 RESV IN SPCv */
+#define OPC_INB_RSVD				8	/* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT		9	/* 0x009 */
+#define OPC_INB_SSPTGTIOSTART			10	/* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART			11	/* 0x00B */
+/* 0xC, 0xD, 0xE removed in SPCv */
+#define OPC_INB_SSP_ABORT			15	/* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE		16	/* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE			17	/* 0x011 */
+#define OPC_INB_SMP_REQUEST			18	/* 0x012 */
+/* 0x13 SMP_RESPONSE is removed in SPCv */
+#define OPC_INB_SMP_ABORT			20	/* 0x014 */
+/* 0x16 RESV IN SPCv */
+#define OPC_INB_RSVD1				22	/* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART		23	/* 0x017 */
+#define OPC_INB_SATA_ABORT			24	/* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL		25	/* 0x019 */
+/* 0x1A RESV IN SPCv */
+#define OPC_INB_RSVD2				26	/* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE			32	/* 0x020 */
+#define OPC_INB_GPIO				34	/* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END		35	/* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE		36	/* 0x024 */
+/* 0x25 RESV IN SPCv */
+#define OPC_INB_RSVD3				37	/* 0x025 */
+#define OPC_INB_GET_TIME_STAMP			38	/* 0x026 */
+#define OPC_INB_PORT_CONTROL			39	/* 0x027 */
+#define OPC_INB_GET_NVMD_DATA			40	/* 0x028 */
+#define OPC_INB_SET_NVMD_DATA			41	/* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE		42	/* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE		43	/* 0x02B */
+#define OPC_INB_SET_DEV_INFO			44	/* 0x02C */
+/* 0x2D RESV IN SPCv */
+#define OPC_INB_RSVD4				45	/* 0x02D */
+#define OPC_INB_SGPIO_REGISTER			46	/* 0x02E */
+#define OPC_INB_PCIE_DIAG_EXEC			47	/* 0x02F */
+#define OPC_INB_SET_CONTROLLER_CONFIG		48	/* 0x030 */
+#define OPC_INB_GET_CONTROLLER_CONFIG		49	/* 0x031 */
+#define OPC_INB_REG_DEV				50	/* 0x032 */
+#define OPC_INB_SAS_HW_EVENT_ACK		51	/* 0x033 */
+#define OPC_INB_GET_DEVICE_INFO			52	/* 0x034 */
+#define OPC_INB_GET_PHY_PROFILE			53	/* 0x035 */
+#define OPC_INB_FLASH_OP_EXT			54	/* 0x036 */
+#define OPC_INB_SET_PHY_PROFILE			55	/* 0x037 */
+#define OPC_INB_KEK_MANAGEMENT			256	/* 0x100 */
+#define OPC_INB_DEK_MANAGEMENT			257	/* 0x101 */
+#define OPC_INB_SSP_INI_DIF_ENC_IO		258	/* 0x102 */
+#define OPC_INB_SATA_DIF_ENC_IO			259	/* 0x103 */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO					1	/* 0x001 */
+#define OPC_OUB_RSVD					4	/* 0x004 */
+#define OPC_OUB_SSP_COMP				5	/* 0x005 */
+#define OPC_OUB_SMP_COMP				6	/* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL				7	/* 0x007 */
+#define OPC_OUB_RSVD1					10	/* 0x00A */
+#define OPC_OUB_DEREG_DEV				11	/* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE				12	/* 0x00C */
+#define OPC_OUB_SATA_COMP				13	/* 0x00D */
+#define OPC_OUB_SATA_EVENT				14	/* 0x00E */
+#define OPC_OUB_SSP_EVENT				15	/* 0x00F */
+#define OPC_OUB_RSVD2					16	/* 0x010 */
+/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
+#define OPC_OUB_SSP_RECV_EVENT				18	/* 0x012 */
+#define OPC_OUB_RSVD3					19	/* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE				20	/* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE				22	/* 0x016 */
+#define OPC_OUB_GPIO_EVENT				23	/* 0x017 */
+#define OPC_OUB_GENERAL_EVENT				24	/* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP				26	/* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP				27	/* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END			28	/* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE			29	/* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP				30	/* 0x01E */
+#define OPC_OUB_RSVD4					31	/* 0x01F */
+#define OPC_OUB_PORT_CONTROL				32	/* 0x020 */
+#define OPC_OUB_SKIP_ENTRY				33	/* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP				34	/* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA				35	/* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA				36	/* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL			37	/* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE			38	/* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE			39	/* 0x027 */
+#define OPC_OUB_SET_DEV_INFO				40	/* 0x028 */
+#define OPC_OUB_RSVD5					41	/* 0x029 */
+#define OPC_OUB_HW_EVENT				1792	/* 0x700 */
+#define OPC_OUB_DEV_HANDLE_ARRIV			1824	/* 0x720 */
+#define OPC_OUB_THERM_HW_EVENT				1840	/* 0x730 */
+#define OPC_OUB_SGPIO_RESP				2094	/* 0x82E */
+#define OPC_OUB_PCIE_DIAG_EXECUTE			2095	/* 0x82F */
+#define OPC_OUB_DEV_REGIST				2098	/* 0x832 */
+#define OPC_OUB_SAS_HW_EVENT_ACK			2099	/* 0x833 */
+#define OPC_OUB_GET_DEVICE_INFO				2100	/* 0x834 */
+/* spcv specific commands */
+#define OPC_OUB_PHY_START_RESP				2052	/* 0x804 */
+#define OPC_OUB_PHY_STOP_RESP				2053	/* 0x805 */
+#define OPC_OUB_SET_CONTROLLER_CONFIG			2096	/* 0x830 */
+#define OPC_OUB_GET_CONTROLLER_CONFIG			2097	/* 0x831 */
+#define OPC_OUB_GET_PHY_PROFILE				2101	/* 0x835 */
+#define OPC_OUB_FLASH_OP_EXT				2102	/* 0x836 */
+#define OPC_OUB_SET_PHY_PROFILE				2103	/* 0x837 */
+#define OPC_OUB_KEK_MANAGEMENT_RESP			2304	/* 0x900 */
+#define OPC_OUB_DEK_MANAGEMENT_RESP			2305	/* 0x901 */
+#define OPC_OUB_SSP_COALESCED_COMP_RESP			2306	/* 0x902 */
+
+/* for phy start*/
+#define SSC_DISABLE_15			(0x01 << 16)
+#define SSC_DISABLE_30			(0x02 << 16)
+#define SSC_DISABLE_60			(0x04 << 16)
+#define SAS_ASE				(0x01 << 15)
+#define SPINHOLD_DISABLE		(0x00 << 14)
+#define SPINHOLD_ENABLE			(0x01 << 14)
+#define LINKMODE_SAS			(0x01 << 12)
+#define LINKMODE_DSATA			(0x02 << 12)
+#define LINKMODE_AUTO			(0x03 << 12)
+#define LINKRATE_15			(0x01 << 8)
+#define LINKRATE_30			(0x02 << 8)
+#define LINKRATE_60			(0x06 << 8)
+
+/* Thermal related */
+#define	THERMAL_ENABLE			0x1
+#define	THERMAL_LOG_ENABLE		0x1
+#define THERMAL_OP_CODE			0x6
+#define LTEMPHIL			 70
+#define RTEMPHIL			100
+
+/* Encryption info */
+#define SCRATCH_PAD3_ENC_DISABLED	0x00000000
+#define SCRATCH_PAD3_ENC_DIS_ERR	0x00000001
+#define SCRATCH_PAD3_ENC_ENA_ERR	0x00000002
+#define SCRATCH_PAD3_ENC_READY		0x00000003
+#define SCRATCH_PAD3_ENC_MASK		SCRATCH_PAD3_ENC_READY
+
+#define SCRATCH_PAD3_XTS_ENABLED		(1 << 14)
+#define SCRATCH_PAD3_SMA_ENABLED		(1 << 4)
+#define SCRATCH_PAD3_SMB_ENABLED		(1 << 5)
+#define SCRATCH_PAD3_SMF_ENABLED		0
+#define SCRATCH_PAD3_SM_MASK			0x000000F0
+#define SCRATCH_PAD3_ERR_CODE			0x00FF0000
+
+#define SEC_MODE_SMF				0x0
+#define SEC_MODE_SMA				0x100
+#define SEC_MODE_SMB				0x200
+#define CIPHER_MODE_ECB				0x00000001
+#define CIPHER_MODE_XTS				0x00000002
+#define KEK_MGMT_SUBOP_KEYCARDUPDATE		0x4
+
+/* SAS protocol timer configuration page */
+#define SAS_PROTOCOL_TIMER_CONFIG_PAGE  0x04
+#define STP_MCT_TMO                     32
+#define SSP_MCT_TMO                     32
+#define SAS_MAX_OPEN_TIME				5
+#define SMP_MAX_CONN_TIMER              0xFF
+#define STP_FRM_TIMER                   0
+#define STP_IDLE_TIME                   5 /* 5 us; controller default */
+#define SAS_MFD                         0
+#define SAS_OPNRJT_RTRY_INTVL           2
+#define SAS_DOPNRJT_RTRY_TMO            128
+#define SAS_COPNRJT_RTRY_TMO            128
+
+/*
+  Making ORR bigger than IT NEXUS LOSS, which is 2000000 us = 2 seconds.
+  Assuming a bigger value of 3 seconds, 3000000/128 = 23437.5, where 128
+  is DOPNRJT_RTRY_TMO.
+*/
+#define SAS_DOPNRJT_RTRY_THR            23438
+#define SAS_COPNRJT_RTRY_THR            23438
+#define SAS_MAX_AIP                     0x200000
+#define IT_NEXUS_TIMEOUT       0x7D0
+#define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
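+/* i.e. 2000/100 + 30 = 50, since IT_NEXUS_TIMEOUT is 0x7D0 == 2000 */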
+
+struct mpi_msg_hdr {
+	__le32	header;	/* Bits [11:0] - Message operation code */
+	/* Bits [15:12] - Message Category */
+	/* Bits [21:16] - Outboundqueue ID for the
+	operation completion message */
+	/* Bits [23:22] - Reserved */
+	/* Bits [28:24] - Buffer Count, indicates how
+	many buffers are allocated for the message */
+	/* Bits [30:29] - Reserved */
+	/* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Start Command
+ * used to enable the phy (128 bytes)
+ */
+struct phy_start_req {
+	__le32	tag;
+	__le32	ase_sh_lm_slr_phyid;
+	struct sas_identify_frame sas_identify; /* 28 Bytes */
+	__le32 spasti;
+	u32	reserved[21];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY Stop Command
+ * used to disable the phy (128 bytes)
+ */
+struct phy_stop_req {
+	__le32	tag;
+	__le32	phy_id;
+	u32	reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+	u8	fis_type;	/* 0xA1*/
+	u8	n_i_pmport;
+	/* b7 : n Bit. Notification bit. If set device needs attention. */
+	/* b6 : i Bit. Interrupt Bit */
+	/* b5-b4: reserved2 */
+	/* b3-b0: PM Port */
+	u8	status;
+	u8	error;
+	u32	_r_a;
+} __attribute__ ((packed));
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+	u8	fis_type;	/* 0x5f */
+	u8	i_d_pmPort;
+	/* b7 : reserved */
+	/* b6 : i bit. Interrupt bit */
+	/* b5 : d bit. data transfer direction. set to 1 for device to host
+	xfer */
+	/* b4 : reserved */
+	/* b3-b0: PM Port */
+	u8	status;
+	u8	error;
+	u8	lbal;
+	u8	lbam;
+	u8	lbah;
+	u8	device;
+	u8	lbal_exp;
+	u8	lbam_exp;
+	u8	lbah_exp;
+	u8	_r_a;
+	u8	sector_count;
+	u8	sector_count_exp;
+	u8	_r_b;
+	u8	e_status;
+	u8	_r_c[2];
+	u8	transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	param;
+	u32	sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * use to alert the host about the hardware event (64 bytes)
+ */
+/* updated outbound struct for spcv */
+
+struct hw_event_resp {
+	__le32	lr_status_evt_portid;
+	__le32	evt_param;
+	__le32	phyid_npip_portstate;
+	struct sas_identify_frame	sas_identify;
+	struct dev_to_host_fis	sata_fis;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure for thermal event notification
+ */
+
+struct thermal_hw_event {
+	__le32	thermal_event;
+	__le32	rht_lht;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+	__le32	tag;
+	__le32	phyid_portid;
+	__le32	dtype_dlr_mcn_ir_retry;
+	__le32	firstburstsize_ITNexustimeout;
+	u8	sas_addr[SAS_ADDR_SIZE];
+	__le32	upper_device_id;
+	u32	reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+	__le32	tag;
+	__le32	device_id;
+	u32	reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+struct dev_reg_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	device_id;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+	__le32	tag;
+	__le32	phyop_phyid;
+	u32	reserved1[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+struct local_phy_ctl_resp {
+	__le32	tag;
+	__le32	phyop_phyid;
+	__le32	status;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+	__le32	tag;
+	__le32	portop_portid;
+	__le32	param0;
+	__le32	param1;
+	u32	reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+struct hw_event_ack_req {
+	__le32	tag;
+	__le32	phyid_sea_portid;
+	__le32	param0;
+	__le32	param1;
+	u32	reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_START Response Command
+ * indicates the completion of PHY_START command (64 bytes)
+ */
+struct phy_start_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	phyid;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_STOP Response Command
+ * indicates the completion of the PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	phyid;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * used to indicate an SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	param;
+	__le32	ssptag_rescv_rescpad;
+	struct ssp_response_iu ssp_resp_iu;
+	__le32	residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT	0x00010000
+
+/*
+ * brief the data structure of SATA EVENT response
+ * used to indicate a SATA Event (64 bytes)
+ */
+struct sata_event_resp {
+	__le32 tag;
+	__le32 event;
+	__le32 port_id;
+	__le32 device_id;
+	u32 reserved;
+	__le32 event_param0;
+	__le32 event_param1;
+	__le32 sata_addr_h32;
+	__le32 sata_addr_l32;
+	__le32 e_udt1_udt0_crc;
+	__le32 e_udt5_udt4_udt3_udt2;
+	__le32 a_udt1_udt0_crc;
+	__le32 a_udt5_udt4_udt3_udt2;
+	__le32 hwdevid_diferr;
+	__le32 err_framelen_byteoffset;
+	__le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT response
+ * used to indicate an SSP Event (64 bytes)
+ */
+struct ssp_event_resp {
+	__le32 tag;
+	__le32 event;
+	__le32 port_id;
+	__le32 device_id;
+	__le32 ssp_tag;
+	__le32 event_param0;
+	__le32 event_param1;
+	__le32 sas_addr_h32;
+	__le32 sas_addr_l32;
+	__le32 e_udt1_udt0_crc;
+	__le32 e_udt5_udt4_udt3_udt2;
+	__le32 a_udt1_udt0_crc;
+	__le32 a_udt5_udt4_udt3_udt2;
+	__le32 hwdevid_diferr;
+	__le32 err_framelen_byteoffset;
+	__le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * used to describe the MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+	__le32	status;
+	__le32	inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD	14
+#define OPCODE_BITS	0x00000fff
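OPCODE_BITS exists so the event handler can recover which inbound command
triggered the notification from the copied IOMB. A minimal sketch, assuming
(as the mask value suggests) that dword 0 of the copied payload carries the
opcode in its low 12 bits:

static u32 general_event_opcode(const struct general_event_resp *resp)
{
	/* The opcode of the offending inbound command is assumed to sit
	 * in the low 12 bits of the first copied payload dword. */
	return le32_to_cpu(resp->inb_IOMB_payload[0]) & OPCODE_BITS;
}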
+
+/*
+ * brief the data structure of SMP Request Command
+ * used to describe the MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	len_ip_ir;
+	/* Bits [0] - Indirect response */
+	/* Bits [1] - Indirect Payload */
+	/* Bits [15:2] - Reserved */
+	/* Bits [23:16] - direct payload Len */
+	/* Bits [31:24] - Reserved */
+	u8	smp_req16[16];
+	union {
+		u8	smp_req[32];
+		struct {
+			__le64 long_req_addr;/* sg dma address, LE */
+			__le32 long_req_size;/* LE */
+			u32	_r_a;
+			__le64 long_resp_addr;/* sg dma address, LE */
+			__le32 long_resp_size;/* LE */
+			u32	_r_b;
+			} long_smp_req;/* sequencer extension */
+	};
+	__le32	rsvd[16];
+} __attribute__((packed, aligned(4)));
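For SMP frames that do not fit in the 32 direct-payload bytes, the request
can instead reference DMA buffers through the long_smp_req overlay. A minimal
sketch of filling the indirect form, assuming the len_ip_ir bit layout
documented above (bit 0 = indirect response, bit 1 = indirect payload);
req_dma/resp_dma are caller-mapped DMA addresses, and this is not a verbatim
copy of the driver's own helper:

static void smp_req_set_indirect(struct smp_req *req,
				 dma_addr_t req_dma, u32 req_len,
				 dma_addr_t resp_dma, u32 resp_len)
{
	/* Mark both the payload and the response as indirect. */
	req->len_ip_ir = cpu_to_le32((1 << 1) | (1 << 0));
	req->long_smp_req.long_req_addr = cpu_to_le64(req_dma);
	req->long_smp_req.long_req_size = cpu_to_le32(req_len);
	req->long_smp_req.long_resp_addr = cpu_to_le64(resp_dma);
	req->long_smp_req.long_resp_size = cpu_to_le32(resp_len);
}
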
+/*
+ * brief the data structure of SMP Completion Response
+ * used to describe the MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	param;
+	u8	_r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * used to describe the MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	tag_to_abort;
+	__le32	abort_all;
+	u32	reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK		0x3
+#define ABORT_SINGLE		0x0
+#define ABORT_ALL		0x1
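A minimal sketch of how these flags select between aborting one task and
aborting everything queued to a device; the assumption that ABORT_SINGLE
consumes tag_to_abort while ABORT_ALL ignores it follows from the field and
flag names, not from the spec:

static void fill_task_abort(struct task_abort_req *req, u32 tag,
			    u32 device_id, u32 tag_to_abort, bool all)
{
	memset(req, 0, sizeof(*req));
	req->tag = cpu_to_le32(tag);
	req->device_id = cpu_to_le32(device_id);
	if (all) {
		req->abort_all = cpu_to_le32(ABORT_ALL);
	} else {
		req->abort_all = cpu_to_le32(ABORT_SINGLE);
		req->tag_to_abort = cpu_to_le32(tag_to_abort);
	}
}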
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * used to describe the SSP SMP & SATA Abort Response (64 bytes)
+ */
+struct task_abort_resp {
+	__le32	tag;
+	__le32	status;
+	__le32	scp;
+	u32	reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * used to describe the MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+	__le32	tag;
+	__le32	operation_phyid;
+	u32	reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * used to describe the MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+	__le32	tag;
+	__le32	cmdtype_cmddesc_phyid;
+	__le32	pat1_pat2;
+	__le32	threshold;
+	__le32	codepat_errmsk;
+	__le32	pmon;
+	__le32	pERF1CTL;
+	u32	reserved[24];
+} __attribute__((packed, aligned(4)));
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * used to describe the MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	nds;
+	u32	reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * used to describe the MPI SATA IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+
+struct sata_start_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	data_len;
+	__le32	ncqtag_atap_dir_m_dad;
+	struct host_to_dev_fis	sata_fis;
+	u32	reserved1;
+	u32	reserved2;	/* dword 11. rsvd for normal I/O. */
+				/* EPLE Descl for enc I/O */
+	u32	addr_low;	/* dword 12. rsvd for enc I/O */
+	u32	addr_high;	/* dword 13. reserved for enc I/O */
+	__le32	len;		/* dword 14: length for normal I/O. */
+				/* EPLE Desch for enc I/O */
+	__le32	esgl;		/* dword 15. rsvd for enc I/O */
+	__le32	atapi_scsi_cdb[4];	/* dword 16-19. rsvd for enc I/O */
+	/* The below fields are reserved for normal I/O */
+	__le32	key_index_mode;	/* dword 20 */
+	__le32	sector_cnt_enss;/* dword 21 */
+	__le32	keytagl;	/* dword 22 */
+	__le32	keytagh;	/* dword 23 */
+	__le32	twk_val0;	/* dword 24 */
+	__le32	twk_val1;	/* dword 25 */
+	__le32	twk_val2;	/* dword 26 */
+	__le32	twk_val3;	/* dword 27 */
+	__le32	enc_addr_low;	/* dword 28. Encryption SGL address low */
+	__le32	enc_addr_high;	/* dword 29. Encryption SGL address high */
+	__le32	enc_len;	/* dword 30. Encryption length */
+	__le32	enc_esgl;	/* dword 31. Encryption esgl bit */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * used to describe the MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	relate_tag;
+	__le32	tmf;
+	u8	lun[8];
+	__le32	ds_ads_m;
+	u32	reserved[24];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_info_unit {
+	u8	lun[8];/* SCSI Logical Unit Number */
+	u8	reserved1;/* reserved */
+	u8	efb_prio_attr;
+	/* B7 : enabledFirstBurst */
+	/* B6-3 : taskPriority */
+	/* B2-0 : taskAttribute */
+	u8	reserved2;	/* reserved */
+	u8	additional_cdb_len;
+	/* B7-2 : additional_cdb_len */
+	/* B1-0 : reserved */
+	u8	cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * used to describe the MPI SSP INI IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+struct ssp_ini_io_start_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	data_len;
+	__le32	dad_dir_m_tlr;
+	struct ssp_info_unit	ssp_iu;
+	__le32	addr_low;	/* dword 12: sgl low for normal I/O. */
+				/* epl_descl for encryption I/O */
+	__le32	addr_high;	/* dword 13: sgl hi for normal I/O */
+				/* dpl_descl for encryption I/O */
+	__le32	len;		/* dword 14: len for normal I/O. */
+				/* edpl_desch for encryption I/O */
+	__le32	esgl;		/* dword 15: ESGL bit for normal I/O. */
+				/* user defined tag mask for enc I/O */
+	/* The below fields are reserved for normal I/O */
+	u8	udt[12];	/* dword 16-18 */
+	__le32	sectcnt_ios;	/* dword 19 */
+	__le32	key_cmode;	/* dword 20 */
+	__le32	ks_enss;	/* dword 21 */
+	__le32	keytagl;	/* dword 22 */
+	__le32	keytagh;	/* dword 23 */
+	__le32	twk_val0;	/* dword 24 */
+	__le32	twk_val1;	/* dword 25 */
+	__le32	twk_val2;	/* dword 26 */
+	__le32	twk_val3;	/* dword 27 */
+	__le32	enc_addr_low;	/* dword 28: Encryption sgl addr low */
+	__le32	enc_addr_high;	/* dword 29: Encryption sgl addr hi */
+	__le32	enc_len;	/* dword 30: Encryption length */
+	__le32	enc_esgl;	/* dword 31: ESGL bit for encryption */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
+ * used to initiate an SSP I/O operation with optional DIF/ENC
+ */
+struct ssp_dif_enc_io_req {
+	__le32	tag;
+	__le32	device_id;
+	__le32	data_len;
+	__le32	dirMTlr;
+	__le32	sspiu0;
+	__le32	sspiu1;
+	__le32	sspiu2;
+	__le32	sspiu3;
+	__le32	sspiu4;
+	__le32	sspiu5;
+	__le32	sspiu6;
+	__le32	epl_des;
+	__le32	dpl_desl_ndplr;
+	__le32	dpl_desh;
+	__le32	uum_uuv_bss_difbits;
+	u8	udt[12];
+	__le32	sectcnt_ios;
+	__le32	key_cmode;
+	__le32	ks_enss;
+	__le32	keytagl;
+	__le32	keytagh;
+	__le32	twk_val0;
+	__le32	twk_val1;
+	__le32	twk_val2;
+	__le32	twk_val3;
+	__le32	addr_low;
+	__le32	addr_high;
+	__le32	len;
+	__le32	esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Firmware download
+ * used to describe the MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+	__le32	tag;
+	__le32	cur_image_offset;
+	__le32	cur_image_len;
+	__le32	total_image_len;
+	u32	reserved0[7];
+	__le32	sgl_addr_lo;
+	__le32	sgl_addr_hi;
+	__le32	len;
+	__le32	ext_reserved;
+	u32	reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * used to describe the MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+struct fw_flash_Update_resp {
+	__le32	tag;
+	__le32	status;
+	u32	reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * used to get data from NVM in the HBA (64 bytes)
+ */
+struct get_nvm_data_req {
+	__le32	tag;
+	__le32	len_ir_vpdd;
+	__le32	vpd_offset;
+	u32	reserved[8];
+	__le32	resp_addr_lo;
+	__le32	resp_addr_hi;
+	__le32	resp_len;
+	u32	reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+struct set_nvm_data_req {
+	__le32	tag;
+	__le32	len_ir_vpdd;
+	__le32	vpd_offset;
+	u32	reserved[8];
+	__le32	resp_addr_lo;
+	__le32	resp_addr_hi;
+	__le32	resp_len;
+	u32	reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * used to modify the controller configuration
+ */
+struct set_ctrl_cfg_req {
+	__le32	tag;
+	__le32	cfg_pg[14];
+	u32	reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET CONTROLLER CONFIG COMMAND
+ * used to get a controller configuration page
+ */
+struct get_ctrl_cfg_req {
+	__le32	tag;
+	__le32	pgcd;
+	__le32	int_vec;
+	u32	reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for KEK_MANAGEMENT COMMAND
+ * used for KEK management
+ */
+struct kek_mgmt_req {
+	__le32	tag;
+	__le32	new_curidx_ksop;
+	u32	reserved;
+	__le32	kblob[12];
+	u32	reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * used for DEK management
+ */
+struct dek_mgmt_req {
+	__le32	tag;
+	__le32	kidx_dsop;
+	__le32	dekidx;
+	__le32	addr_l;
+	__le32	addr_h;
+	__le32	nent;
+	__le32	dbf_tblsize;
+	u32	reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * used to set phy-specific information
+ */
+struct set_phy_profile_req {
+	__le32	tag;
+	__le32	ppc_phyid;
+	u32	reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * used to retrieve phy-specific information
+ */
+struct get_phy_profile_req {
+	__le32	tag;
+	__le32	ppc_phyid;
+	__le32	profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * used to manage the ext flash partition
+ */
+struct ext_flash_partition_req {
+	__le32	tag;
+	__le32	cmd;
+	__le32	offset;
+	__le32	len;
+	u32	reserved[7];
+	__le32	addr_low;
+	__le32	addr_high;
+	__le32	len1;
+	__le32	ext;
+	u32	reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE	0x0
+#define C_SEEPROM	0x1
+#define VPD_FLASH	0x4
+#define AAP1_RDUMP	0x5
+#define IOP_RDUMP	0x6
+#define EXPAN_ROM	0x7
+
+#define IPMode		0x80000000
+#define NVMD_TYPE	0x0000000F
+#define NVMD_STAT	0x0000FFFF
+#define NVMD_LEN	0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * used to describe the MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+	__le32		tag;
+	__le32		ir_tda_bn_dps_das_nvm;
+	__le32		dlen_status;
+	__le32		nvm_data[12];
+} __attribute__((packed, aligned(4)));
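A minimal sketch of splitting the combined dlen_status dword of this response
with the masks above; placing the length in the top byte (shift of 24) is
inferred from the NVMD_LEN mask value, not taken from the spec:

static void decode_nvm_resp(const struct get_nvm_data_resp *resp,
			    u16 *status, u8 *dlen)
{
	u32 v = le32_to_cpu(resp->dlen_status);

	*status = v & NVMD_STAT;
	*dlen = (v & NVMD_LEN) >> 24;	/* assumed: length in bits 31-24 */
}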
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * used to describe the MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+	__le32		tag;
+	__le32		status;
+	u32		reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * used to describe the MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+	__le32		tag;
+	__le32		cmdtype_cmddesc_phyid;
+	__le32		Status;
+	__le32		ReportData;
+	u32		reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * used to describe the MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+	__le32		tag;
+	__le32		status;
+	__le32		device_id;
+	__le32		pds_nds;
+	u32		reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG Response
+ * used to report the status of a controller configuration change
+ */
+struct set_ctrl_cfg_resp {
+	__le32 tag;
+	__le32 status;
+	__le32 err_qlfr_pgcd;
+	u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+	__le32 tag;
+	__le32 status;
+	__le32 err_qlfr;
+	__le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+	__le32 tag;
+	__le32 status;
+	__le32 kidx_new_curr_ksop;
+	__le32 err_qlfr;
+	u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+	__le32 tag;
+	__le32 status;
+	__le32 kekidx_tbls_dsop;
+	__le32 dekidx;
+	__le32 err_qlfr;
+	u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct get_phy_profile_resp {
+	__le32 tag;
+	__le32 status;
+	__le32 ppc_phyid;
+	__le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct flash_op_ext_resp {
+	__le32 tag;
+	__le32 cmd;
+	__le32 status;
+	__le32 epart_size;
+	__le32 epart_sect_size;
+	u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct set_phy_profile_resp {
+	__le32 tag;
+	__le32 status;
+	__le32 ppc_phyid;
+	__le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_coalesced_comp_resp {
+	__le32 coal_cnt;
+	__le32 tag0;
+	__le32 ssp_tag0;
+	__le32 tag1;
+	__le32 ssp_tag1;
+	__le32 add_tag_ssp_tag[10];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - ends */
+
+/* brief data structure for SAS protocol timer configuration page.
+ *
+ */
+struct SASProtocolTimerConfig {
+	__le32 pageCode;			/* 0 */
+	__le32 MST_MSI;				/* 1 */
+	__le32 STP_SSP_MCT_TMO;			/* 2 */
+	__le32 STP_FRM_TMO;			/* 3 */
+	__le32 STP_IDLE_TMO;			/* 4 */
+	__le32 OPNRJT_RTRY_INTVL;		/* 5 */
+	__le32 Data_Cmd_OPNRJT_RTRY_TMO;	/* 6 */
+	__le32 Data_Cmd_OPNRJT_RTRY_THR;	/* 7 */
+	__le32 MAX_AIP;				/* 8 */
+} __attribute__((packed, aligned(4)));
+
+typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
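A minimal sketch of decoding the pds_nds dword of the set_dev_state_resp
defined earlier; the masks imply the new device state in bits 3-0 and the
previous state in bits 7-4:

static void decode_dev_state(const struct set_dev_state_resp *resp,
			     u8 *prev, u8 *new_state)
{
	u32 v = le32_to_cpu(resp->pds_nds);

	*prev = (v & PDS_BITS) >> 4;
	*new_state = v & NDS_BITS;
}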
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START			0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE		0x02
+#define HW_EVENT_PHY_STOP_STATUS		0x03
+#define HW_EVENT_SAS_PHY_UP			0x04
+#define HW_EVENT_SATA_PHY_UP			0x05
+#define HW_EVENT_SATA_SPINUP_HOLD		0x06
+#define HW_EVENT_PHY_DOWN			0x07
+#define HW_EVENT_PORT_INVALID			0x08
+#define HW_EVENT_BROADCAST_CHANGE		0x09
+#define HW_EVENT_PHY_ERROR			0x0A
+#define HW_EVENT_BROADCAST_SES			0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR		0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED		0x0D
+#define HW_EVENT_MALFUNCTION			0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT		0x0F
+#define HW_EVENT_BROADCAST_EXP			0x10
+#define HW_EVENT_PHY_START_STATUS		0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD		0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR	0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION	0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH	0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED	0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO	0x17
+#define HW_EVENT_PORT_RECOVER			0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO		0x19
+#define HW_EVENT_PORT_RESET_COMPLETE		0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT		0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED			0x00
+#define PORT_VALID				0x01
+#define PORT_LOSTCOMM				0x02
+#define PORT_IN_RESET				0x04
+#define PORT_3RD_PARTY_RESET			0x07
+#define PORT_INVALID				0x08
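A minimal sketch of testing whether a HW event left its port usable. That
the port state occupies the low nibble of phyid_npip_portstate is an
assumption based on the field name, not taken from the spec:

static bool hw_event_port_valid(const struct hw_event_resp *evt)
{
	/* Assumed layout: port state in bits 3-0. */
	u32 state = le32_to_cpu(evt->phyid_npip_portstate) & 0x0F;

	return state == PORT_VALID;
}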
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS				0x00
+#define IO_ABORTED				0x01
+#define IO_OVERFLOW				0x02
+#define IO_UNDERFLOW				0x03
+#define IO_FAILED				0x04
+#define IO_ABORT_RESET				0x05
+#define IO_NOT_VALID				0x06
+#define IO_NO_DEVICE				0x07
+#define IO_ILLEGAL_PARAMETER			0x08
+#define IO_LINK_FAILURE				0x09
+#define IO_PROG_ERROR				0x0A
+
+#define IO_EDC_IN_ERROR				0x0B
+#define IO_EDC_OUT_ERROR			0x0C
+#define IO_ERROR_HW_TIMEOUT			0x0D
+#define IO_XFER_ERROR_BREAK			0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY		0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED	0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION		0x11
+#define IO_OPEN_CNX_ERROR_BREAK				0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS			0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION		0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED	0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY		0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION		0x17
+/* This error code 0x18 is not used on SPCv */
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR			0x18
+#define IO_XFER_ERROR_NAK_RECEIVED			0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT			0x1A
+#define IO_XFER_ERROR_PEER_ABORTED			0x1B
+#define IO_XFER_ERROR_RX_FRAME				0x1C
+#define IO_XFER_ERROR_DMA				0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT			0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT			0x1F
+#define IO_XFER_ERROR_SATA				0x20
+
+/* This error code 0x22 is not used on SPCv */
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST		0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE			0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE			0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT			0x24
+/* This error code 0x25 is not used on SPCv */
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR		0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE			0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN			0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED		0x28
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT		0x30
+
+/* The following error codes 0x31 and 0x32 are not used (obsolete) */
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK	0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK	0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH			0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN		0x35
+#define IO_XFER_CMD_FRAME_ISSUED			0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE			0x37
+#define IO_PORT_IN_RESET				0x38
+#define IO_DS_NON_OPERATIONAL				0x39
+#define IO_DS_IN_RECOVERY				0x3A
+#define IO_TM_TAG_NOT_FOUND				0x3B
+#define IO_XFER_PIO_SETUP_ERROR				0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR			0x3D
+#define IO_DS_IN_ERROR					0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY		0x3F
+#define IO_ABORT_IN_PROGRESS				0x40
+#define IO_ABORT_DELAYED				0x41
+#define IO_INVALID_LENGTH				0x42
+
+/********** additional response event values *****************/
+
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT		0x43
+#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED	0x44
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO	0x45
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST		0x46
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE	0x47
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED	0x48
+#define IO_DS_INVALID					0x49
+/* WARNING: the values are not contiguous from here */
+#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR	0x52
+#define IO_XFER_DMA_ACTIVATE_TIMEOUT		0x53
+#define IO_XFER_ERROR_INTERNAL_CRC_ERROR	0x54
+#define MPI_IO_RQE_BUSY_FULL			0x55
+#define IO_XFER_ERR_EOB_DATA_OVERRUN		0x56
+#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME	0x57
+#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED	0x58
+
+#define MPI_ERR_IO_RESOURCE_UNAVAILABLE		0x1004
+#define MPI_ERR_ATAPI_DEVICE_BUSY		0x1024
+
+#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS		0x2040
+/*
+ * An encryption I/O request failed due to a DEK Key Tag mismatch.
+ * The key tag supplied in the encryption IOMB does not match
+ * the Key Tag in the referenced DEK Entry.
+ */
+#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH	0x2041
+#define IO_XFR_ERROR_CIPHER_MODE_INVALID	0x2042
+/*
+ * An encryption I/O request failed because the initial value (IV)
+ * in the unwrapped DEK blob didn't match the IV used to unwrap it.
+ */
+#define IO_XFR_ERROR_DEK_IV_MISMATCH		0x2043
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR	0x2044
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_INTERNAL_RAM		0x2045
+/*
+ * An encryption I/O request failed
+ * because the DEK index specified in the I/O was outside the bounds of
+ * the total number of entries in the host DEK table.
+ */
+#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS	0x2046
+
+/* define DIF IO response error status code */
+#define IO_XFR_ERROR_DIF_MISMATCH			0x3000
+#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH	0x3001
+#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH		0x3002
+#define IO_XFR_ERROR_DIF_CRC_MISMATCH			0x3003
+
+/* define operator management response status and error qualifier code */
+#define OPR_MGMT_OP_NOT_SUPPORTED			0x2060
+#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL		0x2061
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND		0x2062
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH		0x2063
+#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED	0x2064
+#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL		0x2022
+#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE	0x2023
+/***************** additional response event values ***************/
+
+/* WARNING: This error code must always be the last number.
+ * If you add an error code, update this value as well;
+ * it is used as an index.
+ */
+#define IO_ERROR_UNKNOWN_GENERIC			0x2023
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPCv_MSGU_CFG_TABLE_UPDATE		0x01
+#define SPCv_MSGU_CFG_TABLE_RESET		0x02
+#define SPCv_MSGU_CFG_TABLE_FREEZE		0x04
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE		0x08
+#define MSGU_IBDB_SET				0x00
+#define MSGU_HOST_INT_STATUS			0x08
+#define MSGU_HOST_INT_MASK			0x0C
+#define MSGU_IOPIB_INT_STATUS			0x18
+#define MSGU_IOPIB_INT_MASK			0x1C
+#define MSGU_IBDB_CLEAR				0x20
+
+#define MSGU_MSGU_CONTROL			0x24
+#define MSGU_ODR				0x20
+#define MSGU_ODCR				0x28
+
+#define MSGU_ODMR				0x30
+#define MSGU_ODMR_U				0x34
+#define MSGU_ODMR_CLR				0x38
+#define MSGU_ODMR_CLR_U				0x3C
+#define MSGU_OD_RSVD				0x40
+
+#define MSGU_SCRATCH_PAD_0			0x44
+#define MSGU_SCRATCH_PAD_1			0x48
+#define MSGU_SCRATCH_PAD_2			0x4C
+#define MSGU_SCRATCH_PAD_3			0x50
+#define MSGU_HOST_SCRATCH_PAD_0			0x54
+#define MSGU_HOST_SCRATCH_PAD_1			0x58
+#define MSGU_HOST_SCRATCH_PAD_2			0x5C
+#define MSGU_HOST_SCRATCH_PAD_3			0x60
+#define MSGU_HOST_SCRATCH_PAD_4			0x64
+#define MSGU_HOST_SCRATCH_PAD_5			0x68
+#define MSGU_HOST_SCRATCH_PAD_6			0x6C
+#define MSGU_HOST_SCRATCH_PAD_7			0x70
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL			0xFFFFFFFF /* mask all
+					interrupt vectors */
+#define ODMR_CLEAR_ALL			0	/* clear all
+					interrupt vectors */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL			0xFFFFFFFF /* clear all
+					interrupt vectors */
+/* MSI-X Interrupts */
+#define MSIX_TABLE_OFFSET		0x2000
+#define MSIX_TABLE_ELEMENT_SIZE		0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET	0xC
+#define MSIX_TABLE_BASE			(MSIX_TABLE_OFFSET + \
+					MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE		0x1
+#define MSIX_INTERRUPT_ENABLE		0x0
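A minimal sketch of masking or unmasking a single vector using the table
geometry above (16-byte entries, control word at offset 0xC); msgu_base is
a hypothetical ioremapped BAR pointer, not a name from the driver:

static void msix_set_vector_mask(void __iomem *msgu_base, u32 vector,
				 bool disable)
{
	/* Per-vector control word: table base plus 16 bytes per entry. */
	u32 off = MSIX_TABLE_BASE + vector * MSIX_TABLE_ELEMENT_SIZE;

	writel(disable ? MSIX_INTERRUPT_DISABLE : MSIX_INTERRUPT_ENABLE,
	       msgu_base + off);
}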
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY		0x3
+#define SCRATCH_PAD_ILA_READY		0xC
+#define SCRATCH_PAD_BOOT_LOAD_SUCCESS	0x0
+#define SCRATCH_PAD_IOP0_READY		0xC00
+#define SCRATCH_PAD_IOP1_READY		0x3000
+
+/* boot loader state */
+#define SCRATCH_PAD1_BOOTSTATE_MASK		0x70	/* Bit 4-6 */
+#define SCRATCH_PAD1_BOOTSTATE_SUCESS		0x0	/* Load successful */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM	0x10	/* HDA SEEPROM */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP	0x20	/* HDA BootStrap Pins */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET	0x30	/* HDA Soft Reset */
+#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR	0x40	/* HDA critical error */
+#define SCRATCH_PAD1_BOOTSTATE_R1		0x50	/* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_R2		0x60	/* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_FATAL		0x70	/* Fatal Error */
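A minimal sketch of classifying the boot loader outcome from Scratch Pad1,
using the bits 4-6 field that SCRATCH_PAD1_BOOTSTATE_MASK defines:

static bool boot_load_succeeded(u32 scratch_pad1)
{
	return (scratch_pad1 & SCRATCH_PAD1_BOOTSTATE_MASK) ==
		SCRATCH_PAD1_BOOTSTATE_SUCESS;
}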
+
+/* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR		0x00	/* power on state */
+#define SCRATCH_PAD2_SFR		0x01	/* soft reset state */
+#define SCRATCH_PAD2_ERR		0x02	/* error state */
+#define SCRATCH_PAD2_RDY		0x03	/* ready state */
+#define SCRATCH_PAD2_FWRDY_RST		0x04	/* FW rdy for soft reset flag */
+#define SCRATCH_PAD2_IOPRDY_RST		0x08	/* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK		0xFFFFFFF4 /* Scratch Pad2
+					state mask, bits 1-0 */
+#define SCRATCH_PAD2_RESERVED		0x000003FC /* Scratch Pad2
+					reserved, bits 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK		0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK		0x00000003 /* State Mask bits */
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET		0x00 /* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION		0x04 /* DWORD 0x01 */
+#define MAIN_FW_REVISION		0x08 /* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET	0x0C /* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET		0x10 /* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET		0x14 /* DWORD 0x05 */
+#define MAIN_GST_OFFSET			0x18 /* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET			0x1C /* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET			0x20 /* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET		0x24 /* DWORD 0x09 */
+
+/* 0x28 - 0x44 - RSVD */
+#define MAIN_EVENT_CRC_CHECK		0x48 /* DWORD 0x12 */
+#define MAIN_EVENT_LOG_ADDR_HI		0x50 /* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO		0x54 /* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE	0x58 /* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION		0x5C /* DWORD 0x17 */
+#define MAIN_PCS_EVENT_LOG_ADDR_HI	0x60 /* DWORD 0x18 */
+#define MAIN_PCS_EVENT_LOG_ADDR_LO	0x64 /* DWORD 0x19 */
+#define MAIN_PCS_EVENT_LOG_BUFF_SIZE	0x68 /* DWORD 0x1A */
+#define MAIN_PCS_EVENT_LOG_OPTION	0x6C /* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT	0x70 /* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET	0x74 /* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH	0x78 /* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET	0x7C /* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH	0x80 /* DWORD 0x20 */
+#define MAIN_GPIO_LED_FLAGS_OFFSET	0x84 /* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET	0x88 /* DWORD 0x22 */
+
+#define MAIN_INT_VECTOR_TABLE_OFFSET	0x8C /* DWORD 0x23 */
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET	0x90 /* DWORD 0x24 */
+#define MAIN_PORT_RECOVERY_TIMER	0x94 /* DWORD 0x25 */
+#define MAIN_INT_REASSERTION_DELAY	0x98 /* DWORD 0x26 */
+
+/* General Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET		0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET	0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET	0x08
+#define GST_MSGUTCNT_OFFSET		0x0C
+#define GST_IOPTCNT_OFFSET		0x10
+/* 0x14 - 0x34 - RSVD */
+#define GST_GPIO_INPUT_VAL		0x38
+/* 0x3c - 0x40 - RSVD */
+#define GST_RERRINFO_OFFSET0		0x44
+#define GST_RERRINFO_OFFSET1		0x48
+#define GST_RERRINFO_OFFSET2		0x4c
+#define GST_RERRINFO_OFFSET3		0x50
+#define GST_RERRINFO_OFFSET4		0x54
+#define GST_RERRINFO_OFFSET5		0x58
+#define GST_RERRINFO_OFFSET6		0x5c
+#define GST_RERRINFO_OFFSET7		0x60
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT		0x00
+#define GST_MPI_STATE_INIT		0x01
+#define GST_MPI_STATE_TERMINATION	0x02
+#define GST_MPI_STATE_ERROR		0x03
+#define GST_MPI_STATE_MASK		0x07
+
+/* Per SAS PHY Attributes */
+
+#define PSPA_PHYSTATE0_OFFSET		0x00 /* Dword V */
+#define PSPA_OB_HW_EVENT_PID0_OFFSET	0x04 /* DWORD V+1 */
+#define PSPA_PHYSTATE1_OFFSET		0x08 /* Dword V+2 */
+#define PSPA_OB_HW_EVENT_PID1_OFFSET	0x0C /* DWORD V+3 */
+#define PSPA_PHYSTATE2_OFFSET		0x10 /* Dword V+4 */
+#define PSPA_OB_HW_EVENT_PID2_OFFSET	0x14 /* DWORD V+5 */
+#define PSPA_PHYSTATE3_OFFSET		0x18 /* Dword V+6 */
+#define PSPA_OB_HW_EVENT_PID3_OFFSET	0x1C /* DWORD V+7 */
+#define PSPA_PHYSTATE4_OFFSET		0x20 /* Dword V+8 */
+#define PSPA_OB_HW_EVENT_PID4_OFFSET	0x24 /* DWORD V+9 */
+#define PSPA_PHYSTATE5_OFFSET		0x28 /* Dword V+10 */
+#define PSPA_OB_HW_EVENT_PID5_OFFSET	0x2C /* DWORD V+11 */
+#define PSPA_PHYSTATE6_OFFSET		0x30 /* Dword V+12 */
+#define PSPA_OB_HW_EVENT_PID6_OFFSET	0x34 /* DWORD V+13 */
+#define PSPA_PHYSTATE7_OFFSET		0x38 /* Dword V+14 */
+#define PSPA_OB_HW_EVENT_PID7_OFFSET	0x3C /* DWORD V+15 */
+#define PSPA_PHYSTATE8_OFFSET		0x40 /* DWORD V+16 */
+#define PSPA_OB_HW_EVENT_PID8_OFFSET	0x44 /* DWORD V+17 */
+#define PSPA_PHYSTATE9_OFFSET		0x48 /* DWORD V+18 */
+#define PSPA_OB_HW_EVENT_PID9_OFFSET	0x4C /* DWORD V+19 */
+#define PSPA_PHYSTATE10_OFFSET		0x50 /* DWORD V+20 */
+#define PSPA_OB_HW_EVENT_PID10_OFFSET	0x54 /* DWORD V+21 */
+#define PSPA_PHYSTATE11_OFFSET		0x58 /* DWORD V+22 */
+#define PSPA_OB_HW_EVENT_PID11_OFFSET	0x5C /* DWORD V+23 */
+#define PSPA_PHYSTATE12_OFFSET		0x60 /* DWORD V+24 */
+#define PSPA_OB_HW_EVENT_PID12_OFFSET	0x64 /* DWORD V+25 */
+#define PSPA_PHYSTATE13_OFFSET		0x68 /* DWORD V+26 */
+#define PSPA_OB_HW_EVENT_PID13_OFFSET	0x6c /* DWORD V+27 */
+#define PSPA_PHYSTATE14_OFFSET		0x70 /* DWORD V+28 */
+#define PSPA_OB_HW_EVENT_PID14_OFFSET	0x74 /* DWORD V+29 */
+#define PSPA_PHYSTATE15_OFFSET		0x78 /* DWORD V+30 */
+#define PSPA_OB_HW_EVENT_PID15_OFFSET	0x7c /* DWORD V+31 */
+/* end PSPA */
+
+/* inbound queue configuration offset - byte offset */
+#define IB_PROPERITY_OFFSET		0x00
+#define IB_BASE_ADDR_HI_OFFSET		0x04
+#define IB_BASE_ADDR_LO_OFFSET		0x08
+#define IB_CI_BASE_ADDR_HI_OFFSET	0x0C
+#define IB_CI_BASE_ADDR_LO_OFFSET	0x10
+#define IB_PIPCI_BAR			0x14
+#define IB_PIPCI_BAR_OFFSET		0x18
+#define IB_RESERVED_OFFSET		0x1C
+
+/* outbound queue configuration offset - byte offset */
+#define OB_PROPERITY_OFFSET		0x00
+#define OB_BASE_ADDR_HI_OFFSET		0x04
+#define OB_BASE_ADDR_LO_OFFSET		0x08
+#define OB_PI_BASE_ADDR_HI_OFFSET	0x0C
+#define OB_PI_BASE_ADDR_LO_OFFSET	0x10
+#define OB_CIPCI_BAR			0x14
+#define OB_CIPCI_BAR_OFFSET		0x18
+#define OB_INTERRUPT_COALES_OFFSET	0x1C
+#define OB_DYNAMIC_COALES_OFFSET	0x20
+#define OB_PROPERTY_INT_ENABLE		0x40000000
+
+#define MBIC_NMI_ENABLE_VPE0_IOP	0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1	0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE	0x003040
+#define PCIE_EVENT_INTERRUPT		0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE	0x003048
+#define PCIE_ERROR_INTERRUPT		0x00304C
+
+/* SPCV soft reset */
+#define SPC_REG_SOFT_RESET 0x00001000
+#define SPCv_NORMAL_RESET_VALUE		0x1
+
+#define SPCv_SOFT_RESET_READ_MASK		0xC0
+#define SPCv_SOFT_RESET_NO_RESET		0x0
+#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED	0x40
+#define SPCv_SOFT_RESET_HDA_MODE_OCCURED	0x80
+#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED	0xC0
+
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE	0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET			0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define SPC_REG_RESET_OSSP		0x00000001
+#define SPC_REG_RESET_RAAE		0x00000002
+#define SPC_REG_RESET_PCS_SPBC		0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS	0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS	0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS	0x00000020
+#define SPC_REG_RESET_PCS_LM		0x00000040
+#define SPC_REG_RESET_PCS		0x00000080
+#define SPC_REG_RESET_GSM		0x00000100
+#define SPC_REG_RESET_DDR2		0x00010000
+#define SPC_REG_RESET_BDMA_CORE		0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI	0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI	0x00080000
+#define SPC_REG_RESET_PCIE_PWR		0x00100000
+#define SPC_REG_RESET_PCIE_SFT		0x00200000
+#define SPC_REG_RESET_PCS_SXCBI		0x00400000
+#define SPC_REG_RESET_LMS_SXCBI		0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI	0x01000000
+#define SPC_REG_RESET_PMIC_CORE		0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI	0x04000000
+#define SPC_REG_RESET_DEVICE		0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPCV_IBW_AXI_TRANSLATION_LOW	0x001010
+
+#define MBIC_AAP1_ADDR_BASE		0x060000
+#define MBIC_IOP_ADDR_BASE		0x070000
+#define GSM_ADDR_BASE			0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET		0x00000000
+#define RAM_ECC_DB_ERR			0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC	0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC	0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC	0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK	0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK	0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK	0x00000048
+
+#define RB6_ACCESS_REG			0x6A0000
+#define HDAC_EXEC_CMD			0x0002
+#define HDA_C_PA			0xcb
+#define HDA_SEQ_ID_BITS			0x00ff0000
+#define HDA_GSM_OFFSET_BITS		0x00FFFFFF
+#define HDA_GSM_CMD_OFFSET_BITS		0x42C0
+#define HDA_GSM_RSP_OFFSET_BITS		0x42E0
+
+#define MBIC_AAP1_ADDR_BASE		0x060000
+#define MBIC_IOP_ADDR_BASE		0x070000
+#define GSM_ADDR_BASE			0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE		0x000000
+#define GSM_CONFIG_RESET_VALUE		0x00003b00
+#define GPIO_ADDR_BASE			0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET	0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET			0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST		0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS					0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE			0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED	0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID			0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED	0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE		0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07
+
+#endif
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 317a7fd..23d6072 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -24,7 +24,9 @@
 
 	Firmware images can be retrieved from:
 
-		ftp://ftp.qlogic.com/outgoing/linux/firmware/
+		http://ldriver.qlogic.com/firmware/
+
+	They are also included in the linux-firmware tree.
 
 config TCM_QLA2XXX
 	tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 729b743..937fed8 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -3003,12 +3003,10 @@
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		lcmd_pkt->cntrl_flags =
-		    __constant_cpu_to_le16(TMF_WRITE_DATA);
+		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		lcmd_pkt->cntrl_flags =
-		    __constant_cpu_to_le16(TMF_READ_DATA);
+		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5307bf8..ad72c1d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -644,7 +644,7 @@
 	qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-void
+static void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
 	struct qla_hw_data *ha = (struct qla_hw_data *)data;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 14fec97..fad71ed 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -507,6 +507,7 @@
 	mrb->mbox_cmd = in_mbox[0];
 	wmb();
 
+	ha->iocb_cnt += mrb->iocb_cnt;
 	ha->isp_ops->queue_iocb(ha);
 exit_mbox_iocb:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a47f999..4d231c1 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2216,14 +2216,14 @@
 	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
 	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
 	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
-	fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
-	fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
 	fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
 	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
 	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
 	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
-	fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
-	fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
+	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
 	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
 	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
 	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@
  * If this is invoked as a result of a userspace call then the entry is marked
  * as nonpersistent using flash_state field.
  **/
-int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
-				 struct dev_db_entry *fw_ddb_entry,
-				 uint16_t *idx, int user)
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+					struct dev_db_entry *fw_ddb_entry,
+					uint16_t *idx, int user)
 {
 	struct iscsi_bus_flash_session *fnode_sess = NULL;
 	struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@
 		ql4_printk(KERN_ERR, ha,
 			   "%s: A non-persistent entry %s found\n",
 			   __func__, dev->kobj.name);
+		put_device(dev);
 		goto exit_ddb_add;
 	}
 
@@ -6112,8 +6113,7 @@
 	int parent_type, parent_index = 0xffff;
 	int rc = 0;
 
-	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-					iscsi_is_flashnode_conn_dev);
+	dev = iscsi_find_flashnode_conn(fnode_sess);
 	if (!dev)
 		return -EIO;
 
@@ -6276,8 +6276,7 @@
 			rc = sprintf(buf, "\n");
 		break;
 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
-		if ((fnode_sess->discovery_parent_idx) >= 0  &&
-		    (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+		if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
 			parent_index = fnode_sess->discovery_parent_idx;
 
 		rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@
 			parent_type = ISCSI_DISC_PARENT_ISNS;
 		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
-		else if (fnode_sess->discovery_parent_type >= 0  &&
-			 fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+		else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
 			parent_type = ISCSI_DISC_PARENT_SENDTGT;
 		else
 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@
 		rc = -ENOSYS;
 		break;
 	}
+
+	put_device(dev);
 	return rc;
 }
 
@@ -6368,20 +6368,11 @@
 {
 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
 	struct scsi_qla_host *ha = to_qla_host(shost);
-	struct dev_db_entry *fw_ddb_entry = NULL;
 	struct iscsi_flashnode_param_info *fnode_param;
 	struct nlattr *attr;
 	int rc = QLA_ERROR;
 	uint32_t rem = len;
 
-	fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
-	if (!fw_ddb_entry) {
-		DEBUG2(ql4_printk(KERN_ERR, ha,
-				  "%s: Unable to allocate ddb buffer\n",
-				  __func__));
-		return -ENOMEM;
-	}
-
 	nla_for_each_attr(attr, data, len, rem) {
 		fnode_param = nla_data(attr);
 
@@ -6590,16 +6581,11 @@
 	struct dev_db_entry *fw_ddb_entry = NULL;
 	dma_addr_t fw_ddb_entry_dma;
 	uint16_t *ddb_cookie = NULL;
-	size_t ddb_size;
+	size_t ddb_size = 0;
 	void *pddb = NULL;
 	int target_id;
 	int rc = 0;
 
-	if (!fnode_sess) {
-		rc = -EINVAL;
-		goto exit_ddb_del;
-	}
-
 	if (fnode_sess->is_boot_target) {
 		rc = -EPERM;
 		DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@
 
 		dev_db_start_offset += (fnode_sess->target_id *
 				       sizeof(*fw_ddb_entry));
-		dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
-				       (void *)fw_ddb_entry;
+		dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
 
 		ddb_size = sizeof(*ddb_cookie);
 	}
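The replaced pointer arithmetic and offsetof() are equivalent here; a short
illustration (fw_ddb_entry assumed to point at a valid entry):

/* Both yield the byte offset of 'cookie' within struct dev_db_entry;
 * offsetof() just says so directly, without going through a live pointer. */
size_t old_way = (void *)&fw_ddb_entry->cookie - (void *)fw_ddb_entry;
size_t new_way = offsetof(struct dev_db_entry, cookie);
/* old_way == new_way */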
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 83e0fec..fe873cf 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.03.00-k8"
+#define QLA4XXX_DRIVER_VERSION	"5.03.00-k9"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5add6f4..0a537a0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1997,24 +1997,39 @@
 	return ret;
 }
 
+static unsigned long lba_to_map_index(sector_t lba)
+{
+	if (scsi_debug_unmap_alignment) {
+		lba += scsi_debug_unmap_granularity -
+			scsi_debug_unmap_alignment;
+	}
+	do_div(lba, scsi_debug_unmap_granularity);
+
+	return lba;
+}
+
+static sector_t map_index_to_lba(unsigned long index)
+{
+	return index * scsi_debug_unmap_granularity -
+		scsi_debug_unmap_alignment;
+}
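A worked example of the round trip, assuming the default alignment of 0 and
a granularity of 4 blocks: each map bit then covers four LBAs, and
map_index_to_lba(index + 1) yields the first LBA of the next granule, which
is how map_region() and unmap_region() below advance their loops.

	lba_to_map_index(0);	/* == 0: LBAs 0-3 share bit 0 */
	lba_to_map_index(5);	/* == 1: LBAs 4-7 share bit 1 */
	map_index_to_lba(2);	/* == 8: first LBA of the third granule */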
+
 static unsigned int map_state(sector_t lba, unsigned int *num)
 {
-	unsigned int granularity, alignment, mapped;
-	sector_t block, next, end;
+	sector_t end;
+	unsigned int mapped;
+	unsigned long index;
+	unsigned long next;
 
-	granularity = scsi_debug_unmap_granularity;
-	alignment = granularity - scsi_debug_unmap_alignment;
-	block = lba + alignment;
-	do_div(block, granularity);
-
-	mapped = test_bit(block, map_storep);
+	index = lba_to_map_index(lba);
+	mapped = test_bit(index, map_storep);
 
 	if (mapped)
-		next = find_next_zero_bit(map_storep, map_size, block);
+		next = find_next_zero_bit(map_storep, map_size, index);
 	else
-		next = find_next_bit(map_storep, map_size, block);
+		next = find_next_bit(map_storep, map_size, index);
 
-	end = next * granularity - scsi_debug_unmap_alignment;
+	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
 	*num = end - lba;
 
 	return mapped;
@@ -2022,47 +2037,37 @@
 
 static void map_region(sector_t lba, unsigned int len)
 {
-	unsigned int granularity, alignment;
 	sector_t end = lba + len;
 
-	granularity = scsi_debug_unmap_granularity;
-	alignment = granularity - scsi_debug_unmap_alignment;
-
 	while (lba < end) {
-		sector_t block, rem;
+		unsigned long index = lba_to_map_index(lba);
 
-		block = lba + alignment;
-		rem = do_div(block, granularity);
+		if (index < map_size)
+			set_bit(index, map_storep);
 
-		if (block < map_size)
-			set_bit(block, map_storep);
-
-		lba += granularity - rem;
+		lba = map_index_to_lba(index + 1);
 	}
 }
 
 static void unmap_region(sector_t lba, unsigned int len)
 {
-	unsigned int granularity, alignment;
 	sector_t end = lba + len;
 
-	granularity = scsi_debug_unmap_granularity;
-	alignment = granularity - scsi_debug_unmap_alignment;
-
 	while (lba < end) {
-		sector_t block, rem;
+		unsigned long index = lba_to_map_index(lba);
 
-		block = lba + alignment;
-		rem = do_div(block, granularity);
-
-		if (rem == 0 && lba + granularity < end && block < map_size) {
-			clear_bit(block, map_storep);
-			if (scsi_debug_lbprz)
+		if (lba == map_index_to_lba(index) &&
+		    lba + scsi_debug_unmap_granularity <= end &&
+		    index < map_size) {
+			clear_bit(index, map_storep);
+			if (scsi_debug_lbprz) {
 				memset(fake_storep +
-				       block * scsi_debug_sector_size, 0,
-				       scsi_debug_sector_size);
+				       lba * scsi_debug_sector_size, 0,
+				       scsi_debug_sector_size *
+				       scsi_debug_unmap_granularity);
+			}
 		}
-		lba += granularity - rem;
+		lba = map_index_to_lba(index + 1);
 	}
 }
 
@@ -2089,7 +2094,7 @@
 
 	write_lock_irqsave(&atomic_rw, iflags);
 	ret = do_device_access(SCpnt, devip, lba, num, 1);
-	if (scsi_debug_unmap_granularity)
+	if (scsi_debug_lbp())
 		map_region(lba, num);
 	write_unlock_irqrestore(&atomic_rw, iflags);
 	if (-1 == ret)
@@ -2122,7 +2127,7 @@
 
 	write_lock_irqsave(&atomic_rw, iflags);
 
-	if (unmap && scsi_debug_unmap_granularity) {
+	if (unmap && scsi_debug_lbp()) {
 		unmap_region(lba, num);
 		goto out;
 	}
@@ -2146,7 +2151,7 @@
 		       fake_storep + (lba * scsi_debug_sector_size),
 		       scsi_debug_sector_size);
 
-	if (scsi_debug_unmap_granularity)
+	if (scsi_debug_lbp())
 		map_region(lba, num);
 out:
 	write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@
 
 	/* Logical Block Provisioning */
 	if (scsi_debug_lbp()) {
-		unsigned int map_bytes;
-
 		scsi_debug_unmap_max_blocks =
 			clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
 
@@ -3401,16 +3404,16 @@
 			clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
 
 		if (scsi_debug_unmap_alignment &&
-		    scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+		    scsi_debug_unmap_granularity <=
+		    scsi_debug_unmap_alignment) {
 			printk(KERN_ERR
-			       "%s: ERR: unmap_granularity < unmap_alignment\n",
+			       "%s: ERR: unmap_granularity <= unmap_alignment\n",
 			       __func__);
 			return -EINVAL;
 		}
 
-		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
-		map_bytes = map_size >> 3;
-		map_storep = vmalloc(map_bytes);
+		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+		map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
 		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
 		       map_size);
@@ -3421,7 +3424,7 @@
 			goto free_vm;
 		}
 
-		memset(map_storep, 0x0, map_bytes);
+		bitmap_zero(map_storep, map_size);
 
 		/* Map first 1KB for partition table */
 		if (scsi_debug_num_parts)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c1b05a8..f43de1e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@
 	struct scsi_device *sdev = scmd->device;
 	struct Scsi_Host *shost = sdev->host;
 	DECLARE_COMPLETION_ONSTACK(done);
-	unsigned long timeleft;
+	unsigned long timeleft = timeout;
 	struct scsi_eh_save ses;
+	const unsigned long stall_for = msecs_to_jiffies(100);
 	int rtn;
 
+retry:
 	scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
 	shost->eh_action = &done;
 
 	scsi_log_send(scmd);
 	scmd->scsi_done = scsi_eh_done;
-	shost->hostt->queuecommand(shost, scmd);
-
-	timeleft = wait_for_completion_timeout(&done, timeout);
+	rtn = shost->hostt->queuecommand(shost, scmd);
+	if (rtn) {
+		if (timeleft > stall_for) {
+			scsi_eh_restore_cmnd(scmd, &ses);
+			timeleft -= stall_for;
+			msleep(jiffies_to_msecs(stall_for));
+			goto retry;
+		}
+		/* signal not to enter either branch of the if () below */
+		timeleft = 0;
+		rtn = NEEDS_RETRY;
+	} else {
+		timeleft = wait_for_completion_timeout(&done, timeout);
+	}
 
 	shost->eh_action = NULL;
 
-	scsi_log_completion(scmd, SUCCESS);
+	scsi_log_completion(scmd, rtn);
 
 	SCSI_LOG_ERROR_RECOVERY(3,
 		printk("%s: scmd: %p, timeleft: %ld\n",
 			__func__, scmd, timeleft));
 
 	/*
-	 * If there is time left scsi_eh_done got called, and we will
-	 * examine the actual status codes to see whether the command
-	 * actually did complete normally, else tell the host to forget
-	 * about this command.
+	 * If there is time left, scsi_eh_done got called, and we will examine
+	 * the actual status codes to see whether the command actually did
+	 * complete normally, else if we have a zero return and no time left,
+	 * the command must still be pending, so abort it and return FAILED.
+	 * If we never actually managed to issue the command, because
+	 * ->queuecommand() kept returning nonzero, use the NEEDS_RETRY value
+	 * set above (so don't execute either branch of the if).
 	 */
 	if (timeleft) {
 		rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@
 			rtn = FAILED;
 			break;
 		}
-	} else {
+	} else if (!rtn) {
 		scsi_abort_eh_cmnd(scmd);
 		rtn = FAILED;
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c31187d..86d5220 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -276,11 +276,10 @@
 }
 EXPORT_SYMBOL(scsi_execute);
 
-
-int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
+int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
 		     int data_direction, void *buffer, unsigned bufflen,
 		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-		     int *resid)
+		     int *resid, int flags)
 {
 	char *sense = NULL;
 	int result;
@@ -291,14 +290,14 @@
 			return DRIVER_ERROR << 24;
 	}
 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-			      sense, timeout, retries, 0, resid);
+			      sense, timeout, retries, flags, resid);
 	if (sshdr)
 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
 	kfree(sense);
 	return result;
 }
-EXPORT_SYMBOL(scsi_execute_req);
+EXPORT_SYMBOL(scsi_execute_req_flags);
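Existing callers keep the old nine-argument form; the full patch presumably
provides a trivial wrapper along these lines (in the mainline tree it lives
as a static inline in include/scsi/scsi_device.h), shown here only to
illustrate the relationship:

static inline int scsi_execute_req(struct scsi_device *sdev,
	const unsigned char *cmd, int data_direction, void *buffer,
	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
	int retries, int *resid)
{
	/* Old entry point: same call with no extra request flags. */
	return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
				      bufflen, sshdr, timeout, retries,
				      resid, 0);
}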
 
 /*
  * Function:    scsi_init_cmd_errh()
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 8f6b12c..42539ee 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,33 +144,83 @@
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
+					int (*cb)(struct device *))
+{
+	int err;
+
+	err = blk_pre_runtime_suspend(sdev->request_queue);
+	if (err)
+		return err;
+	if (cb)
+		err = cb(&sdev->sdev_gendev);
+	blk_post_runtime_suspend(sdev->request_queue, err);
+
+	return err;
+}
+
+static int sdev_runtime_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	int err;
+
+	if (sdev->request_queue->dev)
+		return sdev_blk_runtime_suspend(sdev, cb);
+
+	err = scsi_dev_type_suspend(dev, cb);
+	if (err == -EAGAIN)
+		pm_schedule_suspend(dev, jiffies_to_msecs(
+					round_jiffies_up_relative(HZ/10)));
+	return err;
+}
+
 static int scsi_runtime_suspend(struct device *dev)
 {
 	int err = 0;
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	dev_dbg(dev, "scsi_runtime_suspend\n");
-	if (scsi_is_sdev_device(dev)) {
-		err = scsi_dev_type_suspend(dev,
-				pm ? pm->runtime_suspend : NULL);
-		if (err == -EAGAIN)
-			pm_schedule_suspend(dev, jiffies_to_msecs(
-				round_jiffies_up_relative(HZ/10)));
-	}
+	if (scsi_is_sdev_device(dev))
+		err = sdev_runtime_suspend(dev);
 
 	/* Insert hooks here for targets, hosts, and transport classes */
 
 	return err;
 }
 
+static int sdev_blk_runtime_resume(struct scsi_device *sdev,
+					int (*cb)(struct device *))
+{
+	int err = 0;
+
+	blk_pre_runtime_resume(sdev->request_queue);
+	if (cb)
+		err = cb(&sdev->sdev_gendev);
+	blk_post_runtime_resume(sdev->request_queue, err);
+
+	return err;
+}
+
+static int sdev_runtime_resume(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
+
+	if (sdev->request_queue->dev)
+		return sdev_blk_runtime_resume(sdev, cb);
+	else
+		return scsi_dev_type_resume(dev, cb);
+}
+
 static int scsi_runtime_resume(struct device *dev)
 {
 	int err = 0;
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
 	dev_dbg(dev, "scsi_runtime_resume\n");
 	if (scsi_is_sdev_device(dev))
-		err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
+		err = sdev_runtime_resume(dev);
 
 	/* Insert hooks here for targets, hosts, and transport classes */
 
@@ -185,10 +235,18 @@
 
 	/* Insert hooks here for targets, hosts, and transport classes */
 
-	if (scsi_is_sdev_device(dev))
-		err = pm_schedule_suspend(dev, 100);
-	else
+	if (scsi_is_sdev_device(dev)) {
+		struct scsi_device *sdev = to_scsi_device(dev);
+
+		if (sdev->request_queue->dev) {
+			pm_runtime_mark_last_busy(dev);
+			err = pm_runtime_autosuspend(dev);
+		} else {
+			err = pm_runtime_suspend(dev);
+		}
+	} else {
 		err = pm_runtime_suspend(dev);
+	}
 	return err;
 }
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 47799a3..133926b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1019,8 +1019,7 @@
 /**
  * iscsi_get_flashnode_by_index - finds flashnode session entry by index
  * @shost: pointer to host data
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
+ * @idx: index to match
  *
  * Finds the flashnode session object for the passed index
  *
@@ -1029,13 +1028,13 @@
  *  %NULL on failure
  */
 static struct iscsi_bus_flash_session *
-iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
-			     int (*fn)(struct device *dev, void *data))
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
 {
 	struct iscsi_bus_flash_session *fnode_sess = NULL;
 	struct device *dev;
 
-	dev = device_find_child(&shost->shost_gendev, data, fn);
+	dev = device_find_child(&shost->shost_gendev, &idx,
+				flashnode_match_index);
 	if (dev)
 		fnode_sess = iscsi_dev_to_flash_session(dev);
 
@@ -1059,18 +1058,13 @@
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
 			  int (*fn)(struct device *dev, void *data))
 {
-	struct device *dev;
-
-	dev = device_find_child(&shost->shost_gendev, data, fn);
-	return dev;
+	return device_find_child(&shost->shost_gendev, data, fn);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
 
 /**
  * iscsi_find_flashnode_conn - finds flashnode connection entry
  * @fnode_sess: pointer to parent flashnode session entry
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
  *
  * Finds the flashnode connection object under the given flashnode session
  * entry
@@ -1080,14 +1074,10 @@
  *  %NULL on failure
  */
 struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-			  void *data,
-			  int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
 {
-	struct device *dev;
-
-	dev = device_find_child(&fnode_sess->dev, data, fn);
-	return dev;
+	return device_find_child(&fnode_sess->dev, NULL,
+				 iscsi_is_flashnode_conn_dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
 
@@ -2808,7 +2798,7 @@
 	struct iscsi_bus_flash_session *fnode_sess;
 	struct iscsi_bus_flash_conn *fnode_conn;
 	struct device *dev;
-	uint32_t *idx;
+	uint32_t idx;
 	int err = 0;
 
 	if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@
 		goto put_host;
 	}
 
-	idx = &ev->u.set_flashnode.flashnode_idx;
-	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-						  flashnode_match_index);
+	idx = ev->u.set_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
 	if (!fnode_sess) {
 		pr_err("%s could not find flashnode %u for host no %u\n",
-		       __func__, *idx, ev->u.set_flashnode.host_no);
+		       __func__, idx, ev->u.set_flashnode.host_no);
 		err = -ENODEV;
 		goto put_host;
 	}
 
-	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-					iscsi_is_flashnode_conn_dev);
+	dev = iscsi_find_flashnode_conn(fnode_sess);
 	if (!dev) {
 		err = -ENODEV;
-		goto put_host;
+		goto put_sess;
 	}
 
 	fnode_conn = iscsi_dev_to_flash_conn(dev);
 	err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+	put_device(dev);
+
+put_sess:
+	put_device(&fnode_sess->dev);
 
 put_host:
 	scsi_host_put(shost);
@@ -2891,7 +2883,7 @@
 {
 	struct Scsi_Host *shost;
 	struct iscsi_bus_flash_session *fnode_sess;
-	uint32_t *idx;
+	uint32_t idx;
 	int err = 0;
 
 	if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@
 		goto put_host;
 	}
 
-	idx = &ev->u.del_flashnode.flashnode_idx;
-	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-						  flashnode_match_index);
+	idx = ev->u.del_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
 	if (!fnode_sess) {
 		pr_err("%s could not find flashnode %u for host no %u\n",
-		       __func__, *idx, ev->u.del_flashnode.host_no);
+		       __func__, idx, ev->u.del_flashnode.host_no);
 		err = -ENODEV;
 		goto put_host;
 	}
 
 	err = transport->del_flashnode(fnode_sess);
+	put_device(&fnode_sess->dev);
 
 put_host:
 	scsi_host_put(shost);
@@ -2933,7 +2925,7 @@
 	struct iscsi_bus_flash_session *fnode_sess;
 	struct iscsi_bus_flash_conn *fnode_conn;
 	struct device *dev;
-	uint32_t *idx;
+	uint32_t idx;
 	int err = 0;
 
 	if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@
 		goto put_host;
 	}
 
-	idx = &ev->u.login_flashnode.flashnode_idx;
-	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-						  flashnode_match_index);
+	idx = ev->u.login_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
 	if (!fnode_sess) {
 		pr_err("%s could not find flashnode %u for host no %u\n",
-		       __func__, *idx, ev->u.login_flashnode.host_no);
+		       __func__, idx, ev->u.login_flashnode.host_no);
 		err = -ENODEV;
 		goto put_host;
 	}
 
-	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-					iscsi_is_flashnode_conn_dev);
+	dev = iscsi_find_flashnode_conn(fnode_sess);
 	if (!dev) {
 		err = -ENODEV;
-		goto put_host;
+		goto put_sess;
 	}
 
 	fnode_conn = iscsi_dev_to_flash_conn(dev);
 	err = transport->login_flashnode(fnode_sess, fnode_conn);
+	put_device(dev);
+
+put_sess:
+	put_device(&fnode_sess->dev);
 
 put_host:
 	scsi_host_put(shost);
@@ -2983,7 +2977,7 @@
 	struct iscsi_bus_flash_session *fnode_sess;
 	struct iscsi_bus_flash_conn *fnode_conn;
 	struct device *dev;
-	uint32_t *idx;
+	uint32_t idx;
 	int err = 0;
 
 	if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@
 		goto put_host;
 	}
 
-	idx = &ev->u.logout_flashnode.flashnode_idx;
-	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-						  flashnode_match_index);
+	idx = ev->u.logout_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
 	if (!fnode_sess) {
 		pr_err("%s could not find flashnode %u for host no %u\n",
-		       __func__, *idx, ev->u.logout_flashnode.host_no);
+		       __func__, idx, ev->u.logout_flashnode.host_no);
 		err = -ENODEV;
 		goto put_host;
 	}
 
-	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-					iscsi_is_flashnode_conn_dev);
+	dev = iscsi_find_flashnode_conn(fnode_sess);
 	if (!dev) {
 		err = -ENODEV;
-		goto put_host;
+		goto put_sess;
 	}
 
 	fnode_conn = iscsi_dev_to_flash_conn(dev);
 
 	err = transport->logout_flashnode(fnode_sess, fnode_conn);
+	put_device(dev);
+
+put_sess:
+	put_device(&fnode_sess->dev);
 
 put_host:
 	scsi_host_put(shost);
@@ -3985,8 +3981,10 @@
 	}
 
 	iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
-	if (!iscsi_eh_timer_workq)
+	if (!iscsi_eh_timer_workq) {
+		err = -ENOMEM;
 		goto release_nls;
+	}
 
 	return 0;
 
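A note on the reference counting introduced above: device_find_child() returns its match with a reference taken via get_device(), so every lookup through iscsi_get_flashnode_by_index() or iscsi_find_flashnode_conn() must be balanced by put_device(); that is what the new put_sess labels provide on the error paths. A minimal sketch of that contract, reusing the helpers from this patch (illustrative only, not compilable on its own):

static int example_flashnode_lookup(struct Scsi_Host *shost, uint32_t idx)
{
	struct iscsi_bus_flash_session *fnode_sess;
	struct device *conn_dev;
	int err = 0;

	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
	if (!fnode_sess)
		return -ENODEV;			/* nothing to put yet */

	conn_dev = iscsi_find_flashnode_conn(fnode_sess);
	if (!conn_dev) {
		err = -ENODEV;
		goto put_sess;			/* session ref still held */
	}

	/* ... use the connection ... */

	put_device(conn_dev);			/* drop conn ref from find */
put_sess:
	put_device(&fnode_sess->dev);		/* drop session ref from find */
	return err;
}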
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7992635..c1c5552 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -142,6 +142,7 @@
 	char *buffer_data;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
+	static const char temp[] = "temporary ";
 	int len;
 
 	if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@
 		 * it's not worth the risk */
 		return -EINVAL;
 
+	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+		buf += sizeof(temp) - 1;
+		sdkp->cache_override = 1;
+	} else {
+		sdkp->cache_override = 0;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
 		len = strlen(sd_cache_types[i]);
 		if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@
 		return -EINVAL;
 	rcd = ct & 0x01 ? 1 : 0;
 	wce = ct & 0x02 ? 1 : 0;
+
+	if (sdkp->cache_override) {
+		sdkp->WCE = wce;
+		sdkp->RCD = rcd;
+		return count;
+	}
+
 	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
 			    SD_MAX_RETRIES, &data, NULL))
 		return -EINVAL;
@@ -1121,10 +1136,6 @@
 
 	sdev = sdkp->device;
 
-	retval = scsi_autopm_get_device(sdev);
-	if (retval)
-		goto error_autopm;
-
 	/*
 	 * If the device is in error recovery, wait until it is done.
 	 * If the device is offline, then disallow any access to it.
@@ -1169,8 +1180,6 @@
 	return 0;
 
 error_out:
-	scsi_autopm_put_device(sdev);
-error_autopm:
 	scsi_disk_put(sdkp);
 	return retval;
 }
@@ -1188,7 +1197,7 @@
  *
  *	Locking: called with bdev->bd_mutex held.
  **/
-static int sd_release(struct gendisk *disk, fmode_t mode)
+static void sd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdev = sdkp->device;
@@ -1205,9 +1214,7 @@
 	 * XXX is followed by a "rmmod sd_mod"?
 	 */
 
-	scsi_autopm_put_device(sdev);
 	scsi_disk_put(sdkp);
-	return 0;
 }
 
 static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1367,14 +1374,9 @@
 	retval = -ENODEV;
 
 	if (scsi_block_when_processing_errors(sdp)) {
-		retval = scsi_autopm_get_device(sdp);
-		if (retval)
-			goto out;
-
 		sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
 		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
 					      sshdr);
-		scsi_autopm_put_device(sdp);
 	}
 
 	/* failed to execute TUR, assume media not present */
@@ -1424,8 +1426,9 @@
 		 * Leave the rest of the command zero to indicate
 		 * flush everything.
 		 */
-		res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-				       SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
+		res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
+					     &sshdr, SD_FLUSH_TIMEOUT,
+					     SD_MAX_RETRIES, NULL, REQ_PM);
 		if (res == 0)
 			break;
 	}
@@ -2319,6 +2322,10 @@
 	int old_rcd = sdkp->RCD;
 	int old_dpofua = sdkp->DPOFUA;
 
+	if (sdkp->cache_override)
+		return;
+
 	first_len = 4;
 	if (sdp->skip_ms_page_8) {
 		if (sdp->type == TYPE_RBC)
@@ -2812,6 +2819,7 @@
 	sdkp->capacity = 0;
 	sdkp->media_present = 1;
 	sdkp->write_prot = 0;
+	sdkp->cache_override = 0;
 	sdkp->WCE = 0;
 	sdkp->RCD = 0;
 	sdkp->ATO = 0;
@@ -2838,6 +2846,7 @@
 
 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
 		  sdp->removable ? "removable " : "");
+	blk_pm_runtime_init(sdp->request_queue, dev);
 	scsi_autopm_put_device(sdp);
 	put_device(&sdkp->dev);
 }
@@ -3021,8 +3030,8 @@
 	if (!scsi_device_online(sdp))
 		return -ENODEV;
 
-	res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-			       SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+	res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+			       SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
 	if (res) {
 		sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
 		sd_print_result(sdkp, res);
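A note on the cache_type changes in sd.c above: the store handler now honors a "temporary " prefix, so writing e.g. "temporary write back" through the scsi_disk cache_type sysfs attribute updates only the kernel's cached WCE/RCD bits (no MODE SELECT is sent to the device), and sd_read_cache_type() leaves those cached values alone while cache_override is set. The prefix must be declared as a char array for the sizeof() arithmetic to work; a standalone sketch of that pitfall (illustrative, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char temp[] = "temporary ";	/* sizeof(temp) == 11 */
	const char *tptr = "temporary ";		/* sizeof(tptr) == sizeof(char *) */
	const char *buf = "temporary write back";

	printf("array: %zu, pointer: %zu\n",
	       sizeof(temp) - 1, sizeof(tptr) - 1);

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0)
		buf += sizeof(temp) - 1;	/* now points at "write back" */

	printf("remaining: \"%s\"\n", buf);
	return 0;
}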
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 74a1e4c..2386aeb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -73,6 +73,7 @@
 	u8		protection_type;/* Data Integrity Field */
 	u8		provisioning_mode;
 	unsigned	ATO : 1;	/* state of disk ATO bit */
+	unsigned	cache_override : 1; /* temp override of WCE,RCD */
 	unsigned	WCE : 1;	/* state of disk WCE bit */
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
 	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 04998f3..6174ca4 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -93,14 +93,6 @@
 		if (sdt->app_tag == 0xffff)
 			return 0;
 
-		/* Bad ref tag received from disk */
-		if (sdt->ref_tag == 0xffffffff) {
-			printk(KERN_ERR
-			       "%s: bad phys ref tag on sector %lu\n",
-			       bix->disk_name, (unsigned long)sector);
-			return -EIO;
-		}
-
 		if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
 			printk(KERN_ERR
 			       "%s: ref tag error on sector %lu (rcvd %u)\n",
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9f0c465..df5e961 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -35,6 +35,7 @@
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/mm.h>
+#include <linux/aio.h>
 #include <linux/errno.h>
 #include <linux/mtio.h>
 #include <linux/ioctl.h>
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f2884ee..119d67f 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -541,14 +541,13 @@
 	return ret;
 }
 
-static int sr_block_release(struct gendisk *disk, fmode_t mode)
+static void sr_block_release(struct gendisk *disk, fmode_t mode)
 {
 	struct scsi_cd *cd = scsi_cd(disk);
 	mutex_lock(&sr_mutex);
 	cdrom_release(&cd->cdi, mode);
 	scsi_cd_put(cd);
 	mutex_unlock(&sr_mutex);
-	return 0;
 }
 
 static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0371047..35faf24 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -57,3 +57,14 @@
 	  If you have a controller with this interface, say Y or M here.
 
 	  If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+	tristate "Platform bus based UFS Controller support"
+	depends on SCSI_UFSHCD
+	---help---
+	  This selects the UFS host controller support. Select this if
+	  you have a UFS controller on the Platform bus.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 9eda0df..1e5bd48 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,3 +1,4 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644
index 0000000..03319ac
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *	Santosh Yaraganavi <santosh.sy@samsung.com>
+ *	Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+	/*
+	 * TODO:
+	 * 1. Call ufshcd_suspend
+	 * 2. Do bus specific power management
+	 */
+
+	disable_irq(hba->irq);
+
+	return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+	/*
+	 * TODO:
+	 * 1. Call ufshcd_resume.
+	 * 2. Do bus specific wake up
+	 */
+
+	enable_irq(hba->irq);
+
+	return 0;
+}
+#else
+#define ufshcd_pltfrm_suspend	NULL
+#define ufshcd_pltfrm_resume	NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+	struct ufs_hba *hba;
+	void __iomem *mmio_base;
+	struct resource *mem_res;
+	struct resource *irq_res;
+	resource_size_t mem_size;
+	int err;
+	struct device *dev = &pdev->dev;
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		dev_err(&pdev->dev,
+			"Memory resource not available\n");
+		err = -ENODEV;
+		goto out_error;
+	}
+
+	mem_size = resource_size(mem_res);
+	if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+		dev_err(&pdev->dev,
+			"Cannot reserve the memory resource\n");
+		err = -EBUSY;
+		goto out_error;
+	}
+
+	mmio_base = ioremap_nocache(mem_res->start, mem_size);
+	if (!mmio_base) {
+		dev_err(&pdev->dev, "memory map failed\n");
+		err = -ENOMEM;
+		goto out_release_regions;
+	}
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res) {
+		dev_err(&pdev->dev, "IRQ resource not available\n");
+		err = -ENODEV;
+		goto out_iounmap;
+	}
+
+	err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+	if (err) {
+		dev_err(&pdev->dev, "set dma mask failed\n");
+		goto out_iounmap;
+	}
+
+	err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+	if (err) {
+		dev_err(&pdev->dev, "Initialization failed\n");
+		goto out_iounmap;
+	}
+
+	platform_set_drvdata(pdev, hba);
+
+	return 0;
+
+out_iounmap:
+	iounmap(mmio_base);
+out_release_regions:
+	release_mem_region(mem_res->start, mem_size);
+out_error:
+	return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+	struct resource *mem_res;
+	resource_size_t mem_size;
+	struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+	disable_irq(hba->irq);
+
+	/* Some buggy controllers raise an interrupt after the resources
+	 * are removed, so unregister the IRQ handler first and then
+	 * release the resources used by the driver.
+	 */
+
+	free_irq(hba->irq, hba);
+	ufshcd_remove(hba);
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res)
+		dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+	else {
+		mem_size = resource_size(mem_res);
+		release_mem_region(mem_res->start, mem_size);
+	}
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+	{ .compatible = "jedec,ufs-1.1" },
+	{ /* sentinel */ },
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+	.suspend	= ufshcd_pltfrm_suspend,
+	.resume		= ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+	.probe	= ufshcd_pltfrm_probe,
+	.remove	= ufshcd_pltfrm_remove,
+	.driver	= {
+		.name	= "ufshcd",
+		.owner	= THIS_MODULE,
+		.pm	= &ufshcd_dev_pm_ops,
+		.of_match_table = ufs_of_match,
+	},
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
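For comparison, a sketch (an assumption, not part of this patch) of the same probe written with managed resources: devm_ioremap_resource(), used by the Armada thermal driver added elsewhere in this merge, folds the request_mem_region()/ioremap() pair into one automatically-unwound call, which would remove the goto ladder above:

static int ufshcd_pltfrm_probe_devm(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	struct resource *mem_res;
	int irq, err;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);	/* also handles a missing resource */

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return 0;
}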
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 60fd40c..c32a478 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -478,7 +478,7 @@
 		ucd_cmd_ptr->header.dword_2 = 0;
 
 		ucd_cmd_ptr->exp_data_transfer_len =
-			cpu_to_be32(lrbp->cmd->transfersize);
+			cpu_to_be32(lrbp->cmd->sdb.length);
 
 		memcpy(ucd_cmd_ptr->cdb,
 		       lrbp->cmd->cmnd,
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 141d8c10..92a9345 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -62,7 +62,7 @@
 
 config SPI_ATH79
 	tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
-	depends on ATH79 && GENERIC_GPIO
+	depends on ATH79 && GPIOLIB
 	select SPI_BITBANG
 	help
 	  This enables support for the SPI controller present on the
@@ -175,7 +175,7 @@
 
 config SPI_GPIO
 	tristate "GPIO-based bitbanging SPI Master"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select SPI_BITBANG
 	help
 	  This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
@@ -259,7 +259,7 @@
 
 config SPI_OC_TINY
 	tristate "OpenCores tiny SPI"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select SPI_BITBANG
 	help
 	  This is the driver for OpenCores tiny SPI master controller.
@@ -457,7 +457,7 @@
 
 config SPI_TXX9
 	tristate "Toshiba TXx9 SPI controller"
-	depends on GENERIC_GPIO && CPU_TX49XX
+	depends on GPIOLIB && CPU_TX49XX
 	help
 	  SPI driver for Toshiba TXx9 MIPS SoCs
 
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index fa385a3..0907706 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -18,7 +18,7 @@
 
 #include "ssb_private.h"
 
-static const char *part_probes[] = { "bcm47xxpart", NULL };
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
 
 static struct physmap_flash_data ssb_pflash_data = {
 	.part_probe_types	= part_probes,
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 9f61d46..c0c95be 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -54,7 +54,7 @@
 
 config ANDROID_TIMED_GPIO
 	tristate "Android timed gpio driver"
-	depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+	depends on GPIOLIB && ANDROID_TIMED_OUTPUT
 	default n
 
 config ANDROID_LOW_MEMORY_KILLER
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index b14a557..b040200 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
+#include <linux/aio.h>
 #include "logger.h"
 
 #include <asm/ioctls.h>
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index e2e786d..ad45dfb 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -61,7 +61,7 @@
 	depends on SPI
 	select IIO_TRIGGER if IIO_BUFFER
 	depends on !IIO_BUFFER || IIO_KFIFO_BUF
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say yes here to build SPI support for the ST microelectronics
 	  accelerometer. The driver supplies direct access via sysfs files
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index d990829..cabc7a3 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -73,7 +73,7 @@
 config AD7816
 	tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
 	depends on SPI
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say yes here to build support for Analog Devices AD7816/7/8
 	  temperature sensors and ADC.
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
index 698a897..e6795e0 100644
--- a/drivers/staging/iio/addac/Kconfig
+++ b/drivers/staging/iio/addac/Kconfig
@@ -5,7 +5,7 @@
 
 config ADT7316
 	tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
 	  and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
index 49f69ef..ce360f1 100644
--- a/drivers/staging/iio/resolver/Kconfig
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -13,7 +13,7 @@
 config AD2S1200
 	tristate "Analog Devices ad2s1200/ad2s1205 driver"
 	depends on SPI
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say yes here to build support for Analog Devices spi resolver
 	  to digital converters, ad2s1200 and ad2s1205, provides direct access
@@ -22,7 +22,7 @@
 config AD2S1210
 	tristate "Analog Devices ad2s1210 driver"
 	depends on SPI
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say yes here to build support for Analog Devices spi resolver
 	  to digital converters, ad2s1210, provides direct access via sysfs.
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index d44d3ad..1a051da 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -14,7 +14,7 @@
 
 config IIO_GPIO_TRIGGER
 	tristate "GPIO trigger"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Provides support for using GPIO pins as IIO triggers.
 
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index a764f16..5e3c025 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -67,15 +67,16 @@
 	  Enable this to let the user space manage the platform thermals.
 
 config CPU_THERMAL
-	tristate "generic cpu cooling support"
+	bool "generic cpu cooling support"
 	depends on CPU_FREQ
 	select CPU_FREQ_TABLE
 	help
 	  This implements the generic cpu cooling mechanism through frequency
-	  reduction, cpu hotplug and any other ways of reducing temperature. An
-	  ACPI version of this already exists(drivers/acpi/processor_thermal.c).
+	  reduction. An ACPI version of this already exists
+	  (drivers/acpi/processor_thermal.c).
 	  This will be useful for platforms using the generic thermal interface
 	  and not the ACPI interface.
+
 	  If you want this support, you should say Y here.
 
 config THERMAL_EMULATION
@@ -86,6 +87,10 @@
 	  user can manually input temperature and test the different trip
 	  threshold behaviour for simulation purpose.
 
+	  WARNING: Be careful while enabling this option on production systems,
+	  because userland can easily disable the thermal policy by simply
+	  flooding this sysfs node with low temperature values.
+
 config SPEAR_THERMAL
 	bool "SPEAr thermal sensor driver"
 	depends on PLAT_SPEAR
@@ -117,15 +122,6 @@
 	  If you say yes here you get support for TMU (Thermal Management
 	  Unit) on SAMSUNG EXYNOS series of SoC.
 
-config EXYNOS_THERMAL_EMUL
-	bool "EXYNOS TMU emulation mode support"
-	depends on EXYNOS_THERMAL
-	help
-	  Exynos 4412 and 4414 and 5 series has emulation mode on TMU.
-	  Enable this option will be make sysfs node in exynos thermal platform
-	  device directory to support emulation mode. With emulation mode sysfs
-	  node, you can manually input temperature to TMU for simulation purpose.
-
 config DOVE_THERMAL
 	tristate "Temperature sensor on Marvell Dove SoCs"
 	depends on ARCH_DOVE
@@ -144,6 +140,14 @@
 	  created. Cooling devices can be bound to the trip points to cool this
 	  thermal zone if trip points reached.
 
+config ARMADA_THERMAL
+	tristate "Armada 370/XP thermal management"
+	depends on ARCH_MVEBU
+	depends on OF
+	help
+	  Enable this option if you want support for the thermal management
+	  controller present in the Armada 370 and Armada XP SoCs.
+
 config DB8500_CPUFREQ_COOLING
 	tristate "DB8500 cpufreq cooling"
 	depends on ARCH_U8500
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d3a2b38c..c054d41 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,14 +3,15 @@
 #
 
 obj-$(CONFIG_THERMAL)		+= thermal_sys.o
+thermal_sys-y			+= thermal_core.o
 
 # governors
-obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE)	+= fair_share.o
-obj-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
-obj-$(CONFIG_THERMAL_GOV_USER_SPACE)	+= user_space.o
+thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE)	+= fair_share.o
+thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE)	+= step_wise.o
+thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE)	+= user_space.o
 
 # cpufreq cooling
-obj-$(CONFIG_CPU_THERMAL)	+= cpu_cooling.o
+thermal_sys-$(CONFIG_CPU_THERMAL)	+= cpu_cooling.o
 
 # platform thermal drivers
 obj-$(CONFIG_SPEAR_THERMAL)	+= spear_thermal.o
@@ -19,6 +20,7 @@
 obj-$(CONFIG_EXYNOS_THERMAL)	+= exynos_thermal.o
 obj-$(CONFIG_DOVE_THERMAL)  	+= dove_thermal.o
 obj-$(CONFIG_DB8500_THERMAL)	+= db8500_thermal.o
+obj-$(CONFIG_ARMADA_THERMAL)	+= armada_thermal.o
 obj-$(CONFIG_DB8500_CPUFREQ_COOLING)	+= db8500_cpufreq_cooling.o
 obj-$(CONFIG_INTEL_POWERCLAMP)	+= intel_powerclamp.o
 
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
new file mode 100644
index 0000000..5b4d75f
--- /dev/null
+++ b/drivers/thermal/armada_thermal.c
@@ -0,0 +1,232 @@
+/*
+ * Marvell Armada 370/XP thermal sensor driver
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+
+#define THERMAL_VALID_OFFSET		9
+#define THERMAL_VALID_MASK		0x1
+#define THERMAL_TEMP_OFFSET		10
+#define THERMAL_TEMP_MASK		0x1ff
+
+/* Thermal Manager Control and Status Register */
+#define PMU_TDC0_SW_RST_MASK		(0x1 << 1)
+#define PMU_TM_DISABLE_OFFS		0
+#define PMU_TM_DISABLE_MASK		(0x1 << PMU_TM_DISABLE_OFFS)
+#define PMU_TDC0_REF_CAL_CNT_OFFS	11
+#define PMU_TDC0_REF_CAL_CNT_MASK	(0x1ff << PMU_TDC0_REF_CAL_CNT_OFFS)
+#define PMU_TDC0_OTF_CAL_MASK		(0x1 << 30)
+#define PMU_TDC0_START_CAL_MASK		(0x1 << 25)
+
+struct armada_thermal_ops;
+
+/* Marvell EBU Thermal Sensor Dev Structure */
+struct armada_thermal_priv {
+	void __iomem *sensor;
+	void __iomem *control;
+	struct armada_thermal_ops *ops;
+};
+
+struct armada_thermal_ops {
+	/* Initialize the sensor */
+	void (*init_sensor)(struct armada_thermal_priv *);
+
+	/* Test for a valid sensor value (optional) */
+	bool (*is_valid)(struct armada_thermal_priv *);
+};
+
+static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
+{
+	unsigned long reg;
+
+	reg = readl_relaxed(priv->control);
+	reg |= PMU_TDC0_OTF_CAL_MASK;
+	writel(reg, priv->control);
+
+	/* Reference calibration value */
+	reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+	reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+	writel(reg, priv->control);
+
+	/* Reset the sensor */
+	reg = readl_relaxed(priv->control);
+	writel((reg | PMU_TDC0_SW_RST_MASK), priv->control);
+
+	writel(reg, priv->control);
+
+	/* Enable the sensor */
+	reg = readl_relaxed(priv->sensor);
+	reg &= ~PMU_TM_DISABLE_MASK;
+	writel(reg, priv->sensor);
+}
+
+static void armada370_init_sensor(struct armada_thermal_priv *priv)
+{
+	unsigned long reg;
+
+	reg = readl_relaxed(priv->control);
+	reg |= PMU_TDC0_OTF_CAL_MASK;
+	writel(reg, priv->control);
+
+	/* Reference calibration value */
+	reg &= ~PMU_TDC0_REF_CAL_CNT_MASK;
+	reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS);
+	writel(reg, priv->control);
+
+	reg &= ~PMU_TDC0_START_CAL_MASK;
+	writel(reg, priv->control);
+
+	mdelay(10);
+}
+
+static bool armada_is_valid(struct armada_thermal_priv *priv)
+{
+	unsigned long reg = readl_relaxed(priv->sensor);
+
+	return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK;
+}
+
+static int armada_get_temp(struct thermal_zone_device *thermal,
+			  unsigned long *temp)
+{
+	struct armada_thermal_priv *priv = thermal->devdata;
+	unsigned long reg;
+
+	/* Valid check */
+	if (priv->ops->is_valid && !priv->ops->is_valid(priv)) {
+		dev_err(&thermal->device,
+			"Temperature sensor reading not valid\n");
+		return -EIO;
+	}
+
+	reg = readl_relaxed(priv->sensor);
+	reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK;
+	*temp = (3153000000UL - (10000000UL * reg)) / 13825;
+	return 0;
+}
+
+static struct thermal_zone_device_ops ops = {
+	.get_temp = armada_get_temp,
+};
+
+static const struct armada_thermal_ops armadaxp_ops = {
+	.init_sensor = armadaxp_init_sensor,
+};
+
+static const struct armada_thermal_ops armada370_ops = {
+	.is_valid = armada_is_valid,
+	.init_sensor = armada370_init_sensor,
+};
+
+static const struct of_device_id armada_thermal_id_table[] = {
+	{
+		.compatible = "marvell,armadaxp-thermal",
+		.data       = &armadaxp_ops,
+	},
+	{
+		.compatible = "marvell,armada370-thermal",
+		.data       = &armada370_ops,
+	},
+	{
+		/* sentinel */
+	},
+};
+MODULE_DEVICE_TABLE(of, armada_thermal_id_table);
+
+static int armada_thermal_probe(struct platform_device *pdev)
+{
+	struct thermal_zone_device *thermal;
+	const struct of_device_id *match;
+	struct armada_thermal_priv *priv;
+	struct resource *res;
+
+	match = of_match_device(armada_thermal_id_table, &pdev->dev);
+	if (!match)
+		return -ENODEV;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get platform resource\n");
+		return -ENODEV;
+	}
+
+	priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->sensor))
+		return PTR_ERR(priv->sensor);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get platform resource\n");
+		return -ENODEV;
+	}
+
+	priv->control = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->control))
+		return PTR_ERR(priv->control);
+
+	priv->ops = (struct armada_thermal_ops *)match->data;
+	priv->ops->init_sensor(priv);
+
+	thermal = thermal_zone_device_register("armada_thermal", 0, 0,
+					       priv, &ops, NULL, 0, 0);
+	if (IS_ERR(thermal)) {
+		dev_err(&pdev->dev,
+			"Failed to register thermal zone device\n");
+		return PTR_ERR(thermal);
+	}
+
+	platform_set_drvdata(pdev, thermal);
+
+	return 0;
+}
+
+static int armada_thermal_exit(struct platform_device *pdev)
+{
+	struct thermal_zone_device *armada_thermal =
+		platform_get_drvdata(pdev);
+
+	thermal_zone_device_unregister(armada_thermal);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver armada_thermal_driver = {
+	.probe = armada_thermal_probe,
+	.remove = armada_thermal_exit,
+	.driver = {
+		.name = "armada_thermal",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(armada_thermal_id_table),
+	},
+};
+
+module_platform_driver(armada_thermal_driver);
+
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 370/XP thermal driver");
+MODULE_LICENSE("GPL v2");
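A worked example of the conversion in armada_get_temp() above: the driver computes millicelsius = (3153000000 - 10000000 * reg) / 13825, i.e. roughly Celsius = (3153 - 10 * reg) / 13.825 for the 9-bit raw reading. A standalone check (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long reg = 250;	/* sample raw sensor reading */
	unsigned long mc = (3153000000UL - (10000000UL * reg)) / 13825;

	printf("reg=%lu -> %lu.%03lu C\n", reg, mc / 1000, mc % 1000);
	/* prints: reg=250 -> 47.233 C */
	return 0;
}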
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 8dc44cb..c94bf2e 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -20,10 +20,8 @@
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
-#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/thermal.h>
-#include <linux/platform_device.h>
 #include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/slab.h>
@@ -31,21 +29,19 @@
 #include <linux/cpu_cooling.h>
 
 /**
- * struct cpufreq_cooling_device
+ * struct cpufreq_cooling_device - data for cooling device with cpufreq
  * @id: unique integer value corresponding to each cpufreq_cooling_device
  *	registered.
- * @cool_dev: thermal_cooling_device pointer to keep track of the the
- *	egistered cooling device.
+ * @cool_dev: thermal_cooling_device pointer to keep track of the
+ *	registered cooling device.
  * @cpufreq_state: integer value representing the current state of cpufreq
  *	cooling	devices.
  * @cpufreq_val: integer value representing the absolute value of the clipped
  *	frequency.
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
- * @node: list_head to link all cpufreq_cooling_device together.
  *
  * This structure is required for keeping information of each
- * cpufreq_cooling_device registered as a list whose head is represented by
- * cooling_cpufreq_list. In order to prevent corruption of this list a
+ * cpufreq_cooling_device registered. To prevent corruption of this data, a
  * mutex lock cooling_cpufreq_lock is used.
  */
 struct cpufreq_cooling_device {
@@ -54,9 +50,7 @@
 	unsigned int cpufreq_state;
 	unsigned int cpufreq_val;
 	struct cpumask allowed_cpus;
-	struct list_head node;
 };
-static LIST_HEAD(cooling_cpufreq_list);
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
@@ -70,6 +64,11 @@
  * get_idr - function to get a unique id.
  * @idr: struct idr * handle used to create a id.
  * @id: int * value generated by this function.
+ *
+ * This function will populate @id with a unique
+ * id, using the idr API.
+ *
+ * Return: 0 on success, an error code on failure.
  */
 static int get_idr(struct idr *idr, int *id)
 {
@@ -81,6 +80,7 @@
 	if (unlikely(ret < 0))
 		return ret;
 	*id = ret;
+
 	return 0;
 }
 
@@ -99,63 +99,162 @@
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
- * is_cpufreq_valid - function to check if a cpu has frequency transition policy.
+ * is_cpufreq_valid - function to check frequency transitioning capability.
  * @cpu: cpu for which check is needed.
+ *
+ * This function checks whether the current state of the system allows
+ * changing the frequency of a given @cpu.
+ *
+ * Return: 0 if the system is not currently capable of changing the
+ * frequency of the given cpu, !0 if the frequency is changeable.
  */
 static int is_cpufreq_valid(int cpu)
 {
 	struct cpufreq_policy policy;
+
 	return !cpufreq_get_policy(&policy, cpu);
 }
 
+enum cpufreq_cooling_property {
+	GET_LEVEL,
+	GET_FREQ,
+	GET_MAXL,
+};
+
+/**
+ * get_property - fetch a property of interest for a given cpu.
+ * @cpu: cpu for which the property is required
+ * @input: query parameter
+ * @output: query return
+ * @property: type of query (frequency, level, max level)
+ *
+ * This is the common function to
+ * 1. get maximum cpu cooling states
+ * 2. translate frequency to cooling state
+ * 3. translate cooling state to frequency
+ * Note that the code may not be in its best shape,
+ * but it is written this way in order to:
+ * a) reduce duplicate code as most of the code can be shared.
+ * b) make sure the logic is consistent when translating between
+ *    cooling states and frequencies.
+ *
+ * Return: 0 on success, -EINVAL when invalid parameters are passed.
+ */
+static int get_property(unsigned int cpu, unsigned long input,
+			unsigned int *output,
+			enum cpufreq_cooling_property property)
+{
+	int i, j;
+	unsigned long max_level = 0, level = 0;
+	unsigned int freq = CPUFREQ_ENTRY_INVALID;
+	int descend = -1;
+	struct cpufreq_frequency_table *table =
+					cpufreq_frequency_get_table(cpu);
+
+	if (!output)
+		return -EINVAL;
+
+	if (!table)
+		return -EINVAL;
+
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		/* ignore invalid entries */
+		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
+			continue;
+
+		/* ignore duplicate entry */
+		if (freq == table[i].frequency)
+			continue;
+
+		/* get the frequency order */
+		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
+			descend = !!(freq > table[i].frequency);
+
+		freq = table[i].frequency;
+		max_level++;
+	}
+
+	/* get max level */
+	if (property == GET_MAXL) {
+		*output = (unsigned int)max_level;
+		return 0;
+	}
+
+	if (property == GET_FREQ)
+		level = descend ? input : (max_level - input - 1);
+
+	for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		/* ignore invalid entry */
+		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
+			continue;
+
+		/* ignore duplicate entry */
+		if (freq == table[i].frequency)
+			continue;
+
+		/* now we have a valid frequency entry */
+		freq = table[i].frequency;
+
+		if (property == GET_LEVEL && (unsigned int)input == freq) {
+			/* get level by frequency */
+			*output = descend ? j : (max_level - j - 1);
+			return 0;
+		}
+		if (property == GET_FREQ && level == j) {
+			/* get frequency by level */
+			*output = freq;
+			return 0;
+		}
+		j++;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
+ * @cpu: cpu for which the level is required
+ * @freq: the frequency of interest
+ *
+ * This function will match the cooling level corresponding to the
+ * requested @freq and return it.
+ *
+ * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID
+ * otherwise.
+ */
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+	unsigned int val;
+
+	if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
+		return THERMAL_CSTATE_INVALID;
+
+	return (unsigned long)val;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
+
 /**
  * get_cpu_frequency - get the absolute value of frequency from level.
  * @cpu: cpu for which frequency is fetched.
- * @level: level of frequency, equals cooling state of cpu cooling device
+ * @level: cooling level
+ *
+ * This function matches a cooling level with a frequency. The cooling level,
+ * which equals the cooling state of the cpu cooling device, is translated
+ * into the corresponding frequency.
  *	e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
+ *
+ * Return: 0 on error, the corresponding frequency otherwise.
  */
 static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
 {
-	int ret = 0, i = 0;
-	unsigned long level_index;
-	bool descend = false;
-	struct cpufreq_frequency_table *table =
-					cpufreq_frequency_get_table(cpu);
-	if (!table)
-		return ret;
+	int ret = 0;
+	unsigned int freq;
 
-	while (table[i].frequency != CPUFREQ_TABLE_END) {
-		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
+	ret = get_property(cpu, level, &freq, GET_FREQ);
+	if (ret)
+		return 0;
 
-		/*check if table in ascending or descending order*/
-		if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
-			(table[i + 1].frequency < table[i].frequency)
-			&& !descend) {
-			descend = true;
-		}
-
-		/*return if level matched and table in descending order*/
-		if (descend && i == level)
-			return table[i].frequency;
-		i++;
-	}
-	i--;
-
-	if (level > i || descend)
-		return ret;
-	level_index = i - level;
-
-	/*Scan the table in reverse order and match the level*/
-	while (i >= 0) {
-		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
-		/*return if level matched*/
-		if (i == level_index)
-			return table[i].frequency;
-		i--;
-	}
-	return ret;
+	return freq;
 }
 
 /**
@@ -163,13 +262,19 @@
  * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
  *	clipping data.
  * @cooling_state: value of the cooling state.
+ *
+ * Function used to make sure the cpufreq layer is aware of current thermal
+ * limits. The limits are applied by updating the cpufreq policy.
+ *
+ * Return: 0 on success, an error code otherwise (-EINVAL in case of a
+ * wrong cooling state).
  */
 static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
-				unsigned long cooling_state)
+				 unsigned long cooling_state)
 {
 	unsigned int cpuid, clip_freq;
-	struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
-	unsigned int cpu = cpumask_any(maskPtr);
+	struct cpumask *mask = &cpufreq_device->allowed_cpus;
+	unsigned int cpu = cpumask_any(mask);
 
 
 	/* Check if the old cooling action is same as new cooling action */
@@ -184,7 +289,7 @@
 	cpufreq_device->cpufreq_val = clip_freq;
 	notify_device = cpufreq_device;
 
-	for_each_cpu(cpuid, maskPtr) {
+	for_each_cpu(cpuid, mask) {
 		if (is_cpufreq_valid(cpuid))
 			cpufreq_update_policy(cpuid);
 	}
@@ -199,9 +304,15 @@
  * @nb:	struct notifier_block * with callback info.
  * @event: value showing cpufreq event for which this function invoked.
  * @data: callback-specific data
+ *
+ * Callback to hijack the notification on cpufreq policy transition.
+ * Every time there is a change in policy, we will intercept and
+ * update the cpufreq policy with thermal constraints.
+ *
+ * Return: 0 (success)
  */
 static int cpufreq_thermal_notifier(struct notifier_block *nb,
-					unsigned long event, void *data)
+				    unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
 	unsigned long max_freq = 0;
@@ -212,7 +323,7 @@
 	if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
 		max_freq = notify_device->cpufreq_val;
 
-	/* Never exceed user_policy.max*/
+	/* Never exceed user_policy.max */
 	if (max_freq > policy->user_policy.max)
 		max_freq = policy->user_policy.max;
 
@@ -222,50 +333,46 @@
 	return 0;
 }
 
-/*
- * cpufreq cooling device callback functions are defined below
- */
+/* cpufreq cooling device callback functions are defined below */
 
 /**
  * cpufreq_get_max_state - callback function to get the max cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
  */
 static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-	struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
+	struct cpumask *mask = &cpufreq_device->allowed_cpus;
 	unsigned int cpu;
-	struct cpufreq_frequency_table *table;
-	unsigned long count = 0;
-	int i = 0;
+	unsigned int count = 0;
+	int ret;
 
-	cpu = cpumask_any(maskPtr);
-	table = cpufreq_frequency_get_table(cpu);
-	if (!table) {
-		*state = 0;
-		return 0;
-	}
+	cpu = cpumask_any(mask);
 
-	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
-		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
-		count++;
-	}
+	ret = get_property(cpu, 0, &count, GET_MAXL);
 
-	if (count > 0) {
-		*state = --count;
-		return 0;
-	}
+	if (count > 0)
+		*state = count;
 
-	return -EINVAL;
+	return ret;
 }
 
 /**
  * cpufreq_get_cur_state - callback function to get the current cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
  */
 static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
@@ -273,6 +380,7 @@
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
 
 	*state = cpufreq_device->cpufreq_state;
+
 	return 0;
 }
 
@@ -280,6 +388,11 @@
  * cpufreq_set_cur_state - callback function to set the current cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
  */
 static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long state)
@@ -304,9 +417,16 @@
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
  * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ *
+ * This interface function registers the cpufreq cooling device with the name
+ * "thermal-cpufreq-%d". This API supports multiple instances of cpufreq
+ * cooling devices.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
  */
-struct thermal_cooling_device *cpufreq_cooling_register(
-	const struct cpumask *clip_cpus)
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
 	struct thermal_cooling_device *cool_dev;
 	struct cpufreq_cooling_device *cpufreq_dev = NULL;
@@ -315,9 +435,9 @@
 	int ret = 0, i;
 	struct cpufreq_policy policy;
 
-	/*Verify that all the clip cpus have same freq_min, freq_max limit*/
+	/* Verify that all the clip cpus have same freq_min, freq_max limit */
 	for_each_cpu(i, clip_cpus) {
-		/*continue if cpufreq policy not found and not return error*/
+		/* continue if cpufreq policy not found and not return error */
 		if (!cpufreq_get_policy(&policy, i))
 			continue;
 		if (min == 0 && max == 0) {
@@ -325,12 +445,12 @@
 			max = policy.cpuinfo.max_freq;
 		} else {
 			if (min != policy.cpuinfo.min_freq ||
-				max != policy.cpuinfo.max_freq)
+			    max != policy.cpuinfo.max_freq)
 				return ERR_PTR(-EINVAL);
 		}
 	}
 	cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
-			GFP_KERNEL);
+			      GFP_KERNEL);
 	if (!cpufreq_dev)
 		return ERR_PTR(-ENOMEM);
 
@@ -342,10 +462,11 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id);
+	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+		 cpufreq_dev->id);
 
 	cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
-						&cpufreq_cooling_ops);
+						   &cpufreq_cooling_ops);
 	if (!cool_dev) {
 		release_idr(&cpufreq_idr, cpufreq_dev->id);
 		kfree(cpufreq_dev);
@@ -358,17 +479,20 @@
 	/* Register the notifier for first cpufreq cooling device */
 	if (cpufreq_dev_count == 0)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
-						CPUFREQ_POLICY_NOTIFIER);
+					  CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_dev_count++;
 
 	mutex_unlock(&cooling_cpufreq_lock);
+
 	return cool_dev;
 }
-EXPORT_SYMBOL(cpufreq_cooling_register);
+EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
 /**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
+ *
+ * This interface function unregisters the "thermal-cpufreq-%d" cooling device.
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
@@ -378,14 +502,13 @@
 	cpufreq_dev_count--;
 
 	/* Unregister the notifier for the last cpufreq cooling device */
-	if (cpufreq_dev_count == 0) {
+	if (cpufreq_dev_count == 0)
 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
-					CPUFREQ_POLICY_NOTIFIER);
-	}
+					    CPUFREQ_POLICY_NOTIFIER);
 	mutex_unlock(&cooling_cpufreq_lock);
 
 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
 	kfree(cpufreq_dev);
 }
-EXPORT_SYMBOL(cpufreq_cooling_unregister);
+EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
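A worked example of the level/frequency mapping that get_property() implements above: level 0 always denotes the highest frequency, whichever way the cpufreq table is ordered. For a descending table {1200000, 900000, 600000} kHz, GET_MAXL yields 3, GET_LEVEL(900000) yields 1 and GET_FREQ(2) yields 600000; for an ascending table the index is mirrored via (max_level - j - 1). A minimal standalone sketch of that mirroring (not kernel code):

#include <stdio.h>

/* mimic the index selection done by get_property() for GET_FREQ */
static unsigned int level_to_freq(const unsigned int *table, int n,
				  unsigned int level, int descend)
{
	/* level 0 = highest frequency in either table order */
	unsigned int j = descend ? level : (n - level - 1);

	return table[j];
}

int main(void)
{
	const unsigned int desc[] = { 1200000, 900000, 600000 };
	const unsigned int asc[]  = { 600000, 900000, 1200000 };

	/* both lookups print 900000 */
	printf("%u %u\n", level_to_freq(desc, 3, 1, 1),
	       level_to_freq(asc, 3, 1, 0));
	return 0;
}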
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 2141985..786d192 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -37,7 +37,7 @@
 	cpumask_set_cpu(0, &mask_val);
 	cdev = cpufreq_cooling_register(&mask_val);
 
-	if (IS_ERR_OR_NULL(cdev)) {
+	if (IS_ERR(cdev)) {
 		dev_err(&pdev->dev, "Failed to register cooling device\n");
 		return PTR_ERR(cdev);
 	}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 61ce60a..1e3b3bf 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -419,7 +419,8 @@
 	low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW");
 	if (low_irq < 0) {
 		dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed.\n");
-		return low_irq;
+		ret = low_irq;
+		goto out_unlock;
 	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, low_irq, NULL,
@@ -427,13 +428,14 @@
 		"dbx500_temp_low", pzone);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to allocate temp low irq.\n");
-		return ret;
+		goto out_unlock;
 	}
 
 	high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH");
 	if (high_irq < 0) {
 		dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed.\n");
-		return high_irq;
+		ret = high_irq;
+		goto out_unlock;
 	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, high_irq, NULL,
@@ -441,15 +443,16 @@
 		"dbx500_temp_high", pzone);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to allocate temp high irq.\n");
-		return ret;
+		goto out_unlock;
 	}
 
 	pzone->therm_dev = thermal_zone_device_register("db8500_thermal_zone",
 		ptrips->num_trips, 0, pzone, &thdev_ops, NULL, 0, 0);
 
-	if (IS_ERR_OR_NULL(pzone->therm_dev)) {
+	if (IS_ERR(pzone->therm_dev)) {
 		dev_err(&pdev->dev, "Register thermal zone device failed.\n");
-		return PTR_ERR(pzone->therm_dev);
+		ret = PTR_ERR(pzone->therm_dev);
+		goto out_unlock;
 	}
 	dev_info(&pdev->dev, "Thermal zone device registered.\n");
 
@@ -461,9 +464,11 @@
 
 	platform_set_drvdata(pdev, pzone);
 	pzone->mode = THERMAL_DEVICE_ENABLED;
+
+out_unlock:
 	mutex_unlock(&pzone->th_lock);
 
-	return 0;
+	return ret;
 }
 
 static int db8500_thermal_remove(struct platform_device *pdev)
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 3078c40..4b15a5f 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -107,12 +107,13 @@
 	}
 
 	/*
-	 * Calculate temperature. See Section 8.10.1 of 88AP510,
-	 * Documentation/arm/Marvell/README
+	 * Calculate temperature. According to Marvell internal
+	 * documentation the formula for this is:
+	 * Celsius = (322-reg)/1.3625
 	 */
 	reg = readl_relaxed(priv->sensor);
 	reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK;
-	*temp = ((2281638UL - (7298*reg)) / 10);
+	*temp = ((3220000000UL - (10000000UL * reg)) / 13625);
 
 	return 0;
 }
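As a sanity check on the new constants: a raw reading of reg = 200 gives (3220000000 - 10000000 * 200) / 13625 = 1220000000 / 13625 ≈ 89541 millicelsius, matching the documented formula (322 - 200) / 1.3625 ≈ 89.5 °C. The same correction is applied to the Kirkwood driver further below, which reads the same sensor.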
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index b777ae6..d20ce9e 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -98,13 +98,13 @@
 #define IDLE_INTERVAL 10000
 #define MCELSIUS	1000
 
-#ifdef CONFIG_EXYNOS_THERMAL_EMUL
+#ifdef CONFIG_THERMAL_EMULATION
 #define EXYNOS_EMUL_TIME	0x57F0
 #define EXYNOS_EMUL_TIME_SHIFT	16
 #define EXYNOS_EMUL_DATA_SHIFT	8
 #define EXYNOS_EMUL_DATA_MASK	0xFF
 #define EXYNOS_EMUL_ENABLE	0x1
-#endif /* CONFIG_EXYNOS_THERMAL_EMUL */
+#endif /* CONFIG_THERMAL_EMULATION */
 
 /* CPU Zone information */
 #define PANIC_ZONE      4
@@ -143,6 +143,7 @@
 struct thermal_sensor_conf {
 	char name[SENSOR_NAME_LEN];
 	int (*read_temperature)(void *data);
+	int (*write_emul_temp)(void *drv_data, unsigned long temp);
 	struct thermal_trip_point_conf trip_data;
 	struct thermal_cooling_conf cooling_data;
 	void *private_data;
@@ -240,26 +241,6 @@
 	return ret;
 }
 
-static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq)
-{
-	int i = 0, ret = -EINVAL;
-	struct cpufreq_frequency_table *table = NULL;
-#ifdef CONFIG_CPU_FREQ
-	table = cpufreq_frequency_get_table(cpu);
-#endif
-	if (!table)
-		return ret;
-
-	while (table[i].frequency != CPUFREQ_TABLE_END) {
-		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
-			continue;
-		if (table[i].frequency == freq)
-			return i;
-		i++;
-	}
-	return ret;
-}
-
 /* Bind callback functions for thermal zone */
 static int exynos_bind(struct thermal_zone_device *thermal,
 			struct thermal_cooling_device *cdev)
@@ -286,8 +267,8 @@
 	/* Bind the thermal zone to the cpufreq cooling device */
 	for (i = 0; i < tab_size; i++) {
 		clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
-		level = exynos_get_frequency_level(0, clip_data->freq_clip_max);
-		if (level < 0)
+		level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
+		if (level == THERMAL_CSTATE_INVALID)
 			return 0;
 		switch (GET_ZONE(i)) {
 		case MONITOR_ZONE:
@@ -367,6 +348,23 @@
 	return 0;
 }
 
+/* Set emulation temperature callback function for thermal zone */
+static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
+						unsigned long temp)
+{
+	void *data;
+	int ret = -EINVAL;
+
+	if (!th_zone->sensor_conf) {
+		pr_info("Temperature sensor not initialised\n");
+		return -EINVAL;
+	}
+	data = th_zone->sensor_conf->private_data;
+	if (th_zone->sensor_conf->write_emul_temp)
+		ret = th_zone->sensor_conf->write_emul_temp(data, temp);
+	return ret;
+}
+
 /* Get the temperature trend */
 static int exynos_get_trend(struct thermal_zone_device *thermal,
 			int trip, enum thermal_trend *trend)
@@ -390,6 +388,7 @@
 	.bind = exynos_bind,
 	.unbind = exynos_unbind,
 	.get_temp = exynos_get_temp,
+	.set_emul_temp = exynos_set_emul_temp,
 	.get_trend = exynos_get_trend,
 	.get_mode = exynos_get_mode,
 	.set_mode = exynos_set_mode,
@@ -712,6 +711,47 @@
 	return temp;
 }
 
+#ifdef CONFIG_THERMAL_EMULATION
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+{
+	struct exynos_tmu_data *data = drv_data;
+	unsigned int reg;
+	int ret = -EINVAL;
+
+	if (data->soc == SOC_ARCH_EXYNOS4210)
+		goto out;
+
+	if (temp && temp < MCELSIUS)
+		goto out;
+
+	mutex_lock(&data->lock);
+	clk_enable(data->clk);
+
+	reg = readl(data->base + EXYNOS_EMUL_CON);
+
+	if (temp) {
+		temp /= MCELSIUS;
+
+		reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
+			(temp_to_code(data, temp)
+			 << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
+	} else {
+		reg &= ~EXYNOS_EMUL_ENABLE;
+	}
+
+	writel(reg, data->base + EXYNOS_EMUL_CON);
+
+	clk_disable(data->clk);
+	mutex_unlock(&data->lock);
+	return 0;
+out:
+	return ret;
+}
+#else
+static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
+	{ return -EINVAL; }
+#endif /* CONFIG_THERMAL_EMULATION */
+
 static void exynos_tmu_work(struct work_struct *work)
 {
 	struct exynos_tmu_data *data = container_of(work,
@@ -745,6 +785,7 @@
 static struct thermal_sensor_conf exynos_sensor_conf = {
 	.name			= "exynos-therm",
 	.read_temperature	= (int (*)(void *))exynos_tmu_read,
+	.write_emul_temp	= exynos_tmu_set_emulation,
 };
 
 #if defined(CONFIG_CPU_EXYNOS4210)
@@ -814,6 +855,10 @@
 		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
 	},
 	{
+		.compatible = "samsung,exynos4412-tmu",
+		.data = (void *)EXYNOS_TMU_DRV_DATA,
+	},
+	{
 		.compatible = "samsung,exynos5250-tmu",
 		.data = (void *)EXYNOS_TMU_DRV_DATA,
 	},
@@ -851,93 +896,6 @@
 			platform_get_device_id(pdev)->driver_data;
 }
 
-#ifdef CONFIG_EXYNOS_THERMAL_EMUL
-static ssize_t exynos_tmu_emulation_show(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct platform_device *pdev = container_of(dev,
-					struct platform_device, dev);
-	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-	unsigned int reg;
-	u8 temp_code;
-	int temp = 0;
-
-	if (data->soc == SOC_ARCH_EXYNOS4210)
-		goto out;
-
-	mutex_lock(&data->lock);
-	clk_enable(data->clk);
-	reg = readl(data->base + EXYNOS_EMUL_CON);
-	clk_disable(data->clk);
-	mutex_unlock(&data->lock);
-
-	if (reg & EXYNOS_EMUL_ENABLE) {
-		reg >>= EXYNOS_EMUL_DATA_SHIFT;
-		temp_code = reg & EXYNOS_EMUL_DATA_MASK;
-		temp = code_to_temp(data, temp_code);
-	}
-out:
-	return sprintf(buf, "%d\n", temp * MCELSIUS);
-}
-
-static ssize_t exynos_tmu_emulation_store(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
-{
-	struct platform_device *pdev = container_of(dev,
-					struct platform_device, dev);
-	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-	unsigned int reg;
-	int temp;
-
-	if (data->soc == SOC_ARCH_EXYNOS4210)
-		goto out;
-
-	if (!sscanf(buf, "%d\n", &temp) || temp < 0)
-		return -EINVAL;
-
-	mutex_lock(&data->lock);
-	clk_enable(data->clk);
-
-	reg = readl(data->base + EXYNOS_EMUL_CON);
-
-	if (temp) {
-		/* Both CELSIUS and MCELSIUS type are available for input */
-		if (temp > MCELSIUS)
-			temp /= MCELSIUS;
-
-		reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
-			(temp_to_code(data, (temp / MCELSIUS))
-			 << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE;
-	} else {
-		reg &= ~EXYNOS_EMUL_ENABLE;
-	}
-
-	writel(reg, data->base + EXYNOS_EMUL_CON);
-
-	clk_disable(data->clk);
-	mutex_unlock(&data->lock);
-
-out:
-	return count;
-}
-
-static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show,
-					exynos_tmu_emulation_store);
-static int create_emulation_sysfs(struct device *dev)
-{
-	return device_create_file(dev, &dev_attr_emulation);
-}
-static void remove_emulation_sysfs(struct device *dev)
-{
-	device_remove_file(dev, &dev_attr_emulation);
-}
-#else
-static inline int create_emulation_sysfs(struct device *dev) { return 0; }
-static inline void remove_emulation_sysfs(struct device *dev) {}
-#endif
-
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
 	struct exynos_tmu_data *data;
@@ -983,12 +941,16 @@
 		return ret;
 	}
 
-	data->clk = clk_get(NULL, "tmu_apbif");
+	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
 	if (IS_ERR(data->clk)) {
 		dev_err(&pdev->dev, "Failed to get clock\n");
 		return  PTR_ERR(data->clk);
 	}
 
+	ret = clk_prepare(data->clk);
+	if (ret)
+		return ret;
+
 	if (pdata->type == SOC_ARCH_EXYNOS ||
 				pdata->type == SOC_ARCH_EXYNOS4210)
 		data->soc = pdata->type;
@@ -1037,14 +999,10 @@
 		goto err_clk;
 	}
 
-	ret = create_emulation_sysfs(&pdev->dev);
-	if (ret)
-		dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n");
-
 	return 0;
 err_clk:
 	platform_set_drvdata(pdev, NULL);
-	clk_put(data->clk);
+	clk_unprepare(data->clk);
 	return ret;
 }
 
@@ -1052,13 +1010,11 @@
 {
 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
 
-	remove_emulation_sysfs(&pdev->dev);
-
 	exynos_tmu_control(pdev, false);
 
 	exynos_unregister_thermal();
 
-	clk_put(data->clk);
+	clk_unprepare(data->clk);
 
 	platform_set_drvdata(pdev, NULL);
 
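A minimal sketch, not part of the patch, of how the value written to EXYNOS_EMUL_CON in exynos_tmu_set_emulation() above is composed; it assumes the driver's EXYNOS_EMUL_* constants and its temp_to_code() helper:

/* Sketch only: compose the emulation-control word.  A non-zero
 * temperature enables emulation at that temperature; zero disables
 * it by clearing the enable bit in the current register value.
 */
static u32 emul_con_value(struct exynos_tmu_data *data, unsigned long temp,
			  u32 old)
{
	if (!temp)
		return old & ~EXYNOS_EMUL_ENABLE;

	return (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) |
	       (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
	       EXYNOS_EMUL_ENABLE;
}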
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 792479f..944ba2f 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -22,9 +22,6 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
 #include <linux/thermal.h>
 
 #include "thermal_core.h"
@@ -111,23 +108,15 @@
 static struct thermal_governor thermal_gov_fair_share = {
 	.name		= "fair_share",
 	.throttle	= fair_share_throttle,
-	.owner		= THIS_MODULE,
 };
 
-static int __init thermal_gov_fair_share_init(void)
+int thermal_gov_fair_share_register(void)
 {
 	return thermal_register_governor(&thermal_gov_fair_share);
 }
 
-static void __exit thermal_gov_fair_share_exit(void)
+void thermal_gov_fair_share_unregister(void)
 {
 	thermal_unregister_governor(&thermal_gov_fair_share);
 }
 
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_fair_share_init);
-module_exit(thermal_gov_fair_share_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A simple weight based thermal throttling governor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index e5500ed..dfeceaf 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -41,21 +41,21 @@
 	reg = readl_relaxed(priv->sensor);
 
 	/* Valid check */
-	if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
-	    KIRKWOOD_THERMAL_VALID_MASK) {
+	if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) &
+	    KIRKWOOD_THERMAL_VALID_MASK)) {
 		dev_err(&thermal->device,
 			"Temperature sensor reading not valid\n");
 		return -EIO;
 	}
 
 	/*
-	 * Calculate temperature. See Section 8.10.1 of the 88AP510,
-	 * datasheet, which has the same sensor.
-	 * Documentation/arm/Marvell/README
+	 * Calculate temperature. According to Marvell internal
+	 * documentation, the formula is:
+	 * Celsius = (322 - reg) / 1.3625
 	 */
 	reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) &
 		KIRKWOOD_THERMAL_TEMP_MASK;
-	*temp = ((2281638UL - (7298*reg)) / 10);
+	*temp = ((3220000000UL - (10000000UL * reg)) / 13625);
 
 	return 0;
 }
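The new conversion is the datasheet formula rescaled into integer millicelsius arithmetic; a quick check of the equivalence (not part of the patch, and assuming valid readings keep reg well below 322 so the subtraction stays positive):

/*
 * Celsius = (322 - reg) / 1.3625
 * => millicelsius = (322 - reg) * 1000 / 1.3625
 *                 = (3220000000 - 10000000 * reg) / 13625
 *
 * e.g. reg = 281:  (3220000000 - 2810000000) / 13625 = 30091 (~30.1 C)
 */
static unsigned long kirkwood_mcelsius(unsigned long reg)
{
	return (3220000000UL - (10000000UL * reg)) / 13625;
}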
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 2cc5b61..8d7edd4 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -24,6 +24,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -377,6 +378,9 @@
 	spin_lock_init(&common->lock);
 	common->dev = dev;
 
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (irq) {
 		int ret;
@@ -419,12 +423,15 @@
 		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 		if (!priv) {
 			dev_err(dev, "Could not allocate priv\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto error_unregister;
 		}
 
 		priv->base = devm_ioremap_resource(dev, res);
-		if (IS_ERR(priv->base))
-			return PTR_ERR(priv->base);
+		if (IS_ERR(priv->base)) {
+			ret = PTR_ERR(priv->base);
+			goto error_unregister;
+		}
 
 		priv->common = common;
 		priv->id = i;
@@ -443,10 +450,10 @@
 			goto error_unregister;
 		}
 
-		list_move_tail(&priv->list, &common->head);
-
 		if (rcar_has_irq_support(priv))
 			rcar_thermal_irq_enable(priv);
+
+		list_move_tail(&priv->list, &common->head);
 	}
 
 	platform_set_drvdata(pdev, common);
@@ -456,8 +463,14 @@
 	return 0;
 
 error_unregister:
-	rcar_thermal_for_each_priv(priv, common)
+	rcar_thermal_for_each_priv(priv, common) {
 		thermal_zone_device_unregister(priv->zone);
+		if (rcar_has_irq_support(priv))
+			rcar_thermal_irq_disable(priv);
+	}
+
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 
 	return ret;
 }
@@ -465,13 +478,20 @@
 static int rcar_thermal_remove(struct platform_device *pdev)
 {
 	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
 	struct rcar_thermal_priv *priv;
 
-	rcar_thermal_for_each_priv(priv, common)
+	rcar_thermal_for_each_priv(priv, common) {
 		thermal_zone_device_unregister(priv->zone);
+		if (rcar_has_irq_support(priv))
+			rcar_thermal_irq_disable(priv);
+	}
 
 	platform_set_drvdata(pdev, NULL);
 
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+
 	return 0;
 }
 
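The probe()/remove() changes above pair pm_runtime_enable()/pm_runtime_get_sync() with pm_runtime_put_sync()/pm_runtime_disable() on every exit path. A condensed sketch of that pairing, with example_register_zones() standing in as a hypothetical placeholder for the real registration code:

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = example_register_zones(pdev);	/* hypothetical helper */
	if (ret)
		goto error_unregister;

	return 0;

error_unregister:
	/* undo runtime PM in reverse order, exactly as the patch does */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}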
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index 407cde3..4d4ddae 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -22,9 +22,6 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
 #include <linux/thermal.h>
 
 #include "thermal_core.h"
@@ -59,9 +56,12 @@
 
 	switch (trend) {
 	case THERMAL_TREND_RAISING:
-		if (throttle)
+		if (throttle) {
 			cur_state = cur_state < instance->upper ?
 				    (cur_state + 1) : instance->upper;
+			if (cur_state < instance->lower)
+				cur_state = instance->lower;
+		}
 		break;
 	case THERMAL_TREND_RAISE_FULL:
 		if (throttle)
@@ -71,8 +71,11 @@
 		if (cur_state == instance->lower) {
 			if (!throttle)
 				cur_state = -1;
-		} else
+		} else {
 			cur_state -= 1;
+			if (cur_state > instance->upper)
+				cur_state = instance->upper;
+		}
 		break;
 	case THERMAL_TREND_DROP_FULL:
 		if (cur_state == instance->lower) {
@@ -180,23 +183,14 @@
 static struct thermal_governor thermal_gov_step_wise = {
 	.name		= "step_wise",
 	.throttle	= step_wise_throttle,
-	.owner		= THIS_MODULE,
 };
 
-static int __init thermal_gov_step_wise_init(void)
+int thermal_gov_step_wise_register(void)
 {
 	return thermal_register_governor(&thermal_gov_step_wise);
 }
 
-static void __exit thermal_gov_step_wise_exit(void)
+void thermal_gov_step_wise_unregister(void)
 {
 	thermal_unregister_governor(&thermal_gov_step_wise);
 }
-
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_step_wise_init);
-module_exit(thermal_gov_step_wise_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A step-by-step thermal throttling governor");
-MODULE_LICENSE("GPL");
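The two checks added above keep cur_state inside [instance->lower, instance->upper] after stepping. A sketch, not part of the patch, of the same clamping written with clamp_t() (the cur_state = -1 "deactivate" special case is omitted):

static unsigned long stepped_state(unsigned long cur, unsigned long lower,
				   unsigned long upper, bool raise)
{
	cur = raise ? cur + 1 : cur - 1;
	return clamp_t(unsigned long, cur, lower, upper);
}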
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_core.c
similarity index 88%
rename from drivers/thermal/thermal_sys.c
rename to drivers/thermal/thermal_core.c
index 5b7863a..d755440 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_core.c
@@ -40,7 +40,7 @@
 
 MODULE_AUTHOR("Zhang Rui");
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
 
 static DEFINE_IDR(thermal_tz_idr);
 static DEFINE_IDR(thermal_cdev_idr);
@@ -99,7 +99,6 @@
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(thermal_register_governor);
 
 void thermal_unregister_governor(struct thermal_governor *governor)
 {
@@ -127,7 +126,6 @@
 	mutex_unlock(&thermal_governor_lock);
 	return;
 }
-EXPORT_SYMBOL_GPL(thermal_unregister_governor);
 
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
@@ -371,16 +369,28 @@
 	monitor_thermal_zone(tz);
 }
 
-static int thermal_zone_get_temp(struct thermal_zone_device *tz,
-				unsigned long *temp)
+/**
+ * thermal_zone_get_temp() - returns the temperature of a given thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @temp: a valid pointer to where to store the resulting temperature.
+ *
+ * When a valid thermal zone reference is passed, it will fetch its
+ * temperature and fill @temp.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 #ifdef CONFIG_THERMAL_EMULATION
 	int count;
 	unsigned long crit_temp = -1UL;
 	enum thermal_trip_type type;
 #endif
 
+	if (!tz || IS_ERR(tz))
+		goto exit;
+
 	mutex_lock(&tz->lock);
 
 	ret = tz->ops->get_temp(tz, temp);
@@ -404,8 +414,10 @@
 skip_emul:
 #endif
 	mutex_unlock(&tz->lock);
+exit:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
 
 static void update_temperature(struct thermal_zone_device *tz)
 {
@@ -434,7 +446,7 @@
 	for (count = 0; count < tz->trips; count++)
 		handle_thermal_trip(tz, count);
 }
-EXPORT_SYMBOL(thermal_zone_device_update);
+EXPORT_SYMBOL_GPL(thermal_zone_device_update);
 
 static void thermal_zone_device_check(struct work_struct *work)
 {
@@ -1097,13 +1109,23 @@
 #endif
 
 /**
- * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
- * @tz:		thermal zone device
+ * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone
+ * @tz:		pointer to struct thermal_zone_device
  * @trip:	indicates which trip point the cooling devices is
  *		associated with in this thermal zone.
- * @cdev:	thermal cooling device
+ * @cdev:	pointer to struct thermal_cooling_device
+ * @upper:	the maximum cooling state for this trip point.
+ *		THERMAL_NO_LIMIT means no upper limit,
+ *		and the cooling device can be in max_state.
+ * @lower:	the minimum cooling state that can be used for this trip point.
+ *		THERMAL_NO_LIMIT means no lower limit,
+ *		and the cooling device can be in cooling state 0.
  *
+ * This interface function binds a thermal cooling device to a given trip
+ * point of a thermal zone device.
  * This function is usually called in the thermal zone device .bind callback.
+ *
+ * Return: 0 on success, the proper error value otherwise.
  */
 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
 				     int trip,
@@ -1197,16 +1219,21 @@
 	kfree(dev);
 	return result;
 }
-EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
+EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device);
 
 /**
- * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone
- * @tz:		thermal zone device
+ * thermal_zone_unbind_cooling_device() - unbind a cooling device from a
+ *					  thermal zone.
+ * @tz:		pointer to a struct thermal_zone_device.
  * @trip:	indicates which trip point the cooling devices is
  *		associated with in this thermal zone.
- * @cdev:	thermal cooling device
+ * @cdev:	pointer to a struct thermal_cooling_device.
  *
+ * This interface function unbinds a thermal cooling device from a given
+ * trip point of a thermal zone device.
  * This function is usually called in the thermal zone device .unbind callback.
+ *
+ * Return: 0 on success, the proper error value otherwise.
  */
 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
 				       int trip,
@@ -1237,7 +1264,7 @@
 	kfree(pos);
 	return 0;
 }
-EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
+EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device);
 
 static void thermal_release(struct device *dev)
 {
@@ -1260,10 +1287,17 @@
 };
 
 /**
- * thermal_cooling_device_register - register a new thermal cooling device
+ * thermal_cooling_device_register() - register a new thermal cooling device
  * @type:	the thermal cooling device type.
  * @devdata:	device private data.
  * @ops:		standard thermal cooling devices callbacks.
+ *
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to the /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind
+ * itself to all the thermal zone devices already registered.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
  */
 struct thermal_cooling_device *
 thermal_cooling_device_register(char *type, void *devdata,
@@ -1289,7 +1323,7 @@
 		return ERR_PTR(result);
 	}
 
-	strcpy(cdev->type, type ? : "");
+	strlcpy(cdev->type, type ? : "", sizeof(cdev->type));
 	mutex_init(&cdev->lock);
 	INIT_LIST_HEAD(&cdev->thermal_instances);
 	cdev->ops = ops;
@@ -1334,7 +1368,7 @@
 	device_unregister(&cdev->device);
 	return ERR_PTR(result);
 }
-EXPORT_SYMBOL(thermal_cooling_device_register);
+EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
 
 /**
  * thermal_cooling_device_unregister - removes the registered thermal cooling device
@@ -1394,7 +1428,7 @@
 	device_unregister(&cdev->device);
 	return;
 }
-EXPORT_SYMBOL(thermal_cooling_device_unregister);
+EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
 
 void thermal_cdev_update(struct thermal_cooling_device *cdev)
 {
@@ -1420,7 +1454,7 @@
 EXPORT_SYMBOL(thermal_cdev_update);
 
 /**
- * notify_thermal_framework - Sensor drivers use this API to notify framework
+ * thermal_notify_framework - Sensor drivers use this API to notify framework
  * @tz:		thermal zone device
  * @trip:	indicates which trip point has been crossed
  *
@@ -1431,16 +1465,21 @@
  * The throttling policy is based on the configured platform data; if no
  * platform data is provided, this uses the step_wise throttling policy.
  */
-void notify_thermal_framework(struct thermal_zone_device *tz, int trip)
+void thermal_notify_framework(struct thermal_zone_device *tz, int trip)
 {
 	handle_thermal_trip(tz, trip);
 }
-EXPORT_SYMBOL(notify_thermal_framework);
+EXPORT_SYMBOL_GPL(thermal_notify_framework);
 
 /**
- * create_trip_attrs - create attributes for trip points
+ * create_trip_attrs() - create attributes for trip points
  * @tz:		the thermal zone device
  * @mask:	Writeable trip point bitmap.
+ *
+ * Helper function that instantiates sysfs entries for every trip
+ * point of a struct thermal_zone_device and their properties.
+ *
+ * Return: 0 on success, the proper error value otherwise.
  */
 static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
 {
@@ -1541,7 +1580,7 @@
 }
 
 /**
- * thermal_zone_device_register - register a new thermal zone device
+ * thermal_zone_device_register() - register a new thermal zone device
  * @type:	the thermal zone device type
  * @trips:	the number of trip points the thermal zone support
  * @mask:	a bit string indicating the writeablility of trip points
@@ -1554,8 +1593,15 @@
  *		   whether trip points have been crossed (0 for interrupt
  *		   driven systems)
  *
+ * This interface function adds a new thermal zone device (sensor) to the
+ * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all
+ * already registered thermal cooling devices to it.
  * thermal_zone_device_unregister() must be called when the device is no
  * longer needed. The passive cooling depends on the .get_trend() return value.
+ *
+ * Return: a pointer to the created struct thermal_zone_device or,
+ * in case of error, an ERR_PTR. Caller must check return value with
+ * IS_ERR*() helpers.
  */
 struct thermal_zone_device *thermal_zone_device_register(const char *type,
 	int trips, int mask, void *devdata,
@@ -1594,7 +1640,7 @@
 		return ERR_PTR(result);
 	}
 
-	strcpy(tz->type, type ? : "");
+	strlcpy(tz->type, type ? : "", sizeof(tz->type));
 	tz->ops = ops;
 	tz->tzp = tzp;
 	tz->device.class = &thermal_class;
@@ -1687,7 +1733,7 @@
 	device_unregister(&tz->device);
 	return ERR_PTR(result);
 }
-EXPORT_SYMBOL(thermal_zone_device_register);
+EXPORT_SYMBOL_GPL(thermal_zone_device_register);
 
 /**
  * thermal_device_unregister - removes the registered thermal zone device
@@ -1754,7 +1800,45 @@
 	device_unregister(&tz->device);
 	return;
 }
-EXPORT_SYMBOL(thermal_zone_device_unregister);
+EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
+
+/**
+ * thermal_zone_get_zone_by_name() - search for a zone and return its ref
+ * @name: name of the thermal zone to look up
+ *
+ * When exactly one zone is found with the passed name, returns a reference
+ * to it.
+ *
+ * Return: On success returns a reference to the unique thermal zone whose
+ * name matches @name, an ERR_PTR otherwise (-EINVAL for invalid
+ * parameters, -ENODEV for not found and -EEXIST for multiple matches).
+ */
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name)
+{
+	struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL);
+	unsigned int found = 0;
+
+	if (!name)
+		goto exit;
+
+	mutex_lock(&thermal_list_lock);
+	list_for_each_entry(pos, &thermal_tz_list, node)
+		if (!strnicmp(name, pos->type, THERMAL_NAME_LENGTH)) {
+			found++;
+			ref = pos;
+		}
+	mutex_unlock(&thermal_list_lock);
+
+	/* nothing has been found, thus an error code for it */
+	if (found == 0)
+		ref = ERR_PTR(-ENODEV);
+	else if (found > 1)
+		/* success only when a unique zone is found */
+		ref = ERR_PTR(-EEXIST);
+
+exit:
+	return ref;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
 
 #ifdef CONFIG_NET
 static struct genl_family thermal_event_genl_family = {
@@ -1832,7 +1916,7 @@
 
 	return result;
 }
-EXPORT_SYMBOL(thermal_generate_netlink_event);
+EXPORT_SYMBOL_GPL(thermal_generate_netlink_event);
 
 static int genetlink_init(void)
 {
@@ -1858,30 +1942,69 @@
 static inline void genetlink_exit(void) {}
 #endif /* !CONFIG_NET */
 
+static int __init thermal_register_governors(void)
+{
+	int result;
+
+	result = thermal_gov_step_wise_register();
+	if (result)
+		return result;
+
+	result = thermal_gov_fair_share_register();
+	if (result)
+		return result;
+
+	return thermal_gov_user_space_register();
+}
+
+static void thermal_unregister_governors(void)
+{
+	thermal_gov_step_wise_unregister();
+	thermal_gov_fair_share_unregister();
+	thermal_gov_user_space_unregister();
+}
+
 static int __init thermal_init(void)
 {
-	int result = 0;
+	int result;
+
+	result = thermal_register_governors();
+	if (result)
+		goto error;
 
 	result = class_register(&thermal_class);
-	if (result) {
-		idr_destroy(&thermal_tz_idr);
-		idr_destroy(&thermal_cdev_idr);
-		mutex_destroy(&thermal_idr_lock);
-		mutex_destroy(&thermal_list_lock);
-		return result;
-	}
+	if (result)
+		goto unregister_governors;
+
 	result = genetlink_init();
+	if (result)
+		goto unregister_class;
+
+	return 0;
+
+unregister_governors:
+	thermal_unregister_governors();
+unregister_class:
+	class_unregister(&thermal_class);
+error:
+	idr_destroy(&thermal_tz_idr);
+	idr_destroy(&thermal_cdev_idr);
+	mutex_destroy(&thermal_idr_lock);
+	mutex_destroy(&thermal_list_lock);
+	mutex_destroy(&thermal_governor_lock);
 	return result;
 }
 
 static void __exit thermal_exit(void)
 {
+	genetlink_exit();
 	class_unregister(&thermal_class);
+	thermal_unregister_governors();
 	idr_destroy(&thermal_tz_idr);
 	idr_destroy(&thermal_cdev_idr);
 	mutex_destroy(&thermal_idr_lock);
 	mutex_destroy(&thermal_list_lock);
-	genetlink_exit();
+	mutex_destroy(&thermal_governor_lock);
 }
 
 fs_initcall(thermal_init);
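thermal_zone_get_zone_by_name() and thermal_zone_get_temp() are now exported as a pair, so a driver can look up a zone by name and read it. A hypothetical caller, as a sketch ("cpu-thermal" is an illustrative zone name, not one defined by this patch):

static int report_cpu_temp(void)
{
	struct thermal_zone_device *tz;
	unsigned long temp;
	int ret;

	tz = thermal_zone_get_zone_by_name("cpu-thermal");
	if (IS_ERR(tz))
		return PTR_ERR(tz);	/* -EINVAL, -ENODEV or -EEXIST */

	ret = thermal_zone_get_temp(tz, &temp);
	if (ret)
		return ret;

	pr_info("cpu-thermal: %lu mC\n", temp);
	return 0;
}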
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 0d3205a..7cf2f66 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -50,4 +50,31 @@
 	struct list_head cdev_node; /* node in cdev->thermal_instances */
 };
 
+int thermal_register_governor(struct thermal_governor *);
+void thermal_unregister_governor(struct thermal_governor *);
+
+#ifdef CONFIG_THERMAL_GOV_STEP_WISE
+int thermal_gov_step_wise_register(void);
+void thermal_gov_step_wise_unregister(void);
+#else
+static inline int thermal_gov_step_wise_register(void) { return 0; }
+static inline void thermal_gov_step_wise_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_STEP_WISE */
+
+#ifdef CONFIG_THERMAL_GOV_FAIR_SHARE
+int thermal_gov_fair_share_register(void);
+void thermal_gov_fair_share_unregister(void);
+#else
+static inline int thermal_gov_fair_share_register(void) { return 0; }
+static inline void thermal_gov_fair_share_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */
+
+#ifdef CONFIG_THERMAL_GOV_USER_SPACE
+int thermal_gov_user_space_register(void);
+void thermal_gov_user_space_unregister(void);
+#else
+static inline int thermal_gov_user_space_register(void) { return 0; }
+static inline void thermal_gov_user_space_unregister(void) {}
+#endif /* CONFIG_THERMAL_GOV_USER_SPACE */
+
 #endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 6bbb380..10adcdd 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -22,9 +22,6 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
 #include <linux/thermal.h>
 
 #include "thermal_core.h"
@@ -46,23 +43,15 @@
 static struct thermal_governor thermal_gov_user_space = {
 	.name		= "user_space",
 	.throttle	= notify_user_space,
-	.owner		= THIS_MODULE,
 };
 
-static int __init thermal_gov_user_space_init(void)
+int thermal_gov_user_space_register(void)
 {
 	return thermal_register_governor(&thermal_gov_user_space);
 }
 
-static void __exit thermal_gov_user_space_exit(void)
+void thermal_gov_user_space_unregister(void)
 {
 	thermal_unregister_governor(&thermal_gov_user_space);
 }
 
-/* This should load after thermal framework */
-fs_initcall(thermal_gov_user_space_init);
-module_exit(thermal_gov_user_space_exit);
-
-MODULE_AUTHOR("Durgadoss R");
-MODULE_DESCRIPTION("A user space Thermal notifier");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index ef2e08e..5dc9c4b 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -14,7 +14,6 @@
  * 2.4/2.5 port                 David McCullough
  */
 
-#include <asm/dbg.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/serial.h>
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 52a3ecd..6fa2ae77 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -30,7 +30,6 @@
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
-#include <bcm63xx_clk.h>
 #include <bcm63xx_irq.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_io.h>
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 6953dc8..a4fdce74 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -60,24 +60,22 @@
 		tty_audit_buf_free(buf);
 }
 
-static void tty_audit_log(const char *description, struct task_struct *tsk,
-			  kuid_t loginuid, unsigned sessionid, int major,
-			  int minor, unsigned char *data, size_t size)
+static void tty_audit_log(const char *description, int major, int minor,
+			  unsigned char *data, size_t size)
 {
 	struct audit_buffer *ab;
+	struct task_struct *tsk = current;
+	uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
+	uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
+	u32 sessionid = audit_get_sessionid(tsk);
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
 	if (ab) {
 		char name[sizeof(tsk->comm)];
-		kuid_t uid = task_uid(tsk);
 
-		audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u "
-				 "major=%d minor=%d comm=", description,
-				 tsk->pid,
-				 from_kuid(&init_user_ns, uid),
-				 from_kuid(&init_user_ns, loginuid),
-				 sessionid,
-				 major, minor);
+		audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
+				 " minor=%d comm=", description, tsk->pid, uid,
+				 loginuid, sessionid, major, minor);
 		get_task_comm(name, tsk);
 		audit_log_untrustedstring(ab, name);
 		audit_log_format(ab, " data=");
@@ -90,11 +88,9 @@
  *	tty_audit_buf_push	-	Push buffered data out
  *
  *	Generate an audit message from the contents of @buf, which is owned by
- *	@tsk with @loginuid.  @buf->mutex must be locked.
+ *	the current task.  @buf->mutex must be locked.
  */
-static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
-			       unsigned int sessionid,
-			       struct tty_audit_buf *buf)
+static void tty_audit_buf_push(struct tty_audit_buf *buf)
 {
 	if (buf->valid == 0)
 		return;
@@ -102,25 +98,11 @@
 		buf->valid = 0;
 		return;
 	}
-	tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
-		      buf->data, buf->valid);
+	tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
 	buf->valid = 0;
 }
 
 /**
- *	tty_audit_buf_push_current	-	Push buffered data out
- *
- *	Generate an audit message from the contents of @buf, which is owned by
- *	the current task.  @buf->mutex must be locked.
- */
-static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
-{
-	kuid_t auid = audit_get_loginuid(current);
-	unsigned int sessionid = audit_get_sessionid(current);
-	tty_audit_buf_push(current, auid, sessionid, buf);
-}
-
-/**
  *	tty_audit_exit	-	Handle a task exit
  *
  *	Make sure all buffered data is written out and deallocate the buffer.
@@ -130,15 +112,13 @@
 {
 	struct tty_audit_buf *buf;
 
-	spin_lock_irq(&current->sighand->siglock);
 	buf = current->signal->tty_audit_buf;
 	current->signal->tty_audit_buf = NULL;
-	spin_unlock_irq(&current->sighand->siglock);
 	if (!buf)
 		return;
 
 	mutex_lock(&buf->mutex);
-	tty_audit_buf_push_current(buf);
+	tty_audit_buf_push(buf);
 	mutex_unlock(&buf->mutex);
 
 	tty_audit_buf_put(buf);
@@ -151,9 +131,8 @@
  */
 void tty_audit_fork(struct signal_struct *sig)
 {
-	spin_lock_irq(&current->sighand->siglock);
 	sig->audit_tty = current->signal->audit_tty;
-	spin_unlock_irq(&current->sighand->siglock);
+	sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
 }
 
 /**
@@ -163,20 +142,21 @@
 {
 	struct tty_audit_buf *buf;
 	int major, minor, should_audit;
+	unsigned long flags;
 
-	spin_lock_irq(&current->sighand->siglock);
+	spin_lock_irqsave(&current->sighand->siglock, flags);
 	should_audit = current->signal->audit_tty;
 	buf = current->signal->tty_audit_buf;
 	if (buf)
 		atomic_inc(&buf->count);
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
 	major = tty->driver->major;
 	minor = tty->driver->minor_start + tty->index;
 	if (buf) {
 		mutex_lock(&buf->mutex);
 		if (buf->major == major && buf->minor == minor)
-			tty_audit_buf_push_current(buf);
+			tty_audit_buf_push(buf);
 		mutex_unlock(&buf->mutex);
 		tty_audit_buf_put(buf);
 	}
@@ -187,24 +167,20 @@
 
 		auid = audit_get_loginuid(current);
 		sessionid = audit_get_sessionid(current);
-		tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major,
-			      minor, &ch, 1);
+		tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
 	}
 }
 
 /**
- * tty_audit_push_task	-	Flush task's pending audit data
- * @tsk:		task pointer
- * @loginuid:		sender login uid
- * @sessionid:		sender session id
+ * tty_audit_push_current -	Flush current's pending audit data
  *
- * Called with a ref on @tsk held. Try to lock sighand and get a
- * reference to the tty audit buffer if available.
+ * Try to lock sighand and get a reference to the tty audit buffer if available.
  * Flush the buffer or return an appropriate error code.
  */
-int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
+int tty_audit_push_current(void)
 {
 	struct tty_audit_buf *buf = ERR_PTR(-EPERM);
+	struct task_struct *tsk = current;
 	unsigned long flags;
 
 	if (!lock_task_sighand(tsk, &flags))
@@ -225,7 +201,7 @@
 		return PTR_ERR(buf);
 
 	mutex_lock(&buf->mutex);
-	tty_audit_buf_push(tsk, loginuid, sessionid, buf);
+	tty_audit_buf_push(buf);
 	mutex_unlock(&buf->mutex);
 
 	tty_audit_buf_put(buf);
@@ -243,10 +219,11 @@
 		unsigned icanon)
 {
 	struct tty_audit_buf *buf, *buf2;
+	unsigned long flags;
 
 	buf = NULL;
 	buf2 = NULL;
-	spin_lock_irq(&current->sighand->siglock);
+	spin_lock_irqsave(&current->sighand->siglock, flags);
 	if (likely(!current->signal->audit_tty))
 		goto out;
 	buf = current->signal->tty_audit_buf;
@@ -254,7 +231,7 @@
 		atomic_inc(&buf->count);
 		goto out;
 	}
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
 	buf2 = tty_audit_buf_alloc(tty->driver->major,
 				   tty->driver->minor_start + tty->index,
@@ -264,7 +241,7 @@
 		return NULL;
 	}
 
-	spin_lock_irq(&current->sighand->siglock);
+	spin_lock_irqsave(&current->sighand->siglock, flags);
 	if (!current->signal->audit_tty)
 		goto out;
 	buf = current->signal->tty_audit_buf;
@@ -276,7 +253,7 @@
 	atomic_inc(&buf->count);
 	/* Fall through */
  out:
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	if (buf2)
 		tty_audit_buf_free(buf2);
 	return buf;
@@ -292,10 +269,18 @@
 {
 	struct tty_audit_buf *buf;
 	int major, minor;
+	int audit_log_tty_passwd;
+	unsigned long flags;
 
 	if (unlikely(size == 0))
 		return;
 
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+	if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
+		return;
+
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY
 	    && tty->driver->subtype == PTY_TYPE_MASTER)
 		return;
@@ -309,7 +294,7 @@
 	minor = tty->driver->minor_start + tty->index;
 	if (buf->major != major || buf->minor != minor
 	    || buf->icanon != icanon) {
-		tty_audit_buf_push_current(buf);
+		tty_audit_buf_push(buf);
 		buf->major = major;
 		buf->minor = minor;
 		buf->icanon = icanon;
@@ -325,7 +310,7 @@
 		data += run;
 		size -= run;
 		if (buf->valid == N_TTY_BUF_SIZE)
-			tty_audit_buf_push_current(buf);
+			tty_audit_buf_push(buf);
 	} while (size != 0);
 	mutex_unlock(&buf->mutex);
 	tty_audit_buf_put(buf);
@@ -339,16 +324,17 @@
 void tty_audit_push(struct tty_struct *tty)
 {
 	struct tty_audit_buf *buf;
+	unsigned long flags;
 
-	spin_lock_irq(&current->sighand->siglock);
+	spin_lock_irqsave(&current->sighand->siglock, flags);
 	if (likely(!current->signal->audit_tty)) {
-		spin_unlock_irq(&current->sighand->siglock);
+		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 		return;
 	}
 	buf = current->signal->tty_audit_buf;
 	if (buf)
 		atomic_inc(&buf->count);
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
 	if (buf) {
 		int major, minor;
@@ -357,7 +343,7 @@
 		minor = tty->driver->minor_start + tty->index;
 		mutex_lock(&buf->mutex);
 		if (buf->major == major && buf->minor == minor)
-			tty_audit_buf_push_current(buf);
+			tty_audit_buf_push(buf);
 		mutex_unlock(&buf->mutex);
 		tty_audit_buf_put(buf);
 	}
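The siglock conversions above switch from spin_lock_irq() to spin_lock_irqsave() because spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if a caller already held them disabled; irqsave/irqrestore round-trips the previous state instead. A sketch, not part of the patch, of the shape every converted site now follows:

static void audit_tty_copy(struct signal_struct *sig)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	sig->audit_tty = current->signal->audit_tty;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
	/* interrupts are now exactly as the caller left them */
}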
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index dda0dc4..570c005 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -24,6 +24,8 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
+#include <linux/mmu_context.h>
+#include <linux/aio.h>
 
 #include <linux/device.h>
 #include <linux/moduleparam.h>
@@ -513,6 +515,9 @@
 struct kiocb_priv {
 	struct usb_request	*req;
 	struct ep_data		*epdata;
+	struct kiocb		*iocb;
+	struct mm_struct	*mm;
+	struct work_struct	work;
 	void			*buf;
 	const struct iovec	*iv;
 	unsigned long		nr_segs;
@@ -528,7 +533,6 @@
 	local_irq_disable();
 	epdata = priv->epdata;
 	// spin_lock(&epdata->dev->lock);
-	kiocbSetCancelled(iocb);
 	if (likely(epdata && epdata->ep && priv->req))
 		value = usb_ep_dequeue (epdata->ep, priv->req);
 	else
@@ -540,15 +544,12 @@
 	return value;
 }
 
-static ssize_t ep_aio_read_retry(struct kiocb *iocb)
+static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
 {
-	struct kiocb_priv	*priv = iocb->private;
 	ssize_t			len, total;
 	void			*to_copy;
 	int			i;
 
-	/* we "retry" to get the right mm context for this: */
-
 	/* copy stuff into user buffers */
 	total = priv->actual;
 	len = 0;
@@ -568,9 +569,26 @@
 		if (total == 0)
 			break;
 	}
+
+	return len;
+}
+
+static void ep_user_copy_worker(struct work_struct *work)
+{
+	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
+	struct mm_struct *mm = priv->mm;
+	struct kiocb *iocb = priv->iocb;
+	size_t ret;
+
+	use_mm(mm);
+	ret = ep_copy_to_user(priv);
+	unuse_mm(mm);
+
+	/* completing the iocb can drop the ctx and mm, don't touch mm after */
+	aio_complete(iocb, ret, ret);
+
 	kfree(priv->buf);
 	kfree(priv);
-	return len;
 }
 
 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
@@ -596,14 +614,14 @@
 		aio_complete(iocb, req->actual ? req->actual : req->status,
 				req->status);
 	} else {
-		/* retry() won't report both; so we hide some faults */
+		/* ep_copy_to_user() won't report both; we hide some faults */
 		if (unlikely(0 != req->status))
 			DBG(epdata->dev, "%s fault %d len %d\n",
 				ep->name, req->status, req->actual);
 
 		priv->buf = req->buf;
 		priv->actual = req->actual;
-		kick_iocb(iocb);
+		schedule_work(&priv->work);
 	}
 	spin_unlock(&epdata->dev->lock);
 
@@ -633,8 +651,10 @@
 		return value;
 	}
 	iocb->private = priv;
+	priv->iocb = iocb;
 	priv->iv = iv;
 	priv->nr_segs = nr_segs;
+	INIT_WORK(&priv->work, ep_user_copy_worker);
 
 	value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
 	if (unlikely(value < 0)) {
@@ -642,10 +662,11 @@
 		goto fail;
 	}
 
-	iocb->ki_cancel = ep_aio_cancel;
+	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 	get_ep(epdata);
 	priv->epdata = epdata;
 	priv->actual = 0;
+	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
 
 	/* each kiocb is coupled to one usb_request, but we can't
 	 * allocate or submit those if the host disconnected.
@@ -674,7 +695,7 @@
 		kfree(priv);
 		put_ep(epdata);
 	} else
-		value = (iv ? -EIOCBRETRY : -EIOCBQUEUED);
+		value = -EIOCBQUEUED;
 	return value;
 }
 
@@ -692,7 +713,6 @@
 	if (unlikely(!buf))
 		return -ENOMEM;
 
-	iocb->ki_retry = ep_aio_read_retry;
 	return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs);
 }
 
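The retry-based read path above is replaced by deferring the copy-out to process context: completion runs in interrupt context, so a work item adopts the submitter's mm before touching user memory. The general pattern as a sketch, with illustrative (non-kernel) names:

struct deferred_copy {
	struct work_struct	work;
	struct mm_struct	*mm;	/* saved at submit time: current->mm */
	/* ... source buffer and destination iovecs ... */
};

static void deferred_copy_worker(struct work_struct *work)
{
	struct deferred_copy *dc = container_of(work, struct deferred_copy,
						work);

	use_mm(dc->mm);			/* make user mappings reachable */
	/* copy_to_user() against the saved iovecs goes here */
	unuse_mm(dc->mm);
}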
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index 1d215cd..b083a35 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -118,8 +118,10 @@
 
 	hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
 			     dev_name(&pdev->dev));
-	if (!hcd)
-		return -ENOMEM;
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto err_hcd;
+	}
 
 	/*
 	 * We don't use rsrc_start to map in our registers, but seems like
@@ -176,6 +178,7 @@
 err_no_irq:
 	tilegx_stop_ehc();
 	usb_put_hcd(hcd);
+err_hcd:
 	gxio_usb_host_destroy(&pdata->usb_ctx);
 	return ret;
 }
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index 1ae7b28..ea73009 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -112,8 +112,10 @@
 
 	hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
 			     dev_name(&pdev->dev));
-	if (!hcd)
-		return -ENOMEM;
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto err_hcd;
+	}
 
 	/*
 	 * We don't use rsrc_start to map in our registers, but seems like
@@ -165,6 +167,7 @@
 err_no_irq:
 	tilegx_stop_ohc();
 	usb_put_hcd(hcd);
+err_hcd:
 	gxio_usb_host_destroy(&pdata->usb_ctx);
 	return ret;
 }
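Both tilegx fixes follow the same rule: unwind in strict reverse order of setup, so gxio_usb_host_destroy() also runs when usb_create_hcd() itself fails. Schematically:

/*
 * setup:   gxio_usb_host_init();  usb_create_hcd();  ...
 * unwind:  ...;  err_no_irq: usb_put_hcd();
 *                err_hcd: gxio_usb_host_destroy();
 */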
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index aab2ab2..371d0e7 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -128,7 +128,7 @@
 
 config USB_GPIO_VBUS
 	tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Provides simple GPIO VBUS sensing for controllers with an
 	  internal transceiver via the usb_phy interface, and
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index c04ccdf..d71d60f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2429,7 +2429,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB_MODE_HELPERS
-	select OF_VIDEOMODE
+	select VIDEOMODE_HELPERS
 	help
 	  Framebuffer support for the MXS SoC.
 
@@ -2483,7 +2483,7 @@
 	tristate "Solomon SSD1307 framebuffer support"
 	depends on FB && I2C
 	depends on OF
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	select FB_SYS_FOPS
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index ddabaa8..700cac0 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -111,30 +111,16 @@
 	switch (blank_mode) {
 
 	case VESA_NO_BLANKING:
-			/* Turn on panel */
-			fbdev->regs->lcd_control |= LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-			if (fbdev->panel_idx == 1) {
-				au_writew(au_readw(PB1100_G_CONTROL)
-					  | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-			PB1100_G_CONTROL);
-			}
-#endif
+		/* Turn on panel */
+		fbdev->regs->lcd_control |= LCD_CONTROL_GO;
 		au_sync();
 		break;
 
 	case VESA_VSYNC_SUSPEND:
 	case VESA_HSYNC_SUSPEND:
 	case VESA_POWERDOWN:
-			/* Turn off panel */
-			fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-			if (fbdev->panel_idx == 1) {
-				au_writew(au_readw(PB1100_G_CONTROL)
-				  	  & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-			PB1100_G_CONTROL);
-			}
-#endif
+		/* Turn off panel */
+		fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
 		au_sync();
 		break;
 	default:
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2e166c3..d5ab658 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -36,14 +36,14 @@
 
 config LCD_L4F00242T03
 	tristate "Epson L4F00242T03 LCD"
-	depends on SPI_MASTER && GENERIC_GPIO
+	depends on SPI_MASTER && GPIOLIB
 	help
 	  SPI driver for Epson L4F00242T03. This provides basic support
 	  for init and powering the LCD up/down through a sysfs interface.
 
 config LCD_LMS283GF05
 	tristate "Samsung LMS283GF05 LCD"
-	depends on SPI_MASTER && GENERIC_GPIO
+	depends on SPI_MASTER && GPIOLIB
 	help
 	  SPI driver for Samsung LMS283GF05. This provides basic support
 	  for powering the LCD up/down through a sysfs interface.
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 1b2c26d..21223d4 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -42,7 +42,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/of_device.h>
-#include <video/of_display_timing.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
@@ -50,6 +49,7 @@
 #include <linux/pinctrl/consumer.h>
 #include <linux/fb.h>
 #include <linux/regulator/consumer.h>
+#include <video/of_display_timing.h>
 #include <video/videomode.h>
 
 #define REG_SET	4
@@ -777,16 +777,16 @@
 		struct videomode vm;
 		struct fb_videomode fb_vm;
 
-		ret = videomode_from_timing(timings, &vm, i);
+		ret = videomode_from_timings(timings, &vm, i);
 		if (ret < 0)
 			goto put_timings_node;
 		ret = fb_videomode_from_videomode(&vm, &fb_vm);
 		if (ret < 0)
 			goto put_timings_node;
 
-		if (vm.data_flags & DISPLAY_FLAGS_DE_HIGH)
+		if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
 			host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
-		if (vm.data_flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+		if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
 			host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
 		fb_add_videomode(&fb_vm, &fb_info->modelist);
 	}
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index e8ca63a..2bd1257 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -50,7 +50,7 @@
 
 config W1_MASTER_GPIO
 	tristate "GPIO 1-wire busmaster"
-	depends on GENERIC_GPIO
+	depends on GPIOLIB
 	help
 	  Say Y here if you want to communicate with your 1-wire devices using
 	  GPIO pins. This driver uses the GPIO API to control the wire.
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 8987990..d184c48 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -253,11 +253,9 @@
 		return -EINVAL;
 	}
 
-	wdt_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!wdt_base) {
-		dev_err(&pdev->dev, "unable to remap memory region\n");
-		return -ENOMEM;
-	}
+	wdt_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(wdt_base))
+		return PTR_ERR(wdt_base);
 
 	wdt_clk = devm_clk_get(&pdev->dev, "wdt");
 	if (IS_ERR(wdt_clk))
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 7df1fdc..100d4fb 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -27,6 +27,7 @@
 #include <linux/device.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
+#include <linux/err.h>
 
 #define MODULE_NAME "DAVINCI-WDT: "
 
@@ -221,11 +222,9 @@
 		return -ENOENT;
 	}
 
-	wdt_base = devm_request_and_ioremap(dev, wdt_mem);
-	if (!wdt_base) {
-		dev_err(dev, "ioremap failed\n");
-		return -EADDRNOTAVAIL;
-	}
+	wdt_base = devm_ioremap_resource(dev, wdt_mem);
+	if (IS_ERR(wdt_base))
+		return PTR_ERR(wdt_base);
 
 	ret = misc_register(&davinci_wdt_miscdev);
 	if (ret < 0) {
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index c1a221c..ee03135 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -330,10 +330,9 @@
 	}
 
 	/* get the memory region for the watchdog timer */
-	wdt_base = devm_request_and_ioremap(dev, wdt_mem);
-	if (wdt_base == NULL) {
-		dev_err(dev, "failed to devm_request_and_ioremap() region\n");
-		ret = -ENOMEM;
+	wdt_base = devm_ioremap_resource(dev, wdt_mem);
+	if (IS_ERR(wdt_base)) {
+		ret = PTR_ERR(wdt_base);
 		goto err;
 	}
 
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 6a89e40..6185af2b 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 #include <asm/watchdog.h>
 
 #define DRV_NAME "sh-wdt"
@@ -249,9 +250,9 @@
 		wdt->clk = NULL;
 	}
 
-	wdt->base = devm_request_and_ioremap(wdt->dev, res);
-	if (unlikely(!wdt->base)) {
-		rc = -EADDRNOTAVAIL;
+	wdt->base = devm_ioremap_resource(wdt->dev, res);
+	if (IS_ERR(wdt->base)) {
+		rc = PTR_ERR(wdt->base);
 		goto err;
 	}
 
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 08b48bb..faf4e18 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -523,6 +523,7 @@
 	int err, devno;
 
 	if (watchdog->id == 0) {
+		old_wdd = watchdog;
 		watchdog_miscdev.parent = watchdog->parent;
 		err = misc_register(&watchdog_miscdev);
 		if (err != 0) {
@@ -531,9 +532,9 @@
 			if (err == -EBUSY)
 				pr_err("%s: a legacy watchdog module is probably present.\n",
 					watchdog->info->identity);
+			old_wdd = NULL;
 			return err;
 		}
-		old_wdd = watchdog;
 	}
 
 	/* Fill in the data structures */
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index dd4d9cb..f03bf50 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -141,7 +141,7 @@
 
 config SWIOTLB_XEN
 	def_bool y
-	depends on PCI
+	depends on PCI && X86
 	select SWIOTLB
 
 config XEN_TMEM
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index d8cc8127..6a6bbe4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -167,6 +167,8 @@
 	info->cpu = cpu;
 
 	evtchn_to_irq[evtchn] = irq;
+
+	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 }
 
 static void xen_irq_info_evtchn_init(unsigned irq,
@@ -874,7 +876,6 @@
 		struct irq_info *info = info_for_irq(irq);
 		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
 	}
-	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
 	mutex_unlock(&irq_mapping_update_lock);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 0ad61c6..055562c 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -33,6 +33,7 @@
 #include <linux/pagemap.h>
 #include <linux/idr.h>
 #include <linux/sched.h>
+#include <linux/aio.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 7e03ead..a890db4 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
+#include <linux/aio.h>
 #include "internal.h"
 
 static int afs_write_back_from_locked_page(struct afs_writeback *wb,
diff --git a/fs/aio.c b/fs/aio.c
index 351afe7..c5b1a8c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -8,6 +8,8 @@
  *
  *	See ../COPYING for licensing terms.
  */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -18,8 +20,6 @@
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 
-#define DEBUG 0
-
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/file.h>
@@ -39,11 +39,76 @@
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
 
-#if DEBUG > 1
-#define dprintk		printk
-#else
-#define dprintk(x...)	do { ; } while (0)
-#endif
+#define AIO_RING_MAGIC			0xa10a10a1
+#define AIO_RING_COMPAT_FEATURES	1
+#define AIO_RING_INCOMPAT_FEATURES	0
+struct aio_ring {
+	unsigned	id;	/* kernel internal index number */
+	unsigned	nr;	/* number of io_events */
+	unsigned	head;
+	unsigned	tail;
+
+	unsigned	magic;
+	unsigned	compat_features;
+	unsigned	incompat_features;
+	unsigned	header_length;	/* size of aio_ring */
+
+	struct io_event		io_events[0];
+}; /* 128 bytes + ring size */
+
+#define AIO_RING_PAGES	8
+
+struct kioctx {
+	atomic_t		users;
+	atomic_t		dead;
+
+	/* This needs improving */
+	unsigned long		user_id;
+	struct hlist_node	list;
+
+	/*
+	 * This is what userspace passed to io_setup(), it's not used for
+	 * anything but counting against the global max_reqs quota.
+	 *
+	 * The real limit is nr_events - 1, which will be larger (see
+	 * aio_setup_ring())
+	 */
+	unsigned		max_reqs;
+
+	/* Size of ringbuffer, in units of struct io_event */
+	unsigned		nr_events;
+
+	unsigned long		mmap_base;
+	unsigned long		mmap_size;
+
+	struct page		**ring_pages;
+	long			nr_pages;
+
+	struct rcu_head		rcu_head;
+	struct work_struct	rcu_work;
+
+	struct {
+		atomic_t	reqs_active;
+	} ____cacheline_aligned_in_smp;
+
+	struct {
+		spinlock_t	ctx_lock;
+		struct list_head active_reqs;	/* used for cancellation */
+	} ____cacheline_aligned_in_smp;
+
+	struct {
+		struct mutex	ring_lock;
+		wait_queue_head_t wait;
+	} ____cacheline_aligned_in_smp;
+
+	struct {
+		unsigned	tail;
+		spinlock_t	completion_lock;
+	} ____cacheline_aligned_in_smp;
+
+	struct page		*internal_pages[AIO_RING_PAGES];
+};
 
 /*------ sysctl variables----*/
 static DEFINE_SPINLOCK(aio_nr_lock);
@@ -54,11 +119,6 @@
 static struct kmem_cache	*kiocb_cachep;
 static struct kmem_cache	*kioctx_cachep;
 
-static struct workqueue_struct *aio_wq;
-
-static void aio_kick_handler(struct work_struct *);
-static void aio_queue_work(struct kioctx *);
-
 /* aio_setup
  *	Creates the slab caches used by the aio routines, panic on
  *	failure as this is done early during the boot sequence.
@@ -68,10 +128,7 @@
 	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
-	aio_wq = alloc_workqueue("aio", 0, 1);	/* used to limit concurrency */
-	BUG_ON(!aio_wq);
-
-	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
+	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
 
 	return 0;
 }
@@ -79,28 +136,23 @@
 
 static void aio_free_ring(struct kioctx *ctx)
 {
-	struct aio_ring_info *info = &ctx->ring_info;
 	long i;
 
-	for (i=0; i<info->nr_pages; i++)
-		put_page(info->ring_pages[i]);
+	for (i = 0; i < ctx->nr_pages; i++)
+		put_page(ctx->ring_pages[i]);
 
-	if (info->mmap_size) {
-		BUG_ON(ctx->mm != current->mm);
-		vm_munmap(info->mmap_base, info->mmap_size);
-	}
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
-	if (info->ring_pages && info->ring_pages != info->internal_pages)
-		kfree(info->ring_pages);
-	info->ring_pages = NULL;
-	info->nr = 0;
+	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
+		kfree(ctx->ring_pages);
 }
 
 static int aio_setup_ring(struct kioctx *ctx)
 {
 	struct aio_ring *ring;
-	struct aio_ring_info *info = &ctx->ring_info;
 	unsigned nr_events = ctx->max_reqs;
+	struct mm_struct *mm = current->mm;
 	unsigned long size, populate;
 	int nr_pages;
 
@@ -116,46 +168,44 @@
 
 	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
 
-	info->nr = 0;
-	info->ring_pages = info->internal_pages;
+	ctx->nr_events = 0;
+	ctx->ring_pages = ctx->internal_pages;
 	if (nr_pages > AIO_RING_PAGES) {
-		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
-		if (!info->ring_pages)
+		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+					  GFP_KERNEL);
+		if (!ctx->ring_pages)
 			return -ENOMEM;
 	}
 
-	info->mmap_size = nr_pages * PAGE_SIZE;
-	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
-	down_write(&ctx->mm->mmap_sem);
-	info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, 
-					PROT_READ|PROT_WRITE,
-					MAP_ANONYMOUS|MAP_PRIVATE, 0,
-					&populate);
-	if (IS_ERR((void *)info->mmap_base)) {
-		up_write(&ctx->mm->mmap_sem);
-		info->mmap_size = 0;
+	ctx->mmap_size = nr_pages * PAGE_SIZE;
+	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
+	down_write(&mm->mmap_sem);
+	ctx->mmap_base = do_mmap_pgoff(NULL, 0, ctx->mmap_size,
+				       PROT_READ|PROT_WRITE,
+				       MAP_ANONYMOUS|MAP_PRIVATE, 0, &populate);
+	if (IS_ERR((void *)ctx->mmap_base)) {
+		up_write(&mm->mmap_sem);
+		ctx->mmap_size = 0;
 		aio_free_ring(ctx);
 		return -EAGAIN;
 	}
 
-	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
-	info->nr_pages = get_user_pages(current, ctx->mm,
-					info->mmap_base, nr_pages, 
-					1, 0, info->ring_pages, NULL);
-	up_write(&ctx->mm->mmap_sem);
+	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
+	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
+				       1, 0, ctx->ring_pages, NULL);
+	up_write(&mm->mmap_sem);
 
-	if (unlikely(info->nr_pages != nr_pages)) {
+	if (unlikely(ctx->nr_pages != nr_pages)) {
 		aio_free_ring(ctx);
 		return -EAGAIN;
 	}
 	if (populate)
-		mm_populate(info->mmap_base, populate);
+		mm_populate(ctx->mmap_base, populate);
 
-	ctx->user_id = info->mmap_base;
+	ctx->user_id = ctx->mmap_base;
+	ctx->nr_events = nr_events; /* trusted copy */
 
-	info->nr = nr_events;		/* trusted copy */
-
-	ring = kmap_atomic(info->ring_pages[0]);
+	ring = kmap_atomic(ctx->ring_pages[0]);
 	ring->nr = nr_events;	/* user copy */
 	ring->id = ctx->user_id;
 	ring->head = ring->tail = 0;
@@ -164,72 +214,133 @@
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
 	kunmap_atomic(ring);
+	flush_dcache_page(ctx->ring_pages[0]);
 
 	return 0;
 }
 
-
-/* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic().  Release the pointer with put_aio_ring_event();
- */
 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr) ({					\
-	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
-	struct io_event *__event;					\
-	__event = kmap_atomic(						\
-			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
-	__event += pos % AIO_EVENTS_PER_PAGE;				\
-	__event;							\
-})
+void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
+{
+	struct kioctx *ctx = req->ki_ctx;
+	unsigned long flags;
 
-#define put_aio_ring_event(event) do {		\
-	struct io_event *__event = (event);	\
-	(void)__event;				\
-	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
-} while(0)
+	spin_lock_irqsave(&ctx->ctx_lock, flags);
 
-static void ctx_rcu_free(struct rcu_head *head)
+	if (!req->ki_list.next)
+		list_add(&req->ki_list, &ctx->active_reqs);
+
+	req->ki_cancel = cancel;
+
+	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+}
+EXPORT_SYMBOL(kiocb_set_cancel_fn);
+
+static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
+			struct io_event *res)
+{
+	kiocb_cancel_fn *old, *cancel;
+	int ret = -EINVAL;
+
+	/*
+	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
+	 * actually has a cancel function, hence the cmpxchg()
+	 */
+
+	cancel = ACCESS_ONCE(kiocb->ki_cancel);
+	do {
+		if (!cancel || cancel == KIOCB_CANCELLED)
+			return ret;
+
+		old = cancel;
+		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
+	} while (cancel != old);
+
+	atomic_inc(&kiocb->ki_users);
+	spin_unlock_irq(&ctx->ctx_lock);
+
+	memset(res, 0, sizeof(*res));
+	res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
+	res->data = kiocb->ki_user_data;
+	ret = cancel(kiocb, res);
+
+	spin_lock_irq(&ctx->ctx_lock);
+
+	return ret;
+}
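
What the cmpxchg() loop in kiocb_cancel() above buys: exactly one caller can swap ki_cancel from a real callback to KIOCB_CANCELLED, so that caller alone invokes cancel(); every racer observes KIOCB_CANCELLED and returns -EINVAL. In outline:

/*
 * cancel = ACCESS_ONCE(kiocb->ki_cancel);
 * loop:  cmpxchg(&kiocb->ki_cancel, cancel, KIOCB_CANCELLED)
 *        - returns 'cancel'        -> we own the callback, call it once
 *        - returns KIOCB_CANCELLED -> someone else won, bail out
 *        - returns something else  -> ki_cancel changed under us, retry
 */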
+
+static void free_ioctx_rcu(struct rcu_head *head)
 {
 	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
+/*
+ * When this function runs, the kioctx has been removed from the "hash table"
+ * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
+ * now it's safe to cancel any that need to be.
  */
-static void __put_ioctx(struct kioctx *ctx)
+static void free_ioctx(struct kioctx *ctx)
 {
-	unsigned nr_events = ctx->max_reqs;
-	BUG_ON(ctx->reqs_active);
+	struct aio_ring *ring;
+	struct io_event res;
+	struct kiocb *req;
+	unsigned head, avail;
 
-	cancel_delayed_work_sync(&ctx->wq);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
+	spin_lock_irq(&ctx->ctx_lock);
+
+	while (!list_empty(&ctx->active_reqs)) {
+		req = list_first_entry(&ctx->active_reqs,
+				       struct kiocb, ki_list);
+
+		list_del_init(&req->ki_list);
+		kiocb_cancel(ctx, req, &res);
 	}
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	call_rcu(&ctx->rcu_head, ctx_rcu_free);
+
+	spin_unlock_irq(&ctx->ctx_lock);
+
+	ring = kmap_atomic(ctx->ring_pages[0]);
+	head = ring->head;
+	kunmap_atomic(ring);
+
+	while (atomic_read(&ctx->reqs_active) > 0) {
+		wait_event(ctx->wait, head != ctx->tail);
+
+		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
+
+		atomic_sub(avail, &ctx->reqs_active);
+		head += avail;
+		head %= ctx->nr_events;
+	}
+
+	WARN_ON(atomic_read(&ctx->reqs_active) < 0);
+
+	aio_free_ring(ctx);
+
+	spin_lock(&aio_nr_lock);
+	BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+	aio_nr -= ctx->max_reqs;
+	spin_unlock(&aio_nr_lock);
+
+	pr_debug("freeing %p\n", ctx);
+
+	/*
+	 * Here the call_rcu() is between the wait_event() for reqs_active to
+	 * hit 0, and freeing the ioctx.
+	 *
+	 * aio_complete() decrements reqs_active, but it has to touch the ioctx
+	 * after to issue a wakeup so we use rcu.
+	 */
+	call_rcu(&ctx->rcu_head, free_ioctx_rcu);
 }
 
-static inline int try_get_ioctx(struct kioctx *kioctx)
+static void put_ioctx(struct kioctx *ctx)
 {
-	return atomic_inc_not_zero(&kioctx->users);
-}
-
-static inline void put_ioctx(struct kioctx *kioctx)
-{
-	BUG_ON(atomic_read(&kioctx->users) <= 0);
-	if (unlikely(atomic_dec_and_test(&kioctx->users)))
-		__put_ioctx(kioctx);
+	if (unlikely(atomic_dec_and_test(&ctx->users)))
+		free_ioctx(ctx);
 }
 
 /* ioctx_alloc
@@ -237,7 +348,7 @@
  */
 static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
-	struct mm_struct *mm;
+	struct mm_struct *mm = current->mm;
 	struct kioctx *ctx;
 	int err = -ENOMEM;
 
@@ -256,17 +367,15 @@
 		return ERR_PTR(-ENOMEM);
 
 	ctx->max_reqs = nr_events;
-	mm = ctx->mm = current->mm;
-	atomic_inc(&mm->mm_count);
 
 	atomic_set(&ctx->users, 2);
+	atomic_set(&ctx->dead, 0);
 	spin_lock_init(&ctx->ctx_lock);
-	spin_lock_init(&ctx->ring_info.ring_lock);
+	spin_lock_init(&ctx->completion_lock);
+	mutex_init(&ctx->ring_lock);
 	init_waitqueue_head(&ctx->wait);
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
-	INIT_LIST_HEAD(&ctx->run_list);
-	INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
@@ -286,64 +395,56 @@
 	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
 	spin_unlock(&mm->ioctx_lock);
 
-	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
-		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
+	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+		 ctx, ctx->user_id, mm, ctx->nr_events);
 	return ctx;
 
 out_cleanup:
 	err = -EAGAIN;
 	aio_free_ring(ctx);
 out_freectx:
-	mmdrop(mm);
 	kmem_cache_free(kioctx_cachep, ctx);
-	dprintk("aio: error allocating ioctx %d\n", err);
+	pr_debug("error allocating ioctx %d\n", err);
 	return ERR_PTR(err);
 }
 
-/* kill_ctx
- *	Cancels all outstanding aio requests on an aio context.  Used 
- *	when the processes owning a context have all exited to encourage 
+static void kill_ioctx_work(struct work_struct *work)
+{
+	struct kioctx *ctx = container_of(work, struct kioctx, rcu_work);
+
+	wake_up_all(&ctx->wait);
+	put_ioctx(ctx);
+}
+
+static void kill_ioctx_rcu(struct rcu_head *head)
+{
+	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
+
+	INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
+	schedule_work(&ctx->rcu_work);
+}
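
kill_ioctx_work() and kill_ioctx_rcu() above are the two halves of a common handoff: an RCU callback runs in softirq context and must not sleep, so it bounces the blocking part of the teardown to a workqueue. The generic shape of the idiom, with hypothetical names (illustrative only):

	struct obj {
		struct rcu_head		rcu;
		struct work_struct	work;
	};

	static void obj_teardown(struct work_struct *work)
	{
		struct obj *o = container_of(work, struct obj, work);

		/* process context: may take mutexes, wake waiters, free */
		kfree(o);
	}

	static void obj_rcu_cb(struct rcu_head *head)
	{
		struct obj *o = container_of(head, struct obj, rcu);

		INIT_WORK(&o->work, obj_teardown);	/* softirq: just queue */
		schedule_work(&o->work);
	}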
+
+/* kill_ioctx
+ *	Cancels all outstanding aio requests on an aio context.  Used
+ *	when the processes owning a context have all exited to encourage
  *	the rapid destruction of the kioctx.
  */
-static void kill_ctx(struct kioctx *ctx)
+static void kill_ioctx(struct kioctx *ctx)
 {
-	int (*cancel)(struct kiocb *, struct io_event *);
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	struct io_event res;
+	if (!atomic_xchg(&ctx->dead, 1)) {
+		hlist_del_rcu(&ctx->list);
+		/* Between hlist_del_rcu() and dropping the initial ref */
+		synchronize_rcu();
 
-	spin_lock_irq(&ctx->ctx_lock);
-	ctx->dead = 1;
-	while (!list_empty(&ctx->active_reqs)) {
-		struct list_head *pos = ctx->active_reqs.next;
-		struct kiocb *iocb = list_kiocb(pos);
-		list_del_init(&iocb->ki_list);
-		cancel = iocb->ki_cancel;
-		kiocbSetCancelled(iocb);
-		if (cancel) {
-			iocb->ki_users++;
-			spin_unlock_irq(&ctx->ctx_lock);
-			cancel(iocb, &res);
-			spin_lock_irq(&ctx->ctx_lock);
-		}
+		/*
+		 * We can't punt to workqueue here because put_ioctx() ->
+		 * free_ioctx() will unmap the ringbuffer, and that has to be
+		 * done in the original process's context. kill_ioctx_rcu/work()
+		 * exist for exit_aio(), as in that path free_ioctx() won't do
+		 * the unmap.
+		 */
+		kill_ioctx_work(&ctx->rcu_work);
 	}
-
-	if (!ctx->reqs_active)
-		goto out;
-
-	add_wait_queue(&ctx->wait, &wait);
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	while (ctx->reqs_active) {
-		spin_unlock_irq(&ctx->ctx_lock);
-		io_schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		spin_lock_irq(&ctx->ctx_lock);
-	}
-	__set_task_state(tsk, TASK_RUNNING);
-	remove_wait_queue(&ctx->wait, &wait);
-
-out:
-	spin_unlock_irq(&ctx->ctx_lock);
 }
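
The synchronize_rcu() in kill_ioctx() is what makes the bare atomic_inc() in the reworked lookup_ioctx() (further down in this patch) safe: once the grace period ends, every reader that found the context under rcu_read_lock() has either taken its reference or left the read-side section. Condensed pairing (schematic; the actual teardown goes through kill_ioctx_work()):

	/* writer: kill_ioctx() */
	hlist_del_rcu(&ctx->list);	/* no new lookups can find ctx */
	synchronize_rcu();		/* in-flight lookups have finished */
	put_ioctx(ctx);			/* safe to drop the list's reference */

	/* reader: lookup_ioctx() */
	rcu_read_lock();
	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list)
		if (ctx->user_id == ctx_id)
			atomic_inc(&ctx->users);	/* ref taken under RCU */
	rcu_read_unlock();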
 
 /* wait_on_sync_kiocb:
@@ -351,9 +452,9 @@
  */
 ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 {
-	while (iocb->ki_users) {
+	while (atomic_read(&iocb->ki_users)) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!iocb->ki_users)
+		if (!atomic_read(&iocb->ki_users))
 			break;
 		io_schedule();
 	}
@@ -362,28 +463,26 @@
 }
 EXPORT_SYMBOL(wait_on_sync_kiocb);
 
-/* exit_aio: called when the last user of mm goes away.  At this point, 
- * there is no way for any new requests to be submited or any of the 
- * io_* syscalls to be called on the context.  However, there may be 
- * outstanding requests which hold references to the context; as they 
- * go away, they will call put_ioctx and release any pinned memory
- * associated with the request (held via struct page * references).
+/*
+ * exit_aio: called when the last user of mm goes away.  At this point, there is
+ * no way for any new requests to be submitted or any of the io_* syscalls to be
+ * called on the context.
+ *
+ * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
+ * them.
  */
 void exit_aio(struct mm_struct *mm)
 {
 	struct kioctx *ctx;
+	struct hlist_node *n;
 
-	while (!hlist_empty(&mm->ioctx_list)) {
-		ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
-		hlist_del_rcu(&ctx->list);
-
-		kill_ctx(ctx);
-
+	hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
 				"exit_aio:ioctx still alive: %d %d %d\n",
-				atomic_read(&ctx->users), ctx->dead,
-				ctx->reqs_active);
+				atomic_read(&ctx->users),
+				atomic_read(&ctx->dead),
+				atomic_read(&ctx->reqs_active));
 		/*
 		 * We don't need to bother with munmap() here -
 		 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -391,150 +490,53 @@
 		 * as indicator that it needs to unmap the area,
 		 * just set it to 0; aio_free_ring() is the only
 		 * place that uses ->mmap_size, so it's safe.
-		 * That way we get all munmap done to current->mm -
-		 * all other callers have ctx->mm == current->mm.
 		 */
-		ctx->ring_info.mmap_size = 0;
-		put_ioctx(ctx);
+		ctx->mmap_size = 0;
+
+		if (!atomic_xchg(&ctx->dead, 1)) {
+			hlist_del_rcu(&ctx->list);
+			call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
+		}
 	}
 }
 
 /* aio_get_req
- *	Allocate a slot for an aio request.  Increments the users count
+ *	Allocate a slot for an aio request.  Increments the ki_users count
  * of the kioctx so that the kioctx stays around until all requests are
  * complete.  Returns NULL if no requests are free.
  *
- * Returns with kiocb->users set to 2.  The io submit code path holds
+ * Returns with kiocb->ki_users set to 2.  The io submit code path holds
  * an extra reference while submitting the i/o.
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
-{
-	struct kiocb *req = NULL;
-
-	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
-	if (unlikely(!req))
-		return NULL;
-
-	req->ki_flags = 0;
-	req->ki_users = 2;
-	req->ki_key = 0;
-	req->ki_ctx = ctx;
-	req->ki_cancel = NULL;
-	req->ki_retry = NULL;
-	req->ki_dtor = NULL;
-	req->private = NULL;
-	req->ki_iovec = NULL;
-	INIT_LIST_HEAD(&req->ki_run_list);
-	req->ki_eventfd = NULL;
-
-	return req;
-}
-
-/*
- * struct kiocb's are allocated in batches to reduce the number of
- * times the ctx lock is acquired and released.
- */
-#define KIOCB_BATCH_SIZE	32L
-struct kiocb_batch {
-	struct list_head head;
-	long count; /* number of requests left to allocate */
-};
-
-static void kiocb_batch_init(struct kiocb_batch *batch, long total)
-{
-	INIT_LIST_HEAD(&batch->head);
-	batch->count = total;
-}
-
-static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-	struct kiocb *req, *n;
-
-	if (list_empty(&batch->head))
-		return;
-
-	spin_lock_irq(&ctx->ctx_lock);
-	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-		list_del(&req->ki_batch);
-		list_del(&req->ki_list);
-		kmem_cache_free(kiocb_cachep, req);
-		ctx->reqs_active--;
-	}
-	if (unlikely(!ctx->reqs_active && ctx->dead))
-		wake_up_all(&ctx->wait);
-	spin_unlock_irq(&ctx->ctx_lock);
-}
-
-/*
- * Allocate a batch of kiocbs.  This avoids taking and dropping the
- * context lock a lot during setup.
- */
-static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-	unsigned short allocated, to_alloc;
-	long avail;
-	struct kiocb *req, *n;
-	struct aio_ring *ring;
-
-	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
-	for (allocated = 0; allocated < to_alloc; allocated++) {
-		req = __aio_get_req(ctx);
-		if (!req)
-			/* allocation failed, go with what we've got */
-			break;
-		list_add(&req->ki_batch, &batch->head);
-	}
-
-	if (allocated == 0)
-		goto out;
-
-	spin_lock_irq(&ctx->ctx_lock);
-	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
-
-	avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
-	BUG_ON(avail < 0);
-	if (avail < allocated) {
-		/* Trim back the number of requests. */
-		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-			list_del(&req->ki_batch);
-			kmem_cache_free(kiocb_cachep, req);
-			if (--allocated <= avail)
-				break;
-		}
-	}
-
-	batch->count -= allocated;
-	list_for_each_entry(req, &batch->head, ki_batch) {
-		list_add(&req->ki_list, &ctx->active_reqs);
-		ctx->reqs_active++;
-	}
-
-	kunmap_atomic(ring);
-	spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-	return allocated;
-}
-
-static inline struct kiocb *aio_get_req(struct kioctx *ctx,
-					struct kiocb_batch *batch)
+static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req;
 
-	if (list_empty(&batch->head))
-		if (kiocb_batch_refill(ctx, batch) == 0)
-			return NULL;
-	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
-	list_del(&req->ki_batch);
+	if (atomic_read(&ctx->reqs_active) >= ctx->nr_events)
+		return NULL;
+
+	if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1)
+		goto out_put;
+
+	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
+	if (unlikely(!req))
+		goto out_put;
+
+	atomic_set(&req->ki_users, 2);
+	req->ki_ctx = ctx;
+
 	return req;
+out_put:
+	atomic_dec(&ctx->reqs_active);
+	return NULL;
 }
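
Note the two-step admission in aio_get_req(): the plain atomic_read() rejects submissions on a full ring without dirtying the counter's cacheline, and atomic_inc_return() then reserves a slot, rolled back if two submitters raced past the first check. The idiom in isolation, with limit standing in for ctx->nr_events - 1 (illustrative):

	if (atomic_read(&ctx->reqs_active) > limit)
		return NULL;				/* cheap, read-only reject */

	if (atomic_inc_return(&ctx->reqs_active) > limit) {
		atomic_dec(&ctx->reqs_active);		/* raced: undo the reserve */
		return NULL;
	}
	/* slot reserved; any later failure must atomic_dec() it back */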
 
-static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
+static void kiocb_free(struct kiocb *req)
 {
-	assert_spin_locked(&ctx->ctx_lock);
-
+	if (req->ki_filp)
+		fput(req->ki_filp);
 	if (req->ki_eventfd != NULL)
 		eventfd_ctx_put(req->ki_eventfd);
 	if (req->ki_dtor)
@@ -542,48 +544,12 @@
 	if (req->ki_iovec != &req->ki_inline_vec)
 		kfree(req->ki_iovec);
 	kmem_cache_free(kiocb_cachep, req);
-	ctx->reqs_active--;
-
-	if (unlikely(!ctx->reqs_active && ctx->dead))
-		wake_up_all(&ctx->wait);
 }
 
-/* __aio_put_req
- *	Returns true if this put was the last user of the request.
- */
-static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+void aio_put_req(struct kiocb *req)
 {
-	dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
-		req, atomic_long_read(&req->ki_filp->f_count));
-
-	assert_spin_locked(&ctx->ctx_lock);
-
-	req->ki_users--;
-	BUG_ON(req->ki_users < 0);
-	if (likely(req->ki_users))
-		return 0;
-	list_del(&req->ki_list);		/* remove from active_reqs */
-	req->ki_cancel = NULL;
-	req->ki_retry = NULL;
-
-	fput(req->ki_filp);
-	req->ki_filp = NULL;
-	really_put_req(ctx, req);
-	return 1;
-}
-
-/* aio_put_req
- *	Returns true if this put was the last user of the kiocb,
- *	false if the request is still in use.
- */
-int aio_put_req(struct kiocb *req)
-{
-	struct kioctx *ctx = req->ki_ctx;
-	int ret;
-	spin_lock_irq(&ctx->ctx_lock);
-	ret = __aio_put_req(ctx, req);
-	spin_unlock_irq(&ctx->ctx_lock);
-	return ret;
+	if (atomic_dec_and_test(&req->ki_users))
+		kiocb_free(req);
 }
 EXPORT_SYMBOL(aio_put_req);
 
@@ -595,13 +561,8 @@
 	rcu_read_lock();
 
 	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
-		/*
-		 * RCU protects us against accessing freed memory but
-		 * we have to be careful not to get a reference when the
-		 * reference count already dropped to 0 (ctx->dead test
-		 * is unreliable because of races).
-		 */
-		if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
+		if (ctx->user_id == ctx_id) {
+			atomic_inc(&ctx->users);
 			ret = ctx;
 			break;
 		}
@@ -611,295 +572,16 @@
 	return ret;
 }
 
-/*
- * Queue up a kiocb to be retried. Assumes that the kiocb
- * has already been marked as kicked, and places it on
- * the retry run list for the corresponding ioctx, if it
- * isn't already queued. Returns 1 if it actually queued
- * the kiocb (to tell the caller to activate the work
- * queue to process it), or 0, if it found that it was
- * already queued.
- */
-static inline int __queue_kicked_iocb(struct kiocb *iocb)
-{
-	struct kioctx *ctx = iocb->ki_ctx;
-
-	assert_spin_locked(&ctx->ctx_lock);
-
-	if (list_empty(&iocb->ki_run_list)) {
-		list_add_tail(&iocb->ki_run_list,
-			&ctx->run_list);
-		return 1;
-	}
-	return 0;
-}
-
-/* aio_run_iocb
- *	This is the core aio execution routine. It is
- *	invoked both for initial i/o submission and
- *	subsequent retries via the aio_kick_handler.
- *	Expects to be invoked with iocb->ki_ctx->lock
- *	already held. The lock is released and reacquired
- *	as needed during processing.
- *
- * Calls the iocb retry method (already setup for the
- * iocb on initial submission) for operation specific
- * handling, but takes care of most of common retry
- * execution details for a given iocb. The retry method
- * needs to be non-blocking as far as possible, to avoid
- * holding up other iocbs waiting to be serviced by the
- * retry kernel thread.
- *
- * The trickier parts in this code have to do with
- * ensuring that only one retry instance is in progress
- * for a given iocb at any time. Providing that guarantee
- * simplifies the coding of individual aio operations as
- * it avoids various potential races.
- */
-static ssize_t aio_run_iocb(struct kiocb *iocb)
-{
-	struct kioctx	*ctx = iocb->ki_ctx;
-	ssize_t (*retry)(struct kiocb *);
-	ssize_t ret;
-
-	if (!(retry = iocb->ki_retry)) {
-		printk("aio_run_iocb: iocb->ki_retry = NULL\n");
-		return 0;
-	}
-
-	/*
-	 * We don't want the next retry iteration for this
-	 * operation to start until this one has returned and
-	 * updated the iocb state. However, wait_queue functions
-	 * can trigger a kick_iocb from interrupt context in the
-	 * meantime, indicating that data is available for the next
-	 * iteration. We want to remember that and enable the
-	 * next retry iteration _after_ we are through with
-	 * this one.
-	 *
-	 * So, in order to be able to register a "kick", but
-	 * prevent it from being queued now, we clear the kick
-	 * flag, but make the kick code *think* that the iocb is
-	 * still on the run list until we are actually done.
-	 * When we are done with this iteration, we check if
-	 * the iocb was kicked in the meantime and if so, queue
-	 * it up afresh.
-	 */
-
-	kiocbClearKicked(iocb);
-
-	/*
-	 * This is so that aio_complete knows it doesn't need to
-	 * pull the iocb off the run list (We can't just call
-	 * INIT_LIST_HEAD because we don't want a kick_iocb to
-	 * queue this on the run list yet)
-	 */
-	iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
-	spin_unlock_irq(&ctx->ctx_lock);
-
-	/* Quit retrying if the i/o has been cancelled */
-	if (kiocbIsCancelled(iocb)) {
-		ret = -EINTR;
-		aio_complete(iocb, ret, 0);
-		/* must not access the iocb after this */
-		goto out;
-	}
-
-	/*
-	 * Now we are all set to call the retry method in async
-	 * context.
-	 */
-	ret = retry(iocb);
-
-	if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
-		/*
-		 * There's no easy way to restart the syscall since other AIO's
-		 * may be already running. Just fail this IO with EINTR.
-		 */
-		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
-			     ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
-			ret = -EINTR;
-		aio_complete(iocb, ret, 0);
-	}
-out:
-	spin_lock_irq(&ctx->ctx_lock);
-
-	if (-EIOCBRETRY == ret) {
-		/*
-		 * OK, now that we are done with this iteration
-		 * and know that there is more left to go,
-		 * this is where we let go so that a subsequent
-		 * "kick" can start the next iteration
-		 */
-
-		/* will make __queue_kicked_iocb succeed from here on */
-		INIT_LIST_HEAD(&iocb->ki_run_list);
-		/* we must queue the next iteration ourselves, if it
-		 * has already been kicked */
-		if (kiocbIsKicked(iocb)) {
-			__queue_kicked_iocb(iocb);
-
-			/*
-			 * __queue_kicked_iocb will always return 1 here, because
-			 * iocb->ki_run_list is empty at this point so it should
-			 * be safe to unconditionally queue the context into the
-			 * work queue.
-			 */
-			aio_queue_work(ctx);
-		}
-	}
-	return ret;
-}
-
-/*
- * __aio_run_iocbs:
- * 	Process all pending retries queued on the ioctx
- * 	run list.
- * Assumes it is operating within the aio issuer's mm
- * context.
- */
-static int __aio_run_iocbs(struct kioctx *ctx)
-{
-	struct kiocb *iocb;
-	struct list_head run_list;
-
-	assert_spin_locked(&ctx->ctx_lock);
-
-	list_replace_init(&ctx->run_list, &run_list);
-	while (!list_empty(&run_list)) {
-		iocb = list_entry(run_list.next, struct kiocb,
-			ki_run_list);
-		list_del(&iocb->ki_run_list);
-		/*
-		 * Hold an extra reference while retrying i/o.
-		 */
-		iocb->ki_users++;       /* grab extra reference */
-		aio_run_iocb(iocb);
-		__aio_put_req(ctx, iocb);
- 	}
-	if (!list_empty(&ctx->run_list))
-		return 1;
-	return 0;
-}
-
-static void aio_queue_work(struct kioctx * ctx)
-{
-	unsigned long timeout;
-	/*
-	 * if someone is waiting, get the work started right
-	 * away, otherwise, use a longer delay
-	 */
-	smp_mb();
-	if (waitqueue_active(&ctx->wait))
-		timeout = 1;
-	else
-		timeout = HZ/10;
-	queue_delayed_work(aio_wq, &ctx->wq, timeout);
-}
-
-/*
- * aio_run_all_iocbs:
- *	Process all pending retries queued on the ioctx
- *	run list, and keep running them until the list
- *	stays empty.
- * Assumes it is operating within the aio issuer's mm context.
- */
-static inline void aio_run_all_iocbs(struct kioctx *ctx)
-{
-	spin_lock_irq(&ctx->ctx_lock);
-	while (__aio_run_iocbs(ctx))
-		;
-	spin_unlock_irq(&ctx->ctx_lock);
-}
-
-/*
- * aio_kick_handler:
- * 	Work queue handler triggered to process pending
- * 	retries on an ioctx. Takes on the aio issuer's
- *	mm context before running the iocbs, so that
- *	copy_xxx_user operates on the issuer's address
- *      space.
- * Run on aiod's context.
- */
-static void aio_kick_handler(struct work_struct *work)
-{
-	struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
-	mm_segment_t oldfs = get_fs();
-	struct mm_struct *mm;
-	int requeue;
-
-	set_fs(USER_DS);
-	use_mm(ctx->mm);
-	spin_lock_irq(&ctx->ctx_lock);
-	requeue =__aio_run_iocbs(ctx);
-	mm = ctx->mm;
-	spin_unlock_irq(&ctx->ctx_lock);
- 	unuse_mm(mm);
-	set_fs(oldfs);
-	/*
-	 * we're in a worker thread already; no point using non-zero delay
-	 */
-	if (requeue)
-		queue_delayed_work(aio_wq, &ctx->wq, 0);
-}
-
-
-/*
- * Called by kick_iocb to queue the kiocb for retry
- * and if required activate the aio work queue to process
- * it
- */
-static void try_queue_kicked_iocb(struct kiocb *iocb)
-{
- 	struct kioctx	*ctx = iocb->ki_ctx;
-	unsigned long flags;
-	int run = 0;
-
-	spin_lock_irqsave(&ctx->ctx_lock, flags);
-	/* set this inside the lock so that we can't race with aio_run_iocb()
-	 * testing it and putting the iocb on the run list under the lock */
-	if (!kiocbTryKick(iocb))
-		run = __queue_kicked_iocb(iocb);
-	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-	if (run)
-		aio_queue_work(ctx);
-}
-
-/*
- * kick_iocb:
- *      Called typically from a wait queue callback context
- *      to trigger a retry of the iocb.
- *      The retry is usually executed by aio workqueue
- *      threads (See aio_kick_handler).
- */
-void kick_iocb(struct kiocb *iocb)
-{
-	/* sync iocbs are easy: they can only ever be executing from a 
-	 * single context. */
-	if (is_sync_kiocb(iocb)) {
-		kiocbSetKicked(iocb);
-	        wake_up_process(iocb->ki_obj.tsk);
-		return;
-	}
-
-	try_queue_kicked_iocb(iocb);
-}
-EXPORT_SYMBOL(kick_iocb);
-
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
- *	Returns true if this is the last user of the request.  The 
- *	only other user of the request can be the cancellation code.
  */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+void aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
-	struct aio_ring_info	*info;
 	struct aio_ring	*ring;
-	struct io_event	*event;
+	struct io_event	*ev_page, *event;
 	unsigned long	flags;
-	unsigned long	tail;
-	int		ret;
+	unsigned tail, pos;
 
 	/*
 	 * Special case handling for sync iocbs:
@@ -909,61 +591,81 @@
 	 *  - the sync task helpfully left a reference to itself in the iocb
 	 */
 	if (is_sync_kiocb(iocb)) {
-		BUG_ON(iocb->ki_users != 1);
+		BUG_ON(atomic_read(&iocb->ki_users) != 1);
 		iocb->ki_user_data = res;
-		iocb->ki_users = 0;
+		atomic_set(&iocb->ki_users, 0);
 		wake_up_process(iocb->ki_obj.tsk);
-		return 1;
+		return;
 	}
 
-	info = &ctx->ring_info;
-
-	/* add a completion event to the ring buffer.
-	 * must be done holding ctx->ctx_lock to prevent
-	 * other code from messing with the tail
-	 * pointer since we might be called from irq
-	 * context.
+	/*
+	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
+	 * need to issue a wakeup after decrementing reqs_active.
 	 */
-	spin_lock_irqsave(&ctx->ctx_lock, flags);
+	rcu_read_lock();
 
-	if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
-		list_del_init(&iocb->ki_run_list);
+	if (iocb->ki_list.next) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->ctx_lock, flags);
+		list_del(&iocb->ki_list);
+		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+	}
 
 	/*
 	 * cancelled requests don't get events, userland was given one
 	 * when the event got cancelled.
 	 */
-	if (kiocbIsCancelled(iocb))
+	if (unlikely(xchg(&iocb->ki_cancel,
+			  KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
+		atomic_dec(&ctx->reqs_active);
+		/* Still need the wake_up in case free_ioctx is waiting */
 		goto put_rq;
+	}
 
-	ring = kmap_atomic(info->ring_pages[0]);
+	/*
+	 * Add a completion event to the ring buffer. Must be done holding
+	 * ctx->completion_lock to prevent other code from messing with the tail
+	 * pointer since we might be called from irq context.
+	 */
+	spin_lock_irqsave(&ctx->completion_lock, flags);
 
-	tail = info->tail;
-	event = aio_ring_event(info, tail);
-	if (++tail >= info->nr)
+	tail = ctx->tail;
+	pos = tail + AIO_EVENTS_OFFSET;
+
+	if (++tail >= ctx->nr_events)
 		tail = 0;
 
+	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
+
 	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
 	event->data = iocb->ki_user_data;
 	event->res = res;
 	event->res2 = res2;
 
-	dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
-		ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
-		res, res2);
+	kunmap_atomic(ev_page);
+	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+
+	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
+		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
+		 res, res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
 	 */
 	smp_wmb();	/* make event visible before updating tail */
 
-	info->tail = tail;
+	ctx->tail = tail;
+
+	ring = kmap_atomic(ctx->ring_pages[0]);
 	ring->tail = tail;
-
-	put_aio_ring_event(event);
 	kunmap_atomic(ring);
+	flush_dcache_page(ctx->ring_pages[0]);
 
-	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
+	spin_unlock_irqrestore(&ctx->completion_lock, flags);
+
+	pr_debug("added to ring %p at [%u]\n", iocb, tail);
 
 	/*
 	 * Check if the user asked us to deliver the result through an
@@ -975,7 +677,7 @@
 
 put_rq:
 	/* everything turned out well, dispose of the aiocb. */
-	ret = __aio_put_req(ctx, iocb);
+	aio_put_req(iocb);
 
 	/*
 	 * We have to order our ring_info tail store above and test
@@ -988,233 +690,133 @@
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
 
-	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-	return ret;
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(aio_complete);
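
The ordering in aio_complete() is the classic single-producer publish: fill the event slot, issue a write barrier, then expose the new tail. A consumer must read the tail first and the event data second, with a matching read barrier. Schematically, with completed_io_event and ring_slot() as stand-ins (the consumer's barrier is implied rather than spelled out in this patch):

	/* producer (aio_complete), simplified */
	*event = completed_io_event;	/* fill the slot at the old tail */
	smp_wmb();			/* make contents visible ... */
	ring->tail = new_tail;		/* ... before publishing the tail */

	/* consumer, simplified */
	tail = ring->tail;		/* observe the published tail */
	smp_rmb();			/* pairs with the producer's smp_wmb() */
	ev = ring_slot(head);		/* slots before tail are now stable */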
 
-/* aio_read_evt
- *	Pull an event off of the ioctx's event ring.  Returns the number of 
- *	events fetched (0 or 1 ;-)
- *	FIXME: make this use cmpxchg.
- *	TODO: make the ringbuffer user mmap()able (requires FIXME).
+/* aio_read_events
+ *	Pull events off of the ioctx's event ring.  Returns the number of
+ *	events fetched.
  */
-static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
+static long aio_read_events_ring(struct kioctx *ctx,
+				 struct io_event __user *event, long nr)
 {
-	struct aio_ring_info *info = &ioctx->ring_info;
 	struct aio_ring *ring;
-	unsigned long head;
-	int ret = 0;
+	unsigned head, pos;
+	long ret = 0;
+	int copy_ret;
 
-	ring = kmap_atomic(info->ring_pages[0]);
-	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
-		 (unsigned long)ring->head, (unsigned long)ring->tail,
-		 (unsigned long)ring->nr);
+	mutex_lock(&ctx->ring_lock);
 
-	if (ring->head == ring->tail)
+	ring = kmap_atomic(ctx->ring_pages[0]);
+	head = ring->head;
+	kunmap_atomic(ring);
+
+	pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
+
+	if (head == ctx->tail)
 		goto out;
 
-	spin_lock(&info->ring_lock);
+	while (ret < nr) {
+		long avail;
+		struct io_event *ev;
+		struct page *page;
 
-	head = ring->head % info->nr;
-	if (head != ring->tail) {
-		struct io_event *evp = aio_ring_event(info, head);
-		*ent = *evp;
-		head = (head + 1) % info->nr;
-		smp_mb(); /* finish reading the event before updatng the head */
-		ring->head = head;
-		ret = 1;
-		put_aio_ring_event(evp);
+		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
+		if (head == ctx->tail)
+			break;
+
+		avail = min(avail, nr - ret);
+		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
+			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));
+
+		pos = head + AIO_EVENTS_OFFSET;
+		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
+		pos %= AIO_EVENTS_PER_PAGE;
+
+		ev = kmap(page);
+		copy_ret = copy_to_user(event + ret, ev + pos,
+					sizeof(*ev) * avail);
+		kunmap(page);
+
+		if (unlikely(copy_ret)) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret += avail;
+		head += avail;
+		head %= ctx->nr_events;
 	}
-	spin_unlock(&info->ring_lock);
 
-out:
-	dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
-		 (unsigned long)ring->head, (unsigned long)ring->tail);
+	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring->head = head;
 	kunmap_atomic(ring);
+	flush_dcache_page(ctx->ring_pages[0]);
+
+	pr_debug("%li  h%u t%u\n", ret, head, ctx->tail);
+
+	atomic_sub(ret, &ctx->reqs_active);
+out:
+	mutex_unlock(&ctx->ring_lock);
+
 	return ret;
 }
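
Each pass of the copy loop in aio_read_events_ring() is clamped three ways: by the events actually in the ring, by what the caller still wants, and by the distance to the next page boundary, since events are copied with at most one kmap() per page. The three clamps could equivalently be written with min3(), with events_in_ring standing in for the wrap-aware count computed above (illustrative):

	avail = min3((long)events_in_ring,		/* what the ring holds */
		     nr - ret,				/* what the caller wants */
		     (long)(AIO_EVENTS_PER_PAGE -	/* what fits in this page */
			    (head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));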
 
-struct aio_timeout {
-	struct timer_list	timer;
-	int			timed_out;
-	struct task_struct	*p;
-};
-
-static void timeout_func(unsigned long data)
+static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
+			    struct io_event __user *event, long *i)
 {
-	struct aio_timeout *to = (struct aio_timeout *)data;
+	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
 
-	to->timed_out = 1;
-	wake_up_process(to->p);
+	if (ret > 0)
+		*i += ret;
+
+	if (unlikely(atomic_read(&ctx->dead)))
+		ret = -EINVAL;
+
+	if (!*i)
+		*i = ret;
+
+	return ret < 0 || *i >= min_nr;
 }
 
-static inline void init_timeout(struct aio_timeout *to)
-{
-	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
-	to->timed_out = 0;
-	to->p = current;
-}
-
-static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
-			       const struct timespec *ts)
-{
-	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
-	if (time_after(to->timer.expires, jiffies))
-		add_timer(&to->timer);
-	else
-		to->timed_out = 1;
-}
-
-static inline void clear_timeout(struct aio_timeout *to)
-{
-	del_singleshot_timer_sync(&to->timer);
-}
-
-static int read_events(struct kioctx *ctx,
-			long min_nr, long nr,
+static long read_events(struct kioctx *ctx, long min_nr, long nr,
 			struct io_event __user *event,
 			struct timespec __user *timeout)
 {
-	long			start_jiffies = jiffies;
-	struct task_struct	*tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	int			ret;
-	int			i = 0;
-	struct io_event		ent;
-	struct aio_timeout	to;
-	int			retry = 0;
+	ktime_t until = { .tv64 = KTIME_MAX };
+	long ret = 0;
 
-	/* needed to zero any padding within an entry (there shouldn't be 
-	 * any, but C is fun!
-	 */
-	memset(&ent, 0, sizeof(ent));
-retry:
-	ret = 0;
-	while (likely(i < nr)) {
-		ret = aio_read_evt(ctx, &ent);
-		if (unlikely(ret <= 0))
-			break;
-
-		dprintk("read event: %Lx %Lx %Lx %Lx\n",
-			ent.data, ent.obj, ent.res, ent.res2);
-
-		/* Could we split the check in two? */
-		ret = -EFAULT;
-		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
-			dprintk("aio: lost an event due to EFAULT.\n");
-			break;
-		}
-		ret = 0;
-
-		/* Good, event copied to userland, update counts. */
-		event ++;
-		i ++;
-	}
-
-	if (min_nr <= i)
-		return i;
-	if (ret)
-		return ret;
-
-	/* End fast path */
-
-	/* racey check, but it gets redone */
-	if (!retry && unlikely(!list_empty(&ctx->run_list))) {
-		retry = 1;
-		aio_run_all_iocbs(ctx);
-		goto retry;
-	}
-
-	init_timeout(&to);
 	if (timeout) {
 		struct timespec	ts;
-		ret = -EFAULT;
+
 		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
-			goto out;
+			return -EFAULT;
 
-		set_timeout(start_jiffies, &to, &ts);
+		until = timespec_to_ktime(ts);
 	}
 
-	while (likely(i < nr)) {
-		add_wait_queue_exclusive(&ctx->wait, &wait);
-		do {
-			set_task_state(tsk, TASK_INTERRUPTIBLE);
-			ret = aio_read_evt(ctx, &ent);
-			if (ret)
-				break;
-			if (min_nr <= i)
-				break;
-			if (unlikely(ctx->dead)) {
-				ret = -EINVAL;
-				break;
-			}
-			if (to.timed_out)	/* Only check after read evt */
-				break;
-			/* Try to only show up in io wait if there are ops
-			 *  in flight */
-			if (ctx->reqs_active)
-				io_schedule();
-			else
-				schedule();
-			if (signal_pending(tsk)) {
-				ret = -EINTR;
-				break;
-			}
-			/*ret = aio_read_evt(ctx, &ent);*/
-		} while (1) ;
-
-		set_task_state(tsk, TASK_RUNNING);
-		remove_wait_queue(&ctx->wait, &wait);
-
-		if (unlikely(ret <= 0))
-			break;
-
-		ret = -EFAULT;
-		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
-			dprintk("aio: lost an event due to EFAULT.\n");
-			break;
-		}
-
-		/* Good, event copied to userland, update counts. */
-		event ++;
-		i ++;
-	}
-
-	if (timeout)
-		clear_timeout(&to);
-out:
-	destroy_timer_on_stack(&to.timer);
-	return i ? i : ret;
-}
-
-/* Take an ioctx and remove it from the list of ioctx's.  Protects 
- * against races with itself via ->dead.
- */
-static void io_destroy(struct kioctx *ioctx)
-{
-	struct mm_struct *mm = current->mm;
-	int was_dead;
-
-	/* delete the entry from the list is someone else hasn't already */
-	spin_lock(&mm->ioctx_lock);
-	was_dead = ioctx->dead;
-	ioctx->dead = 1;
-	hlist_del_rcu(&ioctx->list);
-	spin_unlock(&mm->ioctx_lock);
-
-	dprintk("aio_release(%p)\n", ioctx);
-	if (likely(!was_dead))
-		put_ioctx(ioctx);	/* twice for the list */
-
-	kill_ctx(ioctx);
-
 	/*
-	 * Wake up any waiters.  The setting of ctx->dead must be seen
-	 * by other CPUs at this point.  Right now, we rely on the
-	 * locking done by the above calls to ensure this consistency.
+	 * Note that aio_read_events() is being called as the conditional - i.e.
+	 * we're calling it after prepare_to_wait() has set task state to
+	 * TASK_INTERRUPTIBLE.
+	 *
+	 * But aio_read_events() can block, and if it blocks it's going to flip
+	 * the task state back to TASK_RUNNING.
+	 *
+	 * This should be ok, provided it doesn't flip the state back to
+	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
+	 * will only happen if the mutex_lock() call blocks, and we then find
+	 * the ringbuffer empty. So in practice we should be ok, but it's
+	 * something to be aware of when touching this code.
 	 */
-	wake_up_all(&ioctx->wait);
+	wait_event_interruptible_hrtimeout(ctx->wait,
+			aio_read_events(ctx, min_nr, nr, event, &ret), until);
+
+	if (!ret && signal_pending(current))
+		ret = -EINTR;
+
+	return ret;
 }
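
For reference, the wait above behaves roughly like the open-coded loop below (a sketch only; the real wait_event_interruptible_hrtimeout() macro drives the timeout with an hrtimer_sleeper rather than schedule_hrtimeout()):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&ctx->wait, &wait, TASK_INTERRUPTIBLE);
		if (aio_read_events(ctx, min_nr, nr, event, &ret))
			break;		/* >= min_nr events, or an error */
		if (signal_pending(current))
			break;		/* caller turns this into -EINTR */
		if (!schedule_hrtimeout(&until, HRTIMER_MODE_REL))
			break;		/* 0 means the timeout expired */
	}
	finish_wait(&ctx->wait, &wait);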
 
 /* sys_io_setup:
@@ -1252,7 +854,7 @@
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
 		if (ret)
-			io_destroy(ioctx);
+			kill_ioctx(ioctx);
 		put_ioctx(ioctx);
 	}
 
@@ -1270,7 +872,7 @@
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
-		io_destroy(ioctx);
+		kill_ioctx(ioctx);
 		put_ioctx(ioctx);
 		return 0;
 	}
@@ -1301,30 +903,21 @@
 	BUG_ON(ret > 0 && iocb->ki_left == 0);
 }
 
-static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
+typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
+			    unsigned long, loff_t);
+
+static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
-			 unsigned long, loff_t);
 	ssize_t ret = 0;
-	unsigned short opcode;
-
-	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
-		(iocb->ki_opcode == IOCB_CMD_PREAD)) {
-		rw_op = file->f_op->aio_read;
-		opcode = IOCB_CMD_PREADV;
-	} else {
-		rw_op = file->f_op->aio_write;
-		opcode = IOCB_CMD_PWRITEV;
-	}
 
 	/* This matches the pread()/pwrite() logic */
 	if (iocb->ki_pos < 0)
 		return -EINVAL;
 
-	if (opcode == IOCB_CMD_PWRITEV)
+	if (rw == WRITE)
 		file_start_write(file);
 	do {
 		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
@@ -1336,9 +929,9 @@
 	/* retry all partial writes.  retry partial reads as long as its a
 	 * regular file. */
 	} while (ret > 0 && iocb->ki_left > 0 &&
-		 (opcode == IOCB_CMD_PWRITEV ||
+		 (rw == WRITE ||
 		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
-	if (opcode == IOCB_CMD_PWRITEV)
+	if (rw == WRITE)
 		file_end_write(file);
 
 	/* This means we must have transferred all that we could */
@@ -1348,81 +941,49 @@
 
 	/* If we managed to write some out we return that, rather than
 	 * the eventual error. */
-	if (opcode == IOCB_CMD_PWRITEV
-	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
+	if (rw == WRITE
+	    && ret < 0 && ret != -EIOCBQUEUED
 	    && iocb->ki_nbytes - iocb->ki_left)
 		ret = iocb->ki_nbytes - iocb->ki_left;
 
 	return ret;
 }
 
-static ssize_t aio_fdsync(struct kiocb *iocb)
-{
-	struct file *file = iocb->ki_filp;
-	ssize_t ret = -EINVAL;
-
-	if (file->f_op->aio_fsync)
-		ret = file->f_op->aio_fsync(iocb, 1);
-	return ret;
-}
-
-static ssize_t aio_fsync(struct kiocb *iocb)
-{
-	struct file *file = iocb->ki_filp;
-	ssize_t ret = -EINVAL;
-
-	if (file->f_op->aio_fsync)
-		ret = file->f_op->aio_fsync(iocb, 0);
-	return ret;
-}
-
-static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
 {
 	ssize_t ret;
 
+	kiocb->ki_nr_segs = kiocb->ki_nbytes;
+
 #ifdef CONFIG_COMPAT
 	if (compat)
-		ret = compat_rw_copy_check_uvector(type,
+		ret = compat_rw_copy_check_uvector(rw,
 				(struct compat_iovec __user *)kiocb->ki_buf,
-				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+				kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
 				&kiocb->ki_iovec);
 	else
 #endif
-		ret = rw_copy_check_uvector(type,
+		ret = rw_copy_check_uvector(rw,
 				(struct iovec __user *)kiocb->ki_buf,
-				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+				kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
 				&kiocb->ki_iovec);
 	if (ret < 0)
-		goto out;
+		return ret;
 
-	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
-	if (ret < 0)
-		goto out;
-
-	kiocb->ki_nr_segs = kiocb->ki_nbytes;
-	kiocb->ki_cur_seg = 0;
-	/* ki_nbytes/left now reflect bytes instead of segs */
+	/* ki_nbytes now reflects bytes instead of segs */
 	kiocb->ki_nbytes = ret;
-	kiocb->ki_left = ret;
-
-	ret = 0;
-out:
-	return ret;
+	return 0;
 }
 
-static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb)
 {
-	int bytes;
-
-	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
-	if (bytes < 0)
-		return bytes;
+	if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes)))
+		return -EFAULT;
 
 	kiocb->ki_iovec = &kiocb->ki_inline_vec;
 	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-	kiocb->ki_iovec->iov_len = bytes;
+	kiocb->ki_iovec->iov_len = kiocb->ki_nbytes;
 	kiocb->ki_nr_segs = 1;
-	kiocb->ki_cur_seg = 0;
 	return 0;
 }
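
The access_ok(!rw, ...) above leans on READ == 0 and WRITE == 1 lining up with VERIFY_READ and VERIFY_WRITE, so negating rw requests the opposite permission: a read fills the user buffer and therefore needs it writable. Spelled out, it is equivalent to:

	int type = (rw == READ) ? VERIFY_WRITE : VERIFY_READ;

	if (unlikely(!access_ok(type, kiocb->ki_buf, kiocb->ki_nbytes)))
		return -EFAULT;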
 
@@ -1431,96 +992,95 @@
  *	Performs the initial checks and aio retry method
  *	setup for the kiocb at the time of io submission.
  */
-static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+static ssize_t aio_run_iocb(struct kiocb *req, bool compat)
 {
-	struct file *file = kiocb->ki_filp;
-	ssize_t ret = 0;
+	struct file *file = req->ki_filp;
+	ssize_t ret;
+	int rw;
+	fmode_t mode;
+	aio_rw_op *rw_op;
 
-	switch (kiocb->ki_opcode) {
+	switch (req->ki_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_READ)))
-			break;
-		ret = -EFAULT;
-		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
-			kiocb->ki_left)))
-			break;
-		ret = aio_setup_single_vector(READ, file, kiocb);
-		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_read)
-			kiocb->ki_retry = aio_rw_vect_retry;
-		break;
-	case IOCB_CMD_PWRITE:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_WRITE)))
-			break;
-		ret = -EFAULT;
-		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
-			kiocb->ki_left)))
-			break;
-		ret = aio_setup_single_vector(WRITE, file, kiocb);
-		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_write)
-			kiocb->ki_retry = aio_rw_vect_retry;
-		break;
 	case IOCB_CMD_PREADV:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_READ)))
-			break;
-		ret = aio_setup_vectored_rw(READ, kiocb, compat);
-		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_read)
-			kiocb->ki_retry = aio_rw_vect_retry;
-		break;
+		mode	= FMODE_READ;
+		rw	= READ;
+		rw_op	= file->f_op->aio_read;
+		goto rw_common;
+
+	case IOCB_CMD_PWRITE:
 	case IOCB_CMD_PWRITEV:
-		ret = -EBADF;
-		if (unlikely(!(file->f_mode & FMODE_WRITE)))
-			break;
-		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
+		mode	= FMODE_WRITE;
+		rw	= WRITE;
+		rw_op	= file->f_op->aio_write;
+		goto rw_common;
+rw_common:
+		if (unlikely(!(file->f_mode & mode)))
+			return -EBADF;
+
+		if (!rw_op)
+			return -EINVAL;
+
+		ret = (req->ki_opcode == IOCB_CMD_PREADV ||
+		       req->ki_opcode == IOCB_CMD_PWRITEV)
+			? aio_setup_vectored_rw(rw, req, compat)
+			: aio_setup_single_vector(rw, req);
 		if (ret)
-			break;
-		ret = -EINVAL;
-		if (file->f_op->aio_write)
-			kiocb->ki_retry = aio_rw_vect_retry;
+			return ret;
+
+		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+		if (ret < 0)
+			return ret;
+
+		req->ki_nbytes = ret;
+		req->ki_left = ret;
+
+		ret = aio_rw_vect_retry(req, rw, rw_op);
 		break;
+
 	case IOCB_CMD_FDSYNC:
-		ret = -EINVAL;
-		if (file->f_op->aio_fsync)
-			kiocb->ki_retry = aio_fdsync;
+		if (!file->f_op->aio_fsync)
+			return -EINVAL;
+
+		ret = file->f_op->aio_fsync(req, 1);
 		break;
+
 	case IOCB_CMD_FSYNC:
-		ret = -EINVAL;
-		if (file->f_op->aio_fsync)
-			kiocb->ki_retry = aio_fsync;
+		if (!file->f_op->aio_fsync)
+			return -EINVAL;
+
+		ret = file->f_op->aio_fsync(req, 0);
 		break;
+
 	default:
-		dprintk("EINVAL: io_submit: no operation provided\n");
-		ret = -EINVAL;
+		pr_debug("EINVAL: no operation provided\n");
+		return -EINVAL;
 	}
 
-	if (!kiocb->ki_retry)
-		return ret;
+	if (ret != -EIOCBQUEUED) {
+		/*
+		 * There's no easy way to restart the syscall since other AIO's
+		 * may be already running. Just fail this IO with EINTR.
+		 */
+		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
+			     ret == -ERESTARTNOHAND ||
+			     ret == -ERESTART_RESTARTBLOCK))
+			ret = -EINTR;
+		aio_complete(req, ret, 0);
+	}
 
 	return 0;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct kiocb_batch *batch,
-			 bool compat)
+			 struct iocb *iocb, bool compat)
 {
 	struct kiocb *req;
-	struct file *file;
 	ssize_t ret;
 
 	/* enforce forwards compatibility on users */
 	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
-		pr_debug("EINVAL: io_submit: reserve field set\n");
+		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
 
@@ -1534,16 +1094,16 @@
 		return -EINVAL;
 	}
 
-	file = fget(iocb->aio_fildes);
-	if (unlikely(!file))
-		return -EBADF;
-
-	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
-	if (unlikely(!req)) {
-		fput(file);
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
 		return -EAGAIN;
+
+	req->ki_filp = fget(iocb->aio_fildes);
+	if (unlikely(!req->ki_filp)) {
+		ret = -EBADF;
+		goto out_put_req;
 	}
-	req->ki_filp = file;
+
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		/*
 		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
@@ -1559,9 +1119,9 @@
 		}
 	}
 
-	ret = put_user(req->ki_key, &user_iocb->aio_key);
+	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
 	if (unlikely(ret)) {
-		dprintk("EFAULT: aio_key\n");
+		pr_debug("EFAULT: aio_key\n");
 		goto out_put_req;
 	}
 
@@ -1573,41 +1133,14 @@
 	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
 	req->ki_opcode = iocb->aio_lio_opcode;
 
-	ret = aio_setup_iocb(req, compat);
-
+	ret = aio_run_iocb(req, compat);
 	if (ret)
 		goto out_put_req;
 
-	spin_lock_irq(&ctx->ctx_lock);
-	/*
-	 * We could have raced with io_destroy() and are currently holding a
-	 * reference to ctx which should be destroyed. We cannot submit IO
-	 * since ctx gets freed as soon as io_submit() puts its reference.  The
-	 * check here is reliable: io_destroy() sets ctx->dead before waiting
-	 * for outstanding IO and the barrier between these two is realized by
-	 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock.  Analogously we
-	 * increment ctx->reqs_active before checking for ctx->dead and the
-	 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
-	 * don't see ctx->dead set here, io_destroy() waits for our IO to
-	 * finish.
-	 */
-	if (ctx->dead) {
-		spin_unlock_irq(&ctx->ctx_lock);
-		ret = -EINVAL;
-		goto out_put_req;
-	}
-	aio_run_iocb(req);
-	if (!list_empty(&ctx->run_list)) {
-		/* drain the run list */
-		while (__aio_run_iocbs(ctx))
-			;
-	}
-	spin_unlock_irq(&ctx->ctx_lock);
-
 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
-
 out_put_req:
+	atomic_dec(&ctx->reqs_active);
 	aio_put_req(req);	/* drop extra ref to req */
 	aio_put_req(req);	/* drop i/o ref to req */
 	return ret;
@@ -1620,7 +1153,6 @@
 	long ret = 0;
 	int i = 0;
 	struct blk_plug plug;
-	struct kiocb_batch batch;
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1633,12 +1165,10 @@
 
 	ctx = lookup_ioctx(ctx_id);
 	if (unlikely(!ctx)) {
-		pr_debug("EINVAL: io_submit: invalid context id\n");
+		pr_debug("EINVAL: invalid context id\n");
 		return -EINVAL;
 	}
 
-	kiocb_batch_init(&batch, nr);
-
 	blk_start_plug(&plug);
 
 	/*
@@ -1659,13 +1189,12 @@
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
 		if (ret)
 			break;
 	}
 	blk_finish_plug(&plug);
 
-	kiocb_batch_free(ctx, &batch);
 	put_ioctx(ctx);
 	return i ? i : ret;
 }
@@ -1698,10 +1227,13 @@
 
 	assert_spin_locked(&ctx->ctx_lock);
 
+	if (key != KIOCB_KEY)
+		return NULL;
+
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each(pos, &ctx->active_reqs) {
 		struct kiocb *kiocb = list_kiocb(pos);
-		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
+		if (kiocb->ki_obj.user == iocb)
 			return kiocb;
 	}
 	return NULL;
@@ -1720,7 +1252,7 @@
 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 		struct io_event __user *, result)
 {
-	int (*cancel)(struct kiocb *iocb, struct io_event *res);
+	struct io_event res;
 	struct kioctx *ctx;
 	struct kiocb *kiocb;
 	u32 key;
@@ -1735,32 +1267,22 @@
 		return -EINVAL;
 
 	spin_lock_irq(&ctx->ctx_lock);
-	ret = -EAGAIN;
+
 	kiocb = lookup_kiocb(ctx, iocb, key);
-	if (kiocb && kiocb->ki_cancel) {
-		cancel = kiocb->ki_cancel;
-		kiocb->ki_users ++;
-		kiocbSetCancelled(kiocb);
-	} else
-		cancel = NULL;
+	if (kiocb)
+		ret = kiocb_cancel(ctx, kiocb, &res);
+	else
+		ret = -EINVAL;
+
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	if (NULL != cancel) {
-		struct io_event tmp;
-		pr_debug("calling cancel\n");
-		memset(&tmp, 0, sizeof(tmp));
-		tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
-		tmp.data = kiocb->ki_user_data;
-		ret = cancel(kiocb, &tmp);
-		if (!ret) {
-			/* Cancellation succeeded -- copy the result
-			 * into the user's buffer.
-			 */
-			if (copy_to_user(result, &tmp, sizeof(tmp)))
-				ret = -EFAULT;
-		}
-	} else
-		ret = -EINVAL;
+	if (!ret) {
+		/* Cancellation succeeded -- copy the result
+		 * into the user's buffer.
+		 */
+		if (copy_to_user(result, &res, sizeof(res)))
+			ret = -EFAULT;
+	}
 
 	put_ioctx(ctx);
 
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index a3f28f3..8fb42916 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -27,48 +27,11 @@
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 
-struct integrity_slab {
-	struct kmem_cache *slab;
-	unsigned short nr_vecs;
-	char name[8];
-};
+#define BIP_INLINE_VECS	4
 
-#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
-struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
-	IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
-};
-#undef IS
-
+static struct kmem_cache *bip_slab;
 static struct workqueue_struct *kintegrityd_wq;
 
-static inline unsigned int vecs_to_idx(unsigned int nr)
-{
-	switch (nr) {
-	case 1:
-		return 0;
-	case 2 ... 4:
-		return 1;
-	case 5 ... 16:
-		return 2;
-	case 17 ... 64:
-		return 3;
-	case 65 ... 128:
-		return 4;
-	case 129 ... BIO_MAX_PAGES:
-		return 5;
-	default:
-		BUG();
-	}
-}
-
-static inline int use_bip_pool(unsigned int idx)
-{
-	if (idx == BIOVEC_MAX_IDX)
-		return 1;
-
-	return 0;
-}
-
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:	bio to attach integrity metadata to
@@ -84,37 +47,41 @@
 						  unsigned int nr_vecs)
 {
 	struct bio_integrity_payload *bip;
-	unsigned int idx = vecs_to_idx(nr_vecs);
 	struct bio_set *bs = bio->bi_pool;
+	unsigned long idx = BIO_POOL_NONE;
+	unsigned inline_vecs;
 
-	if (!bs)
-		bs = fs_bio_set;
-
-	BUG_ON(bio == NULL);
-	bip = NULL;
-
-	/* Lower order allocations come straight from slab */
-	if (!use_bip_pool(idx))
-		bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
-
-	/* Use mempool if lower order alloc failed or max vecs were requested */
-	if (bip == NULL) {
-		idx = BIOVEC_MAX_IDX;  /* so we free the payload properly later */
+	if (!bs) {
+		bip = kmalloc(sizeof(struct bio_integrity_payload) +
+			      sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+		inline_vecs = nr_vecs;
+	} else {
 		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
-
-		if (unlikely(bip == NULL)) {
-			printk(KERN_ERR "%s: could not alloc bip\n", __func__);
-			return NULL;
-		}
+		inline_vecs = BIP_INLINE_VECS;
 	}
 
+	if (unlikely(!bip))
+		return NULL;
+
 	memset(bip, 0, sizeof(*bip));
 
+	if (nr_vecs > inline_vecs) {
+		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
+					  bs->bvec_integrity_pool);
+		if (!bip->bip_vec)
+			goto err;
+	} else {
+		bip->bip_vec = bip->bip_inline_vecs;
+	}
+
 	bip->bip_slab = idx;
 	bip->bip_bio = bio;
 	bio->bi_integrity = bip;
 
 	return bip;
+err:
+	mempool_free(bip, bs->bio_integrity_pool);
+	return NULL;
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
 
@@ -130,20 +97,18 @@
 	struct bio_integrity_payload *bip = bio->bi_integrity;
 	struct bio_set *bs = bio->bi_pool;
 
-	if (!bs)
-		bs = fs_bio_set;
-
-	BUG_ON(bip == NULL);
-
-	/* A cloned bio doesn't own the integrity metadata */
-	if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY)
-	    && bip->bip_buf != NULL)
+	if (bip->bip_owns_buf)
 		kfree(bip->bip_buf);
 
-	if (use_bip_pool(bip->bip_slab))
+	if (bs) {
+		if (bip->bip_slab != BIO_POOL_NONE)
+			bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
+				  bip->bip_slab);
+
 		mempool_free(bip, bs->bio_integrity_pool);
-	else
-		kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
+	} else {
+		kfree(bip);
+	}
 
 	bio->bi_integrity = NULL;
 }
@@ -419,6 +384,7 @@
 		return -EIO;
 	}
 
+	bip->bip_owns_buf = 1;
 	bip->bip_buf = buf;
 	bip->bip_size = len;
 	bip->bip_sector = bio->bi_sector;
@@ -694,11 +660,11 @@
 	bp->bio1.bi_integrity = &bp->bip1;
 	bp->bio2.bi_integrity = &bp->bip2;
 
-	bp->iv1 = bip->bip_vec[0];
-	bp->iv2 = bip->bip_vec[0];
+	bp->iv1 = bip->bip_vec[bip->bip_idx];
+	bp->iv2 = bip->bip_vec[bip->bip_idx];
 
-	bp->bip1.bip_vec[0] = bp->iv1;
-	bp->bip2.bip_vec[0] = bp->iv2;
+	bp->bip1.bip_vec = &bp->iv1;
+	bp->bip2.bip_vec = &bp->iv2;
 
 	bp->iv1.bv_len = sectors * bi->tuple_size;
 	bp->iv2.bv_offset += sectors * bi->tuple_size;
@@ -746,13 +712,14 @@
 
 int bioset_integrity_create(struct bio_set *bs, int pool_size)
 {
-	unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
-
 	if (bs->bio_integrity_pool)
 		return 0;
 
-	bs->bio_integrity_pool =
-		mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
+	bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
+
+	bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size);
+	if (!bs->bvec_integrity_pool)
+		return -1;
 
 	if (!bs->bio_integrity_pool)
 		return -1;
@@ -765,13 +732,14 @@
 {
 	if (bs->bio_integrity_pool)
 		mempool_destroy(bs->bio_integrity_pool);
+
+	if (bs->bvec_integrity_pool)
+		mempool_destroy(bs->bvec_integrity_pool);
 }
 EXPORT_SYMBOL(bioset_integrity_free);
 
 void __init bio_integrity_init(void)
 {
-	unsigned int i;
-
 	/*
 	 * kintegrityd won't block much but may burn a lot of CPU cycles.
 	 * Make it highpri CPU intensive wq with max concurrency of 1.
@@ -781,14 +749,10 @@
 	if (!kintegrityd_wq)
 		panic("Failed to create kintegrityd\n");
 
-	for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
-		unsigned int size;
-
-		size = sizeof(struct bio_integrity_payload)
-			+ bip_slab[i].nr_vecs * sizeof(struct bio_vec);
-
-		bip_slab[i].slab =
-			kmem_cache_create(bip_slab[i].name, size, 0,
-					  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-	}
+	bip_slab = kmem_cache_create("bio_integrity_payload",
+				     sizeof(struct bio_integrity_payload) +
+				     sizeof(struct bio_vec) * BIP_INLINE_VECS,
+				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	if (!bip_slab)
+		panic("Failed to create slab\n");
 }
diff --git a/fs/bio.c b/fs/bio.c
index b96fc6c..94bbc04 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/uio.h>
 #include <linux/iocontext.h>
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -160,12 +161,12 @@
 	return bvec_slabs[idx].nr_vecs;
 }
 
-void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 {
 	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
 
 	if (idx == BIOVEC_MAX_IDX)
-		mempool_free(bv, bs->bvec_pool);
+		mempool_free(bv, pool);
 	else {
 		struct biovec_slab *bvs = bvec_slabs + idx;
 
@@ -173,8 +174,8 @@
 	}
 }
 
-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
-			      struct bio_set *bs)
+struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
+			   mempool_t *pool)
 {
 	struct bio_vec *bvl;
 
@@ -210,7 +211,7 @@
 	 */
 	if (*idx == BIOVEC_MAX_IDX) {
 fallback:
-		bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+		bvl = mempool_alloc(pool, gfp_mask);
 	} else {
 		struct biovec_slab *bvs = bvec_slabs + *idx;
 		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
@@ -252,8 +253,8 @@
 	__bio_free(bio);
 
 	if (bs) {
-		if (bio_has_allocated_vec(bio))
-			bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+		if (bio_flagged(bio, BIO_OWNS_VEC))
+			bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 		/*
 		 * If we have front padding, adjust the bio pointer before freeing
@@ -297,6 +298,54 @@
 }
 EXPORT_SYMBOL(bio_reset);
 
+static void bio_alloc_rescue(struct work_struct *work)
+{
+	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
+	struct bio *bio;
+
+	while (1) {
+		spin_lock(&bs->rescue_lock);
+		bio = bio_list_pop(&bs->rescue_list);
+		spin_unlock(&bs->rescue_lock);
+
+		if (!bio)
+			break;
+
+		generic_make_request(bio);
+	}
+}
+
+static void punt_bios_to_rescuer(struct bio_set *bs)
+{
+	struct bio_list punt, nopunt;
+	struct bio *bio;
+
+	/*
+	 * In order to guarantee forward progress we must punt only bios that
+	 * were allocated from this bio_set; otherwise, if there was a bio on
+	 * there for a stacking driver higher up in the stack, processing it
+	 * could require allocating bios from this bio_set, and doing that from
+	 * our own rescuer would be bad.
+	 *
+	 * Since bio lists are singly linked, pop them all instead of trying to
+	 * remove from the middle of the list:
+	 */
+
+	bio_list_init(&punt);
+	bio_list_init(&nopunt);
+
+	while ((bio = bio_list_pop(current->bio_list)))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+
+	*current->bio_list = nopunt;
+
+	spin_lock(&bs->rescue_lock);
+	bio_list_merge(&bs->rescue_list, &punt);
+	spin_unlock(&bs->rescue_lock);
+
+	queue_work(bs->rescue_workqueue, &bs->rescue_work);
+}
+
 /**
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:   the GFP_ mask given to the slab allocator
@@ -314,11 +363,27 @@
  *   previously allocated bio for IO before attempting to allocate a new one.
  *   Failure to do so can cause deadlocks under memory pressure.
  *
+ *   Note that when running under generic_make_request() (i.e. any block
+ *   driver), bios are not submitted until after you return - see the code in
+ *   generic_make_request() that converts recursion into iteration, to prevent
+ *   stack overflows.
+ *
+ *   This would normally mean allocating multiple bios under
+ *   generic_make_request() would be susceptible to deadlocks, but we have
+ *   deadlock avoidance code that resubmits any blocked bios from a rescuer
+ *   thread.
+ *
+ *   However, we do not guarantee forward progress for allocations from other
+ *   mempools. Doing multiple allocations from the same mempool under
+ *   generic_make_request() should be avoided - instead, use bio_set's front_pad
+ *   for per bio allocations.
+ *
  *   RETURNS:
  *   Pointer to new bio on success, NULL on failure.
  */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	gfp_t saved_gfp = gfp_mask;
 	unsigned front_pad;
 	unsigned inline_vecs;
 	unsigned long idx = BIO_POOL_NONE;
@@ -336,7 +401,37 @@
 		front_pad = 0;
 		inline_vecs = nr_iovecs;
 	} else {
+		/*
+		 * generic_make_request() converts recursion to iteration; this
+		 * means if we're running beneath it, any bios we allocate and
+		 * submit will not be submitted (and thus freed) until after we
+		 * return.
+		 *
+		 * This exposes us to a potential deadlock if we allocate
+		 * multiple bios from the same bio_set() while running
+		 * underneath generic_make_request(). If we were to allocate
+		 * multiple bios (say a stacking block driver that was splitting
+		 * bios), we would deadlock if we exhausted the mempool's
+		 * reserve.
+		 *
+		 * We solve this, and guarantee forward progress, with a rescuer
+		 * workqueue per bio_set. If we go to allocate and there are
+		 * bios on current->bio_list, we first try the allocation
+		 * without __GFP_WAIT; if that fails, we punt those bios we
+		 * would be blocking to the rescuer workqueue before we retry
+		 * with the original gfp_flags.
+		 */
+
+		if (current->bio_list && !bio_list_empty(current->bio_list))
+			gfp_mask &= ~__GFP_WAIT;
+
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
+		if (!p && gfp_mask != saved_gfp) {
+			punt_bios_to_rescuer(bs);
+			gfp_mask = saved_gfp;
+			p = mempool_alloc(bs->bio_pool, gfp_mask);
+		}
+
 		front_pad = bs->front_pad;
 		inline_vecs = BIO_INLINE_VECS;
 	}
@@ -348,9 +443,17 @@
 	bio_init(bio);
 
 	if (nr_iovecs > inline_vecs) {
-		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
+		if (!bvl && gfp_mask != saved_gfp) {
+			punt_bios_to_rescuer(bs);
+			gfp_mask = saved_gfp;
+			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
+		}
+
 		if (unlikely(!bvl))
 			goto err_free;
+
+		bio->bi_flags |= 1 << BIO_OWNS_VEC;
 	} else if (nr_iovecs) {
 		bvl = bio->bi_inline_vecs;
 	}
@@ -652,6 +755,181 @@
 }
 EXPORT_SYMBOL(bio_add_page);
 
+struct submit_bio_ret {
+	struct completion event;
+	int error;
+};
+
+static void submit_bio_wait_endio(struct bio *bio, int error)
+{
+	struct submit_bio_ret *ret = bio->bi_private;
+
+	ret->error = error;
+	complete(&ret->event);
+}
+
+/**
+ * submit_bio_wait - submit a bio, and wait until it completes
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bio: The &struct bio which describes the I/O
+ *
+ * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
+ * bio_endio() on failure.
+ */
+int submit_bio_wait(int rw, struct bio *bio)
+{
+	struct submit_bio_ret ret;
+
+	rw |= REQ_SYNC;
+	init_completion(&ret.event);
+	bio->bi_private = &ret;
+	bio->bi_end_io = submit_bio_wait_endio;
+	submit_bio(rw, bio);
+	wait_for_completion(&ret.event);
+
+	return ret.error;
+}
+EXPORT_SYMBOL(submit_bio_wait);
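
A hypothetical caller of the new helper, reading one page synchronously (bdev, sector and page are assumed to be set up elsewhere; error handling is elided):

	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int err;

	bio->bi_bdev   = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	err = submit_bio_wait(READ, bio);	/* sleeps until bi_end_io runs */
	bio_put(bio);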
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio:	bio to advance
+ * @bytes:	number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+void bio_advance(struct bio *bio, unsigned bytes)
+{
+	if (bio_integrity(bio))
+		bio_integrity_advance(bio, bytes);
+
+	bio->bi_sector += bytes >> 9;
+	bio->bi_size -= bytes;
+
+	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+		return;
+
+	while (bytes) {
+		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+			WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
+				  bio->bi_idx, bio->bi_vcnt);
+			break;
+		}
+
+		if (bytes >= bio_iovec(bio)->bv_len) {
+			bytes -= bio_iovec(bio)->bv_len;
+			bio->bi_idx++;
+		} else {
+			bio_iovec(bio)->bv_len -= bytes;
+			bio_iovec(bio)->bv_offset += bytes;
+			bytes = 0;
+		}
+	}
+}
+EXPORT_SYMBOL(bio_advance);
+
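
To make the bookkeeping concrete, here is a worked example (values invented for illustration): a bio with two 512-byte bvecs, bi_size == 1024 and bi_idx == 0, after bio_advance(bio, 768) ends up as

	bio->bi_sector	+= 768 >> 9;	/* one full sector retired     */
	bio->bi_size	 = 256;		/* 1024 - 768                  */
	bio->bi_idx	 = 1;		/* first bvec fully consumed   */
	/* and the now-current bvec is trimmed in place: */
	bv->bv_offset	+= 256;
	bv->bv_len	 = 256;		/* 512 - 256                   */

i.e. the first 512 bytes retire a whole bvec and the remaining 256 bytes shave the front off the next one, exactly as the loop above does.
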
+/**
+ * bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure; any pages already allocated are
+ * freed before returning.
+ */
+int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+	int i;
+	struct bio_vec *bv;
+
+	bio_for_each_segment_all(bv, bio, i) {
+		bv->bv_page = alloc_page(gfp_mask);
+		if (!bv->bv_page) {
+			while (--bv >= bio->bi_io_vec)
+				__free_page(bv->bv_page);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(bio_alloc_pages);
+
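
Callers must have set bi_vcnt and each bvec's length before calling; a minimal setup sketch (hypothetical code, not from this patch):

	/* Build an n-segment bio and back every segment with a fresh page. */
	bio->bi_vcnt = n;
	bio->bi_size = n * PAGE_SIZE;
	for (i = 0; i < n; i++) {
		bio->bi_io_vec[i].bv_len    = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}

	if (bio_alloc_pages(bio, GFP_NOIO | __GFP_NOWARN)) {
		bio_put(bio);
		return -ENOMEM;
	}
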
+/**
+ * bio_copy_data - copy contents of data buffers from one bio chain to another
+ * @dst: destination bio list
+ * @src: source bio list
+ *
+ * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
+ * @src and @dst as linked lists of bios.
+ *
+ * Stops when it reaches the end of either @src or @dst - that is, copies
+ * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
+ */
+void bio_copy_data(struct bio *dst, struct bio *src)
+{
+	struct bio_vec *src_bv, *dst_bv;
+	unsigned src_offset, dst_offset, bytes;
+	void *src_p, *dst_p;
+
+	src_bv = bio_iovec(src);
+	dst_bv = bio_iovec(dst);
+
+	src_offset = src_bv->bv_offset;
+	dst_offset = dst_bv->bv_offset;
+
+	while (1) {
+		if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
+			src_bv++;
+			if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
+				src = src->bi_next;
+				if (!src)
+					break;
+
+				src_bv = bio_iovec(src);
+			}
+
+			src_offset = src_bv->bv_offset;
+		}
+
+		if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
+			dst_bv++;
+			if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
+				dst = dst->bi_next;
+				if (!dst)
+					break;
+
+				dst_bv = bio_iovec(dst);
+			}
+
+			dst_offset = dst_bv->bv_offset;
+		}
+
+		bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
+			    src_bv->bv_offset + src_bv->bv_len - src_offset);
+
+		src_p = kmap_atomic(src_bv->bv_page);
+		dst_p = kmap_atomic(dst_bv->bv_page);
+
+		memcpy(dst_p + dst_offset,
+		       src_p + src_offset,
+		       bytes);
+
+		kunmap_atomic(dst_p);
+		kunmap_atomic(src_p);
+
+		src_offset += bytes;
+		dst_offset += bytes;
+	}
+}
+EXPORT_SYMBOL(bio_copy_data);
+
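
One intended use is bouncing: give a clone its own pages, then copy the payload across before submission. A sketch under those assumptions (bio_clone_bioset() is an existing helper; @bs and the surrounding code are hypothetical):

	struct bio *clone = bio_clone_bioset(src, GFP_NOIO, bs);

	if (!clone)
		return -ENOMEM;

	if (bio_alloc_pages(clone, GFP_NOIO)) {
		bio_put(clone);
		return -ENOMEM;
	}

	bio_copy_data(clone, src);	/* note the order: dst first, then src */
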
 struct bio_map_data {
 	struct bio_vec *iovecs;
 	struct sg_iovec *sgvecs;
@@ -714,7 +992,7 @@
 	int iov_idx = 0;
 	unsigned int iov_off = 0;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *bv_addr = page_address(bvec->bv_page);
 		unsigned int bv_len = iovecs[i].bv_len;
 
@@ -896,7 +1174,7 @@
 	return bio;
 cleanup:
 	if (!map_data)
-		bio_for_each_segment(bvec, bio, i)
+		bio_for_each_segment_all(bvec, bio, i)
 			__free_page(bvec->bv_page);
 
 	bio_put(bio);
@@ -1110,7 +1388,7 @@
 	/*
 	 * make sure we dirty pages we wrote to
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
 
@@ -1216,7 +1494,7 @@
 	int i;
 	char *p = bmd->sgvecs[0].iov_base;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *addr = page_address(bvec->bv_page);
 		int len = bmd->iovecs[i].bv_len;
 
@@ -1256,7 +1534,7 @@
 	if (!reading) {
 		void *p = data;
 
-		bio_for_each_segment(bvec, bio, i) {
+		bio_for_each_segment_all(bvec, bio, i) {
 			char *addr = page_address(bvec->bv_page);
 
 			memcpy(addr, p, bvec->bv_len);
@@ -1301,11 +1579,11 @@
  */
 void bio_set_pages_dirty(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (page && !PageCompound(page))
 			set_page_dirty_lock(page);
@@ -1314,11 +1592,11 @@
 
 static void bio_release_pages(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (page)
 			put_page(page);
@@ -1367,16 +1645,16 @@
 
 void bio_check_pages_dirty(struct bio *bio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct bio_vec *bvec;
 	int nr_clean_pages = 0;
 	int i;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		struct page *page = bvec[i].bv_page;
+	bio_for_each_segment_all(bvec, bio, i) {
+		struct page *page = bvec->bv_page;
 
 		if (PageDirty(page) || PageCompound(page)) {
 			page_cache_release(page);
-			bvec[i].bv_page = NULL;
+			bvec->bv_page = NULL;
 		} else {
 			nr_clean_pages++;
 		}
@@ -1477,8 +1755,7 @@
 	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
 				bi->bi_sector + first_sectors);
 
-	BUG_ON(bi->bi_vcnt != 1 && bi->bi_vcnt != 0);
-	BUG_ON(bi->bi_idx != 0);
+	BUG_ON(bio_segments(bi) > 1);
 	atomic_set(&bp->cnt, 3);
 	bp->error = 0;
 	bp->bio1 = *bi;
@@ -1488,8 +1765,8 @@
 	bp->bio1.bi_size = first_sectors << 9;
 
 	if (bi->bi_vcnt != 0) {
-		bp->bv1 = bi->bi_io_vec[0];
-		bp->bv2 = bi->bi_io_vec[0];
+		bp->bv1 = *bio_iovec(bi);
+		bp->bv2 = *bio_iovec(bi);
 
 		if (bio_is_rw(bi)) {
 			bp->bv2.bv_offset += first_sectors << 9;
@@ -1541,7 +1818,7 @@
 	if (index >= bio->bi_idx)
 		index = bio->bi_vcnt - 1;
 
-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		if (i == index) {
 			if (offset > bv->bv_offset)
 				sectors += (offset - bv->bv_offset) / sector_sz;
@@ -1559,29 +1836,25 @@
  * create memory pools for biovec's in a bio_set.
  * use the global biovec slabs created for general use.
  */
-static int biovec_create_pools(struct bio_set *bs, int pool_entries)
+mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries)
 {
 	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
 
-	bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
-	if (!bs->bvec_pool)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void biovec_free_pools(struct bio_set *bs)
-{
-	mempool_destroy(bs->bvec_pool);
+	return mempool_create_slab_pool(pool_entries, bp->slab);
 }
 
 void bioset_free(struct bio_set *bs)
 {
+	if (bs->rescue_workqueue)
+		destroy_workqueue(bs->rescue_workqueue);
+
 	if (bs->bio_pool)
 		mempool_destroy(bs->bio_pool);
 
+	if (bs->bvec_pool)
+		mempool_destroy(bs->bvec_pool);
+
 	bioset_integrity_free(bs);
-	biovec_free_pools(bs);
 	bio_put_slab(bs);
 
 	kfree(bs);
@@ -1612,6 +1885,10 @@
 
 	bs->front_pad = front_pad;
 
+	spin_lock_init(&bs->rescue_lock);
+	bio_list_init(&bs->rescue_list);
+	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
+
 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
 	if (!bs->bio_slab) {
 		kfree(bs);
@@ -1622,9 +1899,15 @@
 	if (!bs->bio_pool)
 		goto bad;
 
-	if (!biovec_create_pools(bs, pool_size))
-		return bs;
+	bs->bvec_pool = biovec_create_pool(bs, pool_size);
+	if (!bs->bvec_pool)
+		goto bad;
 
+	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
+	if (!bs->rescue_workqueue)
+		goto bad;
+
+	return bs;
 bad:
 	bioset_free(bs);
 	return NULL;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ce08de7..2091db8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -27,6 +27,7 @@
 #include <linux/namei.h>
 #include <linux/log2.h>
 #include <linux/cleancache.h>
+#include <linux/aio.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -1045,7 +1046,7 @@
 }
 EXPORT_SYMBOL(bd_set_size);
 
-static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
+static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
 
 /*
  * bd_mutex locking:
@@ -1400,9 +1401,8 @@
 	return blkdev_get(bdev, filp->f_mode, filp);
 }
 
-static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
+static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 {
-	int ret = 0;
 	struct gendisk *disk = bdev->bd_disk;
 	struct block_device *victim = NULL;
 
@@ -1422,7 +1422,7 @@
 	}
 	if (bdev->bd_contains == bdev) {
 		if (disk->fops->release)
-			ret = disk->fops->release(disk, mode);
+			disk->fops->release(disk, mode);
 	}
 	if (!bdev->bd_openers) {
 		struct module *owner = disk->fops->owner;
@@ -1441,10 +1441,9 @@
 	bdput(bdev);
 	if (victim)
 		__blkdev_put(victim, mode, 1);
-	return ret;
 }
 
-int blkdev_put(struct block_device *bdev, fmode_t mode)
+void blkdev_put(struct block_device *bdev, fmode_t mode)
 {
 	mutex_lock(&bdev->bd_mutex);
 
@@ -1488,15 +1487,15 @@
 
 	mutex_unlock(&bdev->bd_mutex);
 
-	return __blkdev_put(bdev, mode, 0);
+	__blkdev_put(bdev, mode, 0);
 }
 EXPORT_SYMBOL(blkdev_put);
 
 static int blkdev_close(struct inode * inode, struct file * filp)
 {
 	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
-
-	return blkdev_put(bdev, filp->f_mode);
+	blkdev_put(bdev, filp->f_mode);
+	return 0;
 }
 
 static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
@@ -1557,7 +1556,7 @@
 		return 0;
 
 	size -= pos;
-	if (size < INT_MAX)
+	if (size < iocb->ki_left)
 		nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
 	return generic_file_aio_read(iocb, iov, nr_segs, pos);
 }
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 9a8622a..2b3b832 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -1,5 +1,5 @@
 config BTRFS_FS
-	tristate "Btrfs filesystem Unstable disk format"
+	tristate "Btrfs filesystem support"
 	select LIBCRC32C
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
@@ -52,3 +52,23 @@
 	  In most cases, unless you are a btrfs developer who needs
 	  to verify the integrity of (super)-block write requests
 	  during the run of a regression test, say N
+
+config BTRFS_FS_RUN_SANITY_TESTS
+	bool "Btrfs will run sanity tests upon loading"
+	depends on BTRFS_FS
+	help
+	  This will run some basic sanity tests on the free space cache
+	  code to make sure it is acting as it should.  These are mostly
+	  regression tests and are only really interesting to btrfs developers.
+
+	  If unsure, say N.
+
+config BTRFS_DEBUG
+	bool "Btrfs debugging support"
+	depends on BTRFS_FS
+	help
+	  Enable run-time debugging support for the btrfs filesystem. This may
+	  enable additional and expensive checks with negative impact on
+	  performance, or export extra information via sysfs.
+
+	  If unsure, say N.
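
A developer configuration exercising both new switches would carry a fragment like this (illustrative .config excerpt):

	CONFIG_BTRFS_FS=m
	CONFIG_BTRFS_FS_RUN_SANITY_TESTS=y
	CONFIG_BTRFS_DEBUG=y
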
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index bd605c8..b4fb415 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -352,6 +352,8 @@
 		err = __resolve_indirect_ref(fs_info, search_commit_root,
 					     time_seq, ref, parents,
 					     extent_item_pos);
+		if (err == -ENOMEM)
+			goto out;
 		if (err)
 			continue;
 
@@ -367,7 +369,7 @@
 			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
 			if (!new_ref) {
 				ret = -ENOMEM;
-				break;
+				goto out;
 			}
 			memcpy(new_ref, ref, sizeof(*ref));
 			new_ref->parent = node->val;
@@ -377,7 +379,7 @@
 		}
 		ulist_reinit(parents);
 	}
-
+out:
 	ulist_free(parents);
 	return ret;
 }
@@ -421,7 +423,10 @@
 		BUG_ON(!ref->wanted_disk_byte);
 		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
 				     fs_info->tree_root->leafsize, 0);
-		BUG_ON(!eb);
+		if (!eb || !extent_buffer_uptodate(eb)) {
+			free_extent_buffer(eb);
+			return -EIO;
+		}
 		btrfs_tree_read_lock(eb);
 		if (btrfs_header_level(eb) == 0)
 			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
@@ -443,7 +448,7 @@
  *           having a parent).
  * mode = 2: merge identical parents
  */
-static int __merge_refs(struct list_head *head, int mode)
+static void __merge_refs(struct list_head *head, int mode)
 {
 	struct list_head *pos1;
 
@@ -489,7 +494,6 @@
 		}
 
 	}
-	return 0;
 }
 
 /*
@@ -582,7 +586,8 @@
 		default:
 			WARN_ON(1);
 		}
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -680,7 +685,8 @@
 		default:
 			WARN_ON(1);
 		}
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 		ptr += btrfs_extent_inline_ref_size(type);
 	}
 
@@ -762,7 +768,9 @@
 		default:
 			WARN_ON(1);
 		}
-		BUG_ON(ret);
+		if (ret)
+			return ret;
+
 	}
 
 	return ret;
@@ -880,18 +888,14 @@
 	if (ret)
 		goto out;
 
-	ret = __merge_refs(&prefs, 1);
-	if (ret)
-		goto out;
+	__merge_refs(&prefs, 1);
 
 	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
 				      &prefs, extent_item_pos);
 	if (ret)
 		goto out;
 
-	ret = __merge_refs(&prefs, 2);
-	if (ret)
-		goto out;
+	__merge_refs(&prefs, 2);
 
 	while (!list_empty(&prefs)) {
 		ref = list_first_entry(&prefs, struct __prelim_ref, list);
@@ -900,7 +904,8 @@
 		if (ref->count && ref->root_id && ref->parent == 0) {
 			/* no parent == root of tree */
 			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
-			BUG_ON(ret < 0);
+			if (ret < 0)
+				goto out;
 		}
 		if (ref->count && ref->parent) {
 			struct extent_inode_elem *eie = NULL;
@@ -911,7 +916,10 @@
 							info_level);
 				eb = read_tree_block(fs_info->extent_root,
 							   ref->parent, bsz, 0);
-				BUG_ON(!eb);
+				if (!eb || !extent_buffer_uptodate(eb)) {
+					free_extent_buffer(eb);
+					return -EIO;
+				}
 				ret = find_extent_in_eb(eb, bytenr,
 							*extent_item_pos, &eie);
 				ref->inode_list = eie;
@@ -920,6 +928,8 @@
 			ret = ulist_add_merge(refs, ref->parent,
 					      (uintptr_t)ref->inode_list,
 					      (u64 *)&eie, GFP_NOFS);
+			if (ret < 0)
+				goto out;
 			if (!ret && extent_item_pos) {
 				/*
 				 * we've recorded that parent, so we must extend
@@ -930,7 +940,6 @@
 					eie = eie->next;
 				eie->next = ref->inode_list;
 			}
-			BUG_ON(ret < 0);
 		}
 		kfree(ref);
 	}
@@ -1180,6 +1189,20 @@
 	return ret;
 }
 
+/*
+ * This iterates to turn a name (from an iref/extref) into a full filesystem
+ * path. Elements of the path are separated by '/' and the path is guaranteed
+ * to be 0-terminated. The path is only given within the current file system,
+ * so it never starts with a '/'. The caller is responsible for providing
+ * "size" bytes in "dest". The dest buffer is filled backwards; finally, the
+ * start of the resulting string is returned, normally a pointer within dest.
+ * If the path buffer would overflow, the pointer is decremented further as if
+ * output had been written to the buffer, though no more output is actually
+ * generated. That way the caller can determine how much space the path would
+ * require to fit into the buffer: in that case the returned value will be
+ * smaller than dest, and callers must check for this!
+ */
 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 			u32 name_len, unsigned long name_off,
 			struct extent_buffer *eb_in, u64 parent,
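
Given the overflow convention just described, a caller sketch (hypothetical; buf and buf_size are the caller's storage, and the ERR_PTR return convention is assumed from the existing implementation):

	char *name = btrfs_ref_to_path(fs_root, path, name_len, name_off,
				       eb_in, parent, buf, buf_size);

	if (IS_ERR(name))
		return PTR_ERR(name);
	if (name < buf)			/* would not fit in buf_size bytes */
		return -ENAMETOOLONG;
	/* name points at the 0-terminated path inside buf */
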
@@ -1249,32 +1272,6 @@
 }
 
 /*
- * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
- * of the path are separated by '/' and the path is guaranteed to be
- * 0-terminated. the path is only given within the current file system.
- * Therefore, it never starts with a '/'. the caller is responsible to provide
- * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
- * the start point of the resulting string is returned. this pointer is within
- * dest, normally.
- * in case the path buffer would overflow, the pointer is decremented further
- * as if output was written to the buffer, though no more output is actually
- * generated. that way, the caller can determine how much space would be
- * required for the path to fit into the buffer. in that case, the returned
- * value will be smaller than dest. callers must check this!
- */
-char *btrfs_iref_to_path(struct btrfs_root *fs_root,
-			 struct btrfs_path *path,
-			 struct btrfs_inode_ref *iref,
-			 struct extent_buffer *eb_in, u64 parent,
-			 char *dest, u32 size)
-{
-	return btrfs_ref_to_path(fs_root, path,
-				 btrfs_inode_ref_name_len(eb_in, iref),
-				 (unsigned long)(iref + 1),
-				 eb_in, parent, dest, size);
-}
-
-/*
  * this makes the path point to (logical EXTENT_ITEM *)
  * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
  * tree blocks and <0 on error.
@@ -1461,8 +1458,6 @@
 				iterate_extent_inodes_t *iterate, void *ctx)
 {
 	int ret;
-	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
-	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
 	struct btrfs_trans_handle *trans;
 	struct ulist *refs = NULL;
 	struct ulist *roots = NULL;
@@ -1508,11 +1503,9 @@
 						iterate, ctx);
 		}
 		ulist_free(roots);
-		roots = NULL;
 	}
 
 	free_leaf_list(refs);
-	ulist_free(roots);
 out:
 	if (!search_commit_root) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 310a7f6..0f446d7 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -59,9 +59,6 @@
 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 				struct btrfs_fs_info *fs_info, u64 bytenr,
 				u64 time_seq, struct ulist **roots);
-char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
-			 struct btrfs_inode_ref *iref, struct extent_buffer *eb,
-			 u64 parent, char *dest, u32 size);
 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 			u32 name_len, unsigned long name_off,
 			struct extent_buffer *eb_in, u64 parent,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index d9b97d4..08b286b 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -93,7 +93,7 @@
 
 	unsigned long runtime_flags;
 
-	/* Keep track of who's O_SYNC/fsycing currently */
+	/* Keep track of who's O_SYNC/fsyncing currently */
 	atomic_t sync_writers;
 
 	/* full 64 bit generation number, struct vfs_inode doesn't have a big
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 15b9408..b189bd1 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -82,6 +82,10 @@
 	u32 sums;
 };
 
+static int btrfs_decompress_biovec(int type, struct page **pages_in,
+				   u64 disk_start, struct bio_vec *bvec,
+				   int vcnt, size_t srclen);
+
 static inline int compressed_bio_size(struct btrfs_root *root,
 				      unsigned long disk_size)
 {
@@ -106,7 +110,6 @@
 				 u64 disk_start)
 {
 	int ret;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page *page;
 	unsigned long i;
 	char *kaddr;
@@ -121,7 +124,7 @@
 		csum = ~(u32)0;
 
 		kaddr = kmap_atomic(page);
-		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
+		csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE);
 		btrfs_csum_final(csum, (char *)&csum);
 		kunmap_atomic(kaddr);
 
@@ -739,7 +742,7 @@
 static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
 static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
 
-struct btrfs_compress_op *btrfs_compress_op[] = {
+static struct btrfs_compress_op *btrfs_compress_op[] = {
 	&btrfs_zlib_compress,
 	&btrfs_lzo_compress,
 };
@@ -910,8 +913,9 @@
  * be contiguous.  They all correspond to the range of bytes covered by
  * the compressed extent.
  */
-int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
-			    struct bio_vec *bvec, int vcnt, size_t srclen)
+static int btrfs_decompress_biovec(int type, struct page **pages_in,
+				   u64 disk_start, struct bio_vec *bvec,
+				   int vcnt, size_t srclen)
 {
 	struct list_head *workspace;
 	int ret;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 9afb0a6..0c803b4 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -30,8 +30,6 @@
 			 unsigned long *total_in,
 			 unsigned long *total_out,
 			 unsigned long max_out);
-int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
-			    struct bio_vec *bvec, int vcnt, size_t srclen);
 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 		     unsigned long start_byte, size_t srclen, size_t destlen);
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index ca9d8f1..de6de8e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -37,16 +37,11 @@
 			      struct btrfs_root *root,
 			      struct extent_buffer *dst_buf,
 			      struct extent_buffer *src_buf);
-static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		    struct btrfs_path *path, int level, int slot);
+static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
+		    int level, int slot);
 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
 				 struct extent_buffer *eb);
-struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
-					  u32 blocksize, u64 parent_transid,
-					  u64 time_seq);
-struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
-						u64 bytenr, u32 blocksize,
-						u64 time_seq);
+static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -208,7 +203,7 @@
  * tree until you end up with a lock on the root.  A locked buffer
  * is returned, with a reference held.
  */
-struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
 {
 	struct extent_buffer *eb;
 
@@ -361,6 +356,44 @@
 }
 
 /*
+ * Increment the upper half of tree_mod_seq, set lower half zero.
+ *
+ * Must be called with fs_info->tree_mod_seq_lock held.
+ */
+static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
+{
+	u64 seq = atomic64_read(&fs_info->tree_mod_seq);
+	seq &= 0xffffffff00000000ull;
+	seq += 1ull << 32;
+	atomic64_set(&fs_info->tree_mod_seq, seq);
+	return seq;
+}
+
+/*
+ * Increment the lower half of tree_mod_seq.
+ *
+ * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
+ * are generated should not technically require a spin lock here. (Rationale:
+ * incrementing the minor while a major increment sits between its
+ * atomic64_read and atomic64_set calls does not duplicate sequence numbers;
+ * it still yields a unique sequence number as usual.) We have decided to keep
+ * the requirement anyway and to rethink it once we notice it really imposes a
+ * problem on some workload.
+ */
+static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
+{
+	return atomic64_inc_return(&fs_info->tree_mod_seq);
+}
+
+/*
+ * return the last minor in the previous major tree_mod_seq number
+ */
+u64 btrfs_tree_mod_seq_prev(u64 seq)
+{
+	return (seq & 0xffffffff00000000ull) - 1ull;
+}
+
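
Concretely (an illustration derived from the helpers above, not code from the patch), the 64-bit sequence number is treated as

	seq = (major << 32) | minor;	/* upper half: blocker epochs     */
					/* lower half: per-change counter */

so btrfs_inc_tree_mod_seq_major() opens a new epoch with minor == 0, btrfs_inc_tree_mod_seq_minor() counts modifications within it, and btrfs_tree_mod_seq_prev(seq) evaluates to (major << 32) - 1, the last minor of the preceding epoch.
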
+/*
  * This adds a new blocker to the tree mod log's blocker list if the @elem
  * passed does not already have a sequence number set. So when a caller expects
  * to record tree modifications, it should ensure to set elem->seq to zero
@@ -376,10 +409,10 @@
 	tree_mod_log_write_lock(fs_info);
 	spin_lock(&fs_info->tree_mod_seq_lock);
 	if (!elem->seq) {
-		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
+		elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
 		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
 	}
-	seq = btrfs_inc_tree_mod_seq(fs_info);
+	seq = btrfs_inc_tree_mod_seq_minor(fs_info);
 	spin_unlock(&fs_info->tree_mod_seq_lock);
 	tree_mod_log_write_unlock(fs_info);
 
@@ -524,7 +557,10 @@
 	if (!tm)
 		return -ENOMEM;
 
-	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
+	spin_lock(&fs_info->tree_mod_seq_lock);
+	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
+	spin_unlock(&fs_info->tree_mod_seq_lock);
+
 	return tm->seq;
 }
 
@@ -643,7 +679,8 @@
 static noinline int
 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *old_root,
-			 struct extent_buffer *new_root, gfp_t flags)
+			 struct extent_buffer *new_root, gfp_t flags,
+			 int log_removal)
 {
 	struct tree_mod_elem *tm;
 	int ret;
@@ -651,7 +688,8 @@
 	if (tree_mod_dont_log(fs_info, NULL))
 		return 0;
 
-	__tree_mod_log_free_eb(fs_info, old_root);
+	if (log_removal)
+		__tree_mod_log_free_eb(fs_info, old_root);
 
 	ret = tree_mod_alloc(fs_info, flags, &tm);
 	if (ret < 0)
@@ -738,7 +776,7 @@
 static noinline void
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 		     struct extent_buffer *src, unsigned long dst_offset,
-		     unsigned long src_offset, int nr_items, int log_removal)
+		     unsigned long src_offset, int nr_items)
 {
 	int ret;
 	int i;
@@ -752,12 +790,10 @@
 	}
 
 	for (i = 0; i < nr_items; i++) {
-		if (log_removal) {
-			ret = tree_mod_log_insert_key_locked(fs_info, src,
-							i + src_offset,
-							MOD_LOG_KEY_REMOVE);
-			BUG_ON(ret < 0);
-		}
+		ret = tree_mod_log_insert_key_locked(fs_info, src,
+						i + src_offset,
+						MOD_LOG_KEY_REMOVE);
+		BUG_ON(ret < 0);
 		ret = tree_mod_log_insert_key_locked(fs_info, dst,
 						     i + dst_offset,
 						     MOD_LOG_KEY_ADD);
@@ -802,11 +838,12 @@
 
 static noinline void
 tree_mod_log_set_root_pointer(struct btrfs_root *root,
-			      struct extent_buffer *new_root_node)
+			      struct extent_buffer *new_root_node,
+			      int log_removal)
 {
 	int ret;
 	ret = tree_mod_log_insert_root(root->fs_info, root->node,
-				       new_root_node, GFP_NOFS);
+				       new_root_node, GFP_NOFS, log_removal);
 	BUG_ON(ret < 0);
 }
 
@@ -867,7 +904,8 @@
 
 	if (btrfs_block_can_be_shared(root, buf)) {
 		ret = btrfs_lookup_extent_info(trans, root, buf->start,
-					       buf->len, &refs, &flags);
+					       btrfs_header_level(buf), 1,
+					       &refs, &flags);
 		if (ret)
 			return ret;
 		if (refs == 0) {
@@ -1028,7 +1066,7 @@
 			parent_start = 0;
 
 		extent_buffer_get(cow);
-		tree_mod_log_set_root_pointer(root, cow);
+		tree_mod_log_set_root_pointer(root, cow, 1);
 		rcu_assign_pointer(root->node, cow);
 
 		btrfs_free_tree_block(trans, root, buf, parent_start,
@@ -1067,11 +1105,11 @@
  */
 static struct tree_mod_elem *
 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
-			   struct btrfs_root *root, u64 time_seq)
+			   struct extent_buffer *eb_root, u64 time_seq)
 {
 	struct tree_mod_elem *tm;
 	struct tree_mod_elem *found = NULL;
-	u64 root_logical = root->node->start;
+	u64 root_logical = eb_root->start;
 	int looped = 0;
 
 	if (!time_seq)
@@ -1105,7 +1143,6 @@
 
 		found = tm;
 		root_logical = tm->old_root.logical;
-		BUG_ON(root_logical == root->node->start);
 		looped = 1;
 	}
 
@@ -1190,6 +1227,13 @@
 	btrfs_set_header_nritems(eb, n);
 }
 
+/*
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
+ * is returned. If rewind operations happen, a fresh buffer is returned. The
+ * returned buffer is always read-locked. If the returned buffer is not the
+ * input buffer, the lock on the input buffer is released and the input buffer
+ * is freed (its refcount is decremented).
+ */
 static struct extent_buffer *
 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 		    u64 time_seq)
@@ -1223,8 +1267,11 @@
 	}
 
 	extent_buffer_get(eb_rewin);
+	btrfs_tree_read_unlock(eb);
 	free_extent_buffer(eb);
 
+	extent_buffer_get(eb_rewin);
+	btrfs_tree_read_lock(eb_rewin);
 	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
 		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
@@ -1243,33 +1290,35 @@
 get_old_root(struct btrfs_root *root, u64 time_seq)
 {
 	struct tree_mod_elem *tm;
-	struct extent_buffer *eb;
+	struct extent_buffer *eb = NULL;
+	struct extent_buffer *eb_root;
 	struct extent_buffer *old;
 	struct tree_mod_root *old_root = NULL;
 	u64 old_generation = 0;
 	u64 logical;
 	u32 blocksize;
 
-	eb = btrfs_read_lock_root_node(root);
-	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+	eb_root = btrfs_read_lock_root_node(root);
+	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
 	if (!tm)
-		return root->node;
+		return eb_root;
 
 	if (tm->op == MOD_LOG_ROOT_REPLACE) {
 		old_root = &tm->old_root;
 		old_generation = tm->generation;
 		logical = old_root->logical;
 	} else {
-		logical = root->node->start;
+		logical = eb_root->start;
 	}
 
 	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
-		btrfs_tree_read_unlock(root->node);
-		free_extent_buffer(root->node);
+		btrfs_tree_read_unlock(eb_root);
+		free_extent_buffer(eb_root);
 		blocksize = btrfs_level_size(root, old_root->level);
 		old = read_tree_block(root, logical, blocksize, 0);
-		if (!old) {
+		if (!old || !extent_buffer_uptodate(old)) {
+			free_extent_buffer(old);
 			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
 				logical);
 			WARN_ON(1);
@@ -1278,13 +1327,13 @@
 			free_extent_buffer(old);
 		}
 	} else if (old_root) {
-		btrfs_tree_read_unlock(root->node);
-		free_extent_buffer(root->node);
+		btrfs_tree_read_unlock(eb_root);
+		free_extent_buffer(eb_root);
 		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
 	} else {
-		eb = btrfs_clone_extent_buffer(root->node);
-		btrfs_tree_read_unlock(root->node);
-		free_extent_buffer(root->node);
+		eb = btrfs_clone_extent_buffer(eb_root);
+		btrfs_tree_read_unlock(eb_root);
+		free_extent_buffer(eb_root);
 	}
 
 	if (!eb)
@@ -1294,7 +1343,7 @@
 	if (old_root) {
 		btrfs_set_header_bytenr(eb, eb->start);
 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
-		btrfs_set_header_owner(eb, root->root_key.objectid);
+		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
 		btrfs_set_header_level(eb, old_root->level);
 		btrfs_set_header_generation(eb, old_generation);
 	}
@@ -1311,15 +1360,15 @@
 {
 	struct tree_mod_elem *tm;
 	int level;
+	struct extent_buffer *eb_root = btrfs_root_node(root);
 
-	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
 	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
 		level = tm->old_root.level;
 	} else {
-		rcu_read_lock();
-		level = btrfs_header_level(root->node);
-		rcu_read_unlock();
+		level = btrfs_header_level(eb_root);
 	}
+	free_extent_buffer(eb_root);
 
 	return level;
 }
@@ -1514,8 +1563,10 @@
 			if (!cur) {
 				cur = read_tree_block(root, blocknr,
 							 blocksize, gen);
-				if (!cur)
+				if (!cur || !extent_buffer_uptodate(cur)) {
+					free_extent_buffer(cur);
 					return -EIO;
+				}
 			} else if (!uptodate) {
 				err = btrfs_read_buffer(cur, gen);
 				if (err) {
@@ -1680,6 +1731,8 @@
 				   struct extent_buffer *parent, int slot)
 {
 	int level = btrfs_header_level(parent);
+	struct extent_buffer *eb;
+
 	if (slot < 0)
 		return NULL;
 	if (slot >= btrfs_header_nritems(parent))
@@ -1687,9 +1740,15 @@
 
 	BUG_ON(level == 0);
 
-	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
-		       btrfs_level_size(root, level - 1),
-		       btrfs_node_ptr_generation(parent, slot));
+	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
+			     btrfs_level_size(root, level - 1),
+			     btrfs_node_ptr_generation(parent, slot));
+	if (eb && !extent_buffer_uptodate(eb)) {
+		free_extent_buffer(eb);
+		eb = NULL;
+	}
+
+	return eb;
 }
 
 /*
@@ -1754,7 +1813,7 @@
 			goto enospc;
 		}
 
-		tree_mod_log_set_root_pointer(root, child);
+		tree_mod_log_set_root_pointer(root, child, 1);
 		rcu_assign_pointer(root->node, child);
 
 		add_root_to_dirty_list(root);
@@ -1818,7 +1877,7 @@
 		if (btrfs_header_nritems(right) == 0) {
 			clean_tree_block(trans, root, right);
 			btrfs_tree_unlock(right);
-			del_ptr(trans, root, path, level + 1, pslot + 1);
+			del_ptr(root, path, level + 1, pslot + 1);
 			root_sub_used(root, right->len);
 			btrfs_free_tree_block(trans, root, right, 0, 1);
 			free_extent_buffer_stale(right);
@@ -1862,7 +1921,7 @@
 	if (btrfs_header_nritems(mid) == 0) {
 		clean_tree_block(trans, root, mid);
 		btrfs_tree_unlock(mid);
-		del_ptr(trans, root, path, level + 1, pslot);
+		del_ptr(root, path, level + 1, pslot);
 		root_sub_used(root, mid->len);
 		btrfs_free_tree_block(trans, root, mid, 0, 1);
 		free_extent_buffer_stale(mid);
@@ -2210,9 +2269,6 @@
 	int no_skips = 0;
 	struct extent_buffer *t;
 
-	if (path->really_keep_locks)
-		return;
-
 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
 		if (!path->nodes[i])
 			break;
@@ -2260,7 +2316,7 @@
 {
 	int i;
 
-	if (path->keep_locks || path->really_keep_locks)
+	if (path->keep_locks)
 		return;
 
 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -2493,7 +2549,7 @@
 	if (!cow)
 		write_lock_level = -1;
 
-	if (cow && (p->really_keep_locks || p->keep_locks || p->lowest_level))
+	if (cow && (p->keep_locks || p->lowest_level))
 		write_lock_level = BTRFS_MAX_LEVEL;
 
 	min_write_lock_level = write_lock_level;
@@ -2795,15 +2851,9 @@
 				btrfs_clear_path_blocking(p, b,
 							  BTRFS_READ_LOCK);
 			}
+			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
 			p->locks[level] = BTRFS_READ_LOCK;
 			p->nodes[level] = b;
-			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
-			if (b != p->nodes[level]) {
-				btrfs_tree_unlock_rw(p->nodes[level],
-						     p->locks[level]);
-				p->locks[level] = 0;
-				p->nodes[level] = b;
-			}
 		} else {
 			p->slots[level] = slot;
 			unlock_up(p, level, lowest_unlock, 0, NULL);
@@ -2902,8 +2952,7 @@
  * higher levels
  *
  */
-static void fixup_low_keys(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
 			   struct btrfs_disk_key *key, int level)
 {
 	int i;
@@ -2928,8 +2977,7 @@
  * This function isn't completely safe. It's the caller's responsibility
  * that the new key won't break the order
  */
-void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
 			     struct btrfs_key *new_key)
 {
 	struct btrfs_disk_key disk_key;
@@ -2951,7 +2999,7 @@
 	btrfs_set_item_key(eb, &disk_key, slot);
 	btrfs_mark_buffer_dirty(eb);
 	if (slot == 0)
-		fixup_low_keys(trans, root, path, &disk_key, 1);
+		fixup_low_keys(root, path, &disk_key, 1);
 }
 
 /*
@@ -2998,7 +3046,7 @@
 		push_items = min(src_nritems - 8, push_items);
 
 	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-			     push_items, 1);
+			     push_items);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
@@ -3069,7 +3117,7 @@
 				      sizeof(struct btrfs_key_ptr));
 
 	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-			     src_nritems - push_items, push_items, 1);
+			     src_nritems - push_items, push_items);
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3093,7 +3141,7 @@
  */
 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
-			   struct btrfs_path *path, int level)
+			   struct btrfs_path *path, int level, int log_removal)
 {
 	u64 lower_gen;
 	struct extent_buffer *lower;
@@ -3144,7 +3192,7 @@
 	btrfs_mark_buffer_dirty(c);
 
 	old = root->node;
-	tree_mod_log_set_root_pointer(root, c);
+	tree_mod_log_set_root_pointer(root, c, log_removal);
 	rcu_assign_pointer(root->node, c);
 
 	/* the super has an extra ref to root->node */
@@ -3221,18 +3269,21 @@
 	int mid;
 	int ret;
 	u32 c_nritems;
-	int tree_mod_log_removal = 1;
 
 	c = path->nodes[level];
 	WARN_ON(btrfs_header_generation(c) != trans->transid);
 	if (c == root->node) {
-		/* trying to split the root, lets make a new one */
-		ret = insert_new_root(trans, root, path, level + 1);
 		/*
-		 * removal of root nodes has been logged by
-		 * tree_mod_log_set_root_pointer due to locking
+		 * trying to split the root, let's make a new one
+		 *
+		 * tree mod log: We pass 0 as log_removal parameter to
+		 * insert_new_root, because that root buffer will be kept as a
+		 * normal node. We are going to log removal of half of the
+		 * elements below with tree_mod_log_eb_copy. We're holding a
+		 * tree lock on the buffer, which is why we cannot race with
+		 * other tree_mod_log users.
 		 */
-		tree_mod_log_removal = 0;
+		ret = insert_new_root(trans, root, path, level + 1, 0);
 		if (ret)
 			return ret;
 	} else {
@@ -3270,8 +3321,7 @@
 			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
 
-	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
-			     tree_mod_log_removal);
+	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
 	copy_extent_buffer(split, c,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(mid),
@@ -3687,7 +3737,7 @@
 		clean_tree_block(trans, root, right);
 
 	btrfs_item_key(right, &disk_key, 0);
-	fixup_low_keys(trans, root, path, &disk_key, 1);
+	fixup_low_keys(root, path, &disk_key, 1);
 
 	/* then fixup the leaf pointer in the path */
 	if (path->slots[0] < push_items) {
@@ -3953,7 +4003,7 @@
 	}
 
 	if (!path->nodes[1]) {
-		ret = insert_new_root(trans, root, path, 1);
+		ret = insert_new_root(trans, root, path, 1, 1);
 		if (ret)
 			return ret;
 	}
@@ -4047,8 +4097,7 @@
 			path->nodes[0] = right;
 			path->slots[0] = 0;
 			if (path->slots[1] == 0)
-				fixup_low_keys(trans, root, path,
-					       &disk_key, 1);
+				fixup_low_keys(root, path, &disk_key, 1);
 		}
 		btrfs_mark_buffer_dirty(right);
 		return ret;
@@ -4264,7 +4313,7 @@
 		return ret;
 
 	path->slots[0]++;
-	setup_items_for_insert(trans, root, path, new_key, &item_size,
+	setup_items_for_insert(root, path, new_key, &item_size,
 			       item_size, item_size +
 			       sizeof(struct btrfs_item), 1);
 	leaf = path->nodes[0];
@@ -4281,9 +4330,7 @@
  * off the end of the item or if we shift the item to chop bytes off
  * the front.
  */
-void btrfs_truncate_item(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root,
-			 struct btrfs_path *path,
+void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
 			 u32 new_size, int from_end)
 {
 	int slot;
@@ -4367,7 +4414,7 @@
 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
 		btrfs_set_item_key(leaf, &disk_key, slot);
 		if (slot == 0)
-			fixup_low_keys(trans, root, path, &disk_key, 1);
+			fixup_low_keys(root, path, &disk_key, 1);
 	}
 
 	item = btrfs_item_nr(leaf, slot);
@@ -4383,8 +4430,7 @@
 /*
  * make the item pointed to by the path bigger, data_size is the new size.
  */
-void btrfs_extend_item(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
 		       u32 data_size)
 {
 	int slot;
@@ -4454,8 +4500,7 @@
  * to save stack depth by doing the bulk of the work in a function
  * that doesn't call btrfs_search_slot
  */
-void setup_items_for_insert(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, struct btrfs_path *path,
+void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 			    struct btrfs_key *cpu_key, u32 *data_size,
 			    u32 total_data, u32 total_size, int nr)
 {
@@ -4531,7 +4576,7 @@
 
 	if (slot == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		fixup_low_keys(trans, root, path, &disk_key, 1);
+		fixup_low_keys(root, path, &disk_key, 1);
 	}
 	btrfs_unlock_up_safe(path, 1);
 	btrfs_mark_buffer_dirty(leaf);
@@ -4571,7 +4616,7 @@
 	slot = path->slots[0];
 	BUG_ON(slot < 0);
 
-	setup_items_for_insert(trans, root, path, cpu_key, data_size,
+	setup_items_for_insert(root, path, cpu_key, data_size,
 			       total_data, total_size, nr);
 	return 0;
 }
@@ -4609,8 +4654,8 @@
  * the tree should have been previously balanced so the deletion does not
  * empty a node.
  */
-static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-		    struct btrfs_path *path, int level, int slot)
+static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
+		    int level, int slot)
 {
 	struct extent_buffer *parent = path->nodes[level];
 	u32 nritems;
@@ -4642,7 +4687,7 @@
 		struct btrfs_disk_key disk_key;
 
 		btrfs_node_key(parent, &disk_key, 0);
-		fixup_low_keys(trans, root, path, &disk_key, level + 1);
+		fixup_low_keys(root, path, &disk_key, level + 1);
 	}
 	btrfs_mark_buffer_dirty(parent);
 }
@@ -4663,7 +4708,7 @@
 				    struct extent_buffer *leaf)
 {
 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
-	del_ptr(trans, root, path, 1, path->slots[1]);
+	del_ptr(root, path, 1, path->slots[1]);
 
 	/*
 	 * btrfs_free_extent is expensive, we want to make sure we
@@ -4744,7 +4789,7 @@
 			struct btrfs_disk_key disk_key;
 
 			btrfs_item_key(leaf, &disk_key, 0);
-			fixup_low_keys(trans, root, path, &disk_key, 1);
+			fixup_low_keys(root, path, &disk_key, 1);
 		}
 
 		/* delete the leaf if it is mostly empty */
@@ -5464,139 +5509,6 @@
 	return btrfs_next_old_leaf(root, path, 0);
 }
 
-/* Release the path up to but not including the given level */
-static void btrfs_release_level(struct btrfs_path *path, int level)
-{
-	int i;
-
-	for (i = 0; i < level; i++) {
-		path->slots[i] = 0;
-		if (!path->nodes[i])
-			continue;
-		if (path->locks[i]) {
-			btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
-			path->locks[i] = 0;
-		}
-		free_extent_buffer(path->nodes[i]);
-		path->nodes[i] = NULL;
-	}
-}
-
-/*
- * This function assumes 2 things
- *
- * 1) You are using path->keep_locks
- * 2) You are not inserting items.
- *
- * If either of these are not true do not use this function. If you need a next
- * leaf with either of these not being true then this function can be easily
- * adapted to do that, but at the moment these are the limitations.
- */
-int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, struct btrfs_path *path,
-			  int del)
-{
-	struct extent_buffer *b;
-	struct btrfs_key key;
-	u32 nritems;
-	int level = 1;
-	int slot;
-	int ret = 1;
-	int write_lock_level = BTRFS_MAX_LEVEL;
-	int ins_len = del ? -1 : 0;
-
-	WARN_ON(!(path->keep_locks || path->really_keep_locks));
-
-	nritems = btrfs_header_nritems(path->nodes[0]);
-	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
-
-	while (path->nodes[level]) {
-		nritems = btrfs_header_nritems(path->nodes[level]);
-		if (!(path->locks[level] & BTRFS_WRITE_LOCK)) {
-search:
-			btrfs_release_path(path);
-			ret = btrfs_search_slot(trans, root, &key, path,
-						ins_len, 1);
-			if (ret < 0)
-				goto out;
-			level = 1;
-			continue;
-		}
-
-		if (path->slots[level] >= nritems - 1) {
-			level++;
-			continue;
-		}
-
-		btrfs_release_level(path, level);
-		break;
-	}
-
-	if (!path->nodes[level]) {
-		ret = 1;
-		goto out;
-	}
-
-	path->slots[level]++;
-	b = path->nodes[level];
-
-	while (b) {
-		level = btrfs_header_level(b);
-
-		if (!should_cow_block(trans, root, b))
-			goto cow_done;
-
-		btrfs_set_path_blocking(path);
-		ret = btrfs_cow_block(trans, root, b,
-				      path->nodes[level + 1],
-				      path->slots[level + 1], &b);
-		if (ret)
-			goto out;
-cow_done:
-		path->nodes[level] = b;
-		btrfs_clear_path_blocking(path, NULL, 0);
-		if (level != 0) {
-			ret = setup_nodes_for_search(trans, root, path, b,
-						     level, ins_len,
-						     &write_lock_level);
-			if (ret == -EAGAIN)
-				goto search;
-			if (ret)
-				goto out;
-
-			b = path->nodes[level];
-			slot = path->slots[level];
-
-			ret = read_block_for_search(trans, root, path,
-						    &b, level, slot, &key, 0);
-			if (ret == -EAGAIN)
-				goto search;
-			if (ret)
-				goto out;
-			level = btrfs_header_level(b);
-			if (!btrfs_try_tree_write_lock(b)) {
-				btrfs_set_path_blocking(path);
-				btrfs_tree_lock(b);
-				btrfs_clear_path_blocking(path, b,
-							  BTRFS_WRITE_LOCK);
-			}
-			path->locks[level] = BTRFS_WRITE_LOCK;
-			path->nodes[level] = b;
-			path->slots[level] = 0;
-		} else {
-			path->slots[level] = 0;
-			ret = 0;
-			break;
-		}
-	}
-
-out:
-	if (ret)
-		btrfs_release_path(path);
-
-	return ret;
-}
-
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 			u64 time_seq)
 {
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0d82922..63c328a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -340,6 +340,7 @@
  */
 #define BTRFS_FS_STATE_ERROR		0
 #define BTRFS_FS_STATE_REMOUNTING	1
+#define BTRFS_FS_STATE_TRANS_ABORTED	2
 
 /* Super block flags */
 /* Errors detected */
@@ -508,6 +509,7 @@
 
 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF	(1ULL << 6)
 #define BTRFS_FEATURE_INCOMPAT_RAID56		(1ULL << 7)
+#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA	(1ULL << 8)
 
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
@@ -518,7 +520,8 @@
 	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
 	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
 	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
-	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
+	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
+	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
 
 /*
  * A leaf is full of items. offset and size tell us where to find
@@ -583,7 +586,6 @@
 	unsigned int skip_locking:1;
 	unsigned int leave_spinning:1;
 	unsigned int search_commit_root:1;
-	unsigned int really_keep_locks:1;
 };
 
 /*
@@ -1019,9 +1021,9 @@
  */
 #define BTRFS_QGROUP_STATUS_FLAG_ON		(1ULL << 0)
 /*
- * SCANNING is set during the initialization phase
+ * RESCAN is set during the initialization phase
  */
-#define BTRFS_QGROUP_STATUS_FLAG_SCANNING	(1ULL << 1)
+#define BTRFS_QGROUP_STATUS_FLAG_RESCAN		(1ULL << 1)
 /*
  * Some qgroup entries are known to be out of date,
  * either because the configuration has changed in a way that
@@ -1050,7 +1052,7 @@
 	 * only used during scanning to record the progress
 	 * of the scan. It contains a logical address
 	 */
-	__le64 scan;
+	__le64 rescan;
 } __attribute__ ((__packed__));
 
 struct btrfs_qgroup_info_item {
@@ -1360,6 +1362,17 @@
 	wait_queue_head_t transaction_blocked_wait;
 	wait_queue_head_t async_submit_wait;
 
+	/*
+	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
+	 * when they are updated.
+	 *
+	 * Because these flags are never cleared, we don't need the lock
+	 * on the read side.
+	 *
+	 * We also don't need it at mount time, because no other task
+	 * can update the flags then.
+	 */
+	spinlock_t super_lock;
 	struct btrfs_super_block *super_copy;
 	struct btrfs_super_block *super_for_commit;
 	struct block_device *__bdev;
@@ -1409,7 +1422,7 @@
 
 	/* this protects tree_mod_seq_list */
 	spinlock_t tree_mod_seq_lock;
-	atomic_t tree_mod_seq;
+	atomic64_t tree_mod_seq;
 	struct list_head tree_mod_seq_list;
 	struct seq_list tree_mod_seq_elem;
 
@@ -1581,12 +1594,20 @@
 	struct rb_root qgroup_tree;
 	spinlock_t qgroup_lock;
 
+	/* protect user change for quota operations */
+	struct mutex qgroup_ioctl_lock;
+
 	/* list of dirty qgroups to be written at next commit */
 	struct list_head dirty_qgroups;
 
 	/* used by btrfs_qgroup_record_ref for an efficient tree traversal */
 	u64 qgroup_seq;
 
+	/* qgroup rescan items */
+	struct mutex qgroup_rescan_lock; /* protects the progress item */
+	struct btrfs_key qgroup_rescan_progress;
+	struct btrfs_workers qgroup_rescan_workers;
+
 	/* filesystem state */
 	unsigned long fs_state;
 
@@ -1808,6 +1829,12 @@
  */
 #define BTRFS_EXTENT_ITEM_KEY	168
 
+/*
+ * The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know
+ * the length, so we save the level in key->offset instead of the length.
+ */
+#define BTRFS_METADATA_ITEM_KEY	169
+
 #define BTRFS_TREE_BLOCK_REF_KEY	176
 
 #define BTRFS_EXTENT_DATA_REF_KEY	178
@@ -2766,8 +2793,10 @@
 
 static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
 {
-	int t = btrfs_super_csum_type(s);
-	BUG_ON(t >= ARRAY_SIZE(btrfs_csum_sizes));
+	u16 t = btrfs_super_csum_type(s);
+	/*
+	 * csum type is validated at mount time
+	 */
 	return btrfs_csum_sizes[t];
 }
 
@@ -2864,8 +2893,8 @@
 		   version, 64);
 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
 		   flags, 64);
-BTRFS_SETGET_FUNCS(qgroup_status_scan, struct btrfs_qgroup_status_item,
-		   scan, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
+		   rescan, 64);
 
 /* btrfs_qgroup_info_item */
 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
@@ -3005,7 +3034,7 @@
 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 bytenr,
-			     u64 num_bytes, u64 *refs, u64 *flags);
+			     u64 offset, int metadata, u64 *refs, u64 *flags);
 int btrfs_pin_extent(struct btrfs_root *root,
 		     u64 bytenr, u64 num, int reserved);
 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
@@ -3017,8 +3046,6 @@
 						 struct btrfs_fs_info *info,
 						 u64 bytenr);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
-u64 btrfs_find_block_group(struct btrfs_root *root,
-			   u64 search_start, u64 search_hint, int owner);
 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root, u32 blocksize,
 					u64 parent, u64 root_objectid,
@@ -3028,10 +3055,6 @@
 			   struct btrfs_root *root,
 			   struct extent_buffer *buf,
 			   u64 parent, int last_ref);
-struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
-					    struct btrfs_root *root,
-					    u64 bytenr, u32 blocksize,
-					    int level);
 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *root,
 				     u64 root_objectid, u64 owner,
@@ -3044,7 +3067,7 @@
 				  struct btrfs_root *root,
 				  u64 num_bytes, u64 min_alloc_size,
 				  u64 empty_size, u64 hint_byte,
-				  struct btrfs_key *ins, u64 data);
+				  struct btrfs_key *ins, int is_data);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		  struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3084,7 +3107,6 @@
 			     struct btrfs_root *root, u64 group_start);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root);
-u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
@@ -3161,8 +3183,7 @@
 int btrfs_previous_item(struct btrfs_root *root,
 			struct btrfs_path *path, u64 min_objectid,
 			int type);
-void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
 			     struct btrfs_key *new_key);
 struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
@@ -3198,12 +3219,9 @@
 		      struct extent_buffer **cow_ret, u64 new_root_objectid);
 int btrfs_block_can_be_shared(struct btrfs_root *root,
 			      struct extent_buffer *buf);
-void btrfs_extend_item(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
 		       u32 data_size);
-void btrfs_truncate_item(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root,
-			 struct btrfs_path *path,
+void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
 			 u32 new_size, int from_end);
 int btrfs_split_item(struct btrfs_trans_handle *trans,
 		     struct btrfs_root *root,
@@ -3243,8 +3261,7 @@
 	return btrfs_del_items(trans, root, path, path->slots[0], 1);
 }
 
-void setup_items_for_insert(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, struct btrfs_path *path,
+void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 			    struct btrfs_key *cpu_key, u32 *data_size,
 			    u32 total_data, u32 total_size, int nr);
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
@@ -3264,9 +3281,6 @@
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
-int btrfs_next_leaf_write(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, struct btrfs_path *path,
-			  int del);
 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 			u64 time_seq);
 static inline int btrfs_next_old_item(struct btrfs_root *root,
@@ -3281,7 +3295,6 @@
 {
 	return btrfs_next_old_item(root, p, 0);
 }
-int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
 				     struct btrfs_block_rsv *block_rsv,
@@ -3318,10 +3331,7 @@
 			   struct seq_list *elem);
 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 			    struct seq_list *elem);
-static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
-{
-	return atomic_inc_return(&fs_info->tree_mod_seq);
-}
+u64 btrfs_tree_mod_seq_prev(u64 seq);
 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
 
 /* root-item.c */
@@ -3345,9 +3355,8 @@
 				   struct btrfs_root *root,
 				   struct btrfs_key *key,
 				   struct btrfs_root_item *item);
-void btrfs_read_root_item(struct btrfs_root *root,
-			 struct extent_buffer *eb, int slot,
-			 struct btrfs_root_item *item);
+void btrfs_read_root_item(struct extent_buffer *eb, int slot,
+			  struct btrfs_root_item *item);
 int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
 			 btrfs_root_item *item, struct btrfs_key *key);
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -3380,9 +3389,6 @@
 btrfs_search_dir_index_item(struct btrfs_root *root,
 			    struct btrfs_path *path, u64 dirid,
 			    const char *name, int name_len);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-			      struct btrfs_path *path,
-			      const char *name, int name_len);
 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      struct btrfs_path *path,
@@ -3460,16 +3466,11 @@
 			     struct btrfs_root *root,
 			     struct btrfs_path *path, u64 objectid,
 			     u64 bytenr, int mod);
-u64 btrfs_file_extent_length(struct btrfs_path *path);
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_ordered_sum *sums);
 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		       struct bio *bio, u64 file_start, int contig);
-struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
-					  struct btrfs_root *root,
-					  struct btrfs_path *path,
-					  u64 bytenr, int cow);
 int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root, struct btrfs_path *path,
 			u64 isize);
@@ -3531,8 +3532,6 @@
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state);
-int btrfs_writepages(struct address_space *mapping,
-		     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *new_root, u64 new_dirid);
 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
@@ -3542,7 +3541,6 @@
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-int btrfs_dirty_inode(struct inode *inode);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
@@ -3560,7 +3558,6 @@
 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, struct inode *inode);
 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
-int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_cleanup(struct btrfs_root *root);
 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root);
@@ -3611,7 +3608,6 @@
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 			      struct inode *inode, u64 start, u64 end);
 int btrfs_release_file(struct inode *inode, struct file *file);
-void btrfs_drop_pages(struct page **pages, size_t num_pages);
 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 		      struct page **pages, size_t num_pages,
 		      loff_t pos, size_t write_bytes,
@@ -3634,14 +3630,31 @@
 
 #ifdef CONFIG_PRINTK
 __printf(2, 3)
-void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
+void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
 #else
 static inline __printf(2, 3)
-void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 {
 }
 #endif
 
+#define btrfs_emerg(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
+#define btrfs_alert(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
+#define btrfs_crit(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
+#define btrfs_err(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_ERR fmt, ##args)
+#define btrfs_warn(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
+#define btrfs_notice(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
+#define btrfs_info(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_INFO fmt, ##args)
+#define btrfs_debug(fs_info, fmt, args...) \
+	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
+
 __printf(5, 6)
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
 		     unsigned int line, int errno, const char *fmt, ...);
@@ -3663,11 +3676,28 @@
 	disk_super = fs_info->super_copy;
 	features = btrfs_super_incompat_flags(disk_super);
 	if (!(features & flag)) {
-		features |= flag;
-		btrfs_set_super_incompat_flags(disk_super, features);
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_incompat_flags(disk_super);
+		if (!(features & flag)) {
+			features |= flag;
+			btrfs_set_super_incompat_flags(disk_super, features);
+			printk(KERN_INFO "btrfs: setting %llu feature flag\n",
+					 flag);
+		}
+		spin_unlock(&fs_info->super_lock);
 	}
 }
 
+#define btrfs_fs_incompat(fs_info, opt) \
+	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	disk_super = fs_info->super_copy;
+	return !!(btrfs_super_incompat_flags(disk_super) & flag);
+}
+
 /*
  * Call btrfs_abort_transaction as early as possible when an error condition is
  * detected, that way the exact line number is reported.
@@ -3753,7 +3783,6 @@
 int btrfs_scrub_cancel(struct btrfs_fs_info *info);
 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
 			   struct btrfs_device *dev);
-int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
 			 struct btrfs_scrub_progress *progress);
 
@@ -3784,7 +3813,7 @@
 		       struct btrfs_fs_info *fs_info);
 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 			struct btrfs_fs_info *fs_info);
-int btrfs_quota_rescan(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 			      struct btrfs_fs_info *fs_info, u64 src, u64 dst);
 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
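Two notes on the ctree.h changes above. The new btrfs_emerg()..btrfs_debug() wrappers simply prepend the matching KERN_* level to btrfs_printk(), which is why later hunks in this series can replace raw printk() calls one-for-one. And btrfs_set_fs_incompat() now uses the classic double-checked pattern: an unlocked test keeps the common already-set case cheap, while the re-check under the new super_lock makes the read-modify-write of the flags word race-free. A minimal standalone sketch of that pattern (names here are illustrative, not btrfs's):

#include <linux/spinlock.h>
#include <linux/types.h>

static u64 example_flags;              /* stands in for the superblock flags */
static DEFINE_SPINLOCK(example_lock);  /* stands in for fs_info->super_lock */

static void example_set_flag(u64 flag)
{
	if (!(example_flags & flag)) {         /* cheap unlocked fast path */
		spin_lock(&example_lock);
		if (!(example_flags & flag))   /* re-check under the lock */
			example_flags |= flag;
		spin_unlock(&example_lock);
	}
}

The unlocked fast path is safe here because a flag, once set, is never cleared; a reader that races with the first setter at worst takes the lock once.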
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 14fce27..f26f38c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -202,7 +202,7 @@
 	spin_unlock(&root->lock);
 }
 
-struct btrfs_delayed_node *btrfs_first_delayed_node(
+static struct btrfs_delayed_node *btrfs_first_delayed_node(
 			struct btrfs_delayed_root *delayed_root)
 {
 	struct list_head *p;
@@ -221,7 +221,7 @@
 	return node;
 }
 
-struct btrfs_delayed_node *btrfs_next_delayed_node(
+static struct btrfs_delayed_node *btrfs_next_delayed_node(
 						struct btrfs_delayed_node *node)
 {
 	struct btrfs_delayed_root *delayed_root;
@@ -282,7 +282,7 @@
 	__btrfs_release_delayed_node(node, 0);
 }
 
-struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
+static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
 					struct btrfs_delayed_root *delayed_root)
 {
 	struct list_head *p;
@@ -308,7 +308,7 @@
 	__btrfs_release_delayed_node(node, 1);
 }
 
-struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
 {
 	struct btrfs_delayed_item *item;
 	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
@@ -383,7 +383,7 @@
 	return NULL;
 }
 
-struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
+static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
 					struct btrfs_delayed_node *delayed_node,
 					struct btrfs_key *key)
 {
@@ -394,45 +394,6 @@
 	return item;
 }
 
-struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
-					struct btrfs_delayed_node *delayed_node,
-					struct btrfs_key *key)
-{
-	struct btrfs_delayed_item *item;
-
-	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
-					   NULL, NULL);
-	return item;
-}
-
-struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
-					struct btrfs_delayed_node *delayed_node,
-					struct btrfs_key *key)
-{
-	struct btrfs_delayed_item *item, *next;
-
-	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
-					   NULL, &next);
-	if (!item)
-		item = next;
-
-	return item;
-}
-
-struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
-					struct btrfs_delayed_node *delayed_node,
-					struct btrfs_key *key)
-{
-	struct btrfs_delayed_item *item, *next;
-
-	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
-					   NULL, &next);
-	if (!item)
-		item = next;
-
-	return item;
-}
-
 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
 				    struct btrfs_delayed_item *ins,
 				    int action)
@@ -535,7 +496,7 @@
 	}
 }
 
-struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
+static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 					struct btrfs_delayed_node *delayed_node)
 {
 	struct rb_node *p;
@@ -548,7 +509,7 @@
 	return item;
 }
 
-struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
+static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 					struct btrfs_delayed_node *delayed_node)
 {
 	struct rb_node *p;
@@ -561,7 +522,7 @@
 	return item;
 }
 
-struct btrfs_delayed_item *__btrfs_next_delayed_item(
+static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 						struct btrfs_delayed_item *item)
 {
 	struct rb_node *p;
@@ -766,10 +727,9 @@
  * This helper will insert some continuous items into the same leaf according
  * to the free space of the leaf.
  */
-static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
-				struct btrfs_path *path,
-				struct btrfs_delayed_item *item)
+static int btrfs_batch_insert_items(struct btrfs_root *root,
+				    struct btrfs_path *path,
+				    struct btrfs_delayed_item *item)
 {
 	struct btrfs_delayed_item *curr, *next;
 	int free_space;
@@ -848,7 +808,7 @@
 	btrfs_clear_path_blocking(path, NULL, 0);
 
 	/* insert the keys of the items */
-	setup_items_for_insert(trans, root, path, keys, data_size,
+	setup_items_for_insert(root, path, keys, data_size,
 			       total_data_size, total_size, nitems);
 
 	/* insert the dir index items */
@@ -932,7 +892,7 @@
 	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
 		/* insert the continuous items into the same leaf */
 		path->slots[0]++;
-		btrfs_batch_insert_items(trans, root, path, curr);
+		btrfs_batch_insert_items(root, path, curr);
 	}
 	btrfs_release_delayed_item(prev);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
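The delayed-inode.c hunks mostly un-export helpers (making them static) and drop the unused trans argument from btrfs_batch_insert_items(), which packs as many key-contiguous delayed items into one leaf as its free space allows. A standalone sketch of that greedy packing loop, with made-up item sizes (none of these numbers come from the patch):

#include <stdio.h>

int main(void)
{
	int item_len[] = { 40, 60, 100, 200, 500 }; /* contiguous delayed items */
	int nr = sizeof(item_len) / sizeof(item_len[0]);
	int free_space = 300;   /* free bytes left in the target leaf */
	int used = 0, nitems = 0;

	/* greedily take consecutive items while they still fit */
	for (int i = 0; i < nr && used + item_len[i] <= free_space; i++) {
		used += item_len[i];
		nitems++;
	}
	printf("batched %d items (%d bytes) into one leaf\n", nitems, used);
	return 0;
}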
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index b7a0641..c219463 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -40,16 +40,19 @@
  * compare two delayed tree backrefs with same bytenr and type
  */
 static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
-			  struct btrfs_delayed_tree_ref *ref1)
+			  struct btrfs_delayed_tree_ref *ref1, int type)
 {
-	if (ref1->root < ref2->root)
-		return -1;
-	if (ref1->root > ref2->root)
-		return 1;
-	if (ref1->parent < ref2->parent)
-		return -1;
-	if (ref1->parent > ref2->parent)
-		return 1;
+	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+		if (ref1->root < ref2->root)
+			return -1;
+		if (ref1->root > ref2->root)
+			return 1;
+	} else {
+		if (ref1->parent < ref2->parent)
+			return -1;
+		if (ref1->parent > ref2->parent)
+			return 1;
+	}
 	return 0;
 }
 
@@ -113,7 +116,8 @@
 	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
 	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
 		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
-				      btrfs_delayed_node_to_tree_ref(ref1));
+				      btrfs_delayed_node_to_tree_ref(ref1),
+				      ref1->type);
 	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
 		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
 		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
@@ -357,8 +361,10 @@
 		elem = list_first_entry(&fs_info->tree_mod_seq_list,
 					struct seq_list, list);
 		if (seq >= elem->seq) {
-			pr_debug("holding back delayed_ref %llu, lowest is "
-				 "%llu (%p)\n", seq, elem->seq, delayed_refs);
+			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
+				 (u32)(seq >> 32), (u32)seq,
+				 (u32)(elem->seq >> 32), (u32)elem->seq,
+				 delayed_refs);
 			ret = 1;
 		}
 	}
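The pr_debug() rework above goes with tree_mod_seq becoming a 64-bit counter (see the atomic64_set() change in disk-io.c below): the sequence is printed as two 32-bit halves, high word first, via "%#x.%x". A small userspace demonstration of that format:

#include <stdio.h>
#include <stdint.h>

static void print_seq(uint64_t seq)
{
	/* high 32 bits, a dot, then the low 32 bits */
	printf("%#x.%x\n", (uint32_t)(seq >> 32), (uint32_t)seq);
}

int main(void)
{
	print_seq(0x100000002ULL);	/* prints "0x1.2" */
	return 0;
}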
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 502c215..79e594e 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -21,6 +21,10 @@
 #include "hash.h"
 #include "transaction.h"
 
+static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+			      struct btrfs_path *path,
+			      const char *name, int name_len);
+
 /*
  * insert a name into a directory, doing overflow properly if there is a hash
  * collision.  data_size indicates how big the item inserted should be.  On
@@ -49,7 +53,7 @@
 		di = btrfs_match_dir_item_name(root, path, name, name_len);
 		if (di)
 			return ERR_PTR(-EEXIST);
-		btrfs_extend_item(trans, root, path, data_size);
+		btrfs_extend_item(root, path, data_size);
 	} else if (ret < 0)
 		return ERR_PTR(ret);
 	WARN_ON(ret > 0);
@@ -379,7 +383,7 @@
  * this walks through all the entries in a dir item and finds one
  * for a specific name.
  */
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
 			      struct btrfs_path *path,
 			      const char *name, int name_len)
 {
@@ -442,8 +446,7 @@
 		start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 		memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			item_len - (ptr + sub_item_len - start));
-		btrfs_truncate_item(trans, root, path,
-				    item_len - sub_item_len, 1);
+		btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
 	}
 	return ret;
 }
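dir-item.c shows the usual recipe for un-exporting a helper: btrfs_match_dir_item_name() loses its prototype in ctree.h (removed above), becomes static, and gains a static forward declaration at the top of the file because its first call site precedes its definition. In miniature, with hypothetical names:

static int helper(int x);	/* forward declaration at the top of the file */

int caller(int x)
{
	return helper(x) + 1;	/* call site precedes the definition */
}

static int helper(int x)	/* definition further down */
{
	return x * 2;
}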
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6d19a0a..4e9ebe1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/migrate.h>
 #include <linux/ratelimit.h>
+#include <linux/uuid.h>
 #include <asm/unaligned.h>
 #include "compat.h"
 #include "ctree.h"
@@ -69,6 +70,8 @@
 					int mark);
 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 				       struct extent_io_tree *pinned_extents);
+static int btrfs_cleanup_transaction(struct btrfs_root *root);
+static void btrfs_error_commit_super(struct btrfs_root *root);
 
 /*
  * end_io_wq structs are used to do processing in task context when an IO is
@@ -222,7 +225,7 @@
 	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
 	write_lock(&em_tree->lock);
-	ret = add_extent_mapping(em_tree, em);
+	ret = add_extent_mapping(em_tree, em, 0);
 	if (ret == -EEXIST) {
 		free_extent_map(em);
 		em = lookup_extent_mapping(em_tree, start, len);
@@ -238,7 +241,7 @@
 	return em;
 }
 
-u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
+u32 btrfs_csum_data(char *data, u32 seed, size_t len)
 {
 	return crc32c(seed, data, len);
 }
@@ -274,7 +277,7 @@
 		if (err)
 			return 1;
 		cur_len = min(len, map_len - (offset - map_start));
-		crc = btrfs_csum_data(root, kaddr + offset - map_start,
+		crc = btrfs_csum_data(kaddr + offset - map_start,
 				      crc, cur_len);
 		len -= cur_len;
 		offset += cur_len;
@@ -354,6 +357,49 @@
 }
 
 /*
+ * Return 0 if the superblock checksum type matches the checksum value of that
+ * algorithm. Pass the raw disk superblock data.
+ */
+static int btrfs_check_super_csum(char *raw_disk_sb)
+{
+	struct btrfs_super_block *disk_sb =
+		(struct btrfs_super_block *)raw_disk_sb;
+	u16 csum_type = btrfs_super_csum_type(disk_sb);
+	int ret = 0;
+
+	if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
+		u32 crc = ~(u32)0;
+		const int csum_size = sizeof(crc);
+		char result[csum_size];
+
+		/*
+		 * The super_block structure does not span the whole
+		 * BTRFS_SUPER_INFO_SIZE range; we expect that the unused space
+		 * is filled with zeros and is included in the checksum.
+		 */
+		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
+				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
+		btrfs_csum_final(crc, result);
+
+		if (memcmp(raw_disk_sb, result, csum_size))
+			ret = 1;
+
+		if (ret && btrfs_super_generation(disk_sb) < 10) {
+			printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n");
+			ret = 0;
+		}
+	}
+
+	if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
+		printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n",
+				csum_type);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+/*
  * helper to read a given tree block, doing retries as required when
  * the checksums don't match and we have alternate mirrors to try.
  */
@@ -530,41 +576,6 @@
 	return 0;
 }
 
-struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
-				       struct page *page, int max_walk)
-{
-	struct extent_buffer *eb;
-	u64 start = page_offset(page);
-	u64 target = start;
-	u64 min_start;
-
-	if (start < max_walk)
-		min_start = 0;
-	else
-		min_start = start - max_walk;
-
-	while (start >= min_start) {
-		eb = find_extent_buffer(tree, start, 0);
-		if (eb) {
-			/*
-			 * we found an extent buffer and it contains our page
-			 * horray!
-			 */
-			if (eb->start <= target &&
-			    eb->start + eb->len > target)
-				return eb;
-
-			/* we found an extent buffer that wasn't for us */
-			free_extent_buffer(eb);
-			return NULL;
-		}
-		if (start == 0)
-			break;
-		start -= PAGE_CACHE_SIZE;
-	}
-	return NULL;
-}
-
 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 			       struct extent_state *state, int mirror)
 {
@@ -613,6 +624,12 @@
 		goto err;
 	}
 	found_level = btrfs_header_level(eb);
+	if (found_level >= BTRFS_MAX_LEVEL) {
+		btrfs_info(root->fs_info, "bad tree block level %d\n",
+			   (int)btrfs_header_level(eb));
+		ret = -EIO;
+		goto err;
+	}
 
 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
 				       eb, found_level);
@@ -636,10 +653,9 @@
 	if (!ret)
 		set_extent_buffer_uptodate(eb);
 err:
-	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
-		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+	if (reads_done &&
+	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 		btree_readahead_hook(root, eb, eb->start, ret);
-	}
 
 	if (ret) {
 		/*
@@ -993,14 +1009,8 @@
 {
 	if (PageWriteback(page) || PageDirty(page))
 		return 0;
-	/*
-	 * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
-	 * slab allocation from alloc_extent_state down the callchain where
-	 * it'd hit a BUG_ON as those flags are not allowed.
-	 */
-	gfp_flags &= ~GFP_SLAB_BUG_MASK;
 
-	return try_release_extent_buffer(page, gfp_flags);
+	return try_release_extent_buffer(page);
 }
 
 static void btree_invalidatepage(struct page *page, unsigned long offset)
@@ -1275,6 +1285,7 @@
 	struct btrfs_key key;
 	int ret = 0;
 	u64 bytenr;
+	uuid_le uuid;
 
 	root = btrfs_alloc_root(fs_info);
 	if (!root)
@@ -1324,6 +1335,8 @@
 	btrfs_set_root_used(&root->root_item, leaf->len);
 	btrfs_set_root_last_snapshot(&root->root_item, 0);
 	btrfs_set_root_dirid(&root->root_item, 0);
+	uuid_le_gen(&uuid);
+	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
 	root->root_item.drop_level = 0;
 
 	key.objectid = objectid;
@@ -1476,7 +1489,7 @@
 	if (ret == 0) {
 		l = path->nodes[0];
 		slot = path->slots[0];
-		btrfs_read_root_item(tree_root, l, slot, &root->root_item);
+		btrfs_read_root_item(l, slot, &root->root_item);
 		memcpy(&root->root_key, location, sizeof(*location));
 	}
 	btrfs_free_path(path);
@@ -1491,6 +1504,14 @@
 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
 				     blocksize, generation);
+	if (!root->node || !extent_buffer_uptodate(root->node)) {
+		ret = (!root->node) ? -ENOMEM : -EIO;
+
+		free_extent_buffer(root->node);
+		kfree(root);
+		return ERR_PTR(ret);
+	}
+
 	root->commit_root = btrfs_root_node(root);
 	BUG_ON(!root->node); /* -ENOMEM */
 out:
@@ -1658,15 +1679,20 @@
 	struct btrfs_root *root = arg;
 
 	do {
+		int again = 0;
+
 		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
-		    mutex_trylock(&root->fs_info->cleaner_mutex)) {
-			btrfs_run_delayed_iputs(root);
-			btrfs_clean_old_snapshots(root);
-			mutex_unlock(&root->fs_info->cleaner_mutex);
+		    down_read_trylock(&root->fs_info->sb->s_umount)) {
+			if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
+				btrfs_run_delayed_iputs(root);
+				again = btrfs_clean_one_deleted_snapshot(root);
+				mutex_unlock(&root->fs_info->cleaner_mutex);
+			}
 			btrfs_run_defrag_inodes(root->fs_info);
+			up_read(&root->fs_info->sb->s_umount);
 		}
 
-		if (!try_to_freeze()) {
+		if (!try_to_freeze() && !again) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1935,6 +1961,28 @@
 	return 0;
 }
 
+/* helper to cleanup workers */
+static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
+{
+	btrfs_stop_workers(&fs_info->generic_worker);
+	btrfs_stop_workers(&fs_info->fixup_workers);
+	btrfs_stop_workers(&fs_info->delalloc_workers);
+	btrfs_stop_workers(&fs_info->workers);
+	btrfs_stop_workers(&fs_info->endio_workers);
+	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
+	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
+	btrfs_stop_workers(&fs_info->endio_write_workers);
+	btrfs_stop_workers(&fs_info->endio_freespace_worker);
+	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->delayed_workers);
+	btrfs_stop_workers(&fs_info->caching_workers);
+	btrfs_stop_workers(&fs_info->readahead_workers);
+	btrfs_stop_workers(&fs_info->flush_workers);
+	btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
+}
+
 /* helper to cleanup tree roots */
 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 {
@@ -1972,6 +2020,36 @@
 	}
 }
 
+static void del_fs_roots(struct btrfs_fs_info *fs_info)
+{
+	int ret;
+	struct btrfs_root *gang[8];
+	int i;
+
+	while (!list_empty(&fs_info->dead_roots)) {
+		gang[0] = list_entry(fs_info->dead_roots.next,
+				     struct btrfs_root, root_list);
+		list_del(&gang[0]->root_list);
+
+		if (gang[0]->in_radix) {
+			btrfs_free_fs_root(fs_info, gang[0]);
+		} else {
+			free_extent_buffer(gang[0]->node);
+			free_extent_buffer(gang[0]->commit_root);
+			kfree(gang[0]);
+		}
+	}
+
+	while (1) {
+		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+					     (void **)gang, 0,
+					     ARRAY_SIZE(gang));
+		if (!ret)
+			break;
+		for (i = 0; i < ret; i++)
+			btrfs_free_fs_root(fs_info, gang[i]);
+	}
+}
 
 int open_ctree(struct super_block *sb,
 	       struct btrfs_fs_devices *fs_devices,
@@ -2060,6 +2138,7 @@
 	spin_lock_init(&fs_info->defrag_inodes_lock);
 	spin_lock_init(&fs_info->free_chunk_lock);
 	spin_lock_init(&fs_info->tree_mod_seq_lock);
+	spin_lock_init(&fs_info->super_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
 	seqlock_init(&fs_info->profiles_lock);
@@ -2083,7 +2162,7 @@
 	atomic_set(&fs_info->async_submit_draining, 0);
 	atomic_set(&fs_info->nr_async_bios, 0);
 	atomic_set(&fs_info->defrag_running, 0);
-	atomic_set(&fs_info->tree_mod_seq, 0);
+	atomic64_set(&fs_info->tree_mod_seq, 0);
 	fs_info->sb = sb;
 	fs_info->max_inline = 8192 * 1024;
 	fs_info->metadata_ratio = 0;
@@ -2187,11 +2266,13 @@
 	mutex_init(&fs_info->dev_replace.lock);
 
 	spin_lock_init(&fs_info->qgroup_lock);
+	mutex_init(&fs_info->qgroup_ioctl_lock);
 	fs_info->qgroup_tree = RB_ROOT;
 	INIT_LIST_HEAD(&fs_info->dirty_qgroups);
 	fs_info->qgroup_seq = 1;
 	fs_info->quota_enabled = 0;
 	fs_info->pending_quota_state = 0;
+	mutex_init(&fs_info->qgroup_rescan_lock);
 
 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
 	btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -2211,12 +2292,31 @@
 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
 	invalidate_bdev(fs_devices->latest_bdev);
+
+	/*
+	 * Read super block and check the signature bytes only
+	 */
 	bh = btrfs_read_dev_super(fs_devices->latest_bdev);
 	if (!bh) {
 		err = -EINVAL;
 		goto fail_alloc;
 	}
 
+	/*
+	 * We want to check superblock checksum, the type is stored inside.
+	 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
+	 */
+	if (btrfs_check_super_csum(bh->b_data)) {
+		printk(KERN_ERR "btrfs: superblock checksum mismatch\n");
+		err = -EINVAL;
+		goto fail_alloc;
+	}
+
+	/*
+	 * super_copy is zeroed at allocation time and we never touch the
+	 * following bytes up to INFO_SIZE; the checksum is calculated over
+	 * the whole block of INFO_SIZE.
+	 */
 	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
 	memcpy(fs_info->super_for_commit, fs_info->super_copy,
 	       sizeof(*fs_info->super_for_commit));
@@ -2224,6 +2324,13 @@
 
 	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
 
+	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
+	if (ret) {
+		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
+		err = -EINVAL;
+		goto fail_alloc;
+	}
+
 	disk_super = fs_info->super_copy;
 	if (!btrfs_super_root(disk_super))
 		goto fail_alloc;
@@ -2232,13 +2339,6 @@
 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
 		set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 
-	ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
-	if (ret) {
-		printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
-		err = ret;
-		goto fail_alloc;
-	}
-
 	/*
 	 * run through our array of backup supers and setup
 	 * our ring pointer to the oldest one
@@ -2290,6 +2390,9 @@
 	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
+	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+		printk(KERN_ERR "btrfs: has skinny extents\n");
+
 	/*
 	 * flag our filesystem as having big metadata blocks if
 	 * they are bigger than the page size
@@ -2319,6 +2422,10 @@
 		goto fail_alloc;
 	}
 
+	/*
+	 * No need to take the lock here: no other task can update the flags
+	 * at this point.
+	 */
 	btrfs_set_super_incompat_flags(disk_super, features);
 
 	features = btrfs_super_compat_ro_flags(disk_super) &
@@ -2394,6 +2501,8 @@
 	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1,
+			   &fs_info->generic_worker);
 
 	/*
 	 * endios are largely parallel and should have a very
@@ -2428,6 +2537,7 @@
 	ret |= btrfs_start_workers(&fs_info->caching_workers);
 	ret |= btrfs_start_workers(&fs_info->readahead_workers);
 	ret |= btrfs_start_workers(&fs_info->flush_workers);
+	ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
 	if (ret) {
 		err = -ENOMEM;
 		goto fail_sb_buffer;
@@ -2475,8 +2585,8 @@
 	chunk_root->node = read_tree_block(chunk_root,
 					   btrfs_super_chunk_root(disk_super),
 					   blocksize, generation);
-	BUG_ON(!chunk_root->node); /* -ENOMEM */
-	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+	if (!chunk_root->node ||
+	    !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
 		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
 		       sb->s_id);
 		goto fail_tree_roots;
@@ -2661,6 +2771,13 @@
 		log_tree_root->node = read_tree_block(tree_root, bytenr,
 						      blocksize,
 						      generation + 1);
+		if (!log_tree_root->node ||
+		    !extent_buffer_uptodate(log_tree_root->node)) {
+			printk(KERN_ERR "btrfs: failed to read log tree\n");
+			free_extent_buffer(log_tree_root->node);
+			kfree(log_tree_root);
+			goto fail_trans_kthread;
+		}
 		/* returns with log_tree_root freed on success */
 		ret = btrfs_recover_log_trees(log_tree_root);
 		if (ret) {
@@ -2740,6 +2857,8 @@
 	btrfs_free_qgroup_config(fs_info);
 fail_trans_kthread:
 	kthread_stop(fs_info->transaction_kthread);
+	del_fs_roots(fs_info);
+	btrfs_cleanup_transaction(fs_info->tree_root);
 fail_cleaner:
 	kthread_stop(fs_info->cleaner_kthread);
 
@@ -2750,6 +2869,7 @@
 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
 fail_block_groups:
+	btrfs_put_block_group_cache(fs_info);
 	btrfs_free_block_groups(fs_info);
 
 fail_tree_roots:
@@ -2757,22 +2877,7 @@
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 
 fail_sb_buffer:
-	btrfs_stop_workers(&fs_info->generic_worker);
-	btrfs_stop_workers(&fs_info->readahead_workers);
-	btrfs_stop_workers(&fs_info->fixup_workers);
-	btrfs_stop_workers(&fs_info->delalloc_workers);
-	btrfs_stop_workers(&fs_info->workers);
-	btrfs_stop_workers(&fs_info->endio_workers);
-	btrfs_stop_workers(&fs_info->endio_meta_workers);
-	btrfs_stop_workers(&fs_info->endio_raid56_workers);
-	btrfs_stop_workers(&fs_info->rmw_workers);
-	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
-	btrfs_stop_workers(&fs_info->endio_write_workers);
-	btrfs_stop_workers(&fs_info->endio_freespace_worker);
-	btrfs_stop_workers(&fs_info->submit_workers);
-	btrfs_stop_workers(&fs_info->delayed_workers);
-	btrfs_stop_workers(&fs_info->caching_workers);
-	btrfs_stop_workers(&fs_info->flush_workers);
+	btrfs_stop_all_workers(fs_info);
 fail_alloc:
 fail_iput:
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2904,7 +3009,10 @@
 		if (wait) {
 			bh = __find_get_block(device->bdev, bytenr / 4096,
 					      BTRFS_SUPER_INFO_SIZE);
-			BUG_ON(!bh);
+			if (!bh) {
+				errors++;
+				continue;
+			}
 			wait_on_buffer(bh);
 			if (!buffer_uptodate(bh))
 				errors++;
@@ -2919,7 +3027,7 @@
 			btrfs_set_super_bytenr(sb, bytenr);
 
 			crc = ~(u32)0;
-			crc = btrfs_csum_data(NULL, (char *)sb +
+			crc = btrfs_csum_data((char *)sb +
 					      BTRFS_CSUM_SIZE, crc,
 					      BTRFS_SUPER_INFO_SIZE -
 					      BTRFS_CSUM_SIZE);
@@ -2931,6 +3039,13 @@
 			 */
 			bh = __getblk(device->bdev, bytenr / 4096,
 				      BTRFS_SUPER_INFO_SIZE);
+			if (!bh) {
+				printk(KERN_ERR "btrfs: couldn't get super "
+				       "buffer head for bytenr %Lu\n", bytenr);
+				errors++;
+				continue;
+			}
+
 			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
 
 			/* one reference for submit_bh */
@@ -3153,7 +3268,7 @@
 	return num_tolerated_disk_barrier_failures;
 }
 
-int write_all_supers(struct btrfs_root *root, int max_mirrors)
+static int write_all_supers(struct btrfs_root *root, int max_mirrors)
 {
 	struct list_head *head;
 	struct btrfs_device *dev;
@@ -3283,37 +3398,6 @@
 	kfree(root);
 }
 
-static void del_fs_roots(struct btrfs_fs_info *fs_info)
-{
-	int ret;
-	struct btrfs_root *gang[8];
-	int i;
-
-	while (!list_empty(&fs_info->dead_roots)) {
-		gang[0] = list_entry(fs_info->dead_roots.next,
-				     struct btrfs_root, root_list);
-		list_del(&gang[0]->root_list);
-
-		if (gang[0]->in_radix) {
-			btrfs_free_fs_root(fs_info, gang[0]);
-		} else {
-			free_extent_buffer(gang[0]->node);
-			free_extent_buffer(gang[0]->commit_root);
-			kfree(gang[0]);
-		}
-	}
-
-	while (1) {
-		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
-					     (void **)gang, 0,
-					     ARRAY_SIZE(gang));
-		if (!ret)
-			break;
-		for (i = 0; i < ret; i++)
-			btrfs_free_fs_root(fs_info, gang[i]);
-	}
-}
-
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
 {
 	u64 root_objectid = 0;
@@ -3349,8 +3433,8 @@
 
 	mutex_lock(&root->fs_info->cleaner_mutex);
 	btrfs_run_delayed_iputs(root);
-	btrfs_clean_old_snapshots(root);
 	mutex_unlock(&root->fs_info->cleaner_mutex);
+	wake_up_process(root->fs_info->cleaner_kthread);
 
 	/* wait until ongoing cleanup work done */
 	down_write(&root->fs_info->cleanup_work_sem);
@@ -3426,20 +3510,7 @@
 		       percpu_counter_sum(&fs_info->delalloc_bytes));
 	}
 
-	free_extent_buffer(fs_info->extent_root->node);
-	free_extent_buffer(fs_info->extent_root->commit_root);
-	free_extent_buffer(fs_info->tree_root->node);
-	free_extent_buffer(fs_info->tree_root->commit_root);
-	free_extent_buffer(fs_info->chunk_root->node);
-	free_extent_buffer(fs_info->chunk_root->commit_root);
-	free_extent_buffer(fs_info->dev_root->node);
-	free_extent_buffer(fs_info->dev_root->commit_root);
-	free_extent_buffer(fs_info->csum_root->node);
-	free_extent_buffer(fs_info->csum_root->commit_root);
-	if (fs_info->quota_root) {
-		free_extent_buffer(fs_info->quota_root->node);
-		free_extent_buffer(fs_info->quota_root->commit_root);
-	}
+	free_root_pointers(fs_info, 1);
 
 	btrfs_free_block_groups(fs_info);
 
@@ -3447,22 +3518,7 @@
 
 	iput(fs_info->btree_inode);
 
-	btrfs_stop_workers(&fs_info->generic_worker);
-	btrfs_stop_workers(&fs_info->fixup_workers);
-	btrfs_stop_workers(&fs_info->delalloc_workers);
-	btrfs_stop_workers(&fs_info->workers);
-	btrfs_stop_workers(&fs_info->endio_workers);
-	btrfs_stop_workers(&fs_info->endio_meta_workers);
-	btrfs_stop_workers(&fs_info->endio_raid56_workers);
-	btrfs_stop_workers(&fs_info->rmw_workers);
-	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
-	btrfs_stop_workers(&fs_info->endio_write_workers);
-	btrfs_stop_workers(&fs_info->endio_freespace_worker);
-	btrfs_stop_workers(&fs_info->submit_workers);
-	btrfs_stop_workers(&fs_info->delayed_workers);
-	btrfs_stop_workers(&fs_info->caching_workers);
-	btrfs_stop_workers(&fs_info->readahead_workers);
-	btrfs_stop_workers(&fs_info->flush_workers);
+	btrfs_stop_all_workers(fs_info);
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_test_opt(root, CHECK_INTEGRITY))
@@ -3567,18 +3623,13 @@
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 			      int read_only)
 {
-	if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
-		printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
-		return -EINVAL;
-	}
-
-	if (read_only)
-		return 0;
-
+	/*
+	 * Placeholder for checks
+	 */
 	return 0;
 }
 
-void btrfs_error_commit_super(struct btrfs_root *root)
+static void btrfs_error_commit_super(struct btrfs_root *root)
 {
 	mutex_lock(&root->fs_info->cleaner_mutex);
 	btrfs_run_delayed_iputs(root);
@@ -3669,6 +3720,9 @@
 				continue;
 			}
 
+			if (head->must_insert_reserved)
+				btrfs_pin_extent(root, ref->bytenr,
+						 ref->num_bytes, 1);
 			btrfs_free_delayed_extent_op(head->extent_op);
 			delayed_refs->num_heads--;
 			if (list_empty(&head->cluster))
@@ -3740,13 +3794,9 @@
 					int mark)
 {
 	int ret;
-	struct page *page;
-	struct inode *btree_inode = root->fs_info->btree_inode;
 	struct extent_buffer *eb;
 	u64 start = 0;
 	u64 end;
-	u64 offset;
-	unsigned long index;
 
 	while (1) {
 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -3756,36 +3806,17 @@
 
 		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
 		while (start <= end) {
-			index = start >> PAGE_CACHE_SHIFT;
-			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-			page = find_get_page(btree_inode->i_mapping, index);
-			if (!page)
+			eb = btrfs_find_tree_block(root, start,
+						   root->leafsize);
+			start += root->leafsize;
+			if (!eb)
+				continue;
-			offset = page_offset(page);
+			wait_on_extent_buffer_writeback(eb);
 
-			spin_lock(&dirty_pages->buffer_lock);
-			eb = radix_tree_lookup(
-			     &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
-					       offset >> PAGE_CACHE_SHIFT);
-			spin_unlock(&dirty_pages->buffer_lock);
-			if (eb)
-				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
-							 &eb->bflags);
-			if (PageWriteback(page))
-				end_page_writeback(page);
-
-			lock_page(page);
-			if (PageDirty(page)) {
-				clear_page_dirty_for_io(page);
-				spin_lock_irq(&page->mapping->tree_lock);
-				radix_tree_tag_clear(&page->mapping->page_tree,
-							page_index(page),
-							PAGECACHE_TAG_DIRTY);
-				spin_unlock_irq(&page->mapping->tree_lock);
-			}
-
-			unlock_page(page);
-			page_cache_release(page);
+			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+					       &eb->bflags))
+				clear_extent_buffer_dirty(eb);
+			free_extent_buffer_stale(eb);
 		}
 	}
 
@@ -3866,7 +3897,7 @@
 	*/
 }
 
-int btrfs_cleanup_transaction(struct btrfs_root *root)
+static int btrfs_cleanup_transaction(struct btrfs_root *root)
 {
 	struct btrfs_transaction *t;
 	LIST_HEAD(list);
@@ -3887,10 +3918,6 @@
 
 		btrfs_destroy_delayed_refs(t, root);
 
-		btrfs_block_rsv_release(root,
-					&root->fs_info->trans_block_rsv,
-					t->dirty_pages.dirty_bytes);
-
 		/* FIXME: cleanup wait for commit */
 		t->in_commit = 1;
 		t->blocked = 1;
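The core of the new btrfs_check_super_csum() above: the first BTRFS_CSUM_SIZE bytes of the 4k superblock hold the stored checksum, the CRC-32C is computed over everything after them, and the two are compared byte-for-byte. A userspace sketch of the same check; crc32c() is an assumed stand-in for the kernel's crc32c(), and the explicit little-endian store mirrors what btrfs_csum_final() does:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SUPER_INFO_SIZE	4096	/* whole on-disk superblock */
#define CSUM_SIZE	32	/* checksum field at the start */

/* assumed: any CRC-32C implementation taking (seed, data, len) */
uint32_t crc32c(uint32_t seed, const void *data, size_t len);

/* returns 0 when the stored and computed checksums match */
static int check_super_csum(const unsigned char *raw_sb)
{
	uint32_t crc = ~0U;
	unsigned char result[4];

	crc = crc32c(crc, raw_sb + CSUM_SIZE, SUPER_INFO_SIZE - CSUM_SIZE);

	/* store little-endian, as btrfs_csum_final() does */
	result[0] = crc & 0xff;
	result[1] = (crc >> 8) & 0xff;
	result[2] = (crc >> 16) & 0xff;
	result[3] = (crc >> 24) & 0xff;

	return memcmp(raw_sb, result, sizeof(result)) ? 1 : 0;
}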
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 034d7dc..be69ce1 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -61,7 +61,6 @@
 		      struct btrfs_root *root, int max_mirrors);
 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_commit_super(struct btrfs_root *root);
-void btrfs_error_commit_super(struct btrfs_root *root);
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize);
 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
@@ -77,7 +76,7 @@
 			  int atomic);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
-u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
+u32 btrfs_csum_data(char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, char *result);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			int metadata);
@@ -93,10 +92,8 @@
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
-int btrfs_cleanup_transaction(struct btrfs_root *root);
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
 				  struct btrfs_root *root);
-void btrfs_abort_devices(struct btrfs_root *root);
 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				     struct btrfs_fs_info *fs_info,
 				     u64 objectid);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3d55123..2305b5c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -105,6 +105,8 @@
 				       u64 num_bytes, int reserve);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
 			       u64 num_bytes);
+int btrfs_pin_extent(struct btrfs_root *root,
+		     u64 bytenr, u64 num_bytes, int reserved);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -270,9 +272,27 @@
 			return ret;
 
 		while (nr--) {
-			cache->bytes_super += stripe_len;
-			ret = add_excluded_extent(root, logical[nr],
-						  stripe_len);
+			u64 start, len;
+
+			if (logical[nr] > cache->key.objectid +
+			    cache->key.offset)
+				continue;
+
+			if (logical[nr] + stripe_len <= cache->key.objectid)
+				continue;
+
+			start = logical[nr];
+			if (start < cache->key.objectid) {
+				start = cache->key.objectid;
+				len = (logical[nr] + stripe_len) - start;
+			} else {
+				len = min_t(u64, stripe_len,
+					    cache->key.objectid +
+					    cache->key.offset - start);
+			}
+
+			cache->bytes_super += len;
+			ret = add_excluded_extent(root, start, len);
 			if (ret) {
 				kfree(logical);
 				return ret;
@@ -419,8 +439,7 @@
 			if (ret)
 				break;
 
-			if (need_resched() ||
-			    btrfs_next_leaf(extent_root, path)) {
+			if (need_resched()) {
 				caching_ctl->progress = last;
 				btrfs_release_path(path);
 				up_read(&fs_info->extent_commit_sem);
@@ -428,6 +447,12 @@
 				cond_resched();
 				goto again;
 			}
+
+			ret = btrfs_next_leaf(extent_root, path);
+			if (ret < 0)
+				goto err;
+			if (ret)
+				break;
 			leaf = path->nodes[0];
 			nritems = btrfs_header_nritems(leaf);
 			continue;
@@ -442,11 +467,16 @@
 		    block_group->key.offset)
 			break;
 
-		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+		    key.type == BTRFS_METADATA_ITEM_KEY) {
 			total_found += add_new_free_space(block_group,
 							  fs_info, last,
 							  key.objectid);
-			last = key.objectid + key.offset;
+			if (key.type == BTRFS_METADATA_ITEM_KEY)
+				last = key.objectid +
+					fs_info->tree_root->leafsize;
+			else
+				last = key.objectid + key.offset;
 
 			if (total_found > (1024 * 1024 * 2)) {
 				total_found = 0;
@@ -656,55 +686,6 @@
 	rcu_read_unlock();
 }
 
-u64 btrfs_find_block_group(struct btrfs_root *root,
-			   u64 search_start, u64 search_hint, int owner)
-{
-	struct btrfs_block_group_cache *cache;
-	u64 used;
-	u64 last = max(search_hint, search_start);
-	u64 group_start = 0;
-	int full_search = 0;
-	int factor = 9;
-	int wrapped = 0;
-again:
-	while (1) {
-		cache = btrfs_lookup_first_block_group(root->fs_info, last);
-		if (!cache)
-			break;
-
-		spin_lock(&cache->lock);
-		last = cache->key.objectid + cache->key.offset;
-		used = btrfs_block_group_used(&cache->item);
-
-		if ((full_search || !cache->ro) &&
-		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
-			if (used + cache->pinned + cache->reserved <
-			    div_factor(cache->key.offset, factor)) {
-				group_start = cache->key.objectid;
-				spin_unlock(&cache->lock);
-				btrfs_put_block_group(cache);
-				goto found;
-			}
-		}
-		spin_unlock(&cache->lock);
-		btrfs_put_block_group(cache);
-		cond_resched();
-	}
-	if (!wrapped) {
-		last = search_start;
-		wrapped = 1;
-		goto again;
-	}
-	if (!full_search && factor < 10) {
-		last = search_start;
-		full_search = 1;
-		factor = 10;
-		goto again;
-	}
-found:
-	return group_start;
-}
-
 /* simple helper to search for an existing extent at a given offset */
 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
 {
@@ -718,15 +699,21 @@
 
 	key.objectid = start;
 	key.offset = len;
-	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
+	key.type = BTRFS_EXTENT_ITEM_KEY;
 	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
 				0, 0);
+	if (ret > 0) {
+		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+		if (key.objectid == start &&
+		    key.type == BTRFS_METADATA_ITEM_KEY)
+			ret = 0;
+	}
 	btrfs_free_path(path);
 	return ret;
 }
 
 /*
- * helper function to lookup reference count and flags of extent.
+ * helper function to lookup reference count and flags of a tree block.
  *
  * the head node for delayed ref is used to store the sum of all the
  * reference count modifications queued up in the rbtree. the head
@@ -736,7 +723,7 @@
  */
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 bytenr,
-			     u64 num_bytes, u64 *refs, u64 *flags)
+			     u64 offset, int metadata, u64 *refs, u64 *flags)
 {
 	struct btrfs_delayed_ref_head *head;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -749,13 +736,29 @@
 	u64 extent_flags;
 	int ret;
 
+	/*
+	 * If we don't have skinny metadata, don't bother doing anything
+	 * different
+	 */
+	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
+		offset = root->leafsize;
+		metadata = 0;
+	}
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
-	key.objectid = bytenr;
-	key.type = BTRFS_EXTENT_ITEM_KEY;
-	key.offset = num_bytes;
+	if (metadata) {
+		key.objectid = bytenr;
+		key.type = BTRFS_METADATA_ITEM_KEY;
+		key.offset = offset;
+	} else {
+		key.objectid = bytenr;
+		key.type = BTRFS_EXTENT_ITEM_KEY;
+		key.offset = offset;
+	}
+
 	if (!trans) {
 		path->skip_locking = 1;
 		path->search_commit_root = 1;
@@ -766,6 +769,13 @@
 	if (ret < 0)
 		goto out_free;
 
+	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
+		key.type = BTRFS_EXTENT_ITEM_KEY;
+		key.offset = root->leafsize;
+		btrfs_release_path(path);
+		goto again;
+	}
+
 	if (ret == 0) {
 		leaf = path->nodes[0];
 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
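The lookup above encodes the skinny-metadata scheme: a tree block's extent reference is keyed either (bytenr, METADATA_ITEM, level) in the new compact form or (bytenr, EXTENT_ITEM, leafsize) in the old fat form, so a miss on the first key falls back to the second. Schematically (search() is a hypothetical stand-in for btrfs_search_slot(); the key-type values match btrfs's on-disk format):

#include <stdint.h>

enum { EXTENT_ITEM_KEY = 168, METADATA_ITEM_KEY = 169 };

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

/* stand-in for btrfs_search_slot(): 0 = found, > 0 = not found */
int search(struct key *key);

int lookup_tree_block(uint64_t bytenr, int level, uint32_t leafsize)
{
	struct key key = { bytenr, METADATA_ITEM_KEY, (uint64_t)level };
	int ret = search(&key);

	if (ret > 0) {				/* no skinny item: retry with */
		key.type = EXTENT_ITEM_KEY;	/* the old fat key, whose     */
		key.offset = leafsize;		/* offset is the block size   */
		ret = search(&key);
	}
	return ret;
}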
@@ -1001,7 +1011,7 @@
 		return ret;
 	BUG_ON(ret); /* Corruption */
 
-	btrfs_extend_item(trans, root, path, new_size);
+	btrfs_extend_item(root, path, new_size);
 
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1453,6 +1463,8 @@
 	int want;
 	int ret;
 	int err = 0;
+	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+						 SKINNY_METADATA);
 
 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -1464,11 +1476,46 @@
 		path->keep_locks = 1;
 	} else
 		extra_size = -1;
+
+	/*
+	 * Owner is our parent level, so we can just add one to get the level
+	 * for the block we are interested in.
+	 */
+	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
+		key.type = BTRFS_METADATA_ITEM_KEY;
+		key.offset = owner;
+	}
+
+again:
 	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
 	if (ret < 0) {
 		err = ret;
 		goto out;
 	}
+
+	/*
+	 * We may be a newly converted file system which still has the old fat
+	 * extent entries for metadata, so try and see if we have one of those.
+	 */
+	if (ret > 0 && skinny_metadata) {
+		skinny_metadata = false;
+		if (path->slots[0]) {
+			path->slots[0]--;
+			btrfs_item_key_to_cpu(path->nodes[0], &key,
+					      path->slots[0]);
+			if (key.objectid == bytenr &&
+			    key.type == BTRFS_EXTENT_ITEM_KEY &&
+			    key.offset == num_bytes)
+				ret = 0;
+		}
+		if (ret) {
+			key.type = BTRFS_EXTENT_ITEM_KEY;
+			key.offset = num_bytes;
+			btrfs_release_path(path);
+			goto again;
+		}
+	}
+
 	if (ret && !insert) {
 		err = -ENOENT;
 		goto out;
@@ -1504,11 +1551,9 @@
 	ptr = (unsigned long)(ei + 1);
 	end = (unsigned long)ei + item_size;
 
-	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
 		ptr += sizeof(struct btrfs_tree_block_info);
 		BUG_ON(ptr > end);
-	} else {
-		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
 	}
 
 	err = -ENOENT;
@@ -1590,8 +1635,7 @@
  * helper to add new inline back ref
  */
 static noinline_for_stack
-void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root,
+void setup_inline_extent_backref(struct btrfs_root *root,
 				 struct btrfs_path *path,
 				 struct btrfs_extent_inline_ref *iref,
 				 u64 parent, u64 root_objectid,
@@ -1614,7 +1658,7 @@
 	type = extent_ref_type(parent, owner);
 	size = btrfs_extent_inline_ref_size(type);
 
-	btrfs_extend_item(trans, root, path, size);
+	btrfs_extend_item(root, path, size);
 
 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
 	refs = btrfs_extent_refs(leaf, ei);
@@ -1683,8 +1727,7 @@
  * helper to update/remove inline back ref
  */
 static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root,
+void update_inline_extent_backref(struct btrfs_root *root,
 				  struct btrfs_path *path,
 				  struct btrfs_extent_inline_ref *iref,
 				  int refs_to_mod,
@@ -1740,7 +1783,7 @@
 			memmove_extent_buffer(leaf, ptr, ptr + size,
 					      end - ptr - size);
 		item_size -= size;
-		btrfs_truncate_item(trans, root, path, item_size, 1);
+		btrfs_truncate_item(root, path, item_size, 1);
 	}
 	btrfs_mark_buffer_dirty(leaf);
 }
@@ -1762,10 +1805,10 @@
 					   root_objectid, owner, offset, 1);
 	if (ret == 0) {
 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
-		update_inline_extent_backref(trans, root, path, iref,
+		update_inline_extent_backref(root, path, iref,
 					     refs_to_add, extent_op);
 	} else if (ret == -ENOENT) {
-		setup_inline_extent_backref(trans, root, path, iref, parent,
+		setup_inline_extent_backref(root, path, iref, parent,
 					    root_objectid, owner, offset,
 					    refs_to_add, extent_op);
 		ret = 0;
@@ -1802,7 +1845,7 @@
 
 	BUG_ON(!is_data && refs_to_drop != 1);
 	if (iref) {
-		update_inline_extent_backref(trans, root, path, iref,
+		update_inline_extent_backref(root, path, iref,
 					     -refs_to_drop, NULL);
 	} else if (is_data) {
 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
@@ -1973,10 +2016,8 @@
 		ref_root = ref->root;
 
 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
-		if (extent_op) {
-			BUG_ON(extent_op->update_key);
+		if (extent_op)
 			flags |= extent_op->flags_to_set;
-		}
 		ret = alloc_reserved_file_extent(trans, root,
 						 parent, ref_root, flags,
 						 ref->objectid, ref->offset,
@@ -2029,18 +2070,33 @@
 	u32 item_size;
 	int ret;
 	int err = 0;
+	int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
+			node->type == BTRFS_SHARED_BLOCK_REF_KEY);
 
 	if (trans->aborted)
 		return 0;
 
+	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+		metadata = 0;
+
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
 	key.objectid = node->bytenr;
-	key.type = BTRFS_EXTENT_ITEM_KEY;
-	key.offset = node->num_bytes;
 
+	if (metadata) {
+		struct btrfs_delayed_tree_ref *tree_ref;
+
+		tree_ref = btrfs_delayed_node_to_tree_ref(node);
+		key.type = BTRFS_METADATA_ITEM_KEY;
+		key.offset = tree_ref->level;
+	} else {
+		key.type = BTRFS_EXTENT_ITEM_KEY;
+		key.offset = node->num_bytes;
+	}
+
+again:
 	path->reada = 1;
 	path->leave_spinning = 1;
 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
@@ -2050,6 +2106,14 @@
 		goto out;
 	}
 	if (ret > 0) {
+		if (metadata) {
+			btrfs_release_path(path);
+			metadata = 0;
+
+			key.offset = node->num_bytes;
+			key.type = BTRFS_EXTENT_ITEM_KEY;
+			goto again;
+		}
 		err = -EIO;
 		goto out;
 	}
@@ -2089,10 +2153,8 @@
 	struct btrfs_key ins;
 	u64 parent = 0;
 	u64 ref_root = 0;
-
-	ins.objectid = node->bytenr;
-	ins.offset = node->num_bytes;
-	ins.type = BTRFS_EXTENT_ITEM_KEY;
+	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+						 SKINNY_METADATA);
 
 	ref = btrfs_delayed_node_to_tree_ref(node);
 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
@@ -2100,10 +2162,18 @@
 	else
 		ref_root = ref->root;
 
+	ins.objectid = node->bytenr;
+	if (skinny_metadata) {
+		ins.offset = ref->level;
+		ins.type = BTRFS_METADATA_ITEM_KEY;
+	} else {
+		ins.offset = node->num_bytes;
+		ins.type = BTRFS_EXTENT_ITEM_KEY;
+	}
+
 	BUG_ON(node->ref_mod != 1);
 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
-		BUG_ON(!extent_op || !extent_op->update_flags ||
-		       !extent_op->update_key);
+		BUG_ON(!extent_op || !extent_op->update_flags);
 		ret = alloc_reserved_tree_block(trans, root,
 						parent, ref_root,
 						extent_op->flags_to_set,
@@ -2307,9 +2377,7 @@
 				btrfs_free_delayed_extent_op(extent_op);
 
 				if (ret) {
-					printk(KERN_DEBUG
-					       "btrfs: run_delayed_extent_op "
-					       "returned %d\n", ret);
+					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
 					spin_lock(&delayed_refs->lock);
 					btrfs_delayed_ref_unlock(locked_ref);
 					return ret;
@@ -2348,8 +2416,7 @@
 		if (ret) {
 			btrfs_delayed_ref_unlock(locked_ref);
 			btrfs_put_delayed_ref(ref);
-			printk(KERN_DEBUG
-			       "btrfs: run_one_delayed_ref returned %d\n", ret);
+			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
 			spin_lock(&delayed_refs->lock);
 			return ret;
 		}
@@ -2426,9 +2493,11 @@
 	if (list_empty(&trans->qgroup_ref_list) !=
 	    !trans->delayed_ref_elem.seq) {
 		/* list without seq or seq without list */
-		printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
+		btrfs_err(fs_info,
+			"qgroup accounting update error, list is%s empty, seq is %#x.%x",
 			list_empty(&trans->qgroup_ref_list) ? "" : " not",
-			trans->delayed_ref_elem.seq);
+			(u32)(trans->delayed_ref_elem.seq >> 32),
+			(u32)trans->delayed_ref_elem.seq);
 		BUG();
 	}
 
@@ -3337,7 +3406,7 @@
  * progress (either running or paused) picks the target profile (if it's
  * already available), otherwise falls back to plain reducing.
  */
-u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
 	/*
 	 * we add in the count of missing devices because we want
@@ -3557,6 +3626,11 @@
 	rcu_read_unlock();
 }
 
+static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
+{
+	return (global->size << 1);
+}
+
 static int should_alloc_chunk(struct btrfs_root *root,
 			      struct btrfs_space_info *sinfo, int force)
 {
@@ -3574,7 +3648,7 @@
 	 * global_rsv, it doesn't change except when the transaction commits.
 	 */
 	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
-		num_allocated += global_rsv->size;
+		num_allocated += calc_global_rsv_need_space(global_rsv);
 
 	/*
 	 * in limited mode, we want to have some free space up to
@@ -3627,8 +3701,8 @@
 
 	thresh = get_system_chunk_thresh(root, type);
 	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
-		printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
-		       left, thresh, type);
+		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
+			left, thresh, type);
 		dump_space_info(info, 0, 0);
 	}
 
@@ -3746,7 +3820,7 @@
 {
 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 	u64 profile = btrfs_get_alloc_profile(root, 0);
-	u64 rsv_size = 0;
+	u64 space_size;
 	u64 avail;
 	u64 used;
 	u64 to_add;
@@ -3754,18 +3828,16 @@
 	used = space_info->bytes_used + space_info->bytes_reserved +
 		space_info->bytes_pinned + space_info->bytes_readonly;
 
-	spin_lock(&global_rsv->lock);
-	rsv_size = global_rsv->size;
-	spin_unlock(&global_rsv->lock);
-
 	/*
 	 * We only want to allow over committing if we have lots of actual space
 	 * free, but if we don't have enough space to handle the global reserve
 	 * space then we could end up having a real enospc problem when trying
 	 * to allocate a chunk or some other such important allocation.
 	 */
-	rsv_size <<= 1;
-	if (used + rsv_size >= space_info->total_bytes)
+	spin_lock(&global_rsv->lock);
+	space_size = calc_global_rsv_need_space(global_rsv);
+	spin_unlock(&global_rsv->lock);
+	if (used + space_size >= space_info->total_bytes)
 		return 0;
 
 	used += space_info->bytes_may_use;
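The overcommit change above means metadata overcommit is refused unless there is headroom of twice the global reserve's size (calc_global_rsv_need_space() is just size << 1). The arithmetic in isolation, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t total_bytes     = 8ULL << 30;   /* 8 GiB of metadata space */
	uint64_t used            = 7ULL << 30;   /* used + reserved + pinned + readonly */
	uint64_t global_rsv_size = 512ULL << 20; /* 512 MiB global reserve */

	/* need_space = size << 1, per calc_global_rsv_need_space() */
	uint64_t space_size = global_rsv_size << 1;

	if (used + space_size >= total_bytes)
		printf("no overcommit: would eat into global reserve headroom\n");
	else
		printf("overcommit allowed\n");
	return 0;
}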
@@ -3808,8 +3880,8 @@
 	return 0;
 }
 
-void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
-				  unsigned long nr_pages)
+static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+					 unsigned long nr_pages)
 {
 	struct super_block *sb = root->fs_info->sb;
 	int started;
@@ -3826,7 +3898,8 @@
 		 * the disk).
 		 */
 		btrfs_start_delalloc_inodes(root, 0);
-		btrfs_wait_ordered_extents(root, 0);
+		if (!current->journal_info)
+			btrfs_wait_ordered_extents(root, 0);
 	}
 }
 
@@ -5090,9 +5163,11 @@
 				    u64 bytenr, u64 num_bytes)
 {
 	struct btrfs_block_group_cache *cache;
+	int ret;
 
 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
-	BUG_ON(!cache); /* Logic error */
+	if (!cache)
+		return -EINVAL;
 
 	/*
 	 * pull in the free space cache (if any) so that our pin
@@ -5105,9 +5180,9 @@
 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
 
 	/* remove us from the free space cache (if we're there at all) */
-	btrfs_remove_free_space(cache, bytenr, num_bytes);
+	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
 	btrfs_put_block_group(cache);
-	return 0;
+	return ret;
 }
 
 /**
@@ -5312,6 +5387,8 @@
 	int num_to_del = 1;
 	u32 item_size;
 	u64 refs;
+	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+						 SKINNY_METADATA);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -5323,6 +5400,9 @@
 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
 	BUG_ON(!is_data && refs_to_drop != 1);
 
+	if (is_data)
+		skinny_metadata = 0;
+
 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
 				    bytenr, num_bytes, parent,
 				    root_objectid, owner_objectid,
@@ -5339,6 +5419,11 @@
 				found_extent = 1;
 				break;
 			}
+			if (key.type == BTRFS_METADATA_ITEM_KEY &&
+			    key.offset == owner_objectid) {
+				found_extent = 1;
+				break;
+			}
 			if (path->slots[0] - extent_slot > 5)
 				break;
 			extent_slot--;
@@ -5364,12 +5449,39 @@
 			key.type = BTRFS_EXTENT_ITEM_KEY;
 			key.offset = num_bytes;
 
+			if (!is_data && skinny_metadata) {
+				key.type = BTRFS_METADATA_ITEM_KEY;
+				key.offset = owner_objectid;
+			}
+
 			ret = btrfs_search_slot(trans, extent_root,
 						&key, path, -1, 1);
+			if (ret > 0 && skinny_metadata && path->slots[0]) {
+				/*
+				 * Couldn't find our skinny metadata item,
+				 * see if we have ye olde extent item.
+				 */
+				path->slots[0]--;
+				btrfs_item_key_to_cpu(path->nodes[0], &key,
+						      path->slots[0]);
+				if (key.objectid == bytenr &&
+				    key.type == BTRFS_EXTENT_ITEM_KEY &&
+				    key.offset == num_bytes)
+					ret = 0;
+			}
+
+			if (ret > 0 && skinny_metadata) {
+				skinny_metadata = false;
+				key.type = BTRFS_EXTENT_ITEM_KEY;
+				key.offset = num_bytes;
+				btrfs_release_path(path);
+				ret = btrfs_search_slot(trans, extent_root,
+							&key, path, -1, 1);
+			}
+
 			if (ret) {
-				printk(KERN_ERR "umm, got %d back from search"
-				       ", was looking for %llu\n", ret,
-				       (unsigned long long)bytenr);
+				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
+					ret, (unsigned long long)bytenr);
 				if (ret > 0)
 					btrfs_print_leaf(extent_root,
 							 path->nodes[0]);
@@ -5383,13 +5495,13 @@
 	} else if (ret == -ENOENT) {
 		btrfs_print_leaf(extent_root, path->nodes[0]);
 		WARN_ON(1);
-		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
-		       "parent %llu root %llu  owner %llu offset %llu\n",
-		       (unsigned long long)bytenr,
-		       (unsigned long long)parent,
-		       (unsigned long long)root_objectid,
-		       (unsigned long long)owner_objectid,
-		       (unsigned long long)owner_offset);
+		btrfs_err(info,
+			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
+			(unsigned long long)bytenr,
+			(unsigned long long)parent,
+			(unsigned long long)root_objectid,
+			(unsigned long long)owner_objectid,
+			(unsigned long long)owner_offset);
 	} else {
 		btrfs_abort_transaction(trans, extent_root, ret);
 		goto out;
@@ -5417,9 +5529,8 @@
 		ret = btrfs_search_slot(trans, extent_root, &key, path,
 					-1, 1);
 		if (ret) {
-			printk(KERN_ERR "umm, got %d back from search"
-			       ", was looking for %llu\n", ret,
-			       (unsigned long long)bytenr);
+			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
+				ret, (unsigned long long)bytenr);
 			btrfs_print_leaf(extent_root, path->nodes[0]);
 		}
 		if (ret < 0) {
@@ -5435,7 +5546,8 @@
 	BUG_ON(item_size < sizeof(*ei));
 	ei = btrfs_item_ptr(leaf, extent_slot,
 			    struct btrfs_extent_item);
-	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
+	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
+	    key.type == BTRFS_EXTENT_ITEM_KEY) {
 		struct btrfs_tree_block_info *bi;
 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
 		bi = (struct btrfs_tree_block_info *)(ei + 1);
@@ -5443,7 +5555,13 @@
 	}
 
 	refs = btrfs_extent_refs(leaf, ei);
-	BUG_ON(refs < refs_to_drop);
+	if (refs < refs_to_drop) {
+		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
+			  "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
+		ret = -EINVAL;
+		btrfs_abort_transaction(trans, extent_root, ret);
+		goto out;
+	}
 	refs -= refs_to_drop;
 
 	if (refs > 0) {
@@ -5758,7 +5876,7 @@
 				     struct btrfs_root *orig_root,
 				     u64 num_bytes, u64 empty_size,
 				     u64 hint_byte, struct btrfs_key *ins,
-				     u64 data)
+				     u64 flags)
 {
 	int ret = 0;
 	struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -5769,8 +5887,8 @@
 	int empty_cluster = 2 * 1024 * 1024;
 	struct btrfs_space_info *space_info;
 	int loop = 0;
-	int index = __get_raid_index(data);
-	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
+	int index = __get_raid_index(flags);
+	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
@@ -5783,11 +5901,11 @@
 	ins->objectid = 0;
 	ins->offset = 0;
 
-	trace_find_free_extent(orig_root, num_bytes, empty_size, data);
+	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
 
-	space_info = __find_space_info(root->fs_info, data);
+	space_info = __find_space_info(root->fs_info, flags);
 	if (!space_info) {
-		printk(KERN_ERR "No space info for %llu\n", data);
+		btrfs_err(root->fs_info, "No space info for %llu", flags);
 		return -ENOSPC;
 	}
 
@@ -5798,13 +5916,13 @@
 	if (btrfs_mixed_space_info(space_info))
 		use_cluster = false;
 
-	if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
+	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
 		last_ptr = &root->fs_info->meta_alloc_cluster;
 		if (!btrfs_test_opt(root, SSD))
 			empty_cluster = 64 * 1024;
 	}
 
-	if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
+	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
 	    btrfs_test_opt(root, SSD)) {
 		last_ptr = &root->fs_info->data_alloc_cluster;
 	}
@@ -5833,7 +5951,7 @@
 		 * However if we are re-searching with an ideal block group
 		 * picked out then we don't care that the block group is cached.
 		 */
-		if (block_group && block_group_bits(block_group, data) &&
+		if (block_group && block_group_bits(block_group, flags) &&
 		    block_group->cached != BTRFS_CACHE_NO) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
@@ -5871,7 +5989,7 @@
 		 * raid types, but we want to make sure we only allocate
 		 * for the proper type.
 		 */
-		if (!block_group_bits(block_group, data)) {
+		if (!block_group_bits(block_group, flags)) {
 		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
 				BTRFS_BLOCK_GROUP_RAID1 |
 				BTRFS_BLOCK_GROUP_RAID5 |
@@ -5883,7 +6001,7 @@
 			 * doesn't provide them, bail.  This does allow us to
 			 * fill raid0 from raid1.
 			 */
-			if ((data & extra) && !(block_group->flags & extra))
+			if ((flags & extra) && !(block_group->flags & extra))
 				goto loop;
 		}
 
@@ -5914,7 +6032,7 @@
 			if (used_block_group != block_group &&
 			    (!used_block_group ||
 			     used_block_group->ro ||
-			     !block_group_bits(used_block_group, data))) {
+			     !block_group_bits(used_block_group, flags))) {
 				used_block_group = block_group;
 				goto refill_cluster;
 			}
@@ -6110,7 +6228,7 @@
 		index = 0;
 		loop++;
 		if (loop == LOOP_ALLOC_CHUNK) {
-			ret = do_chunk_alloc(trans, root, data,
+			ret = do_chunk_alloc(trans, root, flags,
 					     CHUNK_ALLOC_FORCE);
 			/*
 			 * Do not bail out on ENOSPC since we
@@ -6188,16 +6306,17 @@
 			 struct btrfs_root *root,
 			 u64 num_bytes, u64 min_alloc_size,
 			 u64 empty_size, u64 hint_byte,
-			 struct btrfs_key *ins, u64 data)
+			 struct btrfs_key *ins, int is_data)
 {
 	bool final_tried = false;
+	u64 flags;
 	int ret;
 
-	data = btrfs_get_alloc_profile(root, data);
+	flags = btrfs_get_alloc_profile(root, is_data);
 again:
 	WARN_ON(num_bytes < root->sectorsize);
 	ret = find_free_extent(trans, root, num_bytes, empty_size,
-			       hint_byte, ins, data);
+			       hint_byte, ins, flags);
 
 	if (ret == -ENOSPC) {
 		if (!final_tried) {
@@ -6210,10 +6329,10 @@
 		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
 			struct btrfs_space_info *sinfo;
 
-			sinfo = __find_space_info(root->fs_info, data);
-			printk(KERN_ERR "btrfs allocation failed flags %llu, "
-			       "wanted %llu\n", (unsigned long long)data,
-			       (unsigned long long)num_bytes);
+			sinfo = __find_space_info(root->fs_info, flags);
+			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
+				(unsigned long long)flags,
+				(unsigned long long)num_bytes);
 			if (sinfo)
 				dump_space_info(sinfo, num_bytes, 1);
 		}
@@ -6232,8 +6351,8 @@
 
 	cache = btrfs_lookup_block_group(root->fs_info, start);
 	if (!cache) {
-		printk(KERN_ERR "Unable to find block group for %llu\n",
-		       (unsigned long long)start);
+		btrfs_err(root->fs_info, "Unable to find block group for %llu",
+			(unsigned long long)start);
 		return -ENOSPC;
 	}
 
@@ -6328,9 +6447,9 @@
 
 	ret = update_block_group(root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
-		printk(KERN_ERR "btrfs update block group failed for %llu "
-		       "%llu\n", (unsigned long long)ins->objectid,
-		       (unsigned long long)ins->offset);
+		btrfs_err(fs_info, "update block group failed for %llu %llu",
+			(unsigned long long)ins->objectid,
+			(unsigned long long)ins->offset);
 		BUG();
 	}
 	return ret;
@@ -6349,7 +6468,12 @@
 	struct btrfs_extent_inline_ref *iref;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
-	u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
+	u32 size = sizeof(*extent_item) + sizeof(*iref);
+	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+						 SKINNY_METADATA);
+
+	if (!skinny_metadata)
+		size += sizeof(*block_info);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -6370,12 +6494,16 @@
 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
 	btrfs_set_extent_flags(leaf, extent_item,
 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
-	block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
 
-	btrfs_set_tree_block_key(leaf, block_info, key);
-	btrfs_set_tree_block_level(leaf, block_info, level);
+	if (skinny_metadata) {
+		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
+	} else {
+		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
+		btrfs_set_tree_block_key(leaf, block_info, key);
+		btrfs_set_tree_block_level(leaf, block_info, level);
+		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
+	}
 
-	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
 	if (parent > 0) {
 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
 		btrfs_set_extent_inline_ref_type(leaf, iref,
@@ -6390,11 +6518,11 @@
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_free_path(path);
 
-	ret = update_block_group(root, ins->objectid, ins->offset, 1);
+	ret = update_block_group(root, ins->objectid, root->leafsize, 1);
 	if (ret) { /* -ENOENT, logic error */
-		printk(KERN_ERR "btrfs update block group failed for %llu "
-		       "%llu\n", (unsigned long long)ins->objectid,
-		       (unsigned long long)ins->offset);
+		btrfs_err(fs_info, "update block group failed for %llu %llu",
+			(unsigned long long)ins->objectid,
+			(unsigned long long)ins->offset);
 		BUG();
 	}
 	return ret;
@@ -6439,47 +6567,48 @@
 	if (!caching_ctl) {
 		BUG_ON(!block_group_cache_done(block_group));
 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			goto out;
 	} else {
 		mutex_lock(&caching_ctl->mutex);
 
 		if (start >= caching_ctl->progress) {
 			ret = add_excluded_extent(root, start, num_bytes);
-			BUG_ON(ret); /* -ENOMEM */
 		} else if (start + num_bytes <= caching_ctl->progress) {
 			ret = btrfs_remove_free_space(block_group,
 						      start, num_bytes);
-			BUG_ON(ret); /* -ENOMEM */
 		} else {
 			num_bytes = caching_ctl->progress - start;
 			ret = btrfs_remove_free_space(block_group,
 						      start, num_bytes);
-			BUG_ON(ret); /* -ENOMEM */
+			if (ret)
+				goto out_lock;
 
 			start = caching_ctl->progress;
 			num_bytes = ins->objectid + ins->offset -
 				    caching_ctl->progress;
 			ret = add_excluded_extent(root, start, num_bytes);
-			BUG_ON(ret); /* -ENOMEM */
 		}
-
+out_lock:
 		mutex_unlock(&caching_ctl->mutex);
 		put_caching_control(caching_ctl);
+		if (ret)
+			goto out;
 	}
 
 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
 					  RESERVE_ALLOC_NO_ACCOUNT);
 	BUG_ON(ret); /* logic error */
-	btrfs_put_block_group(block_group);
 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
 					 0, owner, offset, ins, 1);
+out:
+	btrfs_put_block_group(block_group);
 	return ret;
 }
 
-struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
-					    struct btrfs_root *root,
-					    u64 bytenr, u32 blocksize,
-					    int level)
+static struct extent_buffer *
+btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+		      u64 bytenr, u32 blocksize, int level)
 {
 	struct extent_buffer *buf;
 
@@ -6594,7 +6723,8 @@
 	struct extent_buffer *buf;
 	u64 flags = 0;
 	int ret;
-
+	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
+						 SKINNY_METADATA);
 
 	block_rsv = use_block_rsv(trans, root, blocksize);
 	if (IS_ERR(block_rsv))
@@ -6627,7 +6757,10 @@
 		else
 			memset(&extent_op->key, 0, sizeof(extent_op->key));
 		extent_op->flags_to_set = flags;
-		extent_op->update_key = 1;
+		if (skinny_metadata)
+			extent_op->update_key = 0;
+		else
+			extent_op->update_key = 1;
 		extent_op->update_flags = 1;
 		extent_op->is_data = 0;
 
@@ -6704,8 +6837,9 @@
 			continue;
 
 		/* We don't lock the tree block, it's OK to be racy here */
-		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
-					       &refs, &flags);
+		ret = btrfs_lookup_extent_info(trans, root, bytenr,
+					       wc->level - 1, 1, &refs,
+					       &flags);
 		/* We don't care about errors in readahead. */
 		if (ret < 0)
 			continue;
@@ -6772,7 +6906,7 @@
 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
 		BUG_ON(!path->locks[level]);
 		ret = btrfs_lookup_extent_info(trans, root,
-					       eb->start, eb->len,
+					       eb->start, level, 1,
 					       &wc->refs[level],
 					       &wc->flags[level]);
 		BUG_ON(ret == -ENOMEM);
@@ -6870,7 +7004,7 @@
 	btrfs_tree_lock(next);
 	btrfs_set_lock_blocking(next);
 
-	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
 				       &wc->refs[level - 1],
 				       &wc->flags[level - 1]);
 	if (ret < 0) {
@@ -6878,7 +7012,10 @@
 		return ret;
 	}
 
-	BUG_ON(wc->refs[level - 1] == 0);
+	if (unlikely(wc->refs[level - 1] == 0)) {
+		btrfs_err(root->fs_info, "Missing references.");
+		BUG();
+	}
 	*lookup_info = 0;
 
 	if (wc->stage == DROP_REFERENCE) {
@@ -6917,8 +7054,10 @@
 		if (reada && level == 1)
 			reada_walk_down(trans, root, wc, path);
 		next = read_tree_block(root, bytenr, blocksize, generation);
-		if (!next)
+		if (!next || !extent_buffer_uptodate(next)) {
+			free_extent_buffer(next);
 			return -EIO;
+		}
 		btrfs_tree_lock(next);
 		btrfs_set_lock_blocking(next);
 	}
@@ -7001,7 +7140,7 @@
 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
 			ret = btrfs_lookup_extent_info(trans, root,
-						       eb->start, eb->len,
+						       eb->start, level, 1,
 						       &wc->refs[level],
 						       &wc->flags[level]);
 			if (ret < 0) {
@@ -7137,6 +7276,8 @@
  * reference count by one. if update_ref is true, this function
  * also make sure backrefs for the shared block and all lower level
  * blocks are properly updated.
+ *
+ * If called with for_reloc == 0, this function may exit early with -EAGAIN.
  */
 int btrfs_drop_snapshot(struct btrfs_root *root,
 			 struct btrfs_block_rsv *block_rsv, int update_ref,
@@ -7211,8 +7352,7 @@
 
 			ret = btrfs_lookup_extent_info(trans, root,
 						path->nodes[level]->start,
-						path->nodes[level]->len,
-						&wc->refs[level],
+						level, 1, &wc->refs[level],
 						&wc->flags[level]);
 			if (ret < 0) {
 				err = ret;
@@ -7238,6 +7378,12 @@
 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
 	while (1) {
+		if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
+			pr_debug("btrfs: drop snapshot early exit\n");
+			err = -EAGAIN;
+			goto out_end_trans;
+		}
+
 		ret = walk_down_tree(trans, root, path, wc);
 		if (ret < 0) {
 			err = ret;
@@ -8020,10 +8166,26 @@
 			free_excluded_extents(root, cache);
 		}
 
+		ret = btrfs_add_block_group_cache(root->fs_info, cache);
+		if (ret) {
+			btrfs_remove_free_space_cache(cache);
+			btrfs_put_block_group(cache);
+			goto error;
+		}
+
 		ret = update_space_info(info, cache->flags, found_key.offset,
 					btrfs_block_group_used(&cache->item),
 					&space_info);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret) {
+			btrfs_remove_free_space_cache(cache);
+			spin_lock(&info->block_group_cache_lock);
+			rb_erase(&cache->cache_node,
+				 &info->block_group_cache_tree);
+			spin_unlock(&info->block_group_cache_lock);
+			btrfs_put_block_group(cache);
+			goto error;
+		}
+
 		cache->space_info = space_info;
 		spin_lock(&cache->space_info->lock);
 		cache->space_info->bytes_readonly += cache->bytes_super;
@@ -8031,9 +8193,6 @@
 
 		__link_block_group(space_info, cache);
 
-		ret = btrfs_add_block_group_cache(root->fs_info, cache);
-		BUG_ON(ret); /* Logic error */
-
 		set_avail_alloc_bits(root->fs_info, cache->flags);
 		if (btrfs_chunk_readonly(root, cache->key.objectid))
 			set_block_group_ro(cache, 1);
@@ -8156,9 +8315,24 @@
 
 	free_excluded_extents(root, cache);
 
+	ret = btrfs_add_block_group_cache(root->fs_info, cache);
+	if (ret) {
+		btrfs_remove_free_space_cache(cache);
+		btrfs_put_block_group(cache);
+		return ret;
+	}
+
 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
 				&cache->space_info);
-	BUG_ON(ret); /* -ENOMEM */
+	if (ret) {
+		btrfs_remove_free_space_cache(cache);
+		spin_lock(&root->fs_info->block_group_cache_lock);
+		rb_erase(&cache->cache_node,
+			 &root->fs_info->block_group_cache_tree);
+		spin_unlock(&root->fs_info->block_group_cache_lock);
+		btrfs_put_block_group(cache);
+		return ret;
+	}
 	update_global_block_rsv(root->fs_info);
 
 	spin_lock(&cache->space_info->lock);
@@ -8167,9 +8341,6 @@
 
 	__link_block_group(cache->space_info, cache);
 
-	ret = btrfs_add_block_group_cache(root->fs_info, cache);
-	BUG_ON(ret); /* Logic error */
-
 	list_add_tail(&cache->new_bg_list, &trans->new_bgs);
 
 	set_avail_alloc_bits(extent_root->fs_info, type);
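
The extent-tree.c hunks above implement the skinny-metadata lookup with a two-stage fallback: search for the new BTRFS_METADATA_ITEM_KEY first, peek at the previous leaf slot in case the block is still recorded as an old-style extent item, and only then redo the search with the classic key. A minimal standalone sketch of that control flow, with hypothetical search()/key_at() helpers standing in for btrfs_search_slot() and btrfs_item_key_to_cpu():

	/*
	 * Illustration only, not the kernel code.  Models the lookup order
	 * above: try the skinny key, peek at the previous slot, then fall
	 * back to a fresh search with the old-style key.
	 */
	#include <stdbool.h>

	struct key { unsigned type; unsigned long long objectid, offset; };

	/* hypothetical: returns 0 on an exact hit, >0 when only an
	 * insertion point was found (mirroring btrfs_search_slot) */
	extern int search(struct key *k, int *slot);
	extern void key_at(int slot, struct key *out);

	static int lookup_extent(unsigned long long bytenr,
				 unsigned long long num_bytes,
				 unsigned long long owner, bool skinny)
	{
		struct key k = { .objectid = bytenr };
		int slot, ret;

		k.type = skinny ? 1 /* METADATA_ITEM */ : 2 /* EXTENT_ITEM */;
		k.offset = skinny ? owner : num_bytes;

		ret = search(&k, &slot);
		if (ret > 0 && skinny && slot) {
			/* an old-style item would sort just before the miss */
			key_at(--slot, &k);
			if (k.objectid == bytenr &&
			    k.type == 2 /* EXTENT_ITEM */ &&
			    k.offset == num_bytes)
				ret = 0;
		}
		if (ret > 0 && skinny) {
			/* last resort: redo the search with the old key */
			k.type = 2;
			k.offset = num_bytes;
			ret = search(&k, &slot);
		}
		return ret;
	}

Checking the previous slot before re-searching avoids a second tree walk in the common case of a filesystem that mixes both item formats.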
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cdee391..32d67a8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -24,12 +24,62 @@
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
+#ifdef CONFIG_BTRFS_DEBUG
 static LIST_HEAD(buffers);
 static LIST_HEAD(states);
 
-#define LEAK_DEBUG 0
-#if LEAK_DEBUG
 static DEFINE_SPINLOCK(leak_lock);
+
+static inline
+void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&leak_lock, flags);
+	list_add(new, head);
+	spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_del(struct list_head *entry)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&leak_lock, flags);
+	list_del(entry);
+	spin_unlock_irqrestore(&leak_lock, flags);
+}
+
+static inline
+void btrfs_leak_debug_check(void)
+{
+	struct extent_state *state;
+	struct extent_buffer *eb;
+
+	while (!list_empty(&states)) {
+		state = list_entry(states.next, struct extent_state, leak_list);
+		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		       "state %lu in tree %p refs %d\n",
+		       (unsigned long long)state->start,
+		       (unsigned long long)state->end,
+		       state->state, state->tree, atomic_read(&state->refs));
+		list_del(&state->leak_list);
+		kmem_cache_free(extent_state_cache, state);
+	}
+
+	while (!list_empty(&buffers)) {
+		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
+		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		       "refs %d\n", (unsigned long long)eb->start,
+		       eb->len, atomic_read(&eb->refs));
+		list_del(&eb->leak_list);
+		kmem_cache_free(extent_buffer_cache, eb);
+	}
+}
+#else
+#define btrfs_leak_debug_add(new, head)	do {} while (0)
+#define btrfs_leak_debug_del(entry)	do {} while (0)
+#define btrfs_leak_debug_check()	do {} while (0)
 #endif
 
 #define BUFFER_LRU_MAX 64
@@ -84,29 +134,7 @@
 
 void extent_io_exit(void)
 {
-	struct extent_state *state;
-	struct extent_buffer *eb;
-
-	while (!list_empty(&states)) {
-		state = list_entry(states.next, struct extent_state, leak_list);
-		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
-		       "state %lu in tree %p refs %d\n",
-		       (unsigned long long)state->start,
-		       (unsigned long long)state->end,
-		       state->state, state->tree, atomic_read(&state->refs));
-		list_del(&state->leak_list);
-		kmem_cache_free(extent_state_cache, state);
-
-	}
-
-	while (!list_empty(&buffers)) {
-		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
-		       "refs %d\n", (unsigned long long)eb->start,
-		       eb->len, atomic_read(&eb->refs));
-		list_del(&eb->leak_list);
-		kmem_cache_free(extent_buffer_cache, eb);
-	}
+	btrfs_leak_debug_check();
 
 	/*
 	 * Make sure all delayed rcu free are flushed before we
@@ -134,9 +162,6 @@
 static struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
-#if LEAK_DEBUG
-	unsigned long flags;
-#endif
 
 	state = kmem_cache_alloc(extent_state_cache, mask);
 	if (!state)
@@ -144,11 +169,7 @@
 	state->state = 0;
 	state->private = 0;
 	state->tree = NULL;
-#if LEAK_DEBUG
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&state->leak_list, &states);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_add(&state->leak_list, &states);
 	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
 	trace_alloc_extent_state(state, mask, _RET_IP_);
@@ -160,15 +181,8 @@
 	if (!state)
 		return;
 	if (atomic_dec_and_test(&state->refs)) {
-#if LEAK_DEBUG
-		unsigned long flags;
-#endif
 		WARN_ON(state->tree);
-#if LEAK_DEBUG
-		spin_lock_irqsave(&leak_lock, flags);
-		list_del(&state->leak_list);
-		spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+		btrfs_leak_debug_del(&state->leak_list);
 		trace_free_extent_state(state, _RET_IP_);
 		kmem_cache_free(extent_state_cache, state);
 	}
@@ -308,21 +322,21 @@
 }
 
 static void set_state_cb(struct extent_io_tree *tree,
-			 struct extent_state *state, int *bits)
+			 struct extent_state *state, unsigned long *bits)
 {
 	if (tree->ops && tree->ops->set_bit_hook)
 		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
-			   struct extent_state *state, int *bits)
+			   struct extent_state *state, unsigned long *bits)
 {
 	if (tree->ops && tree->ops->clear_bit_hook)
 		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
-			   struct extent_state *state, int *bits);
+			   struct extent_state *state, unsigned long *bits);
 
 /*
  * insert an extent_state struct into the tree.  'bits' are set on the
@@ -336,7 +350,7 @@
  */
 static int insert_state(struct extent_io_tree *tree,
 			struct extent_state *state, u64 start, u64 end,
-			int *bits)
+			unsigned long *bits)
 {
 	struct rb_node *node;
 
@@ -424,10 +438,10 @@
  */
 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
 					    struct extent_state *state,
-					    int *bits, int wake)
+					    unsigned long *bits, int wake)
 {
 	struct extent_state *next;
-	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
+	unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
 
 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 		u64 range = state->end - state->start + 1;
@@ -463,7 +477,7 @@
 	return prealloc;
 }
 
-void extent_io_tree_panic(struct extent_io_tree *tree, int err)
+static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 {
 	btrfs_panic(tree_fs_info(tree), err, "Locking error: "
 		    "Extent tree was modified by another "
@@ -483,7 +497,7 @@
  * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, int wake, int delete,
+		     unsigned long bits, int wake, int delete,
 		     struct extent_state **cached_state,
 		     gfp_t mask)
 {
@@ -644,7 +658,8 @@
  * The range [start, end] is inclusive.
  * The tree lock is taken by this function
  */
-void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
+static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+			    unsigned long bits)
 {
 	struct extent_state *state;
 	struct rb_node *node;
@@ -685,9 +700,9 @@
 
 static void set_state_bits(struct extent_io_tree *tree,
 			   struct extent_state *state,
-			   int *bits)
+			   unsigned long *bits)
 {
-	int bits_to_set = *bits & ~EXTENT_CTLBITS;
+	unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
 
 	set_state_cb(tree, state, bits);
 	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
@@ -730,8 +745,9 @@
 
 static int __must_check
 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		 int bits, int exclusive_bits, u64 *failed_start,
-		 struct extent_state **cached_state, gfp_t mask)
+		 unsigned long bits, unsigned long exclusive_bits,
+		 u64 *failed_start, struct extent_state **cached_state,
+		 gfp_t mask)
 {
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
@@ -923,9 +939,9 @@
 	goto again;
 }
 
-int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
-		   u64 *failed_start, struct extent_state **cached_state,
-		   gfp_t mask)
+int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		   unsigned long bits, u64 *failed_start,
+		   struct extent_state **cached_state, gfp_t mask)
 {
 	return __set_extent_bit(tree, start, end, bits, 0, failed_start,
 				cached_state, mask);
@@ -950,7 +966,7 @@
  * boundary bits like LOCK.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       int bits, int clear_bits,
+		       unsigned long bits, unsigned long clear_bits,
 		       struct extent_state **cached_state, gfp_t mask)
 {
 	struct extent_state *state;
@@ -1143,14 +1159,14 @@
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    int bits, gfp_t mask)
+		    unsigned long bits, gfp_t mask)
 {
 	return set_extent_bit(tree, start, end, bits, NULL,
 			      NULL, mask);
 }
 
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      int bits, gfp_t mask)
+		      unsigned long bits, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 }
@@ -1189,7 +1205,7 @@
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask)
 {
-	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
 			      cached_state, mask);
 }
 
@@ -1205,7 +1221,7 @@
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, struct extent_state **cached_state)
+		     unsigned long bits, struct extent_state **cached_state)
 {
 	int err;
 	u64 failed_start;
@@ -1313,8 +1329,9 @@
  * return it.  tree->lock must be held.  NULL will returned if
  * nothing was found after 'start'
  */
-struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
-						 u64 start, int bits)
+static struct extent_state *
+find_first_extent_bit_state(struct extent_io_tree *tree,
+			    u64 start, unsigned long bits)
 {
 	struct rb_node *node;
 	struct extent_state *state;
@@ -1348,7 +1365,7 @@
  * If nothing was found, 1 is returned. If found something, return 0.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, int bits,
+			  u64 *start_ret, u64 *end_ret, unsigned long bits,
 			  struct extent_state **cached_state)
 {
 	struct extent_state *state;
@@ -1638,7 +1655,7 @@
 	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
-	int clear_bits = 0;
+	unsigned long clear_bits = 0;
 
 	if (op & EXTENT_CLEAR_UNLOCK)
 		clear_bits |= EXTENT_LOCKED;
@@ -1777,6 +1794,64 @@
 	return ret;
 }
 
+void extent_cache_csums_dio(struct extent_io_tree *tree, u64 start, u32 csums[],
+			    int count)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	BUG_ON(!node);
+
+	state = rb_entry(node, struct extent_state, rb_node);
+	BUG_ON(state->start != start);
+
+	while (count) {
+		state->private = *csums++;
+		count--;
+		state = next_state(state);
+	}
+	spin_unlock(&tree->lock);
+}
+
+static inline u64 __btrfs_get_bio_offset(struct bio *bio, int bio_index)
+{
+	struct bio_vec *bvec = bio->bi_io_vec + bio_index;
+
+	return page_offset(bvec->bv_page) + bvec->bv_offset;
+}
+
+void extent_cache_csums(struct extent_io_tree *tree, struct bio *bio, int bio_index,
+			u32 csums[], int count)
+{
+	struct rb_node *node;
+	struct extent_state *state = NULL;
+	u64 start;
+
+	spin_lock(&tree->lock);
+	do {
+		start = __btrfs_get_bio_offset(bio, bio_index);
+		if (state == NULL || state->start != start) {
+			node = tree_search(tree, start);
+			BUG_ON(!node);
+
+			state = rb_entry(node, struct extent_state, rb_node);
+			BUG_ON(state->start != start);
+		}
+		state->private = *csums++;
+		count--;
+		bio_index++;
+
+		state = next_state(state);
+	} while (count);
+	spin_unlock(&tree->lock);
+}
+
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
 {
 	struct rb_node *node;
@@ -1811,7 +1886,7 @@
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled, struct extent_state *cached)
+		   unsigned long bits, int filled, struct extent_state *cached)
 {
 	struct extent_state *state = NULL;
 	struct rb_node *node;
@@ -2560,8 +2635,7 @@
 		if (old_compressed)
 			contig = bio->bi_sector == sector;
 		else
-			contig = bio->bi_sector + (bio->bi_size >> 9) ==
-				sector;
+			contig = bio_end_sector(bio) == sector;
 
 		if (prev_bio_flags != bio_flags || !contig ||
 		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
@@ -2596,7 +2670,8 @@
 	return ret;
 }
 
-void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
+static void attach_extent_buffer_page(struct extent_buffer *eb,
+				      struct page *page)
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
@@ -2626,7 +2701,7 @@
 				   struct page *page,
 				   get_extent_t *get_extent,
 				   struct bio **bio, int mirror_num,
-				   unsigned long *bio_flags)
+				   unsigned long *bio_flags, int rw)
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
@@ -2772,7 +2847,7 @@
 		}
 
 		pnr -= page->index;
-		ret = submit_extent_page(READ, tree, page,
+		ret = submit_extent_page(rw, tree, page,
 					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
@@ -2805,7 +2880,7 @@
 	int ret;
 
 	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
-				      &bio_flags);
+				      &bio_flags, READ);
 	if (bio)
 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 	return ret;
@@ -3104,7 +3179,7 @@
 	return 0;
 }
 
-static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
 {
 	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
 		    TASK_UNINTERRUPTIBLE);
@@ -3229,7 +3304,7 @@
 	u64 offset = eb->start;
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
-	int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
+	int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
@@ -3666,14 +3741,14 @@
 			continue;
 		for (i = 0; i < nr; i++) {
 			__extent_read_full_page(tree, pagepool[i], get_extent,
-					&bio, 0, &bio_flags);
+					&bio, 0, &bio_flags, READ);
 			page_cache_release(pagepool[i]);
 		}
 		nr = 0;
 	}
 	for (i = 0; i < nr; i++) {
 		__extent_read_full_page(tree, pagepool[i], get_extent,
-					&bio, 0, &bio_flags);
+					&bio, 0, &bio_flags, READ);
 		page_cache_release(pagepool[i]);
 	}
 
@@ -3714,9 +3789,9 @@
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
  */
-int try_release_extent_state(struct extent_map_tree *map,
-			     struct extent_io_tree *tree, struct page *page,
-			     gfp_t mask)
+static int try_release_extent_state(struct extent_map_tree *map,
+				    struct extent_io_tree *tree,
+				    struct page *page, gfp_t mask)
 {
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
@@ -4007,12 +4082,7 @@
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
-#if LEAK_DEBUG
-	unsigned long flags;
-	spin_lock_irqsave(&leak_lock, flags);
-	list_del(&eb->leak_list);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_del(&eb->leak_list);
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
@@ -4022,9 +4092,6 @@
 						   gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
-#if LEAK_DEBUG
-	unsigned long flags;
-#endif
 
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	if (eb == NULL)
@@ -4044,11 +4111,8 @@
 	init_waitqueue_head(&eb->write_lock_wq);
 	init_waitqueue_head(&eb->read_lock_wq);
 
-#if LEAK_DEBUG
-	spin_lock_irqsave(&leak_lock, flags);
-	list_add(&eb->leak_list, &buffers);
-	spin_unlock_irqrestore(&leak_lock, flags);
-#endif
+	btrfs_leak_debug_add(&eb->leak_list, &buffers);
+
 	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
 	atomic_set(&eb->io_pages, 0);
@@ -4386,7 +4450,7 @@
 }
 
 /* Expects to have eb->eb_lock already held */
-static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+static int release_extent_buffer(struct extent_buffer *eb)
 {
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
@@ -4444,7 +4508,7 @@
 	 * I know this is terrible, but it's temporary until we stop tracking
 	 * the uptodate bits and such for the extent buffers.
 	 */
-	release_extent_buffer(eb, GFP_ATOMIC);
+	release_extent_buffer(eb);
 }
 
 void free_extent_buffer_stale(struct extent_buffer *eb)
@@ -4458,7 +4522,7 @@
 	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		atomic_dec(&eb->refs);
-	release_extent_buffer(eb, GFP_NOFS);
+	release_extent_buffer(eb);
 }
 
 void clear_extent_buffer_dirty(struct extent_buffer *eb)
@@ -4510,17 +4574,6 @@
 	return was_dirty;
 }
 
-static int range_straddles_pages(u64 start, u64 len)
-{
-	if (len < PAGE_CACHE_SIZE)
-		return 1;
-	if (start & (PAGE_CACHE_SIZE - 1))
-		return 1;
-	if ((start + len) & (PAGE_CACHE_SIZE - 1))
-		return 1;
-	return 0;
-}
-
 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
 {
 	unsigned long i;
@@ -4552,37 +4605,6 @@
 	return 0;
 }
 
-int extent_range_uptodate(struct extent_io_tree *tree,
-			  u64 start, u64 end)
-{
-	struct page *page;
-	int ret;
-	int pg_uptodate = 1;
-	int uptodate;
-	unsigned long index;
-
-	if (range_straddles_pages(start, end - start + 1)) {
-		ret = test_range_bit(tree, start, end,
-				     EXTENT_UPTODATE, 1, NULL);
-		if (ret)
-			return 1;
-	}
-	while (start <= end) {
-		index = start >> PAGE_CACHE_SHIFT;
-		page = find_get_page(tree->mapping, index);
-		if (!page)
-			return 1;
-		uptodate = PageUptodate(page);
-		page_cache_release(page);
-		if (!uptodate) {
-			pg_uptodate = 0;
-			break;
-		}
-		start += PAGE_CACHE_SIZE;
-	}
-	return pg_uptodate;
-}
-
 int extent_buffer_uptodate(struct extent_buffer *eb)
 {
 	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -4645,7 +4667,8 @@
 			ClearPageError(page);
 			err = __extent_read_full_page(tree, page,
 						      get_extent, &bio,
-						      mirror_num, &bio_flags);
+						      mirror_num, &bio_flags,
+						      READ | REQ_META);
 			if (err)
 				ret = err;
 		} else {
@@ -4654,7 +4677,8 @@
 	}
 
 	if (bio) {
-		err = submit_one_bio(READ, bio, mirror_num, bio_flags);
+		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
+				     bio_flags);
 		if (err)
 			return err;
 	}
@@ -5018,7 +5042,7 @@
 	}
 }
 
-int try_release_extent_buffer(struct page *page, gfp_t mask)
+int try_release_extent_buffer(struct page *page)
 {
 	struct extent_buffer *eb;
 
@@ -5048,9 +5072,6 @@
 	}
 	spin_unlock(&page->mapping->private_lock);
 
-	if ((mask & GFP_NOFS) == GFP_NOFS)
-		mask = GFP_NOFS;
-
 	/*
 	 * If tree ref isn't set then we know the ref on this eb is a real ref,
 	 * so just return, this page will likely be freed soon anyway.
@@ -5060,5 +5081,5 @@
 		return 0;
 	}
 
-	return release_extent_buffer(eb, mask);
+	return release_extent_buffer(eb);
 }
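
The extent_io.c conversion above replaces the ad-hoc LEAK_DEBUG blocks with three helpers that compile to no-ops unless CONFIG_BTRFS_DEBUG is set, so the leak lists, their spinlock, and the per-call flag saving all vanish from production builds. A toy model of the same pattern in userspace C, with invented names and a pthread mutex in place of the irq-safe spinlock:

	#ifdef DEBUG_LEAK
	#include <pthread.h>

	static pthread_mutex_t leak_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node { struct node *next; void *obj; } *live;

	static void leak_add(struct node *n, void *obj)
	{
		/* track every live object while debugging */
		pthread_mutex_lock(&leak_lock);
		n->obj = obj;
		n->next = live;
		live = n;
		pthread_mutex_unlock(&leak_lock);
	}
	#else
	/* compiles away entirely when the debug option is off */
	#define leak_add(n, obj) do {} while (0)
	#endif

Moving the CONFIG guard into the helpers also lets the leak_list members themselves become conditional, which is exactly what the extent_io.h hunk below does.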
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 258c921..a2c03a1 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -81,9 +81,9 @@
 	int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
 				      struct extent_state *state, int uptodate);
 	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
-			     int *bits);
+			     unsigned long *bits);
 	void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
-			       int *bits);
+			       unsigned long *bits);
 	void (*merge_extent_hook)(struct inode *inode,
 				  struct extent_state *new,
 				  struct extent_state *other);
@@ -116,7 +116,9 @@
 	/* for use by the FS */
 	u64 private;
 
+#ifdef CONFIG_BTRFS_DEBUG
 	struct list_head leak_list;
+#endif
 };
 
 #define INLINE_EXTENT_BUFFER_PAGES 16
@@ -132,7 +134,6 @@
 	atomic_t refs;
 	atomic_t io_pages;
 	int read_mirror;
-	struct list_head leak_list;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;
 
@@ -159,6 +160,9 @@
 	wait_queue_head_t read_lock_wq;
 	wait_queue_head_t lock_wq;
 	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
+#ifdef CONFIG_BTRFS_DEBUG
+	struct list_head leak_list;
+#endif
 };
 
 static inline void extent_set_compress_type(unsigned long *bio_flags,
@@ -185,13 +189,10 @@
 int try_release_extent_mapping(struct extent_map_tree *map,
 			       struct extent_io_tree *tree, struct page *page,
 			       gfp_t mask);
-int try_release_extent_buffer(struct page *page, gfp_t mask);
-int try_release_extent_state(struct extent_map_tree *map,
-			     struct extent_io_tree *tree, struct page *page,
-			     gfp_t mask);
+int try_release_extent_buffer(struct page *page);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, struct extent_state **cached);
+		     unsigned long bits, struct extent_state **cached);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 			 struct extent_state **cached, gfp_t mask);
@@ -207,16 +208,17 @@
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled, struct extent_state *cached_state);
+		   unsigned long bits, int filled,
+		   struct extent_state *cached_state);
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		      int bits, gfp_t mask);
+		      unsigned long bits, gfp_t mask);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, int wake, int delete, struct extent_state **cached,
-		     gfp_t mask);
+		     unsigned long bits, int wake, int delete,
+		     struct extent_state **cached, gfp_t mask);
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		    int bits, gfp_t mask);
+		    unsigned long bits, gfp_t mask);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, u64 *failed_start,
+		   unsigned long bits, u64 *failed_start,
 		   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
@@ -229,17 +231,15 @@
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask);
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		       int bits, int clear_bits,
+		       unsigned long bits, unsigned long clear_bits,
 		       struct extent_state **cached_state, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
 		      struct extent_state **cached_state, gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-			  u64 *start_ret, u64 *end_ret, int bits,
+			  u64 *start_ret, u64 *end_ret, unsigned long bits,
 			  struct extent_state **cached_state);
-struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
-						 u64 start, int bits);
 int extent_invalidatepage(struct extent_io_tree *tree,
 			  struct page *page, unsigned long offset);
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
@@ -261,6 +261,10 @@
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent);
 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
+void extent_cache_csums_dio(struct extent_io_tree *tree, u64 start, u32 csums[],
+			    int count);
+void extent_cache_csums(struct extent_io_tree *tree, struct bio *bio,
+			int bio_index, u32 csums[], int count);
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 
@@ -278,6 +282,7 @@
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num);
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
 static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
@@ -313,7 +318,6 @@
 			   unsigned long src_offset, unsigned long len);
 void memset_extent_buffer(struct extent_buffer *eb, char c,
 			  unsigned long start, unsigned long len);
-void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
 void clear_extent_buffer_dirty(struct extent_buffer *eb);
 int set_extent_buffer_dirty(struct extent_buffer *eb);
 int set_extent_buffer_uptodate(struct extent_buffer *eb);
@@ -323,8 +327,6 @@
 		      unsigned long min_len, char **map,
 		      unsigned long *map_start,
 		      unsigned long *map_len);
-int extent_range_uptodate(struct extent_io_tree *tree,
-			  u64 start, u64 end);
 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2834ca57..a4a7a1a 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -174,6 +174,14 @@
 	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
 		return 0;
 
+	/*
+	 * We don't want to merge stuff that hasn't been written to the log yet
+	 * since it may not reflect exactly what is on disk, and that would be
+	 * bad.
+	 */
+	if (!list_empty(&prev->list) || !list_empty(&next->list))
+		return 0;
+
 	if (extent_map_end(prev) == next->start &&
 	    prev->flags == next->flags &&
 	    prev->bdev == next->bdev &&
@@ -209,9 +217,7 @@
 			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
 			em->mod_start = merge->mod_start;
 			em->generation = max(em->generation, merge->generation);
-			list_move(&em->list, &tree->modified_extents);
 
-			list_del_init(&merge->list);
 			rb_erase(&merge->rb_node, &tree->map);
 			free_extent_map(merge);
 		}
@@ -227,7 +233,6 @@
 		merge->in_tree = 0;
 		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
 		em->generation = max(em->generation, merge->generation);
-		list_del_init(&merge->list);
 		free_extent_map(merge);
 	}
 }
@@ -302,7 +307,7 @@
  * reference dropped if the merge attempt was successful.
  */
 int add_extent_mapping(struct extent_map_tree *tree,
-		       struct extent_map *em)
+		       struct extent_map *em, int modified)
 {
 	int ret = 0;
 	struct rb_node *rb;
@@ -324,7 +329,10 @@
 	em->mod_start = em->start;
 	em->mod_len = em->len;
 
-	try_merge_map(tree, em);
+	if (modified)
+		list_move(&em->list, &tree->modified_extents);
+	else
+		try_merge_map(tree, em);
 out:
 	return ret;
 }
@@ -337,8 +345,9 @@
 	return start + len;
 }
 
-struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
-					   u64 start, u64 len, int strict)
+static struct extent_map *
+__lookup_extent_mapping(struct extent_map_tree *tree,
+			u64 start, u64 len, int strict)
 {
 	struct extent_map *em;
 	struct rb_node *rb_node;
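
The extent_map.c change gives add_extent_mapping() an explicit modified flag: mappings still queued for the fsync log are parked on modified_extents and are never merged, because a merged mapping could disagree with what the log is about to write. A small illustration of the new merge guard, using an invented struct whose list hook stands in for em->list:

	/*
	 * Toy model, not the kernel code: two ranges may only be coalesced
	 * when neither is still pending for the log (its list hook is
	 * empty) and they are logically and physically contiguous.
	 */
	#include <stdbool.h>

	struct map {
		unsigned long long start, len;	/* logical range */
		unsigned long long block;	/* physical start */
		struct map *list_next;		/* non-NULL => pending for log */
	};

	static bool mergeable(const struct map *prev, const struct map *next)
	{
		if (prev->list_next || next->list_next)
			return false;	/* not logged yet: keep separate */
		return prev->start + prev->len == next->start &&
		       prev->block + prev->len == next->block;
	}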
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index c6598c8..61adc44 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -26,6 +26,7 @@
 	u64 mod_len;
 	u64 orig_start;
 	u64 orig_block_len;
+	u64 ram_bytes;
 	u64 block_start;
 	u64 block_len;
 	u64 generation;
@@ -61,7 +62,7 @@
 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 					 u64 start, u64 len);
 int add_extent_mapping(struct extent_map_tree *tree,
-		       struct extent_map *em);
+		       struct extent_map *em, int modified);
 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
 
 struct extent_map *alloc_extent_map(void);
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index c4628a2..b193bf3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -83,10 +83,11 @@
 	return ret;
 }
 
-struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
-					  struct btrfs_root *root,
-					  struct btrfs_path *path,
-					  u64 bytenr, int cow)
+static struct btrfs_csum_item *
+btrfs_lookup_csum(struct btrfs_trans_handle *trans,
+		  struct btrfs_root *root,
+		  struct btrfs_path *path,
+		  u64 bytenr, int cow)
 {
 	int ret;
 	struct btrfs_key file_key;
@@ -152,32 +153,12 @@
 	return ret;
 }
 
-u64 btrfs_file_extent_length(struct btrfs_path *path)
-{
-	int extent_type;
-	struct btrfs_file_extent_item *fi;
-	u64 len;
-
-	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
-			    struct btrfs_file_extent_item);
-	extent_type = btrfs_file_extent_type(path->nodes[0], fi);
-
-	if (extent_type == BTRFS_FILE_EXTENT_REG ||
-	    extent_type == BTRFS_FILE_EXTENT_PREALLOC)
-		len = btrfs_file_extent_num_bytes(path->nodes[0], fi);
-	else if (extent_type == BTRFS_FILE_EXTENT_INLINE)
-		len = btrfs_file_extent_inline_len(path->nodes[0], fi);
-	else
-		BUG();
-
-	return len;
-}
-
 static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 				   struct inode *inode, struct bio *bio,
 				   u64 logical_offset, u32 *dst, int dio)
 {
-	u32 sum;
+	u32 sum[16];
+	int len;
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int bio_index = 0;
 	u64 offset = 0;
@@ -186,7 +167,7 @@
 	u64 disk_bytenr;
 	u32 diff;
 	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-	int ret;
+	int count;
 	struct btrfs_path *path;
 	struct btrfs_csum_item *item = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -214,10 +195,12 @@
 	if (dio)
 		offset = logical_offset;
 	while (bio_index < bio->bi_vcnt) {
+		len = min_t(int, ARRAY_SIZE(sum), bio->bi_vcnt - bio_index);
 		if (!dio)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
-		ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
-		if (ret == 0)
+		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr, sum,
+					       len);
+		if (count)
 			goto found;
 
 		if (!item || disk_bytenr < item_start_offset ||
@@ -230,10 +213,8 @@
 			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
 						 path, disk_bytenr, 0);
 			if (IS_ERR(item)) {
-				ret = PTR_ERR(item);
-				if (ret == -ENOENT || ret == -EFBIG)
-					ret = 0;
-				sum = 0;
+				count = 1;
+				sum[0] = 0;
 				if (BTRFS_I(inode)->root->root_key.objectid ==
 				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 					set_extent_bits(io_tree, offset,
@@ -269,19 +250,29 @@
 		diff = disk_bytenr - item_start_offset;
 		diff = diff / root->sectorsize;
 		diff = diff * csum_size;
-
-		read_extent_buffer(path->nodes[0], &sum,
+		count = min_t(int, len, (item_last_offset - disk_bytenr) >>
+					inode->i_sb->s_blocksize_bits);
+		read_extent_buffer(path->nodes[0], sum,
 				   ((unsigned long)item) + diff,
-				   csum_size);
+				   csum_size * count);
 found:
-		if (dst)
-			*dst++ = sum;
-		else
-			set_state_private(io_tree, offset, sum);
-		disk_bytenr += bvec->bv_len;
-		offset += bvec->bv_len;
-		bio_index++;
-		bvec++;
+		if (dst) {
+			memcpy(dst, sum, count * csum_size);
+			dst += count;
+		} else {
+			if (dio)
+				extent_cache_csums_dio(io_tree, offset, sum,
+						       count);
+			else
+				extent_cache_csums(io_tree, bio, bio_index, sum,
+					    count);
+		}
+		while (count--) {
+			disk_bytenr += bvec->bv_len;
+			offset += bvec->bv_len;
+			bio_index++;
+			bvec++;
+		}
 	}
 	btrfs_free_path(path);
 	return 0;
@@ -358,11 +349,8 @@
 
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
-		    key.type != BTRFS_EXTENT_CSUM_KEY)
-			break;
-
-		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		if (key.offset > end)
+		    key.type != BTRFS_EXTENT_CSUM_KEY ||
+		    key.offset > end)
 			break;
 
 		if (key.offset > start)
@@ -484,8 +472,7 @@
 
 		data = kmap_atomic(bvec->bv_page);
 		sector_sum->sum = ~(u32)0;
-		sector_sum->sum = btrfs_csum_data(root,
-						  data + bvec->bv_offset,
+		sector_sum->sum = btrfs_csum_data(data + bvec->bv_offset,
 						  sector_sum->sum,
 						  bvec->bv_len);
 		kunmap_atomic(data);
@@ -518,8 +505,7 @@
  * This calls btrfs_truncate_item with the correct args based on the
  * overlap, and fixes up the key as required.
  */
-static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root,
+static noinline void truncate_one_csum(struct btrfs_root *root,
 				       struct btrfs_path *path,
 				       struct btrfs_key *key,
 				       u64 bytenr, u64 len)
@@ -544,7 +530,7 @@
 		 */
 		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
 		new_size *= csum_size;
-		btrfs_truncate_item(trans, root, path, new_size, 1);
+		btrfs_truncate_item(root, path, new_size, 1);
 	} else if (key->offset >= bytenr && csum_end > end_byte &&
 		   end_byte > key->offset) {
 		/*
@@ -556,10 +542,10 @@
 		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
 		new_size *= csum_size;
 
-		btrfs_truncate_item(trans, root, path, new_size, 0);
+		btrfs_truncate_item(root, path, new_size, 0);
 
 		key->offset = end_byte;
-		btrfs_set_item_key_safe(trans, root, path, key);
+		btrfs_set_item_key_safe(root, path, key);
 	} else {
 		BUG();
 	}
@@ -674,7 +660,7 @@
 
 			key.offset = end_byte - 1;
 		} else {
-			truncate_one_csum(trans, root, path, &key, bytenr, len);
+			truncate_one_csum(root, path, &key, bytenr, len);
 			if (key.offset < bytenr)
 				break;
 		}
@@ -835,7 +821,7 @@
 		diff /= csum_size;
 		diff *= csum_size;
 
-		btrfs_extend_item(trans, root, path, diff);
+		btrfs_extend_item(root, path, diff);
 		goto csum;
 	}
 
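
__btrfs_lookup_bio_sums() above now pulls checksums in batches: instead of one ordered-sum or csum-tree lookup per bio_vec, it fills an on-stack u32 sum[16] and advances count entries at a time, where count is clipped to the batch size, the bvecs left in the bio, and the blocks remaining in the current csum item. A hedged sketch of just that bound arithmetic; MIN and every name here are local inventions:

	#define MIN(a, b) ((a) < (b) ? (a) : (b))
	#define CSUM_BATCH 16	/* matches the on-stack sum[] above */

	static int batch_count(int vcnt, int bio_index,
			       unsigned long long item_last,
			       unsigned long long bytenr,
			       int blocksize_bits)
	{
		/* bvecs still to cover, capped by the stack buffer */
		int len = MIN(CSUM_BATCH, vcnt - bio_index);
		/* checksums left in the csum item, in blocks */
		int in_item = (int)((item_last - bytenr) >> blocksize_bits);

		return MIN(len, in_item);
	}

Batching matters here because metadata reads of large bios previously paid a full tree search per 4K block.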
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index bb8b7a0..4205ba7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -24,6 +24,7 @@
 #include <linux/string.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
+#include <linux/aio.h>
 #include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
@@ -192,8 +193,8 @@
  * the same inode in the tree, we will merge them together (by
  * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
  */
-void btrfs_requeue_inode_defrag(struct inode *inode,
-				struct inode_defrag *defrag)
+static void btrfs_requeue_inode_defrag(struct inode *inode,
+				       struct inode_defrag *defrag)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
@@ -473,7 +474,7 @@
 /*
  * unlocks pages after btrfs_file_write is done with them
  */
-void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
 	size_t i;
 	for (i = 0; i < num_pages; i++) {
@@ -497,9 +498,9 @@
  * doing real data extents, marking pages dirty and delalloc as required.
  */
 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
-		      struct page **pages, size_t num_pages,
-		      loff_t pos, size_t write_bytes,
-		      struct extent_state **cached)
+			     struct page **pages, size_t num_pages,
+			     loff_t pos, size_t write_bytes,
+			     struct extent_state **cached)
 {
 	int err = 0;
 	int i;
@@ -552,6 +553,7 @@
 	int testend = 1;
 	unsigned long flags;
 	int compressed = 0;
+	bool modified;
 
 	WARN_ON(end < start);
 	if (end == (u64)-1) {
@@ -561,6 +563,7 @@
 	while (1) {
 		int no_splits = 0;
 
+		modified = false;
 		if (!split)
 			split = alloc_extent_map();
 		if (!split2)
@@ -592,6 +595,7 @@
 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
 		clear_bit(EXTENT_FLAG_LOGGING, &flags);
+		modified = !list_empty(&em->list);
 		remove_extent_mapping(em_tree, em);
 		if (no_splits)
 			goto next;
@@ -607,15 +611,15 @@
 				split->block_len = em->block_len;
 			else
 				split->block_len = split->len;
+			split->ram_bytes = em->ram_bytes;
 			split->orig_block_len = max(split->block_len,
 						    em->orig_block_len);
 			split->generation = gen;
 			split->bdev = em->bdev;
 			split->flags = flags;
 			split->compress_type = em->compress_type;
-			ret = add_extent_mapping(em_tree, split);
+			ret = add_extent_mapping(em_tree, split, modified);
 			BUG_ON(ret); /* Logic error */
-			list_move(&split->list, &em_tree->modified_extents);
 			free_extent_map(split);
 			split = split2;
 			split2 = NULL;
@@ -632,6 +636,7 @@
 			split->generation = gen;
 			split->orig_block_len = max(em->block_len,
 						    em->orig_block_len);
+			split->ram_bytes = em->ram_bytes;
 
 			if (compressed) {
 				split->block_len = em->block_len;
@@ -643,9 +648,8 @@
 				split->orig_start = em->orig_start;
 			}
 
-			ret = add_extent_mapping(em_tree, split);
+			ret = add_extent_mapping(em_tree, split, modified);
 			BUG_ON(ret); /* Logic error */
-			list_move(&split->list, &em_tree->modified_extents);
 			free_extent_map(split);
 			split = NULL;
 		}
@@ -821,7 +825,7 @@
 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.offset = end;
-			btrfs_set_item_key_safe(trans, root, path, &new_key);
+			btrfs_set_item_key_safe(root, path, &new_key);
 
 			extent_offset += end - key.offset;
 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@@ -1037,7 +1041,7 @@
 				     ino, bytenr, orig_offset,
 				     &other_start, &other_end)) {
 			new_key.offset = end;
-			btrfs_set_item_key_safe(trans, root, path, &new_key);
+			btrfs_set_item_key_safe(root, path, &new_key);
 			fi = btrfs_item_ptr(leaf, path->slots[0],
 					    struct btrfs_file_extent_item);
 			btrfs_set_file_extent_generation(leaf, fi,
@@ -1071,7 +1075,7 @@
 							 trans->transid);
 			path->slots[0]++;
 			new_key.offset = start;
-			btrfs_set_item_key_safe(trans, root, path, &new_key);
+			btrfs_set_item_key_safe(root, path, &new_key);
 
 			fi = btrfs_item_ptr(leaf, path->slots[0],
 					    struct btrfs_file_extent_item);
@@ -1882,7 +1886,7 @@
 
 		path->slots[0]++;
 		key.offset = offset;
-		btrfs_set_item_key_safe(trans, root, path, &key);
+		btrfs_set_item_key_safe(root, path, &key);
 		fi = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_file_extent_item);
 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@ -1912,6 +1916,7 @@
 	} else {
 		hole_em->start = offset;
 		hole_em->len = end - offset;
+		hole_em->ram_bytes = hole_em->len;
 		hole_em->orig_start = offset;
 
 		hole_em->block_start = EXTENT_MAP_HOLE;
@@ -1924,10 +1929,7 @@
 		do {
 			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
 			write_lock(&em_tree->lock);
-			ret = add_extent_mapping(em_tree, hole_em);
-			if (!ret)
-				list_move(&hole_em->list,
-					  &em_tree->modified_extents);
+			ret = add_extent_mapping(em_tree, hole_em, 1);
 			write_unlock(&em_tree->lock);
 		} while (ret == -EEXIST);
 		free_extent_map(hole_em);
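
The hole-punching path in file.c keeps its insert-under-race loop but now passes modified=1 straight to add_extent_mapping() instead of juggling the modified_extents list by hand. The loop's shape, sketched with hypothetical helpers in place of btrfs_drop_extent_cache() and the locked add:

	/*
	 * Illustrative only: evict any cached mapping for the range, try
	 * the insert under the tree lock, and retry if a racing reader
	 * repopulated the cache in between.
	 */
	#include <errno.h>

	extern void drop_cached_range(unsigned long long start,
				      unsigned long long end);
	extern int insert_mapping_locked(void *em);  /* -EEXIST on collision */

	static void insert_with_retry(void *em, unsigned long long start,
				      unsigned long long end)
	{
		int ret;

		do {
			drop_cached_range(start, end);
			ret = insert_mapping_locked(em);
		} while (ret == -EEXIST);
	}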
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 1f84fc0..ecca6c7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -104,7 +104,8 @@
 
 	spin_lock(&block_group->lock);
 	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
-		printk(KERN_INFO "Old style space inode found, converting.\n");
+		btrfs_info(root->fs_info,
+			"Old style space inode found, converting.");
 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
 			BTRFS_INODE_NODATACOW;
 		block_group->disk_cache_state = BTRFS_DC_CLEAR;
@@ -119,9 +120,10 @@
 	return inode;
 }
 
-int __create_free_space_inode(struct btrfs_root *root,
-			      struct btrfs_trans_handle *trans,
-			      struct btrfs_path *path, u64 ino, u64 offset)
+static int __create_free_space_inode(struct btrfs_root *root,
+				     struct btrfs_trans_handle *trans,
+				     struct btrfs_path *path,
+				     u64 ino, u64 offset)
 {
 	struct btrfs_key key;
 	struct btrfs_disk_key disk_key;
@@ -431,7 +433,7 @@
 	if (index == 0)
 		offset = sizeof(u32) * io_ctl->num_pages;
 
-	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
 			      PAGE_CACHE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	io_ctl_unmap_page(io_ctl);
@@ -461,7 +463,7 @@
 	kunmap(io_ctl->pages[0]);
 
 	io_ctl_map_page(io_ctl, 0);
-	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
 			      PAGE_CACHE_SIZE - offset);
 	btrfs_csum_final(crc, (char *)&crc);
 	if (val != crc) {
@@ -624,9 +626,9 @@
 	spin_unlock(&ctl->tree_lock);
 }
 
-int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
-			    struct btrfs_free_space_ctl *ctl,
-			    struct btrfs_path *path, u64 offset)
+static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+				   struct btrfs_free_space_ctl *ctl,
+				   struct btrfs_path *path, u64 offset)
 {
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
@@ -669,10 +671,11 @@
 	btrfs_release_path(path);
 
 	if (BTRFS_I(inode)->generation != generation) {
-		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
-		       " not match free space cache generation (%llu)\n",
-		       (unsigned long long)BTRFS_I(inode)->generation,
-		       (unsigned long long)generation);
+		btrfs_err(root->fs_info,
+			"free space inode generation (%llu) "
+			"did not match free space cache generation (%llu)",
+			(unsigned long long)BTRFS_I(inode)->generation,
+			(unsigned long long)generation);
 		return 0;
 	}
 
@@ -721,8 +724,8 @@
 			ret = link_free_space(ctl, e);
 			spin_unlock(&ctl->tree_lock);
 			if (ret) {
-				printk(KERN_ERR "Duplicate entries in "
-				       "free space cache, dumping\n");
+				btrfs_err(root->fs_info,
+					"Duplicate entries in free space cache, dumping");
 				kmem_cache_free(btrfs_free_space_cachep, e);
 				goto free_cache;
 			}
@@ -741,8 +744,8 @@
 			ctl->op->recalc_thresholds(ctl);
 			spin_unlock(&ctl->tree_lock);
 			if (ret) {
-				printk(KERN_ERR "Duplicate entries in "
-				       "free space cache, dumping\n");
+				btrfs_err(root->fs_info,
+					"Duplicate entries in free space cache, dumping");
 				kmem_cache_free(btrfs_free_space_cachep, e);
 				goto free_cache;
 			}
@@ -833,8 +836,8 @@
 
 	if (!matched) {
 		__btrfs_remove_free_space_cache(ctl);
-		printk(KERN_ERR "block group %llu has an wrong amount of free "
-		       "space\n", block_group->key.objectid);
+		btrfs_err(fs_info, "block group %llu has wrong amount of free space",
+			block_group->key.objectid);
 		ret = -1;
 	}
 out:
@@ -845,8 +848,8 @@
 		spin_unlock(&block_group->lock);
 		ret = 0;
 
-		printk(KERN_ERR "btrfs: failed to load free space cache "
-		       "for block group %llu\n", block_group->key.objectid);
+		btrfs_err(fs_info, "failed to load free space cache for block group %llu",
+			block_group->key.objectid);
 	}
 
 	iput(inode);
@@ -866,11 +869,11 @@
  * on mount.  This will return 0 if it was successfull in writing the cache out,
  * and -1 if it was not.
  */
-int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
-			    struct btrfs_free_space_ctl *ctl,
-			    struct btrfs_block_group_cache *block_group,
-			    struct btrfs_trans_handle *trans,
-			    struct btrfs_path *path, u64 offset)
+static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+				   struct btrfs_free_space_ctl *ctl,
+				   struct btrfs_block_group_cache *block_group,
+				   struct btrfs_trans_handle *trans,
+				   struct btrfs_path *path, u64 offset)
 {
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
@@ -1104,8 +1107,9 @@
 		spin_unlock(&block_group->lock);
 		ret = 0;
 #ifdef DEBUG
-		printk(KERN_ERR "btrfs: failed to write free space cache "
-		       "for block group %llu\n", block_group->key.objectid);
+		btrfs_err(root->fs_info,
+			"failed to write free space cache for block group %llu",
+			block_group->key.objectid);
 #endif
 	}
 
@@ -1564,7 +1568,8 @@
 	search_bytes = ctl->unit;
 	search_bytes = min(search_bytes, end - search_start + 1);
 	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
-	BUG_ON(ret < 0 || search_start != *offset);
+	if (ret < 0 || search_start != *offset)
+		return -EINVAL;
 
 	/* We may have found more bits than what we need */
 	search_bytes = min(search_bytes, *bytes);
@@ -1970,7 +1975,6 @@
 		re_search = true;
 		goto again;
 	}
-	BUG_ON(ret); /* logic error */
 out_lock:
 	spin_unlock(&ctl->tree_lock);
 out:
@@ -2064,7 +2068,8 @@
 	return 0;
 }
 
-void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
+static void __btrfs_remove_free_space_cache_locked(
+				struct btrfs_free_space_ctl *ctl)
 {
 	struct btrfs_free_space *info;
 	struct rb_node *node;
@@ -2931,8 +2936,9 @@
 	ret = __load_free_space_cache(root, inode, ctl, path, 0);
 
 	if (ret < 0)
-		printk(KERN_ERR "btrfs: failed to load free ino cache for "
-		       "root %llu\n", root->root_key.objectid);
+		btrfs_err(fs_info,
+			"failed to load free ino cache for root %llu",
+			root->root_key.objectid);
 out_put:
 	iput(inode);
 out:
@@ -2959,11 +2965,531 @@
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
-		printk(KERN_ERR "btrfs: failed to write free ino cache "
-		       "for root %llu\n", root->root_key.objectid);
+		btrfs_err(root->fs_info,
+			"failed to write free ino cache for root %llu",
+			root->root_key.objectid);
 #endif
 	}
 
 	iput(inode);
 	return ret;
 }
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+static struct btrfs_block_group_cache *init_test_block_group(void)
+{
+	struct btrfs_block_group_cache *cache;
+
+	cache = kzalloc(sizeof(*cache), GFP_NOFS);
+	if (!cache)
+		return NULL;
+	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+					GFP_NOFS);
+	if (!cache->free_space_ctl) {
+		kfree(cache);
+		return NULL;
+	}
+
+	cache->key.objectid = 0;
+	cache->key.offset = 1024 * 1024 * 1024;
+	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+	cache->sectorsize = 4096;
+
+	spin_lock_init(&cache->lock);
+	INIT_LIST_HEAD(&cache->list);
+	INIT_LIST_HEAD(&cache->cluster_list);
+	INIT_LIST_HEAD(&cache->new_bg_list);
+
+	btrfs_init_free_space_ctl(cache);
+
+	return cache;
+}
+
+/*
+ * Checks to see if the given range is in the free space cache.  This is really
+ * just used to check the absence of space, so if there is free space in the
+ * range at all we will return 1.
+ */
+static int check_exists(struct btrfs_block_group_cache *cache, u64 offset,
+			u64 bytes)
+{
+	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	spin_lock(&ctl->tree_lock);
+	info = tree_search_offset(ctl, offset, 0, 0);
+	if (!info) {
+		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
+					  1, 0);
+		if (!info)
+			goto out;
+	}
+
+have_info:
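+	/*
+	 * A bitmap entry only covers part of the address space, and extent
+	 * entries can sit next to it in the rbtree, so probing the bitmap
+	 * alone is not enough: walk the neighbouring entries in both
+	 * directions and re-check each overlapping candidate via have_info.
+	 */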
+	if (info->bitmap) {
+		u64 bit_off, bit_bytes;
+		struct rb_node *n;
+		struct btrfs_free_space *tmp;
+
+		bit_off = offset;
+		bit_bytes = ctl->unit;
+		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
+		if (!ret) {
+			if (bit_off == offset) {
+				ret = 1;
+				goto out;
+			} else if (bit_off > offset &&
+				   offset + bytes > bit_off) {
+				ret = 1;
+				goto out;
+			}
+		}
+
+		n = rb_prev(&info->offset_index);
+		while (n) {
+			tmp = rb_entry(n, struct btrfs_free_space,
+				       offset_index);
+			if (tmp->offset + tmp->bytes < offset)
+				break;
+			if (offset + bytes < tmp->offset) {
+				n = rb_prev(n);
+				continue;
+			}
+			info = tmp;
+			goto have_info;
+		}
+
+		n = rb_next(&info->offset_index);
+		while (n) {
+			tmp = rb_entry(n, struct btrfs_free_space,
+				       offset_index);
+			if (offset + bytes < tmp->offset)
+				break;
+			if (tmp->offset + tmp->bytes < offset) {
+				n = rb_next(n);
+				continue;
+			}
+			info = tmp;
+			goto have_info;
+		}
+
+		goto out;
+	}
+
+	if (info->offset == offset) {
+		ret = 1;
+		goto out;
+	}
+
+	if (offset > info->offset && offset < info->offset + info->bytes)
+		ret = 1;
+out:
+	spin_unlock(&ctl->tree_lock);
+	return ret;
+}
+
+/*
+ * Use this if you need to make a bitmap or extent entry specifically; it
+ * doesn't do any of the merging that add_free_space does.  This acts a lot
+ * like how the free space cache loading stuff works, so you can get really
+ * weird configurations.
+ */
+static int add_free_space_entry(struct btrfs_block_group_cache *cache,
+				u64 offset, u64 bytes, bool bitmap)
+{
+	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
+	struct btrfs_free_space *info = NULL, *bitmap_info;
+	void *map = NULL;
+	u64 bytes_added;
+	int ret;
+
+again:
+	if (!info) {
+		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
+		if (!info)
+			return -ENOMEM;
+	}
+
+	if (!bitmap) {
+		spin_lock(&ctl->tree_lock);
+		info->offset = offset;
+		info->bytes = bytes;
+		ret = link_free_space(ctl, info);
+		spin_unlock(&ctl->tree_lock);
+		if (ret)
+			kmem_cache_free(btrfs_free_space_cachep, info);
+		return ret;
+	}
+
+	if (!map) {
+		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+		if (!map) {
+			kmem_cache_free(btrfs_free_space_cachep, info);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock(&ctl->tree_lock);
+	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
+					 1, 0);
+	if (!bitmap_info) {
+		info->bitmap = map;
+		map = NULL;
+		add_new_bitmap(ctl, info, offset);
+		bitmap_info = info;
+	}
+
+	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+	bytes -= bytes_added;
+	offset += bytes_added;
+	spin_unlock(&ctl->tree_lock);
+
+	if (bytes)
+		goto again;
+
+	kfree(map);
+	return 0;
+}
+
+/*
+ * This test just does basic sanity checking, making sure we can add an extent
+ * entry and remove space from either end and the middle, and make sure we can
+ * remove space that covers adjacent extent entries.
+ */
+static int test_extents(struct btrfs_block_group_cache *cache)
+{
+	int ret = 0;
+
+	printk(KERN_ERR "Running extent only tests\n");
+
+	/* First just make sure we can remove an entire entry */
+	ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error adding initial extents %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error removing extent %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 0, 4 * 1024 * 1024)) {
+		printk(KERN_ERR "Full remove left some lingering space\n");
+		return -1;
+	}
+
+	/* Ok edge and middle cases now */
+	ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error adding half extent %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error removing tail end %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error removing front end %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096);
+	if (ret) {
+		printk(KERN_ERR "Error removing middle piece %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 0, 1 * 1024 * 1024)) {
+		printk(KERN_ERR "Still have space at the front\n");
+		return -1;
+	}
+
+	if (check_exists(cache, 2 * 1024 * 1024, 4096)) {
+		printk(KERN_ERR "Still have space in the middle\n");
+		return -1;
+	}
+
+	if (check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) {
+		printk(KERN_ERR "Still have space at the end\n");
+		return -1;
+	}
+
+	/* Cleanup */
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+	return 0;
+}
+
+static int test_bitmaps(struct btrfs_block_group_cache *cache)
+{
+	u64 next_bitmap_offset;
+	int ret;
+
+	printk(KERN_ERR "Running bitmap only tests\n");
+
+	ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't create a bitmap entry %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error removing bitmap full range %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 0, 4 * 1024 * 1024)) {
+		printk(KERN_ERR "Left some space in bitmap\n");
+		return -1;
+	}
+
+	ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add to our bitmap entry %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Couldn't remove middle chunk %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * The first bitmap we have starts at offset 0 so the next one is just
+	 * at the end of the first bitmap.
+	 */
+	next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
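+	/*
+	 * With 4k pages BITS_PER_BITMAP is PAGE_CACHE_SIZE * 8 = 32768, so
+	 * this works out to 32768 * 4096 bytes, i.e. 128MB per bitmap.
+	 */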
+
+	/* Test a bit straddling two bitmaps */
+	ret = add_free_space_entry(cache, next_bitmap_offset -
+				   (2 * 1024 * 1024), 4 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add space that straddles two bitmaps"
+		       " %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, next_bitmap_offset -
+				      (1 * 1024 * 1024), 2 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024),
+			 2 * 1024 * 1024)) {
+		printk(KERN_ERR "Left some space when removing overlapping\n");
+		return -1;
+	}
+
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+	return 0;
+}
+
+/* This is the high grade jackassery */
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+{
+	u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+	int ret;
+
+	printk(KERN_ERR "Running bitmap and extent tests\n");
+
+	/*
+	 * First let's do something simple, an extent at the same offset as the
+	 * bitmap, but the free space completely in the extent and then
+	 * completely in the bitmap.
+	 */
+	ret = add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't create bitmap entry %d\n", ret);
+		return ret;
+	}
+
+	ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Couldn't remove extent entry %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 0, 1 * 1024 * 1024)) {
+		printk(KERN_ERR "Left remnants after our remove\n");
+		return -1;
+	}
+
+	/* Now to add back the extent entry and remove from the bitmap */
+	ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
+	if (ret) {
+		printk(KERN_ERR "Couldn't re-add extent entry %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Couldn't remove from bitmap %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) {
+		printk(KERN_ERR "Left remnants in the bitmap\n");
+		return -1;
+	}
+
+	/*
+	 * Ok so a little more evil, extent entry and bitmap at the same offset,
+	 * removing an overlapping chunk.
+	 */
+	ret = add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add to a bitmap %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) {
+		printk(KERN_ERR "Left over pieces after removing "
+		       "overlapping\n");
+		return -1;
+	}
+
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+	/* Now with the extent entry offset into the bitmap */
+	ret = add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add space to the bitmap %d\n", ret);
+		return ret;
+	}
+
+	ret = add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add extent to the cache %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Problem removing overlapping space %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) {
+		printk(KERN_ERR "Left something behind when removing space\n");
+		return -1;
+	}
+
+	/*
+	 * This has blown up in the past: the extent entry starts before the
+	 * bitmap entry, but we're trying to remove an offset that falls
+	 * completely within the bitmap range and is in both the extent entry
+	 * and the bitmap entry; it looks like this
+	 *
+	 *   [ extent ]
+	 *      [ bitmap ]
+	 *        [ del ]
+	 */
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	ret = add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024,
+				   4 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add bitmap %d\n", ret);
+		return ret;
+	}
+
+	ret = add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024,
+				   5 * 1024 * 1024, 0);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024,
+				      5 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Failed to free our space %d\n", ret);
+		return ret;
+	}
+
+	if (check_exists(cache, bitmap_offset + 1 * 1024 * 1024,
+			 5 * 1024 * 1024)) {
+		printk(KERN_ERR "Left stuff over\n");
+		return -1;
+	}
+
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+
+	/*
+	 * This blew up before: we have part of the free space in a bitmap and
+	 * then the entirety of the rest of the space in an extent.  This used
+	 * to return -EAGAIN back from btrfs_remove_extent; make sure this
+	 * doesn't happen.
+	 */
+	ret = add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add bitmap entry %d\n", ret);
+		return ret;
+	}
+
+	ret = add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0);
+	if (ret) {
+		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024);
+	if (ret) {
+		printk(KERN_ERR "Error removing bitmap and extent "
+		       "overlapping %d\n", ret);
+		return ret;
+	}
+
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	return 0;
+}
+
+void btrfs_test_free_space_cache(void)
+{
+	struct btrfs_block_group_cache *cache;
+
+	printk(KERN_ERR "Running btrfs free space cache tests\n");
+
+	cache = init_test_block_group();
+	if (!cache) {
+		printk(KERN_ERR "Couldn't run the tests\n");
+		return;
+	}
+
+	if (test_extents(cache))
+		goto out;
+	if (test_bitmaps(cache))
+		goto out;
+	if (test_bitmaps_and_extents(cache))
+		goto out;
+out:
+	__btrfs_remove_free_space_cache(cache->free_space_ctl);
+	kfree(cache->free_space_ctl);
+	kfree(cache);
+	printk(KERN_ERR "Free space cache tests finished\n");
+}
+#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 8f2613f..4dc17d8 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -110,4 +110,9 @@
 			       struct btrfs_free_cluster *cluster);
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 			   u64 *trimmed, u64 start, u64 end, u64 minlen);
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+void btrfs_test_free_space_cache(void);
+#endif
+
 #endif
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 48b8fda..e0b7034 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -183,10 +183,11 @@
 	return -ENOENT;
 }
 
-int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root,
-			   const char *name, int name_len,
-			   u64 inode_objectid, u64 ref_objectid, u64 *index)
+static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root,
+				  const char *name, int name_len,
+				  u64 inode_objectid, u64 ref_objectid,
+				  u64 *index)
 {
 	struct btrfs_path *path;
 	struct btrfs_key key;
@@ -246,7 +247,7 @@
 	memmove_extent_buffer(leaf, ptr, ptr + del_len,
 			      item_size - (ptr + del_len - item_start));
 
-	btrfs_truncate_item(trans, root, path, item_size - del_len, 1);
+	btrfs_truncate_item(root, path, item_size - del_len, 1);
 
 out:
 	btrfs_free_path(path);
@@ -309,7 +310,7 @@
 	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			      item_size - (ptr + sub_item_len - item_start));
-	btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1);
+	btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
 out:
 	btrfs_free_path(path);
 
@@ -361,7 +362,7 @@
 						   name, name_len, NULL))
 			goto out;
 
-		btrfs_extend_item(trans, root, path, ins_len);
+		btrfs_extend_item(root, path, ins_len);
 		ret = 0;
 	}
 	if (ret < 0)
@@ -417,7 +418,7 @@
 			goto out;
 
 		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
-		btrfs_extend_item(trans, root, path, ins_len);
+		btrfs_extend_item(root, path, ins_len);
 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				     struct btrfs_inode_ref);
 		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 09c58a3..9b31b3b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -32,6 +32,7 @@
 #include <linux/writeback.h>
 #include <linux/statfs.h>
 #include <linux/compat.h>
+#include <linux/aio.h>
 #include <linux/bit_spinlock.h>
 #include <linux/xattr.h>
 #include <linux/posix_acl.h>
@@ -100,7 +101,10 @@
 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
 					   u64 len, u64 orig_start,
 					   u64 block_start, u64 block_len,
-					   u64 orig_block_len, int type);
+					   u64 orig_block_len, u64 ram_bytes,
+					   int type);
+
+static int btrfs_dirty_inode(struct inode *inode);
 
 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
 				     struct inode *inode,  struct inode *dir,
@@ -722,6 +726,7 @@
 		em->block_start = ins.objectid;
 		em->block_len = ins.offset;
 		em->orig_block_len = ins.offset;
+		em->ram_bytes = async_extent->ram_size;
 		em->bdev = root->fs_info->fs_devices->latest_bdev;
 		em->compress_type = async_extent->compress_type;
 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
@@ -730,10 +735,7 @@
 
 		while (1) {
 			write_lock(&em_tree->lock);
-			ret = add_extent_mapping(em_tree, em);
-			if (!ret)
-				list_move(&em->list,
-					  &em_tree->modified_extents);
+			ret = add_extent_mapping(em_tree, em, 1);
 			write_unlock(&em_tree->lock);
 			if (ret != -EEXIST) {
 				free_extent_map(em);
@@ -921,7 +923,8 @@
 		}
 
 		em = alloc_extent_map();
-		BUG_ON(!em); /* -ENOMEM */
+		if (!em)
+			goto out_reserve;
 		em->start = start;
 		em->orig_start = em->start;
 		ram_size = ins.offset;
@@ -932,16 +935,14 @@
 		em->block_start = ins.objectid;
 		em->block_len = ins.offset;
 		em->orig_block_len = ins.offset;
+		em->ram_bytes = ram_size;
 		em->bdev = root->fs_info->fs_devices->latest_bdev;
 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
 		em->generation = -1;
 
 		while (1) {
 			write_lock(&em_tree->lock);
-			ret = add_extent_mapping(em_tree, em);
-			if (!ret)
-				list_move(&em->list,
-					  &em_tree->modified_extents);
+			ret = add_extent_mapping(em_tree, em, 1);
 			write_unlock(&em_tree->lock);
 			if (ret != -EEXIST) {
 				free_extent_map(em);
@@ -950,11 +951,14 @@
 			btrfs_drop_extent_cache(inode, start,
 						start + ram_size - 1, 0);
 		}
+		if (ret)
+			goto out_reserve;
 
 		cur_alloc_size = ins.offset;
 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 					       ram_size, cur_alloc_size, 0);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			goto out_reserve;
 
 		if (root->root_key.objectid ==
 		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
@@ -962,7 +966,7 @@
 						      cur_alloc_size);
 			if (ret) {
 				btrfs_abort_transaction(trans, root, ret);
-				goto out_unlock;
+				goto out_reserve;
 			}
 		}
 
@@ -991,6 +995,8 @@
 out:
 	return ret;
 
+out_reserve:
+	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
 out_unlock:
 	extent_clear_unlock_delalloc(inode,
 		     &BTRFS_I(inode)->io_tree,
@@ -1194,6 +1200,7 @@
 	u64 disk_bytenr;
 	u64 num_bytes;
 	u64 disk_num_bytes;
+	u64 ram_bytes;
 	int extent_type;
 	int ret, err;
 	int type;
@@ -1290,6 +1297,7 @@
 				    struct btrfs_file_extent_item);
 		extent_type = btrfs_file_extent_type(leaf, fi);
 
+		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
@@ -1373,6 +1381,7 @@
 			em->block_len = num_bytes;
 			em->block_start = disk_bytenr;
 			em->orig_block_len = disk_num_bytes;
+			em->ram_bytes = ram_bytes;
 			em->bdev = root->fs_info->fs_devices->latest_bdev;
 			em->mod_start = em->start;
 			em->mod_len = em->len;
@@ -1381,10 +1390,7 @@
 			em->generation = -1;
 			while (1) {
 				write_lock(&em_tree->lock);
-				ret = add_extent_mapping(em_tree, em);
-				if (!ret)
-					list_move(&em->list,
-						  &em_tree->modified_extents);
+				ret = add_extent_mapping(em_tree, em, 1);
 				write_unlock(&em_tree->lock);
 				if (ret != -EEXIST) {
 					free_extent_map(em);
@@ -1525,7 +1531,7 @@
  * have pending delalloc work to be done.
  */
 static void btrfs_set_bit_hook(struct inode *inode,
-			       struct extent_state *state, int *bits)
+			       struct extent_state *state, unsigned long *bits)
 {
 
 	/*
@@ -1569,7 +1575,8 @@
  * extent_io.c clear_bit_hook, see set_bit_hook for why
  */
 static void btrfs_clear_bit_hook(struct inode *inode,
-				 struct extent_state *state, int *bits)
+				 struct extent_state *state,
+				 unsigned long *bits)
 {
 	/*
 	 * set_bit and clear bit hooks normally require _irqsave/restore
@@ -2793,6 +2800,8 @@
 	int ret;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u32 csum = ~(u32)0;
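+	/*
+	 * One ratelimit state shared by every caller; __ratelimit() returns
+	 * nonzero while printing is still allowed in the current interval.
+	 */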
+	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+	                              DEFAULT_RATELIMIT_BURST);
 
 	if (PageChecked(page)) {
 		ClearPageChecked(page);
@@ -2819,7 +2828,7 @@
 	if (ret)
 		goto zeroit;
 
-	csum = btrfs_csum_data(root, kaddr + offset, csum,  end - start + 1);
+	csum = btrfs_csum_data(kaddr + offset, csum,  end - start + 1);
 	btrfs_csum_final(csum, (char *)&csum);
 	if (csum != private)
 		goto zeroit;
@@ -2829,11 +2838,11 @@
 	return 0;
 
 zeroit:
-	printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
-		       "private %llu\n",
-		       (unsigned long long)btrfs_ino(page->mapping->host),
-		       (unsigned long long)start, csum,
-		       (unsigned long long)private);
+	if (__ratelimit(&_rs))
+		btrfs_info(root->fs_info, "csum failed ino %llu off %llu csum %u private %llu",
+			(unsigned long long)btrfs_ino(page->mapping->host),
+			(unsigned long long)start, csum,
+			(unsigned long long)private);
 	memset(kaddr + offset, 1, end - start + 1);
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr);
@@ -3019,7 +3028,8 @@
  * We have done the truncate/delete so we can go ahead and remove the orphan
  * item for this particular inode.
  */
-int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
+static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
+			    struct inode *inode)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int delete_item = 0;
@@ -3114,8 +3124,8 @@
 		 */
 
 		if (found_key.offset == last_objectid) {
-			printk(KERN_ERR "btrfs: Error removing orphan entry, "
-			       "stopping orphan cleanup\n");
+			btrfs_err(root->fs_info,
+				"Error removing orphan entry, stopping orphan cleanup");
 			ret = -EINVAL;
 			goto out;
 		}
@@ -3172,8 +3182,8 @@
 				ret = PTR_ERR(trans);
 				goto out;
 			}
-			printk(KERN_ERR "auto deleting %Lu\n",
-			       found_key.objectid);
+			btrfs_debug(root->fs_info, "auto deleting %Lu",
+				found_key.objectid);
 			ret = btrfs_del_orphan_item(trans, root,
 						    found_key.objectid);
 			BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
@@ -3237,13 +3247,14 @@
 	}
 
 	if (nr_unlink)
-		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
+		btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
 	if (nr_truncate)
-		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
+		btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
 
 out:
 	if (ret)
-		printk(KERN_CRIT "btrfs: could not do orphan cleanup %d\n", ret);
+		btrfs_crit(root->fs_info,
+			"could not do orphan cleanup %d", ret);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -3591,9 +3602,10 @@
 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
 				  dir_ino, &index);
 	if (ret) {
-		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
-		       "inode %llu parent %llu\n", name_len, name,
-		       (unsigned long long)ino, (unsigned long long)dir_ino);
+		btrfs_info(root->fs_info,
+			"failed to delete reference to %.*s, inode %llu parent %llu",
+			name_len, name,
+			(unsigned long long)ino, (unsigned long long)dir_ino);
 		btrfs_abort_transaction(trans, root, ret);
 		goto err;
 	}
@@ -3615,6 +3627,8 @@
 					   dir, index);
 	if (ret == -ENOENT)
 		ret = 0;
+	else if (ret)
+		btrfs_abort_transaction(trans, root, ret);
 err:
 	btrfs_free_path(path);
 	if (ret)
@@ -3660,7 +3674,7 @@
 		eb = path->nodes[level];
 		if (!btrfs_block_can_be_shared(root, eb))
 			continue;
-		ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
+		ret = btrfs_lookup_extent_info(NULL, root, eb->start, level, 1,
 					       &refs, NULL);
 		if (refs > 1)
 			return 1;
@@ -4175,8 +4189,7 @@
 				}
 				size =
 				    btrfs_file_extent_calc_inline_size(size);
-				btrfs_truncate_item(trans, root, path,
-						    size, 1);
+				btrfs_truncate_item(root, path, size, 1);
 			} else if (root->ref_cows) {
 				inode_sub_bytes(inode, item_end + 1 -
 						found_key.offset);
@@ -4450,16 +4463,14 @@
 			hole_em->block_start = EXTENT_MAP_HOLE;
 			hole_em->block_len = 0;
 			hole_em->orig_block_len = 0;
+			hole_em->ram_bytes = hole_size;
 			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
 			hole_em->generation = trans->transid;
 
 			while (1) {
 				write_lock(&em_tree->lock);
-				err = add_extent_mapping(em_tree, hole_em);
-				if (!err)
-					list_move(&hole_em->list,
-						  &em_tree->modified_extents);
+				err = add_extent_mapping(em_tree, hole_em, 1);
 				write_unlock(&em_tree->lock);
 				if (err != -EEXIST)
 					break;
@@ -4670,8 +4681,9 @@
 			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
 
 		if (ret) {
-			printk(KERN_WARNING "Could not get space for a "
-			       "delete, will truncate on mount %d\n", ret);
+			btrfs_warn(root->fs_info,
+				"Could not get space for a delete, will truncate on mount %d",
+				ret);
 			btrfs_orphan_del(NULL, inode);
 			btrfs_free_block_rsv(root, rsv);
 			goto no_delete;
@@ -5335,7 +5347,7 @@
  * FIXME, needs more benchmarking...there are no reasons other than performance
  * to keep or drop this code.
  */
-int btrfs_dirty_inode(struct inode *inode)
+static int btrfs_dirty_inode(struct inode *inode)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
@@ -5977,7 +5989,7 @@
 		em->block_start += start_diff;
 		em->block_len -= start_diff;
 	}
-	return add_extent_mapping(em_tree, em);
+	return add_extent_mapping(em_tree, em, 0);
 }
 
 static noinline int uncompress_inline(struct btrfs_path *path,
@@ -6151,6 +6163,7 @@
 		goto not_found_em;
 	}
 
+	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
 	if (found_type == BTRFS_FILE_EXTENT_REG ||
 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
 		em->start = extent_start;
@@ -6259,18 +6272,18 @@
 insert:
 	btrfs_release_path(path);
 	if (em->start > start || extent_map_end(em) <= start) {
-		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
-		       "[%llu %llu]\n", (unsigned long long)em->start,
-		       (unsigned long long)em->len,
-		       (unsigned long long)start,
-		       (unsigned long long)len);
+		btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
+			(unsigned long long)em->start,
+			(unsigned long long)em->len,
+			(unsigned long long)start,
+			(unsigned long long)len);
 		err = -EIO;
 		goto out;
 	}
 
 	err = 0;
 	write_lock(&em_tree->lock);
-	ret = add_extent_mapping(em_tree, em);
+	ret = add_extent_mapping(em_tree, em, 0);
 	/* it is possible that someone inserted the extent into the tree
 	 * while we had the lock dropped.  It is also possible that
 	 * an overlapping map exists in the tree
@@ -6482,7 +6495,7 @@
 	}
 
 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
-			      ins.offset, ins.offset, 0);
+			      ins.offset, ins.offset, ins.offset, 0);
 	if (IS_ERR(em))
 		goto out;
 
@@ -6502,7 +6515,9 @@
  * block must be cow'd
  */
 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
-				      struct inode *inode, u64 offset, u64 len)
+				      struct inode *inode, u64 offset, u64 *len,
+				      u64 *orig_start, u64 *orig_block_len,
+				      u64 *ram_bytes)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -6559,8 +6574,12 @@
 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
 	backref_offset = btrfs_file_extent_offset(leaf, fi);
 
+	*orig_start = key.offset - backref_offset;
+	*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
+	*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+
 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
-	if (extent_end < offset + len) {
+	if (extent_end < offset + *len) {
 		/* extent doesn't include our full range, must cow */
 		goto out;
 	}
@@ -6584,13 +6603,14 @@
 	 */
 	disk_bytenr += backref_offset;
 	disk_bytenr += offset - key.offset;
-	num_bytes = min(offset + len, extent_end) - offset;
+	num_bytes = min(offset + *len, extent_end) - offset;
 	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
 				goto out;
 	/*
 	 * all of the above have passed, it is safe to overwrite this extent
 	 * without cow
 	 */
+	*len = num_bytes;
 	ret = 1;
 out:
 	btrfs_free_path(path);
@@ -6661,7 +6681,8 @@
 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
 					   u64 len, u64 orig_start,
 					   u64 block_start, u64 block_len,
-					   u64 orig_block_len, int type)
+					   u64 orig_block_len, u64 ram_bytes,
+					   int type)
 {
 	struct extent_map_tree *em_tree;
 	struct extent_map *em;
@@ -6682,6 +6703,7 @@
 	em->block_start = block_start;
 	em->bdev = root->fs_info->fs_devices->latest_bdev;
 	em->orig_block_len = orig_block_len;
+	em->ram_bytes = ram_bytes;
 	em->generation = -1;
 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
 	if (type == BTRFS_ORDERED_PREALLOC)
@@ -6691,10 +6713,7 @@
 		btrfs_drop_extent_cache(inode, em->start,
 				em->start + em->len - 1, 0);
 		write_lock(&em_tree->lock);
-		ret = add_extent_mapping(em_tree, em);
-		if (!ret)
-			list_move(&em->list,
-				  &em_tree->modified_extents);
+		ret = add_extent_mapping(em_tree, em, 1);
 		write_unlock(&em_tree->lock);
 	} while (ret == -EEXIST);
 
@@ -6789,7 +6808,7 @@
 	     em->block_start != EXTENT_MAP_HOLE)) {
 		int type;
 		int ret;
-		u64 block_start;
+		u64 block_start, orig_start, orig_block_len, ram_bytes;
 
 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
 			type = BTRFS_ORDERED_PREALLOC;
@@ -6807,16 +6826,15 @@
 		if (IS_ERR(trans))
 			goto must_cow;
 
-		if (can_nocow_odirect(trans, inode, start, len) == 1) {
-			u64 orig_start = em->orig_start;
-			u64 orig_block_len = em->orig_block_len;
-
+		if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
+				      &orig_block_len, &ram_bytes) == 1) {
 			if (type == BTRFS_ORDERED_PREALLOC) {
 				free_extent_map(em);
 				em = create_pinned_em(inode, start, len,
 						       orig_start,
 						       block_start, len,
-						       orig_block_len, type);
+						       orig_block_len,
+						       ram_bytes, type);
 				if (IS_ERR(em)) {
 					btrfs_end_transaction(trans, root);
 					goto unlock_err;
@@ -6936,7 +6954,7 @@
 				goto failed;
 			local_irq_save(flags);
 			kaddr = kmap_atomic(page);
-			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
+			csum = btrfs_csum_data(kaddr + bvec->bv_offset,
 					       csum, bvec->bv_len);
 			btrfs_csum_final(csum, (char *)&csum);
 			kunmap_atomic(kaddr);
@@ -6945,11 +6963,10 @@
 			flush_dcache_page(bvec->bv_page);
 			if (csum != private) {
 failed:
-				printk(KERN_ERR "btrfs csum failed ino %llu off"
-				      " %llu csum %u private %u\n",
-				      (unsigned long long)btrfs_ino(inode),
-				      (unsigned long long)start,
-				      csum, (unsigned)private);
+				btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u private %u",
+					(unsigned long long)btrfs_ino(inode),
+					(unsigned long long)start,
+					csum, (unsigned)private);
 				err = -EIO;
 			}
 		}
@@ -7425,8 +7442,8 @@
 	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
 }
 
-int btrfs_writepages(struct address_space *mapping,
-		     struct writeback_control *wbc)
+static int btrfs_writepages(struct address_space *mapping,
+			    struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
 
@@ -7941,8 +7958,8 @@
 
 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
 		     &BTRFS_I(inode)->runtime_flags)) {
-		printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
-		       (unsigned long long)btrfs_ino(inode));
+		btrfs_info(root->fs_info, "inode %llu still on the orphan list",
+			(unsigned long long)btrfs_ino(inode));
 		atomic_dec(&root->orphan_inodes);
 	}
 
@@ -7951,10 +7968,9 @@
 		if (!ordered)
 			break;
 		else {
-			printk(KERN_ERR "btrfs found ordered "
-			       "extent %llu %llu on inode cleanup\n",
-			       (unsigned long long)ordered->file_offset,
-			       (unsigned long long)ordered->len);
+			btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
+				(unsigned long long)ordered->file_offset,
+				(unsigned long long)ordered->len);
 			btrfs_remove_ordered_extent(inode, ordered);
 			btrfs_put_ordered_extent(ordered);
 			btrfs_put_ordered_extent(ordered);
@@ -8571,16 +8587,14 @@
 		em->block_start = ins.objectid;
 		em->block_len = ins.offset;
 		em->orig_block_len = ins.offset;
+		em->ram_bytes = ins.offset;
 		em->bdev = root->fs_info->fs_devices->latest_bdev;
 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
 		em->generation = trans->transid;
 
 		while (1) {
 			write_lock(&em_tree->lock);
-			ret = add_extent_mapping(em_tree, em);
-			if (!ret)
-				list_move(&em->list,
-					  &em_tree->modified_extents);
+			ret = add_extent_mapping(em_tree, em, 1);
 			write_unlock(&em_tree->lock);
 			if (ret != -EEXIST)
 				break;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2c02310..0de4a2f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -723,7 +723,9 @@
 	struct dentry *dentry;
 	int error;
 
-	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
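+	/*
+	 * The killable variant returns -EINTR if a fatal signal arrives while
+	 * we wait, letting the task exit instead of blocking on the parent
+	 * i_mutex indefinitely.
+	 */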
+	error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+	if (error == -EINTR)
+		return error;
 
 	dentry = lookup_one_len(name, parent->dentry, namelen);
 	error = PTR_ERR(dentry);
@@ -1152,8 +1154,11 @@
 	u64 new_align = ~((u64)128 * 1024 - 1);
 	struct page **pages = NULL;
 
-	if (extent_thresh == 0)
-		extent_thresh = 256 * 1024;
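+	/* reject empty files and out-of-range requests before any real work */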
+	if (isize == 0)
+		return 0;
+
+	if (range->start >= isize)
+		return -EINVAL;
 
 	if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
 		if (range->compress_type > BTRFS_COMPRESS_TYPES)
@@ -1162,8 +1167,8 @@
 			compress_type = range->compress_type;
 	}
 
-	if (isize == 0)
-		return 0;
+	if (extent_thresh == 0)
+		extent_thresh = 256 * 1024;
 
 	/*
 	 * if we were not given a file, allocate a readahead
@@ -2086,7 +2091,9 @@
 	if (err)
 		goto out;
 
-	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
+	err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
+	if (err == -EINTR)
+		goto out;
 	dentry = lookup_one_len(vol_args->name, parent, namelen);
 	if (IS_ERR(dentry)) {
 		err = PTR_ERR(dentry);
@@ -2425,7 +2432,6 @@
 
 	mutex_lock(&fs_devices->device_list_mutex);
 	dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
-	mutex_unlock(&fs_devices->device_list_mutex);
 
 	if (!dev) {
 		ret = -ENODEV;
@@ -2449,6 +2455,7 @@
 	}
 
 out:
+	mutex_unlock(&fs_devices->device_list_mutex);
 	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
 		ret = -EFAULT;
 
@@ -3003,7 +3010,7 @@
 	}
 }
 
-long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 {
 	struct btrfs_ioctl_space_args space_args;
 	struct btrfs_ioctl_space_info space;
@@ -3693,12 +3700,11 @@
 		goto drop_write;
 	}
 
-	if (sa->cmd != BTRFS_QUOTA_CTL_RESCAN) {
-		trans = btrfs_start_transaction(root, 2);
-		if (IS_ERR(trans)) {
-			ret = PTR_ERR(trans);
-			goto out;
-		}
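+	/*
+	 * Quota enable now creates qgroup items for every existing subvolume,
+	 * so hold subvol_sem to keep that set stable and run the transaction
+	 * on the tree root it modifies.
+	 */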
+	down_write(&root->fs_info->subvol_sem);
+	trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
 	}
 
 	switch (sa->cmd) {
@@ -3708,9 +3714,6 @@
 	case BTRFS_QUOTA_CTL_DISABLE:
 		ret = btrfs_quota_disable(trans, root->fs_info);
 		break;
-	case BTRFS_QUOTA_CTL_RESCAN:
-		ret = btrfs_quota_rescan(root->fs_info);
-		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -3719,13 +3722,12 @@
 	if (copy_to_user(arg, sa, sizeof(*sa)))
 		ret = -EFAULT;
 
-	if (trans) {
-		err = btrfs_commit_transaction(trans, root);
-		if (err && !ret)
-			ret = err;
-	}
+	err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
+	if (err && !ret)
+		ret = err;
 out:
 	kfree(sa);
+	up_write(&root->fs_info->subvol_sem);
 drop_write:
 	mnt_drop_write_file(file);
 	return ret;
@@ -3877,6 +3879,64 @@
 	return ret;
 }
 
+static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
+{
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	struct btrfs_ioctl_quota_rescan_args *qsa;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = mnt_want_write_file(file);
+	if (ret)
+		return ret;
+
+	qsa = memdup_user(arg, sizeof(*qsa));
+	if (IS_ERR(qsa)) {
+		ret = PTR_ERR(qsa);
+		goto drop_write;
+	}
+
+	if (qsa->flags) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = btrfs_qgroup_rescan(root->fs_info);
+
+out:
+	kfree(qsa);
+drop_write:
+	mnt_drop_write_file(file);
+	return ret;
+}
+
+static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
+{
+	struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+	struct btrfs_ioctl_quota_rescan_args *qsa;
+	int ret = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	qsa = kzalloc(sizeof(*qsa), GFP_NOFS);
+	if (!qsa)
+		return -ENOMEM;
+
+	if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+		qsa->flags = 1;
+		qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
+	}
+
+	if (copy_to_user(arg, qsa, sizeof(*qsa)))
+		ret = -EFAULT;
+
+	kfree(qsa);
+	return ret;
+}
+
 static long btrfs_ioctl_set_received_subvol(struct file *file,
 					    void __user *arg)
 {
@@ -4115,6 +4175,10 @@
 		return btrfs_ioctl_qgroup_create(file, argp);
 	case BTRFS_IOC_QGROUP_LIMIT:
 		return btrfs_ioctl_qgroup_limit(file, argp);
+	case BTRFS_IOC_QUOTA_RESCAN:
+		return btrfs_ioctl_quota_rescan(file, argp);
+	case BTRFS_IOC_QUOTA_RESCAN_STATUS:
+		return btrfs_ioctl_quota_rescan_status(file, argp);
 	case BTRFS_IOC_DEV_REPLACE:
 		return btrfs_ioctl_dev_replace(root, argp);
 	case BTRFS_IOC_GET_FSLABEL:
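
For reference, a minimal userspace sketch driving the two new rescan ioctls
(illustrative only, not part of this series; it assumes a kernel with these
patches and a linux/btrfs.h that exports BTRFS_IOC_QUOTA_RESCAN,
BTRFS_IOC_QUOTA_RESCAN_STATUS and struct btrfs_ioctl_quota_rescan_args):

    /* qrescan.c - kick off a qgroup rescan, then poll its status */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>

    int main(int argc, char **argv)
    {
            struct btrfs_ioctl_quota_rescan_args args;
            int fd;

            if (argc != 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);   /* any path on the mounted fs */
            if (fd < 0)
                    return 1;

            memset(&args, 0, sizeof(args)); /* nonzero flags are rejected */
            if (ioctl(fd, BTRFS_IOC_QUOTA_RESCAN, &args) < 0)
                    perror("BTRFS_IOC_QUOTA_RESCAN");

            memset(&args, 0, sizeof(args));
            if (ioctl(fd, BTRFS_IOC_QUOTA_RESCAN_STATUS, &args) == 0)
                    printf("rescan %s, progress at objectid %llu\n",
                           args.flags ? "running" : "not running",
                           (unsigned long long)args.progress);
            return 0;
    }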
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index e95df43..01277b8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -24,7 +24,7 @@
 #include "extent_io.h"
 #include "locking.h"
 
-void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
+static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
 
 /*
  * if we currently have a spinning reader or writer lock
@@ -264,7 +264,7 @@
 	BUG_ON(!atomic_read(&eb->write_locks));
 }
 
-void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
 {
 	BUG_ON(!atomic_read(&eb->read_locks));
 }
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 005c45d..1ddd728 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -986,7 +986,7 @@
  * be reclaimed before their checksum is actually put into the btree
  */
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
-			   u32 *sum)
+			   u32 *sum, int len)
 {
 	struct btrfs_ordered_sum *ordered_sum;
 	struct btrfs_sector_sum *sector_sums;
@@ -995,22 +995,28 @@
 	unsigned long num_sectors;
 	unsigned long i;
 	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
-	int ret = 1;
+	int index = 0;
 
 	ordered = btrfs_lookup_ordered_extent(inode, offset);
 	if (!ordered)
-		return 1;
+		return 0;
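+
+	/*
+	 * Walk the sums attached to this ordered extent and copy up to 'len'
+	 * checksums starting at disk_bytenr; the return value is now the
+	 * number of checksums copied rather than a 0/1 error flag.
+	 */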
 
 	spin_lock_irq(&tree->lock);
 	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
-		if (disk_bytenr >= ordered_sum->bytenr) {
-			num_sectors = ordered_sum->len / sectorsize;
-			sector_sums = ordered_sum->sums;
-			for (i = 0; i < num_sectors; i++) {
+		if (disk_bytenr >= ordered_sum->bytenr &&
+		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
+			i = (disk_bytenr - ordered_sum->bytenr) >>
+			    inode->i_sb->s_blocksize_bits;
+			sector_sums = ordered_sum->sums + i;
+			num_sectors = ordered_sum->len >>
+				      inode->i_sb->s_blocksize_bits;
+			for (; i < num_sectors; i++) {
 				if (sector_sums[i].bytenr == disk_bytenr) {
-					*sum = sector_sums[i].sum;
-					ret = 0;
-					goto out;
+					sum[index] = sector_sums[i].sum;
+					index++;
+					if (index == len)
+						goto out;
+					disk_bytenr += sectorsize;
 				}
 			}
 		}
@@ -1018,7 +1024,7 @@
 out:
 	spin_unlock_irq(&tree->lock);
 	btrfs_put_ordered_extent(ordered);
-	return ret;
+	return index;
 }
 
 
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8eadfe4..58b0e3b 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -196,7 +196,8 @@
 							u64 len);
 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 				struct btrfs_ordered_extent *ordered);
-int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
+int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
+			   u32 *sum, int len);
 int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root, int wait);
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 920957e..dc0024f 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -176,7 +176,7 @@
 
 	nr = btrfs_header_nritems(l);
 
-	printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
+	btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d",
 		(unsigned long long)btrfs_header_bytenr(l), nr,
 		btrfs_leaf_free_space(root, l));
 	for (i = 0 ; i < nr ; i++) {
@@ -319,10 +319,9 @@
 		btrfs_print_leaf(root, c);
 		return;
 	}
-	printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n",
-	       (unsigned long long)btrfs_header_bytenr(c),
-	      level, nr,
-	       (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+	btrfs_info(root->fs_info, "node %llu level %d total ptrs %d free spc %u",
+		(unsigned long long)btrfs_header_bytenr(c),
+		level, nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
 	for (i = 0; i < nr; i++) {
 		btrfs_node_key_to_cpu(c, &key, i);
 		printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n",
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index da75efe..7faddfa 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -19,5 +19,5 @@
 #ifndef __PRINT_TREE_
 #define __PRINT_TREE_
 void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *t);
+void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
 #endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index b44124d..9d49c58 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -31,13 +31,13 @@
 #include "locking.h"
 #include "ulist.h"
 #include "backref.h"
+#include "extent_io.h"
 
 /* TODO XXX FIXME
  *  - subvol delete -> delete when ref goes to 0? delete limits also?
  *  - reorganize keys
  *  - compressed
  *  - sync
- *  - rescan
  *  - copy also limits on subvol creation
  *  - limit
  *  - caches for ulists
@@ -98,7 +98,15 @@
 	struct btrfs_qgroup *member;
 };
 
-/* must be called with qgroup_lock held */
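+/*
+ * Deferred rescan work, queued when the on-disk status item still carries
+ * the RESCAN flag (e.g. after a mount that interrupted an earlier rescan).
+ */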
+struct qgroup_rescan {
+	struct btrfs_work	work;
+	struct btrfs_fs_info	*fs_info;
+};
+
+static void qgroup_rescan_start(struct btrfs_fs_info *fs_info,
+				struct qgroup_rescan *qscan);
+
+/* must be called with qgroup_ioctl_lock held */
 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
 					   u64 qgroupid)
 {
@@ -298,7 +306,20 @@
 			}
 			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
 									  ptr);
-			/* FIXME read scan element */
+			fs_info->qgroup_rescan_progress.objectid =
+					btrfs_qgroup_status_rescan(l, ptr);
+			if (fs_info->qgroup_flags &
+			    BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+				struct qgroup_rescan *qscan =
+					kmalloc(sizeof(*qscan), GFP_NOFS);
+				if (!qscan) {
+					ret = -ENOMEM;
+					goto out;
+				}
+				fs_info->qgroup_rescan_progress.type = 0;
+				fs_info->qgroup_rescan_progress.offset = 0;
+				qgroup_rescan_start(fs_info, qscan);
+			}
 			goto next1;
 		}
 
@@ -420,8 +441,6 @@
 		qgroup = rb_entry(n, struct btrfs_qgroup, node);
 		rb_erase(n, &fs_info->qgroup_tree);
 
-		WARN_ON(!list_empty(&qgroup->dirty));
-
 		while (!list_empty(&qgroup->groups)) {
 			list = list_first_entry(&qgroup->groups,
 						struct btrfs_qgroup_list,
@@ -721,7 +740,8 @@
 	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
 	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
 	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
-	/* XXX scan */
+	btrfs_set_qgroup_status_rescan(l, ptr,
+				fs_info->qgroup_rescan_progress.objectid);
 
 	btrfs_mark_buffer_dirty(l);
 
@@ -783,19 +803,21 @@
 		       struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *quota_root;
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_path *path = NULL;
 	struct btrfs_qgroup_status_item *ptr;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
+	struct btrfs_key found_key;
+	struct btrfs_qgroup *qgroup = NULL;
 	int ret = 0;
+	int slot;
 
-	spin_lock(&fs_info->qgroup_lock);
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (fs_info->quota_root) {
 		fs_info->pending_quota_state = 1;
-		spin_unlock(&fs_info->qgroup_lock);
 		goto out;
 	}
-	spin_unlock(&fs_info->qgroup_lock);
 
 	/*
 	 * initially create the quota tree
@@ -830,10 +852,57 @@
 	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
 				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
-	btrfs_set_qgroup_status_scan(leaf, ptr, 0);
+	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
 
 	btrfs_mark_buffer_dirty(leaf);
 
+	key.objectid = 0;
+	key.type = BTRFS_ROOT_REF_KEY;
+	key.offset = 0;
+
+	btrfs_release_path(path);
+	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
+	if (ret > 0)
+		goto out_add_root;
+	if (ret < 0)
+		goto out_free_path;
+
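+	/* create a qgroup item for every existing subvolume (ROOT_REF key) */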
+	while (1) {
+		slot = path->slots[0];
+		leaf = path->nodes[0];
+		btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+		if (found_key.type == BTRFS_ROOT_REF_KEY) {
+			ret = add_qgroup_item(trans, quota_root,
+					      found_key.offset);
+			if (ret)
+				goto out_free_path;
+
+			qgroup = add_qgroup_rb(fs_info, found_key.offset);
+			if (IS_ERR(qgroup)) {
+				ret = PTR_ERR(qgroup);
+				goto out_free_path;
+			}
+		}
+		ret = btrfs_next_item(tree_root, path);
+		if (ret < 0)
+			goto out_free_path;
+		if (ret)
+			break;
+	}
+
+out_add_root:
+	btrfs_release_path(path);
+	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
+	if (ret)
+		goto out_free_path;
+
+	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
+	if (IS_ERR(qgroup)) {
+		ret = PTR_ERR(qgroup);
+		goto out_free_path;
+	}
 	spin_lock(&fs_info->qgroup_lock);
 	fs_info->quota_root = quota_root;
 	fs_info->pending_quota_state = 1;
@@ -847,6 +916,7 @@
 		kfree(quota_root);
 	}
 out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
@@ -857,11 +927,10 @@
 	struct btrfs_root *quota_root;
 	int ret = 0;
 
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
+	if (!fs_info->quota_root)
+		goto out;
 	spin_lock(&fs_info->qgroup_lock);
-	if (!fs_info->quota_root) {
-		spin_unlock(&fs_info->qgroup_lock);
-		return 0;
-	}
 	fs_info->quota_enabled = 0;
 	fs_info->pending_quota_state = 0;
 	quota_root = fs_info->quota_root;
@@ -869,8 +938,10 @@
 	btrfs_free_qgroup_config(fs_info);
 	spin_unlock(&fs_info->qgroup_lock);
 
-	if (!quota_root)
-		return -EINVAL;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	ret = btrfs_clean_quota_tree(trans, quota_root);
 	if (ret)
@@ -891,39 +962,62 @@
 	free_extent_buffer(quota_root->commit_root);
 	kfree(quota_root);
 out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
-int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
+static void qgroup_dirty(struct btrfs_fs_info *fs_info,
+			 struct btrfs_qgroup *qgroup)
 {
-	/* FIXME */
-	return 0;
+	if (list_empty(&qgroup->dirty))
+		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
 }
 
 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
 {
 	struct btrfs_root *quota_root;
+	struct btrfs_qgroup *parent;
+	struct btrfs_qgroup *member;
+	struct btrfs_qgroup_list *list;
 	int ret = 0;
 
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	quota_root = fs_info->quota_root;
-	if (!quota_root)
-		return -EINVAL;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
+	member = find_qgroup_rb(fs_info, src);
+	parent = find_qgroup_rb(fs_info, dst);
+	if (!member || !parent) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* check if such a qgroup relation exists first */
+	list_for_each_entry(list, &member->groups, next_group) {
+		if (list->group == parent) {
+			ret = -EEXIST;
+			goto out;
+		}
+	}
 
 	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
 	if (ret)
-		return ret;
+		goto out;
 
 	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
 	if (ret) {
 		del_qgroup_relation_item(trans, quota_root, src, dst);
-		return ret;
+		goto out;
 	}
 
 	spin_lock(&fs_info->qgroup_lock);
 	ret = add_relation_rb(quota_root->fs_info, src, dst);
 	spin_unlock(&fs_info->qgroup_lock);
-
+out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
@@ -931,13 +1025,34 @@
 			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
 {
 	struct btrfs_root *quota_root;
+	struct btrfs_qgroup *parent;
+	struct btrfs_qgroup *member;
+	struct btrfs_qgroup_list *list;
 	int ret = 0;
 	int err;
 
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	quota_root = fs_info->quota_root;
-	if (!quota_root)
-		return -EINVAL;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
 
+	member = find_qgroup_rb(fs_info, src);
+	parent = find_qgroup_rb(fs_info, dst);
+	if (!member || !parent) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* check if such a qgroup relation exists first */
+	list_for_each_entry(list, &member->groups, next_group) {
+		if (list->group == parent)
+			goto exist;
+	}
+	ret = -ENOENT;
+	goto out;
+exist:
 	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
 	err = del_qgroup_relation_item(trans, quota_root, dst, src);
 	if (err && !ret)
@@ -945,9 +1060,9 @@
 
 	spin_lock(&fs_info->qgroup_lock);
 	del_relation_rb(fs_info, src, dst);
-
 	spin_unlock(&fs_info->qgroup_lock);
-
+out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
@@ -958,11 +1073,21 @@
 	struct btrfs_qgroup *qgroup;
 	int ret = 0;
 
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	quota_root = fs_info->quota_root;
-	if (!quota_root)
-		return -EINVAL;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
+	qgroup = find_qgroup_rb(fs_info, qgroupid);
+	if (qgroup) {
+		ret = -EEXIST;
+		goto out;
+	}
 
 	ret = add_qgroup_item(trans, quota_root, qgroupid);
+	if (ret)
+		goto out;
 
 	spin_lock(&fs_info->qgroup_lock);
 	qgroup = add_qgroup_rb(fs_info, qgroupid);
@@ -970,7 +1095,8 @@
 
 	if (IS_ERR(qgroup))
 		ret = PTR_ERR(qgroup);
-
+out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
@@ -981,27 +1107,32 @@
 	struct btrfs_qgroup *qgroup;
 	int ret = 0;
 
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	quota_root = fs_info->quota_root;
-	if (!quota_root)
-		return -EINVAL;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
 
-	/* check if there are no relations to this qgroup */
-	spin_lock(&fs_info->qgroup_lock);
 	qgroup = find_qgroup_rb(fs_info, qgroupid);
-	if (qgroup) {
-		if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
-			spin_unlock(&fs_info->qgroup_lock);
-			return -EBUSY;
+	if (!qgroup) {
+		ret = -ENOENT;
+		goto out;
+	} else {
+		/* check if there are no relations to this qgroup */
+		if (!list_empty(&qgroup->groups) ||
+		    !list_empty(&qgroup->members)) {
+			ret = -EBUSY;
+			goto out;
 		}
 	}
-	spin_unlock(&fs_info->qgroup_lock);
-
 	ret = del_qgroup_item(trans, quota_root, qgroupid);
 
 	spin_lock(&fs_info->qgroup_lock);
 	del_qgroup_rb(quota_root->fs_info, qgroupid);
 	spin_unlock(&fs_info->qgroup_lock);
-
+out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
@@ -1009,13 +1140,22 @@
 		       struct btrfs_fs_info *fs_info, u64 qgroupid,
 		       struct btrfs_qgroup_limit *limit)
 {
-	struct btrfs_root *quota_root = fs_info->quota_root;
+	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
 	int ret = 0;
 
-	if (!quota_root)
-		return -EINVAL;
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
+	quota_root = fs_info->quota_root;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
 
+	qgroup = find_qgroup_rb(fs_info, qgroupid);
+	if (!qgroup) {
+		ret = -ENOENT;
+		goto out;
+	}
 	ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
 				       limit->flags, limit->max_rfer,
 				       limit->max_excl, limit->rsv_rfer,
@@ -1027,31 +1167,17 @@
 	}
 
 	spin_lock(&fs_info->qgroup_lock);
-
-	qgroup = find_qgroup_rb(fs_info, qgroupid);
-	if (!qgroup) {
-		ret = -ENOENT;
-		goto unlock;
-	}
 	qgroup->lim_flags = limit->flags;
 	qgroup->max_rfer = limit->max_rfer;
 	qgroup->max_excl = limit->max_excl;
 	qgroup->rsv_rfer = limit->rsv_rfer;
 	qgroup->rsv_excl = limit->rsv_excl;
-
-unlock:
 	spin_unlock(&fs_info->qgroup_lock);
-
+out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
-static void qgroup_dirty(struct btrfs_fs_info *fs_info,
-			 struct btrfs_qgroup *qgroup)
-{
-	if (list_empty(&qgroup->dirty))
-		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
-}
-
 /*
  * btrfs_qgroup_record_ref is called when the ref is added or deleted.  It puts
  * the modification into a list that's later used by btrfs_end_transaction to
@@ -1075,6 +1201,144 @@
 	return 0;
 }
 
+static int qgroup_account_ref_step1(struct btrfs_fs_info *fs_info,
+				    struct ulist *roots, struct ulist *tmp,
+				    u64 seq)
+{
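+	/*
+	 * Step 1: for each root that referenced the extent before this
+	 * operation, walk its qgroup and every parent qgroup and bump
+	 * refcnt relative to seq; afterwards refcnt - seq is the number
+	 * of old roots from which a qgroup is reachable.
+	 */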
+	struct ulist_node *unode;
+	struct ulist_iterator uiter;
+	struct ulist_node *tmp_unode;
+	struct ulist_iterator tmp_uiter;
+	struct btrfs_qgroup *qg;
+	int ret;
+
+	ULIST_ITER_INIT(&uiter);
+	while ((unode = ulist_next(roots, &uiter))) {
+		qg = find_qgroup_rb(fs_info, unode->val);
+		if (!qg)
+			continue;
+
+		ulist_reinit(tmp);
+						/* XXX id not needed */
+		ret = ulist_add(tmp, qg->qgroupid,
+				(u64)(uintptr_t)qg, GFP_ATOMIC);
+		if (ret < 0)
+			return ret;
+		ULIST_ITER_INIT(&tmp_uiter);
+		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+			struct btrfs_qgroup_list *glist;
+
+			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
+			if (qg->refcnt < seq)
+				qg->refcnt = seq + 1;
+			else
+				++qg->refcnt;
+
+			list_for_each_entry(glist, &qg->groups, next_group) {
+				ret = ulist_add(tmp, glist->group->qgroupid,
+						(u64)(uintptr_t)glist->group,
+						GFP_ATOMIC);
+				if (ret < 0)
+					return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int qgroup_account_ref_step2(struct btrfs_fs_info *fs_info,
+				    struct ulist *roots, struct ulist *tmp,
+				    u64 seq, int sgn, u64 num_bytes,
+				    struct btrfs_qgroup *qgroup)
+{
+	struct ulist_node *unode;
+	struct ulist_iterator uiter;
+	struct btrfs_qgroup *qg;
+	struct btrfs_qgroup_list *glist;
+	int ret;
+
+	ulist_reinit(tmp);
+	ret = ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
+	if (ret < 0)
+		return ret;
+
+	ULIST_ITER_INIT(&uiter);
+	while ((unode = ulist_next(tmp, &uiter))) {
+		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
+		if (qg->refcnt < seq) {
+			/* not visited by step 1 */
+			qg->rfer += sgn * num_bytes;
+			qg->rfer_cmpr += sgn * num_bytes;
+			if (roots->nnodes == 0) {
+				qg->excl += sgn * num_bytes;
+				qg->excl_cmpr += sgn * num_bytes;
+			}
+			qgroup_dirty(fs_info, qg);
+		}
+		WARN_ON(qg->tag >= seq);
+		qg->tag = seq;
+
+		list_for_each_entry(glist, &qg->groups, next_group) {
+			ret = ulist_add(tmp, glist->group->qgroupid,
+					(uintptr_t)glist->group, GFP_ATOMIC);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int qgroup_account_ref_step3(struct btrfs_fs_info *fs_info,
+				    struct ulist *roots, struct ulist *tmp,
+				    u64 seq, int sgn, u64 num_bytes)
+{
+	struct ulist_node *unode;
+	struct ulist_iterator uiter;
+	struct btrfs_qgroup *qg;
+	struct ulist_node *tmp_unode;
+	struct ulist_iterator tmp_uiter;
+	int ret;
+
+	ULIST_ITER_INIT(&uiter);
+	while ((unode = ulist_next(roots, &uiter))) {
+		qg = find_qgroup_rb(fs_info, unode->val);
+		if (!qg)
+			continue;
+
+		ulist_reinit(tmp);
+		ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
+		if (ret < 0)
+			return ret;
+
+		ULIST_ITER_INIT(&tmp_uiter);
+		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+			struct btrfs_qgroup_list *glist;
+
+			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
+			if (qg->tag == seq)
+				continue;
+
+			if (qg->refcnt - seq == roots->nnodes) {
+				qg->excl -= sgn * num_bytes;
+				qg->excl_cmpr -= sgn * num_bytes;
+				qgroup_dirty(fs_info, qg);
+			}
+
+			list_for_each_entry(glist, &qg->groups, next_group) {
+				ret = ulist_add(tmp, glist->group->qgroupid,
+						(uintptr_t)glist->group,
+						GFP_ATOMIC);
+				if (ret < 0)
+					return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
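
All three step helpers above lean on the same stamp trick: rather than clearing a per-qgroup visited flag between passes, fs_info->qgroup_seq is bumped past the largest possible refcount and each pass compares the stored stamp against the current seq. A minimal userspace model of that reset-free marking (toy types and names, not btrfs code):

	#include <stdio.h>

	/* toy stand-in for struct btrfs_qgroup: only the stamp matters */
	struct node { unsigned long long refcnt; };

	static unsigned long long seq = 1;	/* models fs_info->qgroup_seq */

	/* first touch in a pass sets the stamp to seq + 1, later touches
	 * increment it (mirrors step 1 in the patch) */
	static void touch(struct node *n)
	{
		if (n->refcnt < seq)
			n->refcnt = seq + 1;
		else
			++n->refcnt;
	}

	int main(void)
	{
		struct node a = { 0 }, b = { 0 };
		int nroots = 3;			/* models roots->nnodes */

		touch(&a); touch(&a); touch(&a);	/* a seen from all 3 roots */
		touch(&b);				/* b seen from 1 root */

		/* step-3 style test: visited from *all* old roots? */
		printf("a from all roots: %d\n", a.refcnt - seq == (unsigned)nroots);
		printf("b from all roots: %d\n", b.refcnt - seq == (unsigned)nroots);

		/* next pass needs no reset loop, just advance seq */
		seq += nroots + 1;
		printf("a visited in new pass: %d\n", a.refcnt >= seq);
		return 0;
	}

Advancing the sequence by roots->nnodes + 1 per pass is what lets the same refcnt field be reused across passes without ever walking the tree to clear it.
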
 /*
  * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
  * from the fs. First, all roots referencing the extent are searched, and
@@ -1090,10 +1354,8 @@
 	struct btrfs_root *quota_root;
 	u64 ref_root;
 	struct btrfs_qgroup *qgroup;
-	struct ulist_node *unode;
 	struct ulist *roots = NULL;
 	struct ulist *tmp = NULL;
-	struct ulist_iterator uiter;
 	u64 seq;
 	int ret = 0;
 	int sgn;
@@ -1132,9 +1394,11 @@
 	case BTRFS_ADD_DELAYED_REF:
 	case BTRFS_ADD_DELAYED_EXTENT:
 		sgn = 1;
+		seq = btrfs_tree_mod_seq_prev(node->seq);
 		break;
 	case BTRFS_DROP_DELAYED_REF:
 		sgn = -1;
+		seq = node->seq;
 		break;
 	case BTRFS_UPDATE_DELAYED_HEAD:
 		return 0;
@@ -1142,20 +1406,37 @@
 		BUG();
 	}
 
+	mutex_lock(&fs_info->qgroup_rescan_lock);
+	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
+			mutex_unlock(&fs_info->qgroup_rescan_lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
 	/*
 	 * the delayed ref sequence number we pass depends on the direction of
-	 * the operation. for add operations, we pass (node->seq - 1) to skip
+	 * the operation. for add operations, we pass
+	 * btrfs_tree_mod_seq_prev(node->seq) to skip
 	 * the delayed ref's current sequence number, because we need the state
 	 * of the tree before the add operation. for delete operations, we pass
 	 * (node->seq) to include the delayed ref's current sequence number,
 	 * because we need the state of the tree after the delete operation.
 	 */
-	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
-				   sgn > 0 ? node->seq - 1 : node->seq, &roots);
+	ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, seq, &roots);
 	if (ret < 0)
 		return ret;
 
+	mutex_lock(&fs_info->qgroup_rescan_lock);
 	spin_lock(&fs_info->qgroup_lock);
+	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+		if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
+			ret = 0;
+			goto unlock;
+		}
+	}
+
 	quota_root = fs_info->quota_root;
 	if (!quota_root)
 		goto unlock;
@@ -1175,106 +1456,29 @@
 	seq = fs_info->qgroup_seq;
 	fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
 
-	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(roots, &uiter))) {
-		struct ulist_node *tmp_unode;
-		struct ulist_iterator tmp_uiter;
-		struct btrfs_qgroup *qg;
-
-		qg = find_qgroup_rb(fs_info, unode->val);
-		if (!qg)
-			continue;
-
-		ulist_reinit(tmp);
-						/* XXX id not needed */
-		ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
-		ULIST_ITER_INIT(&tmp_uiter);
-		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
-			struct btrfs_qgroup_list *glist;
-
-			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
-			if (qg->refcnt < seq)
-				qg->refcnt = seq + 1;
-			else
-				++qg->refcnt;
-
-			list_for_each_entry(glist, &qg->groups, next_group) {
-				ulist_add(tmp, glist->group->qgroupid,
-					  (u64)(uintptr_t)glist->group,
-					  GFP_ATOMIC);
-			}
-		}
-	}
+	ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
+	if (ret)
+		goto unlock;
 
 	/*
 	 * step 2: walk from the new root
 	 */
-	ulist_reinit(tmp);
-	ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
-	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(tmp, &uiter))) {
-		struct btrfs_qgroup *qg;
-		struct btrfs_qgroup_list *glist;
-
-		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
-		if (qg->refcnt < seq) {
-			/* not visited by step 1 */
-			qg->rfer += sgn * node->num_bytes;
-			qg->rfer_cmpr += sgn * node->num_bytes;
-			if (roots->nnodes == 0) {
-				qg->excl += sgn * node->num_bytes;
-				qg->excl_cmpr += sgn * node->num_bytes;
-			}
-			qgroup_dirty(fs_info, qg);
-		}
-		WARN_ON(qg->tag >= seq);
-		qg->tag = seq;
-
-		list_for_each_entry(glist, &qg->groups, next_group) {
-			ulist_add(tmp, glist->group->qgroupid,
-				  (uintptr_t)glist->group, GFP_ATOMIC);
-		}
-	}
+	ret = qgroup_account_ref_step2(fs_info, roots, tmp, seq, sgn,
+				       node->num_bytes, qgroup);
+	if (ret)
+		goto unlock;
 
 	/*
 	 * step 3: walk again from old refs
 	 */
-	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(roots, &uiter))) {
-		struct btrfs_qgroup *qg;
-		struct ulist_node *tmp_unode;
-		struct ulist_iterator tmp_uiter;
+	ret = qgroup_account_ref_step3(fs_info, roots, tmp, seq, sgn,
+				       node->num_bytes);
+	if (ret)
+		goto unlock;
 
-		qg = find_qgroup_rb(fs_info, unode->val);
-		if (!qg)
-			continue;
-
-		ulist_reinit(tmp);
-		ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
-		ULIST_ITER_INIT(&tmp_uiter);
-		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
-			struct btrfs_qgroup_list *glist;
-
-			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
-			if (qg->tag == seq)
-				continue;
-
-			if (qg->refcnt - seq == roots->nnodes) {
-				qg->excl -= sgn * node->num_bytes;
-				qg->excl_cmpr -= sgn * node->num_bytes;
-				qgroup_dirty(fs_info, qg);
-			}
-
-			list_for_each_entry(glist, &qg->groups, next_group) {
-				ulist_add(tmp, glist->group->qgroupid,
-					  (uintptr_t)glist->group,
-					  GFP_ATOMIC);
-			}
-		}
-	}
-	ret = 0;
 unlock:
 	spin_unlock(&fs_info->qgroup_lock);
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
 	ulist_free(roots);
 	ulist_free(tmp);
 
@@ -1289,10 +1493,14 @@
 {
 	struct btrfs_root *quota_root = fs_info->quota_root;
 	int ret = 0;
+	int start_rescan_worker = 0;
 
 	if (!quota_root)
 		goto out;
 
+	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
+		start_rescan_worker = 1;
+
 	fs_info->quota_enabled = fs_info->pending_quota_state;
 
 	spin_lock(&fs_info->qgroup_lock);
@@ -1318,6 +1526,13 @@
 	if (ret)
 		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 
+	if (!ret && start_rescan_worker) {
+		ret = btrfs_qgroup_rescan(fs_info);
+		if (ret)
+			pr_err("btrfs: start rescan quota failed: %d\n", ret);
+		ret = 0;
+	}
+
 out:
 
 	return ret;
@@ -1338,12 +1553,30 @@
 	struct btrfs_qgroup *srcgroup;
 	struct btrfs_qgroup *dstgroup;
 	u32 level_size = 0;
+	u64 nums;
 
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (!fs_info->quota_enabled)
-		return 0;
+		goto out;
 
-	if (!quota_root)
-		return -EINVAL;
+	if (!quota_root) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (inherit) {
+		i_qgroups = (u64 *)(inherit + 1);
+		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
+		       2 * inherit->num_excl_copies;
+		for (i = 0; i < nums; ++i) {
+			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
+			if (!srcgroup) {
+				ret = -EINVAL;
+				goto out;
+			}
+			++i_qgroups;
+		}
+	}
 
 	/*
 	 * create a tracking group for the subvol itself
@@ -1470,6 +1703,7 @@
 unlock:
 	spin_unlock(&fs_info->qgroup_lock);
 out:
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
 
@@ -1514,7 +1748,10 @@
 		ret = -ENOMEM;
 		goto out;
 	}
-	ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
+	ret = ulist_add(ulist, qgroup->qgroupid,
+			(uintptr_t)qgroup, GFP_ATOMIC);
+	if (ret < 0)
+		goto out;
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
@@ -1523,25 +1760,27 @@
 		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 
 		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
-		    qg->reserved + qg->rfer + num_bytes >
+		    qg->reserved + (s64)qg->rfer + num_bytes >
 		    qg->max_rfer) {
 			ret = -EDQUOT;
 			goto out;
 		}
 
 		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
-		    qg->reserved + qg->excl + num_bytes >
+		    qg->reserved + (s64)qg->excl + num_bytes >
 		    qg->max_excl) {
 			ret = -EDQUOT;
 			goto out;
 		}
 
 		list_for_each_entry(glist, &qg->groups, next_group) {
-			ulist_add(ulist, glist->group->qgroupid,
-				  (uintptr_t)glist->group, GFP_ATOMIC);
+			ret = ulist_add(ulist, glist->group->qgroupid,
+					(uintptr_t)glist->group, GFP_ATOMIC);
+			if (ret < 0)
+				goto out;
 		}
 	}
-
+	ret = 0;
 	/*
 	 * no limits exceeded, now record the reservation into all qgroups
 	 */
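
The new (s64) casts in the limit checks above follow the usual defence against C's arithmetic conversions: once every operand of the sum is unsigned 64-bit, a transiently negative term wraps to a huge positive value and the comparison misfires. The struct field widths are not visible in this hunk, so what follows is only a self-contained refresher on the conversion rule (illustrative values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t used = 1;
		int64_t  delta = -2;	/* e.g. a pending release */

		/* mixed u64/s64 addition converts to u64: 1 + (-2) wraps */
		if (used + delta > 0)
			puts("mixed-width check sees a huge positive value");

		/* doing the sum in s64 keeps -1 negative */
		if ((int64_t)used + delta < 0)
			puts("all-signed check sees -1, as intended");
		return 0;
	}
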
@@ -1570,6 +1809,7 @@
 	struct ulist_node *unode;
 	struct ulist_iterator uiter;
 	u64 ref_root = root->root_key.objectid;
+	int ret = 0;
 
 	if (!is_fstree(ref_root))
 		return;
@@ -1592,7 +1832,10 @@
 		btrfs_std_error(fs_info, -ENOMEM);
 		goto out;
 	}
-	ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
+	ret = ulist_add(ulist, qgroup->qgroupid,
+			(uintptr_t)qgroup, GFP_ATOMIC);
+	if (ret < 0)
+		goto out;
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
@@ -1603,8 +1846,10 @@
 		qg->reserved -= num_bytes;
 
 		list_for_each_entry(glist, &qg->groups, next_group) {
-			ulist_add(ulist, glist->group->qgroupid,
-				  (uintptr_t)glist->group, GFP_ATOMIC);
+			ret = ulist_add(ulist, glist->group->qgroupid,
+					(uintptr_t)glist->group, GFP_ATOMIC);
+			if (ret < 0)
+				goto out;
 		}
 	}
 
@@ -1617,8 +1862,265 @@
 {
 	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
 		return;
-	printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
+	pr_err("btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x\n",
 		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
-		trans->delayed_ref_elem.seq);
+		(u32)(trans->delayed_ref_elem.seq >> 32),
+		(u32)trans->delayed_ref_elem.seq);
 	BUG();
 }
+
+/*
+ * returns < 0 on error, 0 when more leaves are to be scanned.
+ * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
+ */
+static int
+qgroup_rescan_leaf(struct qgroup_rescan *qscan, struct btrfs_path *path,
+		   struct btrfs_trans_handle *trans, struct ulist *tmp,
+		   struct extent_buffer *scratch_leaf)
+{
+	struct btrfs_key found;
+	struct btrfs_fs_info *fs_info = qscan->fs_info;
+	struct ulist *roots = NULL;
+	struct ulist_node *unode;
+	struct ulist_iterator uiter;
+	struct seq_list tree_mod_seq_elem = {};
+	u64 seq;
+	int slot;
+	int ret;
+
+	path->leave_spinning = 1;
+	mutex_lock(&fs_info->qgroup_rescan_lock);
+	ret = btrfs_search_slot_for_read(fs_info->extent_root,
+					 &fs_info->qgroup_rescan_progress,
+					 path, 1, 0);
+
+	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
+		 (unsigned long long)fs_info->qgroup_rescan_progress.objectid,
+		 fs_info->qgroup_rescan_progress.type,
+		 (unsigned long long)fs_info->qgroup_rescan_progress.offset,
+		 ret);
+
+	if (ret) {
+		/*
+		 * The rescan is about to end; we will not be scanning any
+		 * further blocks. We cannot unset the RESCAN flag here, because
+		 * we want to commit the transaction if everything went well.
+		 * To make the live accounting work in this phase, we set our
+		 * scan progress pointer such that every real extent objectid
+		 * will be smaller.
+		 */
+		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+		btrfs_release_path(path);
+		mutex_unlock(&fs_info->qgroup_rescan_lock);
+		return ret;
+	}
+
+	btrfs_item_key_to_cpu(path->nodes[0], &found,
+			      btrfs_header_nritems(path->nodes[0]) - 1);
+	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
+
+	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
+	slot = path->slots[0];
+	btrfs_release_path(path);
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
+		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
+		if (found.type != BTRFS_EXTENT_ITEM_KEY)
+			continue;
+		ret = btrfs_find_all_roots(trans, fs_info, found.objectid,
+					   tree_mod_seq_elem.seq, &roots);
+		if (ret < 0)
+			goto out;
+		spin_lock(&fs_info->qgroup_lock);
+		seq = fs_info->qgroup_seq;
+		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
+
+		ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
+		if (ret) {
+			spin_unlock(&fs_info->qgroup_lock);
+			ulist_free(roots);
+			goto out;
+		}
+
+		/*
+		 * step 2 of btrfs_qgroup_account_ref works from a single root;
+		 * here we do all roots at once.
+		 */
+		ulist_reinit(tmp);
+		ULIST_ITER_INIT(&uiter);
+		while ((unode = ulist_next(roots, &uiter))) {
+			struct btrfs_qgroup *qg;
+
+			qg = find_qgroup_rb(fs_info, unode->val);
+			if (!qg)
+				continue;
+
+			ret = ulist_add(tmp, qg->qgroupid, (uintptr_t)qg,
+					GFP_ATOMIC);
+			if (ret < 0) {
+				spin_unlock(&fs_info->qgroup_lock);
+				ulist_free(roots);
+				goto out;
+			}
+		}
+
+		/* this loop is similar to step 2 of btrfs_qgroup_account_ref */
+		ULIST_ITER_INIT(&uiter);
+		while ((unode = ulist_next(tmp, &uiter))) {
+			struct btrfs_qgroup *qg;
+			struct btrfs_qgroup_list *glist;
+
+			qg = (struct btrfs_qgroup *)(uintptr_t) unode->aux;
+			qg->rfer += found.offset;
+			qg->rfer_cmpr += found.offset;
+			WARN_ON(qg->tag >= seq);
+			if (qg->refcnt - seq == roots->nnodes) {
+				qg->excl += found.offset;
+				qg->excl_cmpr += found.offset;
+			}
+			qgroup_dirty(fs_info, qg);
+
+			list_for_each_entry(glist, &qg->groups, next_group) {
+				ret = ulist_add(tmp, glist->group->qgroupid,
+						(uintptr_t)glist->group,
+						GFP_ATOMIC);
+				if (ret < 0) {
+					spin_unlock(&fs_info->qgroup_lock);
+					ulist_free(roots);
+					goto out;
+				}
+			}
+		}
+
+		spin_unlock(&fs_info->qgroup_lock);
+		ulist_free(roots);
+		ret = 0;
+	}
+
+out:
+	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+
+	return ret;
+}
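
The structure of qgroup_rescan_leaf is a classic resumable cursor: look up the saved progress key under the lock, copy the whole leaf to a scratch buffer, advance the cursor past the leaf, and only then drop the lock for the expensive accounting. A self-contained miniature of that pattern (a plain array and a pthread mutex stand in for the extent tree and qgroup_rescan_lock; compile with -pthread):

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define NITEMS	10
	#define BATCH	3

	static int tree[NITEMS] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	static size_t progress;		/* models qgroup_rescan_progress */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* returns 0 when more items remain, 1 when the scan is done */
	static int scan_one_batch(void)
	{
		int scratch[BATCH];
		size_t n, i;

		pthread_mutex_lock(&lock);
		if (progress >= NITEMS) {
			pthread_mutex_unlock(&lock);
			return 1;
		}
		n = NITEMS - progress < BATCH ? NITEMS - progress : BATCH;
		memcpy(scratch, &tree[progress], n * sizeof(*scratch));
		progress += n;		/* bump cursor *before* unlocking */
		pthread_mutex_unlock(&lock);

		for (i = 0; i < n; i++)	/* heavy work happens unlocked */
			printf("accounted item %d\n", scratch[i]);
		return 0;
	}

	int main(void)
	{
		while (!scan_one_batch())
			;
		return 0;
	}
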
+
+static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
+{
+	struct qgroup_rescan *qscan = container_of(work, struct qgroup_rescan,
+						   work);
+	struct btrfs_path *path;
+	struct btrfs_trans_handle *trans = NULL;
+	struct btrfs_fs_info *fs_info = qscan->fs_info;
+	struct ulist *tmp = NULL;
+	struct extent_buffer *scratch_leaf = NULL;
+	int err = -ENOMEM;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		goto out;
+	tmp = ulist_alloc(GFP_NOFS);
+	if (!tmp)
+		goto out;
+	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
+	if (!scratch_leaf)
+		goto out;
+
+	err = 0;
+	while (!err) {
+		trans = btrfs_start_transaction(fs_info->fs_root, 0);
+		if (IS_ERR(trans)) {
+			err = PTR_ERR(trans);
+			break;
+		}
+		if (!fs_info->quota_enabled) {
+			err = -EINTR;
+		} else {
+			err = qgroup_rescan_leaf(qscan, path, trans,
+						 tmp, scratch_leaf);
+		}
+		if (err > 0)
+			btrfs_commit_transaction(trans, fs_info->fs_root);
+		else
+			btrfs_end_transaction(trans, fs_info->fs_root);
+	}
+
+out:
+	kfree(scratch_leaf);
+	ulist_free(tmp);
+	btrfs_free_path(path);
+	kfree(qscan);
+
+	mutex_lock(&fs_info->qgroup_rescan_lock);
+	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+
+	if (err == 2 &&
+	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
+		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+	} else if (err < 0) {
+		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+	}
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+	if (err >= 0) {
+		pr_info("btrfs: qgroup scan completed%s\n",
+			err == 2 ? " (inconsistency flag cleared)" : "");
+	} else {
+		pr_err("btrfs: qgroup scan failed with %d\n", err);
+	}
+}
+
+static void
+qgroup_rescan_start(struct btrfs_fs_info *fs_info, struct qgroup_rescan *qscan)
+{
+	memset(&qscan->work, 0, sizeof(qscan->work));
+	qscan->work.func = btrfs_qgroup_rescan_worker;
+	qscan->fs_info = fs_info;
+
+	pr_info("btrfs: qgroup scan started\n");
+	btrfs_queue_worker(&fs_info->qgroup_rescan_workers, &qscan->work);
+}
+
+int
+btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+{
+	int ret = 0;
+	struct rb_node *n;
+	struct btrfs_qgroup *qgroup;
+	struct qgroup_rescan *qscan = kmalloc(sizeof(*qscan), GFP_NOFS);
+
+	if (!qscan)
+		return -ENOMEM;
+
+	mutex_lock(&fs_info->qgroup_rescan_lock);
+	spin_lock(&fs_info->qgroup_lock);
+	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+		ret = -EINPROGRESS;
+	else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+		ret = -EINVAL;
+	if (ret) {
+		spin_unlock(&fs_info->qgroup_lock);
+		mutex_unlock(&fs_info->qgroup_rescan_lock);
+		kfree(qscan);
+		return ret;
+	}
+
+	fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+	memset(&fs_info->qgroup_rescan_progress, 0,
+		sizeof(fs_info->qgroup_rescan_progress));
+
+	/* clear all current qgroup tracking information */
+	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
+		qgroup = rb_entry(n, struct btrfs_qgroup, node);
+		qgroup->rfer = 0;
+		qgroup->rfer_cmpr = 0;
+		qgroup->excl = 0;
+		qgroup->excl_cmpr = 0;
+	}
+	spin_unlock(&fs_info->qgroup_lock);
+	mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+	qgroup_rescan_start(fs_info, qscan);
+
+	return 0;
+}
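
Note how btrfs_qgroup_rescan keeps the rescan single-shot: the RESCAN flag is tested and set while both locks are held, so two racing ioctls cannot both queue a worker, and only the worker clears the flag when it finishes. A userspace sketch of that start-once handshake (hypothetical names, compile with -pthread):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int rescan_running;	/* models the RESCAN status flag */

	static void *worker(void *arg)
	{
		(void)arg;
		puts("scan running");
		/* ... long scan ... */
		pthread_mutex_lock(&lock);
		rescan_running = 0;	/* cleared only by the worker */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	static int start_rescan(pthread_t *t)
	{
		pthread_mutex_lock(&lock);
		if (rescan_running) {	/* test and set under one lock */
			pthread_mutex_unlock(&lock);
			return -EINPROGRESS;
		}
		rescan_running = 1;
		pthread_mutex_unlock(&lock);
		return pthread_create(t, NULL, worker, NULL);
	}

	int main(void)
	{
		pthread_t t;

		if (start_rescan(&t) == 0)
			pthread_join(t, NULL);
		printf("second start while idle: %d\n", start_rescan(&t));
		pthread_join(t, NULL);
		return 0;
	}
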
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 9a79fb7..0740621 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -410,7 +410,7 @@
 /*
  * remove everything in the cache
  */
-void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
+static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 {
 	struct btrfs_stripe_hash_table *table;
 	unsigned long flags;
@@ -1010,12 +1010,12 @@
  * this will try to merge into existing bios if possible, and returns
  * zero if all went well.
  */
-int rbio_add_io_page(struct btrfs_raid_bio *rbio,
-		     struct bio_list *bio_list,
-		     struct page *page,
-		     int stripe_nr,
-		     unsigned long page_index,
-		     unsigned long bio_max_len)
+static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
+			    struct bio_list *bio_list,
+			    struct page *page,
+			    int stripe_nr,
+			    unsigned long page_index,
+			    unsigned long bio_max_len)
 {
 	struct bio *last = bio_list->tail;
 	u64 last_end = 0;
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 96b93da..1031b69 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -955,10 +955,11 @@
 	while (atomic_read(&rc->elems)) {
 		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
 				   5 * HZ);
-		dump_devs(rc->root->fs_info, rc->elems < 10 ? 1 : 0);
+		dump_devs(rc->root->fs_info,
+			  atomic_read(&rc->elems) < 10 ? 1 : 0);
 	}
 
-	dump_devs(rc->root->fs_info, rc->elems < 10 ? 1 : 0);
+	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
 
 	kref_put(&rc->refcnt, reada_control_release);
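
Both dump_devs calls now funnel the element count through atomic_read() rather than touching the field directly, so every evaluation sees the current value. In C11 terms the discipline is the same: a counter shared with other threads must be re-read through an atomic load each time it is tested. A tiny userspace model (compile with -pthread):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int elems = 3;

	static void *worker(void *arg)
	{
		(void)arg;
		while (atomic_load(&elems) > 0)	/* re-read every pass */
			atomic_fetch_sub(&elems, 1);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		pthread_join(t, NULL);
		printf("elems = %d\n", atomic_load(&elems));
		return 0;
	}
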
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b67171e..704a1b8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -326,8 +326,7 @@
 	return NULL;
 }
 
-void backref_tree_panic(struct rb_node *rb_node, int errno,
-					  u64 bytenr)
+static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
 {
 
 	struct btrfs_fs_info *fs_info = NULL;
@@ -619,10 +618,13 @@
 int find_inline_backref(struct extent_buffer *leaf, int slot,
 			unsigned long *ptr, unsigned long *end)
 {
+	struct btrfs_key key;
 	struct btrfs_extent_item *ei;
 	struct btrfs_tree_block_info *bi;
 	u32 item_size;
 
+	btrfs_item_key_to_cpu(leaf, &key, slot);
+
 	item_size = btrfs_item_size_nr(leaf, slot);
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 	if (item_size < sizeof(*ei)) {
@@ -634,13 +636,18 @@
 	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
 		  BTRFS_EXTENT_FLAG_TREE_BLOCK));
 
-	if (item_size <= sizeof(*ei) + sizeof(*bi)) {
+	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
+	    item_size <= sizeof(*ei) + sizeof(*bi)) {
 		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
 		return 1;
 	}
 
-	bi = (struct btrfs_tree_block_info *)(ei + 1);
-	*ptr = (unsigned long)(bi + 1);
+	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+		bi = (struct btrfs_tree_block_info *)(ei + 1);
+		*ptr = (unsigned long)(bi + 1);
+	} else {
+		*ptr = (unsigned long)(ei + 1);
+	}
 	*end = (unsigned long)ei + item_size;
 	return 0;
 }
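
The key-type test above exists because a skinny metadata item (BTRFS_METADATA_ITEM_KEY) carries no struct btrfs_tree_block_info: its inline backrefs start right after the extent item, and the block level travels in key.offset instead. A toy offset calculation contrasting the two layouts (struct shapes are illustrative only, not the real on-disk definitions):

	#include <stdio.h>
	#include <stdint.h>

	/* illustrative stand-ins for the on-disk items */
	struct extent_item     { uint64_t refs, generation, flags; };
	struct tree_block_info { uint8_t key[17]; uint8_t level; };

	int main(void)
	{
		unsigned char item[64] = { 0 };
		struct extent_item *ei = (struct extent_item *)item;

		/* EXTENT_ITEM_KEY: inline refs follow the tree_block_info */
		unsigned long ptr_fat =
			(unsigned long)((struct tree_block_info *)(ei + 1) + 1) -
			(unsigned long)item;

		/* METADATA_ITEM_KEY: inline refs follow the extent item */
		unsigned long ptr_skinny =
			(unsigned long)(ei + 1) - (unsigned long)item;

		printf("inline refs start at %lu (fat) vs %lu (skinny)\n",
		       ptr_fat, ptr_skinny);
		return 0;
	}
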
@@ -708,7 +715,7 @@
 	end = 0;
 	ptr = 0;
 	key.objectid = cur->bytenr;
-	key.type = BTRFS_EXTENT_ITEM_KEY;
+	key.type = BTRFS_METADATA_ITEM_KEY;
 	key.offset = (u64)-1;
 
 	path1->search_commit_root = 1;
@@ -766,7 +773,8 @@
 				break;
 			}
 
-			if (key.type == BTRFS_EXTENT_ITEM_KEY) {
+			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
+			    key.type == BTRFS_METADATA_ITEM_KEY) {
 				ret = find_inline_backref(eb, path1->slots[0],
 							  &ptr, &end);
 				if (ret)
@@ -1762,7 +1770,11 @@
 
 			eb = read_tree_block(dest, old_bytenr, blocksize,
 					     old_ptr_gen);
-			BUG_ON(!eb);
+			if (!eb || !extent_buffer_uptodate(eb)) {
+				ret = (!eb) ? -ENOMEM : -EIO;
+				free_extent_buffer(eb);
+				return ret;
+			}
 			btrfs_tree_lock(eb);
 			if (cow) {
 				ret = btrfs_cow_block(trans, dest, eb, parent,
@@ -1915,6 +1927,10 @@
 		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
 		blocksize = btrfs_level_size(root, i - 1);
 		eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
+		if (!eb || !extent_buffer_uptodate(eb)) {
+			free_extent_buffer(eb);
+			return -EIO;
+		}
 		BUG_ON(btrfs_header_level(eb) != i - 1);
 		path->nodes[i - 1] = eb;
 		path->slots[i - 1] = 0;
@@ -2592,7 +2608,8 @@
 		blocksize = btrfs_level_size(root, node->level);
 		generation = btrfs_node_ptr_generation(upper->eb, slot);
 		eb = read_tree_block(root, bytenr, blocksize, generation);
-		if (!eb) {
+		if (!eb || !extent_buffer_uptodate(eb)) {
+			free_extent_buffer(eb);
 			err = -EIO;
 			goto next;
 		}
@@ -2753,7 +2770,10 @@
 	BUG_ON(block->key_ready);
 	eb = read_tree_block(rc->extent_root, block->bytenr,
 			     block->key.objectid, block->key.offset);
-	BUG_ON(!eb);
+	if (!eb || !extent_buffer_uptodate(eb)) {
+		free_extent_buffer(eb);
+		return -EIO;
+	}
 	WARN_ON(btrfs_header_level(eb) != block->level);
 	if (block->level == 0)
 		btrfs_item_key_to_cpu(eb, &block->key, 0);
@@ -2768,8 +2788,13 @@
 			    struct tree_block *block)
 {
 	BUG_ON(block->key_ready);
-	readahead_tree_block(rc->extent_root, block->bytenr,
-			     block->key.objectid, block->key.offset);
+	if (block->key.type == BTRFS_METADATA_ITEM_KEY)
+		readahead_tree_block(rc->extent_root, block->bytenr,
+				     block->key.objectid,
+				     rc->extent_root->leafsize);
+	else
+		readahead_tree_block(rc->extent_root, block->bytenr,
+				     block->key.objectid, block->key.offset);
 	return 0;
 }
 
@@ -2850,7 +2875,7 @@
 	path = btrfs_alloc_path();
 	if (!path) {
 		err = -ENOMEM;
-		goto out_path;
+		goto out_free_blocks;
 	}
 
 	rb_node = rb_first(blocks);
@@ -2864,8 +2889,11 @@
 	rb_node = rb_first(blocks);
 	while (rb_node) {
 		block = rb_entry(rb_node, struct tree_block, rb_node);
-		if (!block->key_ready)
-			get_tree_block_key(rc, block);
+		if (!block->key_ready) {
+			err = get_tree_block_key(rc, block);
+			if (err)
+				goto out_free_path;
+		}
 		rb_node = rb_next(rb_node);
 	}
 
@@ -2892,8 +2920,9 @@
 out:
 	err = finish_pending_nodes(trans, rc, path, err);
 
+out_free_path:
 	btrfs_free_path(path);
-out_path:
+out_free_blocks:
 	free_block_list(blocks);
 	return err;
 }
@@ -2965,7 +2994,7 @@
 	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	while (1) {
 		write_lock(&em_tree->lock);
-		ret = add_extent_mapping(em_tree, em);
+		ret = add_extent_mapping(em_tree, em, 0);
 		write_unlock(&em_tree->lock);
 		if (ret != -EEXIST) {
 			free_extent_map(em);
@@ -3176,12 +3205,17 @@
 	eb =  path->nodes[0];
 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
 
-	if (item_size >= sizeof(*ei) + sizeof(*bi)) {
+	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
+	    item_size >= sizeof(*ei) + sizeof(*bi)) {
 		ei = btrfs_item_ptr(eb, path->slots[0],
 				struct btrfs_extent_item);
-		bi = (struct btrfs_tree_block_info *)(ei + 1);
+		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
+			bi = (struct btrfs_tree_block_info *)(ei + 1);
+			level = btrfs_tree_block_level(eb, bi);
+		} else {
+			level = (int)extent_key->offset;
+		}
 		generation = btrfs_extent_generation(eb, ei);
-		level = btrfs_tree_block_level(eb, bi);
 	} else {
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 		u64 ref_owner;
@@ -3210,7 +3244,7 @@
 		return -ENOMEM;
 
 	block->bytenr = extent_key->objectid;
-	block->key.objectid = extent_key->offset;
+	block->key.objectid = rc->extent_root->leafsize;
 	block->key.offset = generation;
 	block->level = level;
 	block->key_ready = 0;
@@ -3252,9 +3286,15 @@
 	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
-	BUG_ON(ret);
 
 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+	if (ret > 0) {
+		if (key.objectid == bytenr &&
+		    key.type == BTRFS_METADATA_ITEM_KEY)
+			ret = 0;
+	}
+	BUG_ON(ret);
+
 	ret = add_tree_block(rc, &key, path, blocks);
 out:
 	btrfs_free_path(path);
@@ -3275,7 +3315,8 @@
 		return 1;
 
 	ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
-				       eb->start, eb->len, NULL, &flags);
+				       eb->start, btrfs_header_level(eb), 1,
+				       NULL, &flags);
 	BUG_ON(ret);
 
 	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
@@ -3644,12 +3685,25 @@
 			break;
 		}
 
-		if (key.type != BTRFS_EXTENT_ITEM_KEY ||
+		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+		    key.type != BTRFS_METADATA_ITEM_KEY) {
+			path->slots[0]++;
+			goto next;
+		}
+
+		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
 		    key.objectid + key.offset <= rc->search_start) {
 			path->slots[0]++;
 			goto next;
 		}
 
+		if (key.type == BTRFS_METADATA_ITEM_KEY &&
+		    key.objectid + rc->extent_root->leafsize <=
+		    rc->search_start) {
+			path->slots[0]++;
+			goto next;
+		}
+
 		ret = find_first_extent_bit(&rc->processed_blocks,
 					    key.objectid, &start, &end,
 					    EXTENT_DIRTY, NULL);
@@ -3658,7 +3712,11 @@
 			btrfs_release_path(path);
 			rc->search_start = end + 1;
 		} else {
-			rc->search_start = key.objectid + key.offset;
+			if (key.type == BTRFS_EXTENT_ITEM_KEY)
+				rc->search_start = key.objectid + key.offset;
+			else
+				rc->search_start = key.objectid +
+					rc->extent_root->leafsize;
 			memcpy(extent_key, &key, sizeof(key));
 			return 0;
 		}
@@ -4105,10 +4163,7 @@
 
 	while (1) {
 		mutex_lock(&fs_info->cleaner_mutex);
-
-		btrfs_clean_old_snapshots(fs_info->tree_root);
 		ret = relocate_block_group(rc);
-
 		mutex_unlock(&fs_info->cleaner_mutex);
 		if (ret < 0) {
 			err = ret;
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 668af53..5bf1ed5 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -29,9 +29,8 @@
  * generation numbers as then we know the root was once mounted with an older
  * kernel that was not aware of the root item structure change.
  */
-void btrfs_read_root_item(struct btrfs_root *root,
-			 struct extent_buffer *eb, int slot,
-			 struct btrfs_root_item *item)
+void btrfs_read_root_item(struct extent_buffer *eb, int slot,
+			  struct btrfs_root_item *item)
 {
 	uuid_le uuid;
 	int len;
@@ -104,7 +103,7 @@
 		goto out;
 	}
 	if (item)
-		btrfs_read_root_item(root, l, slot, item);
+		btrfs_read_root_item(l, slot, item);
 	if (key)
 		memcpy(key, &found_key, sizeof(found_key));
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 85e072b..f489e24 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1336,7 +1336,6 @@
 	int page_num;
 	u8 calculated_csum[BTRFS_CSUM_SIZE];
 	u32 crc = ~(u32)0;
-	struct btrfs_root *root = fs_info->extent_root;
 	void *mapped_buffer;
 
 	WARN_ON(!sblock->pagev[0]->page);
@@ -1365,12 +1364,11 @@
 
 	for (page_num = 0;;) {
 		if (page_num == 0 && is_metadata)
-			crc = btrfs_csum_data(root,
+			crc = btrfs_csum_data(
 				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
 				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
 		else
-			crc = btrfs_csum_data(root, mapped_buffer, crc,
-					      PAGE_SIZE);
+			crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
 
 		kunmap_atomic(mapped_buffer);
 		page_num++;
@@ -1657,7 +1655,6 @@
 	void *buffer;
 	u32 crc = ~(u32)0;
 	int fail = 0;
-	struct btrfs_root *root = sctx->dev_root;
 	u64 len;
 	int index;
 
@@ -1674,7 +1671,7 @@
 	for (;;) {
 		u64 l = min_t(u64, len, PAGE_SIZE);
 
-		crc = btrfs_csum_data(root, buffer, crc, l);
+		crc = btrfs_csum_data(buffer, crc, l);
 		kunmap_atomic(buffer);
 		len -= l;
 		if (len == 0)
@@ -1744,7 +1741,7 @@
 	for (;;) {
 		u64 l = min_t(u64, len, mapped_size);
 
-		crc = btrfs_csum_data(root, p, crc, l);
+		crc = btrfs_csum_data(p, crc, l);
 		kunmap_atomic(mapped_buffer);
 		len -= l;
 		if (len == 0)
@@ -1805,7 +1802,7 @@
 	for (;;) {
 		u64 l = min_t(u64, len, mapped_size);
 
-		crc = btrfs_csum_data(root, p, crc, l);
+		crc = btrfs_csum_data(p, crc, l);
 		kunmap_atomic(mapped_buffer);
 		len -= l;
 		if (len == 0)
@@ -2236,12 +2233,12 @@
 	u64 flags;
 	int ret;
 	int slot;
-	int i;
 	u64 nstripes;
 	struct extent_buffer *l;
 	struct btrfs_key key;
 	u64 physical;
 	u64 logical;
+	u64 logic_end;
 	u64 generation;
 	int mirror_num;
 	struct reada_control *reada1;
@@ -2255,6 +2252,7 @@
 	u64 extent_len;
 	struct btrfs_device *extent_dev;
 	int extent_mirror_num;
+	int stop_loop;
 
 	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
 			 BTRFS_BLOCK_GROUP_RAID6)) {
@@ -2315,8 +2313,8 @@
 	key_start.type = BTRFS_EXTENT_ITEM_KEY;
 	key_start.offset = (u64)0;
 	key_end.objectid = base + offset + nstripes * increment;
-	key_end.type = BTRFS_EXTENT_ITEM_KEY;
-	key_end.offset = (u64)0;
+	key_end.type = BTRFS_METADATA_ITEM_KEY;
+	key_end.offset = (u64)-1;
 	reada1 = btrfs_reada_add(root, &key_start, &key_end);
 
 	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -2354,8 +2352,9 @@
 	 */
 	logical = base + offset;
 	physical = map->stripes[num].physical;
+	logic_end = logical + increment * nstripes;
 	ret = 0;
-	for (i = 0; i < nstripes; ++i) {
+	while (logical < logic_end) {
 		/*
 		 * canceled?
 		 */
@@ -2391,19 +2390,14 @@
 			wake_up(&fs_info->scrub_pause_wait);
 		}
 
-		ret = btrfs_lookup_csums_range(csum_root, logical,
-					       logical + map->stripe_len - 1,
-					       &sctx->csum_list, 1);
-		if (ret)
-			goto out;
-
 		key.objectid = logical;
 		key.type = BTRFS_EXTENT_ITEM_KEY;
-		key.offset = (u64)0;
+		key.offset = (u64)-1;
 
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto out;
+
 		if (ret > 0) {
 			ret = btrfs_previous_item(root, path, 0,
 						  BTRFS_EXTENT_ITEM_KEY);
@@ -2420,7 +2414,10 @@
 			}
 		}
 
+		stop_loop = 0;
 		while (1) {
+			u64 bytes;
+
 			l = path->nodes[0];
 			slot = path->slots[0];
 			if (slot >= btrfs_header_nritems(l)) {
@@ -2430,18 +2427,29 @@
 				if (ret < 0)
 					goto out;
 
+				stop_loop = 1;
 				break;
 			}
 			btrfs_item_key_to_cpu(l, &key, slot);
 
-			if (key.objectid + key.offset <= logical)
+			if (key.type == BTRFS_METADATA_ITEM_KEY)
+				bytes = root->leafsize;
+			else
+				bytes = key.offset;
+
+			if (key.objectid + bytes <= logical)
 				goto next;
 
-			if (key.objectid >= logical + map->stripe_len)
+			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+			    key.type != BTRFS_METADATA_ITEM_KEY)
+				goto next;
+
+			if (key.objectid >= logical + map->stripe_len) {
+				/* out of this device extent */
+				if (key.objectid >= logic_end)
+					stop_loop = 1;
 				break;
-
-			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
-				goto next;
+			}
 
 			extent = btrfs_item_ptr(l, slot,
 						struct btrfs_extent_item);
@@ -2458,22 +2466,24 @@
 				goto next;
 			}
 
+again:
+			extent_logical = key.objectid;
+			extent_len = bytes;
+
 			/*
 			 * trim extent to this stripe
 			 */
-			if (key.objectid < logical) {
-				key.offset -= logical - key.objectid;
-				key.objectid = logical;
+			if (extent_logical < logical) {
+				extent_len -= logical - extent_logical;
+				extent_logical = logical;
 			}
-			if (key.objectid + key.offset >
+			if (extent_logical + extent_len >
 			    logical + map->stripe_len) {
-				key.offset = logical + map->stripe_len -
-					     key.objectid;
+				extent_len = logical + map->stripe_len -
+					     extent_logical;
 			}
 
-			extent_logical = key.objectid;
-			extent_physical = key.objectid - logical + physical;
-			extent_len = key.offset;
+			extent_physical = extent_logical - logical + physical;
 			extent_dev = scrub_dev;
 			extent_mirror_num = mirror_num;
 			if (is_dev_replace)
@@ -2481,13 +2491,35 @@
 						   extent_len, &extent_physical,
 						   &extent_dev,
 						   &extent_mirror_num);
-			ret = scrub_extent(sctx, extent_logical, extent_len,
-					   extent_physical, extent_dev, flags,
-					   generation, extent_mirror_num,
-					   key.objectid - logical + physical);
+
+			ret = btrfs_lookup_csums_range(csum_root, logical,
+						logical + map->stripe_len - 1,
+						&sctx->csum_list, 1);
 			if (ret)
 				goto out;
 
+			ret = scrub_extent(sctx, extent_logical, extent_len,
+					   extent_physical, extent_dev, flags,
+					   generation, extent_mirror_num,
+					   extent_physical);
+			if (ret)
+				goto out;
+
+			if (extent_logical + extent_len <
+			    key.objectid + bytes) {
+				logical += increment;
+				physical += map->stripe_len;
+
+				if (logical < key.objectid + bytes) {
+					cond_resched();
+					goto again;
+				}
+
+				if (logical >= logic_end) {
+					stop_loop = 1;
+					break;
+				}
+			}
 next:
 			path->slots[0]++;
 		}
@@ -2495,8 +2527,14 @@
 		logical += increment;
 		physical += map->stripe_len;
 		spin_lock(&sctx->stat_lock);
-		sctx->stat.last_physical = physical;
+		if (stop_loop)
+			sctx->stat.last_physical = map->stripes[num].physical +
+						   length;
+		else
+			sctx->stat.last_physical = physical;
 		spin_unlock(&sctx->stat_lock);
+		if (stop_loop)
+			break;
 	}
 out:
 	/* push queued extents */
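
The again: block above stops mutating the search key in place and instead trims a working copy, extent_logical/extent_len, to the current stripe. The trim itself is plain interval intersection; a standalone check of the arithmetic with made-up numbers:

	#include <stdio.h>
	#include <stdint.h>

	/* clamp [ext, ext+len) to the stripe [logical, logical+stripe_len);
	 * assumes the two ranges overlap, as the scrub loop guarantees */
	static void trim_to_stripe(uint64_t logical, uint64_t stripe_len,
				   uint64_t *ext, uint64_t *len)
	{
		if (*ext < logical) {
			*len -= logical - *ext;
			*ext = logical;
		}
		if (*ext + *len > logical + stripe_len)
			*len = logical + stripe_len - *ext;
	}

	int main(void)
	{
		uint64_t ext = 900, len = 400;	/* spills past both edges */

		trim_to_stripe(1000, 64, &ext, &len);
		printf("scrub range: [%llu, %llu)\n",	/* [1000, 1064) */
		       (unsigned long long)ext,
		       (unsigned long long)(ext + len));
		return 0;
	}
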
@@ -3005,28 +3043,6 @@
 	return 0;
 }
 
-int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
-{
-	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct btrfs_device *dev;
-	int ret;
-
-	/*
-	 * we have to hold the device_list_mutex here so the device
-	 * does not go away in cancel_dev. FIXME: find a better solution
-	 */
-	mutex_lock(&fs_info->fs_devices->device_list_mutex);
-	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
-	if (!dev) {
-		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		return -ENODEV;
-	}
-	ret = btrfs_scrub_cancel_dev(fs_info, dev);
-	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-
-	return ret;
-}
-
 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
 			 struct btrfs_scrub_progress *progress)
 {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c85e7c6..ff40f1c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -387,7 +387,7 @@
 	return path;
 }
 
-int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
+static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
 {
 	int ret;
 	mm_segment_t old_fs;
@@ -3479,7 +3479,6 @@
 	struct send_ctx *sctx = ctx;
 	char *found_data = NULL;
 	int found_data_len  = 0;
-	struct fs_path *p = NULL;
 
 	ret = find_xattr(sctx, sctx->parent_root, sctx->right_path,
 			sctx->cmp_key, name, name_len, &found_data,
@@ -3498,7 +3497,6 @@
 	}
 
 	kfree(found_data);
-	fs_path_free(sctx, p);
 	return ret;
 }
 
@@ -4529,9 +4527,11 @@
 {
 	int ret;
 
-	ret = send_header(sctx);
-	if (ret < 0)
-		goto out;
+	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
+		ret = send_header(sctx);
+		if (ret < 0)
+			goto out;
+	}
 
 	ret = send_subvol_begin(sctx);
 	if (ret < 0)
@@ -4593,7 +4593,7 @@
 		goto out;
 	}
 
-	if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) {
+	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -4612,8 +4612,8 @@
 	sctx->flags = arg->flags;
 
 	sctx->send_filp = fget(arg->send_fd);
-	if (IS_ERR(sctx->send_filp)) {
-		ret = PTR_ERR(sctx->send_filp);
+	if (!sctx->send_filp) {
+		ret = -EBADF;
 		goto out;
 	}
 
@@ -4704,12 +4704,14 @@
 	if (ret < 0)
 		goto out;
 
-	ret = begin_cmd(sctx, BTRFS_SEND_C_END);
-	if (ret < 0)
-		goto out;
-	ret = send_cmd(sctx);
-	if (ret < 0)
-		goto out;
+	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
+		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
+		if (ret < 0)
+			goto out;
+		ret = send_cmd(sctx);
+		if (ret < 0)
+			goto out;
+	}
 
 out:
 	kfree(arg);
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 8bb18f7..48d425a 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -131,5 +131,4 @@
 
 #ifdef __KERNEL__
 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
-int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off);
 #endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f6b8859..a4807ce 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -56,6 +56,7 @@
 #include "compression.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "free-space-cache.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/btrfs.h>
@@ -63,9 +64,9 @@
 static const struct super_operations btrfs_super_ops;
 static struct file_system_type btrfs_fs_type;
 
-static const char *btrfs_decode_error(int errno, char nbuf[16])
+static const char *btrfs_decode_error(int errno)
 {
-	char *errstr = NULL;
+	char *errstr = "unknown";
 
 	switch (errno) {
 	case -EIO:
@@ -80,18 +81,18 @@
 	case -EEXIST:
 		errstr = "Object already exists";
 		break;
-	default:
-		if (nbuf) {
-			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
-				errstr = nbuf;
-		}
+	case -ENOSPC:
+		errstr = "No space left";
+		break;
+	case -ENOENT:
+		errstr = "No such entry";
 		break;
 	}
 
 	return errstr;
 }
 
-static void __save_error_info(struct btrfs_fs_info *fs_info)
+static void save_error_info(struct btrfs_fs_info *fs_info)
 {
 	/*
 	 * today we only save the error info into ram.  Long term we'll
@@ -100,11 +101,6 @@
 	set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
 }
 
-static void save_error_info(struct btrfs_fs_info *fs_info)
-{
-	__save_error_info(fs_info);
-}
-
 /* btrfs handle error by forcing the filesystem readonly */
 static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 {
@@ -115,7 +111,7 @@
 
 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 		sb->s_flags |= MS_RDONLY;
-		printk(KERN_INFO "btrfs is forced readonly\n");
+		btrfs_info(fs_info, "forced readonly");
 		/*
 		 * Note that a running device replace operation is not
 		 * canceled here although there is no way to update
@@ -126,7 +122,6 @@
 		 * mounted writeable again, the device replace
 		 * operation continues.
 		 */
-//		WARN_ON(1);
 	}
 }
 
@@ -139,7 +134,6 @@
 		       unsigned int line, int errno, const char *fmt, ...)
 {
 	struct super_block *sb = fs_info->sb;
-	char nbuf[16];
 	const char *errstr;
 
 	/*
@@ -149,7 +143,7 @@
 	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
   		return;
 
-  	errstr = btrfs_decode_error(errno, nbuf);
+	errstr = btrfs_decode_error(errno);
 	if (fmt) {
 		struct va_format vaf;
 		va_list args;
@@ -158,19 +152,18 @@
 		vaf.fmt = fmt;
 		vaf.va = &args;
 
-		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
-			sb->s_id, function, line, errstr, &vaf);
+		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: errno=%d %s (%pV)\n",
+			sb->s_id, function, line, errno, errstr, &vaf);
 		va_end(args);
 	} else {
-		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
-			sb->s_id, function, line, errstr);
+		printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: errno=%d %s\n",
+			sb->s_id, function, line, errno, errstr);
 	}
 
 	/* Don't go through full error handling during mount */
-	if (sb->s_flags & MS_BORN) {
-		save_error_info(fs_info);
+	save_error_info(fs_info);
+	if (sb->s_flags & MS_BORN)
 		btrfs_handle_error(fs_info);
-	}
 }
 
 static const char * const logtypes[] = {
@@ -184,7 +177,7 @@
 	"debug",
 };
 
-void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 {
 	struct super_block *sb = fs_info->sb;
 	char lvl[4];
@@ -208,7 +201,7 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
+	printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
 
 	va_end(args);
 }
@@ -252,18 +245,24 @@
 			       struct btrfs_root *root, const char *function,
 			       unsigned int line, int errno)
 {
-	WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted\n");
+	/*
+	 * Report first abort since mount
+	 */
+	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,
+				&root->fs_info->fs_state)) {
+		WARN(1, KERN_DEBUG "btrfs: Transaction aborted (error %d)\n",
+				errno);
+	}
 	trans->aborted = errno;
 	/* Nothing used. The other threads that have joined this
 	 * transaction may be able to continue. */
 	if (!trans->blocks_used) {
-		char nbuf[16];
 		const char *errstr;
 
-		errstr = btrfs_decode_error(errno, nbuf);
-		btrfs_printk(root->fs_info,
-			     "%s:%d: Aborting unused transaction(%s).\n",
-			     function, line, errstr);
+		errstr = btrfs_decode_error(errno);
+		btrfs_warn(root->fs_info,
+		           "%s:%d: Aborting unused transaction(%s).",
+		           function, line, errstr);
 		return;
 	}
 	ACCESS_ONCE(trans->transaction->aborted) = errno;
@@ -276,7 +275,6 @@
 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 		   unsigned int line, int errno, const char *fmt, ...)
 {
-	char nbuf[16];
 	char *s_id = "<unknown>";
 	const char *errstr;
 	struct va_format vaf = { .fmt = fmt };
@@ -288,13 +286,13 @@
 	va_start(args, fmt);
 	vaf.va = &args;
 
-	errstr = btrfs_decode_error(errno, nbuf);
+	errstr = btrfs_decode_error(errno);
 	if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
-		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
-			s_id, function, line, &vaf, errstr);
+		panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
+			s_id, function, line, &vaf, errno, errstr);
 
-	printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
-	       s_id, function, line, &vaf, errstr);
+	printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
+	       s_id, function, line, &vaf, errno, errstr);
 	va_end(args);
 	/* Caller calls BUG() */
 }
@@ -650,7 +648,7 @@
  */
 static int btrfs_parse_early_options(const char *options, fmode_t flags,
 		void *holder, char **subvol_name, u64 *subvol_objectid,
-		u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
+		struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
 	char *device_name, *opts, *orig, *p;
@@ -693,16 +691,8 @@
 			}
 			break;
 		case Opt_subvolrootid:
-			intarg = 0;
-			error = match_int(&args[0], &intarg);
-			if (!error) {
-				/* we want the original fs_tree */
-				if (!intarg)
-					*subvol_rootid =
-						BTRFS_FS_TREE_OBJECTID;
-				else
-					*subvol_rootid = intarg;
-			}
+			printk(KERN_WARNING
+				"btrfs: 'subvolrootid' mount option is deprecated and has no effect\n");
 			break;
 		case Opt_device:
 			device_name = match_strdup(&args[0]);
@@ -876,7 +866,7 @@
 		return 0;
 	}
 
-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_wait_ordered_extents(root, 1);
 
 	trans = btrfs_attach_transaction_barrier(root);
 	if (IS_ERR(trans)) {
@@ -1080,7 +1070,6 @@
 	fmode_t mode = FMODE_READ;
 	char *subvol_name = NULL;
 	u64 subvol_objectid = 0;
-	u64 subvol_rootid = 0;
 	int error = 0;
 
 	if (!(flags & MS_RDONLY))
@@ -1088,7 +1077,7 @@
 
 	error = btrfs_parse_early_options(data, mode, fs_type,
 					  &subvol_name, &subvol_objectid,
-					  &subvol_rootid, &fs_devices);
+					  &fs_devices);
 	if (error) {
 		kfree(subvol_name);
 		return ERR_PTR(error);
@@ -1202,11 +1191,14 @@
 			      new_pool_size);
 }
 
-static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info,
-					 unsigned long old_opts, int flags)
+static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info)
 {
 	set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
 
+static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info,
+				       unsigned long old_opts, int flags)
+{
 	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
 	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
 	     (flags & MS_RDONLY))) {
@@ -1247,7 +1239,7 @@
 	unsigned int old_metadata_ratio = fs_info->metadata_ratio;
 	int ret;
 
-	btrfs_remount_prepare(fs_info, old_opts, *flags);
+	btrfs_remount_prepare(fs_info);
 
 	ret = btrfs_parse_options(root, data);
 	if (ret) {
@@ -1255,6 +1247,7 @@
 		goto restore;
 	}
 
+	btrfs_remount_begin(fs_info, old_opts, *flags);
 	btrfs_resize_thread_pool(fs_info,
 		fs_info->thread_pool_size, old_thread_pool_size);
 
@@ -1739,6 +1732,10 @@
 
 	btrfs_init_lockdep();
 
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+	btrfs_test_free_space_cache();
+#endif
+
 	printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
 	return 0;
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 50767bb..0544587 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -34,7 +34,7 @@
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
-void put_transaction(struct btrfs_transaction *transaction)
+static void put_transaction(struct btrfs_transaction *transaction)
 {
 	WARN_ON(atomic_read(&transaction->use_count) == 0);
 	if (atomic_dec_and_test(&transaction->use_count)) {
@@ -162,7 +162,7 @@
 	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
 		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
 			"creating a fresh transaction\n");
-	atomic_set(&fs_info->tree_mod_seq, 0);
+	atomic64_set(&fs_info->tree_mod_seq, 0);
 
 	spin_lock_init(&cur_trans->commit_lock);
 	spin_lock_init(&cur_trans->delayed_refs.lock);
@@ -707,23 +707,13 @@
 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root)
 {
-	int ret;
-
-	ret = __btrfs_end_transaction(trans, root, 0);
-	if (ret)
-		return ret;
-	return 0;
+	return __btrfs_end_transaction(trans, root, 0);
 }
 
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
-	int ret;
-
-	ret = __btrfs_end_transaction(trans, root, 1);
-	if (ret)
-		return ret;
-	return 0;
+	return __btrfs_end_transaction(trans, root, 1);
 }
 
 int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
@@ -948,7 +938,7 @@
 int btrfs_add_dead_root(struct btrfs_root *root)
 {
 	spin_lock(&root->fs_info->trans_lock);
-	list_add(&root->root_list, &root->fs_info->dead_roots);
+	list_add_tail(&root->root_list, &root->fs_info->dead_roots);
 	spin_unlock(&root->fs_info->trans_lock);
 	return 0;
 }
@@ -1179,13 +1169,17 @@
 	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
 	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
 			BTRFS_UUID_SIZE);
+	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
+		memset(new_root_item->received_uuid, 0,
+		       sizeof(new_root_item->received_uuid));
+		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
+		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
+		btrfs_set_root_stransid(new_root_item, 0);
+		btrfs_set_root_rtransid(new_root_item, 0);
+	}
 	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
 	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
 	btrfs_set_root_otransid(new_root_item, trans->transid);
-	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
-	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
-	btrfs_set_root_stransid(new_root_item, 0);
-	btrfs_set_root_rtransid(new_root_item, 0);
 
 	old = btrfs_lock_root_node(root);
 	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
@@ -1487,6 +1481,10 @@
 		current->journal_info = NULL;
 
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+	spin_lock(&root->fs_info->trans_lock);
+	root->fs_info->trans_no_join = 0;
+	spin_unlock(&root->fs_info->trans_lock);
 }
 
 static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
@@ -1808,7 +1806,7 @@
 	ret = btrfs_write_and_wait_transaction(trans, root);
 	if (ret) {
 		btrfs_error(root->fs_info, ret,
-			    "Error while writing out transaction.");
+			    "Error while writing out transaction");
 		mutex_unlock(&root->fs_info->tree_log_mutex);
 		goto cleanup_transaction;
 	}
@@ -1864,8 +1862,7 @@
 		btrfs_qgroup_free(root, trans->qgroup_reserved);
 		trans->qgroup_reserved = 0;
 	}
-	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
-//	WARN_ON(1);
+	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
 	cleanup_transaction(trans, root, ret);
@@ -1874,31 +1871,49 @@
 }
 
 /*
- * interface function to delete all the snapshots we have scheduled for deletion
+ * return < 0 on error
+ * 0 if there are no more dead_roots at the time of call
+ * 1 if there are more to be processed; call again
+ *
+ * A return value of 1 means there are certainly more snapshots to delete, but
+ * if a new one arrives during processing, we may still return 0. We don't
+ * mind, because btrfs_commit_super will poke the cleaner thread and it will
+ * process it a few seconds later.
  */
-int btrfs_clean_old_snapshots(struct btrfs_root *root)
+int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
 {
-	LIST_HEAD(list);
+	int ret;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
+	if (fs_info->sb->s_flags & MS_RDONLY) {
+		pr_debug("btrfs: cleaner called for RO fs!\n");
+		return 0;
+	}
+
 	spin_lock(&fs_info->trans_lock);
-	list_splice_init(&fs_info->dead_roots, &list);
+	if (list_empty(&fs_info->dead_roots)) {
+		spin_unlock(&fs_info->trans_lock);
+		return 0;
+	}
+	root = list_first_entry(&fs_info->dead_roots,
+			struct btrfs_root, root_list);
+	list_del(&root->root_list);
 	spin_unlock(&fs_info->trans_lock);
 
-	while (!list_empty(&list)) {
-		int ret;
+	pr_debug("btrfs: cleaner removing %llu\n",
+			(unsigned long long)root->objectid);
 
-		root = list_entry(list.next, struct btrfs_root, root_list);
-		list_del(&root->root_list);
+	btrfs_kill_all_delayed_nodes(root);
 
-		btrfs_kill_all_delayed_nodes(root);
-
-		if (btrfs_header_backref_rev(root->node) <
-		    BTRFS_MIXED_BACKREF_REV)
-			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
-		else
-			ret =btrfs_drop_snapshot(root, NULL, 1, 0);
-		BUG_ON(ret < 0);
-	}
-	return 0;
+	if (btrfs_header_backref_rev(root->node) <
+			BTRFS_MIXED_BACKREF_REV)
+		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
+	else
+		ret = btrfs_drop_snapshot(root, NULL, 1, 0);
+	/*
+	 * If we encounter a transaction abort during snapshot cleaning, we
+	 * don't want to crash here
+	 */
+	BUG_ON(ret < 0 && ret != -EAGAIN && ret != -EROFS);
+	return 1;
 }
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 3c8e0d2..24c9733 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -123,7 +123,7 @@
 
 int btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
-int btrfs_clean_old_snapshots(struct btrfs_root *root);
+int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root);
 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
@@ -146,5 +146,4 @@
 				struct extent_io_tree *dirty_pages, int mark);
 int btrfs_transaction_blocked(struct btrfs_fs_info *info);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
-void put_transaction(struct btrfs_transaction *transaction);
 #endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ef96381..c276ac9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -277,17 +277,19 @@
 			      struct extent_buffer *eb,
 			      struct walk_control *wc, u64 gen)
 {
-	if (wc->pin)
-		btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
-						eb->start, eb->len);
+	int ret = 0;
 
-	if (btrfs_buffer_uptodate(eb, gen, 0)) {
+	if (wc->pin)
+		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
+						      eb->start, eb->len);
+
+	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
 		if (wc->write)
 			btrfs_write_tree_block(eb);
 		if (wc->wait)
 			btrfs_wait_tree_block_writeback(eb);
 	}
-	return 0;
+	return ret;
 }
 
 /*
@@ -408,9 +410,9 @@
 		found_size = btrfs_item_size_nr(path->nodes[0],
 						path->slots[0]);
 		if (found_size > item_size)
-			btrfs_truncate_item(trans, root, path, item_size, 1);
+			btrfs_truncate_item(root, path, item_size, 1);
 		else if (found_size < item_size)
-			btrfs_extend_item(trans, root, path,
+			btrfs_extend_item(root, path,
 					  item_size - found_size);
 	} else if (ret) {
 		return ret;
@@ -587,7 +589,8 @@
 
 	/* drop any overlapping extents */
 	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
-	BUG_ON(ret);
+	if (ret)
+		goto out;
 
 	if (found_type == BTRFS_FILE_EXTENT_REG ||
 	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
@@ -597,7 +600,8 @@
 
 		ret = btrfs_insert_empty_item(trans, root, path, key,
 					      sizeof(*item));
-		BUG_ON(ret);
+		if (ret)
+			goto out;
 		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
 						    path->slots[0]);
 		copy_extent_buffer(path->nodes[0], eb, dest_offset,
@@ -623,7 +627,8 @@
 						ins.objectid, ins.offset,
 						0, root->root_key.objectid,
 						key->objectid, offset, 0);
-				BUG_ON(ret);
+				if (ret)
+					goto out;
 			} else {
 				/*
 				 * insert the extent pointer in the extent
@@ -632,7 +637,8 @@
 				ret = btrfs_alloc_logged_file_extent(trans,
 						root, root->root_key.objectid,
 						key->objectid, offset, &ins);
-				BUG_ON(ret);
+				if (ret)
+					goto out;
 			}
 			btrfs_release_path(path);
 
@@ -649,26 +655,30 @@
 			ret = btrfs_lookup_csums_range(root->log_root,
 						csum_start, csum_end - 1,
 						&ordered_sums, 0);
-			BUG_ON(ret);
+			if (ret)
+				goto out;
 			while (!list_empty(&ordered_sums)) {
 				struct btrfs_ordered_sum *sums;
 				sums = list_entry(ordered_sums.next,
 						struct btrfs_ordered_sum,
 						list);
-				ret = btrfs_csum_file_blocks(trans,
+				if (!ret)
+					ret = btrfs_csum_file_blocks(trans,
 						root->fs_info->csum_root,
 						sums);
-				BUG_ON(ret);
 				list_del(&sums->list);
 				kfree(sums);
 			}
+			if (ret)
+				goto out;
 		} else {
 			btrfs_release_path(path);
 		}
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 		/* inline extents are easy, we just overwrite them */
 		ret = overwrite_item(trans, root, path, eb, slot, key);
-		BUG_ON(ret);
+		if (ret)
+			goto out;
 	}
 
 	inode_add_bytes(inode, nbytes);
@@ -713,20 +723,21 @@
 
 	inode = read_one_inode(root, location.objectid);
 	if (!inode) {
-		kfree(name);
-		return -EIO;
+		ret = -EIO;
+		goto out;
 	}
 
 	ret = link_to_fixup_dir(trans, root, path, location.objectid);
-	BUG_ON(ret);
+	if (ret)
+		goto out;
 
 	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
-	BUG_ON(ret);
-	kfree(name);
-
-	iput(inode);
-
+	if (ret)
+		goto out;
 	btrfs_run_delayed_items(trans, root);
+out:
+	kfree(name);
+	iput(inode);
 	return ret;
 }
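
The hunk above shows the shape most of this series takes: instead of two
early returns that each had to remember to free "name" and drop the inode
reference, every failure now funnels through a single out: label. A minimal
sketch of the idiom, with do_unlink() standing in for the real work
(hypothetical name; kfree(NULL) and iput(NULL) are both no-ops, which is
what makes the unconditional cleanup safe):

static int unlink_one_name_sketch(struct btrfs_root *root,
				  struct inode *dir, u64 ino)
{
	char *name;
	struct inode *inode = NULL;
	int ret;

	name = kmalloc(BTRFS_NAME_LEN, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	inode = read_one_inode(root, ino);
	if (!inode) {
		ret = -EIO;
		goto out;		/* cleanup happens exactly once */
	}

	ret = do_unlink(dir, inode, name);	/* hypothetical helper */
out:
	kfree(name);
	iput(inode);
	return ret;
}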
 
@@ -879,7 +890,8 @@
 			victim_name_len = btrfs_inode_ref_name_len(leaf,
 								   victim_ref);
 			victim_name = kmalloc(victim_name_len, GFP_NOFS);
-			BUG_ON(!victim_name);
+			if (!victim_name)
+				return -ENOMEM;
 
 			read_extent_buffer(leaf, victim_name,
 					   (unsigned long)(victim_ref + 1),
@@ -895,9 +907,10 @@
 				ret = btrfs_unlink_inode(trans, root, dir,
 							 inode, victim_name,
 							 victim_name_len);
-				BUG_ON(ret);
-				btrfs_run_delayed_items(trans, root);
 				kfree(victim_name);
+				if (ret)
+					return ret;
+				btrfs_run_delayed_items(trans, root);
 				*search_done = 1;
 				goto again;
 			}
@@ -905,7 +918,6 @@
 
 			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
 		}
-		BUG_ON(ret);
 
 		/*
 		 * NOTE: we have searched root tree and checked the
@@ -939,6 +951,8 @@
 				goto next;
 
 			victim_name = kmalloc(victim_name_len, GFP_NOFS);
+			if (!victim_name)
+				return -ENOMEM;
 			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
 					   victim_name_len);
 
@@ -965,14 +979,16 @@
 								 victim_name_len);
 					btrfs_run_delayed_items(trans, root);
 				}
-				BUG_ON(ret);
 				iput(victim_parent);
 				kfree(victim_name);
+				if (ret)
+					return ret;
 				*search_done = 1;
 				goto again;
 			}
 			kfree(victim_name);
-			BUG_ON(ret);
+			if (ret)
+				return ret;
 next:
 			cur_offset += victim_name_len + sizeof(*extref);
 		}
@@ -985,7 +1001,8 @@
 					 ref_index, name, namelen, 0);
 	if (di && !IS_ERR(di)) {
 		ret = drop_one_dir_item(trans, root, path, dir, di);
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 	}
 	btrfs_release_path(path);
 
@@ -994,7 +1011,8 @@
 				   name, namelen, 0);
 	if (di && !IS_ERR(di)) {
 		ret = drop_one_dir_item(trans, root, path, dir, di);
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 	}
 	btrfs_release_path(path);
 
@@ -1139,15 +1157,19 @@
 						      parent_objectid,
 						      ref_index, name, namelen,
 						      &search_done);
-				if (ret == 1)
+				if (ret == 1) {
+					ret = 0;
 					goto out;
-				BUG_ON(ret);
+				}
+				if (ret)
+					goto out;
 			}
 
 			/* insert our name */
 			ret = btrfs_add_link(trans, dir, inode, name, namelen,
 					     0, ref_index);
-			BUG_ON(ret);
+			if (ret)
+				goto out;
 
 			btrfs_update_inode(trans, root, inode);
 		}
@@ -1162,13 +1184,11 @@
 
 	/* finally write the back reference in the inode */
 	ret = overwrite_item(trans, root, path, eb, slot, key);
-	BUG_ON(ret);
-
 out:
 	btrfs_release_path(path);
 	iput(dir);
 	iput(inode);
-	return 0;
+	return ret;
 }
 
 static int insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -1326,10 +1346,10 @@
 		if (S_ISDIR(inode->i_mode)) {
 			ret = replay_dir_deletes(trans, root, NULL, path,
 						 ino, 1);
-			BUG_ON(ret);
+			if (ret)
+				goto out;
 		}
 		ret = insert_orphan_item(trans, root, ino);
-		BUG_ON(ret);
 	}
 
 out:
@@ -1374,9 +1394,9 @@
 			return -EIO;
 
 		ret = fixup_inode_link_count(trans, root, inode);
-		BUG_ON(ret);
-
 		iput(inode);
+		if (ret)
+			goto out;
 
 		/*
 		 * fixup on a directory may create new entries,
@@ -1426,7 +1446,7 @@
 	} else if (ret == -EEXIST) {
 		ret = 0;
 	} else {
-		BUG();
+		BUG(); /* Logic error */
 	}
 	iput(inode);
 
@@ -1495,7 +1515,7 @@
 	struct inode *dir;
 	u8 log_type;
 	int exists;
-	int ret;
+	int ret = 0;
 
 	dir = read_one_inode(root, key->objectid);
 	if (!dir)
@@ -1527,7 +1547,9 @@
 						     key->offset, name,
 						     name_len, 1);
 	} else {
-		BUG();
+		/* Corruption */
+		ret = -EINVAL;
+		goto out;
 	}
 	if (IS_ERR_OR_NULL(dst_di)) {
 		/* we need a sequence number to insert, so we only
@@ -1555,7 +1577,8 @@
 		goto out;
 
 	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
-	BUG_ON(ret);
+	if (ret)
+		goto out;
 
 	if (key->type == BTRFS_DIR_INDEX_KEY)
 		goto insert;
@@ -1563,14 +1586,15 @@
 	btrfs_release_path(path);
 	kfree(name);
 	iput(dir);
-	return 0;
+	return ret;
 
 insert:
 	btrfs_release_path(path);
 	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
 			      name, name_len, log_type, &log_key);
-
-	BUG_ON(ret && ret != -ENOENT);
+	if (ret && ret != -ENOENT)
+		goto out;
+	ret = 0;
 	goto out;
 }
 
@@ -1601,7 +1625,8 @@
 			return -EIO;
 		name_len = btrfs_dir_name_len(eb, di);
 		ret = replay_one_name(trans, root, path, eb, di, key);
-		BUG_ON(ret);
+		if (ret)
+			return ret;
 		ptr = (unsigned long)(di + 1);
 		ptr += name_len;
 	}
@@ -1762,16 +1787,21 @@
 
 			ret = link_to_fixup_dir(trans, root,
 						path, location.objectid);
-			BUG_ON(ret);
+			if (ret) {
+				kfree(name);
+				iput(inode);
+				goto out;
+			}
+
 			btrfs_inc_nlink(inode);
 			ret = btrfs_unlink_inode(trans, root, dir, inode,
 						 name, name_len);
-			BUG_ON(ret);
-
-			btrfs_run_delayed_items(trans, root);
-
+			if (!ret)
+				btrfs_run_delayed_items(trans, root);
 			kfree(name);
 			iput(inode);
+			if (ret)
+				goto out;
 
 			/* there might still be more names under this key
 			 * check and repeat if required
@@ -1875,7 +1905,8 @@
 			ret = check_item_in_log(trans, root, log, path,
 						log_path, dir,
 						&found_key);
-			BUG_ON(ret);
+			if (ret)
+				goto out;
 			if (found_key.offset == (u64)-1)
 				break;
 			dir_key.offset = found_key.offset + 1;
@@ -1952,11 +1983,13 @@
 			if (S_ISDIR(mode)) {
 				ret = replay_dir_deletes(wc->trans,
 					 root, log, path, key.objectid, 0);
-				BUG_ON(ret);
+				if (ret)
+					break;
 			}
 			ret = overwrite_item(wc->trans, root, path,
 					     eb, i, &key);
-			BUG_ON(ret);
+			if (ret)
+				break;
 
 			/* for regular files, make sure corresponding
 			 * orphan items exist. extents past the new EOF
@@ -1965,12 +1998,14 @@
 			if (S_ISREG(mode)) {
 				ret = insert_orphan_item(wc->trans, root,
 							 key.objectid);
-				BUG_ON(ret);
+				if (ret)
+					break;
 			}
 
 			ret = link_to_fixup_dir(wc->trans, root,
 						path, key.objectid);
-			BUG_ON(ret);
+			if (ret)
+				break;
 		}
 		if (wc->stage < LOG_WALK_REPLAY_ALL)
 			continue;
@@ -1979,28 +2014,35 @@
 		if (key.type == BTRFS_XATTR_ITEM_KEY) {
 			ret = overwrite_item(wc->trans, root, path,
 					     eb, i, &key);
-			BUG_ON(ret);
+			if (ret)
+				break;
 		} else if (key.type == BTRFS_INODE_REF_KEY) {
 			ret = add_inode_ref(wc->trans, root, log, path,
 					    eb, i, &key);
-			BUG_ON(ret && ret != -ENOENT);
+			if (ret && ret != -ENOENT)
+				break;
+			ret = 0;
 		} else if (key.type == BTRFS_INODE_EXTREF_KEY) {
 			ret = add_inode_ref(wc->trans, root, log, path,
 					    eb, i, &key);
-			BUG_ON(ret && ret != -ENOENT);
+			if (ret && ret != -ENOENT)
+				break;
+			ret = 0;
 		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
 			ret = replay_one_extent(wc->trans, root, path,
 						eb, i, &key);
-			BUG_ON(ret);
+			if (ret)
+				break;
 		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
 			   key.type == BTRFS_DIR_INDEX_KEY) {
 			ret = replay_one_dir_item(wc->trans, root, path,
 						  eb, i, &key);
-			BUG_ON(ret);
+			if (ret)
+				break;
 		}
 	}
 	btrfs_free_path(path);
-	return 0;
+	return ret;
 }
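
Note the -ENOENT handling in the two add_inode_ref() arms above: a missing
victim inode is an expected outcome during replay, not a failure, so only
that one errno is filtered back to zero and anything else breaks out of the
walk. A sketch of the soft-error filter (helper name hypothetical):

/* Treat one specific errno as success; everything else is fatal. */
static inline int filter_soft_err(int ret, int soft)
{
	return (ret == soft) ? 0 : ret;
}

	ret = filter_soft_err(add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key), -ENOENT);
	if (ret)
		break;		/* real failure: propagate via the new return */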
 
 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
@@ -2045,8 +2087,10 @@
 
 		if (*level == 1) {
 			ret = wc->process_func(root, next, wc, ptr_gen);
-			if (ret)
+			if (ret) {
+				free_extent_buffer(next);
 				return ret;
+			}
 
 			path->slots[*level]++;
 			if (wc->free) {
@@ -2066,7 +2110,10 @@
 					BTRFS_TREE_LOG_OBJECTID);
 				ret = btrfs_free_and_pin_reserved_extent(root,
 							 bytenr, blocksize);
-				BUG_ON(ret); /* -ENOMEM or logic errors */
+				if (ret) {
+					free_extent_buffer(next);
+					return ret;
+				}
 			}
 			free_extent_buffer(next);
 			continue;
@@ -2139,7 +2186,8 @@
 				ret = btrfs_free_and_pin_reserved_extent(root,
 						path->nodes[*level]->start,
 						path->nodes[*level]->len);
-				BUG_ON(ret);
+				if (ret)
+					return ret;
 			}
 			free_extent_buffer(path->nodes[*level]);
 			path->nodes[*level] = NULL;
@@ -2161,7 +2209,6 @@
 	int wret;
 	int level;
 	struct btrfs_path *path;
-	int i;
 	int orig_level;
 
 	path = btrfs_alloc_path();
@@ -2213,17 +2260,12 @@
 				BTRFS_TREE_LOG_OBJECTID);
 			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
 							 next->len);
-			BUG_ON(ret); /* -ENOMEM or logic errors */
+			if (ret)
+				goto out;
 		}
 	}
 
 out:
-	for (i = 0; i <= orig_level; i++) {
-		if (path->nodes[i]) {
-			free_extent_buffer(path->nodes[i]);
-			path->nodes[i] = NULL;
-		}
-	}
 	btrfs_free_path(path);
 	return ret;
 }
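
The deleted loop was redundant: btrfs_free_path() already walks the path and
drops every extent buffer it still holds. Roughly, from ctree.c of this era
(paraphrased, not part of this patch):

void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);	/* unlocks and releases path->nodes[] refs */
	kmem_cache_free(btrfs_path_cachep, p);
}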
@@ -2507,7 +2549,10 @@
 
 	if (trans) {
 		ret = walk_log_tree(trans, log, &wc);
-		BUG_ON(ret);
+
+		/* I don't think this can happen but just in case */
+		if (ret)
+			btrfs_abort_transaction(trans, log, ret);
 	}
 
 	while (1) {
@@ -2615,7 +2660,10 @@
 	if (di) {
 		ret = btrfs_delete_one_dir_name(trans, log, path, di);
 		bytes_del += name_len;
-		BUG_ON(ret);
+		if (ret) {
+			err = ret;
+			goto fail;
+		}
 	}
 	btrfs_release_path(path);
 	di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
@@ -2627,7 +2675,10 @@
 	if (di) {
 		ret = btrfs_delete_one_dir_name(trans, log, path, di);
 		bytes_del += name_len;
-		BUG_ON(ret);
+		if (ret) {
+			err = ret;
+			goto fail;
+		}
 	}
 
 	/* update the directory size in the log to reflect the names
@@ -2966,7 +3017,7 @@
 
 	while (1) {
 		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
-		BUG_ON(ret == 0);
+		BUG_ON(ret == 0); /* Logic error */
 		if (ret < 0)
 			break;
 
@@ -3169,7 +3220,11 @@
 						log->fs_info->csum_root,
 						ds + cs, ds + cs + cl - 1,
 						&ordered_sums, 0);
-				BUG_ON(ret);
+				if (ret) {
+					btrfs_release_path(dst_path);
+					kfree(ins_data);
+					return ret;
+				}
 			}
 		}
 	}
@@ -3209,115 +3264,6 @@
 	return 0;
 }
 
-static int drop_adjacent_extents(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root, struct inode *inode,
-				 struct extent_map *em,
-				 struct btrfs_path *path)
-{
-	struct btrfs_file_extent_item *fi;
-	struct extent_buffer *leaf;
-	struct btrfs_key key, new_key;
-	struct btrfs_map_token token;
-	u64 extent_end;
-	u64 extent_offset = 0;
-	int extent_type;
-	int del_slot = 0;
-	int del_nr = 0;
-	int ret = 0;
-
-	while (1) {
-		btrfs_init_map_token(&token);
-		leaf = path->nodes[0];
-		path->slots[0]++;
-		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
-			if (del_nr) {
-				ret = btrfs_del_items(trans, root, path,
-						      del_slot, del_nr);
-				if (ret)
-					return ret;
-				del_nr = 0;
-			}
-
-			ret = btrfs_next_leaf_write(trans, root, path, 1);
-			if (ret < 0)
-				return ret;
-			if (ret > 0)
-				return 0;
-			leaf = path->nodes[0];
-		}
-
-		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-		if (key.objectid != btrfs_ino(inode) ||
-		    key.type != BTRFS_EXTENT_DATA_KEY ||
-		    key.offset >= em->start + em->len)
-			break;
-
-		fi = btrfs_item_ptr(leaf, path->slots[0],
-				    struct btrfs_file_extent_item);
-		extent_type = btrfs_token_file_extent_type(leaf, fi, &token);
-		if (extent_type == BTRFS_FILE_EXTENT_REG ||
-		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
-			extent_offset = btrfs_token_file_extent_offset(leaf,
-								fi, &token);
-			extent_end = key.offset +
-				btrfs_token_file_extent_num_bytes(leaf, fi,
-								  &token);
-		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-			extent_end = key.offset +
-				btrfs_file_extent_inline_len(leaf, fi);
-		} else {
-			BUG();
-		}
-
-		if (extent_end <= em->len + em->start) {
-			if (!del_nr) {
-				del_slot = path->slots[0];
-			}
-			del_nr++;
-			continue;
-		}
-
-		/*
-		 * Ok so we'll ignore previous items if we log a new extent,
-		 * which can lead to overlapping extents, so if we have an
-		 * existing extent we want to adjust we _have_ to check the next
-		 * guy to make sure we even need this extent anymore, this keeps
-		 * us from panicing in set_item_key_safe.
-		 */
-		if (path->slots[0] < btrfs_header_nritems(leaf) - 1) {
-			struct btrfs_key tmp_key;
-
-			btrfs_item_key_to_cpu(leaf, &tmp_key,
-					      path->slots[0] + 1);
-			if (tmp_key.objectid == btrfs_ino(inode) &&
-			    tmp_key.type == BTRFS_EXTENT_DATA_KEY &&
-			    tmp_key.offset <= em->start + em->len) {
-				if (!del_nr)
-					del_slot = path->slots[0];
-				del_nr++;
-				continue;
-			}
-		}
-
-		BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
-		memcpy(&new_key, &key, sizeof(new_key));
-		new_key.offset = em->start + em->len;
-		btrfs_set_item_key_safe(trans, root, path, &new_key);
-		extent_offset += em->start + em->len - key.offset;
-		btrfs_set_token_file_extent_offset(leaf, fi, extent_offset,
-						   &token);
-		btrfs_set_token_file_extent_num_bytes(leaf, fi, extent_end -
-						      (em->start + em->len),
-						      &token);
-		btrfs_mark_buffer_dirty(leaf);
-	}
-
-	if (del_nr)
-		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
-
-	return ret;
-}
-
 static int log_one_extent(struct btrfs_trans_handle *trans,
 			  struct inode *inode, struct btrfs_root *root,
 			  struct extent_map *em, struct btrfs_path *path)
@@ -3339,39 +3285,24 @@
 	int index = log->log_transid % 2;
 	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-insert:
+	ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
+				   em->start + em->len, NULL, 0);
+	if (ret)
+		return ret;
+
 	INIT_LIST_HEAD(&ordered_sums);
 	btrfs_init_map_token(&token);
 	key.objectid = btrfs_ino(inode);
 	key.type = BTRFS_EXTENT_DATA_KEY;
 	key.offset = em->start;
-	path->really_keep_locks = 1;
 
 	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
-	if (ret && ret != -EEXIST) {
-		path->really_keep_locks = 0;
+	if (ret)
 		return ret;
-	}
 	leaf = path->nodes[0];
 	fi = btrfs_item_ptr(leaf, path->slots[0],
 			    struct btrfs_file_extent_item);
 
-	/*
-	 * If we are overwriting an inline extent with a real one then we need
-	 * to just delete the inline extent as it may not be large enough to
-	 * have the entire file_extent_item.
-	 */
-	if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
-	    BTRFS_FILE_EXTENT_INLINE) {
-		ret = btrfs_del_item(trans, log, path);
-		btrfs_release_path(path);
-		if (ret) {
-			path->really_keep_locks = 0;
-			return ret;
-		}
-		goto insert;
-	}
-
 	btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
 					       &token);
 	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3410,22 +3341,14 @@
 					   em->start - em->orig_start,
 					   &token);
 	btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
-	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->len, &token);
+	btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
 	btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
 						&token);
 	btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
 	btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
 	btrfs_mark_buffer_dirty(leaf);
 
-	/*
-	 * Have to check the extent to the right of us to make sure it doesn't
-	 * fall in our current range.  We're ok if the previous extent is in our
-	 * range since the recovery stuff will run us in key order and thus just
-	 * drop the part we overwrote.
-	 */
-	ret = drop_adjacent_extents(trans, log, inode, em, path);
 	btrfs_release_path(path);
-	path->really_keep_locks = 0;
 	if (ret) {
 		return ret;
 	}
@@ -3650,8 +3573,6 @@
 	bool fast_search = false;
 	u64 ino = btrfs_ino(inode);
 
-	log = root->log_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -3918,9 +3839,9 @@
  * only logging is done of any parent directories that are older than
  * the last committed transaction
  */
-int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
-		    struct btrfs_root *root, struct inode *inode,
-		    struct dentry *parent, int exists_only)
+static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+				  struct btrfs_root *root, struct inode *inode,
+				  struct dentry *parent, int exists_only)
 {
 	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
 	struct super_block *sb;
@@ -4111,6 +4032,9 @@
 		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
 		if (IS_ERR(wc.replay_dest)) {
 			ret = PTR_ERR(wc.replay_dest);
+			free_extent_buffer(log->node);
+			free_extent_buffer(log->commit_root);
+			kfree(log);
 			btrfs_error(fs_info, ret, "Couldn't read target root "
 				    "for tree log recovery.");
 			goto error;
@@ -4119,12 +4043,10 @@
 		wc.replay_dest->log_root = log;
 		btrfs_record_root_in_trans(trans, wc.replay_dest);
 		ret = walk_log_tree(trans, log, &wc);
-		BUG_ON(ret);
 
-		if (wc.stage == LOG_WALK_REPLAY_ALL) {
+		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
 			ret = fixup_inode_link_counts(trans, wc.replay_dest,
 						      path);
-			BUG_ON(ret);
 		}
 
 		key.offset = found_key.offset - 1;
@@ -4133,6 +4055,9 @@
 		free_extent_buffer(log->commit_root);
 		kfree(log);
 
+		if (ret)
+			goto error;
+
 		if (found_key.offset == 0)
 			break;
 	}
@@ -4153,17 +4078,20 @@
 
 	btrfs_free_path(path);
 
+	/* step 4: commit the transaction, which also unpins the blocks */
+	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+	if (ret)
+		return ret;
+
 	free_extent_buffer(log_root_tree->node);
 	log_root_tree->log_root = NULL;
 	fs_info->log_root_recovering = 0;
-
-	/* step 4: commit the transaction, which also unpins the blocks */
-	btrfs_commit_transaction(trans, fs_info->tree_root);
-
 	kfree(log_root_tree);
-	return 0;
 
+	return 0;
 error:
+	if (wc.trans)
+		btrfs_end_transaction(wc.trans, fs_info->tree_root);
 	btrfs_free_path(path);
 	return ret;
 }
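
Two ordering fixes land at the tail of log recovery above: the transaction
commit, whose return value is finally checked, must run before the log root
tree is torn down because it can still unpin blocks through it, and the new
error label ends a half-built wc.trans instead of leaking it. The resulting
teardown order, condensed from the hunk:

	/* step 4: commit first; it unpins blocks via the log root tree */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret)
		return ret;

	/* only now is it safe to free the log tree structures */
	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);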
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 862ac81..1d4ae0d 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -40,9 +40,6 @@
 			       struct inode *inode, u64 dirid);
 void btrfs_end_log_trans(struct btrfs_root *root);
 int btrfs_pin_log_trans(struct btrfs_root *root);
-int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
-		    struct btrfs_root *root, struct inode *inode,
-		    struct dentry *parent, int exists_only);
 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
 			     struct inode *dir, struct inode *inode,
 			     int for_rename);
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index ddc61ca..7b417e2 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -53,6 +53,7 @@
 	ulist->nnodes = 0;
 	ulist->nodes = ulist->int_nodes;
 	ulist->nodes_alloced = ULIST_SIZE;
+	ulist->root = RB_ROOT;
 }
 EXPORT_SYMBOL(ulist_init);
 
@@ -72,6 +73,7 @@
 	if (ulist->nodes_alloced > ULIST_SIZE)
 		kfree(ulist->nodes);
 	ulist->nodes_alloced = 0;	/* in case ulist_fini is called twice */
+	ulist->root = RB_ROOT;
 }
 EXPORT_SYMBOL(ulist_fini);
 
@@ -123,6 +125,45 @@
 }
 EXPORT_SYMBOL(ulist_free);
 
+static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
+{
+	struct rb_node *n = ulist->root.rb_node;
+	struct ulist_node *u = NULL;
+
+	while (n) {
+		u = rb_entry(n, struct ulist_node, rb_node);
+		if (u->val < val)
+			n = n->rb_right;
+		else if (u->val > val)
+			n = n->rb_left;
+		else
+			return u;
+	}
+	return NULL;
+}
+
+static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
+{
+	struct rb_node **p = &ulist->root.rb_node;
+	struct rb_node *parent = NULL;
+	struct ulist_node *cur = NULL;
+
+	while (*p) {
+		parent = *p;
+		cur = rb_entry(parent, struct ulist_node, rb_node);
+
+		if (cur->val < ins->val)
+			p = &(*p)->rb_right;
+		else if (cur->val > ins->val)
+			p = &(*p)->rb_left;
+		else
+			return -EEXIST;
+	}
+	rb_link_node(&ins->rb_node, parent, p);
+	rb_insert_color(&ins->rb_node, &ulist->root);
+	return 0;
+}
+
 /**
  * ulist_add - add an element to the ulist
  * @ulist:	ulist to add the element to
@@ -151,14 +192,13 @@
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 		    u64 *old_aux, gfp_t gfp_mask)
 {
-	int i;
-
-	for (i = 0; i < ulist->nnodes; ++i) {
-		if (ulist->nodes[i].val == val) {
-			if (old_aux)
-				*old_aux = ulist->nodes[i].aux;
-			return 0;
-		}
+	int ret = 0;
+	struct ulist_node *node = NULL;
+	node = ulist_rbtree_search(ulist, val);
+	if (node) {
+		if (old_aux)
+			*old_aux = node->aux;
+		return 0;
 	}
 
 	if (ulist->nnodes >= ulist->nodes_alloced) {
@@ -187,6 +227,8 @@
 	}
 	ulist->nodes[ulist->nnodes].val = val;
 	ulist->nodes[ulist->nnodes].aux = aux;
+	ret = ulist_rbtree_insert(ulist, &ulist->nodes[ulist->nnodes]);
+	BUG_ON(ret);
 	++ulist->nnodes;
 
 	return 1;
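
With the rb-tree index, ulist_add_merge() does an O(log n) membership test
instead of scanning the node array, while iteration order over ->nodes is
unchanged. A minimal usage sketch (assuming the existing ulist_alloc(),
ULIST_ITER_INIT() and ulist_next() API; the duplicate add is caught by the
tree and not stored twice):

static int ulist_usage_sketch(void)
{
	struct ulist *roots;
	struct ulist_iterator uiter;
	struct ulist_node *node;

	roots = ulist_alloc(GFP_NOFS);
	if (!roots)
		return -ENOMEM;

	ulist_add(roots, 256, 0, GFP_NOFS);
	ulist_add(roots, 257, 0, GFP_NOFS);
	ulist_add(roots, 256, 0, GFP_NOFS);	/* rbtree hit: not stored */

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(roots, &uiter)))
		pr_info("val %llu aux %llu\n",
			(unsigned long long)node->val,
			(unsigned long long)node->aux);

	ulist_free(roots);
	return 0;
}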
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 21a1963..fb36731 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -8,6 +8,9 @@
 #ifndef __ULIST__
 #define __ULIST__
 
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
 /*
  * ulist is a generic data structure to hold a collection of unique u64
  * values. The only operations it supports is adding to the list and
@@ -34,6 +37,7 @@
 struct ulist_node {
 	u64 val;		/* value to store */
 	u64 aux;		/* auxiliary value saved along with the val */
+	struct rb_node rb_node;	/* used to speed up search */
 };
 
 struct ulist {
@@ -54,6 +58,8 @@
 	 */
 	struct ulist_node *nodes;
 
+	struct rb_root root;
+
 	/*
 	 * inline storage space for the first ULIST_SIZE entries
 	 */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2854c82..0e925ce 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -46,6 +46,7 @@
 				struct btrfs_device *device);
 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
+static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 
 static DEFINE_MUTEX(uuid_mutex);
@@ -717,9 +718,9 @@
 		if (!device->name)
 			continue;
 
-		ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
-					    &bdev, &bh);
-		if (ret)
+		/* Just open everything we can; ignore failures here */
+		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
+					    &bdev, &bh))
 			continue;
 
 		disk_super = (struct btrfs_super_block *)bh->b_data;
@@ -1199,10 +1200,10 @@
 	return ret;
 }
 
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
-			   struct btrfs_device *device,
-			   u64 chunk_tree, u64 chunk_objectid,
-			   u64 chunk_offset, u64 start, u64 num_bytes)
+static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+				  struct btrfs_device *device,
+				  u64 chunk_tree, u64 chunk_objectid,
+				  u64 chunk_offset, u64 start, u64 num_bytes)
 {
 	int ret;
 	struct btrfs_path *path;
@@ -1329,9 +1330,9 @@
  * the device information is stored in the chunk root
  * the btrfs_device struct should be fully filled in
  */
-int btrfs_add_device(struct btrfs_trans_handle *trans,
-		     struct btrfs_root *root,
-		     struct btrfs_device *device)
+static int btrfs_add_device(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root,
+			    struct btrfs_device *device)
 {
 	int ret;
 	struct btrfs_path *path;
@@ -1710,8 +1711,8 @@
 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 }
 
-int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
-			      struct btrfs_device **device)
+static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+				     struct btrfs_device **device)
 {
 	int ret = 0;
 	struct btrfs_super_block *disk_super;
@@ -3607,7 +3608,7 @@
 	return 0;
 }
 
-struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
+static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
 	[BTRFS_RAID_RAID10] = {
 		.sub_stripes	= 2,
 		.dev_stripes	= 1,
@@ -3674,18 +3675,10 @@
 
 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 {
-	u64 features;
-
 	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
 		return;
 
-	features = btrfs_super_incompat_flags(info->super_copy);
-	if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
-		return;
-
-	features |= BTRFS_FEATURE_INCOMPAT_RAID56;
-	btrfs_set_super_incompat_flags(info->super_copy, features);
-	printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
+	btrfs_set_fs_incompat(info, RAID56);
 }
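
btrfs_set_fs_incompat() centralizes the read-test-set dance that was open
coded here. Roughly what the helper does, modeled on the removed lines
(sketch only, minus the helper's log message):

#define btrfs_set_fs_incompat(fs_info, opt) \
	__btrfs_set_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
					   u64 flag)
{
	u64 features = btrfs_super_incompat_flags(fs_info->super_copy);

	if (!(features & flag)) {
		features |= flag;
		btrfs_set_super_incompat_flags(fs_info->super_copy, features);
	}
}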
 
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
@@ -3932,7 +3925,7 @@
 
 	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
 	write_lock(&em_tree->lock);
-	ret = add_extent_mapping(em_tree, em);
+	ret = add_extent_mapping(em_tree, em, 0);
 	write_unlock(&em_tree->lock);
 	if (ret) {
 		free_extent_map(em);
@@ -4240,9 +4233,25 @@
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, len);
 	read_unlock(&em_tree->lock);
-	BUG_ON(!em);
 
-	BUG_ON(em->start > logical || em->start + em->len < logical);
+	/*
+	 * We could return errors for these cases, but that could get ugly and
+	 * we'd probably end up doing the same thing anyway, which is nothing
+	 * but exit. So return 1 so the callers don't try to use other copies.
+	 */
+	if (!em) {
+		btrfs_emerg(fs_info, "No mapping for %Lu-%Lu\n", logical,
+			    logical+len);
+		return 1;
+	}
+
+	if (em->start > logical || em->start + em->len < logical) {
+		btrfs_emerg(fs_info, "Invalid mapping for %Lu-%Lu, got "
+			    "%Lu-%Lu\n", logical, logical+len, em->start,
+			    em->start + em->len);
+		return 1;
+	}
+
 	map = (struct map_lookup *)em->bdev;
 	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
 		ret = map->num_stripes;
@@ -4411,13 +4420,19 @@
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
-		       (unsigned long long)logical,
-		       (unsigned long long)*length);
-		BUG();
+		btrfs_crit(fs_info, "unable to find logical %llu len %llu",
+			(unsigned long long)logical,
+			(unsigned long long)*length);
+		return -EINVAL;
 	}
 
-	BUG_ON(em->start > logical || em->start + em->len < logical);
+	if (em->start > logical || em->start + em->len < logical) {
+		btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
+			   "found %Lu-%Lu\n", logical, em->start,
+			   em->start + em->len);
+		return -EINVAL;
+	}
+
 	map = (struct map_lookup *)em->bdev;
 	offset = logical - em->start;
 
@@ -5106,9 +5121,9 @@
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-noinline void btrfs_schedule_bio(struct btrfs_root *root,
-				 struct btrfs_device *device,
-				 int rw, struct bio *bio)
+static noinline void btrfs_schedule_bio(struct btrfs_root *root,
+					struct btrfs_device *device,
+					int rw, struct bio *bio)
 {
 	int should_queue = 1;
 	struct btrfs_pending_bios *pending_bios;
@@ -5177,7 +5192,7 @@
 	}
 
 	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-	if ((bio->bi_size >> 9) > max_sectors)
+	if (bio_sectors(bio) > max_sectors)
 		return 0;
 
 	if (!q->merge_bvec_fn)
@@ -5308,10 +5323,10 @@
 	}
 
 	if (map_length < length) {
-		printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
-		       "len %llu\n", (unsigned long long)logical,
-		       (unsigned long long)length,
-		       (unsigned long long)map_length);
+		btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
+			(unsigned long long)logical,
+			(unsigned long long)length,
+			(unsigned long long)map_length);
 		BUG();
 	}
 
@@ -5476,7 +5491,7 @@
 	}
 
 	write_lock(&map_tree->map_tree.lock);
-	ret = add_extent_mapping(&map_tree->map_tree, em);
+	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
 	write_unlock(&map_tree->map_tree.lock);
 	BUG_ON(ret); /* Tree corruption */
 	free_extent_map(em);
@@ -5583,8 +5598,8 @@
 			return -EIO;
 
 		if (!device) {
-			printk(KERN_WARNING "warning devid %llu missing\n",
-			       (unsigned long long)devid);
+			btrfs_warn(root->fs_info, "devid %llu missing",
+				(unsigned long long)devid);
 			device = add_missing_dev(root, devid, dev_uuid);
 			if (!device)
 				return -ENOMEM;
@@ -5926,7 +5941,7 @@
 	btrfs_dev_stat_print_on_error(dev);
 }
 
-void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
+static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
 {
 	if (!dev->dev_stats_valid)
 		return;
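
The btrfs_num_copies() change earlier in this file gives broken chunk
mappings a defined answer, 1, instead of a BUG_ON. Why 1 is the safe value
is visible from the caller side (try_mirror() is a hypothetical stand-in
for the read-retry logic):

	int num = btrfs_num_copies(fs_info, logical, len);
	int mirror;

	/*
	 * On a missing or invalid mapping num is 1, so this loop runs
	 * once and gives up: exactly what a hard failure would do, but
	 * without taking the machine down.
	 */
	for (mirror = 1; mirror <= num; mirror++)
		if (try_mirror(fs_info, logical, len, mirror) == 0)
			break;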
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 062d860..845ccbb 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -254,10 +254,6 @@
 #define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \
 			    (sizeof(struct btrfs_bio_stripe) * (n)))
 
-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
-			   struct btrfs_device *device,
-			   u64 chunk_tree, u64 chunk_objectid,
-			   u64 chunk_offset, u64 start, u64 num_bytes);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		    u64 logical, u64 *length,
 		    struct btrfs_bio **bbio_ret, int mirror_num);
@@ -282,11 +278,6 @@
 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
 					 char *device_path,
 					 struct btrfs_device **device);
-int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
-			      struct btrfs_device **device);
-int btrfs_add_device(struct btrfs_trans_handle *trans,
-		     struct btrfs_root *root,
-		     struct btrfs_device *device);
 int btrfs_rm_device(struct btrfs_root *root, char *device_path);
 void btrfs_cleanup_fs_uuids(void);
 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
@@ -307,7 +298,6 @@
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
-void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
 int btrfs_get_dev_stats(struct btrfs_root *root,
 			struct btrfs_ioctl_get_dev_stats *stats);
@@ -321,9 +311,6 @@
 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
 					      struct btrfs_device *tgtdev);
 int btrfs_scratch_superblock(struct btrfs_device *device);
-void btrfs_schedule_bio(struct btrfs_root *root,
-			struct btrfs_device *device,
-			int rw, struct bio *bio);
 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 			   u64 logical, u64 len, int mirror_num);
 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 446a684..05740b9 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -406,8 +406,8 @@
 				XATTR_REPLACE);
 }
 
-int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
-		     void *fs_info)
+static int btrfs_initxattrs(struct inode *inode,
+			    const struct xattr *xattr_array, void *fs_info)
 {
 	const struct xattr *xattr;
 	struct btrfs_trans_handle *trans = fs_info;
diff --git a/fs/buffer.c b/fs/buffer.c
index bc1fe14..d2a4d1b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2977,7 +2977,6 @@
 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
 	bio->bi_vcnt = 1;
-	bio->bi_idx = 0;
 	bio->bi_size = bh->b_size;
 
 	bio->bi_end_io = end_bio_bh_io_sync;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d70830c..656e169 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -7,6 +7,7 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 
 #include "super.h"
 #include "mds_client.h"
diff --git a/fs/compat.c b/fs/compat.c
index 93f7d02..fc3b55d 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -47,6 +47,7 @@
 #include <linux/fs_struct.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
+#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
diff --git a/fs/direct-io.c b/fs/direct-io.c
index cfb816d..7ab90f5 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,6 +37,7 @@
 #include <linux/uio.h>
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
+#include <linux/aio.h>
 
 /*
  * How many user pages to map in one call to get_user_pages().  This determines
@@ -441,8 +442,8 @@
 static int dio_bio_complete(struct dio *dio, struct bio *bio)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec;
-	int page_no;
+	struct bio_vec *bvec;
+	unsigned i;
 
 	if (!uptodate)
 		dio->io_error = -EIO;
@@ -450,8 +451,8 @@
 	if (dio->is_async && dio->rw == READ) {
 		bio_check_pages_dirty(bio);	/* transfers ownership */
 	} else {
-		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
-			struct page *page = bvec[page_no].bv_page;
+		bio_for_each_segment_all(bvec, bio, i) {
+			struct page *page = bvec->bv_page;
 
 			if (dio->rw == READ && !PageCompound(page))
 				set_page_dirty_lock(page);
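
bio_for_each_segment_all() walks every bio_vec from index 0 regardless of
bi_idx, which is what a completion handler wants; the old open-coded loop
over bi_io_vec said the same thing less directly. A sketch of typical use
in an endio path (my_read_endio() is hypothetical):

static void my_read_endio(struct bio *bio, int error)
{
	struct bio_vec *bvec;
	unsigned int i;

	/* visit all pages attached to the bio, not just the tail */
	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!error)
			SetPageUptodate(page);
		unlock_page(page);
	}
	bio_put(bio);
}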
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index d5c25db..f71ec12 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -243,7 +243,7 @@
 	struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
 
 	if (crypt_stat->tfm)
-		crypto_free_blkcipher(crypt_stat->tfm);
+		crypto_free_ablkcipher(crypt_stat->tfm);
 	if (crypt_stat->hash_tfm)
 		crypto_free_hash(crypt_stat->hash_tfm);
 	list_for_each_entry_safe(key_sig, key_sig_tmp,
@@ -319,6 +319,22 @@
 	return i;
 }
 
+struct extent_crypt_result {
+	struct completion completion;
+	int rc;
+};
+
+static void extent_crypt_complete(struct crypto_async_request *req, int rc)
+{
+	struct extent_crypt_result *ecr = req->data;
+
+	if (rc == -EINPROGRESS)
+		return;
+
+	ecr->rc = rc;
+	complete(&ecr->completion);
+}
+
 /**
  * encrypt_scatterlist
  * @crypt_stat: Pointer to the crypt_stat struct to initialize.
@@ -334,11 +350,8 @@
 			       struct scatterlist *src_sg, int size,
 			       unsigned char *iv)
 {
-	struct blkcipher_desc desc = {
-		.tfm = crypt_stat->tfm,
-		.info = iv,
-		.flags = CRYPTO_TFM_REQ_MAY_SLEEP
-	};
+	struct ablkcipher_request *req = NULL;
+	struct extent_crypt_result ecr;
 	int rc = 0;
 
 	BUG_ON(!crypt_stat || !crypt_stat->tfm
@@ -349,24 +362,47 @@
 		ecryptfs_dump_hex(crypt_stat->key,
 				  crypt_stat->key_size);
 	}
-	/* Consider doing this once, when the file is opened */
+
+	init_completion(&ecr.completion);
+
 	mutex_lock(&crypt_stat->cs_tfm_mutex);
-	if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
-		rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-					     crypt_stat->key_size);
-		crypt_stat->flags |= ECRYPTFS_KEY_SET;
-	}
-	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-				rc);
+	req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+	if (!req) {
 		mutex_unlock(&crypt_stat->cs_tfm_mutex);
-		rc = -EINVAL;
+		rc = -ENOMEM;
 		goto out;
 	}
-	ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
-	crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size);
+
+	ablkcipher_request_set_callback(req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+			extent_crypt_complete, &ecr);
+	/* Consider doing this once, when the file is opened */
+	if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+		rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+					      crypt_stat->key_size);
+		if (rc) {
+			ecryptfs_printk(KERN_ERR,
+					"Error setting key; rc = [%d]\n",
+					rc);
+			mutex_unlock(&crypt_stat->cs_tfm_mutex);
+			rc = -EINVAL;
+			goto out;
+		}
+		crypt_stat->flags |= ECRYPTFS_KEY_SET;
+	}
 	mutex_unlock(&crypt_stat->cs_tfm_mutex);
+	ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
+	ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+	rc = crypto_ablkcipher_encrypt(req);
+	if (rc == -EINPROGRESS || rc == -EBUSY) {
+		struct extent_crypt_result *ecr = req->base.data;
+
+		wait_for_completion(&ecr->completion);
+		rc = ecr->rc;
+		INIT_COMPLETION(ecr->completion);
+	}
 out:
+	ablkcipher_request_free(req);
 	return rc;
 }
 
@@ -624,35 +660,61 @@
 			       struct scatterlist *src_sg, int size,
 			       unsigned char *iv)
 {
-	struct blkcipher_desc desc = {
-		.tfm = crypt_stat->tfm,
-		.info = iv,
-		.flags = CRYPTO_TFM_REQ_MAY_SLEEP
-	};
+	struct ablkcipher_request *req = NULL;
+	struct extent_crypt_result ecr;
 	int rc = 0;
 
-	/* Consider doing this once, when the file is opened */
+	BUG_ON(!crypt_stat || !crypt_stat->tfm
+	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+	if (unlikely(ecryptfs_verbosity > 0)) {
+		ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+				crypt_stat->key_size);
+		ecryptfs_dump_hex(crypt_stat->key,
+				  crypt_stat->key_size);
+	}
+
+	init_completion(&ecr.completion);
+
 	mutex_lock(&crypt_stat->cs_tfm_mutex);
-	rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-				     crypt_stat->key_size);
-	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-				rc);
+	req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+	if (!req) {
 		mutex_unlock(&crypt_stat->cs_tfm_mutex);
-		rc = -EINVAL;
+		rc = -ENOMEM;
 		goto out;
 	}
-	ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
-	rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size);
+
+	ablkcipher_request_set_callback(req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+			extent_crypt_complete, &ecr);
+	/* Consider doing this once, when the file is opened */
+	if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+		rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+					      crypt_stat->key_size);
+		if (rc) {
+			ecryptfs_printk(KERN_ERR,
+					"Error setting key; rc = [%d]\n",
+					rc);
+			mutex_unlock(&crypt_stat->cs_tfm_mutex);
+			rc = -EINVAL;
+			goto out;
+		}
+		crypt_stat->flags |= ECRYPTFS_KEY_SET;
+	}
 	mutex_unlock(&crypt_stat->cs_tfm_mutex);
-	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n",
-				rc);
-		goto out;
+	ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
+	ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+	rc = crypto_ablkcipher_decrypt(req);
+	if (rc == -EINPROGRESS || rc == -EBUSY) {
+		struct extent_crypt_result *ecr = req->base.data;
+
+		wait_for_completion(&ecr->completion);
+		rc = ecr->rc;
+		INIT_COMPLETION(ecr->completion);
 	}
-	rc = size;
 out:
+	ablkcipher_request_free(req);
 	return rc;
 }
 
 /**
@@ -746,8 +808,7 @@
 						    crypt_stat->cipher, "cbc");
 	if (rc)
 		goto out_unlock;
-	crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0,
-						 CRYPTO_ALG_ASYNC);
+	crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
 	kfree(full_alg_name);
 	if (IS_ERR(crypt_stat->tfm)) {
 		rc = PTR_ERR(crypt_stat->tfm);
@@ -757,7 +818,7 @@
 				crypt_stat->cipher);
 		goto out_unlock;
 	}
-	crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+	crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
 	rc = 0;
 out_unlock:
 	mutex_unlock(&crypt_stat->cs_tfm_mutex);
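
Switching from the synchronous blkcipher API to ablkcipher means the cipher
may complete asynchronously, so both converted paths above share the same
wait idiom: allocate a request, register extent_crypt_complete() as the
callback, and block on the completion when the operation is queued. The
pattern, condensed into one hypothetical helper:

static int crypt_extent_sketch(struct crypto_ablkcipher *tfm,
			       struct scatterlist *dst,
			       struct scatterlist *src, int len, u8 *iv)
{
	struct extent_crypt_result ecr;
	struct ablkcipher_request *req;
	int rc;

	init_completion(&ecr.completion);
	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	ablkcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			extent_crypt_complete, &ecr);
	ablkcipher_request_set_crypt(req, src, dst, len, iv);

	rc = crypto_ablkcipher_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY) {
		/* queued or backlogged: the callback fills in ecr.rc */
		wait_for_completion(&ecr.completion);
		rc = ecr.rc;
	}
	ablkcipher_request_free(req);
	return rc;
}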
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index dd299b3..f622a73 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -38,6 +38,7 @@
 #include <linux/nsproxy.h>
 #include <linux/backing-dev.h>
 #include <linux/ecryptfs.h>
+#include <linux/crypto.h>
 
 #define ECRYPTFS_DEFAULT_IV_BYTES 16
 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,7 +234,7 @@
 	size_t extent_shift;
 	unsigned int extent_mask;
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-	struct crypto_blkcipher *tfm;
+	struct crypto_ablkcipher *tfm;
 	struct crypto_hash *hash_tfm; /* Crypto context for generating
 				       * the initialization vectors */
 	unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 63b1f54..201f0a0 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/compat.h>
 #include <linux/fs_stack.h>
+#include <linux/aio.h>
 #include "ecryptfs_kernel.h"
 
 /**
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 6a16053..09fe622 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -232,17 +232,10 @@
 			struct inode *ecryptfs_inode)
 {
 	struct file *lower_file;
-	mm_segment_t fs_save;
-	ssize_t rc;
-
 	lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file;
 	if (!lower_file)
 		return -EIO;
-	fs_save = get_fs();
-	set_fs(get_ds());
-	rc = vfs_read(lower_file, data, size, &offset);
-	set_fs(fs_save);
-	return rc;
+	return kernel_read(lower_file, offset, data, size);
 }
 
 /**
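
kernel_read() performs the set_fs()/vfs_read()/restore dance internally, so
the open-coded copy above was pure duplication. Its contract at this point,
paraphrasing fs/exec.c of this era:

int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs = get_fs();
	loff_t pos = offset;
	int result;

	set_fs(get_ds());
	/* the cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}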
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index f936cb5..b744228 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -401,7 +401,7 @@
 	struct bio_vec *bv;
 	unsigned i;
 
-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		unsigned this_count = bv->bv_len;
 
 		if (likely(PAGE_SIZE == this_count))
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index b963f38..7682b97 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
@@ -432,7 +432,7 @@
 		if (!bio)
 			continue;
 
-		__bio_for_each_segment(bv, bio, i, 0) {
+		bio_for_each_segment_all(bv, bio, i) {
 			struct page *page = bv->bv_page;
 
 			SetPageUptodate(page);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fe60cc1..0a87bb1 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -31,6 +31,7 @@
 #include <linux/mpage.h>
 #include <linux/fiemap.h>
 #include <linux/namei.h>
+#include <linux/aio.h>
 #include "ext2.h"
 #include "acl.h"
 #include "xip.h"
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index d706dbf..23c7128 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
+#include <linux/aio.h>
 #include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3dc48cc..6356665 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -362,22 +362,19 @@
 /*
  * Release the journal device
  */
-static int ext3_blkdev_put(struct block_device *bdev)
+static void ext3_blkdev_put(struct block_device *bdev)
 {
-	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
-static int ext3_blkdev_remove(struct ext3_sb_info *sbi)
+static void ext3_blkdev_remove(struct ext3_sb_info *sbi)
 {
 	struct block_device *bdev;
-	int ret = -ENODEV;
-
 	bdev = sbi->journal_bdev;
 	if (bdev) {
-		ret = ext3_blkdev_put(bdev);
+		ext3_blkdev_put(bdev);
 		sbi->journal_bdev = NULL;
 	}
-	return ret;
 }
 
 static inline struct inode *orphan_list_entry(struct list_head *l)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 64848b5..4959e29 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -23,6 +23,7 @@
 #include <linux/jbd2.h>
 #include <linux/mount.h>
 #include <linux/path.h>
+#include <linux/aio.h>
 #include <linux/quotaops.h>
 #include <linux/pagevec.h>
 #include "ext4.h"
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 98be6f6..b8d5d35 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -20,6 +20,7 @@
  *	(sct@redhat.com), 1993, 1998
  */
 
+#include <linux/aio.h>
 #include "ext4_jbd2.h"
 #include "truncate.h"
 #include "ext4_extents.h"	/* Needed for EXT_MAX_BLOCKS */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 793d44b..0723774 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/aio.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 5929cd0..19599bd 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -18,6 +18,7 @@
 #include <linux/pagevec.h>
 #include <linux/mpage.h>
 #include <linux/namei.h>
+#include <linux/aio.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
 #include <linux/workqueue.h>
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 24a146b..94cc84d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -703,22 +703,19 @@
 /*
  * Release the journal device
  */
-static int ext4_blkdev_put(struct block_device *bdev)
+static void ext4_blkdev_put(struct block_device *bdev)
 {
-	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
-static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
+static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
 {
 	struct block_device *bdev;
-	int ret = -ENODEV;
-
 	bdev = sbi->journal_bdev;
 	if (bdev) {
-		ret = ext4_blkdev_put(bdev);
+		ext4_blkdev_put(bdev);
 		sbi->journal_bdev = NULL;
 	}
-	return ret;
 }
 
 static inline struct inode *orphan_list_entry(struct list_head *l)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 2b6fc13..b1de01d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -20,6 +20,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include <trace/events/f2fs.h>
 
 static struct kmem_cache *orphan_entry_slab;
 static struct kmem_cache *inode_entry_slab;
@@ -57,13 +58,19 @@
 		cond_resched();
 		goto repeat;
 	}
-	if (f2fs_readpage(sbi, page, index, READ_SYNC)) {
+	if (PageUptodate(page))
+		goto out;
+
+	if (f2fs_readpage(sbi, page, index, READ_SYNC))
+		goto repeat;
+
+	lock_page(page);
+	if (page->mapping != mapping) {
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
+out:
 	mark_page_accessed(page);
-
-	/* We do not allow returning an errorneous page */
 	return page;
 }
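
This is the page-cache re-validation idiom the f2fs hunks in this series
keep introducing: after the read I/O completes, take the page lock again
and check both that the read succeeded and that the page still belongs to
the mapping; a page truncated or reclaimed while unlocked is dropped and
the lookup restarted. Skeleton distilled from the hunks (sbi, blk_addr and
the surrounding declarations as in the code above):

repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);
	if (PageUptodate(page))
		goto out;

	err = f2fs_readpage(sbi, page, blk_addr, READ_SYNC);
	if (err)
		return ERR_PTR(err);	/* f2fs_readpage dropped the page */

	lock_page(page);
	if (!PageUptodate(page)) {		/* the read failed */
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {		/* truncated while unlocked */
		f2fs_put_page(page, 1);
		goto repeat;
	}
out:
	return page;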
 
@@ -541,54 +548,44 @@
  */
 static void block_operations(struct f2fs_sb_info *sbi)
 {
-	int t;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = LONG_MAX,
 		.for_reclaim = 0,
 	};
+	struct blk_plug plug;
 
-	/* Stop renaming operation */
-	mutex_lock_op(sbi, RENAME);
-	mutex_lock_op(sbi, DENTRY_OPS);
+	blk_start_plug(&plug);
 
-retry_dents:
+retry_flush_dents:
+	mutex_lock_all(sbi);
+
 	/* write all the dirty dentry pages */
-	sync_dirty_dir_inodes(sbi);
-
-	mutex_lock_op(sbi, DATA_WRITE);
 	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
-		mutex_unlock_op(sbi, DATA_WRITE);
-		goto retry_dents;
+		mutex_unlock_all(sbi);
+		sync_dirty_dir_inodes(sbi);
+		goto retry_flush_dents;
 	}
 
-	/* block all the operations */
-	for (t = DATA_NEW; t <= NODE_TRUNC; t++)
-		mutex_lock_op(sbi, t);
-
-	mutex_lock(&sbi->write_inode);
-
 	/*
 	 * POR: we should ensure that there is no dirty node pages
 	 * until finishing nat/sit flush.
 	 */
-retry:
-	sync_node_pages(sbi, 0, &wbc);
-
-	mutex_lock_op(sbi, NODE_WRITE);
+retry_flush_nodes:
+	mutex_lock(&sbi->node_write);
 
 	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
-		mutex_unlock_op(sbi, NODE_WRITE);
-		goto retry;
+		mutex_unlock(&sbi->node_write);
+		sync_node_pages(sbi, 0, &wbc);
+		goto retry_flush_nodes;
 	}
-	mutex_unlock(&sbi->write_inode);
+	blk_finish_plug(&plug);
 }
 
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
-	int t;
-	for (t = NODE_WRITE; t >= RENAME; t--)
-		mutex_unlock_op(sbi, t);
+	mutex_unlock(&sbi->node_write);
+	mutex_unlock_all(sbi);
 }
 
 static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
@@ -727,9 +724,13 @@
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	unsigned long long ckpt_ver;
 
+	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
+
 	mutex_lock(&sbi->cp_mutex);
 	block_operations(sbi);
 
+	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
+
 	f2fs_submit_bio(sbi, DATA, true);
 	f2fs_submit_bio(sbi, NODE, true);
 	f2fs_submit_bio(sbi, META, true);
@@ -746,13 +747,13 @@
 	flush_nat_entries(sbi);
 	flush_sit_entries(sbi);
 
-	reset_victim_segmap(sbi);
-
 	/* unlock all the fs_lock[] in do_checkpoint() */
 	do_checkpoint(sbi, is_umount);
 
 	unblock_operations(sbi);
 	mutex_unlock(&sbi->cp_mutex);
+
+	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
 }
 
 void init_orphan_info(struct f2fs_sb_info *sbi)
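
block_operations() now inverts the old order: take all fs locks first, then
test the dirty counters; if anything is still dirty, drop the locks, flush
outside them, and retry, so the counters are guaranteed to read zero while
the locks are actually held. The retry skeleton, from the hunk above:

retry_flush_dents:
	mutex_lock_all(sbi);
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		/* flush without the locks held, then re-check under them */
		mutex_unlock_all(sbi);
		sync_dirty_dir_inodes(sbi);
		goto retry_flush_dents;
	}
	/* same dance for dirty node pages under sbi->node_write */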
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7bd22a2..91ff93b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -12,6 +12,7 @@
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/mpage.h>
+#include <linux/aio.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
@@ -21,6 +22,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include <trace/events/f2fs.h>
 
 /*
  * Lock ordering for the change of data block address:
@@ -54,6 +56,8 @@
 	if (!inc_valid_block_count(sbi, dn->inode, 1))
 		return -ENOSPC;
 
+	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+
 	__set_data_blkaddr(dn, NEW_ADDR);
 	dn->data_blkaddr = NEW_ADDR;
 	sync_inode_page(dn);
@@ -133,7 +137,7 @@
 		goto end_update;
 	}
 
-	/* Frone merge */
+	/* Front merge */
 	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
 		fi->ext.fofs--;
 		fi->ext.blk_addr--;
@@ -169,7 +173,7 @@
 	return;
 }
 
-struct page *find_data_page(struct inode *inode, pgoff_t index)
+struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct address_space *mapping = inode->i_mapping;
@@ -183,7 +187,7 @@
 	f2fs_put_page(page, 0);
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 	if (err)
 		return ERR_PTR(err);
 	f2fs_put_dnode(&dn);
@@ -199,12 +203,20 @@
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-	if (err) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(err);
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		return page;
 	}
-	unlock_page(page);
+
+	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
+					sync ? READ_SYNC : READA);
+	if (sync) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page)) {
+			f2fs_put_page(page, 0);
+			return ERR_PTR(-EIO);
+		}
+	}
 	return page;
 }
 
@@ -222,14 +234,14 @@
 	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 	if (err)
 		return ERR_PTR(err);
 	f2fs_put_dnode(&dn);
 
 	if (dn.data_blkaddr == NULL_ADDR)
 		return ERR_PTR(-ENOENT);
-
+repeat:
 	page = grab_cache_page(mapping, index);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
@@ -241,9 +253,17 @@
 	BUG_ON(dn.data_blkaddr == NULL_ADDR);
 
 	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-	if (err) {
-		f2fs_put_page(page, 1);
+	if (err)
 		return ERR_PTR(err);
+
+	lock_page(page);
+	if (!PageUptodate(page)) {
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
+	}
+	if (page->mapping != mapping) {
+		f2fs_put_page(page, 1);
+		goto repeat;
 	}
 	return page;
 }
@@ -251,6 +271,9 @@
 /*
  * Caller ensures that this data page is never allocated.
  * A new zero-filled data page is allocated in the page cache.
+ *
+ * Also, caller should grab and release a mutex by calling mutex_lock_op() and
+ * mutex_unlock_op().
  */
 struct page *get_new_data_page(struct inode *inode, pgoff_t index,
 						bool new_i_size)
@@ -262,7 +285,7 @@
 	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, index, 0);
+	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
 	if (err)
 		return ERR_PTR(err);
 
@@ -273,7 +296,7 @@
 		}
 	}
 	f2fs_put_dnode(&dn);
-
+repeat:
 	page = grab_cache_page(mapping, index);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
@@ -283,14 +306,21 @@
 
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+		SetPageUptodate(page);
 	} else {
 		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-		if (err) {
-			f2fs_put_page(page, 1);
+		if (err)
 			return ERR_PTR(err);
+		lock_page(page);
+		if (!PageUptodate(page)) {
+			f2fs_put_page(page, 1);
+			return ERR_PTR(-EIO);
+		}
+		if (page->mapping != mapping) {
+			f2fs_put_page(page, 1);
+			goto repeat;
 		}
 	}
-	SetPageUptodate(page);
 
 	if (new_i_size &&
 		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
@@ -325,21 +355,15 @@
 
 /*
  * Fill the locked page with data located in the block address.
- * Read operation is synchronous, and caller must unlock the page.
+ * Return an unlocked page.
  */
 int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
 					block_t blk_addr, int type)
 {
 	struct block_device *bdev = sbi->sb->s_bdev;
-	bool sync = (type == READ_SYNC);
 	struct bio *bio;
 
-	/* This page can be already read by other threads */
-	if (PageUptodate(page)) {
-		if (!sync)
-			unlock_page(page);
-		return 0;
-	}
+	trace_f2fs_readpage(page, blk_addr, type);
 
 	down_read(&sbi->bio_sem);
 
@@ -354,18 +378,12 @@
 		kfree(bio->bi_private);
 		bio_put(bio);
 		up_read(&sbi->bio_sem);
+		f2fs_put_page(page, 1);
 		return -EFAULT;
 	}
 
 	submit_bio(type, bio);
 	up_read(&sbi->bio_sem);
-
-	/* wait for read completion if sync */
-	if (sync) {
-		lock_page(page);
-		if (PageError(page))
-			return -EIO;
-	}
 	return 0;
 }
 
@@ -387,14 +405,18 @@
 	/* Get the page offset from the block offset(iblock) */
 	pgofs =	(pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
 
-	if (check_extent_cache(inode, pgofs, bh_result))
+	if (check_extent_cache(inode, pgofs, bh_result)) {
+		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
 		return 0;
+	}
 
 	/* When reading holes, we need its node page */
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, pgofs, RDONLY_NODE);
-	if (err)
+	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
+	if (err) {
+		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
 		return (err == -ENOENT) ? 0 : err;
+	}
 
 	/* It does not support data allocation */
 	BUG_ON(create);
@@ -419,6 +441,7 @@
 		bh_result->b_size = (i << blkbits);
 	}
 	f2fs_put_dnode(&dn);
+	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
 	return 0;
 }
 
@@ -437,13 +460,12 @@
 int do_write_data_page(struct page *page)
 {
 	struct inode *inode = page->mapping->host;
-	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	block_t old_blk_addr, new_blk_addr;
 	struct dnode_of_data dn;
 	int err = 0;
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, page->index, RDONLY_NODE);
+	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 	if (err)
 		return err;
 
@@ -467,8 +489,6 @@
 		write_data_page(inode, page, &dn,
 				old_blk_addr, &new_blk_addr);
 		update_extent_cache(new_blk_addr, &dn);
-		F2FS_I(inode)->data_version =
-			le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
 	}
 out_writepage:
 	f2fs_put_dnode(&dn);
@@ -484,10 +504,11 @@
 	const pgoff_t end_index = ((unsigned long long) i_size)
 							>> PAGE_CACHE_SHIFT;
 	unsigned offset;
+	bool need_balance_fs = false;
 	int err = 0;
 
 	if (page->index < end_index)
-		goto out;
+		goto write;
 
 	/*
 	 * If the offset is out-of-range of file size,
@@ -499,50 +520,46 @@
 			dec_page_count(sbi, F2FS_DIRTY_DENTS);
 			inode_dec_dirty_dents(inode);
 		}
-		goto unlock_out;
+		goto out;
 	}
 
 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-out:
-	if (sbi->por_doing)
+write:
+	if (sbi->por_doing) {
+		err = AOP_WRITEPAGE_ACTIVATE;
 		goto redirty_out;
+	}
 
-	if (wbc->for_reclaim && !S_ISDIR(inode->i_mode) && !is_cold_data(page))
-		goto redirty_out;
-
-	mutex_lock_op(sbi, DATA_WRITE);
+	/* Dentry blocks are controlled by checkpoint */
 	if (S_ISDIR(inode->i_mode)) {
 		dec_page_count(sbi, F2FS_DIRTY_DENTS);
 		inode_dec_dirty_dents(inode);
+		err = do_write_data_page(page);
+	} else {
+		int ilock = mutex_lock_op(sbi);
+		err = do_write_data_page(page);
+		mutex_unlock_op(sbi, ilock);
+		need_balance_fs = true;
 	}
-	err = do_write_data_page(page);
-	if (err && err != -ENOENT) {
-		wbc->pages_skipped++;
-		set_page_dirty(page);
-	}
-	mutex_unlock_op(sbi, DATA_WRITE);
+	if (err == -ENOENT)
+		goto out;
+	else if (err)
+		goto redirty_out;
 
 	if (wbc->for_reclaim)
 		f2fs_submit_bio(sbi, DATA, true);
 
-	if (err == -ENOENT)
-		goto unlock_out;
-
 	clear_cold_data(page);
+out:
 	unlock_page(page);
-
-	if (!wbc->for_reclaim && !S_ISDIR(inode->i_mode))
+	if (need_balance_fs)
 		f2fs_balance_fs(sbi);
 	return 0;
 
-unlock_out:
-	unlock_page(page);
-	return (err == -ENOENT) ? 0 : err;
-
 redirty_out:
 	wbc->pages_skipped++;
 	set_page_dirty(page);
-	return AOP_WRITEPAGE_ACTIVATE;
+	return err;
 }
 
 #define MAX_DESIRED_PAGES_WP	4096
@@ -561,19 +578,26 @@
 {
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	bool locked = false;
 	int ret;
 	long excess_nrtw = 0, desired_nrtw;
 
+	/* deal with chardevs and other special files */
+	if (!mapping->a_ops->writepage)
+		return 0;
+
 	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
 		desired_nrtw = MAX_DESIRED_PAGES_WP;
 		excess_nrtw = desired_nrtw - wbc->nr_to_write;
 		wbc->nr_to_write = desired_nrtw;
 	}
 
-	if (!S_ISDIR(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode)) {
 		mutex_lock(&sbi->writepages);
+		locked = true;
+	}
 	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
-	if (!S_ISDIR(inode->i_mode))
+	if (locked)
 		mutex_unlock(&sbi->writepages);
 	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
 
@@ -593,39 +617,33 @@
 	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
 	struct dnode_of_data dn;
 	int err = 0;
+	int ilock;
 
 	/* for nobh_write_end */
 	*fsdata = NULL;
 
 	f2fs_balance_fs(sbi);
-
+repeat:
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
 	*pagep = page;
 
-	mutex_lock_op(sbi, DATA_NEW);
+	ilock = mutex_lock_op(sbi);
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, index, 0);
-	if (err) {
-		mutex_unlock_op(sbi, DATA_NEW);
-		f2fs_put_page(page, 1);
-		return err;
-	}
+	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
+	if (err)
+		goto err;
 
-	if (dn.data_blkaddr == NULL_ADDR) {
+	if (dn.data_blkaddr == NULL_ADDR)
 		err = reserve_new_block(&dn);
-		if (err) {
-			f2fs_put_dnode(&dn);
-			mutex_unlock_op(sbi, DATA_NEW);
-			f2fs_put_page(page, 1);
-			return err;
-		}
-	}
-	f2fs_put_dnode(&dn);
 
-	mutex_unlock_op(sbi, DATA_NEW);
+	f2fs_put_dnode(&dn);
+	if (err)
+		goto err;
+
+	mutex_unlock_op(sbi, ilock);
 
 	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
 		return 0;
@@ -636,21 +654,34 @@
 
 		/* Reading beyond i_size is simple: memset to zero */
 		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-		return 0;
+		goto out;
 	}
 
 	if (dn.data_blkaddr == NEW_ADDR) {
 		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
 	} else {
 		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-		if (err) {
-			f2fs_put_page(page, 1);
+		if (err)
 			return err;
+		lock_page(page);
+		if (!PageUptodate(page)) {
+			f2fs_put_page(page, 1);
+			return -EIO;
+		}
+		if (page->mapping != mapping) {
+			f2fs_put_page(page, 1);
+			goto repeat;
 		}
 	}
+out:
 	SetPageUptodate(page);
 	clear_cold_data(page);
 	return 0;
+
+err:
+	mutex_unlock_op(sbi, ilock);
+	f2fs_put_page(page, 1);
+	return err;
 }
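
The repeat label above is an instance of a lock-drop/recheck idiom this series uses in several places (get_node_page() and truncate_partial_data_page() below use the same one): once the page lock is dropped for I/O, the page can be truncated and its slot reused, so the mapping must be rechecked after re-locking. A minimal sketch of the idiom, with names taken from the surrounding code:

	repeat:
		page = grab_cache_page(mapping, index);
		/* ... submit I/O; the page lock is dropped meanwhile ... */
		lock_page(page);
		if (page->mapping != mapping) {	/* raced with truncation */
			f2fs_put_page(page, 1);
			goto repeat;		/* start over with a fresh page */
		}
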
 
 static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
@@ -681,7 +712,7 @@
 static int f2fs_release_data_page(struct page *page, gfp_t wait)
 {
 	ClearPagePrivate(page);
-	return 0;
+	return 1;
 }
 
 static int f2fs_set_data_page_dirty(struct page *page)
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 025b9e2..8d99437 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -13,7 +13,6 @@
 
 #include <linux/fs.h>
 #include <linux/backing-dev.h>
-#include <linux/proc_fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/blkdev.h>
 #include <linux/debugfs.h>
@@ -106,7 +105,7 @@
 		}
 	}
 	mutex_unlock(&sit_i->sentry_lock);
-	dist = sbi->total_sections * hblks_per_sec * hblks_per_sec / 100;
+	dist = TOTAL_SECS(sbi) * hblks_per_sec * hblks_per_sec / 100;
 	si->bimodal = bimodal / dist;
 	if (si->dirty_count)
 		si->avg_vblocks = total_vblocks / ndirty;
@@ -138,14 +137,13 @@
 	si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
 	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * TOTAL_SEGS(sbi);
 	if (sbi->segs_per_sec > 1)
-		si->base_mem += sbi->total_sections *
-			sizeof(struct sec_entry);
+		si->base_mem += TOTAL_SECS(sbi) * sizeof(struct sec_entry);
 	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
 
 	/* build free segmap */
 	si->base_mem += sizeof(struct free_segmap_info);
 	si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
-	si->base_mem += f2fs_bitmap_size(sbi->total_sections);
+	si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
 
 	/* build curseg */
 	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
@@ -154,7 +152,7 @@
 	/* build dirty segmap */
 	si->base_mem += sizeof(struct dirty_seglist_info);
 	si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
-	si->base_mem += 2 * f2fs_bitmap_size(TOTAL_SEGS(sbi));
+	si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
 
 	/* build nm */
 	si->base_mem += sizeof(struct f2fs_nm_info);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 1be9487..1ac6b93 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -148,7 +148,7 @@
 
 	for (; bidx < end_block; bidx++) {
 		/* no need to allocate new dentry pages to all the indices */
-		dentry_page = find_data_page(dir, bidx);
+		dentry_page = find_data_page(dir, bidx, true);
 		if (IS_ERR(dentry_page)) {
 			room = true;
 			continue;
@@ -189,6 +189,9 @@
 	unsigned int max_depth;
 	unsigned int level;
 
+	if (namelen > F2FS_NAME_LEN)
+		return NULL;
+
 	if (npages == 0)
 		return NULL;
 
@@ -246,9 +249,6 @@
 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
 		struct page *page, struct inode *inode)
 {
-	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
-
-	mutex_lock_op(sbi, DENTRY_OPS);
 	lock_page(page);
 	wait_on_page_writeback(page);
 	de->ino = cpu_to_le32(inode->i_ino);
@@ -262,7 +262,6 @@
 	F2FS_I(inode)->i_pino = dir->i_ino;
 
 	f2fs_put_page(page, 1);
-	mutex_unlock_op(sbi, DENTRY_OPS);
 }
 
 void init_dent_inode(const struct qstr *name, struct page *ipage)
@@ -281,6 +280,43 @@
 	set_page_dirty(ipage);
 }
 
+static int make_empty_dir(struct inode *inode, struct inode *parent)
+{
+	struct page *dentry_page;
+	struct f2fs_dentry_block *dentry_blk;
+	struct f2fs_dir_entry *de;
+	void *kaddr;
+
+	dentry_page = get_new_data_page(inode, 0, true);
+	if (IS_ERR(dentry_page))
+		return PTR_ERR(dentry_page);
+
+	kaddr = kmap_atomic(dentry_page);
+	dentry_blk = (struct f2fs_dentry_block *)kaddr;
+
+	de = &dentry_blk->dentry[0];
+	de->name_len = cpu_to_le16(1);
+	de->hash_code = 0;
+	de->ino = cpu_to_le32(inode->i_ino);
+	memcpy(dentry_blk->filename[0], ".", 1);
+	set_de_type(de, inode);
+
+	de = &dentry_blk->dentry[1];
+	de->hash_code = 0;
+	de->name_len = cpu_to_le16(2);
+	de->ino = cpu_to_le32(parent->i_ino);
+	memcpy(dentry_blk->filename[1], "..", 2);
+	set_de_type(de, inode);
+
+	test_and_set_bit_le(0, &dentry_blk->dentry_bitmap);
+	test_and_set_bit_le(1, &dentry_blk->dentry_bitmap);
+	kunmap_atomic(kaddr);
+
+	set_page_dirty(dentry_page);
+	f2fs_put_page(dentry_page, 1);
+	return 0;
+}
+
 static int init_inode_metadata(struct inode *inode,
 		struct inode *dir, const struct qstr *name)
 {
@@ -291,7 +327,7 @@
 			return err;
 
 		if (S_ISDIR(inode->i_mode)) {
-			err = f2fs_make_empty(inode, dir);
+			err = make_empty_dir(inode, dir);
 			if (err) {
 				remove_inode_page(inode);
 				return err;
@@ -314,7 +350,7 @@
 	}
 	if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
 		inc_nlink(inode);
-		f2fs_write_inode(inode, NULL);
+		update_inode_page(inode);
 	}
 	return 0;
 }
@@ -338,7 +374,7 @@
 	}
 
 	if (need_dir_update)
-		f2fs_write_inode(dir, NULL);
+		update_inode_page(dir);
 	else
 		mark_inode_dirty(dir);
 
@@ -370,6 +406,10 @@
 	goto next;
 }
 
+/*
+ * Caller should grab and release a mutex by calling mutex_lock_op() and
+ * mutex_unlock_op().
+ */
 int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *inode)
 {
 	unsigned int bit_pos;
@@ -379,7 +419,6 @@
 	f2fs_hash_t dentry_hash;
 	struct f2fs_dir_entry *de;
 	unsigned int nbucket, nblock;
-	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
 	size_t namelen = name->len;
 	struct page *dentry_page = NULL;
 	struct f2fs_dentry_block *dentry_blk = NULL;
@@ -409,12 +448,9 @@
 	bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));
 
 	for (block = bidx; block <= (bidx + nblock - 1); block++) {
-		mutex_lock_op(sbi, DENTRY_OPS);
 		dentry_page = get_new_data_page(dir, block, true);
-		if (IS_ERR(dentry_page)) {
-			mutex_unlock_op(sbi, DENTRY_OPS);
+		if (IS_ERR(dentry_page))
 			return PTR_ERR(dentry_page);
-		}
 
 		dentry_blk = kmap(dentry_page);
 		bit_pos = room_for_filename(dentry_blk, slots);
@@ -423,7 +459,6 @@
 
 		kunmap(dentry_page);
 		f2fs_put_page(dentry_page, 1);
-		mutex_unlock_op(sbi, DENTRY_OPS);
 	}
 
 	/* Move to next level to find the empty slot for new dentry */
@@ -453,7 +488,6 @@
 fail:
 	kunmap(dentry_page);
 	f2fs_put_page(dentry_page, 1);
-	mutex_unlock_op(sbi, DENTRY_OPS);
 	return err;
 }
 
@@ -473,8 +507,6 @@
 	void *kaddr = page_address(page);
 	int i;
 
-	mutex_lock_op(sbi, DENTRY_OPS);
-
 	lock_page(page);
 	wait_on_page_writeback(page);
 
@@ -494,7 +526,7 @@
 
 	if (inode && S_ISDIR(inode->i_mode)) {
 		drop_nlink(dir);
-		f2fs_write_inode(dir, NULL);
+		update_inode_page(dir);
 	} else {
 		mark_inode_dirty(dir);
 	}
@@ -506,7 +538,8 @@
 			drop_nlink(inode);
 			i_size_write(inode, 0);
 		}
-		f2fs_write_inode(inode, NULL);
+		update_inode_page(inode);
+
 		if (inode->i_nlink == 0)
 			add_orphan_inode(sbi, inode->i_ino);
 	}
@@ -519,45 +552,6 @@
 		inode_dec_dirty_dents(dir);
 	}
 	f2fs_put_page(page, 1);
-
-	mutex_unlock_op(sbi, DENTRY_OPS);
-}
-
-int f2fs_make_empty(struct inode *inode, struct inode *parent)
-{
-	struct page *dentry_page;
-	struct f2fs_dentry_block *dentry_blk;
-	struct f2fs_dir_entry *de;
-	void *kaddr;
-
-	dentry_page = get_new_data_page(inode, 0, true);
-	if (IS_ERR(dentry_page))
-		return PTR_ERR(dentry_page);
-
-	kaddr = kmap_atomic(dentry_page);
-	dentry_blk = (struct f2fs_dentry_block *)kaddr;
-
-	de = &dentry_blk->dentry[0];
-	de->name_len = cpu_to_le16(1);
-	de->hash_code = f2fs_dentry_hash(".", 1);
-	de->ino = cpu_to_le32(inode->i_ino);
-	memcpy(dentry_blk->filename[0], ".", 1);
-	set_de_type(de, inode);
-
-	de = &dentry_blk->dentry[1];
-	de->hash_code = f2fs_dentry_hash("..", 2);
-	de->name_len = cpu_to_le16(2);
-	de->ino = cpu_to_le32(parent->i_ino);
-	memcpy(dentry_blk->filename[1], "..", 2);
-	set_de_type(de, inode);
-
-	test_and_set_bit_le(0, &dentry_blk->dentry_bitmap);
-	test_and_set_bit_le(1, &dentry_blk->dentry_bitmap);
-	kunmap_atomic(kaddr);
-
-	set_page_dirty(dentry_page);
-	f2fs_put_page(dentry_page, 1);
-	return 0;
 }
 
 bool f2fs_empty_dir(struct inode *dir)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 201c8d3..20aab02 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -125,11 +125,15 @@
 					 * file keeping -1 as its node offset to
 					 * distinguish from index node blocks.
 					 */
-#define RDONLY_NODE		1	/*
-					 * specify a read-only mode when getting
-					 * a node block. 0 is read-write mode.
-					 * used by get_dnode_of_data().
+enum {
+	ALLOC_NODE,			/* allocate a new node page if needed */
+	LOOKUP_NODE,			/* look up a node without readahead */
+	LOOKUP_NODE_RA,			/*
+					 * look up a node with readahead,
+					 * called by get_datablock_ro()
 					 */
+};
+
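
For orientation, the three modes map onto callers visible elsewhere in this series; a condensed cross-reference (the calls below appear verbatim in the data.c and file.c hunks):

	err = get_dnode_of_data(&dn, index, ALLOC_NODE);	/* f2fs_write_begin: may allocate */
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);	/* do_write_data_page: lookup only */
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);	/* get_datablock_ro: lookup + readahead */
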
 #define F2FS_LINK_MAX		32000	/* maximum link count per file */
 
 /* for in-memory extent cache entry */
@@ -144,6 +148,7 @@
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
  */
 #define FADVISE_COLD_BIT	0x01
+#define FADVISE_CP_BIT		0x02
 
 struct f2fs_inode_info {
 	struct inode vfs_inode;		/* serve a vfs inode */
@@ -155,7 +160,6 @@
 
 	/* Use below internally in f2fs*/
 	unsigned long flags;		/* use to pass per-file flags */
-	unsigned long long data_version;/* latest version of data for fsync */
 	atomic_t dirty_dents;		/* # of dirty dentry pages */
 	f2fs_hash_t chash;		/* hash value of given file name */
 	unsigned int clevel;		/* maximum level of given file name */
@@ -186,7 +190,6 @@
 struct f2fs_nm_info {
 	block_t nat_blkaddr;		/* base disk address of NAT */
 	nid_t max_nid;			/* maximum possible node ids */
-	nid_t init_scan_nid;		/* the first nid to be scanned */
 	nid_t next_scan_nid;		/* the next nid to be scanned */
 
 	/* NAT cache management */
@@ -305,23 +308,12 @@
 };
 
 /*
- * FS_LOCK nesting subclasses for the lock validator:
- *
- * The locking order between these classes is
- * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
- *    -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
+ * Used as sbi->fs_lock[NR_GLOBAL_LOCKS].
+ * The checkpoint procedure blocks all the locks in this fs_lock array.
+ * An FS operation grabs any free lock; if none is free, it waits for one
+ * assigned in a round-robin manner.
  */
-enum lock_type {
-	RENAME,		/* for renaming operations */
-	DENTRY_OPS,	/* for directory operations */
-	DATA_WRITE,	/* for data write */
-	DATA_NEW,	/* for data allocation */
-	DATA_TRUNC,	/* for data truncate */
-	NODE_NEW,	/* for node allocation */
-	NODE_TRUNC,	/* for node truncate */
-	NODE_WRITE,	/* for node write */
-	NR_LOCK_TYPE,
-};
+#define NR_GLOBAL_LOCKS	8
 
 /*
 * Below are the page types of bios used in submit_bio().
@@ -361,11 +353,13 @@
 	/* for checkpoint */
 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
 	struct inode *meta_inode;		/* cache meta blocks */
-	struct mutex cp_mutex;			/* for checkpoint procedure */
-	struct mutex fs_lock[NR_LOCK_TYPE];	/* for blocking FS operations */
-	struct mutex write_inode;		/* mutex for write inode */
+	struct mutex cp_mutex;			/* checkpoint procedure lock */
+	struct mutex fs_lock[NR_GLOBAL_LOCKS];	/* blocking FS operations */
+	struct mutex node_write;		/* locking node writes */
 	struct mutex writepages;		/* mutex for writepages() */
+	unsigned char next_lock_num;		/* round-robin global locks */
 	int por_doing;				/* recovery is doing or not */
+	int on_build_free_nids;			/* build_free_nids is doing */
 
 	/* for orphan inode management */
 	struct list_head orphan_inode_list;	/* orphan inode list */
@@ -406,6 +400,7 @@
 	/* for cleaning operations */
 	struct mutex gc_mutex;			/* mutex for GC */
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
+	unsigned int cur_victim_sec;		/* current victim section num */
 
 	/*
 	 * for stat information.
@@ -498,22 +493,51 @@
 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 }
 
-static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
+static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
 {
-	mutex_lock_nested(&sbi->fs_lock[t], t);
+	int i = 0;
+	for (; i < NR_GLOBAL_LOCKS; i++)
+		mutex_lock(&sbi->fs_lock[i]);
 }
 
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
+static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
 {
-	mutex_unlock(&sbi->fs_lock[t]);
+	int i = 0;
+	for (; i < NR_GLOBAL_LOCKS; i++)
+		mutex_unlock(&sbi->fs_lock[i]);
+}
+
+static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
+{
+	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
+	int i = 0;
+
+	for (; i < NR_GLOBAL_LOCKS; i++)
+		if (mutex_trylock(&sbi->fs_lock[i]))
+			return i;
+
+	mutex_lock(&sbi->fs_lock[next_lock]);
+	sbi->next_lock_num++;
+	return next_lock;
+}
+
+static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+{
+	if (ilock < 0)
+		return;
+	BUG_ON(ilock >= NR_GLOBAL_LOCKS);
+	mutex_unlock(&sbi->fs_lock[ilock]);
 }
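
Taken together, these helpers define the pattern every FS operation in this series follows; a minimal caller sketch (do_write_data_page() stands in for any operation):

	int ilock = mutex_lock_op(sbi);		/* take whichever slot is free */
	err = do_write_data_page(page);		/* excluded from checkpointing */
	mutex_unlock_op(sbi, ilock);		/* release exactly that slot */

The checkpoint path takes the opposite side with mutex_lock_all()/mutex_unlock_all(), so holding any single slot is enough to keep a checkpoint from starting.
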
 
 /*
  * Check whether the given nid is within node id range.
  */
-static inline void check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	BUG_ON((nid >= NM_I(sbi)->max_nid));
+	WARN_ON((nid >= NM_I(sbi)->max_nid));
+	if (nid >= NM_I(sbi)->max_nid)
+		return -EINVAL;
+	return 0;
 }
 
 #define F2FS_DEFAULT_ALLOCATED_BLOCKS	1
@@ -819,7 +843,6 @@
 /* used for f2fs_inode_info->flags */
 enum {
 	FI_NEW_INODE,		/* indicate newly allocated inode */
-	FI_NEED_CP,		/* need to do checkpoint during fsync */
 	FI_INC_LINK,		/* need to increment i_nlink */
 	FI_ACL_MODE,		/* indicate acl mode */
 	FI_NO_ALLOC,		/* should not allocate any blocks */
@@ -872,6 +895,7 @@
 void f2fs_set_inode_flags(struct inode *);
 struct inode *f2fs_iget(struct super_block *, unsigned long);
 void update_inode(struct inode *, struct page *);
+int update_inode_page(struct inode *);
 int f2fs_write_inode(struct inode *, struct writeback_control *);
 void f2fs_evict_inode(struct inode *);
 
@@ -973,7 +997,6 @@
 					int, unsigned int, int);
 void flush_sit_entries(struct f2fs_sb_info *);
 int build_segment_manager(struct f2fs_sb_info *);
-void reset_victim_segmap(struct f2fs_sb_info *);
 void destroy_segment_manager(struct f2fs_sb_info *);
 
 /*
@@ -1000,7 +1023,7 @@
  */
 int reserve_new_block(struct dnode_of_data *);
 void update_extent_cache(block_t, struct dnode_of_data *);
-struct page *find_data_page(struct inode *, pgoff_t);
+struct page *find_data_page(struct inode *, pgoff_t, bool);
 struct page *get_lock_data_page(struct inode *, pgoff_t);
 struct page *get_new_data_page(struct inode *, pgoff_t, bool);
 int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
@@ -1020,7 +1043,7 @@
 /*
  * recovery.c
  */
-void recover_fsync_data(struct f2fs_sb_info *);
+int recover_fsync_data(struct f2fs_sb_info *);
 bool space_for_roll_forward(struct f2fs_sb_info *);
 
 /*
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index db62628..1cae864 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -13,6 +13,7 @@
 #include <linux/stat.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/blkdev.h>
 #include <linux/falloc.h>
 #include <linux/types.h>
 #include <linux/compat.h>
@@ -24,6 +25,7 @@
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
+#include <trace/events/f2fs.h>
 
 static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 						struct vm_fault *vmf)
@@ -33,19 +35,18 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	block_t old_blk_addr;
 	struct dnode_of_data dn;
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
 	sb_start_pagefault(inode->i_sb);
 
-	mutex_lock_op(sbi, DATA_NEW);
-
 	/* block allocation */
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, page->index, 0);
+	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
 	if (err) {
-		mutex_unlock_op(sbi, DATA_NEW);
+		mutex_unlock_op(sbi, ilock);
 		goto out;
 	}
 
@@ -55,13 +56,12 @@
 		err = reserve_new_block(&dn);
 		if (err) {
 			f2fs_put_dnode(&dn);
-			mutex_unlock_op(sbi, DATA_NEW);
+			mutex_unlock_op(sbi, ilock);
 			goto out;
 		}
 	}
 	f2fs_put_dnode(&dn);
-
-	mutex_unlock_op(sbi, DATA_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	lock_page(page);
 	if (page->mapping != inode->i_mapping ||
@@ -102,28 +102,10 @@
 	.remap_pages	= generic_file_remap_pages,
 };
 
-static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
-{
-	struct dentry *dentry;
-	nid_t pino;
-
-	inode = igrab(inode);
-	dentry = d_find_any_alias(inode);
-	if (!dentry) {
-		iput(inode);
-		return 0;
-	}
-	pino = dentry->d_parent->d_inode->i_ino;
-	dput(dentry);
-	iput(inode);
-	return !is_checkpointed_node(sbi, pino);
-}
-
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
 	struct inode *inode = file->f_mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-	unsigned long long cur_version;
 	int ret = 0;
 	bool need_cp = false;
 	struct writeback_control wbc = {
@@ -135,9 +117,12 @@
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;
 
+	trace_f2fs_sync_file_enter(inode);
 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
+	if (ret) {
+		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 		return ret;
+	}
 
 	/* guarantee free sections for fsync */
 	f2fs_balance_fs(sbi);
@@ -147,28 +132,18 @@
 	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
 		goto out;
 
-	mutex_lock(&sbi->cp_mutex);
-	cur_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
-	mutex_unlock(&sbi->cp_mutex);
-
-	if (F2FS_I(inode)->data_version != cur_version &&
-					!(inode->i_state & I_DIRTY))
-		goto out;
-	F2FS_I(inode)->data_version--;
-
 	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
 		need_cp = true;
-	else if (is_inode_flag_set(F2FS_I(inode), FI_NEED_CP))
+	else if (is_cp_file(inode))
 		need_cp = true;
 	else if (!space_for_roll_forward(sbi))
 		need_cp = true;
-	else if (need_to_sync_dir(sbi, inode))
+	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
 		need_cp = true;
 
 	if (need_cp) {
 		/* all the dirty node pages should be flushed for POR */
 		ret = f2fs_sync_fs(inode->i_sb, 1);
-		clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
 	} else {
 		/* if there is no written node page, write its inode page */
 		while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
@@ -178,9 +153,11 @@
 		}
 		filemap_fdatawait_range(sbi->node_inode->i_mapping,
 							0, LONG_MAX);
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
 	}
 out:
 	mutex_unlock(&inode->i_mutex);
+	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
 	return ret;
 }
 
@@ -216,6 +193,9 @@
 		sync_inode_page(dn);
 	}
 	dn->ofs_in_node = ofs;
+
+	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
+					 dn->ofs_in_node, nr_free);
 	return nr_free;
 }
 
@@ -232,11 +212,15 @@
 	if (!offset)
 		return;
 
-	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT);
+	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
 	if (IS_ERR(page))
 		return;
 
 	lock_page(page);
+	if (page->mapping != inode->i_mapping) {
+		f2fs_put_page(page, 1);
+		return;
+	}
 	wait_on_page_writeback(page);
 	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
 	set_page_dirty(page);
@@ -249,20 +233,22 @@
 	unsigned int blocksize = inode->i_sb->s_blocksize;
 	struct dnode_of_data dn;
 	pgoff_t free_from;
-	int count = 0;
+	int count = 0, ilock = -1;
 	int err;
 
+	trace_f2fs_truncate_blocks_enter(inode, from);
+
 	free_from = (pgoff_t)
 			((from + blocksize - 1) >> (sbi->log_blocksize));
 
-	mutex_lock_op(sbi, DATA_TRUNC);
-
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	err = get_dnode_of_data(&dn, free_from, RDONLY_NODE);
+	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
 	if (err) {
 		if (err == -ENOENT)
 			goto free_next;
-		mutex_unlock_op(sbi, DATA_TRUNC);
+		mutex_unlock_op(sbi, ilock);
+		trace_f2fs_truncate_blocks_exit(inode, err);
 		return err;
 	}
 
@@ -273,6 +259,7 @@
 
 	count -= dn.ofs_in_node;
 	BUG_ON(count < 0);
+
 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
 		truncate_data_blocks_range(&dn, count);
 		free_from += count;
@@ -281,11 +268,12 @@
 	f2fs_put_dnode(&dn);
 free_next:
 	err = truncate_inode_blocks(inode, free_from);
-	mutex_unlock_op(sbi, DATA_TRUNC);
+	mutex_unlock_op(sbi, ilock);
 
 	/* lastly zero out the first data page */
 	truncate_partial_data_page(inode, from);
 
+	trace_f2fs_truncate_blocks_exit(inode, err);
 	return err;
 }
 
@@ -295,6 +283,8 @@
 				S_ISLNK(inode->i_mode)))
 		return;
 
+	trace_f2fs_truncate(inode);
+
 	if (!truncate_blocks(inode, i_size_read(inode))) {
 		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		mark_inode_dirty(inode);
@@ -389,15 +379,16 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *page;
+	int ilock;
 
 	if (!len)
 		return;
 
 	f2fs_balance_fs(sbi);
 
-	mutex_lock_op(sbi, DATA_NEW);
+	ilock = mutex_lock_op(sbi);
 	page = get_new_data_page(inode, index, false);
-	mutex_unlock_op(sbi, DATA_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	if (!IS_ERR(page)) {
 		wait_on_page_writeback(page);
@@ -414,15 +405,10 @@
 
 	for (index = pg_start; index < pg_end; index++) {
 		struct dnode_of_data dn;
-		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
-		f2fs_balance_fs(sbi);
-
-		mutex_lock_op(sbi, DATA_TRUNC);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		err = get_dnode_of_data(&dn, index, RDONLY_NODE);
+		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 		if (err) {
-			mutex_unlock_op(sbi, DATA_TRUNC);
 			if (err == -ENOENT)
 				continue;
 			return err;
@@ -431,7 +417,6 @@
 		if (dn.data_blkaddr != NULL_ADDR)
 			truncate_data_blocks_range(&dn, 1);
 		f2fs_put_dnode(&dn);
-		mutex_unlock_op(sbi, DATA_TRUNC);
 	}
 	return 0;
 }
@@ -461,12 +446,19 @@
 		if (pg_start < pg_end) {
 			struct address_space *mapping = inode->i_mapping;
 			loff_t blk_start, blk_end;
+			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+			int ilock;
+
+			f2fs_balance_fs(sbi);
 
 			blk_start = pg_start << PAGE_CACHE_SHIFT;
 			blk_end = pg_end << PAGE_CACHE_SHIFT;
 			truncate_inode_pages_range(mapping, blk_start,
 					blk_end - 1);
+
+			ilock = mutex_lock_op(sbi);
 			ret = truncate_hole(inode, pg_start, pg_end);
+			mutex_unlock_op(sbi, ilock);
 		}
 	}
 
@@ -500,13 +492,13 @@
 
 	for (index = pg_start; index <= pg_end; index++) {
 		struct dnode_of_data dn;
+		int ilock;
 
-		mutex_lock_op(sbi, DATA_NEW);
-
+		ilock = mutex_lock_op(sbi);
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
-		ret = get_dnode_of_data(&dn, index, 0);
+		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 		if (ret) {
-			mutex_unlock_op(sbi, DATA_NEW);
+			mutex_unlock_op(sbi, ilock);
 			break;
 		}
 
@@ -514,13 +506,12 @@
 			ret = reserve_new_block(&dn);
 			if (ret) {
 				f2fs_put_dnode(&dn);
-				mutex_unlock_op(sbi, DATA_NEW);
+				mutex_unlock_op(sbi, ilock);
 				break;
 			}
 		}
 		f2fs_put_dnode(&dn);
-
-		mutex_unlock_op(sbi, DATA_NEW);
+		mutex_unlock_op(sbi, ilock);
 
 		if (pg_start == pg_end)
 			new_size = offset + len;
@@ -559,6 +550,7 @@
 		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		mark_inode_dirty(inode);
 	}
+	trace_f2fs_fallocate(inode, mode, offset, len, ret);
 	return ret;
 }
 
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2e3eb2d..1496159 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -11,7 +11,6 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
-#include <linux/proc_fs.h>
 #include <linux/init.h>
 #include <linux/f2fs_fs.h>
 #include <linux/kthread.h>
@@ -23,6 +22,7 @@
 #include "node.h"
 #include "segment.h"
 #include "gc.h"
+#include <trace/events/f2fs.h>
 
 static struct kmem_cache *winode_slab;
 
@@ -81,9 +81,6 @@
 		/* if return value is not zero, no victim was selected */
 		if (f2fs_gc(sbi))
 			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
-		else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
-			wait_ms = GC_THREAD_MAX_SLEEP_TIME;
-
 	} while (!kthread_should_stop());
 	return 0;
 }
@@ -131,7 +128,7 @@
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 
-	if (p->alloc_mode) {
+	if (p->alloc_mode == SSR) {
 		p->gc_mode = GC_GREEDY;
 		p->dirty_segmap = dirty_i->dirty_segmap[type];
 		p->ofs_unit = 1;
@@ -160,18 +157,21 @@
 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-	unsigned int segno;
+	unsigned int hint = 0;
+	unsigned int secno;
 
 	/*
 	 * If the gc_type is FG_GC, we can select victim segments
 	 * selected by background GC before.
 	 * Those segments guarantee they have small valid blocks.
 	 */
-	segno = find_next_bit(dirty_i->victim_segmap[BG_GC],
-						TOTAL_SEGS(sbi), 0);
-	if (segno < TOTAL_SEGS(sbi)) {
-		clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
-		return segno;
+next:
+	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
+	if (secno < TOTAL_SECS(sbi)) {
+		if (sec_usage_check(sbi, secno))
+			goto next;
+		clear_bit(secno, dirty_i->victim_secmap);
+		return secno * sbi->segs_per_sec;
 	}
 	return NULL_SEGNO;
 }
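
sec_usage_check() is not defined in this hunk; judging from the fields the series introduces (cur_victim_sec in f2fs_sb_info, IS_CURSEC), a plausible sketch of what it tests is:

	/* assumed helper: skip sections that are in active use, i.e. hold a
	 * current segment or are being reclaimed by FG_GC right now */
	static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
	{
		return IS_CURSEC(sbi, secno) || sbi->cur_victim_sec == secno;
	}
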
@@ -234,7 +234,7 @@
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	struct victim_sel_policy p;
-	unsigned int segno;
+	unsigned int secno;
 	int nsearched = 0;
 
 	p.alloc_mode = alloc_mode;
@@ -253,6 +253,7 @@
 
 	while (1) {
 		unsigned long cost;
+		unsigned int segno;
 
 		segno = find_next_bit(p.dirty_segmap,
 						TOTAL_SEGS(sbi), p.offset);
@@ -265,13 +266,11 @@
 			break;
 		}
 		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+		secno = GET_SECNO(sbi, segno);
 
-		if (test_bit(segno, dirty_i->victim_segmap[FG_GC]))
+		if (sec_usage_check(sbi, secno))
 			continue;
-		if (gc_type == BG_GC &&
-				test_bit(segno, dirty_i->victim_segmap[BG_GC]))
-			continue;
-		if (IS_CURSEC(sbi, GET_SECNO(sbi, segno)))
+		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
 			continue;
 
 		cost = get_gc_cost(sbi, segno, &p);
@@ -291,13 +290,18 @@
 	}
 got_it:
 	if (p.min_segno != NULL_SEGNO) {
-		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
 		if (p.alloc_mode == LFS) {
-			int i;
-			for (i = 0; i < p.ofs_unit; i++)
-				set_bit(*result + i,
-					dirty_i->victim_segmap[gc_type]);
+			secno = GET_SECNO(sbi, p.min_segno);
+			if (gc_type == FG_GC)
+				sbi->cur_victim_sec = secno;
+			else
+				set_bit(secno, dirty_i->victim_secmap);
 		}
+		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
+
+		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
+				sbi->cur_victim_sec,
+				prefree_segments(sbi), free_segments(sbi));
 	}
 	mutex_unlock(&dirty_i->seglist_lock);
 
@@ -381,6 +385,7 @@
 
 next_step:
 	entry = sum;
+
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
@@ -401,11 +406,18 @@
 			continue;
 
 		/* set page dirty and write it */
-		if (!PageWriteback(node_page))
+		if (gc_type == FG_GC) {
+			f2fs_submit_bio(sbi, NODE, true);
+			wait_on_page_writeback(node_page);
 			set_page_dirty(node_page);
+		} else {
+			if (!PageWriteback(node_page))
+				set_page_dirty(node_page);
+		}
 		f2fs_put_page(node_page, 1);
 		stat_inc_node_blk_count(sbi, 1);
 	}
+
 	if (initial) {
 		initial = false;
 		goto next_step;
@@ -418,6 +430,13 @@
 			.for_reclaim = 0,
 		};
 		sync_node_pages(sbi, 0, &wbc);
+
+		/*
+		 * In the case of FG_GC, it'd be better to reclaim this victim
+		 * completely.
+		 */
+		if (get_valid_blocks(sbi, segno, 1) != 0)
+			goto next_step;
 	}
 }
 
@@ -481,21 +500,19 @@
 
 static void move_data_page(struct inode *inode, struct page *page, int gc_type)
 {
-	if (page->mapping != inode->i_mapping)
-		goto out;
-
-	if (inode != page->mapping->host)
-		goto out;
-
-	if (PageWriteback(page))
-		goto out;
-
 	if (gc_type == BG_GC) {
+		if (PageWriteback(page))
+			goto out;
 		set_page_dirty(page);
 		set_cold_data(page);
 	} else {
 		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-		mutex_lock_op(sbi, DATA_WRITE);
+
+		if (PageWriteback(page)) {
+			f2fs_submit_bio(sbi, DATA, true);
+			wait_on_page_writeback(page);
+		}
+
 		if (clear_page_dirty_for_io(page) &&
 			S_ISDIR(inode->i_mode)) {
 			dec_page_count(sbi, F2FS_DIRTY_DENTS);
@@ -503,7 +520,6 @@
 		}
 		set_cold_data(page);
 		do_write_data_page(page);
-		mutex_unlock_op(sbi, DATA_WRITE);
 		clear_cold_data(page);
 	}
 out:
@@ -530,6 +546,7 @@
 
 next_step:
 	entry = sum;
+
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		struct page *data_page;
 		struct inode *inode;
@@ -567,7 +584,7 @@
 				continue;
 
 			data_page = find_data_page(inode,
-					start_bidx + ofs_in_node);
+					start_bidx + ofs_in_node, false);
 			if (IS_ERR(data_page))
 				goto next_iput;
 
@@ -588,11 +605,22 @@
 next_iput:
 		iput(inode);
 	}
+
 	if (++phase < 4)
 		goto next_step;
 
-	if (gc_type == FG_GC)
+	if (gc_type == FG_GC) {
 		f2fs_submit_bio(sbi, DATA, true);
+
+		/*
+		 * In the case of FG_GC, it'd be better to reclaim this victim
+		 * completely.
+		 */
+		if (get_valid_blocks(sbi, segno, 1) != 0) {
+			phase = 2;
+			goto next_step;
+		}
+	}
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -611,18 +639,15 @@
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
+	struct blk_plug plug;
 
 	/* read segment summary of victim */
 	sum_page = get_sum_page(sbi, segno);
 	if (IS_ERR(sum_page))
 		return;
 
-	/*
-	 * CP needs to lock sum_page. In this time, we don't need
-	 * to lock this page, because this summary page is not gone anywhere.
-	 * Also, this page is not gonna be updated before GC is done.
-	 */
-	unlock_page(sum_page);
+	blk_start_plug(&plug);
+
 	sum = page_address(sum_page);
 
 	switch (GET_SUM_TYPE((&sum->footer))) {
@@ -633,10 +658,12 @@
 		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
 		break;
 	}
+	blk_finish_plug(&plug);
+
 	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
 	stat_inc_call_count(sbi->stat_info);
 
-	f2fs_put_page(sum_page, 0);
+	f2fs_put_page(sum_page, 1);
 }
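
The blk_start_plug()/blk_finish_plug() pair added here batches the many small reads issued while walking a victim segment, so the block layer can merge them before dispatch (ra_nat_pages() and get_node_page_ra() below gain the same treatment). The generic pattern, for reference:

	struct blk_plug plug;

	blk_start_plug(&plug);	/* queue bios per-task instead of dispatching */
	/* ... submit many small, likely-contiguous requests ... */
	blk_finish_plug(&plug);	/* hand them to the block layer in one batch */
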
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
@@ -652,8 +679,10 @@
 	if (!(sbi->sb->s_flags & MS_ACTIVE))
 		goto stop;
 
-	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree))
+	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
 		gc_type = FG_GC;
+		write_checkpoint(sbi, false);
+	}
 
 	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
 		goto stop;
@@ -662,9 +691,11 @@
 	for (i = 0; i < sbi->segs_per_sec; i++)
 		do_garbage_collect(sbi, segno + i, &ilist, gc_type);
 
-	if (gc_type == FG_GC &&
-			get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+	if (gc_type == FG_GC) {
+		sbi->cur_victim_sec = NULL_SEGNO;
 		nfree++;
+		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
+	}
 
 	if (has_not_enough_free_secs(sbi, nfree))
 		goto gc_more;
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 30b2db0..2c6a6bd 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -13,9 +13,9 @@
 						 * whether IO subsystem is idle
 						 * or not
 						 */
-#define GC_THREAD_MIN_SLEEP_TIME	10000 /* milliseconds */
-#define GC_THREAD_MAX_SLEEP_TIME	30000
-#define GC_THREAD_NOGC_SLEEP_TIME	10000
+#define GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
+#define GC_THREAD_MAX_SLEEP_TIME	60000
+#define GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
 #define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
 #define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */
 
@@ -58,6 +58,9 @@
 
 static inline long increase_sleep_time(long wait)
 {
+	if (wait == GC_THREAD_NOGC_SLEEP_TIME)
+		return wait;
+
 	wait += GC_THREAD_MIN_SLEEP_TIME;
 	if (wait > GC_THREAD_MAX_SLEEP_TIME)
 		wait = GC_THREAD_MAX_SLEEP_TIME;
@@ -66,6 +69,9 @@
 
 static inline long decrease_sleep_time(long wait)
 {
+	if (wait == GC_THREAD_NOGC_SLEEP_TIME)
+		wait = GC_THREAD_MAX_SLEEP_TIME;
+
 	wait -= GC_THREAD_MIN_SLEEP_TIME;
 	if (wait <= GC_THREAD_MIN_SLEEP_TIME)
 		wait = GC_THREAD_MIN_SLEEP_TIME;
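
With the gc_thread_func() change above (no explicit reset after a no-GC round), these two helpers now carry the whole back-off policy. A simplified sketch of the loop presumably driving them; is_idle() is a stand-in for whatever idleness test the thread uses, which is not part of this hunk:

	long wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	for (;;) {
		/* sleep for wait_ms, then decide the next interval */
		if (f2fs_gc(sbi))		/* nothing reclaimable */
			wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
		else if (is_idle(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);
	}
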
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index ddae412..91ac7f9 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -16,6 +16,8 @@
 #include "f2fs.h"
 #include "node.h"
 
+#include <trace/events/f2fs.h>
+
 void f2fs_set_inode_flags(struct inode *inode)
 {
 	unsigned int flags = F2FS_I(inode)->i_flags;
@@ -44,7 +46,11 @@
 	struct f2fs_inode *ri;
 
 	/* Check if ino is within scope */
-	check_nid_range(sbi, inode->i_ino);
+	if (check_nid_range(sbi, inode->i_ino)) {
+		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
+			 (unsigned long) inode->i_ino);
+		return -EINVAL;
+	}
 
 	node_page = get_node_page(sbi, inode->i_ino);
 	if (IS_ERR(node_page))
@@ -76,7 +82,6 @@
 	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
 	fi->i_flags = le32_to_cpu(ri->i_flags);
 	fi->flags = 0;
-	fi->data_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver) - 1;
 	fi->i_advise = ri->i_advise;
 	fi->i_pino = le32_to_cpu(ri->i_pino);
 	get_extent_info(&fi->ext, ri->i_ext);
@@ -88,13 +93,16 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
-	int ret;
+	int ret = 0;
 
 	inode = iget_locked(sb, ino);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
-	if (!(inode->i_state & I_NEW))
+
+	if (!(inode->i_state & I_NEW)) {
+		trace_f2fs_iget(inode);
 		return inode;
+	}
 	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
 		goto make_now;
 
@@ -136,11 +144,12 @@
 		goto bad_inode;
 	}
 	unlock_new_inode(inode);
-
+	trace_f2fs_iget(inode);
 	return inode;
 
 bad_inode:
 	iget_failed(inode);
+	trace_f2fs_iget_exit(inode, ret);
 	return ERR_PTR(ret);
 }
 
@@ -192,11 +201,24 @@
 	set_page_dirty(node_page);
 }
 
-int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+int update_inode_page(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *node_page;
-	bool need_lock = false;
+
+	node_page = get_node_page(sbi, inode->i_ino);
+	if (IS_ERR(node_page))
+		return PTR_ERR(node_page);
+
+	update_inode(inode, node_page);
+	f2fs_put_page(node_page, 1);
+	return 0;
+}
+
+int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	int ret, ilock;
 
 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
 			inode->i_ino == F2FS_META_INO(sbi))
@@ -205,25 +227,14 @@
 	if (wbc)
 		f2fs_balance_fs(sbi);
 
-	node_page = get_node_page(sbi, inode->i_ino);
-	if (IS_ERR(node_page))
-		return PTR_ERR(node_page);
-
-	if (!PageDirty(node_page)) {
-		need_lock = true;
-		f2fs_put_page(node_page, 1);
-		mutex_lock(&sbi->write_inode);
-		node_page = get_node_page(sbi, inode->i_ino);
-		if (IS_ERR(node_page)) {
-			mutex_unlock(&sbi->write_inode);
-			return PTR_ERR(node_page);
-		}
-	}
-	update_inode(inode, node_page);
-	f2fs_put_page(node_page, 1);
-	if (need_lock)
-		mutex_unlock(&sbi->write_inode);
-	return 0;
+	/*
+	 * We need to lock here to prevent producing dirty node pages
+	 * during urgent cleaning when running out of free sections.
+	 */
+	ilock = mutex_lock_op(sbi);
+	ret = update_inode_page(inode);
+	mutex_unlock_op(sbi, ilock);
+	return ret;
 }
 
 /*
@@ -232,7 +243,9 @@
 void f2fs_evict_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	int ilock;
 
+	trace_f2fs_evict_inode(inode);
 	truncate_inode_pages(&inode->i_data, 0);
 
 	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
@@ -252,7 +265,10 @@
 	if (F2FS_HAS_BLOCKS(inode))
 		f2fs_truncate(inode);
 
+	ilock = mutex_lock_op(sbi);
 	remove_inode_page(inode);
+	mutex_unlock_op(sbi, ilock);
+
 	sb_end_intwrite(inode->i_sb);
 no_delete:
 	clear_inode(inode);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 1a49b88..47abc97 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -15,8 +15,10 @@
 #include <linux/ctype.h>
 
 #include "f2fs.h"
+#include "node.h"
 #include "xattr.h"
 #include "acl.h"
+#include <trace/events/f2fs.h>
 
 static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 {
@@ -25,19 +27,19 @@
 	nid_t ino;
 	struct inode *inode;
 	bool nid_free = false;
-	int err;
+	int err, ilock;
 
 	inode = new_inode(sb);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock_op(sbi, NODE_NEW);
+	ilock = mutex_lock_op(sbi);
 	if (!alloc_nid(sbi, &ino)) {
-		mutex_unlock_op(sbi, NODE_NEW);
+		mutex_unlock_op(sbi, ilock);
 		err = -ENOSPC;
 		goto fail;
 	}
-	mutex_unlock_op(sbi, NODE_NEW);
+	mutex_unlock_op(sbi, ilock);
 
 	inode->i_uid = current_fsuid();
 
@@ -61,7 +63,7 @@
 		nid_free = true;
 		goto out;
 	}
-
+	trace_f2fs_new_inode(inode, 0);
 	mark_inode_dirty(inode);
 	return inode;
 
@@ -69,6 +71,8 @@
 	clear_nlink(inode);
 	unlock_new_inode(inode);
 fail:
+	trace_f2fs_new_inode(inode, err);
+	make_bad_inode(inode);
 	iput(inode);
 	if (nid_free)
 		alloc_nid_failed(sbi, ino);
@@ -82,7 +86,7 @@
 	int ret;
 
 	if (sublen > slen)
-		return 1;
+		return 0;
 
 	ret = memcmp(s + slen - sublen, sub, sublen);
 	if (ret) {	/* compare upper case */
@@ -90,16 +94,16 @@
 		char upper_sub[8];
 		for (i = 0; i < sublen && i < sizeof(upper_sub); i++)
 			upper_sub[i] = toupper(sub[i]);
-		return memcmp(s + slen - sublen, upper_sub, sublen);
+		return !memcmp(s + slen - sublen, upper_sub, sublen);
 	}
 
-	return ret;
+	return !ret;
 }
 
 /*
  * Set multimedia files as cold files for hot/cold data separation
  */
-static inline void set_cold_file(struct f2fs_sb_info *sbi, struct inode *inode,
+static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
 		const unsigned char *name)
 {
 	int i;
@@ -107,8 +111,8 @@
 
 	int count = le32_to_cpu(sbi->raw_super->extension_count);
 	for (i = 0; i < count; i++) {
-		if (!is_multimedia_file(name, extlist[i])) {
-			F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT;
+		if (is_multimedia_file(name, extlist[i])) {
+			set_cold_file(inode);
 			break;
 		}
 	}
@@ -121,7 +125,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 	nid_t ino = 0;
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
@@ -130,14 +134,16 @@
 		return PTR_ERR(inode);
 
 	if (!test_opt(sbi, DISABLE_EXT_IDENTIFY))
-		set_cold_file(sbi, inode, dentry->d_name.name);
+		set_cold_files(sbi, inode, dentry->d_name.name);
 
 	inode->i_op = &f2fs_file_inode_operations;
 	inode->i_fop = &f2fs_file_operations;
 	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 	ino = inode->i_ino;
 
+	ilock = mutex_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
+	mutex_unlock_op(sbi, ilock);
 	if (err)
 		goto out;
 
@@ -150,6 +156,7 @@
 out:
 	clear_nlink(inode);
 	unlock_new_inode(inode);
+	make_bad_inode(inode);
 	iput(inode);
 	alloc_nid_failed(sbi, ino);
 	return err;
@@ -161,7 +168,7 @@
 	struct inode *inode = old_dentry->d_inode;
 	struct super_block *sb = dir->i_sb;
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
@@ -169,14 +176,23 @@
 	atomic_inc(&inode->i_count);
 
 	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+	ilock = mutex_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
+	mutex_unlock_op(sbi, ilock);
 	if (err)
 		goto out;
 
+	/*
+	 * This file should be checkpointed during fsync.
+	 * We lose track of i_pino from now on.
+	 */
+	set_cp_file(inode);
+
 	d_instantiate(dentry, inode);
 	return 0;
 out:
 	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+	make_bad_inode(inode);
 	iput(inode);
 	return err;
 }
@@ -197,7 +213,7 @@
 	struct f2fs_dir_entry *de;
 	struct page *page;
 
-	if (dentry->d_name.len > F2FS_MAX_NAME_LEN)
+	if (dentry->d_name.len > F2FS_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
 	de = f2fs_find_entry(dir, &dentry->d_name, &page);
@@ -222,7 +238,9 @@
 	struct f2fs_dir_entry *de;
 	struct page *page;
 	int err = -ENOENT;
+	int ilock;
 
+	trace_f2fs_unlink_enter(dir, dentry);
 	f2fs_balance_fs(sbi);
 
 	de = f2fs_find_entry(dir, &dentry->d_name, &page);
@@ -236,11 +254,14 @@
 		goto fail;
 	}
 
+	ilock = mutex_lock_op(sbi);
 	f2fs_delete_entry(de, page, inode);
+	mutex_unlock_op(sbi, ilock);
 
 	/* In order to evict this inode, we set it dirty */
 	mark_inode_dirty(inode);
 fail:
+	trace_f2fs_unlink_exit(inode, err);
 	return err;
 }
 
@@ -251,7 +272,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 	size_t symlen = strlen(symname) + 1;
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
@@ -262,7 +283,9 @@
 	inode->i_op = &f2fs_symlink_inode_operations;
 	inode->i_mapping->a_ops = &f2fs_dblock_aops;
 
+	ilock = mutex_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
+	mutex_unlock_op(sbi, ilock);
 	if (err)
 		goto out;
 
@@ -275,6 +298,7 @@
 out:
 	clear_nlink(inode);
 	unlock_new_inode(inode);
+	make_bad_inode(inode);
 	iput(inode);
 	alloc_nid_failed(sbi, inode->i_ino);
 	return err;
@@ -284,7 +308,7 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
 	struct inode *inode;
-	int err;
+	int err, ilock;
 
 	f2fs_balance_fs(sbi);
 
@@ -298,7 +322,9 @@
 	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
 
 	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+	ilock = mutex_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
+	mutex_unlock_op(sbi, ilock);
 	if (err)
 		goto out_fail;
 
@@ -313,6 +339,7 @@
 	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
 	clear_nlink(inode);
 	unlock_new_inode(inode);
+	make_bad_inode(inode);
 	iput(inode);
 	alloc_nid_failed(sbi, inode->i_ino);
 	return err;
@@ -333,6 +360,7 @@
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 	struct inode *inode;
 	int err = 0;
+	int ilock;
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
@@ -346,7 +374,9 @@
 	init_special_inode(inode, inode->i_mode, rdev);
 	inode->i_op = &f2fs_special_inode_operations;
 
+	ilock = mutex_lock_op(sbi);
 	err = f2fs_add_link(dentry, inode);
+	mutex_unlock_op(sbi, ilock);
 	if (err)
 		goto out;
 
@@ -357,6 +387,7 @@
 out:
 	clear_nlink(inode);
 	unlock_new_inode(inode);
+	make_bad_inode(inode);
 	iput(inode);
 	alloc_nid_failed(sbi, inode->i_ino);
 	return err;
@@ -374,7 +405,7 @@
 	struct f2fs_dir_entry *old_dir_entry = NULL;
 	struct f2fs_dir_entry *old_entry;
 	struct f2fs_dir_entry *new_entry;
-	int err = -ENOENT;
+	int err = -ENOENT, ilock = -1;
 
 	f2fs_balance_fs(sbi);
 
@@ -389,7 +420,7 @@
 			goto out_old;
 	}
 
-	mutex_lock_op(sbi, RENAME);
+	ilock = mutex_lock_op(sbi);
 
 	if (new_inode) {
 		struct page *new_page;
@@ -412,7 +443,7 @@
 		drop_nlink(new_inode);
 		if (!new_inode->i_nlink)
 			add_orphan_inode(sbi, new_inode->i_ino);
-		f2fs_write_inode(new_inode, NULL);
+		update_inode_page(new_inode);
 	} else {
 		err = f2fs_add_link(new_dentry, old_inode);
 		if (err)
@@ -420,12 +451,11 @@
 
 		if (old_dir_entry) {
 			inc_nlink(new_dir);
-			f2fs_write_inode(new_dir, NULL);
+			update_inode_page(new_dir);
 		}
 	}
 
 	old_inode->i_ctime = CURRENT_TIME;
-	set_inode_flag(F2FS_I(old_inode), FI_NEED_CP);
 	mark_inode_dirty(old_inode);
 
 	f2fs_delete_entry(old_entry, old_page, NULL);
@@ -439,10 +469,10 @@
 			f2fs_put_page(old_dir_page, 0);
 		}
 		drop_nlink(old_dir);
-		f2fs_write_inode(old_dir, NULL);
+		update_inode_page(old_dir);
 	}
 
-	mutex_unlock_op(sbi, RENAME);
+	mutex_unlock_op(sbi, ilock);
 	return 0;
 
 out_dir:
@@ -450,7 +480,7 @@
 		kunmap(old_dir_page);
 		f2fs_put_page(old_dir_page, 0);
 	}
-	mutex_unlock_op(sbi, RENAME);
+	mutex_unlock_op(sbi, ilock);
 out_old:
 	kunmap(old_page);
 	f2fs_put_page(old_page, 0);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e275218..3df43b4 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -19,6 +19,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include <trace/events/f2fs.h>
 
 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
@@ -88,10 +89,13 @@
 {
 	struct address_space *mapping = sbi->meta_inode->i_mapping;
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct blk_plug plug;
 	struct page *page;
 	pgoff_t index;
 	int i;
 
+	blk_start_plug(&plug);
+
 	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
 		if (nid >= nm_i->max_nid)
 			nid = 0;
@@ -100,12 +104,16 @@
 		page = grab_cache_page(mapping, index);
 		if (!page)
 			continue;
-		if (f2fs_readpage(sbi, page, index, READ)) {
+		if (PageUptodate(page)) {
 			f2fs_put_page(page, 1);
 			continue;
 		}
+		if (f2fs_readpage(sbi, page, index, READ))
+			continue;
+
 		f2fs_put_page(page, 0);
 	}
+	blk_finish_plug(&plug);
 }
 
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
@@ -236,7 +244,7 @@
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 
-	if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
+	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
 		return 0;
 
 	write_lock(&nm_i->nat_tree_lock);
@@ -320,15 +328,14 @@
 	noffset[0] = 0;
 
 	if (block < direct_index) {
-		offset[n++] = block;
-		level = 0;
+		offset[n] = block;
 		goto got;
 	}
 	block -= direct_index;
 	if (block < direct_blks) {
 		offset[n++] = NODE_DIR1_BLOCK;
 		noffset[n] = 1;
-		offset[n++] = block;
+		offset[n] = block;
 		level = 1;
 		goto got;
 	}
@@ -336,7 +343,7 @@
 	if (block < direct_blks) {
 		offset[n++] = NODE_DIR2_BLOCK;
 		noffset[n] = 2;
-		offset[n++] = block;
+		offset[n] = block;
 		level = 1;
 		goto got;
 	}
@@ -346,7 +353,7 @@
 		noffset[n] = 3;
 		offset[n++] = block / direct_blks;
 		noffset[n] = 4 + offset[n - 1];
-		offset[n++] = block % direct_blks;
+		offset[n] = block % direct_blks;
 		level = 2;
 		goto got;
 	}
@@ -356,7 +363,7 @@
 		noffset[n] = 4 + dptrs_per_blk;
 		offset[n++] = block / direct_blks;
 		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
-		offset[n++] = block % direct_blks;
+		offset[n] = block % direct_blks;
 		level = 2;
 		goto got;
 	}
@@ -371,7 +378,7 @@
 		noffset[n] = 7 + (dptrs_per_blk * 2) +
 			      offset[n - 2] * (dptrs_per_blk + 1) +
 			      offset[n - 1];
-		offset[n++] = block % direct_blks;
+		offset[n] = block % direct_blks;
 		level = 3;
 		goto got;
 	} else {
@@ -383,8 +390,11 @@
 
 /*
  * Caller should call f2fs_put_dnode(dn).
+ * Also, it should grab and release a mutex by calling mutex_lock_op() and
+ * mutex_unlock_op() only if mode is set to ALLOC_NODE.
+ * In the case of LOOKUP_NODE or LOOKUP_NODE_RA, no mutex is needed.
  */
-int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int ro)
+int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
 	struct page *npage[4];
@@ -403,7 +413,8 @@
 		return PTR_ERR(npage[0]);
 
 	parent = npage[0];
-	nids[1] = get_nid(parent, offset[0], true);
+	if (level != 0)
+		nids[1] = get_nid(parent, offset[0], true);
 	dn->inode_page = npage[0];
 	dn->inode_page_locked = true;
 
@@ -411,12 +422,9 @@
 	for (i = 1; i <= level; i++) {
 		bool done = false;
 
-		if (!nids[i] && !ro) {
-			mutex_lock_op(sbi, NODE_NEW);
-
+		if (!nids[i] && mode == ALLOC_NODE) {
 			/* alloc new node */
 			if (!alloc_nid(sbi, &(nids[i]))) {
-				mutex_unlock_op(sbi, NODE_NEW);
 				err = -ENOSPC;
 				goto release_pages;
 			}
@@ -425,16 +433,14 @@
 			npage[i] = new_node_page(dn, noffset[i]);
 			if (IS_ERR(npage[i])) {
 				alloc_nid_failed(sbi, nids[i]);
-				mutex_unlock_op(sbi, NODE_NEW);
 				err = PTR_ERR(npage[i]);
 				goto release_pages;
 			}
 
 			set_nid(parent, offset[i - 1], nids[i], i == 1);
 			alloc_nid_done(sbi, nids[i]);
-			mutex_unlock_op(sbi, NODE_NEW);
 			done = true;
-		} else if (ro && i == level && level > 1) {
+		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
 			npage[i] = get_node_page_ra(parent, offset[i - 1]);
 			if (IS_ERR(npage[i])) {
 				err = PTR_ERR(npage[i]);
@@ -507,6 +513,7 @@
 
 	f2fs_put_page(dn->node_page, 1);
 	dn->node_page = NULL;
+	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
 }
 
 static int truncate_dnode(struct dnode_of_data *dn)
@@ -547,9 +554,13 @@
 	if (dn->nid == 0)
 		return NIDS_PER_BLOCK + 1;
 
+	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
+
 	page = get_node_page(sbi, dn->nid);
-	if (IS_ERR(page))
+	if (IS_ERR(page)) {
+		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
 		return PTR_ERR(page);
+	}
 
 	rn = (struct f2fs_node *)page_address(page);
 	if (depth < 3) {
@@ -591,10 +602,12 @@
 	} else {
 		f2fs_put_page(page, 1);
 	}
+	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
 	return freed;
 
 out_err:
 	f2fs_put_page(page, 1);
+	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
 	return ret;
 }
 
@@ -649,6 +662,9 @@
 fail:
 	for (i = depth - 3; i >= 0; i--)
 		f2fs_put_page(pages[i], 1);
+
+	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
+
 	return err;
 }
 
@@ -658,6 +674,7 @@
 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+	struct address_space *node_mapping = sbi->node_inode->i_mapping;
 	int err = 0, cont = 1;
 	int level, offset[4], noffset[4];
 	unsigned int nofs = 0;
@@ -665,11 +682,15 @@
 	struct dnode_of_data dn;
 	struct page *page;
 
-	level = get_node_path(from, offset, noffset);
+	trace_f2fs_truncate_inode_blocks_enter(inode, from);
 
+	level = get_node_path(from, offset, noffset);
+restart:
 	page = get_node_page(sbi, inode->i_ino);
-	if (IS_ERR(page))
+	if (IS_ERR(page)) {
+		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
 		return PTR_ERR(page);
+	}
 
 	set_new_dnode(&dn, inode, page, NULL, 0);
 	unlock_page(page);
@@ -728,6 +749,10 @@
 		if (offset[1] == 0 &&
 				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
 			lock_page(page);
+			if (page->mapping != node_mapping) {
+				f2fs_put_page(page, 1);
+				goto restart;
+			}
 			wait_on_page_writeback(page);
 			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
 			set_page_dirty(page);
@@ -739,9 +764,14 @@
 	}
 fail:
 	f2fs_put_page(page, 0);
+	trace_f2fs_truncate_inode_blocks_exit(inode, err);
 	return err > 0 ? 0 : err;
 }
 
+/*
+ * Caller should grab and release a mutex by calling mutex_lock_op() and
+ * mutex_unlock_op().
+ */
 int remove_inode_page(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -749,21 +779,16 @@
 	nid_t ino = inode->i_ino;
 	struct dnode_of_data dn;
 
-	mutex_lock_op(sbi, NODE_TRUNC);
 	page = get_node_page(sbi, ino);
-	if (IS_ERR(page)) {
-		mutex_unlock_op(sbi, NODE_TRUNC);
+	if (IS_ERR(page))
 		return PTR_ERR(page);
-	}
 
 	if (F2FS_I(inode)->i_xattr_nid) {
 		nid_t nid = F2FS_I(inode)->i_xattr_nid;
 		struct page *npage = get_node_page(sbi, nid);
 
-		if (IS_ERR(npage)) {
-			mutex_unlock_op(sbi, NODE_TRUNC);
+		if (IS_ERR(npage))
 			return PTR_ERR(npage);
-		}
 
 		F2FS_I(inode)->i_xattr_nid = 0;
 		set_new_dnode(&dn, inode, page, npage, nid);
@@ -775,23 +800,18 @@
 	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
 	set_new_dnode(&dn, inode, page, page, ino);
 	truncate_node(&dn);
-
-	mutex_unlock_op(sbi, NODE_TRUNC);
 	return 0;
 }
 
 int new_inode_page(struct inode *inode, const struct qstr *name)
 {
-	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	struct page *page;
 	struct dnode_of_data dn;
 
 	/* allocate inode page for new inode */
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-	mutex_lock_op(sbi, NODE_NEW);
 	page = new_node_page(&dn, 0);
 	init_dent_inode(name, page);
-	mutex_unlock_op(sbi, NODE_NEW);
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 	f2fs_put_page(page, 1);
@@ -844,6 +864,12 @@
 	return ERR_PTR(err);
 }
 
+/*
+ * Depending on the return value, the caller should do the following:
+ * 0: f2fs_put_page(page, 0)
+ * LOCKED_PAGE: f2fs_put_page(page, 1)
+ * error: nothing
+ */
 static int read_node_page(struct page *page, int type)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
@@ -851,8 +877,14 @@
 
 	get_node_info(sbi, page->index, &ni);
 
-	if (ni.blk_addr == NULL_ADDR)
+	if (ni.blk_addr == NULL_ADDR) {
+		f2fs_put_page(page, 1);
 		return -ENOENT;
+	}
+
+	if (PageUptodate(page))
+		return LOCKED_PAGE;
+
 	return f2fs_readpage(sbi, page, ni.blk_addr, type);
 }
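
The contract in the comment above is easiest to read from the caller side; this mirrors what get_node_page() below does with the three outcomes:

	err = read_node_page(page, READ_SYNC);
	if (err < 0)			/* error: page already released */
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)	/* up to date and still locked */
		goto got_it;
	/* err == 0: I/O submitted, lock dropped; re-lock and re-verify */
	lock_page(page);
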
 
@@ -863,40 +895,53 @@
 {
 	struct address_space *mapping = sbi->node_inode->i_mapping;
 	struct page *apage;
+	int err;
 
 	apage = find_get_page(mapping, nid);
-	if (apage && PageUptodate(apage))
-		goto release_out;
+	if (apage && PageUptodate(apage)) {
+		f2fs_put_page(apage, 0);
+		return;
+	}
 	f2fs_put_page(apage, 0);
 
 	apage = grab_cache_page(mapping, nid);
 	if (!apage)
 		return;
 
-	if (read_node_page(apage, READA))
-		unlock_page(apage);
-
-release_out:
-	f2fs_put_page(apage, 0);
+	err = read_node_page(apage, READA);
+	if (err == 0)
+		f2fs_put_page(apage, 0);
+	else if (err == LOCKED_PAGE)
+		f2fs_put_page(apage, 1);
 	return;
 }
 
 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
 {
-	int err;
-	struct page *page;
 	struct address_space *mapping = sbi->node_inode->i_mapping;
-
+	struct page *page;
+	int err;
+repeat:
 	page = grab_cache_page(mapping, nid);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err) {
-		f2fs_put_page(page, 1);
+	if (err < 0)
 		return ERR_PTR(err);
-	}
+	else if (err == LOCKED_PAGE)
+		goto got_it;
 
+	lock_page(page);
+	if (!PageUptodate(page)) {
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
+	}
+	if (page->mapping != mapping) {
+		f2fs_put_page(page, 1);
+		goto repeat;
+	}
+got_it:
 	BUG_ON(nid != nid_of_node(page));
 	mark_page_accessed(page);
 	return page;
@@ -910,31 +955,27 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
 	struct address_space *mapping = sbi->node_inode->i_mapping;
-	int i, end;
-	int err = 0;
-	nid_t nid;
+	struct blk_plug plug;
 	struct page *page;
+	int err, i, end;
+	nid_t nid;
 
 	/* First, try getting the desired direct node. */
 	nid = get_nid(parent, start, false);
 	if (!nid)
 		return ERR_PTR(-ENOENT);
-
-	page = find_get_page(mapping, nid);
-	if (page && PageUptodate(page))
-		goto page_hit;
-	f2fs_put_page(page, 0);
-
 repeat:
 	page = grab_cache_page(mapping, nid);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	err = read_node_page(page, READA);
-	if (err) {
-		f2fs_put_page(page, 1);
+	err = read_node_page(page, READ_SYNC);
+	if (err < 0)
 		return ERR_PTR(err);
-	}
+	else if (err == LOCKED_PAGE)
+		goto page_hit;
+
+	blk_start_plug(&plug);
 
 	/* Then, try readahead for siblings of the desired node */
 	end = start + MAX_RA_NODE;
@@ -946,18 +987,19 @@
 		ra_node_page(sbi, nid);
 	}
 
-page_hit:
-	lock_page(page);
-	if (PageError(page)) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(-EIO);
-	}
+	blk_finish_plug(&plug);
 
-	/* Has the page been truncated? */
+	lock_page(page);
 	if (page->mapping != mapping) {
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
+page_hit:
+	if (!PageUptodate(page)) {
+		f2fs_put_page(page, 1);
+		return ERR_PTR(-EIO);
+	}
+	mark_page_accessed(page);
 	return page;
 }
 
@@ -972,7 +1014,7 @@
 		if (!dn->inode_page_locked)
 			unlock_page(dn->inode_page);
 	} else {
-		f2fs_write_inode(dn->inode, NULL);
+		update_inode_page(dn->inode);
 	}
 }
 
@@ -1087,17 +1129,8 @@
 	block_t new_addr;
 	struct node_info ni;
 
-	if (wbc->for_reclaim) {
-		dec_page_count(sbi, F2FS_DIRTY_NODES);
-		wbc->pages_skipped++;
-		set_page_dirty(page);
-		return AOP_WRITEPAGE_ACTIVATE;
-	}
-
 	wait_on_page_writeback(page);
 
-	mutex_lock_op(sbi, NODE_WRITE);
-
 	/* get old block addr of this node page */
 	nid = nid_of_node(page);
 	BUG_ON(page->index != nid);
@@ -1105,17 +1138,25 @@
 	get_node_info(sbi, nid, &ni);
 
 	/* This page is already truncated */
-	if (ni.blk_addr == NULL_ADDR)
+	if (ni.blk_addr == NULL_ADDR) {
+		dec_page_count(sbi, F2FS_DIRTY_NODES);
+		unlock_page(page);
 		return 0;
+	}
 
+	if (wbc->for_reclaim) {
+		dec_page_count(sbi, F2FS_DIRTY_NODES);
+		wbc->pages_skipped++;
+		set_page_dirty(page);
+		return AOP_WRITEPAGE_ACTIVATE;
+	}
+
+	mutex_lock(&sbi->node_write);
 	set_page_writeback(page);
-
-	/* insert node offset */
 	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
 	set_node_addr(sbi, &ni, new_addr);
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
-
-	mutex_unlock_op(sbi, NODE_WRITE);
+	mutex_unlock(&sbi->node_write);
 	unlock_page(page);
 	return 0;
 }
@@ -1130,12 +1171,11 @@
 			    struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-	struct block_device *bdev = sbi->sb->s_bdev;
 	long nr_to_write = wbc->nr_to_write;
 
 	/* First check balancing cached NAT entries */
 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
-		write_checkpoint(sbi, false);
+		f2fs_sync_fs(sbi->sb, true);
 		return 0;
 	}
 
@@ -1144,10 +1184,9 @@
 		return 0;
 
 	/* if mounting is failed, skip writing node pages */
-	wbc->nr_to_write = bio_get_nr_vecs(bdev);
+	wbc->nr_to_write = max_hw_blocks(sbi);
 	sync_node_pages(sbi, 0, wbc);
-	wbc->nr_to_write = nr_to_write -
-		(bio_get_nr_vecs(bdev) - wbc->nr_to_write);
+	wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
 	return 0;
 }
 
@@ -1178,7 +1217,7 @@
 static int f2fs_release_node_page(struct page *page, gfp_t wait)
 {
 	ClearPagePrivate(page);
-	return 0;
+	return 1;
 }
 
 /*
@@ -1195,14 +1234,13 @@
 static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
 {
 	struct list_head *this;
-	struct free_nid *i = NULL;
+	struct free_nid *i;
 	list_for_each(this, head) {
 		i = list_entry(this, struct free_nid, list);
 		if (i->nid == n)
-			break;
-		i = NULL;
+			return i;
 	}
-	return i;
+	return NULL;
 }
 
 static void __del_from_free_nid_list(struct free_nid *i)
@@ -1211,11 +1249,29 @@
 	kmem_cache_free(free_nid_slab, i);
 }
 
-static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
 {
 	struct free_nid *i;
+	struct nat_entry *ne;
+	bool allocated = false;
 
 	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
+		return -1;
+
+	/* 0 nid should not be used */
+	if (nid == 0)
+		return 0;
+
+	if (!build)
+		goto retry;
+
+	/* do not add allocated nids */
+	read_lock(&nm_i->nat_tree_lock);
+	ne = __lookup_nat_cache(nm_i, nid);
+	if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+		allocated = true;
+	read_unlock(&nm_i->nat_tree_lock);
+	if (allocated)
 		return 0;
 retry:
 	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
@@ -1250,63 +1306,59 @@
 	spin_unlock(&nm_i->free_nid_list_lock);
 }
 
-static int scan_nat_page(struct f2fs_nm_info *nm_i,
+static void scan_nat_page(struct f2fs_nm_info *nm_i,
 			struct page *nat_page, nid_t start_nid)
 {
 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
 	block_t blk_addr;
-	int fcnt = 0;
 	int i;
 
-	/* 0 nid should not be used */
-	if (start_nid == 0)
-		++start_nid;
-
 	i = start_nid % NAT_ENTRY_PER_BLOCK;
 
 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
-		blk_addr  = le32_to_cpu(nat_blk->entries[i].block_addr);
+
+		if (start_nid >= nm_i->max_nid)
+			break;
+
+		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
 		BUG_ON(blk_addr == NEW_ADDR);
-		if (blk_addr == NULL_ADDR)
-			fcnt += add_free_nid(nm_i, start_nid);
+		if (blk_addr == NULL_ADDR) {
+			if (add_free_nid(nm_i, start_nid, true) < 0)
+				break;
+		}
 	}
-	return fcnt;
 }
 
 static void build_free_nids(struct f2fs_sb_info *sbi)
 {
-	struct free_nid *fnid, *next_fnid;
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
 	struct f2fs_summary_block *sum = curseg->sum_blk;
-	nid_t nid = 0;
-	bool is_cycled = false;
-	int fcnt = 0;
-	int i;
+	int i = 0;
+	nid_t nid = nm_i->next_scan_nid;
 
-	nid = nm_i->next_scan_nid;
-	nm_i->init_scan_nid = nid;
+	/* Enough entries */
+	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+		return;
 
+	/* readahead nat pages to be scanned */
 	ra_nat_pages(sbi, nid);
 
 	while (1) {
 		struct page *page = get_current_nat_page(sbi, nid);
 
-		fcnt += scan_nat_page(nm_i, page, nid);
+		scan_nat_page(nm_i, page, nid);
 		f2fs_put_page(page, 1);
 
 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
-
-		if (nid >= nm_i->max_nid) {
+		if (nid >= nm_i->max_nid)
 			nid = 0;
-			is_cycled = true;
-		}
-		if (fcnt > MAX_FREE_NIDS)
-			break;
-		if (is_cycled && nm_i->init_scan_nid <= nid)
+
+		if (i++ == FREE_NID_PAGES)
 			break;
 	}
 
+	/* remember where the next scan should resume to find more free nids */
 	nm_i->next_scan_nid = nid;
 
 	/* find free nids from current sum_pages */
@@ -1315,22 +1367,11 @@
 		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
 		nid = le32_to_cpu(nid_in_journal(sum, i));
 		if (addr == NULL_ADDR)
-			add_free_nid(nm_i, nid);
+			add_free_nid(nm_i, nid, true);
 		else
 			remove_free_nid(nm_i, nid);
 	}
 	mutex_unlock(&curseg->curseg_mutex);
-
-	/* remove the free nids from current allocated nids */
-	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
-		struct nat_entry *ne;
-
-		read_lock(&nm_i->nat_tree_lock);
-		ne = __lookup_nat_cache(nm_i, fnid->nid);
-		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
-			remove_free_nid(nm_i, fnid->nid);
-		read_unlock(&nm_i->nat_tree_lock);
-	}
 }
 
 /*
@@ -1344,41 +1385,36 @@
 	struct free_nid *i = NULL;
 	struct list_head *this;
 retry:
-	mutex_lock(&nm_i->build_lock);
-	if (!nm_i->fcnt) {
-		/* scan NAT in order to build free nid list */
-		build_free_nids(sbi);
-		if (!nm_i->fcnt) {
-			mutex_unlock(&nm_i->build_lock);
-			return false;
-		}
-	}
-	mutex_unlock(&nm_i->build_lock);
+	if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
+		return false;
 
-	/*
-	 * We check fcnt again since previous check is racy as
-	 * we didn't hold free_nid_list_lock. So other thread
-	 * could consume all of free nids.
-	 */
 	spin_lock(&nm_i->free_nid_list_lock);
-	if (!nm_i->fcnt) {
+
+	/* We should not use stale free nids created by build_free_nids */
+	if (nm_i->fcnt && !sbi->on_build_free_nids) {
+		BUG_ON(list_empty(&nm_i->free_nid_list));
+		list_for_each(this, &nm_i->free_nid_list) {
+			i = list_entry(this, struct free_nid, list);
+			if (i->state == NID_NEW)
+				break;
+		}
+
+		BUG_ON(i->state != NID_NEW);
+		*nid = i->nid;
+		i->state = NID_ALLOC;
+		nm_i->fcnt--;
 		spin_unlock(&nm_i->free_nid_list_lock);
-		goto retry;
+		return true;
 	}
-
-	BUG_ON(list_empty(&nm_i->free_nid_list));
-	list_for_each(this, &nm_i->free_nid_list) {
-		i = list_entry(this, struct free_nid, list);
-		if (i->state == NID_NEW)
-			break;
-	}
-
-	BUG_ON(i->state != NID_NEW);
-	*nid = i->nid;
-	i->state = NID_ALLOC;
-	nm_i->fcnt--;
 	spin_unlock(&nm_i->free_nid_list_lock);
-	return true;
+
+	/* Let's scan nat pages and the nat cache to get free nids */
+	mutex_lock(&nm_i->build_lock);
+	sbi->on_build_free_nids = 1;
+	build_free_nids(sbi);
+	sbi->on_build_free_nids = 0;
+	mutex_unlock(&nm_i->build_lock);
+	goto retry;
 }
 
 /*
@@ -1391,10 +1427,8 @@
 
 	spin_lock(&nm_i->free_nid_list_lock);
 	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
-	if (i) {
-		BUG_ON(i->state != NID_ALLOC);
-		__del_from_free_nid_list(i);
-	}
+	BUG_ON(!i || i->state != NID_ALLOC);
+	__del_from_free_nid_list(i);
 	spin_unlock(&nm_i->free_nid_list_lock);
 }
 
@@ -1403,8 +1437,19 @@
  */
 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	alloc_nid_done(sbi, nid);
-	add_free_nid(NM_I(sbi), nid);
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct free_nid *i;
+
+	spin_lock(&nm_i->free_nid_list_lock);
+	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+	BUG_ON(!i || i->state != NID_ALLOC);
+	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
+		__del_from_free_nid_list(i);
+	} else {
+		i->state = NID_NEW;
+		nm_i->fcnt++;
+	}
+	spin_unlock(&nm_i->free_nid_list_lock);
 }
 
 void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
@@ -1475,23 +1520,24 @@
 	sum_entry = &sum->entries[0];
 
 	for (i = 0; i < last_offset; i++, sum_entry++) {
-		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
-			goto out;
-
-		rn = (struct f2fs_node *)page_address(page);
-		sum_entry->nid = rn->footer.nid;
-		sum_entry->version = 0;
-		sum_entry->ofs_in_node = 0;
-		addr++;
-
 		/*
		 * In order to read the next node page,
		 * we must clear the PageUptodate flag.
 		 */
 		ClearPageUptodate(page);
+
+		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
+			goto out;
+
+		lock_page(page);
+		rn = (struct f2fs_node *)page_address(page);
+		sum_entry->nid = rn->footer.nid;
+		sum_entry->version = 0;
+		sum_entry->ofs_in_node = 0;
+		addr++;
 	}
-out:
 	unlock_page(page);
+out:
 	__free_pages(page, 0);
 	return 0;
 }
@@ -1614,13 +1660,11 @@
 			nid_in_journal(sum, offset) = cpu_to_le32(nid);
 		}
 
-		if (nat_get_blkaddr(ne) == NULL_ADDR) {
+		if (nat_get_blkaddr(ne) == NULL_ADDR &&
+				add_free_nid(NM_I(sbi), nid, false) <= 0) {
 			write_lock(&nm_i->nat_tree_lock);
 			__del_from_nat_cache(nm_i, ne);
 			write_unlock(&nm_i->nat_tree_lock);
-
-			/* We can reuse this freed nid at this point */
-			add_free_nid(NM_I(sbi), nid);
 		} else {
 			write_lock(&nm_i->nat_tree_lock);
 			__clear_nat_cache_dirty(nm_i, ne);
@@ -1661,19 +1705,16 @@
 	spin_lock_init(&nm_i->free_nid_list_lock);
 	rwlock_init(&nm_i->nat_tree_lock);
 
-	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
-	nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
-
-	nm_i->nat_bitmap = kzalloc(nm_i->bitmap_size, GFP_KERNEL);
-	if (!nm_i->nat_bitmap)
-		return -ENOMEM;
+	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
 	if (!version_bitmap)
 		return -EFAULT;
 
-	/* copy version bitmap */
-	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
+	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
+					GFP_KERNEL);
+	if (!nm_i->nat_bitmap)
+		return -ENOMEM;
 	return 0;
 }
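
The reworked read_node_page() contract above is tri-state: a negative value is an error, 0 means a read was submitted, and LOCKED_PAGE means the page was already uptodate and is still locked. A minimal userspace sketch of how callers branch on it (the stub below is an illustrative stand-in, not the kernel function):

```c
#include <errno.h>
#include <stdio.h>

#define LOCKED_PAGE 1	/* mirrors the new constant in node.h */

/* hypothetical stand-in for read_node_page(); not kernel code */
static int read_node_page_model(int already_uptodate, int io_error)
{
	if (io_error)
		return -EIO;		/* error: caller bails out */
	if (already_uptodate)
		return LOCKED_PAGE;	/* page valid, caller keeps the lock */
	return 0;			/* read submitted; caller re-locks later */
}

int main(void)
{
	int err = read_node_page_model(1, 0);

	if (err < 0)
		printf("error %d\n", err);
	else if (err == LOCKED_PAGE)
		printf("already uptodate, page still locked\n");
	else
		printf("I/O submitted; lock_page() and recheck PageUptodate\n");
	return 0;
}
```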
 
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index afdb130..0a2d72f 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -29,6 +29,9 @@
 /* vector size for gang look-up from nat cache that consists of radix tree */
 #define NATVEC_SIZE	64
 
+/* return value for read_node_page */
+#define LOCKED_PAGE	1
+
 /*
  * For node information
  */
@@ -239,7 +242,7 @@
 		return false;
 	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
 		ofs -= 6 + 2 * NIDS_PER_BLOCK;
-		if ((long int)ofs % (NIDS_PER_BLOCK + 1))
+		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
 			return false;
 	}
 	return true;
@@ -277,6 +280,21 @@
 	return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT;
 }
 
+static inline void set_cold_file(struct inode *inode)
+{
+	F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT;
+}
+
+static inline int is_cp_file(struct inode *inode)
+{
+	return F2FS_I(inode)->i_advise & FADVISE_CP_BIT;
+}
+
+static inline void set_cp_file(struct inode *inode)
+{
+	F2FS_I(inode)->i_advise |= FADVISE_CP_BIT;
+}
+
 static inline int is_cold_data(struct page *page)
 {
 	return PageChecked(page);
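
The i_advise helpers above are plain bit-flag accessors. A self-contained model follows; the bit values here are assumptions for illustration (the real definitions live in include/linux/f2fs_fs.h):

```c
#include <stdio.h>

/* assumed bit values, for illustration only */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_CP_BIT		0x02

struct toy_inode { unsigned char i_advise; };

static void set_cold_file(struct toy_inode *i) { i->i_advise |= FADVISE_COLD_BIT; }
static void set_cp_file(struct toy_inode *i)   { i->i_advise |= FADVISE_CP_BIT; }
static int  is_cp_file(struct toy_inode *i)    { return i->i_advise & FADVISE_CP_BIT; }

int main(void)
{
	struct toy_inode ino = { 0 };

	set_cold_file(&ino);
	set_cp_file(&ino);
	printf("advise=%#x cp=%d\n", ino.i_advise, !!is_cp_file(&ino));
	return 0;
}
```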
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b235215..60c8a50 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -53,7 +53,7 @@
 
 	dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
 	if (IS_ERR(dir)) {
-		err = -EINVAL;
+		err = PTR_ERR(dir);
 		goto out;
 	}
 
@@ -112,11 +112,14 @@
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
+		if (err)
 			goto out;
 
+		lock_page(page);
+
 		if (cp_ver != cpver_of_node(page))
-			goto out;
+			goto unlock_out;
 
 		if (!is_fsync_dnode(page))
 			goto next;
@@ -129,24 +132,23 @@
 							FI_INC_LINK);
 		} else {
 			if (IS_INODE(page) && is_dent_dnode(page)) {
-				if (recover_inode_page(sbi, page)) {
-					err = -ENOMEM;
-					goto out;
-				}
+				err = recover_inode_page(sbi, page);
+				if (err)
+					goto unlock_out;
 			}
 
 			/* add this fsync inode to the list */
 			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
 			if (!entry) {
 				err = -ENOMEM;
-				goto out;
+				goto unlock_out;
 			}
 
 			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
 			if (IS_ERR(entry->inode)) {
 				err = PTR_ERR(entry->inode);
 				kmem_cache_free(fsync_entry_slab, entry);
-				goto out;
+				goto unlock_out;
 			}
 
 			list_add_tail(&entry->list, head);
@@ -154,16 +156,20 @@
 		}
 		if (IS_INODE(page)) {
 			err = recover_inode(entry->inode, page);
-			if (err)
-				goto out;
+			if (err == -ENOENT) {
+				goto next;
+			} else if (err) {
+				err = -EINVAL;
+				goto unlock_out;
+			}
 		}
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
-		ClearPageUptodate(page);
 	}
-out:
+unlock_out:
 	unlock_page(page);
+out:
 	__free_pages(page, 0);
 	return err;
 }
@@ -232,13 +238,15 @@
 	iput(inode);
 }
 
-static void do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 					struct page *page, block_t blkaddr)
 {
 	unsigned int start, end;
 	struct dnode_of_data dn;
 	struct f2fs_summary sum;
 	struct node_info ni;
+	int err = 0;
+	int ilock;
 
 	start = start_bidx_of_node(ofs_of_node(page));
 	if (IS_INODE(page))
@@ -246,9 +254,14 @@
 	else
 		end = start + ADDRS_PER_BLOCK;
 
+	ilock = mutex_lock_op(sbi);
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	if (get_dnode_of_data(&dn, start, 0))
-		return;
+
+	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
+	if (err) {
+		mutex_unlock_op(sbi, ilock);
+		return err;
+	}
 
 	wait_on_page_writeback(dn.node_page);
 
@@ -293,14 +306,17 @@
 
 	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 	f2fs_put_dnode(&dn);
+	mutex_unlock_op(sbi, ilock);
+	return 0;
 }
 
-static void recover_data(struct f2fs_sb_info *sbi,
+static int recover_data(struct f2fs_sb_info *sbi,
 				struct list_head *head, int type)
 {
 	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
 	struct curseg_info *curseg;
 	struct page *page;
+	int err = 0;
 	block_t blkaddr;
 
 	/* get node pages in the current segment */
@@ -310,23 +326,29 @@
 	/* read node page */
 	page = alloc_page(GFP_NOFS | __GFP_ZERO);
 	if (IS_ERR(page))
-		return;
+		return -ENOMEM;
+
 	lock_page(page);
 
 	while (1) {
 		struct fsync_inode_entry *entry;
 
-		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
+		err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
+		if (err)
 			goto out;
 
+		lock_page(page);
+
 		if (cp_ver != cpver_of_node(page))
-			goto out;
+			goto unlock_out;
 
 		entry = get_fsync_inode(head, ino_of_node(page));
 		if (!entry)
 			goto next;
 
-		do_recover_data(sbi, entry->inode, page, blkaddr);
+		err = do_recover_data(sbi, entry->inode, page, blkaddr);
+		if (err)
+			goto out;
 
 		if (entry->blkaddr == blkaddr) {
 			iput(entry->inode);
@@ -336,28 +358,32 @@
 next:
 		/* check next segment */
 		blkaddr = next_blkaddr_of_node(page);
-		ClearPageUptodate(page);
 	}
-out:
+unlock_out:
 	unlock_page(page);
+out:
 	__free_pages(page, 0);
 
-	allocate_new_segments(sbi);
+	if (!err)
+		allocate_new_segments(sbi);
+	return err;
 }
 
-void recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi)
 {
 	struct list_head inode_list;
+	int err;
 
 	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
 			sizeof(struct fsync_inode_entry), NULL);
 	if (unlikely(!fsync_entry_slab))
-		return;
+		return -ENOMEM;
 
 	INIT_LIST_HEAD(&inode_list);
 
 	/* step #1: find fsynced inode numbers */
-	if (find_fsync_dnodes(sbi, &inode_list))
+	err = find_fsync_dnodes(sbi, &inode_list);
+	if (err)
 		goto out;
 
 	if (list_empty(&inode_list))
@@ -365,11 +391,12 @@
 
 	/* step #2: recover data */
 	sbi->por_doing = 1;
-	recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
 	sbi->por_doing = 0;
 	BUG_ON(!list_empty(&inode_list));
 out:
 	destroy_fsync_dnodes(sbi, &inode_list);
 	kmem_cache_destroy(fsync_entry_slab);
 	write_checkpoint(sbi, false);
+	return err;
 }
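
recover_fsync_data() now propagates errors from both recovery steps while still writing a checkpoint on every exit path. A compilable sketch of that control flow, with stubs standing in for the real steps:

```c
#include <errno.h>
#include <stdio.h>

/* stubs standing in for find_fsync_dnodes() and recover_data() */
static int find_fsync_dnodes_stub(int fail) { return fail ? -EIO : 0; }
static int recover_data_stub(void)          { return 0; }

static int recover_fsync_data_model(int fail_step1)
{
	int err;

	err = find_fsync_dnodes_stub(fail_step1);	/* step #1 */
	if (err)
		goto out;
	err = recover_data_stub();			/* step #2 */
out:
	/* the checkpoint is written on both paths, as in the patch */
	printf("write_checkpoint(), err=%d\n", err);
	return err;
}

int main(void)
{
	recover_fsync_data_model(0);
	recover_fsync_data_model(1);
	return 0;
}
```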
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 777f17e..d8e84e4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -18,6 +18,7 @@
 #include "f2fs.h"
 #include "segment.h"
 #include "node.h"
+#include <trace/events/f2fs.h>
 
 /*
  * This function balances dirty node and dentry pages.
@@ -49,9 +50,20 @@
 
 	if (dirty_type == DIRTY) {
 		struct seg_entry *sentry = get_seg_entry(sbi, segno);
+		enum dirty_type t = DIRTY_HOT_DATA;
+
 		dirty_type = sentry->type;
+
 		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
 			dirty_i->nr_dirty[dirty_type]++;
+
+		/* Only one bitmap should be set */
+		for (; t <= DIRTY_COLD_NODE; t++) {
+			if (t == dirty_type)
+				continue;
+			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+				dirty_i->nr_dirty[t]--;
+		}
 	}
 }
 
@@ -64,13 +76,16 @@
 		dirty_i->nr_dirty[dirty_type]--;
 
 	if (dirty_type == DIRTY) {
-		struct seg_entry *sentry = get_seg_entry(sbi, segno);
-		dirty_type = sentry->type;
-		if (test_and_clear_bit(segno,
-					dirty_i->dirty_segmap[dirty_type]))
-			dirty_i->nr_dirty[dirty_type]--;
-		clear_bit(segno, dirty_i->victim_segmap[FG_GC]);
-		clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
+		enum dirty_type t = DIRTY_HOT_DATA;
+
+		/* clear all the bitmaps */
+		for (; t <= DIRTY_COLD_NODE; t++)
+			if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+				dirty_i->nr_dirty[t]--;
+
+		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
+			clear_bit(GET_SECNO(sbi, segno),
+						dirty_i->victim_secmap);
 	}
 }
 
@@ -296,13 +311,12 @@
 	f2fs_put_page(page, 1);
 }
 
-static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi,
-					int ofs_unit, int type)
+static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 	unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
-	unsigned int segno, next_segno, i;
-	int ofs = 0;
+	unsigned int segno;
+	unsigned int ofs = 0;
 
 	/*
 	 * If there is not enough reserved sections,
@@ -318,28 +332,46 @@
 	if (IS_NODESEG(type))
 		return NULL_SEGNO;
 next:
-	segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs++);
-	ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
+	segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
+	ofs += sbi->segs_per_sec;
+
 	if (segno < TOTAL_SEGS(sbi)) {
+		int i;
+
 		/* skip intermediate segments in a section */
-		if (segno % ofs_unit)
+		if (segno % sbi->segs_per_sec)
+			goto next;
+
+		/* skip if the section is currently used */
+		if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
 			goto next;
 
 		/* skip if whole section is not prefree */
-		next_segno = find_next_zero_bit(prefree_segmap,
-						TOTAL_SEGS(sbi), segno + 1);
-		if (next_segno - segno < ofs_unit)
-			goto next;
+		for (i = 1; i < sbi->segs_per_sec; i++)
+			if (!test_bit(segno + i, prefree_segmap))
+				goto next;
 
 		/* skip if whole section was not free at the last checkpoint */
-		for (i = 0; i < ofs_unit; i++)
-			if (get_seg_entry(sbi, segno)->ckpt_valid_blocks)
+		for (i = 0; i < sbi->segs_per_sec; i++)
+			if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
 				goto next;
+
 		return segno;
 	}
 	return NULL_SEGNO;
 }
 
+static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
+{
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
+	unsigned int segno = curseg->segno;
+	struct free_segmap_info *free_i = FREE_I(sbi);
+
+	if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
+		return !test_bit(segno + 1, free_i->free_segmap);
+	return 0;
+}
+
 /*
  * Find a new segment from the free segments bitmap to right order
  * This function should be returned with success, otherwise BUG
@@ -348,9 +380,8 @@
 			unsigned int *newseg, bool new_sec, int dir)
 {
 	struct free_segmap_info *free_i = FREE_I(sbi);
-	unsigned int total_secs = sbi->total_sections;
 	unsigned int segno, secno, zoneno;
-	unsigned int total_zones = sbi->total_sections / sbi->secs_per_zone;
+	unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
 	unsigned int hint = *newseg / sbi->segs_per_sec;
 	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
 	unsigned int left_start = hint;
@@ -363,16 +394,17 @@
 	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
 		segno = find_next_zero_bit(free_i->free_segmap,
 					TOTAL_SEGS(sbi), *newseg + 1);
-		if (segno < TOTAL_SEGS(sbi))
+		if (segno - *newseg < sbi->segs_per_sec -
+					(*newseg % sbi->segs_per_sec))
 			goto got_it;
 	}
 find_other_zone:
-	secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint);
-	if (secno >= total_secs) {
+	secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
+	if (secno >= TOTAL_SECS(sbi)) {
 		if (dir == ALLOC_RIGHT) {
 			secno = find_next_zero_bit(free_i->free_secmap,
-						total_secs, 0);
-			BUG_ON(secno >= total_secs);
+							TOTAL_SECS(sbi), 0);
+			BUG_ON(secno >= TOTAL_SECS(sbi));
 		} else {
 			go_left = 1;
 			left_start = hint - 1;
@@ -387,8 +419,8 @@
 			continue;
 		}
 		left_start = find_next_zero_bit(free_i->free_secmap,
-						total_secs, 0);
-		BUG_ON(left_start >= total_secs);
+							TOTAL_SECS(sbi), 0);
+		BUG_ON(left_start >= TOTAL_SECS(sbi));
 		break;
 	}
 	secno = left_start;
@@ -561,20 +593,20 @@
 						int type, bool force)
 {
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
-	unsigned int ofs_unit;
 
 	if (force) {
 		new_curseg(sbi, type, true);
 		goto out;
 	}
 
-	ofs_unit = need_SSR(sbi) ? 1 : sbi->segs_per_sec;
-	curseg->next_segno = check_prefree_segments(sbi, ofs_unit, type);
+	curseg->next_segno = check_prefree_segments(sbi, type);
 
 	if (curseg->next_segno != NULL_SEGNO)
 		change_curseg(sbi, type, false);
 	else if (type == CURSEG_WARM_NODE)
 		new_curseg(sbi, type, false);
+	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+		new_curseg(sbi, type, false);
 	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
 		change_curseg(sbi, type, true);
 	else
@@ -656,10 +688,16 @@
 	if (type >= META_FLUSH)
 		rw = WRITE_FLUSH_FUA;
 
+	if (btype == META)
+		rw |= REQ_META;
+
 	if (sbi->bio[btype]) {
 		struct bio_private *p = sbi->bio[btype]->bi_private;
 		p->sbi = sbi;
 		sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
+
+		trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);
+
 		if (type == META_FLUSH) {
 			DECLARE_COMPLETION_ONSTACK(wait);
 			p->is_sync = true;
@@ -696,7 +734,7 @@
 		do_submit_bio(sbi, type, false);
 alloc_new:
 	if (sbi->bio[type] == NULL) {
-		sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
+		sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
 		sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 		/*
		 * The end_io will be assigned at the submission phase.
@@ -714,6 +752,7 @@
 	sbi->last_block_in_bio[type] = blk_addr;
 
 	up_write(&sbi->bio_sem);
+	trace_f2fs_submit_write_page(page, blk_addr, type);
 }
 
 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
@@ -1390,7 +1429,7 @@
 	}
 
 	if (sbi->segs_per_sec > 1) {
-		sit_i->sec_entries = vzalloc(sbi->total_sections *
+		sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
 					sizeof(struct sec_entry));
 		if (!sit_i->sec_entries)
 			return -ENOMEM;
@@ -1403,10 +1442,9 @@
 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
 
-	dst_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
 	if (!dst_bitmap)
 		return -ENOMEM;
-	memcpy(dst_bitmap, src_bitmap, bitmap_size);
 
 	/* init SIT information */
 	sit_i->s_ops = &default_salloc_ops;
@@ -1442,7 +1480,7 @@
 	if (!free_i->free_segmap)
 		return -ENOMEM;
 
-	sec_bitmap_size = f2fs_bitmap_size(sbi->total_sections);
+	sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
 	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
 	if (!free_i->free_secmap)
 		return -ENOMEM;
@@ -1559,14 +1597,13 @@
 	}
 }
 
-static int init_victim_segmap(struct f2fs_sb_info *sbi)
+static int init_victim_secmap(struct f2fs_sb_info *sbi)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
 
-	dirty_i->victim_segmap[FG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
-	dirty_i->victim_segmap[BG_GC] = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC])
+	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dirty_i->victim_secmap)
 		return -ENOMEM;
 	return 0;
 }
@@ -1593,7 +1630,7 @@
 	}
 
 	init_dirty_segmap(sbi);
-	return init_victim_segmap(sbi);
+	return init_victim_secmap(sbi);
 }
 
 /*
@@ -1680,18 +1717,10 @@
 	mutex_unlock(&dirty_i->seglist_lock);
 }
 
-void reset_victim_segmap(struct f2fs_sb_info *sbi)
-{
-	unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
-	memset(DIRTY_I(sbi)->victim_segmap[FG_GC], 0, bitmap_size);
-}
-
-static void destroy_victim_segmap(struct f2fs_sb_info *sbi)
+static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
 {
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-
-	kfree(dirty_i->victim_segmap[FG_GC]);
-	kfree(dirty_i->victim_segmap[BG_GC]);
+	kfree(dirty_i->victim_secmap);
 }
 
 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
@@ -1706,7 +1735,7 @@
 	for (i = 0; i < NR_DIRTY_TYPE; i++)
 		discard_dirty_segmap(sbi, i);
 
-	destroy_victim_segmap(sbi);
+	destroy_victim_secmap(sbi);
 	SM_I(sbi)->dirty_info = NULL;
 	kfree(dirty_i);
 }
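
The new loops in __locate_dirty_segment()/__remove_dirty_segment() enforce the invariant that a segment lives in at most one typed dirty bitmap. A userspace model of the re-homing logic (plain arrays replace the kernel's bitops and locking):

```c
#include <stdio.h>

enum dirty_type { DIRTY_HOT_DATA, DIRTY_WARM_DATA, DIRTY_COLD_DATA,
		  DIRTY_HOT_NODE, DIRTY_WARM_NODE, DIRTY_COLD_NODE,
		  NR_TYPES };

#define NR_SEGS 8

static int dirty_segmap[NR_TYPES][NR_SEGS];
static int nr_dirty[NR_TYPES];

/* re-home segno under type t, clearing it from every other bitmap */
static void locate_dirty(int segno, enum dirty_type t)
{
	enum dirty_type other;

	if (!dirty_segmap[t][segno]) {
		dirty_segmap[t][segno] = 1;
		nr_dirty[t]++;
	}
	for (other = DIRTY_HOT_DATA; other < NR_TYPES; other++) {
		if (other == t)
			continue;
		if (dirty_segmap[other][segno]) {
			dirty_segmap[other][segno] = 0;
			nr_dirty[other]--;
		}
	}
}

int main(void)
{
	locate_dirty(3, DIRTY_WARM_DATA);
	locate_dirty(3, DIRTY_COLD_DATA);	/* segment reclassified */
	printf("warm=%d cold=%d\n",
	       nr_dirty[DIRTY_WARM_DATA], nr_dirty[DIRTY_COLD_DATA]);
	return 0;
}
```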
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 552dadb..062424a 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -8,10 +8,13 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/blkdev.h>
+
 /* constant macro */
 #define NULL_SEGNO			((unsigned int)(~0))
+#define NULL_SECNO			((unsigned int)(~0))
 
-/* V: Logical segment # in volume, R: Relative segment # in main area */
+/* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
 #define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)
 
@@ -23,13 +26,13 @@
 	((t == CURSEG_HOT_NODE) || (t == CURSEG_COLD_NODE) ||		\
 	(t == CURSEG_WARM_NODE))
 
-#define IS_CURSEG(sbi, segno)						\
-	((segno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
-	 (segno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
-	 (segno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
-	 (segno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
-	 (segno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
-	 (segno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
+#define IS_CURSEG(sbi, seg)						\
+	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
+	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
+	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
+	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
+	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
+	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
 
 #define IS_CURSEC(sbi, secno)						\
 	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
@@ -81,9 +84,12 @@
 #define f2fs_bitmap_size(nr)			\
 	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
 #define TOTAL_SEGS(sbi)	(SM_I(sbi)->main_segments)
+#define TOTAL_SECS(sbi)	(sbi->total_sections)
 
 #define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
 	(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define SECTOR_TO_BLOCK(sbi, sectors)					\
+	(sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
 
 /* during checkpoint, bio_private is used to synchronize the last bio */
 struct bio_private {
@@ -213,7 +219,7 @@
 	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
 	struct mutex seglist_lock;		/* lock for segment bitmaps */
 	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
-	unsigned long *victim_segmap[2];	/* BG_GC, FG_GC */
+	unsigned long *victim_secmap;		/* background GC victims */
 };
 
 /* victim selection function for cleaning and SSR */
@@ -464,8 +470,7 @@
 
 static inline int utilization(struct f2fs_sb_info *sbi)
 {
-	return (long int)valid_user_blocks(sbi) * 100 /
-			(long int)sbi->user_block_count;
+	return div_u64(valid_user_blocks(sbi) * 100, sbi->user_block_count);
 }
 
 /*
@@ -616,3 +621,17 @@
 		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
 				- (base + 1) + type;
 }
+
+static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
+{
+	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
+		return true;
+	return false;
+}
+
+static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct request_queue *q = bdev_get_queue(bdev);
+	return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
+}
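
max_hw_blocks() converts the queue's sector limit into filesystem blocks via the new SECTOR_TO_BLOCK() macro. A worked example, assuming 4 KiB blocks (log_blocksize = 12) and 512-byte sectors:

```c
#include <stdio.h>

#define F2FS_LOG_SECTOR_SIZE 9	/* 512-byte sectors */

static unsigned int sector_to_block(unsigned int sectors,
				    unsigned int log_blocksize)
{
	return sectors >> (log_blocksize - F2FS_LOG_SECTOR_SIZE);
}

int main(void)
{
	/* a queue_max_sectors() of 1024 (512 KiB) allows 128 4 KiB blocks */
	printf("max_hw_blocks = %u\n", sector_to_block(1024, 12));
	return 0;
}
```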
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 62e0177..8555f7d 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/statfs.h>
-#include <linux/proc_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
 #include <linux/kthread.h>
@@ -21,12 +20,17 @@
 #include <linux/seq_file.h>
 #include <linux/random.h>
 #include <linux/exportfs.h>
+#include <linux/blkdev.h>
 #include <linux/f2fs_fs.h>
 
 #include "f2fs.h"
 #include "node.h"
+#include "segment.h"
 #include "xattr.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/f2fs.h>
+
 static struct kmem_cache *f2fs_inode_cachep;
 
 enum {
@@ -94,6 +98,20 @@
 	return &fi->vfs_inode;
 }
 
+static int f2fs_drop_inode(struct inode *inode)
+{
+	/*
+	 * This is to avoid a deadlock condition like below.
+	 * writeback_single_inode(inode)
+	 *  - f2fs_write_data_page
+	 *    - f2fs_gc -> iput -> evict
+	 *       - inode_wait_for_writeback(inode)
+	 */
+	if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
+		return 0;
+	return generic_drop_inode(inode);
+}
+
 static void f2fs_i_callback(struct rcu_head *head)
 {
 	struct inode *inode = container_of(head, struct inode, i_rcu);
@@ -132,13 +150,18 @@
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
+	trace_f2fs_sync_fs(sb, sync);
+
 	if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
 		return 0;
 
-	if (sync)
+	if (sync) {
+		mutex_lock(&sbi->gc_mutex);
 		write_checkpoint(sbi, false);
-	else
+		mutex_unlock(&sbi->gc_mutex);
+	} else {
 		f2fs_balance_fs(sbi);
+	}
 
 	return 0;
 }
@@ -180,7 +203,7 @@
 	buf->f_files = sbi->total_node_count;
 	buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
 
-	buf->f_namelen = F2FS_MAX_NAME_LEN;
+	buf->f_namelen = F2FS_NAME_LEN;
 	buf->f_fsid.val[0] = (u32)id;
 	buf->f_fsid.val[1] = (u32)(id >> 32);
 
@@ -223,6 +246,7 @@
 
 static struct super_operations f2fs_sops = {
 	.alloc_inode	= f2fs_alloc_inode,
+	.drop_inode	= f2fs_drop_inode,
 	.destroy_inode	= f2fs_destroy_inode,
 	.write_inode	= f2fs_write_inode,
 	.show_options	= f2fs_show_options,
@@ -457,6 +481,7 @@
 	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
 	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
 	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
+	sbi->cur_victim_sec = NULL_SECNO;
 
 	for (i = 0; i < NR_COUNT_TYPE; i++)
 		atomic_set(&sbi->nr_pages[i], 0);
@@ -473,7 +498,7 @@
 	if (!*raw_super_buf) {
 		f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
 				super);
-		return 1;
+		return -EIO;
 	}
 
 	*raw_super = (struct f2fs_super_block *)
@@ -485,7 +510,7 @@
 
 	f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
 				"in %s superblock", super);
-	return 1;
+	return -EINVAL;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -508,9 +533,12 @@
 		goto free_sbi;
 	}
 
-	if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
+	err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
+	if (err) {
 		brelse(raw_super_buf);
-		if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
+		/* check secondary superblock when primary failed */
+		err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
+		if (err)
 			goto free_sb_buf;
 	}
 	/* init some FS parameters */
@@ -525,7 +553,8 @@
 	set_opt(sbi, POSIX_ACL);
 #endif
 	/* parse mount options */
-	if (parse_options(sb, sbi, (char *)data))
+	err = parse_options(sb, sbi, (char *)data);
+	if (err)
 		goto free_sb_buf;
 
 	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
@@ -547,11 +576,11 @@
 	sbi->raw_super = raw_super;
 	sbi->raw_super_buf = raw_super_buf;
 	mutex_init(&sbi->gc_mutex);
-	mutex_init(&sbi->write_inode);
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
-	for (i = 0; i < NR_LOCK_TYPE; i++)
+	for (i = 0; i < NR_GLOBAL_LOCKS; i++)
 		mutex_init(&sbi->fs_lock[i]);
+	mutex_init(&sbi->node_write);
 	sbi->por_doing = 0;
 	spin_lock_init(&sbi->stat_lock);
 	init_rwsem(&sbi->bio_sem);
@@ -638,8 +667,12 @@
 	}
 
 	/* recover fsynced data */
-	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
-		recover_fsync_data(sbi);
+	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
+		err = recover_fsync_data(sbi);
+		if (err)
+			f2fs_msg(sb, KERN_ERR,
+				"Cannot recover all fsync data errno=%ld", err);
+	}
 
 	/* After POR, we can run background GC thread */
 	err = start_gc_thread(sbi);
@@ -650,6 +683,14 @@
 	if (err)
 		goto fail;
 
+	if (test_opt(sbi, DISCARD)) {
+		struct request_queue *q = bdev_get_queue(sb->s_bdev);
+		if (!blk_queue_discard(q))
+			f2fs_msg(sb, KERN_WARNING,
+					"mounting with \"discard\" option, but "
+					"the device does not support discard");
+	}
+
 	return 0;
 fail:
 	stop_gc_thread(sbi);
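
validate_superblock() now returns distinct errno values, which lets f2fs_fill_super() retry with the secondary superblock only when the primary fails. A tiny sketch of that fallback, with a stub validator:

```c
#include <errno.h>
#include <stdio.h>

/* stub: pretend only the given superblock copy is valid */
static int validate_superblock_stub(int copy, int valid_copy)
{
	return copy == valid_copy ? 0 : -EINVAL;
}

int main(void)
{
	int err = validate_superblock_stub(0, 1);	/* primary is damaged */

	if (err)
		err = validate_superblock_stub(1, 1);	/* fall back to backup */
	printf("mount %s\n", err ? "fails" : "proceeds");
	return 0;
}
```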
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 8038c04..0b02dce 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -307,27 +307,30 @@
 	int error, found, free, newsize;
 	size_t name_len;
 	char *pval;
+	int ilock;
 
 	if (name == NULL)
 		return -EINVAL;
-	name_len = strlen(name);
 
 	if (value == NULL)
 		value_len = 0;
 
-	if (name_len > 255 || value_len > MAX_VALUE_LEN)
+	name_len = strlen(name);
+
+	if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN)
 		return -ERANGE;
 
 	f2fs_balance_fs(sbi);
 
-	mutex_lock_op(sbi, NODE_NEW);
+	ilock = mutex_lock_op(sbi);
+
 	if (!fi->i_xattr_nid) {
 		/* Allocate new attribute block */
 		struct dnode_of_data dn;
 
 		if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
-			mutex_unlock_op(sbi, NODE_NEW);
-			return -ENOSPC;
+			error = -ENOSPC;
+			goto exit;
 		}
 		set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
 		mark_inode_dirty(inode);
@@ -336,8 +339,8 @@
 		if (IS_ERR(page)) {
 			alloc_nid_failed(sbi, fi->i_xattr_nid);
 			fi->i_xattr_nid = 0;
-			mutex_unlock_op(sbi, NODE_NEW);
-			return PTR_ERR(page);
+			error = PTR_ERR(page);
+			goto exit;
 		}
 
 		alloc_nid_done(sbi, fi->i_xattr_nid);
@@ -349,8 +352,8 @@
 		/* The inode already has an extended attribute block. */
 		page = get_node_page(sbi, fi->i_xattr_nid);
 		if (IS_ERR(page)) {
-			mutex_unlock_op(sbi, NODE_NEW);
-			return PTR_ERR(page);
+			error = PTR_ERR(page);
+			goto exit;
 		}
 
 		base_addr = page_address(page);
@@ -432,12 +435,13 @@
 		inode->i_ctime = CURRENT_TIME;
 		clear_inode_flag(fi, FI_ACL_MODE);
 	}
-	f2fs_write_inode(inode, NULL);
-	mutex_unlock_op(sbi, NODE_NEW);
+	update_inode_page(inode);
+	mutex_unlock_op(sbi, ilock);
 
 	return 0;
 cleanup:
 	f2fs_put_page(page, 1);
-	mutex_unlock_op(sbi, NODE_NEW);
+exit:
+	mutex_unlock_op(sbi, ilock);
 	return error;
 }
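
The ilock value threaded through f2fs_setxattr() above is a lock cookie: mutex_lock_op(sbi) takes one of the NR_GLOBAL_LOCKS fs_lock[] mutexes and returns its index, which must be handed back to mutex_unlock_op(). A userspace model of such a cookie API; the round-robin pick here is an assumption for illustration, not f2fs's actual selection policy:

```c
#include <pthread.h>
#include <stdio.h>

#define NR_GLOBAL_LOCKS 4	/* assumption mirroring the fs_lock[] array */

static pthread_mutex_t fs_lock[NR_GLOBAL_LOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static int next_lock;

static int mutex_lock_op_model(void)
{
	int ilock = __sync_fetch_and_add(&next_lock, 1) % NR_GLOBAL_LOCKS;

	pthread_mutex_lock(&fs_lock[ilock]);
	return ilock;		/* caller must pass this cookie back */
}

static void mutex_unlock_op_model(int ilock)
{
	pthread_mutex_unlock(&fs_lock[ilock]);
}

int main(void)
{
	int ilock = mutex_lock_op_model();

	printf("holding lock %d\n", ilock);
	mutex_unlock_op_model(ilock);
	return 0;
}
```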
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 4ff9016..dfce656 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -19,6 +19,7 @@
 #include <linux/mpage.h>
 #include <linux/buffer_head.h>
 #include <linux/mount.h>
+#include <linux/aio.h>
 #include <linux/vfs.h>
 #include <linux/parser.h>
 #include <linux/uio.h>
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 798d445..3be5718 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -22,7 +22,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
@@ -88,20 +87,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
-/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
-static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
-{
-	if (bdi->wb.task) {
-		wake_up_process(bdi->wb.task);
-	} else {
-		/*
-		 * The bdi thread isn't there, wake up the forker thread which
-		 * will create and run it.
-		 */
-		wake_up_process(default_backing_dev_info.wb.task);
-	}
-}
-
 static void bdi_queue_work(struct backing_dev_info *bdi,
 			   struct wb_writeback_work *work)
 {
@@ -109,10 +94,9 @@
 
 	spin_lock_bh(&bdi->wb_lock);
 	list_add_tail(&work->list, &bdi->work_list);
-	if (!bdi->wb.task)
-		trace_writeback_nothread(bdi, work);
-	bdi_wakeup_flusher(bdi);
 	spin_unlock_bh(&bdi->wb_lock);
+
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 }
 
 static void
@@ -127,10 +111,8 @@
 	 */
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work) {
-		if (bdi->wb.task) {
-			trace_writeback_nowork(bdi);
-			wake_up_process(bdi->wb.task);
-		}
+		trace_writeback_nowork(bdi);
+		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 		return;
 	}
 
@@ -177,9 +159,7 @@
 	 * writeback as soon as there is no other work to do.
 	 */
 	trace_writeback_wake_background(bdi);
-	spin_lock_bh(&bdi->wb_lock);
-	bdi_wakeup_flusher(bdi);
-	spin_unlock_bh(&bdi->wb_lock);
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
 }
 
 /*
@@ -1020,67 +1000,49 @@
 
 /*
  * Handle writeback of dirty data for the device backed by this bdi. Also
- * wakes up periodically and does kupdated style flushing.
+ * reschedules periodically and does kupdated style flushing.
  */
-int bdi_writeback_thread(void *data)
+void bdi_writeback_workfn(struct work_struct *work)
 {
-	struct bdi_writeback *wb = data;
+	struct bdi_writeback *wb = container_of(to_delayed_work(work),
+						struct bdi_writeback, dwork);
 	struct backing_dev_info *bdi = wb->bdi;
 	long pages_written;
 
 	set_worker_desc("flush-%s", dev_name(bdi->dev));
 	current->flags |= PF_SWAPWRITE;
-	set_freezable();
-	wb->last_active = jiffies;
 
-	/*
-	 * Our parent may run at a different priority, just set us to normal
-	 */
-	set_user_nice(current, 0);
-
-	trace_writeback_thread_start(bdi);
-
-	while (!kthread_freezable_should_stop(NULL)) {
+	if (likely(!current_is_workqueue_rescuer() ||
+		   list_empty(&bdi->bdi_list))) {
 		/*
-		 * Remove own delayed wake-up timer, since we are already awake
-		 * and we'll take care of the periodic write-back.
+		 * The normal path.  Keep writing back @bdi until its
+		 * work_list is empty.  Note that this path is also taken
+		 * if @bdi is shutting down even when we're running off the
+		 * rescuer as work_list needs to be drained.
 		 */
-		del_timer(&wb->wakeup_timer);
-
-		pages_written = wb_do_writeback(wb, 0);
-
+		do {
+			pages_written = wb_do_writeback(wb, 0);
+			trace_writeback_pages_written(pages_written);
+		} while (!list_empty(&bdi->work_list));
+	} else {
+		/*
+		 * bdi_wq can't get enough workers and we're running off
+		 * the emergency worker.  Don't hog it.  Hopefully, 1024 is
+		 * enough for efficient IO.
+		 */
+		pages_written = writeback_inodes_wb(&bdi->wb, 1024,
+						    WB_REASON_FORKER_THREAD);
 		trace_writeback_pages_written(pages_written);
-
-		if (pages_written)
-			wb->last_active = jiffies;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
-			__set_current_state(TASK_RUNNING);
-			continue;
-		}
-
-		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
-			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
-		else {
-			/*
-			 * We have nothing to do, so can go sleep without any
-			 * timeout and save power. When a work is queued or
-			 * something is made dirty - we will be woken up.
-			 */
-			schedule();
-		}
 	}
 
-	/* Flush any work that raced with us exiting */
-	if (!list_empty(&bdi->work_list))
-		wb_do_writeback(wb, 1);
+	if (!list_empty(&bdi->work_list) ||
+	    (wb_has_dirty_io(wb) && dirty_writeback_interval))
+		queue_delayed_work(bdi_wq, &wb->dwork,
+			msecs_to_jiffies(dirty_writeback_interval * 10));
 
-	trace_writeback_thread_stop(bdi);
-	return 0;
+	current->flags &= ~PF_SWAPWRITE;
 }
 
-
 /*
  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
  * the whole world.
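
The requeue interval above relies on dirty_writeback_interval being stored in centiseconds, hence the `* 10` before msecs_to_jiffies(). A worked example with an assumed CONFIG_HZ and a rough round-up model of the conversion:

```c
#include <stdio.h>

#define HZ 250	/* assumption: one common CONFIG_HZ value */

/* rough model of msecs_to_jiffies(): round up to whole ticks */
static unsigned long msecs_to_jiffies_model(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	unsigned int dirty_writeback_interval = 500;	/* default 5 s, in centisecs */

	printf("delay = %lu jiffies\n",
	       msecs_to_jiffies_model(dirty_writeback_interval * 10));
	return 0;
}
```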
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index b3aaf7b..aef34b1 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -38,6 +38,7 @@
 #include <linux/device.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/kdev_t.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a6c1664..1d55f94 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -19,6 +19,7 @@
 #include <linux/pipe_fs_i.h>
 #include <linux/swap.h>
 #include <linux/splice.h>
+#include <linux/aio.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4655e59..d1c9b85 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/compat.h>
 #include <linux/swap.h>
+#include <linux/aio.h>
 
 static const struct file_operations fuse_direct_io_file_operations;
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9883694..0bad69e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -20,6 +20,7 @@
 #include <linux/swap.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/backing-dev.h>
+#include <linux/aio.h>
 
 #include "gfs2.h"
 #include "incore.h"
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index d79c2da..acd1676 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -25,6 +25,7 @@
 #include <asm/uaccess.h>
 #include <linux/dlm.h>
 #include <linux/dlm_plock.h>
+#include <linux/aio.h>
 
 #include "gfs2.h"
 #include "incore.h"
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 7318abf..c5fa758 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -300,7 +300,7 @@
 	u64 nblk;
 
 	if (bio) {
-		nblk = bio->bi_sector + bio_sectors(bio);
+		nblk = bio_end_sector(bio);
 		nblk >>= sdp->sd_fsb2bb_shift;
 		if (blkno == nblk)
 			return bio;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 17c22a8..e0101b6 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -176,7 +176,9 @@
 {
 	struct hfs_readdir_data *rd = file->private_data;
 	if (rd) {
+		mutex_lock(&inode->i_mutex);
 		list_del(&rd->list);
+		mutex_unlock(&inode->i_mutex);
 		kfree(rd);
 	}
 	return 0;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 716e1aa..f9299d8 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/sched.h>
+#include <linux/aio.h>
 
 #include "hfs_fs.h"
 #include "btree.h"
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 7faaa96..f833d35 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
 #include <linux/sched.h>
+#include <linux/aio.h>
 
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 523464e..a3f868a 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -909,11 +909,8 @@
 
 static int get_hstate_idx(int page_size_log)
 {
-	struct hstate *h;
+	struct hstate *h = hstate_sizelog(page_size_log);
 
-	if (!page_size_log)
-		return default_hstate_idx;
-	h = size_to_hstate(1 << page_size_log);
 	if (!h)
 		return -1;
 	return h - hstates;
@@ -929,9 +926,12 @@
 	.d_dname = hugetlb_dname
 };
 
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
-				size_t size, vm_flags_t acctflag,
-				struct user_struct **user,
+/*
+ * Note that the caller must align size to the proper hugepage size;
+ * otherwise hugetlb_reserve_pages() reserves one fewer hugepage than intended.
+ */
+struct file *hugetlb_file_setup(const char *name, size_t size,
+				vm_flags_t acctflag, struct user_struct **user,
 				int creat_flags, int page_size_log)
 {
 	struct file *file = ERR_PTR(-ENOMEM);
@@ -939,8 +939,6 @@
 	struct path path;
 	struct super_block *sb;
 	struct qstr quick_string;
-	struct hstate *hstate;
-	unsigned long num_pages;
 	int hstate_idx;
 
 	hstate_idx = get_hstate_idx(page_size_log);
@@ -980,12 +978,10 @@
 	if (!inode)
 		goto out_dentry;
 
-	hstate = hstate_inode(inode);
-	size += addr & ~huge_page_mask(hstate);
-	num_pages = ALIGN(size, huge_page_size(hstate)) >>
-			huge_page_shift(hstate);
 	file = ERR_PTR(-ENOMEM);
-	if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
+	if (hugetlb_reserve_pages(inode, 0,
+			size >> huge_page_shift(hstate_inode(inode)), NULL,
+			acctflag))
 		goto out_inode;
 
 	d_instantiate(path.dentry, inode);
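
The reservation math above is why the new comment insists that size arrive hugepage-aligned: the shift silently rounds down. A worked example with 2 MiB huge pages:

```c
#include <stdio.h>

int main(void)
{
	int shift = 21;				/* 2 MiB huge pages */
	unsigned long aligned = 4UL << 20;	/* 4 MiB -> 2 pages */
	unsigned long unaligned = 3UL << 20;	/* 3 MiB -> only 1 page! */

	printf("aligned:   reserve %lu pages\n", aligned >> shift);
	printf("unaligned: reserve %lu pages\n", unaligned >> shift);
	return 0;
}
```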
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 77554b6..730f24e 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -23,6 +23,7 @@
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 #include "jfs_incore.h"
 #include "jfs_inode.h"
 #include "jfs_filsys.h"
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index cbe48ea..c57499d 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2005,7 +2005,6 @@
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_idx = 0;
 	bio->bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
@@ -2146,7 +2145,6 @@
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_idx = 0;
 	bio->bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index e784a21..550475c 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -32,7 +32,6 @@
 	bio_vec.bv_len = PAGE_SIZE;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_idx = 0;
 	bio.bi_size = PAGE_SIZE;
 	bio.bi_bdev = bdev;
 	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
@@ -108,7 +107,6 @@
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_idx = 0;
 			bio->bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
 			bio->bi_sector = ofs >> 9;
@@ -136,7 +134,6 @@
 		unlock_page(page);
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_idx = 0;
 	bio->bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
 	bio->bi_sector = ofs >> 9;
@@ -202,7 +199,6 @@
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_idx = 0;
 			bio->bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
 			bio->bi_sector = ofs >> 9;
@@ -224,7 +220,6 @@
 		bio->bi_io_vec[i].bv_offset = 0;
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_idx = 0;
 	bio->bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
 	bio->bi_sector = ofs >> 9;
diff --git a/fs/namei.c b/fs/namei.c
index 57ae9c8..85e40d1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2740,7 +2740,7 @@
 		if (error)
 			return error;
 
-		audit_inode(name, dir, 0);
+		audit_inode(name, dir, LOOKUP_PARENT);
 		error = -EISDIR;
 		/* trailing slashes? */
 		if (nd->last.name[nd->last.len])
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index f4891bd..8485978 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -173,7 +173,7 @@
 /* blocklayoutdev.c */
 ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
 void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
-int nfs4_blkdev_put(struct block_device *bdev);
+void nfs4_blkdev_put(struct block_device *bdev);
 struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server,
 						struct pnfs_device *dev);
 int nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index a86c5bd..04303b5 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -56,11 +56,11 @@
 /*
  * Release the block device
  */
-int nfs4_blkdev_put(struct block_device *bdev)
+void nfs4_blkdev_put(struct block_device *bdev)
 {
 	dprintk("%s for device %d:%d\n", __func__, MAJOR(bdev->bd_dev),
 			MINOR(bdev->bd_dev));
-	return blkdev_put(bdev, FMODE_READ);
+	blkdev_put(bdev, FMODE_READ);
 }
 
 ssize_t bl_pipe_downcall(struct file *filp, const char __user *src,
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
index 6fc7b5c..8999cfd 100644
--- a/fs/nfs/blocklayout/blocklayoutdm.c
+++ b/fs/nfs/blocklayout/blocklayoutdm.c
@@ -88,14 +88,8 @@
  */
 static void nfs4_blk_metadev_release(struct pnfs_block_dev *bdev)
 {
-	int rv;
-
 	dprintk("%s Releasing\n", __func__);
-	rv = nfs4_blkdev_put(bdev->bm_mdev);
-	if (rv)
-		printk(KERN_ERR "NFS: %s nfs4_blkdev_put returns %d\n",
-				__func__, rv);
-
+	nfs4_blkdev_put(bdev->bm_mdev);
 	dev_remove(bdev->net, bdev->bm_mdev->bd_dev);
 }
 
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 553a83c..a1dd768 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -47,6 +47,8 @@
 			const nfs4_stateid *);
 	int	(*find_root_sec)(struct nfs_server *, struct nfs_fh *,
 			struct nfs_fsinfo *);
+	int	(*free_lock_state)(struct nfs_server *,
+			struct nfs4_lock_state *);
 	const struct nfs4_state_recovery_ops *reboot_recovery_ops;
 	const struct nfs4_state_recovery_ops *nograce_recovery_ops;
 	const struct nfs4_state_maintenance_ops *state_renewal_ops;
@@ -234,7 +236,6 @@
 extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
 			    struct nfs_fh *, struct nfs_fattr *);
 extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
-extern int nfs4_release_lockowner(struct nfs4_lock_state *);
 extern const struct xattr_handler *nfs4_xattr_handlers[];
 extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
 		const struct nfs_open_context *ctx,
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index b8da955..235ff95 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -70,6 +70,8 @@
 	struct list_head	ds_addrs;
 	struct nfs_client	*ds_clp;
 	atomic_t		ds_count;
+	unsigned long		ds_state;
+#define NFS4DS_CONNECTING	0	/* ds is establishing connection */
 };
 
 struct nfs4_file_layout_dsaddr {
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 1fe284f..661a0f6 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -775,6 +775,22 @@
 	return flseg->fh_array[i];
 }
 
+static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
+{
+	might_sleep();
+	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
+			nfs_wait_bit_killable, TASK_KILLABLE);
+}
+
+static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
+}
+
 struct nfs4_pnfs_ds *
 nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
 {
@@ -791,16 +807,22 @@
 		filelayout_mark_devid_invalid(devid);
 		return NULL;
 	}
+	if (ds->ds_clp)
+		return ds;
 
-	if (!ds->ds_clp) {
+	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
 		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
 		int err;
 
 		err = nfs4_ds_connect(s, ds);
+		nfs4_clear_ds_conn_bit(ds);
 		if (err) {
 			nfs4_mark_deviceid_unavailable(devid);
-			ds = NULL;
+		}
+	} else {
+		/* Either ds is connected, or ds is NULL */
+		nfs4_wait_ds_connect(ds);
 	}
 	return ds;
 }
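
The NFS4DS_CONNECTING bit above serializes data-server connects: the first caller wins test_and_set_bit() and performs the connect, and the bit must be cleared before ds may be set to NULL (nfs4_clear_ds_conn_bit() dereferences it) while the other callers sleep in wait_on_bit(). A pthread-based model of the same gate, with a mutex/condvar standing in for the kernel's bitops and waitqueues; compile with -pthread, and exactly one thread prints "connecting":

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int connecting, connected;

static void *prepare_ds(void *arg)
{
	pthread_mutex_lock(&lock);
	if (!connected && !connecting) {
		connecting = 1;			/* test_and_set_bit() analogue */
		pthread_mutex_unlock(&lock);
		printf("thread %ld: connecting\n", (long)arg);
		pthread_mutex_lock(&lock);
		connected = 1;
		connecting = 0;			/* clear_bit() + wake_up_bit() */
		pthread_cond_broadcast(&cond);
	} else {
		while (connecting)		/* wait_on_bit() analogue */
			pthread_cond_wait(&cond, &lock);
		printf("thread %ld: reused connection\n", (long)arg);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	for (long i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, prepare_ds, (void *)i);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}
```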
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9da4bd5..8fbc100 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4766,9 +4766,9 @@
 	if (status != 0)
 		goto out;
 	/* Is this a delegated lock? */
-	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
-		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
+	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
+		goto out;
 	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
 	status = -ENOMEM;
 	if (seqid == NULL)
@@ -5238,9 +5238,8 @@
 	.rpc_release = nfs4_release_lockowner_release,
 };
 
-int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
+static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
 {
-	struct nfs_server *server = lsp->ls_state->owner->so_server;
 	struct nfs_release_lockowner_data *data;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
@@ -6783,26 +6782,76 @@
 	return err;
 }
 
-static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
-{
-	struct nfs41_free_stateid_args args = {
-		.stateid = stateid,
-	};
+struct nfs_free_stateid_data {
+	struct nfs_server *server;
+	struct nfs41_free_stateid_args args;
 	struct nfs41_free_stateid_res res;
+};
+
+static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
+{
+	struct nfs_free_stateid_data *data = calldata;
+	nfs41_setup_sequence(nfs4_get_session(data->server),
+			&data->args.seq_args,
+			&data->res.seq_res,
+			task);
+}
+
+static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
+{
+	struct nfs_free_stateid_data *data = calldata;
+
+	nfs41_sequence_done(task, &data->res.seq_res);
+
+	switch (task->tk_status) {
+	case -NFS4ERR_DELAY:
+		if (nfs4_async_handle_error(task, data->server, NULL) == -EAGAIN)
+			rpc_restart_call_prepare(task);
+	}
+}
+
+static void nfs41_free_stateid_release(void *calldata)
+{
+	kfree(calldata);
+}
+
+const struct rpc_call_ops nfs41_free_stateid_ops = {
+	.rpc_call_prepare = nfs41_free_stateid_prepare,
+	.rpc_call_done = nfs41_free_stateid_done,
+	.rpc_release = nfs41_free_stateid_release,
+};
+
+static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
+		nfs4_stateid *stateid,
+		bool privileged)
+{
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
-		.rpc_argp = &args,
-		.rpc_resp = &res,
 	};
-	int status;
+	struct rpc_task_setup task_setup = {
+		.rpc_client = server->client,
+		.rpc_message = &msg,
+		.callback_ops = &nfs41_free_stateid_ops,
+		.flags = RPC_TASK_ASYNC,
+	};
+	struct nfs_free_stateid_data *data;
 
 	dprintk("NFS call  free_stateid %p\n", stateid);
-	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
-	nfs4_set_sequence_privileged(&args.seq_args);
-	status = nfs4_call_sync_sequence(server->client, server, &msg,
-			&args.seq_args, &res.seq_res);
-	dprintk("NFS reply free_stateid: %d\n", status);
-	return status;
+	data = kmalloc(sizeof(*data), GFP_NOFS);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+	data->server = server;
+	nfs4_stateid_copy(&data->args.stateid, stateid);
+
+	task_setup.callback_data = data;
+
+	msg.rpc_argp = &data->args;
+	msg.rpc_resp = &data->res;
+	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
+	if (privileged)
+		nfs4_set_sequence_privileged(&data->args.seq_args);
+
+	return rpc_run_task(&task_setup);
 }
 
 /**
@@ -6816,15 +6865,29 @@
  */
 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
 {
-	struct nfs4_exception exception = { };
-	int err;
-	do {
-		err = _nfs4_free_stateid(server, stateid);
-		if (err != -NFS4ERR_DELAY)
-			break;
-		nfs4_handle_exception(server, err, &exception);
-	} while (exception.retry);
-	return err;
+	struct rpc_task *task;
+	int ret;
+
+	task = _nfs41_free_stateid(server, stateid, true);
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+	ret = rpc_wait_for_completion_task(task);
+	if (!ret)
+		ret = task->tk_status;
+	rpc_put_task(task);
+	return ret;
+}
+
+static int nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
+{
+	struct rpc_task *task;
+
+	task = _nfs41_free_stateid(server, &lsp->ls_stateid, false);
+	nfs4_free_lock_state(server, lsp);
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+	rpc_put_task(task);
+	return 0;
 }
 
 static bool nfs41_match_stateid(const nfs4_stateid *s1,
@@ -6916,6 +6979,7 @@
 	.call_sync = _nfs4_call_sync,
 	.match_stateid = nfs4_match_stateid,
 	.find_root_sec = nfs4_find_root_sec,
+	.free_lock_state = nfs4_release_lockowner,
 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
 	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
 	.state_renewal_ops = &nfs40_state_renewal_ops,
@@ -6933,6 +6997,7 @@
 	.call_sync = nfs4_call_sync_sequence,
 	.match_stateid = nfs41_match_stateid,
 	.find_root_sec = nfs41_find_root_sec,
+	.free_lock_state = nfs41_free_lock_state,
 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
 	.state_renewal_ops = &nfs41_state_renewal_ops,
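
The FREE_STATEID conversion above follows the standard async RPC shape: per-call calldata plus an ops table of prepare/done/release hooks, driven by rpc_run_task(). A userspace model of that pattern (the run loop is a stand-in for the RPC state machine, not the SUNRPC API):

```c
#include <stdio.h>
#include <stdlib.h>

struct task;
struct call_ops {
	void (*prepare)(struct task *, void *);
	void (*done)(struct task *, void *);
	void (*release)(void *);
};
struct task { int tk_status; const struct call_ops *ops; void *calldata; };

static void run_task(struct task *t)
{
	t->ops->prepare(t, t->calldata);
	t->tk_status = 0;			/* pretend the RPC succeeded */
	t->ops->done(t, t->calldata);
	t->ops->release(t->calldata);		/* calldata freed exactly once */
}

static void prep(struct task *t, void *d) { (void)t; (void)d; printf("setup sequence\n"); }
static void done(struct task *t, void *d) { (void)d; printf("status %d\n", t->tk_status); }
static void rel(void *d)                  { free(d); }

static const struct call_ops free_stateid_ops = { prep, done, rel };

int main(void)
{
	struct task t = { 0, &free_stateid_ops, malloc(16) };

	run_task(&t);
	return 0;
}
```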
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0b32f94..300d17d 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -921,6 +921,7 @@
  */
 void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 {
+	struct nfs_server *server;
 	struct nfs4_state *state;
 
 	if (lsp == NULL)
@@ -932,11 +933,13 @@
 	if (list_empty(&state->lock_states))
 		clear_bit(LK_STATE_IN_USE, &state->flags);
 	spin_unlock(&state->state_lock);
+	server = state->owner->so_server;
 	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
-		if (nfs4_release_lockowner(lsp) == 0)
-			return;
-	}
-	nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp);
+		struct nfs_client *clp = server->nfs_client;
+
+		clp->cl_mvops->free_lock_state(server, lsp);
+	} else
+		nfs4_free_lock_state(server, lsp);
 }
 
 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 3c79c58..4be8d13 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -2003,7 +2003,7 @@
 				struct compound_hdr *hdr)
 {
 	encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
-	encode_nfs4_stateid(xdr, args->stateid);
+	encode_nfs4_stateid(xdr, &args->stateid);
 }
 #endif /* CONFIG_NFS_V4_1 */
 
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 1bb071d..a366107 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1610,16 +1610,15 @@
 /*
  * Select a security flavor for this mount.  The selected flavor
  * is planted in args->auth_flavors[0].
+ *
+ * Returns 0 on success, -EACCES on failure.
  */
-static void nfs_select_flavor(struct nfs_parsed_mount_data *args,
+static int nfs_select_flavor(struct nfs_parsed_mount_data *args,
 			      struct nfs_mount_request *request)
 {
 	unsigned int i, count = *(request->auth_flav_len);
 	rpc_authflavor_t flavor;
 
-	if (args->auth_flavors[0] != RPC_AUTH_MAXFLAVOR)
-		goto out;
-
 	/*
 	 * The NFSv2 MNT operation does not return a flavor list.
 	 */
@@ -1634,6 +1633,25 @@
 		goto out_default;
 
 	/*
+	 * If the sec= mount option is used, the specified flavor or AUTH_NULL
+	 * must be in the list returned by the server.
+	 *
+	 * AUTH_NULL has a special meaning when it's in the server list - it
+	 * means that the server will ignore the rpc creds, so any flavor
+	 * can be used.
+	 */
+	if (args->auth_flavors[0] != RPC_AUTH_MAXFLAVOR) {
+		for (i = 0; i < count; i++) {
+			if (args->auth_flavors[0] == request->auth_flavs[i] ||
+			    request->auth_flavs[i] == RPC_AUTH_NULL)
+				goto out;
+		}
+		dfprintk(MOUNT, "NFS: auth flavor %d not supported by server\n",
+			args->auth_flavors[0]);
+		goto out_err;
+	}
+
+	/*
 	 * RFC 2623, section 2.7 suggests we SHOULD prefer the
 	 * flavor listed first.  However, some servers list
 	 * AUTH_NULL first.  Avoid ever choosing AUTH_NULL.
@@ -1653,12 +1671,29 @@
 		}
 	}
 
+	/*
+	 * As a last chance, see if the server list contains AUTH_NULL -
+	 * if it does, use the default flavor.
+	 */
+	for (i = 0; i < count; i++) {
+		if (request->auth_flavs[i] == RPC_AUTH_NULL)
+			goto out_default;
+	}
+
+	dfprintk(MOUNT, "NFS: no auth flavors in common with server\n");
+	goto out_err;
+
 out_default:
-	flavor = RPC_AUTH_UNIX;
+	/* use default if flavor not already set */
+	flavor = (args->auth_flavors[0] == RPC_AUTH_MAXFLAVOR) ?
+		RPC_AUTH_UNIX : args->auth_flavors[0];
 out_set:
 	args->auth_flavors[0] = flavor;
 out:
 	dfprintk(MOUNT, "NFS: using auth flavor %d\n", args->auth_flavors[0]);
+	return 0;
+out_err:
+	return -EACCES;
 }
 
 /*
@@ -1721,8 +1756,7 @@
 		return status;
 	}
 
-	nfs_select_flavor(args, &request);
-	return 0;
+	return nfs_select_flavor(args, &request);
 }
 
 struct dentry *nfs_try_mount(int flags, const char *dev_name,
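
Pulling the control flow of the rewritten nfs_select_flavor() together, the outcomes are (as derived from the hunks above; the middle scan is the pre-existing RFC 2623 loop):

/*
 * nfs_select_flavor() outcomes after this change:
 *
 *   server returned no flavor list (NFSv2 MNT)   -> sec= flavor if given,
 *                                                   else AUTH_UNIX
 *   sec= given, flavor or AUTH_NULL in the list  -> use the sec= flavor
 *   sec= given, neither in the list              -> -EACCES
 *   no sec=, usable non-AUTH_NULL entry in list  -> first such entry
 *   no sec=, only AUTH_NULL in the list          -> AUTH_UNIX
 *   no sec=, nothing in common                   -> -EACCES
 */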
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8ae5abf..27d74a2 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -279,6 +279,7 @@
 {
 	struct svc_fh *current_fh = &cstate->current_fh;
 	__be32 status;
+	int accmode = 0;
 
 	/* We don't know the target directory, and therefore can not
 	* set the change info
@@ -290,9 +291,19 @@
 
 	open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
 		(open->op_iattr.ia_size == 0);
+	/*
+	 * In the delegation case, the client is telling us about an
+	 * open that it *already* performed locally, some time ago.  We
+	 * should let it succeed now if possible.
+	 *
+	 * In the case of a CLAIM_FH open, on the other hand, the client
+	 * may be counting on us to enforce permissions (the Linux 4.1
+	 * client uses this for normal opens, for example).
+	 */
+	if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
+		accmode = NFSD_MAY_OWNER_OVERRIDE;
 
-	status = do_open_permission(rqstp, current_fh, open,
-				    NFSD_MAY_OWNER_OVERRIDE);
+	status = do_open_permission(rqstp, current_fh, open, accmode);
 
 	return status;
 }
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 899ca26..4e9a21d 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -146,7 +146,7 @@
  * then disable recovery tracking.
  */
 static void
-legacy_recdir_name_error(int error)
+legacy_recdir_name_error(struct nfs4_client *clp, int error)
 {
 	printk(KERN_ERR "NFSD: unable to generate recoverydir "
 			"name (%d).\n", error);
@@ -159,9 +159,7 @@
 	if (error == -ENOENT) {
 		printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
 			"Reboot recovery will not function correctly!\n");
-
-		/* the argument is ignored by the legacy exit function */
-		nfsd4_client_tracking_exit(NULL);
+		nfsd4_client_tracking_exit(clp->net);
 	}
 }
 
@@ -184,7 +182,7 @@
 
 	status = nfs4_make_rec_clidname(dname, &clp->cl_name);
 	if (status)
-		return legacy_recdir_name_error(status);
+		return legacy_recdir_name_error(clp, status);
 
 	status = nfs4_save_creds(&original_cred);
 	if (status < 0)
@@ -341,7 +339,7 @@
 
 	status = nfs4_make_rec_clidname(dname, &clp->cl_name);
 	if (status)
-		return legacy_recdir_name_error(status);
+		return legacy_recdir_name_error(clp, status);
 
 	status = mnt_want_write_file(nn->rec_file);
 	if (status)
@@ -601,7 +599,7 @@
 
 	status = nfs4_make_rec_clidname(dname, &clp->cl_name);
 	if (status) {
-		legacy_recdir_name_error(status);
+		legacy_recdir_name_error(clp, status);
 		return status;
 	}
 
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index cf02f55..689fb60 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -25,7 +25,7 @@
 #include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/writeback.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
 #include "nilfs.h"
 #include "btnode.h"
 #include "segment.h"
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index d0be29f..6c80083 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #include <asm/ioctls.h>
 
@@ -857,6 +858,22 @@
 	return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+				int, fanotify_fd, unsigned int, flags,
+				__u32, mask0, __u32, mask1, int, dfd,
+				const char  __user *, pathname)
+{
+	return sys_fanotify_mark(fanotify_fd, flags,
+#ifdef __BIG_ENDIAN
+				((__u64)mask0 << 32) | mask1,
+#else
+				((__u64)mask1 << 32) | mask0,
+#endif
+				 dfd, pathname);
+}
+#endif
+
 /*
  * fanotify_user_setup - Our initialization function.  Note that we cannot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
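
For reference, the compat wrapper above reassembles the 64-bit mask from the two 32-bit halves that a 32-bit ABI passes in adjacent argument slots; on big-endian compat ABIs the first slot carries the high word. A stand-alone sketch of the same reassembly (the helper name is made up for illustration):

/* Illustration only: rebuilding a __u64 from two __u32 argument halves. */
static inline __u64 fanotify_join_mask(__u32 mask0, __u32 mask1)
{
#ifdef __BIG_ENDIAN
	return ((__u64)mask0 << 32) | mask1;	/* high word passed first */
#else
	return ((__u64)mask1 << 32) | mask0;	/* low word passed first */
#endif
}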
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 1da4b81..c5670b8 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/uio.h>
 #include <linux/writeback.h>
+#include <linux/aio.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index d3e118c..2778b02 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -28,6 +28,7 @@
 #include <linux/quotaops.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
+#include <linux/aio.h>
 
 #include "aops.h"
 #include "attrib.h"
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index ffb2da3..f671e49 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,6 +22,8 @@
 #ifndef OCFS2_AOPS_H
 #define OCFS2_AOPS_H
 
+#include <linux/aio.h>
+
 handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
 							 struct page *page,
 							 unsigned from,
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 12ae194..3a44a64 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2322,7 +2322,7 @@
 	status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
 				      arg_flags, subclass, _RET_IP_);
 	if (status < 0) {
-		if (status != -EAGAIN && status != -EIOCBRETRY)
+		if (status != -EAGAIN)
 			mlog_errno(status);
 		goto bail;
 	}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 88924a3..621fc73 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -147,8 +147,6 @@
 int ocfs2_mark_inode_dirty(handle_t *handle,
 			   struct inode *inode,
 			   struct buffer_head *bh);
-int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb);
-int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb);
 struct buffer_head *ocfs2_bread(struct inode *inode,
 				int block, int *err, int reada);
 
diff --git a/fs/pipe.c b/fs/pipe.c
index a029a14..d2c45e1 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -21,6 +21,7 @@
 #include <linux/audit.h>
 #include <linux/syscalls.h>
 #include <linux/fcntl.h>
+#include <linux/aio.h>
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 32cbd7c..1376e5a 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -438,6 +438,7 @@
 		pr_err("memory size too small, minimum is %zu\n",
 			cxt->console_size + cxt->record_size +
 			cxt->ftrace_size);
+		err = -EINVAL;
 		goto fail_cnt;
 	}
 
@@ -455,6 +456,7 @@
 	spin_lock_init(&cxt->pstore.buf_lock);
 	if (!cxt->pstore.buf) {
 		pr_err("cannot allocate pstore buffer\n");
+		err = -ENOMEM;
 		goto fail_clear;
 	}
 
diff --git a/fs/read_write.c b/fs/read_write.c
index 90ba3b3..0343000 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -9,6 +9,7 @@
 #include <linux/fcntl.h>
 #include <linux/file.h>
 #include <linux/uio.h>
+#include <linux/aio.h>
 #include <linux/fsnotify.h>
 #include <linux/security.h>
 #include <linux/export.h>
@@ -329,16 +330,6 @@
 	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
-static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
-{
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	if (!kiocbIsKicked(iocb))
-		schedule();
-	else
-		kiocbClearKicked(iocb);
-	__set_current_state(TASK_RUNNING);
-}
-
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
 	struct iovec iov = { .iov_base = buf, .iov_len = len };
@@ -350,13 +341,7 @@
 	kiocb.ki_left = len;
 	kiocb.ki_nbytes = len;
 
-	for (;;) {
-		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
-		if (ret != -EIOCBRETRY)
-			break;
-		wait_on_retry_sync_kiocb(&kiocb);
-	}
-
+	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 	if (-EIOCBQUEUED == ret)
 		ret = wait_on_sync_kiocb(&kiocb);
 	*ppos = kiocb.ki_pos;
@@ -406,13 +391,7 @@
 	kiocb.ki_left = len;
 	kiocb.ki_nbytes = len;
 
-	for (;;) {
-		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
-		if (ret != -EIOCBRETRY)
-			break;
-		wait_on_retry_sync_kiocb(&kiocb);
-	}
-
+	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
 	if (-EIOCBQUEUED == ret)
 		ret = wait_on_sync_kiocb(&kiocb);
 	*ppos = kiocb.ki_pos;
@@ -592,13 +571,7 @@
 	kiocb.ki_left = len;
 	kiocb.ki_nbytes = len;
 
-	for (;;) {
-		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
-		if (ret != -EIOCBRETRY)
-			break;
-		wait_on_retry_sync_kiocb(&kiocb);
-	}
-
+	ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
 	if (ret == -EIOCBQUEUED)
 		ret = wait_on_sync_kiocb(&kiocb);
 	*ppos = kiocb.ki_pos;
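
With -EIOCBRETRY and the kick machinery gone, every synchronous path above reduces to the same contract: submit once, and sleep only if the operation was queued. A condensed sketch (ki_left/ki_nbytes setup elided):

/* Sketch: the remaining sync-over-async contract. */
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = *ppos;

ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
if (ret == -EIOCBQUEUED)
	/* aio_complete() will wake this sleeper exactly once */
	ret = wait_on_sync_kiocb(&kiocb);
*ppos = kiocb.ki_pos;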
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index ea5061fd..77d6d47 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/quotaops.h>
 #include <linux/swap.h>
+#include <linux/aio.h>
 
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index afcadcc..742fdd4 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -97,7 +97,7 @@
 static int can_dirty(struct reiserfs_journal_cnode *cn);
 static int journal_join(struct reiserfs_transaction_handle *th,
 			struct super_block *sb, unsigned long nblocks);
-static int release_journal_dev(struct super_block *super,
+static void release_journal_dev(struct super_block *super,
 			       struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
 				 struct reiserfs_journal_list *jl);
@@ -2532,23 +2532,13 @@
 	SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
 }
 
-static int release_journal_dev(struct super_block *super,
+static void release_journal_dev(struct super_block *super,
 			       struct reiserfs_journal *journal)
 {
-	int result;
-
-	result = 0;
-
 	if (journal->j_dev_bd != NULL) {
-		result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
+		blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
 		journal->j_dev_bd = NULL;
 	}
-
-	if (result != 0) {
-		reiserfs_warning(super, "sh-457",
-				 "Cannot release journal device: %i", result);
-	}
-	return result;
 }
 
 static int journal_init_dev(struct super_block *super,
diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c
index e1a7779..f373bde 100644
--- a/fs/romfs/mmap-nommu.c
+++ b/fs/romfs/mmap-nommu.c
@@ -49,8 +49,11 @@
 		return (unsigned long) -EINVAL;
 
 	offset += ROMFS_I(inode)->i_dataoffset;
-	if (offset > mtd->size - len)
+	if (offset >= mtd->size)
 		return (unsigned long) -EINVAL;
+	/* the mapping mustn't extend beyond the EOF */
+	if ((offset + len) > mtd->size)
+		len = mtd->size - offset;
 
 	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
 	if (ret == -EOPNOTSUPP)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index f12189d..1437453 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -50,6 +50,7 @@
  */
 
 #include "ubifs.h"
+#include <linux/aio.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/slab.h>
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7a12e48..b6d15d3 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/crc-itu-t.h>
 #include <linux/mpage.h>
+#include <linux/aio.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index cc33aaf..399e8ce 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -69,6 +69,19 @@
 
 	  If unsure, say N.
 
+config XFS_WARN
+	bool "XFS Verbose Warnings"
+	depends on XFS_FS && !XFS_DEBUG
+	help
+	  Say Y here to get an XFS build with many additional warnings.
+	  It converts ASSERT checks to WARN, so it will log any out-of-bounds
+	  conditions that would otherwise go unnoticed. It is much lighter
+	  weight than XFS_DEBUG: it does not modify algorithms and will not
+	  cause the kernel to panic on non-fatal errors.
+
+	  However, as with XFS_DEBUG, it is only advisable to use this if you
+	  are debugging a particular problem.
+
 config XFS_DEBUG
 	bool "XFS Debugging support"
 	depends on XFS_FS
diff --git a/fs/xfs/mrlock.h b/fs/xfs/mrlock.h
index ff6a198..e3c92d1 100644
--- a/fs/xfs/mrlock.h
+++ b/fs/xfs/mrlock.h
@@ -22,12 +22,12 @@
 
 typedef struct {
 	struct rw_semaphore	mr_lock;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	int			mr_writer;
 #endif
 } mrlock_t;
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 #define mrinit(mrp, name)	\
 	do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
 #else
@@ -46,7 +46,7 @@
 static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
 {
 	down_write_nested(&mrp->mr_lock, subclass);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	mrp->mr_writer = 1;
 #endif
 }
@@ -60,7 +60,7 @@
 {
 	if (!down_write_trylock(&mrp->mr_lock))
 		return 0;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	mrp->mr_writer = 1;
 #endif
 	return 1;
@@ -68,7 +68,7 @@
 
 static inline void mrunlock_excl(mrlock_t *mrp)
 {
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	mrp->mr_writer = 0;
 #endif
 	up_write(&mrp->mr_lock);
@@ -81,7 +81,7 @@
 
 static inline void mrdemote(mrlock_t *mrp)
 {
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	mrp->mr_writer = 0;
 #endif
 	downgrade_write(&mrp->mr_lock);
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index d8b11b7..a742c47 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -24,6 +24,11 @@
 #define XFS_BUF_LOCK_TRACKING 1
 #endif
 
+#ifdef CONFIG_XFS_WARN
+#define XFS_WARN 1
+#endif
+
 #include "xfs_linux.h"
 
 #endif	/* __XFS_H__ */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 30c4c14..cafc902 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -386,7 +386,7 @@
 };
 
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 STATIC int
 xfs_allocbt_keys_inorder(
 	struct xfs_btree_cur	*cur,
@@ -442,7 +442,7 @@
 	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
 	.key_diff		= xfs_allocbt_key_diff,
 	.buf_ops		= &xfs_allocbt_buf_ops,
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	.keys_inorder		= xfs_allocbt_keys_inorder,
 	.recs_inorder		= xfs_allocbt_recs_inorder,
 #endif
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3244c98..2b2691b 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -31,6 +31,7 @@
 #include "xfs_vnodeops.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
+#include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 3a86c3f..0c61a22 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -813,7 +813,7 @@
 };
 
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 STATIC int
 xfs_bmbt_keys_inorder(
 	struct xfs_btree_cur	*cur,
@@ -853,7 +853,7 @@
 	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
 	.key_diff		= xfs_bmbt_key_diff,
 	.buf_ops		= &xfs_bmbt_buf_ops,
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	.keys_inorder		= xfs_bmbt_keys_inorder,
 	.recs_inorder		= xfs_bmbt_recs_inorder,
 #endif
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 6e6c915..55e3c7c 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -215,7 +215,7 @@
 
 	const struct xfs_buf_ops	*buf_ops;
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	/* check that k1 is lower than k2 */
 	int	(*keys_inorder)(struct xfs_btree_cur *cur,
 				union xfs_btree_key *k1,
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index ecc6c66..5246de4 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -993,7 +993,7 @@
 	xfs_dir2_leaf_t		*leaf1;		/* first leaf structure */
 	xfs_dir2_leaf_t		*leaf2;		/* second leaf structure */
 	int			mid;		/* midpoint leaf index */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	int			oldstale;	/* old count of stale leaves */
 #endif
 	int			oldsum;		/* old total leaf count */
@@ -1022,7 +1022,7 @@
 	ents2 = xfs_dir3_leaf_ents_p(leaf2);
 
 	oldsum = hdr1.count + hdr2.count;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	oldstale = hdr1.stale + hdr2.stale;
 #endif
 	mid = oldsum >> 1;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 054d60c..a5f2042 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -36,6 +36,7 @@
 #include "xfs_ioctl.h"
 #include "xfs_trace.h"
 
+#include <linux/aio.h>
 #include <linux/dcache.h>
 #include <linux/falloc.h>
 #include <linux/pagevec.h>
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index c82ac88..5448eb6 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -272,7 +272,7 @@
 	.verify_write = xfs_inobt_write_verify,
 };
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 STATIC int
 xfs_inobt_keys_inorder(
 	struct xfs_btree_cur	*cur,
@@ -310,7 +310,7 @@
 	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
 	.key_diff		= xfs_inobt_key_diff,
 	.buf_ops		= &xfs_inobt_buf_ops,
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	.keys_inorder		= xfs_inobt_keys_inorder,
 	.recs_inorder		= xfs_inobt_recs_inorder,
 #endif
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 558ef49..efbe1ac 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -287,7 +287,7 @@
 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 }
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 int
 xfs_isilocked(
 	xfs_inode_t		*ip,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d681e34..5e99968 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -422,9 +422,12 @@
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
 
-	kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
-	if (!kbuf)
-		goto out_dput;
+	kbuf = kmem_zalloc(al_hreq.buflen, KM_SLEEP | KM_MAYFAIL);
+	if (!kbuf) {
+		kbuf = kmem_zalloc_large(al_hreq.buflen);
+		if (!kbuf)
+			goto out_dput;
+	}
 
 	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
 	error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
@@ -436,7 +439,10 @@
 		error = -EFAULT;
 
  out_kfree:
-	kfree(kbuf);
+	if (is_vmalloc_addr(kbuf))
+		kmem_free_large(kbuf);
+	else
+		kmem_free(kbuf);
  out_dput:
 	dput(dentry);
 	return error;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 63b8fc4..c0c6625 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -373,9 +373,12 @@
 		return PTR_ERR(dentry);
 
 	error = -ENOMEM;
-	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
-	if (!kbuf)
-		goto out_dput;
+	kbuf = kmem_zalloc(al_hreq.buflen, KM_SLEEP | KM_MAYFAIL);
+	if (!kbuf) {
+		kbuf = kmem_zalloc_large(al_hreq.buflen);
+		if (!kbuf)
+			goto out_dput;
+	}
 
 	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
 	error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
@@ -387,7 +390,10 @@
 		error = -EFAULT;
 
  out_kfree:
-	kfree(kbuf);
+	if (is_vmalloc_addr(kbuf))
+		kmem_free_large(kbuf);
+	else
+		kmem_free(kbuf);
  out_dput:
 	dput(dentry);
 	return error;
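
Both attr-list hunks above apply the same fallback pattern: attempt an ordinary heap allocation that is allowed to fail quickly, fall back to a vmalloc-backed buffer for large buflen values, and choose the matching free routine by inspecting the address. In outline:

/* Sketch of the fallback-allocation pattern used in both hunks. */
kbuf = kmem_zalloc(len, KM_SLEEP | KM_MAYFAIL);	/* may fail fast */
if (!kbuf) {
	kbuf = kmem_zalloc_large(len);		/* vmalloc-backed */
	if (!kbuf)
		return -ENOMEM;
}
/* ... use kbuf ... */
if (is_vmalloc_addr(kbuf))
	kmem_free_large(kbuf);
else
	kmem_free(kbuf);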
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 14e59d9..800f896 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -293,15 +293,7 @@
 #define ASSERT_ALWAYS(expr)	\
 	(unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
-#ifndef DEBUG
-#define ASSERT(expr)	((void)0)
-
-#ifndef STATIC
-# define STATIC static noinline
-#endif
-
-#else /* DEBUG */
-
+#ifdef DEBUG
 #define ASSERT(expr)	\
 	(unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
@@ -309,6 +301,26 @@
 # define STATIC noinline
 #endif
 
+#else	/* !DEBUG */
+
+#ifdef XFS_WARN
+
+#define ASSERT(expr)	\
+	(unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+
+#ifndef STATIC
+# define STATIC static noinline
+#endif
+
+#else	/* !DEBUG && !XFS_WARN */
+
+#define ASSERT(expr)	((void)0)
+
+#ifndef STATIC
+# define STATIC static noinline
+#endif
+
+#endif /* XFS_WARN */
 #endif /* DEBUG */
 
 #endif /* __XFS_LINUX__ */
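
The net effect of the xfs_linux.h restructuring, combined with the new asswarn() in the next hunk, is a three-way ASSERT():

/*
 * ASSERT(expr) after this series, by configuration:
 *
 *   CONFIG_XFS_DEBUG            -> assfail():  xfs_emerg() + BUG()
 *   CONFIG_XFS_WARN (no DEBUG)  -> asswarn():  xfs_warn() + WARN_ON(1)
 *   neither                     -> ((void)0):  compiled out entirely
 *
 * STATIC remains "static noinline" in the XFS_WARN build, matching the
 * plain non-debug build.
 */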
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 331cd9f..9163dc1 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -93,6 +93,14 @@
 }
 
 void
+asswarn(char *expr, char *file, int line)
+{
+	xfs_warn(NULL, "Assertion failed: %s, file: %s, line: %d",
+		expr, file, line);
+	WARN_ON(1);
+}
+
+void
 assfail(char *expr, char *file, int line)
 {
 	xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d",
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 76c8198..8540115 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -57,6 +57,7 @@
 	xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
 
 extern void assfail(char *expr, char *f, int l);
+extern void asswarn(char *expr, char *f, int l);
 
 extern void xfs_hex_dump(void *p, int length);
 
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index cd29f61..a44dba5 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -405,7 +405,7 @@
 	int64_t			t_res_fdblocks_delta; /* on-disk only chg */
 	int64_t			t_frextents_delta;/* superblock freextents chg*/
 	int64_t			t_res_frextents_delta; /* on-disk only chg */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 	int64_t			t_ag_freeblks_delta; /* debugging counter */
 	int64_t			t_ag_flist_delta; /* debugging counter */
 	int64_t			t_ag_btree_delta; /* debugging counter */
@@ -433,7 +433,7 @@
 #define	xfs_trans_get_block_res(tp)	((tp)->t_blk_res)
 #define	xfs_trans_set_sync(tp)		((tp)->t_flags |= XFS_TRANS_SYNC)
 
-#ifdef DEBUG
+#if defined(DEBUG) || defined(XFS_WARN)
 #define	xfs_trans_agblocks_delta(tp, d)	((tp)->t_ag_freeblks_delta += (int64_t)d)
 #define	xfs_trans_agflist_delta(tp, d)	((tp)->t_ag_flist_delta += (int64_t)d)
 #define	xfs_trans_agbtree_delta(tp, d)	((tp)->t_ag_btree_delta += (int64_t)d)
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
new file mode 100644
index 0000000..d09deab
--- /dev/null
+++ b/include/linux/acpi_dma.h
@@ -0,0 +1,116 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of_dma.h
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_ACPI_DMA_H
+#define __LINUX_ACPI_DMA_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/**
+ * struct acpi_dma_spec - slave device DMA resources
+ * @chan_id:	channel unique id
+ * @slave_id:	request line unique id
+ * @dev:	struct device of the DMA controller to be used in the filter
+ *		function
+ */
+struct acpi_dma_spec {
+	int		chan_id;
+	int		slave_id;
+	struct device	*dev;
+};
+
+/**
+ * struct acpi_dma - representation of the registered DMAC
+ * @dma_controllers:	linked list node
+ * @dev:		struct device of this controller
+ * @acpi_dma_xlate:	callback function to find a suitable channel
+ * @data:		private data used by a callback function
+ */
+struct acpi_dma {
+	struct list_head	dma_controllers;
+	struct device		*dev;
+	struct dma_chan		*(*acpi_dma_xlate)
+				(struct acpi_dma_spec *, struct acpi_dma *);
+	void			*data;
+};
+
+/* Used with acpi_dma_simple_xlate() */
+struct acpi_dma_filter_info {
+	dma_cap_mask_t	dma_cap;
+	dma_filter_fn	filter_fn;
+};
+
+#ifdef CONFIG_DMA_ACPI
+
+int acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data);
+int acpi_dma_controller_free(struct device *dev);
+int devm_acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data);
+void devm_acpi_dma_controller_free(struct device *dev);
+
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+						      size_t index);
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+						     const char *name);
+
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+				       struct acpi_dma *adma);
+#else
+
+static inline int acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data)
+{
+	return -ENODEV;
+}
+static inline int acpi_dma_controller_free(struct device *dev)
+{
+	return -ENODEV;
+}
+static inline int devm_acpi_dma_controller_register(struct device *dev,
+		struct dma_chan *(*acpi_dma_xlate)
+		(struct acpi_dma_spec *, struct acpi_dma *),
+		void *data)
+{
+	return -ENODEV;
+}
+static inline void devm_acpi_dma_controller_free(struct device *dev)
+{
+}
+
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
+		struct device *dev, size_t index)
+{
+	return NULL;
+}
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
+		struct device *dev, const char *name)
+{
+	return NULL;
+}
+
+#define acpi_dma_simple_xlate	NULL
+
+#endif
+
+#define acpi_dma_request_slave_channel	acpi_dma_request_slave_chan_by_index
+
+#endif /* __LINUX_ACPI_DMA_H */
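
A sketch of how a DMA controller driver might consume this new header at probe time; everything named my_* is hypothetical, while the acpi_dma_* and dma_cap_set() calls are the ones declared above:

#include <linux/platform_device.h>
#include <linux/acpi_dma.h>

static bool my_filter(struct dma_chan *chan, void *param);	/* hypothetical */

static int my_probe(struct platform_device *pdev)
{
	struct acpi_dma_filter_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = my_filter;

	/* devm_ variant unregisters automatically on driver detach */
	return devm_acpi_dma_controller_register(&pdev->dev,
						 acpi_dma_simple_xlate, info);
}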
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 31ff6db..1bdf965 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -9,91 +9,32 @@
 
 #include <linux/atomic.h>
 
-#define AIO_MAXSEGS		4
-#define AIO_KIOGRP_NR_ATOMIC	8
-
 struct kioctx;
+struct kiocb;
 
-/* Notes on cancelling a kiocb:
- *	If a kiocb is cancelled, aio_complete may return 0 to indicate 
- *	that cancel has not yet disposed of the kiocb.  All cancel 
- *	operations *must* call aio_put_req to dispose of the kiocb 
- *	to guard against races with the completion code.
- */
-#define KIOCB_C_CANCELLED	0x01
-#define KIOCB_C_COMPLETE	0x02
+#define KIOCB_KEY		0
 
-#define KIOCB_SYNC_KEY		(~0U)
-
-/* ki_flags bits */
 /*
- * This may be used for cancel/retry serialization in the future, but
- * for now it's unused and we probably don't want modules to even
- * think they can use it.
+ * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
+ * cancelled or completed (this makes a certain amount of sense because
+ * successful cancellation - io_cancel() - does deliver the completion to
+ * userspace).
+ *
+ * And since most things don't implement kiocb cancellation and we'd really like
+ * kiocb completion to be lockless when possible, we use ki_cancel to
+ * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
+ * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
  */
-/* #define KIF_LOCKED		0 */
-#define KIF_KICKED		1
-#define KIF_CANCELLED		2
+#define KIOCB_CANCELLED		((void *) (~0ULL))
 
-#define kiocbTryLock(iocb)	test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbTryKick(iocb)	test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)
+typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
 
-#define kiocbSetLocked(iocb)	set_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbSetKicked(iocb)	set_bit(KIF_KICKED, &(iocb)->ki_flags)
-#define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-#define kiocbClearLocked(iocb)	clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbClearKicked(iocb)	clear_bit(KIF_KICKED, &(iocb)->ki_flags)
-#define kiocbClearCancelled(iocb)	clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-#define kiocbIsLocked(iocb)	test_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbIsKicked(iocb)	test_bit(KIF_KICKED, &(iocb)->ki_flags)
-#define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-/* is there a better place to document function pointer methods? */
-/**
- * ki_retry	-	iocb forward progress callback
- * @kiocb:	The kiocb struct to advance by performing an operation.
- *
- * This callback is called when the AIO core wants a given AIO operation
- * to make forward progress.  The kiocb argument describes the operation
- * that is to be performed.  As the operation proceeds, perhaps partially,
- * ki_retry is expected to update the kiocb with progress made.  Typically
- * ki_retry is set in the AIO core and it itself calls file_operations
- * helpers.
- *
- * ki_retry's return value determines when the AIO operation is completed
- * and an event is generated in the AIO event ring.  Except the special
- * return values described below, the value that is returned from ki_retry
- * is transferred directly into the completion ring as the operation's
- * resulting status.  Once this has happened ki_retry *MUST NOT* reference
- * the kiocb pointer again.
- *
- * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
- * will be called on the kiocb pointer in the future.  The AIO core will
- * not ask the method again -- ki_retry must ensure forward progress.
- * aio_complete() must be called once and only once in the future, multiple
- * calls may result in undefined behaviour.
- *
- * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb()
- * will be called on the kiocb pointer in the future.  This may happen
- * through generic helpers that associate kiocb->ki_wait with a wait
- * queue head that ki_retry uses via current->io_wait.  It can also happen
- * with custom tracking and manual calls to kick_iocb(), though that is
- * discouraged.  In either case, kick_iocb() must be called once and only
- * once.  ki_retry must ensure forward progress, the AIO core will wait
- * indefinitely for kick_iocb() to be called.
- */
 struct kiocb {
-	struct list_head	ki_run_list;
-	unsigned long		ki_flags;
-	int			ki_users;
-	unsigned		ki_key;		/* id of this request */
+	atomic_t		ki_users;
 
 	struct file		*ki_filp;
-	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
-	int			(*ki_cancel)(struct kiocb *, struct io_event *);
-	ssize_t			(*ki_retry)(struct kiocb *);
+	struct kioctx		*ki_ctx;	/* NULL for sync ops */
+	kiocb_cancel_fn		*ki_cancel;
 	void			(*ki_dtor)(struct kiocb *);
 
 	union {
@@ -117,7 +58,6 @@
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
-	struct list_head	ki_batch;	/* batch allocation */
 
 	/*
 	 * If the aio_resfd field of the userspace iocb is not zero,
@@ -128,106 +68,40 @@
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
 {
-	return kiocb->ki_key == KIOCB_SYNC_KEY;
+	return kiocb->ki_ctx == NULL;
 }
 
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
 	*kiocb = (struct kiocb) {
-			.ki_users = 1,
-			.ki_key = KIOCB_SYNC_KEY,
+			.ki_users = ATOMIC_INIT(1),
+			.ki_ctx = NULL,
 			.ki_filp = filp,
 			.ki_obj.tsk = current,
 		};
 }
 
-#define AIO_RING_MAGIC			0xa10a10a1
-#define AIO_RING_COMPAT_FEATURES	1
-#define AIO_RING_INCOMPAT_FEATURES	0
-struct aio_ring {
-	unsigned	id;	/* kernel internal index number */
-	unsigned	nr;	/* number of io_events */
-	unsigned	head;
-	unsigned	tail;
-
-	unsigned	magic;
-	unsigned	compat_features;
-	unsigned	incompat_features;
-	unsigned	header_length;	/* size of aio_ring */
-
-
-	struct io_event		io_events[0];
-}; /* 128 bytes + ring size */
-
-#define AIO_RING_PAGES	8
-struct aio_ring_info {
-	unsigned long		mmap_base;
-	unsigned long		mmap_size;
-
-	struct page		**ring_pages;
-	spinlock_t		ring_lock;
-	long			nr_pages;
-
-	unsigned		nr, tail;
-
-	struct page		*internal_pages[AIO_RING_PAGES];
-};
-
-static inline unsigned aio_ring_avail(struct aio_ring_info *info,
-					struct aio_ring *ring)
-{
-	return (ring->head + info->nr - 1 - ring->tail) % info->nr;
-}
-
-struct kioctx {
-	atomic_t		users;
-	int			dead;
-	struct mm_struct	*mm;
-
-	/* This needs improving */
-	unsigned long		user_id;
-	struct hlist_node	list;
-
-	wait_queue_head_t	wait;
-
-	spinlock_t		ctx_lock;
-
-	int			reqs_active;
-	struct list_head	active_reqs;	/* used for cancellation */
-	struct list_head	run_list;	/* used for kicked reqs */
-
-	/* sys_io_setup currently limits this to an unsigned int */
-	unsigned		max_reqs;
-
-	struct aio_ring_info	ring_info;
-
-	struct delayed_work	wq;
-
-	struct rcu_head		rcu_head;
-};
-
 /* prototypes */
-extern unsigned aio_max_size;
-
 #ifdef CONFIG_AIO
 extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern int aio_put_req(struct kiocb *iocb);
-extern void kick_iocb(struct kiocb *iocb);
-extern int aio_complete(struct kiocb *iocb, long res, long res2);
+extern void aio_put_req(struct kiocb *iocb);
+extern void aio_complete(struct kiocb *iocb, long res, long res2);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
 			 struct iocb __user *__user *iocbpp, bool compat);
+void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline int aio_put_req(struct kiocb *iocb) { return 0; }
-static inline void kick_iocb(struct kiocb *iocb) { }
-static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
+static inline void aio_put_req(struct kiocb *iocb) { }
+static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
 struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
 static inline long do_io_submit(aio_context_t ctx_id, long nr,
 				struct iocb __user * __user *iocbpp,
 				bool compat) { return 0; }
+static inline void kiocb_set_cancel_fn(struct kiocb *req,
+				       kiocb_cancel_fn *cancel) { }
 #endif /* CONFIG_AIO */
 
 static inline struct kiocb *list_kiocb(struct list_head *h)
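
The comment block above describes the synchronization; a sketch of the cancelling side, patterned on what a kiocb_cancel() helper would do (res is assumed to be a caller-provided struct io_event):

/* Sketch: racing cancel against completion via ki_cancel. */
kiocb_cancel_fn *old, *cancel;

cancel = ACCESS_ONCE(kiocb->ki_cancel);
do {
	if (!cancel || cancel == KIOCB_CANCELLED)
		return -EINVAL;		/* already done or not cancellable */

	old = cancel;
	cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
} while (cancel != old);

return cancel(kiocb, &res);	/* exactly one winner runs the callback */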
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 5a6d718..b20b038 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -84,8 +84,13 @@
 #define	AUDIT_TYPE_CHILD_DELETE 3	/* a child being deleted */
 #define	AUDIT_TYPE_CHILD_CREATE 4	/* a child being created */
 
+/* maximum number of args that audit_socketcall can process */
+#define AUDITSC_ARGS		6
+
 struct filename;
 
+extern void audit_log_session_info(struct audit_buffer *ab);
+
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
 				/* Public API */
@@ -120,7 +125,7 @@
 				       unsigned long a1, unsigned long a2,
 				       unsigned long a3)
 {
-	if (unlikely(!audit_dummy_context()))
+	if (unlikely(current->audit_context))
 		__audit_syscall_entry(arch, major, a0, a1, a2, a3);
 }
 static inline void audit_syscall_exit(void *pt_regs)
@@ -185,12 +190,10 @@
 	return tsk->sessionid;
 }
 
-extern void audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern int __audit_bprm(struct linux_binprm *bprm);
-extern void __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_socketcall(int nargs, unsigned long *args);
 extern int __audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
 extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
@@ -224,10 +227,11 @@
 		return __audit_bprm(bprm);
 	return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
+static inline int audit_socketcall(int nargs, unsigned long *args)
 {
 	if (unlikely(!audit_dummy_context()))
-		__audit_socketcall(nargs, args);
+		return __audit_socketcall(nargs, args);
+	return 0;
 }
 static inline int audit_sockaddr(int len, void *addr)
 {
@@ -340,11 +344,6 @@
 {
 	return -1;
 }
-static inline void audit_log_task_context(struct audit_buffer *ab)
-{ }
-static inline void audit_log_task_info(struct audit_buffer *ab,
-				       struct task_struct *tsk)
-{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -354,8 +353,10 @@
 {
 	return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
-{ }
+static inline int audit_socketcall(int nargs, unsigned long *args)
+{
+	return 0;
+}
 static inline void audit_fd_pair(int fd1, int fd2)
 { }
 static inline int audit_sockaddr(int len, void *addr)
@@ -390,6 +391,11 @@
 #define audit_signals 0
 #endif /* CONFIG_AUDITSYSCALL */
 
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+	return uid_valid(audit_get_loginuid(tsk));
+}
+
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
 				/* Public API */
@@ -429,14 +435,17 @@
 { }
 #endif
 
+extern int audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab,
+				struct task_struct *tsk);
+
 extern int		    audit_update_lsm_rules(void);
 
 				/* Private API (for audit.c only) */
-extern int audit_filter_user(void);
+extern int audit_filter_user(int type);
 extern int audit_filter_type(int type);
 extern int  audit_receive_filter(int type, int pid, int seq,
-				void *data, size_t datasz, kuid_t loginuid,
-				u32 sessionid, u32 sid);
+				void *data, size_t datasz);
 extern int audit_enabled;
 #else /* CONFIG_AUDIT */
 static inline __printf(4, 5)
@@ -476,6 +485,13 @@
 { }
 static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
+static inline int audit_log_task_context(struct audit_buffer *ab)
+{
+	return 0;
+}
+static inline void audit_log_task_info(struct audit_buffer *ab,
+				       struct task_struct *tsk)
+{ }
 #define audit_enabled 0
 #endif /* CONFIG_AUDIT */
 static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
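
Because __audit_socketcall() can now fail, callers gain the ability to propagate the error instead of silently dropping the audit record, roughly:

/* Sketch of a caller adapting to the new int return. */
err = audit_socketcall(nargs, args);
if (err)
	return err;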
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 3504599..c388155 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/atomic.h>
 #include <linux/sysctl.h>
+#include <linux/workqueue.h>
 
 struct page;
 struct device;
@@ -27,7 +28,6 @@
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-	BDI_pending,		/* On its way to being activated */
 	BDI_wb_alloc,		/* Default embedded wb allocated */
 	BDI_async_congested,	/* The async (write) queue is getting full */
 	BDI_sync_congested,	/* The sync queue is getting full */
@@ -53,10 +53,8 @@
 	unsigned int nr;
 
 	unsigned long last_old_flush;	/* last old data flush */
-	unsigned long last_active;	/* last time bdi thread was active */
 
-	struct task_struct *task;	/* writeback thread */
-	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
+	struct delayed_work dwork;	/* work item used for writeback */
 	struct list_head b_dirty;	/* dirty inodes */
 	struct list_head b_io;		/* parked for writeback */
 	struct list_head b_more_io;	/* parked for more writeback */
@@ -123,14 +121,15 @@
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 			enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
-int bdi_writeback_thread(void *data);
+void bdi_writeback_workfn(struct work_struct *work);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
 void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
 
 extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;
-extern struct list_head bdi_pending_list;
+
+extern struct workqueue_struct *bdi_wq;
 
 static inline int wb_has_dirty_io(struct bdi_writeback *wb)
 {
@@ -336,11 +335,6 @@
 	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
 }
 
-static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
-{
-	return bdi == &default_backing_dev_info;
-}
-
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
 	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 820e7aa..ef24466 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -67,6 +67,7 @@
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
+#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
@@ -84,11 +85,6 @@
 	return NULL;
 }
 
-static inline int bio_has_allocated_vec(struct bio *bio)
-{
-	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
-}
-
 /*
  * will die
  */
@@ -136,16 +132,27 @@
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
- * drivers should not use the __ version unless they _really_ want to
- * run through the entire bio and not just pending pieces
+ * drivers should not use the __ version unless they _really_ know what
+ * they're doing
  */
 #define __bio_for_each_segment(bvl, bio, i, start_idx)			\
 	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
 	     i < (bio)->bi_vcnt;					\
 	     bvl++, i++)
 
+/*
+ * drivers should _never_ use the all version - the bio may have been split
+ * before it got to the driver and the driver won't own all of it
+ */
+#define bio_for_each_segment_all(bvl, bio, i)				\
+	for (i = 0;							\
+	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
+	     i++)
+
 #define bio_for_each_segment(bvl, bio, i)				\
-	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
+	for (i = (bio)->bi_idx;						\
+	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
+	     i++)
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
@@ -180,9 +187,12 @@
 	unsigned short		bip_slab;	/* slab the bip came from */
 	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
 	unsigned short		bip_idx;	/* current bip_vec index */
+	unsigned		bip_owns_buf:1;	/* should free bip_buf */
 
 	struct work_struct	bip_work;	/* I/O completion */
-	struct bio_vec		bip_vec[0];	/* embedded bvec array */
+
+	struct bio_vec		*bip_vec;
+	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
@@ -211,6 +221,7 @@
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
+extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
 
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
@@ -245,6 +256,9 @@
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
+extern int submit_bio_wait(int rw, struct bio *bio);
+extern void bio_advance(struct bio *, unsigned);
+
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
 
@@ -279,6 +293,9 @@
 }
 #endif
 
+extern void bio_copy_data(struct bio *dst, struct bio *src);
+extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
+
 extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
 				 unsigned long, unsigned int, int, gfp_t);
 extern struct bio *bio_copy_user_iov(struct request_queue *,
@@ -286,8 +303,8 @@
 				     int, int, gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
-extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
-extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
+extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
+extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #ifdef CONFIG_BLK_CGROUP
@@ -298,39 +315,6 @@
 static inline void bio_disassociate_task(struct bio *bio) { }
 #endif	/* CONFIG_BLK_CGROUP */
 
-/*
- * bio_set is used to allow other portions of the IO system to
- * allocate their own private memory pools for bio and iovec structures.
- * These memory pools in turn all allocate from the bio_slab
- * and the bvec_slabs[].
- */
-#define BIO_POOL_SIZE 2
-#define BIOVEC_NR_POOLS 6
-#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)
-
-struct bio_set {
-	struct kmem_cache *bio_slab;
-	unsigned int front_pad;
-
-	mempool_t *bio_pool;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-	mempool_t *bio_integrity_pool;
-#endif
-	mempool_t *bvec_pool;
-};
-
-struct biovec_slab {
-	int nr_vecs;
-	char *name;
-	struct kmem_cache *slab;
-};
-
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
-
 #ifdef CONFIG_HIGHMEM
 /*
  * remember never ever reenable interrupts between a bvec_kmap_irq and
@@ -527,6 +511,49 @@
 	return bio;
 }
 
+/*
+ * bio_set is used to allow other portions of the IO system to
+ * allocate their own private memory pools for bio and iovec structures.
+ * These memory pools in turn all allocate from the bio_slab
+ * and the bvec_slabs[].
+ */
+#define BIO_POOL_SIZE 2
+#define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)
+
+struct bio_set {
+	struct kmem_cache *bio_slab;
+	unsigned int front_pad;
+
+	mempool_t *bio_pool;
+	mempool_t *bvec_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	mempool_t *bio_integrity_pool;
+	mempool_t *bvec_integrity_pool;
+#endif
+
+	/*
+	 * Deadlock avoidance for stacking block drivers: see comments in
+	 * bio_alloc_bioset() for details
+	 */
+	spinlock_t		rescue_lock;
+	struct bio_list		rescue_list;
+	struct work_struct	rescue_work;
+	struct workqueue_struct	*rescue_workqueue;
+};
+
+struct biovec_slab {
+	int nr_vecs;
+	char *name;
+	struct kmem_cache *slab;
+};
+
+/*
+ * a small number of entries is fine, not going to be performance critical.
+ * basically we just need to survive
+ */
+#define BIO_SPLIT_ENTRIES 2
+
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
 #define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
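
To illustrate the new iterator's contract: bio_for_each_segment_all() is only for bios the caller created and owns outright (for instance, ones filled by the new bio_alloc_pages()), since it walks every bvec from index 0 regardless of bi_idx. A sketch, assuming lowmem pages (zero_fill_bio() remains the general-purpose way to do this):

/* Sketch: zero every page of a bio we allocated ourselves. */
struct bio_vec *bv;
int i;

bio_for_each_segment_all(bv, bio, i)
	memset(page_address(bv->bv_page) + bv->bv_offset, 0, bv->bv_len);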
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 22990cf..fa1abeb 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,6 +118,7 @@
  * BIO_POOL_IDX()
  */
 #define BIO_RESET_BITS	13
+#define BIO_OWNS_VEC	13	/* bio_free() should free bvec */
 
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
@@ -176,6 +177,7 @@
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_KERNEL, 		/* direct IO to kernel pages */
+	__REQ_PM,		/* runtime pm request */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -198,6 +200,8 @@
 	 REQ_SECURE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
+#define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
+
 /* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
 	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
@@ -224,5 +228,6 @@
 #define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE		(1 << __REQ_SECURE)
 #define REQ_KERNEL		(1 << __REQ_KERNEL)
+#define REQ_PM			(1 << __REQ_PM)
 
 #endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 78feda9..2fdb4a4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -361,6 +361,12 @@
 	 */
 	struct kobject kobj;
 
+#ifdef CONFIG_PM_RUNTIME
+	struct device		*dev;
+	int			rpm_status;
+	unsigned int		nr_pending;
+#endif
+
 	/*
 	 * queue settings
 	 */
@@ -838,7 +844,7 @@
 						     unsigned int cmd_flags)
 {
 	if (unlikely(cmd_flags & REQ_DISCARD))
-		return q->limits.max_discard_sectors;
+		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
 	if (unlikely(cmd_flags & REQ_WRITE_SAME))
 		return q->limits.max_write_same_sectors;
@@ -961,6 +967,27 @@
 extern void blk_put_queue(struct request_queue *);
 
 /*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM_RUNTIME
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+	struct device *dev) {}
+static inline int blk_pre_runtime_suspend(struct request_queue *q)
+{
+	return -ENOSYS;
+}
+static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
+static inline void blk_pre_runtime_resume(struct request_queue *q) {}
+static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+#endif
+
+/*
  * blk_plug permits building a queue of related requests by holding the I/O
  * fragments for a short period. This allows merging of sequential requests
  * into single larger request. As the requests are moved from a per-task list to
@@ -1484,7 +1511,7 @@
 
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
-	int (*release) (struct gendisk *, fmode_t);
+	void (*release) (struct gendisk *, fmode_t);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
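
A sketch of how a request-based driver might use the new runtime-PM hooks, assuming it has already called blk_pm_runtime_init(q, dev) at probe time; the my_* names are hypothetical, the blk_* calls are the ones declared above:

static int my_runtime_suspend(struct device *dev)
{
	struct my_device *md = dev_get_drvdata(dev);	/* hypothetical */
	int err = blk_pre_runtime_suspend(md->queue);

	if (err)
		return err;			/* queue still busy */

	err = my_hw_powerdown(md);		/* hypothetical */
	blk_post_runtime_suspend(md->queue, err);
	return err;
}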
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3bff9ce..5047355 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -28,6 +28,7 @@
 struct inode;
 struct cgroup;
 struct css_id;
+struct eventfd_ctx;
 
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index f204a7a..6e7ec64 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -78,3 +78,9 @@
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_BCACHE
+SUBSYS(bcache)
+#endif
+
+/* */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d53c353..7f0c1dd 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -673,6 +673,8 @@
 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
 						 struct compat_timespec __user *interval);
 
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+					    int, const char __user *);
 #else
 
 #define is_compat_task() (0)
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 40b4ef5..282e270 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -25,34 +25,39 @@
 #define __CPU_COOLING_H__
 
 #include <linux/thermal.h>
+#include <linux/cpumask.h>
 
-#define CPUFREQ_COOLING_START		0
-#define CPUFREQ_COOLING_STOP		1
-
-#if defined(CONFIG_CPU_THERMAL) || defined(CONFIG_CPU_THERMAL_MODULE)
+#ifdef CONFIG_CPU_THERMAL
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
  * @clip_cpus: cpumask of cpus where the frequency constraints will happen
  */
-struct thermal_cooling_device *cpufreq_cooling_register(
-		const struct cpumask *clip_cpus);
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus);
 
 /**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int);
 #else /* !CONFIG_CPU_THERMAL */
-static inline struct thermal_cooling_device *cpufreq_cooling_register(
-	const struct cpumask *clip_cpus)
+static inline struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
 	return NULL;
 }
-static inline void cpufreq_cooling_unregister(
-		struct thermal_cooling_device *cdev)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
 	return;
 }
+static inline
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+	return THERMAL_CSTATE_INVALID;
+}
 #endif	/* CONFIG_CPU_THERMAL */
 
 #endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3c86faa..8f04062 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -17,7 +17,7 @@
 #include <linux/completion.h>
 #include <linux/hrtimer.h>
 
-#define CPUIDLE_STATE_MAX	8
+#define CPUIDLE_STATE_MAX	10
 #define CPUIDLE_NAME_LEN	16
 #define CPUIDLE_DESC_LEN	32
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1e483fa..3cd3247 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -79,11 +79,26 @@
 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
 			    struct bio_vec *biovec, int max_size);
 
+/*
+ * These iteration functions are typically used to check (and combine)
+ * properties of underlying devices.
+ * E.g. Does at least one underlying device support flush?
+ *      Does any underlying device not support WRITE_SAME?
+ *
+ * The callout function is called once for each contiguous section of
+ * an underlying device.  State can be maintained in *data.
+ * Return non-zero to stop iterating through any further devices.
+ */
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
 					   struct dm_dev *dev,
 					   sector_t start, sector_t len,
 					   void *data);
 
+/*
+ * This function must iterate through each section of the device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+ */
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
 				      iterate_devices_callout_fn fn,
 				      void *data);
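
A callout matching that contract, patterned on the dm-table.c helpers of this era: it returns non-zero to stop the iteration, and that value becomes the dm_iterate_devices_fn return value.

/* Callout sketch: "does this slice of the table lack WRITE SAME?" */
static int device_not_write_same_capable(struct dm_target *ti,
					 struct dm_dev *dev,
					 sector_t start, sector_t len,
					 void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return !q || !q->limits.max_write_same_sectors;
}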
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 91ac8da..96d3e4a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -967,8 +967,9 @@
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+					dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -978,13 +979,13 @@
 static inline void dma_issue_pending_all(void)
 {
 }
-static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 					      dma_filter_fn fn, void *fn_param)
 {
 	return NULL;
 }
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
-							 char *name)
+							 const char *name)
 {
 	return NULL;
 }
@@ -1005,9 +1006,9 @@
 	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
 
 static inline struct dma_chan
-*__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn,
-				  void *fn_param, struct device *dev,
-				  char *name)
+*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+				  dma_filter_fn fn, void *fn_param,
+				  struct device *dev, char *name)
 {
 	struct dma_chan *chan;
 
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 0c5a18e..1b4d4ee 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -52,7 +52,7 @@
 #endif
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.2"
+#define REL_VERSION "8.4.3"
 #define API_VERSION 1
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 101
@@ -319,7 +319,8 @@
 	SS_IN_TRANSIENT_STATE = -18,  /* Retry after the next state change */
 	SS_CONCURRENT_ST_CHG = -19,   /* Concurrent cluster side state change! */
 	SS_O_VOL_PEER_PRI = -20,
-	SS_AFTER_LAST_ERROR = -21,    /* Keep this at bottom */
+	SS_OUTDATE_WO_CONN = -21,
+	SS_AFTER_LAST_ERROR = -22,    /* Keep this at bottom */
 };
 
 /* from drbd_strings.c */
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 1fa19c5..1fedf2b 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -126,13 +126,12 @@
 #define DRBD_RESYNC_RATE_DEF 250
 #define DRBD_RESYNC_RATE_SCALE 'k'  /* kilobytes */
 
-  /* less than 7 would hit performance unnecessarily.
-   * 919 slots context information per transaction,
-   * 32k activity log, 4k transaction size,
-   * one transaction in flight:
-   * 919 * 7 = 6433 */
+  /* less than 7 would hit performance unnecessarily. */
 #define DRBD_AL_EXTENTS_MIN  7
-#define DRBD_AL_EXTENTS_MAX  6433
+  /* we use u16 as "slot number", (u16)~0 is "FREE".
+   * If you use >= 292 kB on-disk ring buffer,
+   * this is the maximum you can use: */
+#define DRBD_AL_EXTENTS_MAX  0xfffe
 #define DRBD_AL_EXTENTS_DEF  1237
 #define DRBD_AL_EXTENTS_SCALE '1'
 
diff --git a/include/linux/errno.h b/include/linux/errno.h
index f6bf082..89627b9 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -28,6 +28,5 @@
 #define EBADTYPE	527	/* Type not supported by server */
 #define EJUKEBOX	528	/* Request initiated, but will not complete before timeout */
 #define EIOCBQUEUED	529	/* iocb queued, will get completion event */
-#define EIOCBRETRY	530	/* iocb queued, will trigger a retry */
 
 #endif
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index f9a12f6..df6fab8 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -139,7 +139,7 @@
 	__le32 len;		/* length of the extent */
 } __packed;
 
-#define F2FS_MAX_NAME_LEN	256
+#define F2FS_NAME_LEN		255
 #define ADDRS_PER_INODE         923	/* Address Pointers in an Inode */
 #define ADDRS_PER_BLOCK         1018	/* Address Pointers in a Direct Block */
 #define NIDS_PER_BLOCK          1018	/* Node IDs in an Indirect Block */
@@ -165,7 +165,8 @@
 	__le32 i_flags;			/* file attributes */
 	__le32 i_pino;			/* parent inode number */
 	__le32 i_namelen;		/* file name length */
-	__u8 i_name[F2FS_MAX_NAME_LEN];	/* file name for SPOR */
+	__u8 i_name[F2FS_NAME_LEN];	/* file name for SPOR */
+	__u8 i_reserved2;		/* for backward compatibility */
 
 	struct f2fs_extent i_ext;	/* caching a largest extent */
 
@@ -362,10 +363,10 @@
 typedef __le32	f2fs_hash_t;
 
 /* One directory entry slot covers 8bytes-long file name */
-#define F2FS_NAME_LEN		8
-#define F2FS_NAME_LEN_BITS	3
+#define F2FS_SLOT_LEN		8
+#define F2FS_SLOT_LEN_BITS	3
 
-#define GET_DENTRY_SLOTS(x)	((x + F2FS_NAME_LEN - 1) >> F2FS_NAME_LEN_BITS)
+#define GET_DENTRY_SLOTS(x)	((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
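+/* e.g. a 12-byte name needs (12 + 7) >> 3 == 2 slots */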
 
 /* the number of dentry in a block */
 #define NR_DENTRY_IN_BLOCK	214
@@ -377,10 +378,10 @@
 #define SIZE_OF_DENTRY_BITMAP	((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
 					BITS_PER_BYTE)
 #define SIZE_OF_RESERVED	(PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
-				F2FS_NAME_LEN) * \
+				F2FS_SLOT_LEN) * \
 				NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
 
-/* One directory entry slot representing F2FS_NAME_LEN-sized file name */
+/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
 struct f2fs_dir_entry {
 	__le32 hash_code;	/* hash code of file name */
 	__le32 ino;		/* inode number */
@@ -394,7 +395,7 @@
 	__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
 	__u8 reserved[SIZE_OF_RESERVED];
 	struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK];
-	__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_NAME_LEN];
+	__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
 } __packed;
 
 /* file types used in inode_info->flags */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b5a24ba..43db02e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2091,7 +2091,7 @@
 					       void *holder);
 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
 					      void *holder);
-extern int blkdev_put(struct block_device *bdev, fmode_t mode);
+extern void blkdev_put(struct block_device *bdev, fmode_t mode);
 #ifdef CONFIG_SYSFS
 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
 extern void bd_unlink_disk_holder(struct block_device *bdev,
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f83e17a..99d0fbc 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -90,6 +90,8 @@
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
  * STUB   - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (the first time
+ *            register_ftrace_function() is called, it will initialize the ops)
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
@@ -100,6 +102,7 @@
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
 	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
 	FTRACE_OPS_FL_STUB			= 1 << 7,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_hash		*notrace_hash;
 	struct ftrace_hash		*filter_hash;
+	struct mutex			regex_lock;
 #endif
 };
 
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 34e00fb..4372658 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -293,6 +293,7 @@
 	 * caching and such. Which is mostly OK ;-)
 	 */
 	unsigned long		flags;
+	atomic_t		sm_ref;	/* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)				\
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index f6c7ae3..552e3f4 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -39,7 +39,7 @@
 	const char	*label;
 };
 
-#ifdef CONFIG_GENERIC_GPIO
+#ifdef CONFIG_GPIOLIB
 
 #ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
 #include <asm/gpio.h>
@@ -74,7 +74,7 @@
 
 #endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
 
-#else /* ! CONFIG_GENERIC_GPIO */
+#else /* ! CONFIG_GPIOLIB */
 
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -226,7 +226,7 @@
 	WARN_ON(1);
 }
 
-#endif /* ! CONFIG_GENERIC_GPIO */
+#endif /* ! CONFIG_GPIOLIB */
 
 struct device;
 
diff --git a/include/linux/hid.h b/include/linux/hid.h
index af1b86d..0c48991 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -515,7 +515,7 @@
 	struct dentry *debug_rdesc;
 	struct dentry *debug_events;
 	struct list_head debug_list;
-	struct mutex debug_list_lock;
+	spinlock_t  debug_list_lock;
 	wait_queue_head_t debug_wait;
 };
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3a62df3..6b4890f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -189,8 +189,7 @@
 
 extern const struct file_operations hugetlbfs_file_operations;
 extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
-				size_t size, vm_flags_t acct,
+struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
 				struct user_struct **user, int creat_flags,
 				int page_size_log);
 
@@ -209,8 +208,8 @@
 
 #define is_file_hugepages(file)			0
 static inline struct file *
-hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
-		vm_flags_t acctflag, struct user_struct **user, int creat_flags,
+hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
+		struct user_struct **user, int creat_flags,
 		int page_size_log)
 {
 	return ERR_PTR(-ENOSYS);
@@ -288,6 +287,13 @@
 	return hstate_inode(file_inode(f));
 }
 
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+	if (!page_size_log)
+		return &default_hstate;
+	return size_to_hstate(1 << page_size_log);
+}
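+/* e.g. page_size_log == 21 selects the 1 << 21 == 2 MB hstate, if one exists */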
+
 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 {
 	return hstate_file(vma->vm_file);
@@ -352,11 +358,12 @@
 	return h - hstates;
 }
 
-#else
+#else	/* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
+#define hstate_sizelog(s) NULL
 #define hstate_vma(v) NULL
 #define hstate_inode(i) NULL
 #define huge_page_size(h) PAGE_SIZE
@@ -371,6 +378,6 @@
 }
 #define hstate_index_to_shift(index) 0
 #define hstate_index(h) 0
-#endif
+#endif	/* CONFIG_HUGETLB_PAGE */
 
 #endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a470ac3..871a213 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -124,11 +124,13 @@
  * @idp:     idr handle
  * @entry:   the type * to use as cursor
  * @id:      id entry's key
+ *
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL.  This
+ * is convenient for a "not found" value.
  */
-#define idr_for_each_entry(idp, entry, id)				\
-	for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
-	     entry != NULL;                                             \
-	     ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+#define idr_for_each_entry(idp, entry, id)			\
+	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
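+
+/*
+ * Usage sketch (hypothetical idr and object type): find the first entry
+ * whose key matches; on a full pass with no match, entry ends up NULL.
+ */
+	struct my_obj *entry;
+	int id;
+
+	idr_for_each_entry(&my_idr, entry, id)
+		if (entry->key == key)
+			break;
+	/* entry == NULL here means nothing matched */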
 
 /*
  * Don't use the following functions.  These exist only to suppress
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 4972e6e..e15828f 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -39,8 +39,11 @@
  */
 static inline void kref_get(struct kref *kref)
 {
-	WARN_ON(!atomic_read(&kref->refcount));
-	atomic_inc(&kref->refcount);
+	/* If the refcount was 0 before incrementing, then we have a race
+	 * condition: this kref is being freed by some other thread right now.
+	 * In this case one should use kref_get_unless_zero().
+	 */
+	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
 }
 
 /**
@@ -100,7 +103,7 @@
 				 struct mutex *lock)
 {
 	WARN_ON(release == NULL);
-        if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
 		mutex_lock(lock);
 		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
 			mutex_unlock(lock);
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 4019013..4626228 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -256,6 +256,7 @@
 extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
 extern void lc_del(struct lru_cache *lc, struct lc_element *element);
 
+extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
 extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 53acaf6..a51b013 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -903,11 +903,12 @@
 }
 
 enum mlx4_net_trans_promisc_mode {
-	MLX4_FS_PROMISC_NONE = 0,
-	MLX4_FS_PROMISC_UPLINK,
-	/* For future use. Not implemented yet */
-	MLX4_FS_PROMISC_FUNCTION_PORT,
-	MLX4_FS_PROMISC_ALL_MULTI,
+	MLX4_FS_REGULAR = 1,
+	MLX4_FS_ALL_DEFAULT,
+	MLX4_FS_MC_DEFAULT,
+	MLX4_FS_UC_SNIFFER,
+	MLX4_FS_MC_SNIFFER,
+	MLX4_FS_MODE_NUM, /* should be last */
 };
 
 struct mlx4_spec_eth {
@@ -936,7 +937,7 @@
 };
 
 struct mlx4_spec_ib {
-	__be32	r_qpn;
+	__be32  l3_qpn;
 	__be32	qpn_msk;
 	u8	dst_gid[16];
 	u8	dst_gid_msk[16];
@@ -969,6 +970,92 @@
 	u32	qpn;
 };
 
+struct mlx4_net_trans_rule_hw_ctrl {
+	__be16 prio;
+	u8 type;
+	u8 flags;
+	u8 rsvd1;
+	u8 funcid;
+	u8 vep;
+	u8 port;
+	__be32 qpn;
+	__be32 rsvd2;
+};
+
+struct mlx4_net_trans_rule_hw_ib {
+	u8 size;
+	u8 rsvd1;
+	__be16 id;
+	u32 rsvd2;
+	__be32 l3_qpn;
+	__be32 qpn_mask;
+	u8 dst_gid[16];
+	u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	u8	rsvd1[6];
+	u8	dst_mac[6];
+	u16	rsvd2;
+	u8	dst_mac_msk[6];
+	u16	rsvd3;
+	u8	src_mac[6];
+	u16	rsvd4;
+	u8	src_mac_msk[6];
+	u8      rsvd5;
+	u8      ether_type_enable;
+	__be16  ether_type;
+	__be16  vlan_tag_msk;
+	__be16  vlan_tag;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be16	rsvd1[3];
+	__be16	dst_port;
+	__be16	rsvd2;
+	__be16	dst_port_msk;
+	__be16	rsvd3;
+	__be16	src_port;
+	__be16	rsvd4;
+	__be16	src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be32	rsvd1;
+	__be32	dst_ip;
+	__be32	dst_ip_msk;
+	__be32	src_ip;
+	__be32	src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+	union {
+		struct {
+			u8 size;
+			u8 rsvd;
+			__be16 id;
+		};
+		struct mlx4_net_trans_rule_hw_eth eth;
+		struct mlx4_net_trans_rule_hw_ib ib;
+		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+	};
+};
+
+/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
+struct mlx4_flow_handle {
+	u64 reg_id[2];
+};
+
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
 				enum mlx4_net_trans_promisc_mode mode);
 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@ -1018,6 +1105,11 @@
 int mlx4_flow_attach(struct mlx4_dev *dev,
 		     struct mlx4_net_trans_rule *rule, u64 *reg_id);
 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type);
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id);
+int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
 			  int i, int val);
diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h
index 799a069..192e0f7 100644
--- a/include/linux/mlx4/srq.h
+++ b/include/linux/mlx4/srq.h
@@ -39,4 +39,6 @@
 	u32			reserved2[3];
 };
 
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
+
 #endif /* MLX4_SRQ_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1a7f19e..e0c8528 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -951,13 +951,19 @@
  * (see walk_page_range for more details)
  */
 struct mm_walk {
-	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
-	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
-	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
-	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
-	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
-	int (*hugetlb_entry)(pte_t *, unsigned long,
-			     unsigned long, unsigned long, struct mm_walk *);
+	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
+	int (*pud_entry)(pud_t *pud, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
+	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
+	int (*pte_entry)(pte_t *pte, unsigned long addr,
+			 unsigned long next, struct mm_walk *walk);
+	int (*pte_hole)(unsigned long addr, unsigned long next,
+			struct mm_walk *walk);
+	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
+			     unsigned long addr, unsigned long next,
+			     struct mm_walk *walk);
 	struct mm_struct *mm;
 	void *private;
 };
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 4eb0a50..e93837f 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -74,7 +74,7 @@
 
 	/* Called with mtd_table_mutex held; no race with add/remove */
 	int (*open)(struct mtd_blktrans_dev *dev);
-	int (*release)(struct mtd_blktrans_dev *dev);
+	void (*release)(struct mtd_blktrans_dev *dev);
 
 	/* Called on {de,}registration and on subsequent addition/removal
 	   of devices, with mtd_table_mutex held. */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f9ac289..a5cf4e8 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -362,10 +362,10 @@
 struct mtd_part_parser_data;
 
 extern int mtd_device_parse_register(struct mtd_info *mtd,
-			      const char **part_probe_types,
-			      struct mtd_part_parser_data *parser_data,
-			      const struct mtd_partition *defparts,
-			      int defnr_parts);
+				     const char * const *part_probe_types,
+				     struct mtd_part_parser_data *parser_data,
+				     const struct mtd_partition *defparts,
+				     int defnr_parts);
 #define mtd_device_register(master, parts, nr_parts)	\
 	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
 extern int mtd_device_unregister(struct mtd_info *master);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ef52d9c..ab63634 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -86,7 +86,6 @@
 #define NAND_CMD_READOOB	0x50
 #define NAND_CMD_ERASE1		0x60
 #define NAND_CMD_STATUS		0x70
-#define NAND_CMD_STATUS_MULTI	0x71
 #define NAND_CMD_SEQIN		0x80
 #define NAND_CMD_RNDIN		0x85
 #define NAND_CMD_READID		0x90
@@ -105,25 +104,6 @@
 #define NAND_CMD_RNDOUTSTART	0xE0
 #define NAND_CMD_CACHEDPROG	0x15
 
-/* Extended commands for AG-AND device */
-/*
- * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but
- *       there is no way to distinguish that from NAND_CMD_READ0
- *       until the remaining sequence of commands has been completed
- *       so add a high order bit and mask it off in the command.
- */
-#define NAND_CMD_DEPLETE1	0x100
-#define NAND_CMD_DEPLETE2	0x38
-#define NAND_CMD_STATUS_MULTI	0x71
-#define NAND_CMD_STATUS_ERROR	0x72
-/* multi-bank error status (banks 0-3) */
-#define NAND_CMD_STATUS_ERROR0	0x73
-#define NAND_CMD_STATUS_ERROR1	0x74
-#define NAND_CMD_STATUS_ERROR2	0x75
-#define NAND_CMD_STATUS_ERROR3	0x76
-#define NAND_CMD_STATUS_RESET	0x7f
-#define NAND_CMD_STATUS_CLEAR	0xff
-
 #define NAND_CMD_NONE		-1
 
 /* Status bits */
@@ -165,28 +145,8 @@
  */
 /* Buswidth is 16 bit */
 #define NAND_BUSWIDTH_16	0x00000002
-/* Device supports partial programming without padding */
-#define NAND_NO_PADDING		0x00000004
 /* Chip has cache program function */
 #define NAND_CACHEPRG		0x00000008
-/* Chip has copy back function */
-#define NAND_COPYBACK		0x00000010
-/*
- * AND Chip which has 4 banks and a confusing page / block
- * assignment. See Renesas datasheet for further information.
- */
-#define NAND_IS_AND		0x00000020
-/*
- * Chip has a array of 4 pages which can be read without
- * additional ready /busy waits.
- */
-#define NAND_4PAGE_ARRAY	0x00000040
-/*
- * Chip requires that BBT is periodically rewritten to prevent
- * bits from adjacent blocks from 'leaking' in altering data.
- * This happens with the Renesas AG-AND chips, possibly others.
- */
-#define BBT_AUTO_REFRESH	0x00000080
 /*
  * Chip requires ready check on read (for auto-incremented sequential read).
  * True only for small page devices; large page devices do not support
@@ -207,13 +167,10 @@
 #define NAND_SUBPAGE_READ	0x00001000
 
 /* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS \
-	(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
 
 /* Macros to identify the above */
-#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
 #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
 #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
 
 /* Non chip related options */
@@ -361,6 +318,7 @@
  *		any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
  * @read_subpage:	function to read parts of the page covered by ECC;
  *			returns same as read_page()
+ * @write_subpage:	function to write parts of the page covered by ECC.
  * @write_page:	function to write a page according to the ECC generator
  *		requirements.
  * @write_oob_raw:	function to write chip OOB data without ECC
@@ -392,6 +350,9 @@
 			uint8_t *buf, int oob_required, int page);
 	int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
 			uint32_t offs, uint32_t len, uint8_t *buf);
+	int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+			uint32_t offset, uint32_t data_len,
+			const uint8_t *data_buf, int oob_required);
 	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
 			const uint8_t *buf, int oob_required);
 	int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -527,8 +488,8 @@
 	int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
 			int status, int page);
 	int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
-			const uint8_t *buf, int oob_required, int page,
-			int cached, int raw);
+			uint32_t offset, int data_len, const uint8_t *buf,
+			int oob_required, int page, int cached, int raw);
 	int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
 			int feature_addr, uint8_t *subfeature_para);
 	int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -589,25 +550,65 @@
 #define NAND_MFR_MACRONIX	0xc2
 #define NAND_MFR_EON		0x92
 
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have a 512-byte NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts)          \
+	{ .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+	  .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts)                      \
+	{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+	  .options = (opts) }
+
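+/*
+ * Illustrative table entries (example values, not from this patch):
+ *
+ *	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, 0),
+ *	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xf1, 128, 0),
+ */
+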
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
- * @name:	Identify the device type
- * @id:		device ID code
- * @pagesize:	Pagesize in bytes. Either 256 or 512 or 0
- *		If the pagesize is 0, then the real pagesize
- *		and the eraseize are determined from the
- *		extended id bytes in the chip
- * @erasesize:	Size of an erase block in the flash device.
- * @chipsize:	Total chipsize in Mega Bytes
- * @options:	Bitfield to store chip relevant options
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the
+ *          same memory address as @id[0])
+ * @dev_id: device ID part of the full chip ID array (refers to the same
+ *          memory address as @id[1])
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ *            well as the eraseblock size) is determined from the extended NAND
+ *            chip ID array
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: the valid length of @id
+ * @oobsize: OOB size
  */
 struct nand_flash_dev {
 	char *name;
-	int id;
-	unsigned long pagesize;
-	unsigned long chipsize;
-	unsigned long erasesize;
-	unsigned long options;
+	union {
+		struct {
+			uint8_t mfr_id;
+			uint8_t dev_id;
+		};
+		uint8_t id[NAND_MAX_ID_LEN];
+	};
+	unsigned int pagesize;
+	unsigned int chipsize;
+	unsigned int erasesize;
+	unsigned int options;
+	uint16_t id_len;
+	uint16_t oobsize;
 };
 
 /**
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index d2887e7..aa6a263 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -30,7 +30,7 @@
 	unsigned int		pfow_base;
 	char                    *probe_type;
 	struct mtd_partition	*parts;
-	const char		**part_probe_types;
+	const char * const	*part_probe_types;
 };
 
 #endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h
index e07890a..44212d6 100644
--- a/include/linux/mtd/plat-ram.h
+++ b/include/linux/mtd/plat-ram.h
@@ -20,8 +20,8 @@
 
 struct platdata_mtd_ram {
 	const char		*mapname;
-	const char		**map_probes;
-	const char		**probes;
+	const char * const	*map_probes;
+	const char * const	*probes;
 	struct mtd_partition	*partitions;
 	int			 nr_partitions;
 	int			 bankwidth;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 766c5bc..104b62f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1176,7 +1176,7 @@
 
 struct nfs41_free_stateid_args {
 	struct nfs4_sequence_args	seq_args;
-	nfs4_stateid			*stateid;
+	nfs4_stateid			stateid;
 };
 
 struct nfs41_free_stateid_res {
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 4fa3b0b..f451c8d 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -107,6 +107,12 @@
 	__u8			vs[1024];
 };
 
+enum {
+	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
+	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
+	NVME_CTRL_ONCS_DSM			= 1 << 2,
+};
+
 struct nvme_lbaf {
 	__le16			ms;
 	__u8			ds;
@@ -201,11 +207,11 @@
 	__u8			flags;
 	__u16			command_id;
 	__le32			nsid;
-	__u32			cdw2[2];
+	__le32			cdw2[2];
 	__le64			metadata;
 	__le64			prp1;
 	__le64			prp2;
-	__u32			cdw10[6];
+	__le32			cdw10[6];
 };
 
 struct nvme_rw_command {
@@ -246,6 +252,31 @@
 	NVME_RW_DSM_COMPRESSED		= 1 << 7,
 };
 
+struct nvme_dsm_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[2];
+	__le64			prp1;
+	__le64			prp2;
+	__le32			nr;
+	__le32			attributes;
+	__u32			rsvd12[4];
+};
+
+enum {
+	NVME_DSMGMT_IDR		= 1 << 0,
+	NVME_DSMGMT_IDW		= 1 << 1,
+	NVME_DSMGMT_AD		= 1 << 2,
+};
+
+struct nvme_dsm_range {
+	__le32			cattr;
+	__le32			nlb;
+	__le64			slba;
+};
+
 /* Admin commands */
 
 enum nvme_admin_opcode {
@@ -285,6 +316,9 @@
 	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
 	NVME_FEAT_ASYNC_EVENT	= 0x0b,
 	NVME_FEAT_SW_PROGRESS	= 0x0c,
+	NVME_FWACT_REPL		= (0 << 3),
+	NVME_FWACT_REPL_ACTV	= (1 << 3),
+	NVME_FWACT_ACTV		= (2 << 3),
 };
 
 struct nvme_identify {
@@ -362,6 +396,16 @@
 	__u32			rsvd12[4];
 };
 
+struct nvme_format_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2[4];
+	__le32			cdw10;
+	__u32			rsvd11[5];
+};
+
 struct nvme_command {
 	union {
 		struct nvme_common_command common;
@@ -372,6 +416,8 @@
 		struct nvme_create_sq create_sq;
 		struct nvme_delete_queue delete_queue;
 		struct nvme_download_firmware dlfw;
+		struct nvme_format_cmd format;
+		struct nvme_dsm_cmd dsm;
 	};
 };
 
@@ -388,6 +434,7 @@
 	NVME_SC_FUSED_FAIL		= 0x9,
 	NVME_SC_FUSED_MISSING		= 0xa,
 	NVME_SC_INVALID_NS		= 0xb,
+	NVME_SC_CMD_SEQ_ERROR		= 0xc,
 	NVME_SC_LBA_RANGE		= 0x80,
 	NVME_SC_CAP_EXCEEDED		= 0x81,
 	NVME_SC_NS_NOT_READY		= 0x82,
@@ -461,4 +508,111 @@
 #define NVME_IOCTL_ADMIN_CMD	_IOWR('N', 0x41, struct nvme_admin_cmd)
 #define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
 
+#ifdef __KERNEL__
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/kref.h>
+
+#define NVME_IO_TIMEOUT	(5 * HZ)
+
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+	struct list_head node;
+	struct nvme_queue **queues;
+	u32 __iomem *dbs;
+	struct pci_dev *pci_dev;
+	struct dma_pool *prp_page_pool;
+	struct dma_pool *prp_small_pool;
+	int instance;
+	int queue_count;
+	int db_stride;
+	u32 ctrl_config;
+	struct msix_entry *entry;
+	struct nvme_bar __iomem *bar;
+	struct list_head namespaces;
+	struct kref kref;
+	struct miscdevice miscdev;
+	char name[12];
+	char serial[20];
+	char model[40];
+	char firmware_rev[8];
+	u32 max_hw_sectors;
+	u32 stripe_size;
+	u16 oncs;
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+	struct list_head list;
+
+	struct nvme_dev *dev;
+	struct request_queue *queue;
+	struct gendisk *disk;
+
+	int ns_id;
+	int lba_shift;
+	int ms;
+	u64 mode_select_num_blocks;
+	u32 mode_select_block_len;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+	void *private;		/* For the use of the submitter of the I/O */
+	int npages;		/* In the PRP list. 0 means small pool in use */
+	int offset;		/* Of PRP list */
+	int nents;		/* Used in scatterlist */
+	int length;		/* Of data, in bytes */
+	dma_addr_t first_dma;
+	struct scatterlist sg[0];
+};
+
+static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+{
+	return (sector >> (ns->lba_shift - 9));
+}
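+/*
+ * e.g. with 4096-byte LBAs (lba_shift == 12) the shift is 3, so 512-byte
+ * sector 80 maps to namespace block 80 >> 3 == 10.
+ */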
+
+/**
+ * nvme_free_iod - frees an nvme_iod
+ * @dev: The device that the I/O was submitted to
+ * @iod: The memory to free
+ */
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
+
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
+			struct nvme_iod *iod, int total_len, gfp_t gfp);
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+				unsigned long addr, unsigned length);
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+			struct nvme_iod *iod);
+struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
+void put_nvmeq(struct nvme_queue *nvmeq);
+int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+						u32 *result, unsigned timeout);
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
+int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
+							u32 *result);
+int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
+							dma_addr_t dma_addr);
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+			dma_addr_t dma_addr, u32 *result);
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+			dma_addr_t dma_addr, u32 *result);
+
+struct sg_io_hdr;
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_get_version_num(int __user *ip);
+
+#endif
+
 #endif /* _LINUX_NVME_H */
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index d15073e..364dda7 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -25,7 +25,6 @@
 	struct dma_chan		*(*of_dma_xlate)
 				(struct of_phandle_args *, struct of_dma *);
 	void			*of_dma_data;
-	int			use_count;
 };
 
 struct of_dma_filter_info {
@@ -38,9 +37,9 @@
 		struct dma_chan *(*of_dma_xlate)
 		(struct of_phandle_args *, struct of_dma *),
 		void *data);
-extern int of_dma_controller_free(struct device_node *np);
+extern void of_dma_controller_free(struct device_node *np);
 extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
-						     char *name);
+						     const char *name);
 extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 		struct of_dma *ofdma);
 #else
@@ -52,13 +51,12 @@
 	return -ENODEV;
 }
 
-static inline int of_dma_controller_free(struct device_node *np)
+static inline void of_dma_controller_free(struct device_node *np)
 {
-	return -ENODEV;
 }
 
 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
-						     char *name)
+						     const char *name)
 {
 	return NULL;
 }
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2b85c52..c129162 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2147,11 +2147,13 @@
 #define PCI_DEVICE_ID_TIGON3_5705M_2	0x165e
 #define PCI_DEVICE_ID_NX2_57712		0x1662
 #define PCI_DEVICE_ID_NX2_57712E	0x1663
+#define PCI_DEVICE_ID_NX2_57712_MF	0x1663
 #define PCI_DEVICE_ID_TIGON3_5714	0x1668
 #define PCI_DEVICE_ID_TIGON3_5714S	0x1669
 #define PCI_DEVICE_ID_TIGON3_5780	0x166a
 #define PCI_DEVICE_ID_TIGON3_5780S	0x166b
 #define PCI_DEVICE_ID_TIGON3_5705F	0x166e
+#define PCI_DEVICE_ID_NX2_57712_VF	0x166f
 #define PCI_DEVICE_ID_TIGON3_5754M	0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M	0x1673
 #define PCI_DEVICE_ID_TIGON3_5756	0x1674
@@ -2177,13 +2179,15 @@
 #define PCI_DEVICE_ID_TIGON3_5787	0x169b
 #define PCI_DEVICE_ID_TIGON3_5788	0x169c
 #define PCI_DEVICE_ID_TIGON3_5789	0x169d
+#define PCI_DEVICE_ID_NX2_57840_4_10	0x16a1
+#define PCI_DEVICE_ID_NX2_57840_2_20	0x16a2
+#define PCI_DEVICE_ID_NX2_57840_MF	0x16a4
 #define PCI_DEVICE_ID_NX2_57800_MF	0x16a5
 #define PCI_DEVICE_ID_TIGON3_5702X	0x16a6
 #define PCI_DEVICE_ID_TIGON3_5703X	0x16a7
 #define PCI_DEVICE_ID_TIGON3_5704S	0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF	0x16a9
 #define PCI_DEVICE_ID_NX2_5706S		0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF	0x16a4
 #define PCI_DEVICE_ID_NX2_5708S		0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF	0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF	0x16ae
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 731e4ec..e277266 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -4,6 +4,7 @@
 #include <linux/sched.h>
 #include <linux/bug.h>
 #include <linux/mm.h>
+#include <linux/workqueue.h>
 #include <linux/threads.h>
 #include <linux/nsproxy.h>
 #include <linux/kref.h>
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index 1bd5244..bf0a83b 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -50,5 +50,5 @@
 
 void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
 		struct elm_errorvec *err_vec);
-void elm_config(struct device *dev, enum bch_ecc bch_type);
+int elm_config(struct device *dev, enum bch_ecc bch_type);
 #endif /* __ELM_H */
diff --git a/include/linux/platform_data/imx-iram.h b/include/linux/platform_data/imx-iram.h
deleted file mode 100644
index 022690c..0000000
--- a/include/linux/platform_data/imx-iram.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-#include <linux/errno.h>
-
-#ifdef CONFIG_IRAM_ALLOC
-
-int __init iram_init(unsigned long base, unsigned long size);
-void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr);
-void iram_free(unsigned long dma_addr, unsigned int size);
-
-#else
-
-static inline int __init iram_init(unsigned long base, unsigned long size)
-{
-	return -ENOMEM;
-}
-
-static inline void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr)
-{
-	return NULL;
-}
-
-static inline void iram_free(unsigned long base, unsigned long size) {}
-
-#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index 347ce55..3b9377d 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -29,13 +29,6 @@
 void prandom_bytes(void *buf, int nbytes);
 void prandom_seed(u32 seed);
 
-/*
- * These macros are preserved for backward compatibility and should be
- * removed as soon as a transition is finished.
- */
-#define random32() prandom_u32()
-#define srandom32(seed) prandom_seed(seed)
-
 u32 prandom_u32_state(struct rnd_state *);
 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
 
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8da67d6..0616ffe 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -133,10 +133,20 @@
 	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
 } while (0);
 
+/*
+ * Take/release a lock when not the owner will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ *   proper abstraction for this case is completions. ]
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
 #else
 # define down_read_nested(sem, subclass)		down_read(sem)
 # define down_write_nest_lock(sem, nest_lock)	down_write(sem)
 # define down_write_nested(sem, subclass)	down_write(sem)
+# define down_read_non_owner(sem)		down_read(sem)
+# define up_read_non_owner(sem)			up_read(sem)
 #endif
 
 #endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4800e9d..178a8d9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -313,8 +313,6 @@
 struct nsproxy;
 struct user_namespace;
 
-#include <linux/aio.h>
-
 #ifdef CONFIG_MMU
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
@@ -595,6 +593,7 @@
 #endif
 #ifdef CONFIG_AUDIT
 	unsigned audit_tty;
+	unsigned audit_tty_log_passwd;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
 #ifdef CONFIG_CGROUPS
@@ -1413,6 +1412,10 @@
 #ifdef CONFIG_UPROBES
 	struct uprobe_task *utask;
 #endif
+#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
+	unsigned int	sequential_io;
+	unsigned int	sequential_io_avg;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h
new file mode 100644
index 0000000..377b8a5
--- /dev/null
+++ b/include/linux/sudmac.h
@@ -0,0 +1,52 @@
+/*
+ * Header for the SUDMAC driver
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef SUDMAC_H
+#define SUDMAC_H
+
+#include <linux/dmaengine.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sudmac_slave {
+	struct shdma_slave	shdma_slave;	/* Set by the platform */
+};
+
+/*
+ * Supplied by platforms to specify how a DMA channel has to be configured
+ * for a certain peripheral
+ */
+struct sudmac_slave_config {
+	int		slave_id;
+};
+
+struct sudmac_channel {
+	unsigned long	offset;
+	unsigned long	config;
+	unsigned long	wait;		/* The configurable range is 0 to 3 */
+	unsigned long	dint_end_bit;
+};
+
+struct sudmac_pdata {
+	const struct sudmac_slave_config *slave;
+	int slave_num;
+	const struct sudmac_channel *channel;
+	int channel_num;
+};
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_TX_BUFFER_MODE	BIT(0)
+#define SUDMAC_RX_END_MODE	BIT(1)
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_DMA_BIT_CH0	BIT(0)
+#define SUDMAC_DMA_BIT_CH1	BIT(1)
+
+#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e3c0ae9..a386a1c 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -33,8 +33,11 @@
 #define THERMAL_MAX_TRIPS	12
 #define THERMAL_NAME_LENGTH	20
 
+/* invalid cooling state */
+#define THERMAL_CSTATE_INVALID -1UL
+
 /* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT	-1UL
+#define THERMAL_NO_LIMIT	THERMAL_CSTATE_INVALID
 
 /* Unit conversion macros */
 #define KELVIN_TO_CELSIUS(t)	(long)(((long)t-2732 >= 0) ?	\
@@ -184,7 +187,6 @@
 	char name[THERMAL_NAME_LENGTH];
 	int (*throttle)(struct thermal_zone_device *tz, int trip);
 	struct list_head	governor_list;
-	struct module		*owner;
 };
 
 /* Structure that holds binding parameters for a zone */
@@ -237,21 +239,20 @@
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
 		const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
 
 int get_tz_trend(struct thermal_zone_device *, int);
 struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
 		struct thermal_cooling_device *, int);
 void thermal_cdev_update(struct thermal_cooling_device *);
-void notify_thermal_framework(struct thermal_zone_device *, int);
-
-int thermal_register_governor(struct thermal_governor *);
-void thermal_unregister_governor(struct thermal_governor *);
+void thermal_notify_framework(struct thermal_zone_device *, int);
 
 #ifdef CONFIG_NET
 extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
 						enum events event);
 #else
-static int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
 						enum events event)
 {
 	return 0;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7e92bd8..8780bd2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -575,8 +575,7 @@
 extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
-extern int tty_audit_push_task(struct task_struct *tsk,
-			       kuid_t loginuid, u32 sessionid);
+extern int tty_audit_push_current(void);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty,
 		unsigned char *data, size_t size, unsigned icanon)
@@ -594,8 +593,7 @@
 static inline void tty_audit_push(struct tty_struct *tty)
 {
 }
-static inline int tty_audit_push_task(struct task_struct *tsk,
-				      kuid_t loginuid, u32 sessionid)
+static inline int tty_audit_push_current(void)
 {
 	return 0;
 }
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index da46327..f18d641 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -56,6 +56,8 @@
 	struct sk_buff_head	done;
 	struct sk_buff_head	rxq_pause;
 	struct urb		*interrupt;
+	unsigned		interrupt_count;
+	struct mutex		interrupt_mutex;
 	struct usb_anchor	deferred;
 	struct tasklet_struct	bh;
 
@@ -248,4 +250,7 @@
 extern int usbnet_manage_power(struct usbnet *, int);
 extern void usbnet_link_change(struct usbnet *, bool, bool);
 
+extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
+extern void usbnet_status_stop(struct usbnet *dev);
+
 #endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 7cb64d4..ac38be2 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -330,6 +330,92 @@
 	__ret;								\
 })
 
+#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
+({									\
+	int __ret = 0;							\
+	DEFINE_WAIT(__wait);						\
+	struct hrtimer_sleeper __t;					\
+									\
+	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
+			      HRTIMER_MODE_REL);			\
+	hrtimer_init_sleeper(&__t, current);				\
+	if ((timeout).tv64 != KTIME_MAX)				\
+		hrtimer_start_range_ns(&__t.timer, timeout,		\
+				       current->timer_slack_ns,		\
+				       HRTIMER_MODE_REL);		\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, state);			\
+		if (condition)						\
+			break;						\
+		if (state == TASK_INTERRUPTIBLE &&			\
+		    signal_pending(current)) {				\
+			__ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		if (!__t.task) {					\
+			__ret = -ETIME;					\
+			break;						\
+		}							\
+		schedule();						\
+	}								\
+									\
+	hrtimer_cancel(&__t.timer);					\
+	destroy_hrtimer_on_stack(&__t.timer);				\
+	finish_wait(&wq, &__wait);					\
+	__ret;								\
+})
+
+/**
+ * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, as a ktime_t
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true or the timeout elapses.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if @condition became true, or -ETIME if the timeout
+ * elapsed.
+ */
+#define wait_event_hrtimeout(wq, condition, timeout)			\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
+					       TASK_UNINTERRUPTIBLE);	\
+	__ret;								\
+})
+
+/**
+ * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, as a ktime_t
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if @condition became true, -ERESTARTSYS if it was
+ * interrupted by a signal, or -ETIME if the timeout elapsed.
+ */
+#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
+({									\
+	long __ret = 0;							\
+	if (!(condition))						\
+		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
+					       TASK_INTERRUPTIBLE);	\
+	__ret;								\
+})
+
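+/*
+ * Usage sketch (hypothetical driver fields): sleep interruptibly for at
+ * most 100ms while waiting for an interrupt handler to set "done".
+ */
+	long ret = wait_event_interruptible_hrtimeout(priv->wq, priv->done,
+				ktime_set(0, 100 * NSEC_PER_MSEC));
+	if (ret == -ETIME)
+		dev_warn(dev, "timed out waiting for completion\n");
+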
 #define __wait_event_interruptible_exclusive(wq, condition, ret)	\
 do {									\
 	DEFINE_WAIT(__wait);						\
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9a9367c..579a500 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -5,6 +5,7 @@
 #define WRITEBACK_H
 
 #include <linux/sched.h>
+#include <linux/workqueue.h>
 #include <linux/fs.h>
 
 DECLARE_PER_CPU(int, dirty_throttle_leaks);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ef937b5..e2c1e66 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -118,7 +118,7 @@
 
 	enum ex_phy_state phy_state;
 
-	enum sas_dev_type attached_dev_type;
+	enum sas_device_type attached_dev_type;
 	enum sas_linkrate linkrate;
 
 	u8   attached_sata_host:1;
@@ -195,7 +195,7 @@
 
 struct domain_device {
 	spinlock_t done_lock;
-        enum sas_dev_type dev_type;
+	enum sas_device_type dev_type;
 
         enum sas_linkrate linkrate;
         enum sas_linkrate min_linkrate;
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index a6026da..25ac628 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -107,7 +107,7 @@
  *		int exponent: 04;
  *	}
  */
-typedef __be32 __bitwise osd_cdb_offset;
+typedef __be32 osd_cdb_offset;
 
 enum {
 	OSD_OFFSET_UNUSED = 0xFFFFFFFF,
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index be3eb0b..0d2607d 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -90,16 +90,18 @@
 };
 
 /* See sas_discover.c if you plan on changing these */
-enum sas_dev_type {
-	NO_DEVICE   = 0,	  /* protocol */
-	SAS_END_DEV = 1,	  /* protocol */
-	EDGE_DEV    = 2,	  /* protocol */
-	FANOUT_DEV  = 3,	  /* protocol */
-	SAS_HA      = 4,
-	SATA_DEV    = 5,
-	SATA_PM     = 7,
-	SATA_PM_PORT= 8,
-	SATA_PENDING  = 9,
+enum sas_device_type {
+	/* these are SAS protocol defined (attached device type field) */
+	SAS_PHY_UNUSED = 0,
+	SAS_END_DEVICE = 1,
+	SAS_EDGE_EXPANDER_DEVICE = 2,
+	SAS_FANOUT_EXPANDER_DEVICE = 3,
+	/* these are internal to libsas */
+	SAS_HA = 4,
+	SAS_SATA_DEV = 5,
+	SAS_SATA_PM = 7,
+	SAS_SATA_PM_PORT = 8,
+	SAS_SATA_PENDING = 9,
 };
 
 enum sas_protocol {
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index ff71a56..00f41ae 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -32,8 +32,8 @@
 
 static inline int dev_is_sata(struct domain_device *dev)
 {
-	return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-	       dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
+	return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+	       dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
 }
 
 int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a7f9cba..cc64587 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -394,10 +394,18 @@
 			int data_direction, void *buffer, unsigned bufflen,
 			unsigned char *sense, int timeout, int retries,
 			int flag, int *resid);
-extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
-			    int data_direction, void *buffer, unsigned bufflen,
-			    struct scsi_sense_hdr *, int timeout, int retries,
-			    int *resid);
+extern int scsi_execute_req_flags(struct scsi_device *sdev,
+	const unsigned char *cmd, int data_direction, void *buffer,
+	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+	int retries, int *resid, int flags);
+static inline int scsi_execute_req(struct scsi_device *sdev,
+	const unsigned char *cmd, int data_direction, void *buffer,
+	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+	int retries, int *resid)
+{
+	return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
+		bufflen, sshdr, timeout, retries, resid, 0);
+}
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
 
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4a58cca..d0f1602 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -471,14 +471,10 @@
 extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
 extern int iscsi_flashnode_bus_match(struct device *dev,
 				     struct device_driver *drv);
-extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
-
 extern struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
 			  int (*fn)(struct device *dev, void *data));
-
 extern struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-			  void *data,
-			  int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
+
 #endif
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 9b8e088..0bd71e2 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -10,13 +10,6 @@
 struct sas_rphy;
 struct request;
 
-enum sas_device_type {
-	SAS_PHY_UNUSED = 0,
-	SAS_END_DEVICE = 1,
-	SAS_EDGE_EXPANDER_DEVICE = 2,
-	SAS_FANOUT_EXPANDER_DEVICE = 3,
-};
-
 static inline int sas_protocol_ata(enum sas_protocol proto)
 {
 	return ((proto & SAS_PROTOCOL_SATA) ||
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index 28c65e1..e11e179 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -74,7 +74,11 @@
 #define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB)	\
 	unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
 
-/* dB range container */
+/* dB range container:
+ * Items in dB range container must be ordered by their values and by their
+ * dB values. This implies that larger values must correspond with larger
+ * dB values (which is also required for all other mixer controls).
+ */
 /* Each item is: <min> <max> <TLV> */
 #define TLV_DB_RANGE_ITEM(...) \
 	TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
new file mode 100644
index 0000000..3cc5a0b
--- /dev/null
+++ b/include/trace/events/bcache.h
@@ -0,0 +1,271 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bcache
+
+#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BCACHE_H
+
+#include <linux/tracepoint.h>
+
+struct search;
+
+DECLARE_EVENT_CLASS(bcache_request,
+
+	TP_PROTO(struct search *s, struct bio *bio),
+
+	TP_ARGS(s, bio),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(unsigned int,	orig_major		)
+		__field(unsigned int,	orig_minor		)
+		__field(sector_t,	sector			)
+		__field(sector_t,	orig_sector		)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__array(char,		comm,	TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->orig_major	= s->d->disk->major;
+		__entry->orig_minor	= s->d->disk->first_minor;
+		__entry->sector		= bio->bi_sector;
+		__entry->orig_sector	= bio->bi_sector - 16;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->comm,
+		  __entry->orig_major, __entry->orig_minor,
+		  (unsigned long long)__entry->orig_sector)
+);
+
+DEFINE_EVENT(bcache_request, bcache_request_start,
+
+	TP_PROTO(struct search *s, struct bio *bio),
+
+	TP_ARGS(s, bio)
+);
+
+DEFINE_EVENT(bcache_request, bcache_request_end,
+
+	TP_PROTO(struct search *s, struct bio *bio),
+
+	TP_ARGS(s, bio)
+);
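+
+/*
+ * Each DEFINE_EVENT above generates a trace_<name>() call; the request
+ * path is expected to emit these roughly as:
+ *
+ *	trace_bcache_request_start(s, bio);
+ *	...
+ *	trace_bcache_request_end(s, bio);
+ */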
+
+DECLARE_EVENT_CLASS(bcache_bio,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(sector_t,	sector			)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__array(char,		comm,	TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("%d,%d  %s %llu + %u [%s]",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->comm)
+);
+
+
+DEFINE_EVENT(bcache_bio, bcache_passthrough,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_cache_hit,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_cache_miss,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_retry,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_writethrough,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_writeback,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_skip,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_btree_read,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_btree_write,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_dirty,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_dirty,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_moving,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_moving,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_journal_write,
+
+	TP_PROTO(struct bio *bio),
+
+	TP_ARGS(bio)
+);
+
+DECLARE_EVENT_CLASS(bcache_cache_bio,
+
+	TP_PROTO(struct bio *bio,
+		 sector_t orig_sector,
+		 struct block_device *orig_bdev),
+
+	TP_ARGS(bio, orig_sector, orig_bdev),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(dev_t,		orig_dev		)
+		__field(sector_t,	sector			)
+		__field(sector_t,	orig_sector		)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__array(char,		comm,	TASK_COMM_LEN	)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->orig_dev	= orig_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->orig_sector	= orig_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("%d,%d  %s %llu + %u [%s] (from %d,%d %llu)",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs,
+		  (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->comm,
+		  MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
+		  (unsigned long long)__entry->orig_sector)
+);
+
+DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert,
+
+	TP_PROTO(struct bio *bio,
+		 sector_t orig_sector,
+		 struct block_device *orig_bdev),
+
+	TP_ARGS(bio, orig_sector, orig_bdev)
+);
+
+DECLARE_EVENT_CLASS(bcache_gc,
+
+	TP_PROTO(uint8_t *uuid),
+
+	TP_ARGS(uuid),
+
+	TP_STRUCT__entry(
+		__field(uint8_t *,	uuid)
+	),
+
+	TP_fast_assign(
+		__entry->uuid		= uuid;
+	),
+
+	TP_printk("%pU", __entry->uuid)
+);
+
+
+DEFINE_EVENT(bcache_gc, bcache_gc_start,
+
+	TP_PROTO(uint8_t *uuid),
+
+	TP_ARGS(uuid)
+);
+
+DEFINE_EVENT(bcache_gc, bcache_gc_end,
+
+	TP_PROTO(uint8_t *uuid),
+
+	TP_ARGS(uuid)
+);
+
+#endif /* _TRACE_BCACHE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
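As a usage sketch (not part of this patch): each DEFINE_EVENT above generates a trace_<name>() stub that the driver calls at the matching point in the I/O path, and exactly one compilation unit must define CREATE_TRACE_POINTS before including the header so the stub bodies are emitted. Roughly:

/* drivers/md/bcache/<somewhere>.c -- illustrative only */
#define CREATE_TRACE_POINTS
#include <trace/events/bcache.h>

static void demo_trace_request(struct search *s, struct bio *bio)
{
	trace_bcache_request_start(s, bio);	/* bcache_request class */
	/* ... submit and complete the request ... */
	trace_bcache_request_end(s, bio);
}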
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9c14673..60ae7c3 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -244,7 +244,7 @@
 		__entry->dev		= bio->bi_bdev ?
 					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -281,7 +281,7 @@
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 	),
@@ -309,7 +309,7 @@
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -376,7 +376,7 @@
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -404,7 +404,7 @@
 	TP_fast_assign(
 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio ? bio->bi_sector : 0;
-		__entry->nr_sector	= bio ? bio->bi_size >> 9 : 0;
+		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_rw : 0, __entry->nr_sector);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
@@ -580,7 +580,7 @@
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
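The substitution in these hunks is mechanical: in the bio header of this era the helper is the same shift, so the trace output is unchanged while the intent reads better.

/* Paraphrased from include/linux/bio.h at this point in history: */
#define bio_sectors(bio)	((bio)->bi_size >> 9)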
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
new file mode 100644
index 0000000..52ae548
--- /dev/null
+++ b/include/trace/events/f2fs.h
@@ -0,0 +1,682 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM f2fs
+
+#if !defined(_TRACE_F2FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_F2FS_H
+
+#include <linux/tracepoint.h>
+
+#define show_dev(entry)		MAJOR(entry->dev), MINOR(entry->dev)
+#define show_dev_ino(entry)	show_dev(entry), (unsigned long)entry->ino
+
+#define show_block_type(type)						\
+	__print_symbolic(type,						\
+		{ NODE,		"NODE" },				\
+		{ DATA,		"DATA" },				\
+		{ META,		"META" },				\
+		{ META_FLUSH,	"META_FLUSH" })
+
+#define show_bio_type(type)						\
+	__print_symbolic(type,						\
+		{ READ, 	"READ" },				\
+		{ READA, 	"READAHEAD" },				\
+		{ READ_SYNC, 	"READ_SYNC" },				\
+		{ WRITE, 	"WRITE" },				\
+		{ WRITE_SYNC, 	"WRITE_SYNC" },				\
+		{ WRITE_FLUSH,	"WRITE_FLUSH" },			\
+		{ WRITE_FUA, 	"WRITE_FUA" })
+
+#define show_data_type(type)						\
+	__print_symbolic(type,						\
+		{ CURSEG_HOT_DATA,	"Hot DATA" },			\
+		{ CURSEG_WARM_DATA,	"Warm DATA" },			\
+		{ CURSEG_COLD_DATA,	"Cold DATA" },			\
+		{ CURSEG_HOT_NODE,	"Hot NODE" },			\
+		{ CURSEG_WARM_NODE,	"Warm NODE" },			\
+		{ CURSEG_COLD_NODE,	"Cold NODE" },			\
+		{ NO_CHECK_TYPE,	"No TYPE" })
+
+#define show_gc_type(type)						\
+	__print_symbolic(type,						\
+		{ FG_GC,	"Foreground GC" },			\
+		{ BG_GC,	"Background GC" })
+
+#define show_alloc_mode(type)						\
+	__print_symbolic(type,						\
+		{ LFS,	"LFS-mode" },					\
+		{ SSR,	"SSR-mode" })
+
+#define show_victim_policy(type)					\
+	__print_symbolic(type,						\
+		{ GC_GREEDY,	"Greedy" },				\
+		{ GC_CB,	"Cost-Benefit" })
+
+struct victim_sel_policy;
+
+DECLARE_EVENT_CLASS(f2fs__inode,
+
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(ino_t,	pino)
+		__field(umode_t, mode)
+		__field(loff_t,	size)
+		__field(unsigned int, nlink)
+		__field(blkcnt_t, blocks)
+		__field(__u8,	advise)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->pino	= F2FS_I(inode)->i_pino;
+		__entry->mode	= inode->i_mode;
+		__entry->nlink	= inode->i_nlink;
+		__entry->size	= inode->i_size;
+		__entry->blocks	= inode->i_blocks;
+		__entry->advise	= F2FS_I(inode)->i_advise;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, "
+		"i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x",
+		show_dev_ino(__entry),
+		(unsigned long)__entry->pino,
+		__entry->mode,
+		__entry->size,
+		(unsigned int)__entry->nlink,
+		(unsigned long long)__entry->blocks,
+		(unsigned char)__entry->advise)
+);
+
+DECLARE_EVENT_CLASS(f2fs__inode_exit,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(int,	ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, ret = %d",
+		show_dev_ino(__entry),
+		__entry->ret)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
+
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode)
+);
+
+TRACE_EVENT(f2fs_sync_file_exit,
+
+	TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+
+	TP_ARGS(inode, need_cp, datasync, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(bool,	need_cp)
+		__field(int,	datasync)
+		__field(int,	ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->ino		= inode->i_ino;
+		__entry->need_cp	= need_cp;
+		__entry->datasync	= datasync;
+		__entry->ret		= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, "
+		"datasync = %d, ret = %d",
+		show_dev_ino(__entry),
+		__entry->need_cp ? "needed" : "not needed",
+		__entry->datasync,
+		__entry->ret)
+);
+
+TRACE_EVENT(f2fs_sync_fs,
+
+	TP_PROTO(struct super_block *sb, int wait),
+
+	TP_ARGS(sb, wait),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(int,	dirty)
+		__field(int,	wait)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= sb->s_dev;
+		__entry->dirty	= F2FS_SB(sb)->s_dirty;
+		__entry->wait	= wait;
+	),
+
+	TP_printk("dev = (%d,%d), superblock is %s, wait = %d",
+		show_dev(__entry),
+		__entry->dirty ? "dirty" : "not dirty",
+		__entry->wait)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_iget,
+
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_iget_exit,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_evict_inode,
+
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_new_inode,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret)
+);
+
+TRACE_EVENT(f2fs_unlink_enter,
+
+	TP_PROTO(struct inode *dir, struct dentry *dentry),
+
+	TP_ARGS(dir, dentry),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(loff_t,	size)
+		__field(blkcnt_t, blocks)
+		__field(const char *,	name)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dir->i_sb->s_dev;
+		__entry->ino	= dir->i_ino;
+		__entry->size	= dir->i_size;
+		__entry->blocks	= dir->i_blocks;
+		__entry->name	= dentry->d_name.name;
+	),
+
+	TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
+		"i_blocks = %llu, name = %s",
+		show_dev_ino(__entry),
+		__entry->size,
+		(unsigned long long)__entry->blocks,
+		__entry->name)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_truncate,
+
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode)
+);
+
+TRACE_EVENT(f2fs_truncate_data_blocks_range,
+
+	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs, int free),
+
+	TP_ARGS(inode, nid, ofs, free),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(nid_t,	nid)
+		__field(unsigned int,	ofs)
+		__field(int,	free)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->nid	= nid;
+		__entry->ofs	= ofs;
+		__entry->free	= free;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d",
+		show_dev_ino(__entry),
+		(unsigned int)__entry->nid,
+		__entry->ofs,
+		__entry->free)
+);
+
+DECLARE_EVENT_CLASS(f2fs__truncate_op,
+
+	TP_PROTO(struct inode *inode, u64 from),
+
+	TP_ARGS(inode, from),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(loff_t,	size)
+		__field(blkcnt_t, blocks)
+		__field(u64,	from)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->size	= inode->i_size;
+		__entry->blocks	= inode->i_blocks;
+		__entry->from	= from;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, "
+		"start file offset = %llu",
+		show_dev_ino(__entry),
+		__entry->size,
+		(unsigned long long)__entry->blocks,
+		(unsigned long long)__entry->from)
+);
+
+DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_blocks_enter,
+
+	TP_PROTO(struct inode *inode, u64 from),
+
+	TP_ARGS(inode, from)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_blocks_exit,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_inode_blocks_enter,
+
+	TP_PROTO(struct inode *inode, u64 from),
+
+	TP_ARGS(inode, from)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_inode_blocks_exit,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret)
+);
+
+DECLARE_EVENT_CLASS(f2fs__truncate_node,
+
+	TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr),
+
+	TP_ARGS(inode, nid, blk_addr),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(nid_t,	nid)
+		__field(block_t,	blk_addr)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->ino		= inode->i_ino;
+		__entry->nid		= nid;
+		__entry->blk_addr	= blk_addr;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx",
+		show_dev_ino(__entry),
+		(unsigned int)__entry->nid,
+		(unsigned long long)__entry->blk_addr)
+);
+
+DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_nodes_enter,
+
+	TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr),
+
+	TP_ARGS(inode, nid, blk_addr)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_nodes_exit,
+
+	TP_PROTO(struct inode *inode, int ret),
+
+	TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node,
+
+	TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr),
+
+	TP_ARGS(inode, nid, blk_addr)
+);
+
+TRACE_EVENT(f2fs_truncate_partial_nodes,
+
+	TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err),
+
+	TP_ARGS(inode, nid, depth, err),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(nid_t,	nid[3])
+		__field(int,	depth)
+		__field(int,	err)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->nid[0]	= nid[0];
+		__entry->nid[1]	= nid[1];
+		__entry->nid[2]	= nid[2];
+		__entry->depth	= depth;
+		__entry->err	= err;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, "
+		"nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d",
+		show_dev_ino(__entry),
+		(unsigned int)__entry->nid[0],
+		(unsigned int)__entry->nid[1],
+		(unsigned int)__entry->nid[2],
+		__entry->depth,
+		__entry->err)
+);
+
+TRACE_EVENT_CONDITION(f2fs_readpage,
+
+	TP_PROTO(struct page *page, sector_t blkaddr, int type),
+
+	TP_ARGS(page, blkaddr, type),
+
+	TP_CONDITION(page->mapping),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(pgoff_t,	index)
+		__field(sector_t,	blkaddr)
+		__field(int,	type)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= page->mapping->host->i_sb->s_dev;
+		__entry->ino		= page->mapping->host->i_ino;
+		__entry->index		= page->index;
+		__entry->blkaddr	= blkaddr;
+		__entry->type		= type;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+		"blkaddr = 0x%llx, bio_type = %s",
+		show_dev_ino(__entry),
+		(unsigned long)__entry->index,
+		(unsigned long long)__entry->blkaddr,
+		show_bio_type(__entry->type))
+);
+
+TRACE_EVENT(f2fs_get_data_block,
+	TP_PROTO(struct inode *inode, sector_t iblock,
+				struct buffer_head *bh, int ret),
+
+	TP_ARGS(inode, iblock, bh, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(sector_t,	iblock)
+		__field(sector_t,	bh_start)
+		__field(size_t,	bh_size)
+		__field(int,	ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->ino		= inode->i_ino;
+		__entry->iblock		= iblock;
+		__entry->bh_start	= bh->b_blocknr;
+		__entry->bh_size	= bh->b_size;
+		__entry->ret		= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
+		"start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d",
+		show_dev_ino(__entry),
+		(unsigned long long)__entry->iblock,
+		(unsigned long long)__entry->bh_start,
+		(unsigned long long)__entry->bh_size,
+		__entry->ret)
+);
+
+TRACE_EVENT(f2fs_get_victim,
+
+	TP_PROTO(struct super_block *sb, int type, int gc_type,
+			struct victim_sel_policy *p, unsigned int pre_victim,
+			unsigned int prefree, unsigned int free),
+
+	TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(int,	type)
+		__field(int,	gc_type)
+		__field(int,	alloc_mode)
+		__field(int,	gc_mode)
+		__field(unsigned int,	victim)
+		__field(unsigned int,	ofs_unit)
+		__field(unsigned int,	pre_victim)
+		__field(unsigned int,	prefree)
+		__field(unsigned int,	free)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= sb->s_dev;
+		__entry->type		= type;
+		__entry->gc_type	= gc_type;
+		__entry->alloc_mode	= p->alloc_mode;
+		__entry->gc_mode	= p->gc_mode;
+		__entry->victim		= p->min_segno;
+		__entry->ofs_unit	= p->ofs_unit;
+		__entry->pre_victim	= pre_victim;
+		__entry->prefree	= prefree;
+		__entry->free		= free;
+	),
+
+	TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u "
+		"ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u",
+		show_dev(__entry),
+		show_data_type(__entry->type),
+		show_gc_type(__entry->gc_type),
+		show_alloc_mode(__entry->alloc_mode),
+		show_victim_policy(__entry->gc_mode),
+		__entry->victim,
+		__entry->ofs_unit,
+		(int)__entry->pre_victim,
+		__entry->prefree,
+		__entry->free)
+);
+
+TRACE_EVENT(f2fs_fallocate,
+
+	TP_PROTO(struct inode *inode, int mode,
+				loff_t offset, loff_t len, int ret),
+
+	TP_ARGS(inode, mode, offset, len, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(int,	mode)
+		__field(loff_t,	offset)
+		__field(loff_t,	len)
+		__field(loff_t, size)
+		__field(blkcnt_t, blocks)
+		__field(int,	ret)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->mode	= mode;
+		__entry->offset	= offset;
+		__entry->len	= len;
+		__entry->size	= inode->i_size;
+		__entry->blocks = inode->i_blocks;
+		__entry->ret	= ret;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, "
+		"len = %lld,  i_size = %lld, i_blocks = %llu, ret = %d",
+		show_dev_ino(__entry),
+		__entry->mode,
+		(unsigned long long)__entry->offset,
+		(unsigned long long)__entry->len,
+		(unsigned long long)__entry->size,
+		(unsigned long long)__entry->blocks,
+		__entry->ret)
+);
+
+TRACE_EVENT(f2fs_reserve_new_block,
+
+	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+
+	TP_ARGS(inode, nid, ofs_in_node),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(nid_t, nid)
+		__field(unsigned int, ofs_in_node)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->nid	= nid;
+		__entry->ofs_in_node = ofs_in_node;
+	),
+
+	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+		show_dev(__entry),
+		(unsigned int)__entry->nid,
+		__entry->ofs_in_node)
+);
+
+TRACE_EVENT(f2fs_do_submit_bio,
+
+	TP_PROTO(struct super_block *sb, int btype, bool sync, struct bio *bio),
+
+	TP_ARGS(sb, btype, sync, bio),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(int,	btype)
+		__field(bool,	sync)
+		__field(sector_t,	sector)
+		__field(unsigned int,	size)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= sb->s_dev;
+		__entry->btype		= btype;
+		__entry->sync		= sync;
+		__entry->sector		= bio->bi_sector;
+		__entry->size		= bio->bi_size;
+	),
+
+	TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
+		show_dev(__entry),
+		show_block_type(__entry->btype),
+		__entry->sync ? "sync" : "no sync",
+		(unsigned long long)__entry->sector,
+		__entry->size)
+);
+
+TRACE_EVENT(f2fs_submit_write_page,
+
+	TP_PROTO(struct page *page, block_t blk_addr, int type),
+
+	TP_ARGS(page, blk_addr, type),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(ino_t,	ino)
+		__field(int, type)
+		__field(pgoff_t, index)
+		__field(block_t, block)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= page->mapping->host->i_sb->s_dev;
+		__entry->ino	= page->mapping->host->i_ino;
+		__entry->type	= type;
+		__entry->index	= page->index;
+		__entry->block	= blk_addr;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, %s, index = %lu, blkaddr = 0x%llx",
+		show_dev_ino(__entry),
+		show_block_type(__entry->type),
+		(unsigned long)__entry->index,
+		(unsigned long long)__entry->block)
+);
+
+TRACE_EVENT(f2fs_write_checkpoint,
+
+	TP_PROTO(struct super_block *sb, bool is_umount, char *msg),
+
+	TP_ARGS(sb, is_umount, msg),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(bool,	is_umount)
+		__field(char *,	msg)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= sb->s_dev;
+		__entry->is_umount	= is_umount;
+		__entry->msg		= msg;
+	),
+
+	TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
+		show_dev(__entry),
+		__entry->is_umount ? "clean umount" : "consistency",
+		__entry->msg)
+);
+
+#endif /* _TRACE_F2FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
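As with the bcache header, these TRACE_EVENT definitions expand to trace_f2fs_*() stubs that the filesystem calls inline; the conditional variant (f2fs_readpage) only emits when page->mapping is non-NULL. An invented call site for illustration:

/* fs/f2fs/ style call site -- illustrative only */
#include <trace/events/f2fs.h>

static int demo_sync_fs(struct super_block *sb, int sync)
{
	trace_f2fs_sync_fs(sb, sync);
	/* ... write a checkpoint if the superblock is dirty ... */
	return 0;
}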
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 6a16fd2..464ea82 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -183,7 +183,6 @@
 DEFINE_EVENT(writeback_work_class, name, \
 	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
 	TP_ARGS(bdi, work))
-DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
@@ -222,12 +221,8 @@
 
 DEFINE_WRITEBACK_EVENT(writeback_nowork);
 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
-DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
-DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
-DEFINE_WRITEBACK_EVENT(writeback_thread_start);
-DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
 
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 9f096f1..75cef3f 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -246,6 +246,7 @@
 #define AUDIT_OBJ_TYPE	21
 #define AUDIT_OBJ_LEV_LOW	22
 #define AUDIT_OBJ_LEV_HIGH	23
+#define AUDIT_LOGINUID_SET	24
 
 				/* These are ONLY useful when checking
 				 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -369,7 +370,8 @@
 };
 
 struct audit_tty_status {
-	__u32		enabled; /* 1 = enabled, 0 = disabled */
+	__u32		enabled;	/* 1 = enabled, 0 = disabled */
+	__u32		log_passwd;	/* 1 = enabled, 0 = disabled */
 };
 
 /* audit_rule_data supports filter rules with both integer and string
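Userspace view of the extended structure, sketched: because the kernel side (see the kernel/audit.c hunk further down) copies only min(sizeof(s), nlmsg_len) bytes and zero-fills the rest, an old binary that sends just the enabled word keeps working, with log_passwd defaulting to 0. A new-style sender might fill:

/* Hypothetical AUDIT_TTY_SET payload: turn TTY auditing on but keep
 * password-style (icanon, !echo) input out of the audit log. */
struct audit_tty_status s = {
	.enabled    = 1,
	.log_passwd = 0,
};
/* ... wrap s in an AUDIT_TTY_SET netlink message and send it ... */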
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index fa3a5f9..5ef0df5 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -376,12 +376,18 @@
 
 #define BTRFS_QUOTA_CTL_ENABLE	1
 #define BTRFS_QUOTA_CTL_DISABLE	2
-#define BTRFS_QUOTA_CTL_RESCAN	3
+#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED	3
 struct btrfs_ioctl_quota_ctl_args {
 	__u64 cmd;
 	__u64 status;
 };
 
+struct btrfs_ioctl_quota_rescan_args {
+	__u64	flags;
+	__u64   progress;
+	__u64   reserved[6];
+};
+
 struct btrfs_ioctl_qgroup_assign_args {
 	__u64 assign;
 	__u64 src;
@@ -412,7 +418,25 @@
  * search of clone sources doesn't find an extent. UPDATE_EXTENT
  * commands will be sent instead of WRITE commands.
  */
-#define BTRFS_SEND_FLAG_NO_FILE_DATA     0x1
+#define BTRFS_SEND_FLAG_NO_FILE_DATA		0x1
+
+/*
+ * Do not add the leading stream header. Used when multiple snapshots
+ * are sent back to back.
+ */
+#define BTRFS_SEND_FLAG_OMIT_STREAM_HEADER	0x2
+
+/*
+ * Omit the command at the end of the stream that indicates the end
+ * of the stream. This option is used when multiple snapshots are
+ * sent back to back.
+ */
+#define BTRFS_SEND_FLAG_OMIT_END_CMD		0x4
+
+#define BTRFS_SEND_FLAG_MASK \
+	(BTRFS_SEND_FLAG_NO_FILE_DATA | \
+	 BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \
+	 BTRFS_SEND_FLAG_OMIT_END_CMD)
 
 struct btrfs_ioctl_send_args {
 	__s64 send_fd;			/* in */
@@ -502,6 +526,10 @@
 			       struct btrfs_ioctl_qgroup_create_args)
 #define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
 			       struct btrfs_ioctl_qgroup_limit_args)
+#define BTRFS_IOC_QUOTA_RESCAN _IOW(BTRFS_IOCTL_MAGIC, 44, \
+			       struct btrfs_ioctl_quota_rescan_args)
+#define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \
+			       struct btrfs_ioctl_quota_rescan_args)
 #define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
 				   char[BTRFS_LABEL_SIZE])
 #define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
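A hedged userspace sketch of driving the two new ioctls; the flow mirrors what btrfs-progs does, but the function name and error handling here are invented:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int demo_start_quota_rescan(const char *mnt)
{
	struct btrfs_ioctl_quota_rescan_args args = { 0 };
	int fd, ret;

	fd = open(mnt, O_RDONLY);
	if (fd < 0)
		return -1;
	/* kick off the rescan; BTRFS_IOC_QUOTA_RESCAN_STATUS can later
	 * be read into the same struct to poll 'flags' and 'progress' */
	ret = ioctl(fd, BTRFS_IOC_QUOTA_RESCAN, &args);
	close(fd);
	return ret;
}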
diff --git a/include/uapi/linux/if_cablemodem.h b/include/uapi/linux/if_cablemodem.h
index 9ca1007..ee6b3c4 100644
--- a/include/uapi/linux/if_cablemodem.h
+++ b/include/uapi/linux/if_cablemodem.h
@@ -12,11 +12,11 @@
  */
 
 /* some useful defines for sb1000.c and cmconfig.c - fv */
-#define SIOCGCMSTATS		SIOCDEVPRIVATE+0	/* get cable modem stats */
-#define SIOCGCMFIRMWARE		SIOCDEVPRIVATE+1	/* get cm firmware version */
-#define SIOCGCMFREQUENCY	SIOCDEVPRIVATE+2	/* get cable modem frequency */
-#define SIOCSCMFREQUENCY	SIOCDEVPRIVATE+3	/* set cable modem frequency */
-#define SIOCGCMPIDS			SIOCDEVPRIVATE+4	/* get cable modem PIDs */
-#define SIOCSCMPIDS			SIOCDEVPRIVATE+5	/* set cable modem PIDs */
+#define SIOCGCMSTATS		(SIOCDEVPRIVATE+0)	/* get cable modem stats */
+#define SIOCGCMFIRMWARE		(SIOCDEVPRIVATE+1)	/* get cm firmware version */
+#define SIOCGCMFREQUENCY	(SIOCDEVPRIVATE+2)	/* get cable modem frequency */
+#define SIOCSCMFREQUENCY	(SIOCDEVPRIVATE+3)	/* set cable modem frequency */
+#define SIOCGCMPIDS			(SIOCDEVPRIVATE+4)	/* get cable modem PIDs */
+#define SIOCSCMPIDS			(SIOCDEVPRIVATE+5)	/* set cable modem PIDs */
 
 #endif
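The added parentheses matter whenever one of these macros lands inside a larger expression; without them the expansion regroups around the caller's operators. A minimal illustration with a stand-in macro:

#include <linux/sockios.h>

#define BAD	SIOCDEVPRIVATE+2	/* old style */
#define GOOD	(SIOCDEVPRIVATE+2)	/* as patched */

int twice_bad  = 2 * BAD;	/* (2 * SIOCDEVPRIVATE) + 2 -- wrong */
int twice_good = 2 * GOOD;	/* 2 * (SIOCDEVPRIVATE + 2) -- intended */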
diff --git a/ipc/sem.c b/ipc/sem.c
index 899b598..a7e40ed 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -796,6 +796,13 @@
 	struct sem_queue * q;
 
 	semncnt = 0;
+	list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
+		struct sembuf * sops = q->sops;
+		BUG_ON(sops->sem_num != semnum);
+		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
+			semncnt++;
+	}
+
 	list_for_each_entry(q, &sma->sem_pending, list) {
 		struct sembuf * sops = q->sops;
 		int nsops = q->nsops;
@@ -815,6 +822,13 @@
 	struct sem_queue * q;
 
 	semzcnt = 0;
+	list_for_each_entry(q, &sma->sem_base[semnum].sem_pending, list) {
+		struct sembuf * sops = q->sops;
+		BUG_ON(sops->sem_num != semnum);
+		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
+			semzcnt++;
+	}
+
 	list_for_each_entry(q, &sma->sem_pending, list) {
 		struct sembuf * sops = q->sops;
 		int nsops = q->nsops;
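These counts surface in userspace through semctl(), so the fix directly changes what a caller like the following illustrative sketch observes: tasks sleeping on the new per-semaphore pending lists are now included.

#include <sys/sem.h>

/* How many tasks are blocked on semaphore 0 of the set? */
int demo_blocked_count(int semid)
{
	int ncnt = semctl(semid, 0, GETNCNT);	/* waiting for an increase */
	int zcnt = semctl(semid, 0, GETZCNT);	/* waiting for zero */

	return (ncnt < 0 || zcnt < 0) ? -1 : ncnt + zcnt;
}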
diff --git a/ipc/shm.c b/ipc/shm.c
index 8247c49..7e199fa 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -491,10 +491,20 @@
 
 	sprintf (name, "SYSV%08x", key);
 	if (shmflg & SHM_HUGETLB) {
+		struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
+						& SHM_HUGE_MASK);
+		size_t hugesize;
+
+		if (!hs) {
+			error = -EINVAL;
+			goto no_file;
+		}
+		hugesize = ALIGN(size, huge_page_size(hs));
+
 		/* hugetlb_file_setup applies strict accounting */
 		if (shmflg & SHM_NORESERVE)
 			acctflag = VM_NORESERVE;
-		file = hugetlb_file_setup(name, 0, size, acctflag,
+		file = hugetlb_file_setup(name, hugesize, acctflag,
 				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
 				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
 	} else {
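From the caller's side, the huge page size is log2-encoded in the shmget() flags; after this change an unsupported size fails cleanly with EINVAL instead of being accepted. An illustrative request for 2 MB pages, assuming the new uapi definitions from this series are in scope (21 == log2(2 MB); SHM_HUGE_SHIFT is 26 in these headers):

#include <sys/ipc.h>
#include <sys/shm.h>

int demo_huge_segment(size_t size)
{
	return shmget(IPC_PRIVATE, size,
		      IPC_CREAT | SHM_HUGETLB |
		      (21 << SHM_HUGE_SHIFT) | 0600);
}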
diff --git a/kernel/audit.c b/kernel/audit.c
index 0b084fa..21c7fa6 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -49,6 +49,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
 
 #include <linux/audit.h>
 
@@ -265,7 +267,6 @@
 }
 
 static int audit_log_config_change(char *function_name, int new, int old,
-				   kuid_t loginuid, u32 sessionid, u32 sid,
 				   int allow_changes)
 {
 	struct audit_buffer *ab;
@@ -274,29 +275,17 @@
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 	if (unlikely(!ab))
 		return rc;
-	audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
-			 old, from_kuid(&init_user_ns, loginuid), sessionid);
-	if (sid) {
-		char *ctx = NULL;
-		u32 len;
-
-		rc = security_secid_to_secctx(sid, &ctx, &len);
-		if (rc) {
-			audit_log_format(ab, " sid=%u", sid);
-			allow_changes = 0; /* Something weird, deny request */
-		} else {
-			audit_log_format(ab, " subj=%s", ctx);
-			security_release_secctx(ctx, len);
-		}
-	}
+	audit_log_format(ab, "%s=%d old=%d", function_name, new, old);
+	audit_log_session_info(ab);
+	rc = audit_log_task_context(ab);
+	if (rc)
+		allow_changes = 0; /* Something weird, deny request */
 	audit_log_format(ab, " res=%d", allow_changes);
 	audit_log_end(ab);
 	return rc;
 }
 
-static int audit_do_config_change(char *function_name, int *to_change,
-				  int new, kuid_t loginuid, u32 sessionid,
-				  u32 sid)
+static int audit_do_config_change(char *function_name, int *to_change, int new)
 {
 	int allow_changes, rc = 0, old = *to_change;
 
@@ -307,8 +296,7 @@
 		allow_changes = 1;
 
 	if (audit_enabled != AUDIT_OFF) {
-		rc = audit_log_config_change(function_name, new, old, loginuid,
-					     sessionid, sid, allow_changes);
+		rc = audit_log_config_change(function_name, new, old, allow_changes);
 		if (rc)
 			allow_changes = 0;
 	}
@@ -322,44 +310,37 @@
 	return rc;
 }
 
-static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid,
-				u32 sid)
+static int audit_set_rate_limit(int limit)
 {
-	return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
-				      limit, loginuid, sessionid, sid);
+	return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
 }
 
-static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid,
-				   u32 sid)
+static int audit_set_backlog_limit(int limit)
 {
-	return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
-				      limit, loginuid, sessionid, sid);
+	return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
 }
 
-static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_enabled(int state)
 {
 	int rc;
 	if (state < AUDIT_OFF || state > AUDIT_LOCKED)
 		return -EINVAL;
 
-	rc =  audit_do_config_change("audit_enabled", &audit_enabled, state,
-				     loginuid, sessionid, sid);
-
+	rc =  audit_do_config_change("audit_enabled", &audit_enabled, state);
 	if (!rc)
 		audit_ever_enabled |= !!state;
 
 	return rc;
 }
 
-static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_failure(int state)
 {
 	if (state != AUDIT_FAIL_SILENT
 	    && state != AUDIT_FAIL_PRINTK
 	    && state != AUDIT_FAIL_PANIC)
 		return -EINVAL;
 
-	return audit_do_config_change("audit_failure", &audit_failure, state,
-				      loginuid, sessionid, sid);
+	return audit_do_config_change("audit_failure", &audit_failure, state);
 }
 
 /*
@@ -417,34 +398,53 @@
 		consume_skb(skb);
 }
 
-static int kauditd_thread(void *dummy)
+/*
+ * flush_hold_queue - empty the hold queue if auditd appears
+ *
+ * If auditd just started, drain the queue of messages already
+ * sent to syslog/printk.  Remember loss here is ok.  We already
+ * called audit_log_lost() if it didn't go out normally.  So the
+ * race between the skb_dequeue and the next check for audit_pid
+ * doesn't matter.
+ *
+ * If you ever find kauditd to be too slow we can get a perf win
+ * by doing our own locking and keeping better track if there
+ * are messages in this queue.  I don't see the need now, but
+ * in 5 years when I want to play with this again I'll see this
+ * note and still have no friggin idea what I'm thinking today.
+ */
+static void flush_hold_queue(void)
 {
 	struct sk_buff *skb;
 
+	if (!audit_default || !audit_pid)
+		return;
+
+	skb = skb_dequeue(&audit_skb_hold_queue);
+	if (likely(!skb))
+		return;
+
+	while (skb && audit_pid) {
+		kauditd_send_skb(skb);
+		skb = skb_dequeue(&audit_skb_hold_queue);
+	}
+
+	/*
+	 * if auditd just disappeared but we
+	 * dequeued an skb we need to drop ref
+	 */
+	if (skb)
+		consume_skb(skb);
+}
+
+static int kauditd_thread(void *dummy)
+{
 	set_freezable();
 	while (!kthread_should_stop()) {
-		/*
-		 * if auditd just started drain the queue of messages already
-		 * sent to syslog/printk.  remember loss here is ok.  we already
-		 * called audit_log_lost() if it didn't go out normally.  so the
-		 * race between the skb_dequeue and the next check for audit_pid
-		 * doesn't matter.
-		 *
-		 * if you ever find kauditd to be too slow we can get a perf win
-		 * by doing our own locking and keeping better track if there
-		 * are messages in this queue.  I don't see the need now, but
-		 * in 5 years when I want to play with this again I'll see this
-		 * note and still have no friggin idea what i'm thinking today.
-		 */
-		if (audit_default && audit_pid) {
-			skb = skb_dequeue(&audit_skb_hold_queue);
-			if (unlikely(skb)) {
-				while (skb && audit_pid) {
-					kauditd_send_skb(skb);
-					skb = skb_dequeue(&audit_skb_hold_queue);
-				}
-			}
-		}
+		struct sk_buff *skb;
+		DECLARE_WAITQUEUE(wait, current);
+
+		flush_hold_queue();
 
 		skb = skb_dequeue(&audit_skb_queue);
 		wake_up(&audit_backlog_wait);
@@ -453,19 +453,18 @@
 				kauditd_send_skb(skb);
 			else
 				audit_printk_skb(skb);
-		} else {
-			DECLARE_WAITQUEUE(wait, current);
-			set_current_state(TASK_INTERRUPTIBLE);
-			add_wait_queue(&kauditd_wait, &wait);
-
-			if (!skb_queue_len(&audit_skb_queue)) {
-				try_to_freeze();
-				schedule();
-			}
-
-			__set_current_state(TASK_RUNNING);
-			remove_wait_queue(&kauditd_wait, &wait);
+			continue;
 		}
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&kauditd_wait, &wait);
+
+		if (!skb_queue_len(&audit_skb_queue)) {
+			try_to_freeze();
+			schedule();
+		}
+
+		__set_current_state(TASK_RUNNING);
+		remove_wait_queue(&kauditd_wait, &wait);
 	}
 	return 0;
 }
@@ -579,13 +578,14 @@
 		return -EPERM;
 
 	switch (msg_type) {
-	case AUDIT_GET:
 	case AUDIT_LIST:
-	case AUDIT_LIST_RULES:
-	case AUDIT_SET:
 	case AUDIT_ADD:
-	case AUDIT_ADD_RULE:
 	case AUDIT_DEL:
+		return -EOPNOTSUPP;
+	case AUDIT_GET:
+	case AUDIT_SET:
+	case AUDIT_LIST_RULES:
+	case AUDIT_ADD_RULE:
 	case AUDIT_DEL_RULE:
 	case AUDIT_SIGNAL_INFO:
 	case AUDIT_TTY_GET:
@@ -608,12 +608,10 @@
 	return err;
 }
 
-static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
-				     kuid_t auid, u32 ses, u32 sid)
+static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 {
 	int rc = 0;
-	char *ctx = NULL;
-	u32 len;
+	uid_t uid = from_kuid(&init_user_ns, current_uid());
 
 	if (!audit_enabled) {
 		*ab = NULL;
@@ -623,33 +621,21 @@
 	*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
 	if (unlikely(!*ab))
 		return rc;
-	audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
-			 task_tgid_vnr(current),
-			 from_kuid(&init_user_ns, current_uid()),
-			 from_kuid(&init_user_ns, auid), ses);
-	if (sid) {
-		rc = security_secid_to_secctx(sid, &ctx, &len);
-		if (rc)
-			audit_log_format(*ab, " ssid=%u", sid);
-		else {
-			audit_log_format(*ab, " subj=%s", ctx);
-			security_release_secctx(ctx, len);
-		}
-	}
+	audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+	audit_log_session_info(*ab);
+	audit_log_task_context(*ab);
 
 	return rc;
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-	u32			seq, sid;
+	u32			seq;
 	void			*data;
 	struct audit_status	*status_get, status_set;
 	int			err;
 	struct audit_buffer	*ab;
 	u16			msg_type = nlh->nlmsg_type;
-	kuid_t			loginuid; /* loginuid of sender */
-	u32			sessionid;
 	struct audit_sig_info   *sig_data;
 	char			*ctx = NULL;
 	u32			len;
@@ -668,9 +654,6 @@
 			return err;
 		}
 	}
-	loginuid = audit_get_loginuid(current);
-	sessionid = audit_get_sessionid(current);
-	security_task_getsecid(current, &sid);
 	seq  = nlh->nlmsg_seq;
 	data = nlmsg_data(nlh);
 
@@ -691,14 +674,12 @@
 			return -EINVAL;
 		status_get   = (struct audit_status *)data;
 		if (status_get->mask & AUDIT_STATUS_ENABLED) {
-			err = audit_set_enabled(status_get->enabled,
-						loginuid, sessionid, sid);
+			err = audit_set_enabled(status_get->enabled);
 			if (err < 0)
 				return err;
 		}
 		if (status_get->mask & AUDIT_STATUS_FAILURE) {
-			err = audit_set_failure(status_get->failure,
-						loginuid, sessionid, sid);
+			err = audit_set_failure(status_get->failure);
 			if (err < 0)
 				return err;
 		}
@@ -706,22 +687,17 @@
 			int new_pid = status_get->pid;
 
 			if (audit_enabled != AUDIT_OFF)
-				audit_log_config_change("audit_pid", new_pid,
-							audit_pid, loginuid,
-							sessionid, sid, 1);
-
+				audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
 			audit_pid = new_pid;
 			audit_nlk_portid = NETLINK_CB(skb).portid;
 		}
 		if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
-			err = audit_set_rate_limit(status_get->rate_limit,
-						   loginuid, sessionid, sid);
+			err = audit_set_rate_limit(status_get->rate_limit);
 			if (err < 0)
 				return err;
 		}
 		if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
-			err = audit_set_backlog_limit(status_get->backlog_limit,
-						      loginuid, sessionid, sid);
+			err = audit_set_backlog_limit(status_get->backlog_limit);
 		break;
 	case AUDIT_USER:
 	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
@@ -729,25 +705,22 @@
 		if (!audit_enabled && msg_type != AUDIT_USER_AVC)
 			return 0;
 
-		err = audit_filter_user();
+		err = audit_filter_user(msg_type);
 		if (err == 1) {
 			err = 0;
 			if (msg_type == AUDIT_USER_TTY) {
-				err = tty_audit_push_task(current, loginuid,
-							     sessionid);
+				err = tty_audit_push_current();
 				if (err)
 					break;
 			}
-			audit_log_common_recv_msg(&ab, msg_type,
-						  loginuid, sessionid, sid);
-
+			audit_log_common_recv_msg(&ab, msg_type);
 			if (msg_type != AUDIT_USER_TTY)
 				audit_log_format(ab, " msg='%.1024s'",
 						 (char *)data);
 			else {
 				int size;
 
-				audit_log_format(ab, " msg=");
+				audit_log_format(ab, " data=");
 				size = nlmsg_len(nlh);
 				if (size > 0 &&
 				    ((unsigned char *)data)[size - 1] == '\0')
@@ -758,50 +731,24 @@
 			audit_log_end(ab);
 		}
 		break;
-	case AUDIT_ADD:
-	case AUDIT_DEL:
-		if (nlmsg_len(nlh) < sizeof(struct audit_rule))
-			return -EINVAL;
-		if (audit_enabled == AUDIT_LOCKED) {
-			audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-						  loginuid, sessionid, sid);
-
-			audit_log_format(ab, " audit_enabled=%d res=0",
-					 audit_enabled);
-			audit_log_end(ab);
-			return -EPERM;
-		}
-		/* fallthrough */
-	case AUDIT_LIST:
-		err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-					   seq, data, nlmsg_len(nlh),
-					   loginuid, sessionid, sid);
-		break;
 	case AUDIT_ADD_RULE:
 	case AUDIT_DEL_RULE:
 		if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
 			return -EINVAL;
 		if (audit_enabled == AUDIT_LOCKED) {
-			audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-						  loginuid, sessionid, sid);
-
-			audit_log_format(ab, " audit_enabled=%d res=0",
-					 audit_enabled);
+			audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
+			audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled);
 			audit_log_end(ab);
 			return -EPERM;
 		}
 		/* fallthrough */
 	case AUDIT_LIST_RULES:
 		err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-					   seq, data, nlmsg_len(nlh),
-					   loginuid, sessionid, sid);
+					   seq, data, nlmsg_len(nlh));
 		break;
 	case AUDIT_TRIM:
 		audit_trim_trees();
-
-		audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-					  loginuid, sessionid, sid);
-
+		audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
 		audit_log_format(ab, " op=trim res=1");
 		audit_log_end(ab);
 		break;
@@ -831,8 +778,7 @@
 		/* OK, here comes... */
 		err = audit_tag_tree(old, new);
 
-		audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-					  loginuid, sessionid, sid);
+		audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
 
 		audit_log_format(ab, " op=make_equiv old=");
 		audit_log_untrustedstring(ab, old);
@@ -871,27 +817,30 @@
 		struct audit_tty_status s;
 		struct task_struct *tsk = current;
 
-		spin_lock_irq(&tsk->sighand->siglock);
+		spin_lock(&tsk->sighand->siglock);
 		s.enabled = tsk->signal->audit_tty != 0;
-		spin_unlock_irq(&tsk->sighand->siglock);
+		s.log_passwd = tsk->signal->audit_tty_log_passwd;
+		spin_unlock(&tsk->sighand->siglock);
 
 		audit_send_reply(NETLINK_CB(skb).portid, seq,
 				 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
 	case AUDIT_TTY_SET: {
-		struct audit_tty_status *s;
+		struct audit_tty_status s;
 		struct task_struct *tsk = current;
 
-		if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
-			return -EINVAL;
-		s = data;
-		if (s->enabled != 0 && s->enabled != 1)
+		memset(&s, 0, sizeof(s));
+		/* guard against past and future API changes */
+		memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
+		if ((s.enabled != 0 && s.enabled != 1) ||
+		    (s.log_passwd != 0 && s.log_passwd != 1))
 			return -EINVAL;
 
-		spin_lock_irq(&tsk->sighand->siglock);
-		tsk->signal->audit_tty = s->enabled != 0;
-		spin_unlock_irq(&tsk->sighand->siglock);
+		spin_lock(&tsk->sighand->siglock);
+		tsk->signal->audit_tty = s.enabled;
+		tsk->signal->audit_tty_log_passwd = s.log_passwd;
+		spin_unlock(&tsk->sighand->siglock);
 		break;
 	}
 	default:
@@ -1434,6 +1383,14 @@
 	kfree(pathname);
 }
 
+void audit_log_session_info(struct audit_buffer *ab)
+{
+	u32 sessionid = audit_get_sessionid(current);
+	uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+
+	audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid);
+}
+
 void audit_log_key(struct audit_buffer *ab, char *key)
 {
 	audit_log_format(ab, " key=");
@@ -1443,6 +1400,224 @@
 		audit_log_format(ab, "(null)");
 }
 
+void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+{
+	int i;
+
+	audit_log_format(ab, " %s=", prefix);
+	CAP_FOR_EACH_U32(i) {
+		audit_log_format(ab, "%08x",
+				 cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+	}
+}
+
+void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
+{
+	kernel_cap_t *perm = &name->fcap.permitted;
+	kernel_cap_t *inh = &name->fcap.inheritable;
+	int log = 0;
+
+	if (!cap_isclear(*perm)) {
+		audit_log_cap(ab, "cap_fp", perm);
+		log = 1;
+	}
+	if (!cap_isclear(*inh)) {
+		audit_log_cap(ab, "cap_fi", inh);
+		log = 1;
+	}
+
+	if (log)
+		audit_log_format(ab, " cap_fe=%d cap_fver=%x",
+				 name->fcap.fE, name->fcap_ver);
+}
+
+static inline int audit_copy_fcaps(struct audit_names *name,
+				   const struct dentry *dentry)
+{
+	struct cpu_vfs_cap_data caps;
+	int rc;
+
+	if (!dentry)
+		return 0;
+
+	rc = get_vfs_caps_from_disk(dentry, &caps);
+	if (rc)
+		return rc;
+
+	name->fcap.permitted = caps.permitted;
+	name->fcap.inheritable = caps.inheritable;
+	name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+	name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >>
+				VFS_CAP_REVISION_SHIFT;
+
+	return 0;
+}
+
+/* Copy inode data into an audit_names. */
+void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
+		      const struct inode *inode)
+{
+	name->ino   = inode->i_ino;
+	name->dev   = inode->i_sb->s_dev;
+	name->mode  = inode->i_mode;
+	name->uid   = inode->i_uid;
+	name->gid   = inode->i_gid;
+	name->rdev  = inode->i_rdev;
+	security_inode_getsecid(inode, &name->osid);
+	audit_copy_fcaps(name, dentry);
+}
+
+/**
+ * audit_log_name - produce AUDIT_PATH record from struct audit_names
+ * @context: audit_context for the task
+ * @n: audit_names structure with reportable details
+ * @path: optional path to report instead of audit_names->name
+ * @record_num: record number to report when handling a list of names
+ * @call_panic: optional pointer to int that will be updated if secid fails
+ */
+void audit_log_name(struct audit_context *context, struct audit_names *n,
+		    struct path *path, int record_num, int *call_panic)
+{
+	struct audit_buffer *ab;
+	ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
+	if (!ab)
+		return;
+
+	audit_log_format(ab, "item=%d", record_num);
+
+	if (path)
+		audit_log_d_path(ab, " name=", path);
+	else if (n->name) {
+		switch (n->name_len) {
+		case AUDIT_NAME_FULL:
+			/* log the full path */
+			audit_log_format(ab, " name=");
+			audit_log_untrustedstring(ab, n->name->name);
+			break;
+		case 0:
+			/* name was specified as a relative path and the
+			 * directory component is the cwd */
+			audit_log_d_path(ab, " name=", &context->pwd);
+			break;
+		default:
+			/* log the name's directory component */
+			audit_log_format(ab, " name=");
+			audit_log_n_untrustedstring(ab, n->name->name,
+						    n->name_len);
+		}
+	} else
+		audit_log_format(ab, " name=(null)");
+
+	if (n->ino != (unsigned long)-1) {
+		audit_log_format(ab, " inode=%lu"
+				 " dev=%02x:%02x mode=%#ho"
+				 " ouid=%u ogid=%u rdev=%02x:%02x",
+				 n->ino,
+				 MAJOR(n->dev),
+				 MINOR(n->dev),
+				 n->mode,
+				 from_kuid(&init_user_ns, n->uid),
+				 from_kgid(&init_user_ns, n->gid),
+				 MAJOR(n->rdev),
+				 MINOR(n->rdev));
+	}
+	if (n->osid != 0) {
+		char *ctx = NULL;
+		u32 len;
+		if (security_secid_to_secctx(
+			n->osid, &ctx, &len)) {
+			audit_log_format(ab, " osid=%u", n->osid);
+			if (call_panic)
+				*call_panic = 2;
+		} else {
+			audit_log_format(ab, " obj=%s", ctx);
+			security_release_secctx(ctx, len);
+		}
+	}
+
+	audit_log_fcaps(ab, n);
+	audit_log_end(ab);
+}
+
+int audit_log_task_context(struct audit_buffer *ab)
+{
+	char *ctx = NULL;
+	unsigned len;
+	int error;
+	u32 sid;
+
+	security_task_getsecid(current, &sid);
+	if (!sid)
+		return 0;
+
+	error = security_secid_to_secctx(sid, &ctx, &len);
+	if (error) {
+		if (error != -EINVAL)
+			goto error_path;
+		return 0;
+	}
+
+	audit_log_format(ab, " subj=%s", ctx);
+	security_release_secctx(ctx, len);
+	return 0;
+
+error_path:
+	audit_panic("error in audit_log_task_context");
+	return error;
+}
+EXPORT_SYMBOL(audit_log_task_context);
+
+void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+{
+	const struct cred *cred;
+	char name[sizeof(tsk->comm)];
+	struct mm_struct *mm = tsk->mm;
+	char *tty;
+
+	if (!ab)
+		return;
+
+	/* tsk == current */
+	cred = current_cred();
+
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+		tty = tsk->signal->tty->name;
+	else
+		tty = "(none)";
+	spin_unlock_irq(&tsk->sighand->siglock);
+
+	audit_log_format(ab,
+			 " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+			 " euid=%u suid=%u fsuid=%u"
+			 " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
+			 sys_getppid(),
+			 tsk->pid,
+			 from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+			 from_kuid(&init_user_ns, cred->uid),
+			 from_kgid(&init_user_ns, cred->gid),
+			 from_kuid(&init_user_ns, cred->euid),
+			 from_kuid(&init_user_ns, cred->suid),
+			 from_kuid(&init_user_ns, cred->fsuid),
+			 from_kgid(&init_user_ns, cred->egid),
+			 from_kgid(&init_user_ns, cred->sgid),
+			 from_kgid(&init_user_ns, cred->fsgid),
+			 audit_get_sessionid(tsk), tty);
+
+	get_task_comm(name, tsk);
+	audit_log_format(ab, " comm=");
+	audit_log_untrustedstring(ab, name);
+
+	if (mm) {
+		down_read(&mm->mmap_sem);
+		if (mm->exe_file)
+			audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
+		up_read(&mm->mmap_sem);
+	}
+	audit_log_task_context(ab);
+}
+EXPORT_SYMBOL(audit_log_task_info);
+
 /**
  * audit_log_link_denied - report a link restriction denial
 * @operation: specific link operation
@@ -1451,19 +1626,28 @@
 void audit_log_link_denied(const char *operation, struct path *link)
 {
 	struct audit_buffer *ab;
+	struct audit_names *name;
 
+	name = kzalloc(sizeof(*name), GFP_NOFS);
+	if (!name)
+		return;
+
+	/* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */
 	ab = audit_log_start(current->audit_context, GFP_KERNEL,
 			     AUDIT_ANOM_LINK);
 	if (!ab)
-		return;
-	audit_log_format(ab, "op=%s action=denied", operation);
-	audit_log_format(ab, " pid=%d comm=", current->pid);
-	audit_log_untrustedstring(ab, current->comm);
-	audit_log_d_path(ab, " path=", link);
-	audit_log_format(ab, " dev=");
-	audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id);
-	audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino);
+		goto out;
+	audit_log_format(ab, "op=%s", operation);
+	audit_log_task_info(ab, current);
+	audit_log_format(ab, " res=0");
 	audit_log_end(ab);
+
+	/* Generate AUDIT_PATH record with object. */
+	name->type = AUDIT_TYPE_NORMAL;
+	audit_copy_inode(name, link->dentry, link->dentry->d_inode);
+	audit_log_name(current->audit_context, name, link, 0, NULL);
+out:
+	kfree(name);
 }
 
 /**
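For context, the in-tree caller whose output this reshapes is fs/namei.c:may_linkat(); sketched with the ownership and sysctl checks elided, it denies the hardlink and lets the helper above emit both the ANOM_LINK and PATH records:

/* Paraphrased from fs/namei.c:may_linkat() -- illustrative only */
static int demo_may_linkat(struct path *link)
{
	/* ... sysctl_protected_hardlinks and ownership checks ... */
	audit_log_link_denied("linkat", link);
	return -EPERM;
}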
diff --git a/kernel/audit.h b/kernel/audit.h
index 11468d9..1c95131 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/audit.h>
 #include <linux/skbuff.h>
+#include <uapi/linux/mqueue.h>
 
 /* 0 = no checking
    1 = put_count checking
@@ -29,6 +30,11 @@
 */
 #define AUDIT_DEBUG 0
 
+/* AUDIT_NAMES is the number of slots we reserve in the audit_context
+ * for saving names from getname().  If we get more names we will allocate
+ * a name dynamically and also add those to the list anchored by names_list. */
+#define AUDIT_NAMES	5
+
 /* At task start time, the audit_state is set in the audit_context using
    a per-task filter.  At syscall entry, the audit_state is augmented by
    the syscall filter. */
@@ -59,8 +65,158 @@
 	struct audit_krule	rule;
 };
 
+struct audit_cap_data {
+	kernel_cap_t		permitted;
+	kernel_cap_t		inheritable;
+	union {
+		unsigned int	fE;		/* effective bit of file cap */
+		kernel_cap_t	effective;	/* effective set of process */
+	};
+};
+
+/* When fs/namei.c:getname() is called, we store the pointer in name and
+ * we don't let putname() free it (instead we free all of the saved
+ * pointers at syscall exit time).
+ *
+ * Further, in fs/namei.c:path_lookup() we store the inode and device.
+ */
+struct audit_names {
+	struct list_head	list;		/* audit_context->names_list */
+
+	struct filename		*name;
+	int			name_len;	/* number of chars to log */
+	bool			name_put;	/* call __putname()? */
+
+	unsigned long		ino;
+	dev_t			dev;
+	umode_t			mode;
+	kuid_t			uid;
+	kgid_t			gid;
+	dev_t			rdev;
+	u32			osid;
+	struct audit_cap_data	fcap;
+	unsigned int		fcap_ver;
+	unsigned char		type;		/* record type */
+	/*
+	 * This was an allocated audit_names and not from the array of
+	 * names allocated in the task audit context.  Thus this name
+	 * should be freed on syscall exit.
+	 */
+	bool			should_free;
+};
+
+/* The per-task audit context. */
+struct audit_context {
+	int		    dummy;	/* must be the first element */
+	int		    in_syscall;	/* 1 if task is in a syscall */
+	enum audit_state    state, current_state;
+	unsigned int	    serial;     /* serial number for record */
+	int		    major;      /* syscall number */
+	struct timespec	    ctime;      /* time of syscall entry */
+	unsigned long	    argv[4];    /* syscall arguments */
+	long		    return_code;/* syscall return code */
+	u64		    prio;
+	int		    return_valid; /* return code is valid */
+	/*
+	 * The names_list is the list of all audit_names collected during this
+	 * syscall.  The first AUDIT_NAMES entries in the names_list will
+	 * actually be from the preallocated_names array for performance
+	 * reasons.  Except during allocation they should never be referenced
+	 * through the preallocated_names array and should only be found/used
+	 * by running the names_list.
+	 */
+	struct audit_names  preallocated_names[AUDIT_NAMES];
+	int		    name_count; /* total records in names_list */
+	struct list_head    names_list;	/* struct audit_names->list anchor */
+	char		    *filterkey;	/* key for rule that triggered record */
+	struct path	    pwd;
+	struct audit_aux_data *aux;
+	struct audit_aux_data *aux_pids;
+	struct sockaddr_storage *sockaddr;
+	size_t sockaddr_len;
+				/* Save things to print about task_struct */
+	pid_t		    pid, ppid;
+	kuid_t		    uid, euid, suid, fsuid;
+	kgid_t		    gid, egid, sgid, fsgid;
+	unsigned long	    personality;
+	int		    arch;
+
+	pid_t		    target_pid;
+	kuid_t		    target_auid;
+	kuid_t		    target_uid;
+	unsigned int	    target_sessionid;
+	u32		    target_sid;
+	char		    target_comm[TASK_COMM_LEN];
+
+	struct audit_tree_refs *trees, *first_trees;
+	struct list_head killed_trees;
+	int tree_count;
+
+	int type;
+	union {
+		struct {
+			int nargs;
+			long args[6];
+		} socketcall;
+		struct {
+			kuid_t			uid;
+			kgid_t			gid;
+			umode_t			mode;
+			u32			osid;
+			int			has_perm;
+			uid_t			perm_uid;
+			gid_t			perm_gid;
+			umode_t			perm_mode;
+			unsigned long		qbytes;
+		} ipc;
+		struct {
+			mqd_t			mqdes;
+			struct mq_attr		mqstat;
+		} mq_getsetattr;
+		struct {
+			mqd_t			mqdes;
+			int			sigev_signo;
+		} mq_notify;
+		struct {
+			mqd_t			mqdes;
+			size_t			msg_len;
+			unsigned int		msg_prio;
+			struct timespec		abs_timeout;
+		} mq_sendrecv;
+		struct {
+			int			oflag;
+			umode_t			mode;
+			struct mq_attr		attr;
+		} mq_open;
+		struct {
+			pid_t			pid;
+			struct audit_cap_data	cap;
+		} capset;
+		struct {
+			int			fd;
+			int			flags;
+		} mmap;
+	};
+	int fds[2];
+
+#if AUDIT_DEBUG
+	int		    put_count;
+	int		    ino_count;
+#endif
+};
+
 extern int audit_ever_enabled;
 
+extern void audit_copy_inode(struct audit_names *name,
+			     const struct dentry *dentry,
+			     const struct inode *inode);
+extern void audit_log_cap(struct audit_buffer *ab, char *prefix,
+			  kernel_cap_t *cap);
+extern void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name);
+extern void audit_log_name(struct audit_context *context,
+			   struct audit_names *n, struct path *path,
+			   int record_num, int *call_panic);
+
 extern int audit_pid;
 
 #define AUDIT_INODE_BUCKETS	32
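A consumption sketch for the structures now shared through this header: the preallocated_names[] array is only an allocation fast path, so consumers walk names_list and never index the array, e.g.:

/* Illustrative only: emit one AUDIT_PATH record per collected name. */
static void demo_dump_names(struct audit_context *context)
{
	struct audit_names *n;
	int i = 0;

	list_for_each_entry(n, &context->names_list, list)
		audit_log_name(context, n, NULL, i++, NULL);
}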
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 2674368..83a2970 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -310,121 +310,83 @@
 	return n;
 }
 
-
-/* Translate struct audit_rule to kernel's rule respresentation.
- * Exists for backward compatibility with userspace. */
-static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
+/* check if an audit field is valid */
+static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
 {
-	struct audit_entry *entry;
-	int err = 0;
-	int i;
+	switch (f->type) {
+	case AUDIT_MSGTYPE:
+		if (entry->rule.listnr != AUDIT_FILTER_TYPE &&
+		    entry->rule.listnr != AUDIT_FILTER_USER)
+			return -EINVAL;
+		break;
+	}
 
-	entry = audit_to_entry_common(rule);
-	if (IS_ERR(entry))
-		goto exit_nofree;
-
-	for (i = 0; i < rule->field_count; i++) {
-		struct audit_field *f = &entry->rule.fields[i];
-		u32 n;
-
-		n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
-
-		/* Support for legacy operators where
-		 * AUDIT_NEGATE bit signifies != and otherwise assumes == */
-		if (n & AUDIT_NEGATE)
-			f->op = Audit_not_equal;
-		else if (!n)
-			f->op = Audit_equal;
-		else
-			f->op = audit_to_op(n);
-
-		entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1;
-
-		f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
-		f->val = rule->values[i];
-		f->uid = INVALID_UID;
-		f->gid = INVALID_GID;
-
-		err = -EINVAL;
-		if (f->op == Audit_bad)
-			goto exit_free;
-
-		switch(f->type) {
-		default:
-			goto exit_free;
-		case AUDIT_UID:
-		case AUDIT_EUID:
-		case AUDIT_SUID:
-		case AUDIT_FSUID:
-		case AUDIT_LOGINUID:
-			/* bit ops not implemented for uid comparisons */
-			if (f->op == Audit_bitmask || f->op == Audit_bittest)
-				goto exit_free;
-
-			f->uid = make_kuid(current_user_ns(), f->val);
-			if (!uid_valid(f->uid))
-				goto exit_free;
-			break;
-		case AUDIT_GID:
-		case AUDIT_EGID:
-		case AUDIT_SGID:
-		case AUDIT_FSGID:
-			/* bit ops not implemented for gid comparisons */
-			if (f->op == Audit_bitmask || f->op == Audit_bittest)
-				goto exit_free;
-
-			f->gid = make_kgid(current_user_ns(), f->val);
-			if (!gid_valid(f->gid))
-				goto exit_free;
-			break;
-		case AUDIT_PID:
-		case AUDIT_PERS:
-		case AUDIT_MSGTYPE:
-		case AUDIT_PPID:
-		case AUDIT_DEVMAJOR:
-		case AUDIT_DEVMINOR:
-		case AUDIT_EXIT:
-		case AUDIT_SUCCESS:
-			/* bit ops are only useful on syscall args */
-			if (f->op == Audit_bitmask || f->op == Audit_bittest)
-				goto exit_free;
-			break;
-		case AUDIT_ARG0:
-		case AUDIT_ARG1:
-		case AUDIT_ARG2:
-		case AUDIT_ARG3:
-			break;
-		/* arch is only allowed to be = or != */
-		case AUDIT_ARCH:
-			if (f->op != Audit_not_equal && f->op != Audit_equal)
-				goto exit_free;
-			entry->rule.arch_f = f;
-			break;
-		case AUDIT_PERM:
-			if (f->val & ~15)
-				goto exit_free;
-			break;
-		case AUDIT_FILETYPE:
-			if (f->val & ~S_IFMT)
-				goto exit_free;
-			break;
-		case AUDIT_INODE:
-			err = audit_to_inode(&entry->rule, f);
-			if (err)
-				goto exit_free;
-			break;
-		}
-	}
-
-	if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
-		entry->rule.inode_f = NULL;
-
-exit_nofree:
-	return entry;
-
-exit_free:
-	audit_free_rule(entry);
-	return ERR_PTR(err);
+	switch (f->type) {
+	default:
+		return -EINVAL;
+	case AUDIT_UID:
+	case AUDIT_EUID:
+	case AUDIT_SUID:
+	case AUDIT_FSUID:
+	case AUDIT_LOGINUID:
+	case AUDIT_OBJ_UID:
+	case AUDIT_GID:
+	case AUDIT_EGID:
+	case AUDIT_SGID:
+	case AUDIT_FSGID:
+	case AUDIT_OBJ_GID:
+	case AUDIT_PID:
+	case AUDIT_PERS:
+	case AUDIT_MSGTYPE:
+	case AUDIT_PPID:
+	case AUDIT_DEVMAJOR:
+	case AUDIT_DEVMINOR:
+	case AUDIT_EXIT:
+	case AUDIT_SUCCESS:
+		/* bit ops are only useful on syscall args */
+		if (f->op == Audit_bitmask || f->op == Audit_bittest)
+			return -EINVAL;
+		break;
+	case AUDIT_ARG0:
+	case AUDIT_ARG1:
+	case AUDIT_ARG2:
+	case AUDIT_ARG3:
+	case AUDIT_SUBJ_USER:
+	case AUDIT_SUBJ_ROLE:
+	case AUDIT_SUBJ_TYPE:
+	case AUDIT_SUBJ_SEN:
+	case AUDIT_SUBJ_CLR:
+	case AUDIT_OBJ_USER:
+	case AUDIT_OBJ_ROLE:
+	case AUDIT_OBJ_TYPE:
+	case AUDIT_OBJ_LEV_LOW:
+	case AUDIT_OBJ_LEV_HIGH:
+	case AUDIT_WATCH:
+	case AUDIT_DIR:
+	case AUDIT_FILTERKEY:
+		break;
+	case AUDIT_LOGINUID_SET:
+		if ((f->val != 0) && (f->val != 1))
+			return -EINVAL;
+	/* FALL THROUGH */
+	case AUDIT_ARCH:
+		if (f->op != Audit_not_equal && f->op != Audit_equal)
+			return -EINVAL;
+		break;
+	case AUDIT_PERM:
+		if (f->val & ~15)
+			return -EINVAL;
+		break;
+	case AUDIT_FILETYPE:
+		if (f->val & ~S_IFMT)
+			return -EINVAL;
+		break;
+	case AUDIT_FIELD_COMPARE:
+		if (f->val > AUDIT_MAX_FIELD_COMPARE)
+			return -EINVAL;
+		break;
+	}
+	return 0;
 }
 
 /* Translate struct audit_rule_data to kernel's rule representation. */
@@ -459,17 +421,25 @@
 		f->gid = INVALID_GID;
 		f->lsm_str = NULL;
 		f->lsm_rule = NULL;
-		switch(f->type) {
+
+		/* Support legacy tests for a valid loginuid */
+		if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
+			f->type = AUDIT_LOGINUID_SET;
+			f->val = 0;
+		}
+
+		err = audit_field_valid(entry, f);
+		if (err)
+			goto exit_free;
+
+		err = -EINVAL;
+		switch (f->type) {
+		case AUDIT_LOGINUID:
 		case AUDIT_UID:
 		case AUDIT_EUID:
 		case AUDIT_SUID:
 		case AUDIT_FSUID:
-		case AUDIT_LOGINUID:
 		case AUDIT_OBJ_UID:
-			/* bit ops not implemented for uid comparisons */
-			if (f->op == Audit_bitmask || f->op == Audit_bittest)
-				goto exit_free;
-
 			f->uid = make_kuid(current_user_ns(), f->val);
 			if (!uid_valid(f->uid))
 				goto exit_free;
@@ -479,27 +449,10 @@
 		case AUDIT_SGID:
 		case AUDIT_FSGID:
 		case AUDIT_OBJ_GID:
-			/* bit ops not implemented for gid comparisons */
-			if (f->op == Audit_bitmask || f->op == Audit_bittest)
-				goto exit_free;
-
 			f->gid = make_kgid(current_user_ns(), f->val);
 			if (!gid_valid(f->gid))
 				goto exit_free;
 			break;
-		case AUDIT_PID:
-		case AUDIT_PERS:
-		case AUDIT_MSGTYPE:
-		case AUDIT_PPID:
-		case AUDIT_DEVMAJOR:
-		case AUDIT_DEVMINOR:
-		case AUDIT_EXIT:
-		case AUDIT_SUCCESS:
-		case AUDIT_ARG0:
-		case AUDIT_ARG1:
-		case AUDIT_ARG2:
-		case AUDIT_ARG3:
-			break;
 		case AUDIT_ARCH:
 			entry->rule.arch_f = f;
 			break;
@@ -570,20 +523,6 @@
 			entry->rule.buflen += f->val;
 			entry->rule.filterkey = str;
 			break;
-		case AUDIT_PERM:
-			if (f->val & ~15)
-				goto exit_free;
-			break;
-		case AUDIT_FILETYPE:
-			if (f->val & ~S_IFMT)
-				goto exit_free;
-			break;
-		case AUDIT_FIELD_COMPARE:
-			if (f->val > AUDIT_MAX_FIELD_COMPARE)
-				goto exit_free;
-			break;
-		default:
-			goto exit_free;
 		}
 	}
 
@@ -613,36 +552,6 @@
 	return len;
 }
 
-/* Translate kernel rule respresentation to struct audit_rule.
- * Exists for backward compatibility with userspace. */
-static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
-{
-	struct audit_rule *rule;
-	int i;
-
-	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-	if (unlikely(!rule))
-		return NULL;
-
-	rule->flags = krule->flags | krule->listnr;
-	rule->action = krule->action;
-	rule->field_count = krule->field_count;
-	for (i = 0; i < rule->field_count; i++) {
-		rule->values[i] = krule->fields[i].val;
-		rule->fields[i] = krule->fields[i].type;
-
-		if (krule->vers_ops == 1) {
-			if (krule->fields[i].op == Audit_not_equal)
-				rule->fields[i] |= AUDIT_NEGATE;
-		} else {
-			rule->fields[i] |= audit_ops[krule->fields[i].op];
-		}
-	}
-	for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i];
-
-	return rule;
-}
-
 /* Translate kernel rule representation to struct audit_rule_data. */
 static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 {
@@ -1055,35 +964,6 @@
 	return ret;
 }
 
-/* List rules using struct audit_rule.  Exists for backward
- * compatibility with userspace. */
-static void audit_list(int pid, int seq, struct sk_buff_head *q)
-{
-	struct sk_buff *skb;
-	struct audit_krule *r;
-	int i;
-
-	/* This is a blocking read, so use audit_filter_mutex instead of rcu
-	 * iterator to sync with list writers. */
-	for (i=0; i<AUDIT_NR_FILTERS; i++) {
-		list_for_each_entry(r, &audit_rules_list[i], list) {
-			struct audit_rule *rule;
-
-			rule = audit_krule_to_rule(r);
-			if (unlikely(!rule))
-				break;
-			skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
-					 rule, sizeof(*rule));
-			if (skb)
-				skb_queue_tail(q, skb);
-			kfree(rule);
-		}
-	}
-	skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0);
-	if (skb)
-		skb_queue_tail(q, skb);
-}
-
 /* List rules using struct audit_rule_data. */
 static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 {
@@ -1113,11 +993,11 @@
 }
 
 /* Log rule additions and removals */
-static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
-				  char *action, struct audit_krule *rule,
-				  int res)
+static void audit_log_rule_change(char *action, struct audit_krule *rule, int res)
 {
 	struct audit_buffer *ab;
+	uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+	u32 sessionid = audit_get_sessionid(current);
 
 	if (!audit_enabled)
 		return;
@@ -1125,18 +1005,8 @@
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 	if (!ab)
 		return;
-	audit_log_format(ab, "auid=%u ses=%u",
-			 from_kuid(&init_user_ns, loginuid), sessionid);
-	if (sid) {
-		char *ctx = NULL;
-		u32 len;
-		if (security_secid_to_secctx(sid, &ctx, &len))
-			audit_log_format(ab, " ssid=%u", sid);
-		else {
-			audit_log_format(ab, " subj=%s", ctx);
-			security_release_secctx(ctx, len);
-		}
-	}
+	audit_log_format(ab, "auid=%u ses=%u", loginuid, sessionid);
+	audit_log_task_context(ab);
 	audit_log_format(ab, " op=");
 	audit_log_string(ab, action);
 	audit_log_key(ab, rule->filterkey);
@@ -1155,8 +1025,5 @@
- * @sessionid: sessionid for netlink audit message
- * @sid: SE Linux Security ID of sender
  */
-int audit_receive_filter(int type, int pid, int seq, void *data,
-			 size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid)
+int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
 {
 	struct task_struct *tsk;
 	struct audit_netlink_list *dest;
@@ -1164,7 +1033,6 @@
 	struct audit_entry *entry;
 
 	switch (type) {
-	case AUDIT_LIST:
 	case AUDIT_LIST_RULES:
 		/* We can't just spew out the rules here because we might fill
 		 * the available socket buffer space and deadlock waiting for
@@ -1179,10 +1047,7 @@
 		skb_queue_head_init(&dest->q);
 
 		mutex_lock(&audit_filter_mutex);
-		if (type == AUDIT_LIST)
-			audit_list(pid, seq, &dest->q);
-		else
-			audit_list_rules(pid, seq, &dest->q);
+		audit_list_rules(pid, seq, &dest->q);
 		mutex_unlock(&audit_filter_mutex);
 
 		tsk = kthread_run(audit_send_list, dest, "audit_send_list");
@@ -1192,35 +1057,23 @@
 			err = PTR_ERR(tsk);
 		}
 		break;
-	case AUDIT_ADD:
 	case AUDIT_ADD_RULE:
-		if (type == AUDIT_ADD)
-			entry = audit_rule_to_entry(data);
-		else
-			entry = audit_data_to_entry(data, datasz);
+		entry = audit_data_to_entry(data, datasz);
 		if (IS_ERR(entry))
 			return PTR_ERR(entry);
 
 		err = audit_add_rule(entry);
-		audit_log_rule_change(loginuid, sessionid, sid, "add rule",
-				      &entry->rule, !err);
-
+		audit_log_rule_change("add rule", &entry->rule, !err);
 		if (err)
 			audit_free_rule(entry);
 		break;
-	case AUDIT_DEL:
 	case AUDIT_DEL_RULE:
-		if (type == AUDIT_DEL)
-			entry = audit_rule_to_entry(data);
-		else
-			entry = audit_data_to_entry(data, datasz);
+		entry = audit_data_to_entry(data, datasz);
 		if (IS_ERR(entry))
 			return PTR_ERR(entry);
 
 		err = audit_del_rule(entry);
-		audit_log_rule_change(loginuid, sessionid, sid, "remove rule",
-				      &entry->rule, !err);
-
+		audit_log_rule_change("remove rule", &entry->rule, !err);
 		audit_free_rule(entry);
 		break;
 	default:
@@ -1358,7 +1211,7 @@
 	return strncmp(p, dname, dlen);
 }
 
-static int audit_filter_user_rules(struct audit_krule *rule,
+static int audit_filter_user_rules(struct audit_krule *rule, int type,
 				   enum audit_state *state)
 {
 	int i;
@@ -1382,6 +1235,13 @@
 			result = audit_uid_comparator(audit_get_loginuid(current),
 						  f->op, f->uid);
 			break;
+		case AUDIT_LOGINUID_SET:
+			result = audit_comparator(audit_loginuid_set(current),
+						  f->op, f->val);
+			break;
+		case AUDIT_MSGTYPE:
+			result = audit_comparator(type, f->op, f->val);
+			break;
 		case AUDIT_SUBJ_USER:
 		case AUDIT_SUBJ_ROLE:
 		case AUDIT_SUBJ_TYPE:
@@ -1408,7 +1268,7 @@
 	return 1;
 }
 
-int audit_filter_user(void)
+int audit_filter_user(int type)
 {
 	enum audit_state state = AUDIT_DISABLED;
 	struct audit_entry *e;
@@ -1416,7 +1276,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) {
-		if (audit_filter_user_rules(&e->rule, &state)) {
+		if (audit_filter_user_rules(&e->rule, type, &state)) {
 			if (state == AUDIT_DISABLED)
 				ret = 0;
 			break;
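
The audit_filter_user() change threads the netlink record type down so that
AUDIT_MSGTYPE rules can veto a message before anything is logged. A minimal
sketch of the expected call site, assuming the usual netlink receive path
(the function name sketch_receive is illustrative, not from this patch):

	/* Sketch: consuming the new audit_filter_user(type) signature. */
	static int sketch_receive(int msg_type)
	{
		/* returns 0 when a user-filter rule disables auditing
		 * for this record type */
		if (!audit_filter_user(msg_type))
			return 0;

		/* ... continue with the normal audit path ... */
		return 1;
	}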
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c682294..3c8a601 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -76,11 +76,6 @@
 #define AUDITSC_SUCCESS 1
 #define AUDITSC_FAILURE 2
 
-/* AUDIT_NAMES is the number of slots we reserve in the audit_context
- * for saving names from getname().  If we get more names we will allocate
- * a name dynamically and also add those to the list anchored by names_list. */
-#define AUDIT_NAMES	5
-
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
@@ -90,44 +85,6 @@
 /* determines whether we collect data for signals sent */
 int audit_signals;
 
-struct audit_cap_data {
-	kernel_cap_t		permitted;
-	kernel_cap_t		inheritable;
-	union {
-		unsigned int	fE;		/* effective bit of a file capability */
-		kernel_cap_t	effective;	/* effective set of a process */
-	};
-};
-
-/* When fs/namei.c:getname() is called, we store the pointer in name and
- * we don't let putname() free it (instead we free all of the saved
- * pointers at syscall exit time).
- *
- * Further, in fs/namei.c:path_lookup() we store the inode and device.
- */
-struct audit_names {
-	struct list_head	list;		/* audit_context->names_list */
-	struct filename	*name;
-	unsigned long		ino;
-	dev_t			dev;
-	umode_t			mode;
-	kuid_t			uid;
-	kgid_t			gid;
-	dev_t			rdev;
-	u32			osid;
-	struct audit_cap_data	 fcap;
-	unsigned int		fcap_ver;
-	int			name_len;	/* number of name's characters to log */
-	unsigned char		type;		/* record type */
-	bool			name_put;	/* call __putname() for this name */
-	/*
-	 * This was an allocated audit_names and not from the array of
-	 * names allocated in the task audit context.  Thus this name
-	 * should be freed on syscall exit
-	 */
-	bool			should_free;
-};
-
 struct audit_aux_data {
 	struct audit_aux_data	*next;
 	int			type;
@@ -175,106 +132,6 @@
 	struct audit_chunk *c[31];
 };
 
-/* The per-task audit context. */
-struct audit_context {
-	int		    dummy;	/* must be the first element */
-	int		    in_syscall;	/* 1 if task is in a syscall */
-	enum audit_state    state, current_state;
-	unsigned int	    serial;     /* serial number for record */
-	int		    major;      /* syscall number */
-	struct timespec	    ctime;      /* time of syscall entry */
-	unsigned long	    argv[4];    /* syscall arguments */
-	long		    return_code;/* syscall return code */
-	u64		    prio;
-	int		    return_valid; /* return code is valid */
-	/*
-	 * The names_list is the list of all audit_names collected during this
-	 * syscall.  The first AUDIT_NAMES entries in the names_list will
-	 * actually be from the preallocated_names array for performance
-	 * reasons.  Except during allocation they should never be referenced
-	 * through the preallocated_names array and should only be found/used
-	 * by running the names_list.
-	 */
-	struct audit_names  preallocated_names[AUDIT_NAMES];
-	int		    name_count; /* total records in names_list */
-	struct list_head    names_list;	/* anchor for struct audit_names->list */
-	char *		    filterkey;	/* key for rule that triggered record */
-	struct path	    pwd;
-	struct audit_aux_data *aux;
-	struct audit_aux_data *aux_pids;
-	struct sockaddr_storage *sockaddr;
-	size_t sockaddr_len;
-				/* Save things to print about task_struct */
-	pid_t		    pid, ppid;
-	kuid_t		    uid, euid, suid, fsuid;
-	kgid_t		    gid, egid, sgid, fsgid;
-	unsigned long	    personality;
-	int		    arch;
-
-	pid_t		    target_pid;
-	kuid_t		    target_auid;
-	kuid_t		    target_uid;
-	unsigned int	    target_sessionid;
-	u32		    target_sid;
-	char		    target_comm[TASK_COMM_LEN];
-
-	struct audit_tree_refs *trees, *first_trees;
-	struct list_head killed_trees;
-	int tree_count;
-
-	int type;
-	union {
-		struct {
-			int nargs;
-			long args[6];
-		} socketcall;
-		struct {
-			kuid_t			uid;
-			kgid_t			gid;
-			umode_t			mode;
-			u32			osid;
-			int			has_perm;
-			uid_t			perm_uid;
-			gid_t			perm_gid;
-			umode_t			perm_mode;
-			unsigned long		qbytes;
-		} ipc;
-		struct {
-			mqd_t			mqdes;
-			struct mq_attr 		mqstat;
-		} mq_getsetattr;
-		struct {
-			mqd_t			mqdes;
-			int			sigev_signo;
-		} mq_notify;
-		struct {
-			mqd_t			mqdes;
-			size_t			msg_len;
-			unsigned int		msg_prio;
-			struct timespec		abs_timeout;
-		} mq_sendrecv;
-		struct {
-			int			oflag;
-			umode_t			mode;
-			struct mq_attr		attr;
-		} mq_open;
-		struct {
-			pid_t			pid;
-			struct audit_cap_data	cap;
-		} capset;
-		struct {
-			int			fd;
-			int			flags;
-		} mmap;
-	};
-	int fds[2];
-
-#if AUDIT_DEBUG
-	int		    put_count;
-	int		    ino_count;
-#endif
-};
-
 static inline int open_arg(int flags, int mask)
 {
 	int n = ACC_MODE(flags);
@@ -633,9 +490,23 @@
 			break;
 		case AUDIT_GID:
 			result = audit_gid_comparator(cred->gid, f->op, f->gid);
+			if (f->op == Audit_equal) {
+				if (!result)
+					result = in_group_p(f->gid);
+			} else if (f->op == Audit_not_equal) {
+				if (result)
+					result = !in_group_p(f->gid);
+			}
 			break;
 		case AUDIT_EGID:
 			result = audit_gid_comparator(cred->egid, f->op, f->gid);
+			if (f->op == Audit_equal) {
+				if (!result)
+					result = in_egroup_p(f->gid);
+			} else if (f->op == Audit_not_equal) {
+				if (result)
+					result = !in_egroup_p(f->gid);
+			}
 			break;
 		case AUDIT_SGID:
 			result = audit_gid_comparator(cred->sgid, f->op, f->gid);
@@ -742,6 +613,9 @@
 			if (ctx)
 				result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
 			break;
+		case AUDIT_LOGINUID_SET:
+			result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
+			break;
 		case AUDIT_SUBJ_USER:
 		case AUDIT_SUBJ_ROLE:
 		case AUDIT_SUBJ_TYPE:
@@ -987,6 +861,8 @@
 
 #if AUDIT_DEBUG == 2
 	if (context->put_count + context->ino_count != context->name_count) {
+		int i = 0;
+
 		printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
 		       " name_count=%d put_count=%d"
 		       " ino_count=%d [NOT freeing]\n",
@@ -995,7 +871,7 @@
 		       context->name_count, context->put_count,
 		       context->ino_count);
 		list_for_each_entry(n, &context->names_list, list) {
-			printk(KERN_ERR "names[%d] = %p = %s\n", i,
+			printk(KERN_ERR "names[%d] = %p = %s\n", i++,
 			       n->name, n->name->name ?: "(null)");
 		}
 		dump_stack();
@@ -1010,7 +886,7 @@
 	list_for_each_entry_safe(n, next, &context->names_list, list) {
 		list_del(&n->list);
 		if (n->name && n->name_put)
-			__putname(n->name);
+			final_putname(n->name);
 		if (n->should_free)
 			kfree(n);
 	}
@@ -1093,88 +969,6 @@
 	kfree(context);
 }
 
-void audit_log_task_context(struct audit_buffer *ab)
-{
-	char *ctx = NULL;
-	unsigned len;
-	int error;
-	u32 sid;
-
-	security_task_getsecid(current, &sid);
-	if (!sid)
-		return;
-
-	error = security_secid_to_secctx(sid, &ctx, &len);
-	if (error) {
-		if (error != -EINVAL)
-			goto error_path;
-		return;
-	}
-
-	audit_log_format(ab, " subj=%s", ctx);
-	security_release_secctx(ctx, len);
-	return;
-
-error_path:
-	audit_panic("error in audit_log_task_context");
-	return;
-}
-
-EXPORT_SYMBOL(audit_log_task_context);
-
-void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
-{
-	const struct cred *cred;
-	char name[sizeof(tsk->comm)];
-	struct mm_struct *mm = tsk->mm;
-	char *tty;
-
-	if (!ab)
-		return;
-
-	/* tsk == current */
-	cred = current_cred();
-
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (tsk->signal && tsk->signal->tty)
-		tty = tsk->signal->tty->name;
-	else
-		tty = "(none)";
-	spin_unlock_irq(&tsk->sighand->siglock);
-
-
-	audit_log_format(ab,
-			 " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
-			 " euid=%u suid=%u fsuid=%u"
-			 " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
-			 sys_getppid(),
-			 tsk->pid,
-			 from_kuid(&init_user_ns, tsk->loginuid),
-			 from_kuid(&init_user_ns, cred->uid),
-			 from_kgid(&init_user_ns, cred->gid),
-			 from_kuid(&init_user_ns, cred->euid),
-			 from_kuid(&init_user_ns, cred->suid),
-			 from_kuid(&init_user_ns, cred->fsuid),
-			 from_kgid(&init_user_ns, cred->egid),
-			 from_kgid(&init_user_ns, cred->sgid),
-			 from_kgid(&init_user_ns, cred->fsgid),
-			 tsk->sessionid, tty);
-
-	get_task_comm(name, tsk);
-	audit_log_format(ab, " comm=");
-	audit_log_untrustedstring(ab, name);
-
-	if (mm) {
-		down_read(&mm->mmap_sem);
-		if (mm->exe_file)
-			audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
-		up_read(&mm->mmap_sem);
-	}
-	audit_log_task_context(ab);
-}
-
-EXPORT_SYMBOL(audit_log_task_info);
-
 static int audit_log_pid_context(struct audit_context *context, pid_t pid,
 				 kuid_t auid, kuid_t uid, unsigned int sessionid,
 				 u32 sid, char *comm)
@@ -1191,12 +985,14 @@
 	audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
 			 from_kuid(&init_user_ns, auid),
 			 from_kuid(&init_user_ns, uid), sessionid);
-	if (security_secid_to_secctx(sid, &ctx, &len)) {
-		audit_log_format(ab, " obj=(none)");
-		rc = 1;
-	} else {
-		audit_log_format(ab, " obj=%s", ctx);
-		security_release_secctx(ctx, len);
+	if (sid) {
+		if (security_secid_to_secctx(sid, &ctx, &len)) {
+			audit_log_format(ab, " obj=(none)");
+			rc = 1;
+		} else {
+			audit_log_format(ab, " obj=%s", ctx);
+			security_release_secctx(ctx, len);
+		}
 	}
 	audit_log_format(ab, " ocomm=");
 	audit_log_untrustedstring(ab, comm);
@@ -1390,35 +1186,6 @@
 	kfree(buf);
 }
 
-static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
-{
-	int i;
-
-	audit_log_format(ab, " %s=", prefix);
-	CAP_FOR_EACH_U32(i) {
-		audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
-	}
-}
-
-static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
-{
-	kernel_cap_t *perm = &name->fcap.permitted;
-	kernel_cap_t *inh = &name->fcap.inheritable;
-	int log = 0;
-
-	if (!cap_isclear(*perm)) {
-		audit_log_cap(ab, "cap_fp", perm);
-		log = 1;
-	}
-	if (!cap_isclear(*inh)) {
-		audit_log_cap(ab, "cap_fi", inh);
-		log = 1;
-	}
-
-	if (log)
-		audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
-}
-
 static void show_special(struct audit_context *context, int *call_panic)
 {
 	struct audit_buffer *ab;
@@ -1516,68 +1283,6 @@
 	audit_log_end(ab);
 }
 
-static void audit_log_name(struct audit_context *context, struct audit_names *n,
-			   int record_num, int *call_panic)
-{
-	struct audit_buffer *ab;
-	ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
-	if (!ab)
-		return; /* audit_panic has been called */
-
-	audit_log_format(ab, "item=%d", record_num);
-
-	if (n->name) {
-		switch (n->name_len) {
-		case AUDIT_NAME_FULL:
-			/* log the full path */
-			audit_log_format(ab, " name=");
-			audit_log_untrustedstring(ab, n->name->name);
-			break;
-		case 0:
-			/* name was specified as a relative path and the
-			 * directory component is the cwd */
-			audit_log_d_path(ab, " name=", &context->pwd);
-			break;
-		default:
-			/* log the name's directory component */
-			audit_log_format(ab, " name=");
-			audit_log_n_untrustedstring(ab, n->name->name,
-						    n->name_len);
-		}
-	} else
-		audit_log_format(ab, " name=(null)");
-
-	if (n->ino != (unsigned long)-1) {
-		audit_log_format(ab, " inode=%lu"
-				 " dev=%02x:%02x mode=%#ho"
-				 " ouid=%u ogid=%u rdev=%02x:%02x",
-				 n->ino,
-				 MAJOR(n->dev),
-				 MINOR(n->dev),
-				 n->mode,
-				 from_kuid(&init_user_ns, n->uid),
-				 from_kgid(&init_user_ns, n->gid),
-				 MAJOR(n->rdev),
-				 MINOR(n->rdev));
-	}
-	if (n->osid != 0) {
-		char *ctx = NULL;
-		u32 len;
-		if (security_secid_to_secctx(
-			n->osid, &ctx, &len)) {
-			audit_log_format(ab, " osid=%u", n->osid);
-			*call_panic = 2;
-		} else {
-			audit_log_format(ab, " obj=%s", ctx);
-			security_release_secctx(ctx, len);
-		}
-	}
-
-	audit_log_fcaps(ab, n);
-
-	audit_log_end(ab);
-}
-
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
 	int i, call_panic = 0;
@@ -1695,7 +1400,7 @@
 
 	i = 0;
 	list_for_each_entry(n, &context->names_list, list)
-		audit_log_name(context, n, i++, &call_panic);
+		audit_log_name(context, n, NULL, i++, &call_panic);
 
 	/* Send end of event record to help user space know we are finished */
 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -2030,18 +1735,18 @@
 	BUG_ON(!context);
 	if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-		printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
+		printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
 		if (context->name_count) {
 			struct audit_names *n;
-			int i;
+			int i = 0;
 
 			list_for_each_entry(n, &context->names_list, list)
-				printk(KERN_ERR "name[%d] = %p = %s\n", i,
+				printk(KERN_ERR "name[%d] = %p = %s\n", i++,
 				       n->name, n->name->name ?: "(null)");
 			}
 #endif
-		__putname(name);
+		final_putname(name);
 	}
 #if AUDIT_DEBUG
 	else {
@@ -2060,41 +1765,6 @@
 #endif
 }
 
-static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
-{
-	struct cpu_vfs_cap_data caps;
-	int rc;
-
-	if (!dentry)
-		return 0;
-
-	rc = get_vfs_caps_from_disk(dentry, &caps);
-	if (rc)
-		return rc;
-
-	name->fcap.permitted = caps.permitted;
-	name->fcap.inheritable = caps.inheritable;
-	name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
-	name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
-
-	return 0;
-}
-
-
-/* Copy inode data into an audit_names. */
-static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
-			     const struct inode *inode)
-{
-	name->ino   = inode->i_ino;
-	name->dev   = inode->i_sb->s_dev;
-	name->mode  = inode->i_mode;
-	name->uid   = inode->i_uid;
-	name->gid   = inode->i_gid;
-	name->rdev  = inode->i_rdev;
-	security_inode_getsecid(inode, &name->osid);
-	audit_copy_fcaps(name, dentry);
-}
-
 /**
  * __audit_inode - store the inode and device from a lookup
  * @name: name being audited
@@ -2303,7 +1973,7 @@
 	unsigned int sessionid;
 
 #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
-	if (uid_valid(task->loginuid))
+	if (audit_loginuid_set(task))
 		return -EPERM;
 #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
 	if (!capable(CAP_AUDIT_CONTROL))
@@ -2471,17 +2141,20 @@
 
 /**
  * audit_socketcall - record audit data for sys_socketcall
- * @nargs: number of args
+ * @nargs: number of args; must not exceed AUDITSC_ARGS.
  * @args: args array
  *
  */
-void __audit_socketcall(int nargs, unsigned long *args)
+int __audit_socketcall(int nargs, unsigned long *args)
 {
 	struct audit_context *context = current->audit_context;
 
+	if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
+		return -EINVAL;
 	context->type = AUDIT_SOCKETCALL;
 	context->socketcall.nargs = nargs;
 	memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
+	return 0;
 }
 
 /**
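
Because __audit_socketcall() can now reject bad argument counts, callers must
propagate its return value instead of ignoring it. A hedged sketch of an
adjusted call site (the surrounding socketcall plumbing is assumed and the
names are illustrative):

	/* Sketch: nargs outside 1..AUDITSC_ARGS now yields -EINVAL. */
	static long sketch_socketcall(int nargs, unsigned long *args)
	{
		int err;

		err = audit_socketcall(nargs, args);
		if (err)
			return err;	/* previously ignored */

		/* ... dispatch to the real socket syscall ... */
		return 0;
	}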
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d40687..987b28a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -70,6 +70,7 @@
 #include <linux/khugepaged.h>
 #include <linux/signalfd.h>
 #include <linux/uprobes.h>
+#include <linux/aio.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -1303,6 +1304,10 @@
 	p->memcg_batch.do_batch = 0;
 	p->memcg_batch.memcg = NULL;
 #endif
+#ifdef CONFIG_BCACHE
+	p->sequential_io	= 0;
+	p->sequential_io_avg	= 0;
+#endif
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p);
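
The two new task fields are zeroed rather than copied so a child starts with
no sequential-IO history. A rough sketch of the heuristic they feed, assuming
bcache-style bookkeeping (the helper name and exact arithmetic here are
illustrative, not taken from this patch):

	/* Sketch: contiguous IO grows sequential_io; the smoothed value
	 * is compared against the sequential cutoff to bypass the cache. */
	static bool sketch_should_bypass(struct task_struct *t,
					 unsigned int bytes, bool contiguous,
					 unsigned int cutoff)
	{
		if (contiguous)
			t->sequential_io += bytes;
		else
			t->sequential_io = bytes;

		/* weighted running average smooths bursts */
		t->sequential_io_avg =
			(t->sequential_io_avg * 7 + t->sequential_io) / 8;

		return t->sequential_io_avg > cutoff;
	}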
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 6a3bccb..1f3186b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2998,6 +2998,7 @@
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 struct lock_class_key __lockdep_no_validate__;
+EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
 static int
 print_lock_nested_lock_not_held(struct task_struct *curr,
diff --git a/kernel/params.c b/kernel/params.c
index ed35345..53b958f 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -613,10 +613,13 @@
 		       sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
 		       GFP_KERNEL);
 	if (!new) {
-		kfree(mk->mp);
+		kfree(attrs);
 		err = -ENOMEM;
 		goto fail;
 	}
+	/* Despite looking like the typical realloc() bug, this is safe.
+	 * We *want* the old 'attrs' to be freed either way, and we'll store
+	 * the new one in the success case. */
 	attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
 	if (!attrs) {
 		err = -ENOMEM;
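
The one-line fix plus comment deserve unpacking: krealloc() frees (or reuses)
the old buffer only on success, so when the earlier allocation fails, the old
attrs is still owned by this function and must be freed by hand; kfree(mk->mp)
here freed the wrong pointer. A minimal sketch of the ownership rules, under
the standard krealloc() contract:

	/* Sketch: who frees what around krealloc(). */
	static struct attribute **sketch_grow(struct attribute **attrs,
					      size_t n)
	{
		struct attribute **bigger;

		bigger = krealloc(attrs, sizeof(*attrs) * (n + 1), GFP_KERNEL);
		if (!bigger) {
			kfree(attrs);	/* old buffer still ours on failure */
			return NULL;
		}
		return bigger;		/* old buffer handled by krealloc() */
	}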
diff --git a/kernel/printk.c b/kernel/printk.c
index 96dcfcd..fa36e14 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -32,6 +32,7 @@
 #include <linux/security.h>
 #include <linux/bootmem.h>
 #include <linux/memblock.h>
+#include <linux/aio.h>
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
 #include <linux/kdb.h>
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 17ae54d..aed981a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -17,6 +17,7 @@
 #include <linux/ptrace.h>
 #include <linux/security.h>
 #include <linux/signal.h>
+#include <linux/uio.h>
 #include <linux/audit.h>
 #include <linux/pid_namespace.h>
 #include <linux/syscalls.h>
diff --git a/kernel/relay.c b/kernel/relay.c
index eef0d11..b91488b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -234,7 +234,6 @@
 static void relay_remove_buf(struct kref *kref)
 {
 	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
-	buf->chan->cb->remove_buf_file(buf->dentry);
 	relay_destroy_buf(buf);
 }
 
@@ -484,6 +483,7 @@
 {
 	buf->finalized = 1;
 	del_timer_sync(&buf->timer);
+	buf->chan->cb->remove_buf_file(buf->dentry);
 	kref_put(&buf->kref, relay_remove_buf);
 }
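
Moving remove_buf_file() out of the kref release path enforces an ordering:
the dentry is removed while the buffer (and its channel callbacks) are
guaranteed alive, and only then may the final reference drop free it. The
general shape of the pattern, as a hedged sketch (sketch_obj and
sketch_release are illustrative):

	struct sketch_obj {
		struct kref kref;
		void (*unpublish)(struct sketch_obj *obj);
	};

	/* Sketch: unpublish before the last kref_put(), never from the
	 * release callback itself. */
	static void sketch_close(struct sketch_obj *obj)
	{
		obj->unpublish(obj);			/* obj certainly alive here */
		kref_put(&obj->kref, sketch_release);	/* may free obj now */
	}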
 
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index b3c6c3f..cfff143 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -126,6 +126,15 @@
 
 EXPORT_SYMBOL(_down_write_nest_lock);
 
+void down_read_non_owner(struct rw_semaphore *sem)
+{
+	might_sleep();
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read_non_owner);
+
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	might_sleep();
@@ -136,6 +145,13 @@
 
 EXPORT_SYMBOL(down_write_nested);
 
+void up_read_non_owner(struct rw_semaphore *sem)
+{
+	__up_read(sem);
+}
+
+EXPORT_SYMBOL(up_read_non_owner);
+
 #endif
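
The new *_non_owner pair serves callers (bcache's writeback path is the
immediate user) that acquire a read lock in one task and release it in
another, which lockdep's owner tracking would otherwise complain about. A
hedged usage sketch (function names are illustrative):

	/* Sketch: a read lock handed across contexts. */
	static void sketch_submit(struct rw_semaphore *sem)
	{
		down_read_non_owner(sem);  /* taken by the submitting task */
		/* ... queue async work that ends in sketch_complete() ... */
	}

	static void sketch_complete(struct rw_semaphore *sem)
	{
		up_read_non_owner(sem);	   /* released by a different task */
	}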
 
 
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index bfd6787..7078052 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -200,6 +200,7 @@
 /* fanotify! */
 cond_syscall(sys_fanotify_init);
 cond_syscall(sys_fanotify_mark);
+cond_syscall(compat_sys_fanotify_mark);
 
 /* open by handle */
 cond_syscall(sys_name_to_handle_at);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index ebf7235..aea4a9e 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -15,6 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #ifdef CONFIG_SYSCTL_SYSCALL
 
@@ -1447,7 +1448,6 @@
 
 
 #ifdef CONFIG_COMPAT
-#include <asm/compat.h>
 
 struct compat_sysctl_args {
 	compat_uptr_t	name;
@@ -1459,7 +1459,7 @@
 	compat_ulong_t	__unused[4];
 };
 
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
+COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
 {
 	struct compat_sysctl_args tmp;
 	compat_size_t __user *compat_oldlenp;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5e9efd4..015f85a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -71,6 +71,7 @@
 config RING_BUFFER
 	bool
 	select TRACE_CLOCK
+	select IRQ_WORK
 
 config FTRACE_NMI_ENTER
        bool
@@ -107,7 +108,6 @@
 	select BINARY_PRINTF
 	select EVENT_TRACING
 	select TRACE_CLOCK
-	select IRQ_WORK
 
 config GENERIC_TRACER
 	bool
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index ed58a32..b8b8560 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1808,6 +1808,7 @@
 
 	rwbs[i] = '\0';
 }
+EXPORT_SYMBOL_GPL(blk_fill_rwbs);
 
 #endif /* CONFIG_EVENT_TRACING */
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8a5c017..b549b0f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -64,6 +64,13 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)	\
+	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@
 	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
 	       unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+		mutex_init(&ops->regex_lock);
+		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+	}
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -907,7 +924,8 @@
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
-	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@
 	.func			= ftrace_stub,
 	.notrace_hash		= EMPTY_HASH,
 	.filter_hash		= EMPTY_HASH,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
 	struct ftrace_page	*next;
 	struct dyn_ftrace	*records;
@@ -1247,6 +1264,7 @@
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+	ftrace_ops_init(ops);
 	free_ftrace_hash(ops->filter_hash);
 	free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@
 		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
-		     !(rec->flags & ~FTRACE_FL_MASK))) {
+		     !(rec->flags & FTRACE_FL_ENABLED))) {
 
 			rec = NULL;
 			goto retry;
@@ -2624,6 +2642,8 @@
 	struct ftrace_hash *hash;
 	int ret = 0;
 
+	ftrace_ops_init(ops);
+
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
@@ -2636,28 +2656,26 @@
 		return -ENOMEM;
 	}
 
+	iter->ops = ops;
+	iter->flags = flag;
+
+	mutex_lock(&ops->regex_lock);
+
 	if (flag & FTRACE_ITER_NOTRACE)
 		hash = ops->notrace_hash;
 	else
 		hash = ops->filter_hash;
 
-	iter->ops = ops;
-	iter->flags = flag;
-
 	if (file->f_mode & FMODE_WRITE) {
-		mutex_lock(&ftrace_lock);
 		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-		mutex_unlock(&ftrace_lock);
-
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
 			kfree(iter);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto out_unlock;
 		}
 	}
 
-	mutex_lock(&ftrace_regex_lock);
-
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC))
 		ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@
 		}
 	} else
 		file->private_data = iter;
-	mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+	mutex_unlock(&ops->regex_lock);
 
 	return ret;
 }
@@ -2910,6 +2930,8 @@
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
 	.func		= function_trace_probe_call,
+	.flags		= FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@
 	int ret;
 	int i;
 
-	if (ftrace_probe_registered)
+	if (ftrace_probe_registered) {
+		/* still need to update the function call sites */
+		if (ftrace_enabled)
+			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
 		return;
+	}
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@
 	if (WARN_ON(not))
 		return -EINVAL;
 
-	mutex_lock(&ftrace_lock);
+	mutex_lock(&trace_probe_ops.regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash) {
 		count = -ENOMEM;
-		goto out_unlock;
+		goto out;
 	}
 
 	if (unlikely(ftrace_disabled)) {
 		count = -ENODEV;
-		goto out_unlock;
+		goto out;
 	}
 
+	mutex_lock(&ftrace_lock);
+
 	do_for_each_ftrace_rec(pg, rec) {
 
 		if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@
 
  out_unlock:
 	mutex_unlock(&ftrace_lock);
+ out:
+	mutex_unlock(&trace_probe_ops.regex_lock);
 	free_ftrace_hash(hash);
 
 	return count;
@@ -3095,7 +3125,7 @@
 			return;
 	}
 
-	mutex_lock(&ftrace_lock);
+	mutex_lock(&trace_probe_ops.regex_lock);
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@ -3133,6 +3163,7 @@
 			list_add(&entry->free_list, &free_list);
 		}
 	}
+	mutex_lock(&ftrace_lock);
 	__disable_ftrace_function_probe();
 	/*
 	 * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@
 		list_del(&entry->free_list);
 		ftrace_free_entry(entry);
 	}
+	mutex_unlock(&ftrace_lock);
 		
  out_unlock:
-	mutex_unlock(&ftrace_lock);
+	mutex_unlock(&trace_probe_ops.regex_lock);
 	free_ftrace_hash(hash);
 }
 
@@ -3256,18 +3288,17 @@
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&ftrace_regex_lock);
-
-	ret = -ENODEV;
-	if (unlikely(ftrace_disabled))
-		goto out_unlock;
-
 	if (file->f_mode & FMODE_READ) {
 		struct seq_file *m = file->private_data;
 		iter = m->private;
 	} else
 		iter = file->private_data;
 
+	if (unlikely(ftrace_disabled))
+		return -ENODEV;
+
+	/* iter->hash is a local copy, so we don't need regex_lock */
+
 	parser = &iter->parser;
 	read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@
 		ret = ftrace_process_regex(iter->hash, parser->buffer,
 					   parser->idx, enable);
 		trace_parser_clear(parser);
-		if (ret)
-			goto out_unlock;
+		if (ret < 0)
+			goto out;
 	}
 
 	ret = read;
-out_unlock:
-	mutex_unlock(&ftrace_regex_lock);
-
+ out:
 	return ret;
 }
 
@@ -3335,16 +3364,19 @@
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	mutex_lock(&ops->regex_lock);
+
 	if (enable)
 		orig_hash = &ops->filter_hash;
 	else
 		orig_hash = &ops->notrace_hash;
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-	if (!hash)
-		return -ENOMEM;
+	if (!hash) {
+		ret = -ENOMEM;
+		goto out_regex_unlock;
+	}
 
-	mutex_lock(&ftrace_regex_lock);
 	if (reset)
 		ftrace_filter_reset(hash);
 	if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@
 	mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-	mutex_unlock(&ftrace_regex_lock);
+	mutex_unlock(&ops->regex_lock);
 
 	free_ftrace_hash(hash);
 	return ret;
@@ -3392,6 +3424,7 @@
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 			 int remove, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 		       int len, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 			int len, int reset)
 {
+	ftrace_ops_init(ops);
 	return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@
 {
 	char *func;
 
+	ftrace_ops_init(ops);
+
 	while (buf) {
 		func = strsep(&buf, ",");
 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@
 	int filter_hash;
 	int ret;
 
-	mutex_lock(&ftrace_regex_lock);
 	if (file->f_mode & FMODE_READ) {
 		iter = m->private;
-
 		seq_release(inode, file);
 	} else
 		iter = file->private_data;
@@ -3567,6 +3602,8 @@
 
 	trace_parser_put(parser);
 
+	mutex_lock(&iter->ops->regex_lock);
+
 	if (file->f_mode & FMODE_WRITE) {
 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@
 
 		mutex_unlock(&ftrace_lock);
 	}
+
+	mutex_unlock(&iter->ops->regex_lock);
 	free_ftrace_hash(iter->hash);
 	kfree(iter);
 
-	mutex_unlock(&ftrace_regex_lock);
 	return 0;
 }
 
@@ -4126,7 +4164,8 @@
 
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@
 }
 
 static struct ftrace_ops control_ops = {
-	.func = ftrace_ops_control_func,
-	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
+	.func	= ftrace_ops_control_func,
+	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+	INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@
 {
 	int ret = -1;
 
+	ftrace_ops_init(ops);
+
 	mutex_lock(&ftrace_lock);
 
 	ret = __register_ftrace_function(ops);
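
The regex_lock conversion has two cooperating halves: statically defined
ftrace_ops get the mutex through INIT_REGEX_LOCK() at build time, while
dynamically created ops are initialized lazily by ftrace_ops_init() the first
time an API entry point sees them. A sketch of both styles, assuming
INIT_REGEX_LOCK() is visible at the definition site:

	/* Static: initialized flag plus compile-time mutex initializer.
	 * The macro expands to nothing without CONFIG_DYNAMIC_FTRACE. */
	static struct ftrace_ops sketch_ops __read_mostly = {
		.func	= ftrace_stub,
		.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
			  FTRACE_OPS_FL_INITIALIZED,
		INIT_REGEX_LOCK(sketch_ops)
	};

	/* Dynamic: flags left clear, so the first call into, say,
	 * ftrace_set_filter() runs ftrace_ops_init() and does
	 * mutex_init(&ops->regex_lock) exactly once. */
	static struct ftrace_ops sketch_dyn_ops = {
		.func = ftrace_stub,
	};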
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53582e9..7a0cf68 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -251,7 +251,8 @@
 	switch (enable) {
 	case 0:
 		/*
-		 * When soft_disable is set and enable is cleared, we want
+		 * When soft_disable is set and enable is cleared, the sm_ref
+		 * reference counter is decremented. If it reaches 0, we want
 		 * to clear the SOFT_DISABLED flag but leave the event in the
 		 * state that it was. That is, if the event was enabled and
 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@
 		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
 		 */
 		if (soft_disable) {
+			if (atomic_dec_return(&file->sm_ref) > 0)
+				break;
 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
 		} else
@@ -291,8 +294,11 @@
 		 */
 		if (!soft_disable)
 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-		else
+		else {
+			if (atomic_inc_return(&file->sm_ref) > 1)
+				break;
 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+		}
 
 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -623,6 +629,8 @@
 	if (file->flags & FTRACE_EVENT_FL_ENABLED) {
 		if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
 			buf = "0*\n";
+		else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+			buf = "1*\n";
 		else
 			buf = "1\n";
 	} else
@@ -1521,6 +1529,24 @@
 	return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+		       struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	if (!file)
+		return NULL;
+
+	file->event_call = call;
+	file->tr = tr;
+	atomic_set(&file->sm_ref, 0);
+	list_add(&file->list, &tr->events);
+
+	return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@
 {
 	struct ftrace_event_file *file;
 
-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;
 
-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@
 {
 	struct ftrace_event_file *file;
 
-	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+	file = trace_create_new_event(call, tr);
 	if (!file)
 		return -ENOMEM;
 
-	file->event_call = call;
-	file->tr = tr;
-	list_add(&file->list, &tr->events);
-
 	return 0;
 }
 
@@ -2061,8 +2079,18 @@
 	if (ret < 0)
 		goto out_put;
 	ret = register_ftrace_function_probe(glob, ops, data);
-	if (!ret)
+	/*
+	 * The above returns on success the # of functions enabled,
+	 * but if it didn't find any functions it returns zero.
+	 * Consider no functions a failure too.
+	 */
+	if (!ret) {
+		ret = -ENOENT;
 		goto out_disable;
+	} else if (ret < 0)
+		goto out_disable;
+	/* Just return zero, not the number of enabled functions */
+	ret = 0;
  out:
 	mutex_unlock(&event_mutex);
 	return ret;
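
sm_ref turns soft mode into a counted state: several soft-disable users can
stack, and the SOFT_MODE bit only flips on the 0<->1 transitions. A minimal
sketch of that discipline (the bit number and helper names are illustrative):

	/* Sketch: first enable sets the mode bit, last disable clears it. */
	static void sketch_soft_enable(atomic_t *sm_ref, unsigned long *flags)
	{
		if (atomic_inc_return(sm_ref) > 1)
			return;		/* someone already holds soft mode */
		set_bit(0, flags);	/* stands in for the SOFT_MODE bit */
	}

	static void sketch_soft_disable(atomic_t *sm_ref, unsigned long *flags)
	{
		if (atomic_dec_return(sm_ref) > 0)
			return;		/* other holders remain */
		clear_bit(0, flags);
	}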
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1865d5f..636d45f 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
 	struct list_head	list;
 	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
+	struct ftrace_event_file	**files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
@@ -46,7 +46,7 @@
 	(sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
 	return tp->rp.handler != NULL;
 }
@@ -183,12 +183,57 @@
 	return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+	struct ftrace_event_file **file = tp->files;
+	int ret = 0;
+
+	if (file)
+		while (*(file++))
+			ret++;
+
+	return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe.
+ * If @file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	tp->flags |= flag;
+	mutex_lock(&probe_enable_lock);
+
+	if (file) {
+		struct ftrace_event_file **new, **old = tp->files;
+		int n = trace_probe_nr_files(tp);
+
+		/* one slot for the new file and one for the NULL stopper */
+		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+			      GFP_KERNEL);
+		if (!new) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+		new[n] = file;
+		/* The last one keeps a NULL */
+
+		rcu_assign_pointer(tp->files, new);
+		tp->flags |= TP_FLAG_TRACE;
+
+		if (old) {
+			/* Make sure the probe is done with old files */
+			synchronize_sched();
+			kfree(old);
+		}
+	} else
+		tp->flags |= TP_FLAG_PROFILE;
+
 	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
 	    !trace_probe_has_gone(tp)) {
 		if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@
 			ret = enable_kprobe(&tp->rp.kp);
 	}
 
+ out_unlock:
+	mutex_unlock(&probe_enable_lock);
+
 	return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-	tp->flags &= ~flag;
+	int i;
+
+	if (tp->files) {
+		for (i = 0; tp->files[i]; i++)
+			if (tp->files[i] == file)
+				return i;
+	}
+
+	return -1;
+}
+
+/*
+ * Disable trace_probe.
+ * If @file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+	int ret = 0;
+
+	mutex_lock(&probe_enable_lock);
+
+	if (file) {
+		struct ftrace_event_file **new, **old = tp->files;
+		int n = trace_probe_nr_files(tp);
+		int i, j;
+
+		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		if (n == 1) {	/* Remove the last file */
+			tp->flags &= ~TP_FLAG_TRACE;
+			new = NULL;
+		} else {
+			new = kzalloc(n * sizeof(struct ftrace_event_file *),
+				      GFP_KERNEL);
+			if (!new) {
+				ret = -ENOMEM;
+				goto out_unlock;
+			}
+
+			/* This copy & check loop copies the NULL stopper too */
+			for (i = 0, j = 0; j < n && i < n + 1; i++)
+				if (old[i] != file)
+					new[j++] = old[i];
+		}
+
+		rcu_assign_pointer(tp->files, new);
+
+		/* Make sure the probe is done with old files */
+		synchronize_sched();
+		kfree(old);
+	} else
+		tp->flags &= ~TP_FLAG_PROFILE;
+
 	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
 		if (trace_probe_is_return(tp))
 			disable_kretprobe(&tp->rp);
 		else
 			disable_kprobe(&tp->rp.kp);
 	}
+
+ out_unlock:
+	mutex_unlock(&probe_enable_lock);
+
+	return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
@@ -723,9 +832,10 @@
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+		    struct ftrace_event_file *ftrace_file)
 {
-	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct kprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -733,7 +843,10 @@
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
 
-	tp->nhit++;
+	WARN_ON(call != ftrace_file->event_call);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
@@ -741,13 +854,14 @@
 	dsize = __get_data_size(tp, regs);
 	size = sizeof(*entry) + tp->size + dsize;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, irq_flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type,
+						size, irq_flags, pc);
 	if (!event)
 		return;
 
 	entry = ring_buffer_event_data(event);
-	entry->ip = (unsigned long)kp->addr;
+	entry->ip = (unsigned long)tp->rp.kp.addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@
 						irq_flags, pc, regs);
 }
 
-/* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-					  struct pt_regs *regs)
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
+	struct ftrace_event_file **file = tp->files;
+
+	/* Note: preempt is already disabled around the kprobe handler */
+	while (*file) {
+		__kprobe_trace_func(tp, regs, *file);
+		file++;
+	}
+}
+
+/* Kretprobe handler */
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		       struct pt_regs *regs,
+		       struct ftrace_event_file *ftrace_file)
+{
 	struct kretprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -767,14 +894,20 @@
 	unsigned long irq_flags;
 	struct ftrace_event_call *call = &tp->call;
 
+	WARN_ON(call != ftrace_file->event_call);
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+		return;
+
 	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	dsize = __get_data_size(tp, regs);
 	size = sizeof(*entry) + tp->size + dsize;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, irq_flags, pc);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type,
+						size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -788,6 +921,19 @@
 						irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		     struct pt_regs *regs)
+{
+	struct ftrace_event_file **file = tp->files;
+
+	/* Note: preempt is already disabled around the kprobe handler */
+	while (*file) {
+		__kretprobe_trace_func(tp, ri, regs, *file);
+		file++;
+	}
+}
+
 /* Event entry printers */
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-					 struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry_head *entry;
 	struct hlist_head *head;
@@ -997,7 +1142,7 @@
 	if (!entry)
 		return;
 
-	entry->ip = (unsigned long)kp->addr;
+	entry->ip = (unsigned long)tp->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
@@ -1007,10 +1152,10 @@
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-					    struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+		    struct pt_regs *regs)
 {
-	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry_head *entry;
 	struct hlist_head *head;
@@ -1044,20 +1189,19 @@
 		    enum trace_reg type, void *data)
 {
 	struct trace_probe *tp = (struct trace_probe *)event->data;
+	struct ftrace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return enable_trace_probe(tp, TP_FLAG_TRACE);
+		return enable_trace_probe(tp, file);
 	case TRACE_REG_UNREGISTER:
-		disable_trace_probe(tp, TP_FLAG_TRACE);
-		return 0;
+		return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return enable_trace_probe(tp, TP_FLAG_PROFILE);
+		return enable_trace_probe(tp, NULL);
 	case TRACE_REG_PERF_UNREGISTER:
-		disable_trace_probe(tp, TP_FLAG_PROFILE);
-		return 0;
+		return disable_trace_probe(tp, NULL);
 	case TRACE_REG_PERF_OPEN:
 	case TRACE_REG_PERF_CLOSE:
 	case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+	tp->nhit++;
+
 	if (tp->flags & TP_FLAG_TRACE)
-		kprobe_trace_func(kp, regs);
+		kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kprobe_perf_func(kp, regs);
+		kprobe_perf_func(tp, regs);
 #endif
 	return 0;	/* We don't tweak the kernel, so just return 0 */
 }
@@ -1087,11 +1233,13 @@
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+	tp->nhit++;
+
 	if (tp->flags & TP_FLAG_TRACE)
-		kretprobe_trace_func(ri, regs);
+		kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
 	if (tp->flags & TP_FLAG_PROFILE)
-		kretprobe_perf_func(ri, regs);
+		kretprobe_perf_func(tp, ri, regs);
 #endif
 	return 0;	/* We don't tweak the kernel, so just return 0 */
 }
@@ -1189,11 +1337,24 @@
 	return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+	struct ftrace_event_file *file;
+
+	list_for_each_entry(file, &tr->events, list)
+		if (file->event_call == &tp->call)
+			return file;
+
+	return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
 	int ret, warn = 0;
 	int (*target)(int, int, int, int, int, int);
 	struct trace_probe *tp;
+	struct ftrace_event_file *file;
 
 	target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1364,43 @@
 				  "$stack $stack0 +0($stack)",
 				  create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on probing function entry.\n");
+		pr_warn("error on probing function entry.\n");
 		warn++;
 	} else {
 		/* Enable trace point */
 		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
-			pr_warning("error on getting new probe.\n");
+			pr_warn("error on getting new probe.\n");
 			warn++;
-		} else
-			enable_trace_probe(tp, TP_FLAG_TRACE);
+		} else {
+			file = find_trace_probe_file(tp, top_trace_array());
+			if (WARN_ON_ONCE(file == NULL)) {
+				pr_warn("error on getting probe file.\n");
+				warn++;
+			} else
+				enable_trace_probe(tp, file);
+		}
 	}
 
 	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
 				  "$retval", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on probing function return.\n");
+		pr_warn("error on probing function return.\n");
 		warn++;
 	} else {
 		/* Enable trace point */
 		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 		if (WARN_ON_ONCE(tp == NULL)) {
-			pr_warning("error on getting new probe.\n");
+			pr_warn("error on getting 2nd new probe.\n");
 			warn++;
-		} else
-			enable_trace_probe(tp, TP_FLAG_TRACE);
+		} else {
+			file = find_trace_probe_file(tp, top_trace_array());
+			if (WARN_ON_ONCE(file == NULL)) {
+				pr_warn("error on getting probe file.\n");
+				warn++;
+			} else
+				enable_trace_probe(tp, file);
+		}
 	}
 
 	if (warn)
@@ -1238,27 +1411,39 @@
 	/* Disable trace points before removing it */
 	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tp == NULL)) {
-		pr_warning("error on getting test probe.\n");
+		pr_warn("error on getting test probe.\n");
 		warn++;
-	} else
-		disable_trace_probe(tp, TP_FLAG_TRACE);
+	} else {
+		file = find_trace_probe_file(tp, top_trace_array());
+		if (WARN_ON_ONCE(file == NULL)) {
+			pr_warn("error on getting probe file.\n");
+			warn++;
+		} else
+			disable_trace_probe(tp, file);
+	}
 
 	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tp == NULL)) {
-		pr_warning("error on getting 2nd test probe.\n");
+		pr_warn("error on getting 2nd test probe.\n");
 		warn++;
-	} else
-		disable_trace_probe(tp, TP_FLAG_TRACE);
+	} else {
+		file = find_trace_probe_file(tp, top_trace_array());
+		if (WARN_ON_ONCE(file == NULL)) {
+			pr_warn("error on getting probe file.\n");
+			warn++;
+		} else
+			disable_trace_probe(tp, file);
+	}
 
 	ret = traceprobe_command("-:testprobe", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on deleting a probe.\n");
+		pr_warn("error on deleting a probe.\n");
 		warn++;
 	}
 
 	ret = traceprobe_command("-:testprobe2", create_trace_probe);
 	if (WARN_ON_ONCE(ret)) {
-		pr_warning("error on deleting a probe.\n");
+		pr_warn("error on deleting a probe.\n");
 		warn++;
 	}
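
tp->files is a classic RCU-swapped, NULL-terminated vector: kprobe handlers
walk it with preemption disabled (which is why synchronize_sched() is a
sufficient grace period), and updaters publish a fresh copy before freeing
the old one. A condensed sketch of both sides (generic void pointers; the
kernel code uses struct ftrace_event_file *):

	/* Reader: runs in handler context, preemption already off. */
	static void sketch_for_each(void **files, void (*fn)(void *))
	{
		while (*files)
			fn(*files++);
	}

	/* Writer: copy, append, publish, wait, free.  n is the current
	 * element count, not counting the NULL stopper. */
	static int sketch_append(void ***slot, int n, void *entry)
	{
		void **old = *slot, **new;

		new = kzalloc((n + 2) * sizeof(void *), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		memcpy(new, old, n * sizeof(void *));
		new[n] = entry;			/* new[n + 1] stays NULL */

		rcu_assign_pointer(*slot, new);
		synchronize_sched();		/* wait out all handlers */
		kfree(old);
		return 0;
	}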
 
diff --git a/lib/kobject.c b/lib/kobject.c
index a654866..b7e29a6 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,7 +529,7 @@
 	return kobj;
 }
 
-static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
 {
 	if (!kref_get_unless_zero(&kobj->kref))
 		kobj = NULL;
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 8335d39..4a83ecd 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -365,7 +365,13 @@
 	return 0;
 }
 
-static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change)
+/* used as internal flags to __lc_get */
+enum {
+	LC_GET_MAY_CHANGE = 1,
+	LC_GET_MAY_USE_UNCOMMITTED = 2,
+};
+
+static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
 {
 	struct lc_element *e;
 
@@ -380,22 +386,31 @@
 	 * this enr is currently being pulled in already,
 	 * and will be available once the pending transaction
 	 * has been committed. */
-	if (e && e->lc_new_number == e->lc_number) {
+	if (e) {
+		if (e->lc_new_number != e->lc_number) {
+			/* It has been found above, but on the "to_be_changed"
+			 * list, not yet committed.  Don't pull it in twice,
+			 * wait for the transaction, then try again...
+			 */
+			if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
+				RETURN(NULL);
+			/* ... unless the caller is aware of the implications,
+			 * probably preparing a cumulative transaction. */
+			++e->refcnt;
+			++lc->hits;
+			RETURN(e);
+		}
+		/* else: lc_new_number == lc_number; a real hit. */
 		++lc->hits;
 		if (e->refcnt++ == 0)
 			lc->used++;
 		list_move(&e->list, &lc->in_use); /* Not evictable... */
 		RETURN(e);
 	}
+	/* e == NULL */
 
 	++lc->misses;
-	if (!may_change)
-		RETURN(NULL);
-
-	/* It has been found above, but on the "to_be_changed" list, not yet
-	 * committed.  Don't pull it in twice, wait for the transaction, then
-	 * try again */
-	if (e)
+	if (!(flags & LC_GET_MAY_CHANGE))
 		RETURN(NULL);
 
 	/* To avoid races with lc_try_lock(), first, mark us dirty
@@ -477,7 +492,27 @@
  */
 struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
 {
-	return __lc_get(lc, enr, 1);
+	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
+}
+
+/**
+ * lc_get_cumulative - like lc_get; also finds to-be-changed elements
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Unlike lc_get(), this also returns the element for @enr if it belongs to
+ * a pending transaction, so the return values are like for lc_get(),
+ * plus:
+ *
+ * pointer to an element already on the "to_be_changed" list.
+ * 	In this case, the cache was already marked %LC_DIRTY.
+ *
+ * Caller needs to make sure that the pending transaction is completed,
+ * before proceeding to actually use this element.
+ */
+struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
+{
+	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
 }
 
 /**
@@ -648,3 +683,4 @@
 EXPORT_SYMBOL(lc_seq_dump_details);
 EXPORT_SYMBOL(lc_try_lock);
 EXPORT_SYMBOL(lc_is_used);
+EXPORT_SYMBOL(lc_get_cumulative);
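
Widening __lc_get()'s bool parameter into a flags word is what makes lc_get_cumulative() possible: "may change" and "may use uncommitted" are now independent knobs. A compiling toy version of just the flag handling, with the cache itself stubbed out (all names made up):

#include <stdio.h>

enum {
	GET_MAY_CHANGE          = 1 << 0,
	GET_MAY_USE_UNCOMMITTED = 1 << 1,
};

enum state { MISS, UNCOMMITTED, HIT };

/* Decide what a lookup returns, depending on whether the element is
 * absent, committed, or only part of a pending transaction. */
static const char *get(unsigned int flags, enum state s)
{
	if (s == HIT)
		return "element (hit)";
	if (s == UNCOMMITTED)
		return (flags & GET_MAY_USE_UNCOMMITTED)
			? "uncommitted element"
			: "NULL (wait for the transaction)";
	/* miss */
	return (flags & GET_MAY_CHANGE)
		? "new element (cache changed)"
		: "NULL (lookup only)";
}

int main(void)
{
	printf("lc_get-like, uncommitted:            %s\n",
	       get(GET_MAY_CHANGE, UNCOMMITTED));
	printf("lc_get_cumulative-like, uncommitted: %s\n",
	       get(GET_MAY_CHANGE | GET_MAY_USE_UNCOMMITTED, UNCOMMITTED));
	return 0;
}
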
diff --git a/lib/rwsem.c b/lib/rwsem.c
index cf0ad2a..19c5fa9 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -223,7 +223,9 @@
 			count = RWSEM_ACTIVE_WRITE_BIAS;
 			if (!list_is_singular(&sem->wait_list))
 				count += RWSEM_WAITING_BIAS;
-			if (cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+
+			if (sem->count == RWSEM_WAITING_BIAS &&
+			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
 							RWSEM_WAITING_BIAS)
 				break;
 		}
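
The rwsem change is the classic check-before-cmpxchg optimization: a cmpxchg that fails still acquires the cache line in exclusive mode, so under contention a cheap plain read first keeps waiters from bouncing the line between CPUs. A userspace sketch of the shape of it, assuming C11 atomics (illustrative, not the kernel's primitives):

#include <stdatomic.h>
#include <stdio.h>

/* Attempt the read-modify-write only when a plain load says it can
 * succeed; a failing CAS would still take the cache line exclusively
 * and slow every other waiter spinning here. */
static int try_claim(atomic_long *count, long expected, long desired)
{
	long e = expected;

	if (atomic_load_explicit(count, memory_order_relaxed) != expected)
		return 0;
	return atomic_compare_exchange_strong(count, &e, desired);
}

int main(void)
{
	atomic_long count = 1;	/* stand-in for RWSEM_WAITING_BIAS */

	printf("first claim:  %d\n", try_claim(&count, 1, 2));
	printf("second claim: %d\n", try_claim(&count, 1, 2));
	return 0;
}
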
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 41733c5..5025174 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -31,13 +31,14 @@
 static struct class *bdi_class;
 
 /*
- * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
- * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
  * locking.
  */
 DEFINE_SPINLOCK(bdi_lock);
 LIST_HEAD(bdi_list);
-LIST_HEAD(bdi_pending_list);
+
+/* bdi_wq serves all asynchronous writeback tasks */
+struct workqueue_struct *bdi_wq;
 
 void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
 {
@@ -257,6 +258,11 @@
 {
 	int err;
 
+	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
+					      WQ_UNBOUND | WQ_SYSFS, 0);
+	if (!bdi_wq)
+		return -ENOMEM;
+
 	err = bdi_init(&default_backing_dev_info);
 	if (!err)
 		bdi_register(&default_backing_dev_info, NULL, "default");
@@ -271,26 +277,6 @@
 	return wb_has_dirty_io(&bdi->wb);
 }
 
-static void wakeup_timer_fn(unsigned long data)
-{
-	struct backing_dev_info *bdi = (struct backing_dev_info *)data;
-
-	spin_lock_bh(&bdi->wb_lock);
-	if (bdi->wb.task) {
-		trace_writeback_wake_thread(bdi);
-		wake_up_process(bdi->wb.task);
-	} else if (bdi->dev) {
-		/*
-		 * When bdi tasks are inactive for long time, they are killed.
-		 * In this case we have to wake-up the forker thread which
-		 * should create and run the bdi thread.
-		 */
-		trace_writeback_wake_forker_thread(bdi);
-		wake_up_process(default_backing_dev_info.wb.task);
-	}
-	spin_unlock_bh(&bdi->wb_lock);
-}
-
 /*
  * This function is used when the first inode for this bdi is marked dirty. It
  * wakes up the corresponding bdi thread, which should then take care of the
@@ -307,176 +293,7 @@
 	unsigned long timeout;
 
 	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
-}
-
-/*
- * Calculate the longest interval (jiffies) bdi threads are allowed to be
- * inactive.
- */
-static unsigned long bdi_longest_inactive(void)
-{
-	unsigned long interval;
-
-	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
-	return max(5UL * 60 * HZ, interval);
-}
-
-/*
- * Clear pending bit and wakeup anybody waiting for flusher thread creation or
- * shutdown
- */
-static void bdi_clear_pending(struct backing_dev_info *bdi)
-{
-	clear_bit(BDI_pending, &bdi->state);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&bdi->state, BDI_pending);
-}
-
-static int bdi_forker_thread(void *ptr)
-{
-	struct bdi_writeback *me = ptr;
-
-	current->flags |= PF_SWAPWRITE;
-	set_freezable();
-
-	/*
-	 * Our parent may run at a different priority, just set us to normal
-	 */
-	set_user_nice(current, 0);
-
-	for (;;) {
-		struct task_struct *task = NULL;
-		struct backing_dev_info *bdi;
-		enum {
-			NO_ACTION,   /* Nothing to do */
-			FORK_THREAD, /* Fork bdi thread */
-			KILL_THREAD, /* Kill inactive bdi thread */
-		} action = NO_ACTION;
-
-		/*
-		 * Temporary measure, we want to make sure we don't see
-		 * dirty data on the default backing_dev_info
-		 */
-		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
-			del_timer(&me->wakeup_timer);
-			wb_do_writeback(me, 0);
-		}
-
-		spin_lock_bh(&bdi_lock);
-		/*
-		 * In the following loop we are going to check whether we have
-		 * some work to do without any synchronization with tasks
-		 * waking us up to do work for them. Set the task state here
-		 * so that we don't miss wakeups after verifying conditions.
-		 */
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		list_for_each_entry(bdi, &bdi_list, bdi_list) {
-			bool have_dirty_io;
-
-			if (!bdi_cap_writeback_dirty(bdi) ||
-			     bdi_cap_flush_forker(bdi))
-				continue;
-
-			WARN(!test_bit(BDI_registered, &bdi->state),
-			     "bdi %p/%s is not registered!\n", bdi, bdi->name);
-
-			have_dirty_io = !list_empty(&bdi->work_list) ||
-					wb_has_dirty_io(&bdi->wb);
-
-			/*
-			 * If the bdi has work to do, but the thread does not
-			 * exist - create it.
-			 */
-			if (!bdi->wb.task && have_dirty_io) {
-				/*
-				 * Set the pending bit - if someone will try to
-				 * unregister this bdi - it'll wait on this bit.
-				 */
-				set_bit(BDI_pending, &bdi->state);
-				action = FORK_THREAD;
-				break;
-			}
-
-			spin_lock(&bdi->wb_lock);
-
-			/*
-			 * If there is no work to do and the bdi thread was
-			 * inactive long enough - kill it. The wb_lock is taken
-			 * to make sure no-one adds more work to this bdi and
-			 * wakes the bdi thread up.
-			 */
-			if (bdi->wb.task && !have_dirty_io &&
-			    time_after(jiffies, bdi->wb.last_active +
-						bdi_longest_inactive())) {
-				task = bdi->wb.task;
-				bdi->wb.task = NULL;
-				spin_unlock(&bdi->wb_lock);
-				set_bit(BDI_pending, &bdi->state);
-				action = KILL_THREAD;
-				break;
-			}
-			spin_unlock(&bdi->wb_lock);
-		}
-		spin_unlock_bh(&bdi_lock);
-
-		/* Keep working if default bdi still has things to do */
-		if (!list_empty(&me->bdi->work_list))
-			__set_current_state(TASK_RUNNING);
-
-		switch (action) {
-		case FORK_THREAD:
-			__set_current_state(TASK_RUNNING);
-			task = kthread_create(bdi_writeback_thread, &bdi->wb,
-					      "flush-%s", dev_name(bdi->dev));
-			if (IS_ERR(task)) {
-				/*
-				 * If thread creation fails, force writeout of
-				 * the bdi from the thread. Hopefully 1024 is
-				 * large enough for efficient IO.
-				 */
-				writeback_inodes_wb(&bdi->wb, 1024,
-						    WB_REASON_FORKER_THREAD);
-			} else {
-				/*
-				 * The spinlock makes sure we do not lose
-				 * wake-ups when racing with 'bdi_queue_work()'.
-				 * And as soon as the bdi thread is visible, we
-				 * can start it.
-				 */
-				spin_lock_bh(&bdi->wb_lock);
-				bdi->wb.task = task;
-				spin_unlock_bh(&bdi->wb_lock);
-				wake_up_process(task);
-			}
-			bdi_clear_pending(bdi);
-			break;
-
-		case KILL_THREAD:
-			__set_current_state(TASK_RUNNING);
-			kthread_stop(task);
-			bdi_clear_pending(bdi);
-			break;
-
-		case NO_ACTION:
-			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
-				/*
-				 * There are no dirty data. The only thing we
-				 * should now care about is checking for
-				 * inactive bdi threads and killing them. Thus,
-				 * let's sleep for longer time, save energy and
-				 * be friendly for battery-driven devices.
-				 */
-				schedule_timeout(bdi_longest_inactive());
-			else
-				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
-			try_to_freeze();
-			break;
-		}
-	}
-
-	return 0;
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
 }
 
 /*
@@ -489,6 +306,9 @@
 	spin_unlock_bh(&bdi_lock);
 
 	synchronize_rcu_expedited();
+
+	/* bdi_list is now unused, clear it to mark @bdi dying */
+	INIT_LIST_HEAD(&bdi->bdi_list);
 }
 
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -508,20 +328,6 @@
 
 	bdi->dev = dev;
 
-	/*
-	 * Just start the forker thread for our default backing_dev_info,
-	 * and add other bdi's to the list. They will get a thread created
-	 * on-demand when they need it.
-	 */
-	if (bdi_cap_flush_forker(bdi)) {
-		struct bdi_writeback *wb = &bdi->wb;
-
-		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
-						dev_name(dev));
-		if (IS_ERR(wb->task))
-			return PTR_ERR(wb->task);
-	}
-
 	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(BDI_registered, &bdi->state);
 
@@ -545,8 +351,6 @@
  */
 static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 {
-	struct task_struct *task;
-
 	if (!bdi_cap_writeback_dirty(bdi))
 		return;
 
@@ -556,22 +360,20 @@
 	bdi_remove_from_list(bdi);
 
 	/*
-	 * If setup is pending, wait for that to complete first
+	 * Drain the work list and shut down the delayed_work.  At this point,
+	 * @bdi->bdi_list is empty, telling bdi_writeback_workfn() that @bdi
+	 * is dying and its work_list needs to be drained no matter what.
 	 */
-	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
-			TASK_UNINTERRUPTIBLE);
+	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+	flush_delayed_work(&bdi->wb.dwork);
+	WARN_ON(!list_empty(&bdi->work_list));
 
 	/*
-	 * Finally, kill the kernel thread. We don't need to be RCU
-	 * safe anymore, since the bdi is gone from visibility.
+	 * This shouldn't be necessary unless @bdi for some reason has
+	 * unflushed dirty IO after work_list is drained.  Do it anyway
+	 * just in case.
 	 */
-	spin_lock_bh(&bdi->wb_lock);
-	task = bdi->wb.task;
-	bdi->wb.task = NULL;
-	spin_unlock_bh(&bdi->wb_lock);
-
-	if (task)
-		kthread_stop(task);
+	cancel_delayed_work_sync(&bdi->wb.dwork);
 }
 
 /*
@@ -597,10 +399,8 @@
 		bdi_set_min_ratio(bdi, 0);
 		trace_writeback_bdi_unregister(bdi);
 		bdi_prune_sb(bdi);
-		del_timer_sync(&bdi->wb.wakeup_timer);
 
-		if (!bdi_cap_flush_forker(bdi))
-			bdi_wb_shutdown(bdi);
+		bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
 
 		spin_lock_bh(&bdi->wb_lock);
@@ -622,7 +422,7 @@
 	INIT_LIST_HEAD(&wb->b_io);
 	INIT_LIST_HEAD(&wb->b_more_io);
 	spin_lock_init(&wb->list_lock);
-	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
+	INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
 }
 
 /*
@@ -695,12 +495,11 @@
 	bdi_unregister(bdi);
 
 	/*
-	 * If bdi_unregister() had already been called earlier, the
-	 * wakeup_timer could still be armed because bdi_prune_sb()
-	 * can race with the bdi_wakeup_thread_delayed() calls from
-	 * __mark_inode_dirty().
+	 * If bdi_unregister() had already been called earlier, the dwork
+	 * could still be pending because bdi_prune_sb() can race with the
+	 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
 	 */
-	del_timer_sync(&bdi->wb.wakeup_timer);
+	cancel_delayed_work_sync(&bdi->wb.dwork);
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
 		percpu_counter_destroy(&bdi->bdi_stat[i]);
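
The bulk of this backing-dev.c change replaces the hand-rolled forker thread (which created and killed per-bdi flusher threads) with per-bdi delayed work on the new bdi_wq workqueue; mod_delayed_work() provides the "queued at most once, timer can be moved" behavior that previously needed the wakeup_timer plus wb_lock dance. A toy model of just those semantics (purely illustrative, not the real workqueue API):

#include <stdbool.h>
#include <stdio.h>

struct delayed_work {
	bool pending;
	unsigned long expires;
};

/* Whether or not the work was already pending, it ends up queued
 * exactly once, with the new expiry time. */
static void mod_delayed_work_sim(struct delayed_work *dw,
				 unsigned long now, unsigned long delay)
{
	dw->pending = true;
	dw->expires = now + delay;
}

int main(void)
{
	struct delayed_work dw = { false, 0 };

	mod_delayed_work_sim(&dw, 100, 500);	/* normal wakeup: arm timer */
	mod_delayed_work_sim(&dw, 200, 0);	/* shutdown path: run ASAP */
	printf("pending=%d expires=%lu\n", dw.pending, dw.expires);
	return 0;
}
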
diff --git a/mm/bounce.c b/mm/bounce.c
index a5c2ec3..c9f0a43 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -101,7 +101,7 @@
 	struct bio_vec *tovec, *fromvec;
 	int i;
 
-	__bio_for_each_segment(tovec, to, i, 0) {
+	bio_for_each_segment(tovec, to, i) {
 		fromvec = from->bi_io_vec + i;
 
 		/*
@@ -134,7 +134,7 @@
 	/*
 	 * free up bounce indirect pages used
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		org_vec = bio_orig->bi_io_vec + i;
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
@@ -199,78 +199,43 @@
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 			       mempool_t *pool, int force)
 {
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig);
+	struct bio *bio;
+	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
+	unsigned i;
 
-	bio_for_each_segment(from, *bio_orig, i) {
-		page = from->bv_page;
+	bio_for_each_segment(from, *bio_orig, i)
+		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+			goto bounce;
 
-		/*
-		 * is destination page below bounce pfn?
-		 */
+	return;
+bounce:
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+
+	bio_for_each_segment_all(to, bio, i) {
+		struct page *page = to->bv_page;
+
 		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
 			continue;
 
-		/*
-		 * irk, bounce it
-		 */
-		if (!bio) {
-			unsigned int cnt = (*bio_orig)->bi_vcnt;
-
-			bio = bio_alloc(GFP_NOIO, cnt);
-			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
-		}
-			
-
-		to = bio->bi_io_vec + i;
-
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
-		to->bv_len = from->bv_len;
-		to->bv_offset = from->bv_offset;
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
 
-			flush_dcache_page(from->bv_page);
+			flush_dcache_page(page);
+
 			vto = page_address(to->bv_page) + to->bv_offset;
-			vfrom = kmap(from->bv_page) + from->bv_offset;
+			vfrom = kmap_atomic(page) + to->bv_offset;
 			memcpy(vto, vfrom, to->bv_len);
-			kunmap(from->bv_page);
+			kunmap_atomic(vfrom);
 		}
 	}
 
-	/*
-	 * no pages bounced
-	 */
-	if (!bio)
-		return;
-
 	trace_block_bio_bounce(q, *bio_orig);
 
-	/*
-	 * at least one page was bounced, fill in possible non-highmem
-	 * pages
-	 */
-	__bio_for_each_segment(from, *bio_orig, i, 0) {
-		to = bio_iovec_idx(bio, i);
-		if (!to->bv_page) {
-			to->bv_page = from->bv_page;
-			to->bv_len = from->bv_len;
-			to->bv_offset = from->bv_offset;
-		}
-	}
-
-	bio->bi_bdev = (*bio_orig)->bi_bdev;
 	bio->bi_flags |= (1 << BIO_BOUNCED);
-	bio->bi_sector = (*bio_orig)->bi_sector;
-	bio->bi_rw = (*bio_orig)->bi_rw;
-
-	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
-	bio->bi_idx = (*bio_orig)->bi_idx;
-	bio->bi_size = (*bio_orig)->bi_size;
 
 	if (pool == page_pool) {
 		bio->bi_end_io = bounce_end_io_write;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f1d921..cb1c9de 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -92,16 +92,18 @@
 	/*
 	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
 	 */
-	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
-	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
-	MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
+	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
+	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
+	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
+	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
+	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 };
 
 static const char * const mem_cgroup_stat_names[] = {
 	"cache",
 	"rss",
+	"rss_huge",
 	"mapped_file",
 	"swap",
 };
@@ -917,6 +919,7 @@
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
+					 struct page *page,
 					 bool anon, int nr_pages)
 {
 	preempt_disable();
@@ -932,6 +935,10 @@
 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 				nr_pages);
 
+	if (PageTransHuge(page))
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+				nr_pages);
+
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
@@ -2914,7 +2921,7 @@
 	else
 		anon = false;
 
-	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
+	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
 	unlock_page_cgroup(pc);
 
 	/*
@@ -3708,16 +3715,21 @@
 {
 	struct page_cgroup *head_pc = lookup_page_cgroup(head);
 	struct page_cgroup *pc;
+	struct mem_cgroup *memcg;
 	int i;
 
 	if (mem_cgroup_disabled())
 		return;
+
+	memcg = head_pc->mem_cgroup;
 	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		pc = head_pc + i;
-		pc->mem_cgroup = head_pc->mem_cgroup;
+		pc->mem_cgroup = memcg;
 		smp_wmb();/* see __commit_charge() */
 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
 	}
+	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+		       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -3773,11 +3785,11 @@
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, anon, -nr_pages);
+	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, anon, nr_pages);
+	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
 	move_unlock_mem_cgroup(from, &flags);
 	ret = 0;
 unlock:
@@ -4152,7 +4164,7 @@
 		break;
 	}
 
-	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
+	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -4502,7 +4514,7 @@
 	lock_page_cgroup(pc);
 	if (PageCgroupUsed(pc)) {
 		memcg = pc->mem_cgroup;
-		mem_cgroup_charge_statistics(memcg, false, -1);
+		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
 		ClearPageCgroupUsed(pc);
 	}
 	unlock_page_cgroup(pc);
@@ -5030,6 +5042,10 @@
 			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
 	}
 
+	/*
+	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+	 */
 	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
 	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
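
To make the comment above concrete: a transparent hugepage bumps both MEM_CGROUP_STAT_RSS and MEM_CGROUP_STAT_RSS_HUGE, so total usage sums cache and rss only; adding rss_huge on top would double-count. A worked example with made-up page counts:

#include <stdio.h>

int main(void)
{
	long cache = 100, rss = 612, rss_huge = 512;	/* pages, made up */
	long usage = cache + rss;	/* rss_huge is a subset of rss */

	printf("usage = %ld pages (rss_huge %ld already inside rss)\n",
	       usage, rss_huge);
	return 0;
}
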
 
diff --git a/mm/mmap.c b/mm/mmap.c
index da3e9c0..f681e18 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1363,15 +1363,24 @@
 		file = fget(fd);
 		if (!file)
 			goto out;
+		if (is_file_hugepages(file))
+			len = ALIGN(len, huge_page_size(hstate_file(file)));
 	} else if (flags & MAP_HUGETLB) {
 		struct user_struct *user = NULL;
+		struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
+						   SHM_HUGE_MASK);
+
+		if (!hs)
+			return -EINVAL;
+
+		len = ALIGN(len, huge_page_size(hs));
 		/*
 		 * VM_NORESERVE is used because the reservations will be
 		 * taken when vm_ops->mmap() is called.
 		 * A dummy user value is used because we are not locking
 		 * memory so no accounting is necessary
 		 */
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
 				VM_NORESERVE,
 				&user, HUGETLB_ANONHUGE_INODE,
 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 3dcfaf4..8a8cd02 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -14,9 +14,6 @@
  * use_mm
  *	Makes the calling kernel thread take on the specified
  *	mm context.
- *	Called by the retry thread execute retries within the
- *	iocb issuer's mm context, so that copy_from/to_user
- *	operations work seamlessly for aio.
  *	(Note: this routine is intended to be called only
  *	from a kernel thread context)
  */
diff --git a/mm/page_io.c b/mm/page_io.c
index bb5d752..a8a3ef4 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -20,6 +20,7 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/frontswap.h>
+#include <linux/aio.h>
 #include <asm/pgtable.h>
 
 static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -35,7 +36,6 @@
 		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
 		bio->bi_io_vec[0].bv_offset = 0;
 		bio->bi_vcnt = 1;
-		bio->bi_idx = 0;
 		bio->bi_size = PAGE_SIZE;
 		bio->bi_end_io = end_io;
 	}
diff --git a/mm/shmem.c b/mm/shmem.c
index 39b2a0b..5e6a842 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -31,6 +31,7 @@
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/swap.h>
+#include <linux/aio.h>
 
 static struct vfsmount *shm_mnt;
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d2517b0..ff3218a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -446,18 +446,18 @@
 		if (!kmalloc_caches[i]) {
 			kmalloc_caches[i] = create_kmalloc_cache(NULL,
 							1 << i, flags);
-
-			/*
-			 * Caches that are not of the two-to-the-power-of size.
-			 * These have to be created immediately after the
-			 * earlier power of two caches
-			 */
-			if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
-				kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
-
-			if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
-				kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
 		}
+
+		/*
+		 * Caches that are not power-of-two sized.  These have to be
+		 * created immediately after the earlier power-of-two caches.
+		 */
+		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
+			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
+
+		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
+			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
 	}
 
 	/* Kmalloc array is now usable */
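
The 96- and 192-byte caches get special-cased because they are the only common kmalloc sizes that are not powers of two, sitting between 64/128 and 128/256. A tiny sketch of rounding a request up to these buckets (sizes hard-coded here purely for illustration):

#include <stdio.h>

/* Round a request up to the nearest small kmalloc bucket; the table
 * just shows where 96 and 192 sit among the power-of-two sizes. */
static int kmalloc_bucket(int size)
{
	static const int buckets[] = { 8, 16, 32, 64, 96, 128, 192, 256 };

	for (unsigned int i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++)
		if (size <= buckets[i])
			return buckets[i];
	return -1;	/* larger sizes elided */
}

int main(void)
{
	printf("72 -> %d, 100 -> %d, 130 -> %d\n",
	       kmalloc_bucket(72), kmalloc_bucket(100), kmalloc_bucket(130));
	return 0;
}
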
diff --git a/mm/swap.c b/mm/swap.c
index acd40bf..dfd7d71 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -30,6 +30,7 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
+#include <linux/uio.h>
 
 #include "internal.h"
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b12fd86..d365724 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1522,6 +1522,8 @@
  *	Must not be called in NMI context (strictly speaking, only if we don't
  *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
  *	conventions for vfree() arch-dependent would be a really bad idea)
+ *
+ *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
  *	
  */
 void vfree(const void *addr)
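
The new NOTE reflects how the deferred free works in this series: a freed allocation is chained into an llist by overlaying the list node on the dead object's first bytes, which is only safe if every object is at least that large. A userspace sketch of the overlay trick (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct llist_node {
	struct llist_node *next;
};

static struct llist_node *deferred;	/* pending frees */

/* Overlay a list node on the first bytes of the dead object; the
 * object must therefore be at least sizeof(struct llist_node). */
static void deferred_free(void *addr)
{
	struct llist_node *n = addr;

	n->next = deferred;
	deferred = n;
}

int main(void)
{
	void *p = malloc(64);

	if (!p)
		return 1;
	deferred_free(p);
	printf("one pending free: %p\n", (void *)deferred);
	free(deferred);	/* the "real" free happens later */
	return 0;
}
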
diff --git a/net/core/dev.c b/net/core/dev.c
index 40b1fad..fc1e289 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2213,6 +2213,17 @@
 	__be16 type = skb->protocol;
 	int vlan_depth = ETH_HLEN;
 
+	/* Tunnel GSO handlers can set the protocol to Ethernet. */
+	if (type == htons(ETH_P_TEB)) {
+		struct ethhdr *eth;
+
+		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+			return 0;
+
+		eth = (struct ethhdr *)skb_mac_header(skb);
+		type = eth->h_proto;
+	}
+
 	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
 		struct vlan_hdr *vh;
 
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index cc22363..b2e805a 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -150,13 +150,7 @@
 		csum = false;
 
 	/* setup inner skb. */
-	if (greh->protocol == htons(ETH_P_TEB)) {
-		struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
-		skb->protocol = eth->h_proto;
-	} else {
-		skb->protocol = greh->protocol;
-	}
-
+	skb->protocol = greh->protocol;
 	skb->encapsulation = 0;
 
 	if (unlikely(!pskb_may_pull(skb, ghl)))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ae038a..0bf5d39 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2311,7 +2311,6 @@
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	int mac_len = skb->mac_len;
 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-	struct ethhdr *inner_eth = (struct ethhdr *)skb_inner_mac_header(skb);
 	__be16 protocol = skb->protocol;
 	netdev_features_t enc_features;
 	int outer_hlen;
@@ -2324,8 +2323,7 @@
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
-	inner_eth = (struct ethhdr *)skb_mac_header(skb);
-	skb->protocol = inner_eth->h_proto;
+	skb->protocol = htons(ETH_P_TEB);
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
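
Taken together, the dev.c, gre.c and udp.c hunks centralize the tunnel case: GSO handlers now simply mark the skb as ETH_P_TEB, and skb_network_protocol() pulls the real ethertype out of the inner Ethernet header before its VLAN-peeling loop. A userspace sketch of that resolution step (struct and constants redefined locally for illustration):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_TEB	0x6558	/* transparent Ethernet bridging */
#define ETH_P_IP	0x0800

struct eth_hdr {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_proto;	/* big-endian on the wire */
} __attribute__((packed));

int main(void)
{
	struct eth_hdr inner = { .h_proto = htons(ETH_P_IP) };
	uint16_t type = htons(ETH_P_TEB);	/* what the GSO handler set */

	if (type == htons(ETH_P_TEB))
		type = inner.h_proto;	/* tunnel: use the inner ethertype */

	printf("resolved protocol: 0x%04x\n", ntohs(type));
	return 0;
}
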
diff --git a/net/socket.c b/net/socket.c
index b416093..6b94633 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2412,7 +2412,7 @@
 
 SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 {
-	unsigned long a[6];
+	unsigned long a[AUDITSC_ARGS];
 	unsigned long a0, a1;
 	int err;
 	unsigned int len;
@@ -2428,7 +2428,9 @@
 	if (copy_from_user(a, args, len))
 		return -EFAULT;
 
-	audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+	err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+	if (err)
+		return err;
 
 	a0 = a[0];
 	a1 = a[1];
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index a764e22..7da6b45 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -867,8 +867,7 @@
 	err = -EINVAL;
 	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
 	if (!gss_auth->mech) {
-		printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
-				__func__, flavor);
+		dprintk("RPC:       Pseudoflavor %d not found!\n", flavor);
 		goto err_free;
 	}
 	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 5c4c61d..357f613 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -21,16 +21,6 @@
 #include <linux/sunrpc/svcauth.h>
 #include "gss_rpc_xdr.h"
 
-static bool gssx_check_pointer(struct xdr_stream *xdr)
-{
-	__be32 *p;
-
-	p = xdr_reserve_space(xdr, 4);
-	if (unlikely(p == NULL))
-		return -ENOSPC;
-	return *p?true:false;
-}
-
 static int gssx_enc_bool(struct xdr_stream *xdr, int v)
 {
 	__be32 *p;
@@ -264,25 +254,27 @@
 	if (unlikely(p == NULL))
 		return -ENOSPC;
 	count = be32_to_cpup(p++);
-	if (count != 0) {
-		/* we recognize only 1 currently: CREDS_VALUE */
-		oa->count = 1;
+	if (!count)
+		return 0;
 
-		oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
-		if (!oa->data)
-			return -ENOMEM;
+	/* we recognize only 1 currently: CREDS_VALUE */
+	oa->count = 1;
 
-		creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
-		if (!creds) {
-			kfree(oa->data);
-			return -ENOMEM;
-		}
+	oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
+	if (!oa->data)
+		return -ENOMEM;
 
-		oa->data[0].option.data = CREDS_VALUE;
-		oa->data[0].option.len = sizeof(CREDS_VALUE);
-		oa->data[0].value.data = (void *)creds;
-		oa->data[0].value.len = 0;
+	creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+	if (!creds) {
+		kfree(oa->data);
+		return -ENOMEM;
 	}
+
+	oa->data[0].option.data = CREDS_VALUE;
+	oa->data[0].option.len = sizeof(CREDS_VALUE);
+	oa->data[0].value.data = (void *)creds;
+	oa->data[0].value.len = 0;
+
 	for (i = 0; i < count; i++) {
 		gssx_buffer dummy = { 0, NULL };
 		u32 length;
@@ -800,6 +792,7 @@
 				struct xdr_stream *xdr,
 				struct gssx_res_accept_sec_context *res)
 {
+	u32 value_follows;
 	int err;
 
 	/* res->status */
@@ -808,7 +801,10 @@
 		return err;
 
 	/* res->context_handle */
-	if (gssx_check_pointer(xdr)) {
+	err = gssx_dec_bool(xdr, &value_follows);
+	if (err)
+		return err;
+	if (value_follows) {
 		err = gssx_dec_ctx(xdr, res->context_handle);
 		if (err)
 			return err;
@@ -817,7 +813,10 @@
 	}
 
 	/* res->output_token */
-	if (gssx_check_pointer(xdr)) {
+	err = gssx_dec_bool(xdr, &value_follows);
+	if (err)
+		return err;
+	if (value_follows) {
 		err = gssx_dec_buffer(xdr, res->output_token);
 		if (err)
 			return err;
@@ -826,7 +825,10 @@
 	}
 
 	/* res->delegated_cred_handle */
-	if (gssx_check_pointer(xdr)) {
+	err = gssx_dec_bool(xdr, &value_follows);
+	if (err)
+		return err;
+	if (value_follows) {
 		/* we do not support upcall servers sending this data. */
 		return -EINVAL;
 	}
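
gssx_check_pointer() was broken in two ways visible above: it returned -ENOSPC through a bool (so a short buffer looked like "value present") and it never let the caller see an error. Decoding the XDR optional marker explicitly with gssx_dec_bool() fixes both. A self-contained sketch of decoding such a "value follows" boolean from a flat buffer (a simplified stand-in for an xdr_stream):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pull one 4-byte big-endian XDR boolean out of the buffer; unlike the
 * removed helper, a short buffer is a distinct error, not "false". */
static int xdr_dec_bool(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
	uint32_t raw;

	if (end - *p < 4)
		return -1;
	memcpy(&raw, *p, 4);
	*p += 4;
	*v = ntohl(raw);
	return 0;
}

int main(void)
{
	const uint8_t buf[] = { 0, 0, 0, 1 };	/* "value follows" = true */
	const uint8_t *p = buf;
	uint32_t value_follows;

	if (xdr_dec_bool(&p, buf + sizeof(buf), &value_follows))
		return 1;
	printf("value follows: %u\n", value_follows);
	return 0;
}
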
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 3f7930f..5a750b9c3 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -360,7 +360,7 @@
 
 	auth = rpcauth_create(args->authflavor, clnt);
 	if (IS_ERR(auth)) {
-		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
+		dprintk("RPC:       Couldn't create auth handle (flavor %u)\n",
 				args->authflavor);
 		err = PTR_ERR(auth);
 		goto out_no_auth;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 8bbefc3..d4f1468 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -16,6 +16,8 @@
 #include <linux/key-type.h>
 #include <linux/task_work.h>
 
+struct iovec;
+
 #ifdef __KDEBUG
 #define kenter(FMT, ...) \
 	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 4b5c948..33cfd27 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -22,6 +22,7 @@
 #include <linux/err.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
+#include <linux/uio.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 071ce1b..872d59e 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -583,8 +583,6 @@
 	free_irq(dac->irq, dac);
 	snd_card_free(card);
 
-	platform_set_drvdata(pdev, NULL);
-
 	return 0;
 }
 
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6b7e2b5..ae63d22 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -1199,8 +1199,6 @@
 	snd_card_set_dev(card, NULL);
 	snd_card_free(card);
 
-	platform_set_drvdata(pdev, NULL);
-
 	return 0;
 }
 
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 23e3c46..ccfa383 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -25,7 +25,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/pm_qos.h>
-#include <linux/uio.h>
+#include <linux/aio.h>
 #include <linux/dma-mapping.h>
 #include <sound/core.h>
 #include <sound/control.h>
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 7420c59..2b7f6e8 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -922,7 +922,6 @@
 	struct snd_card *card = platform_get_drvdata(pdev);
 
 	snd_card_free(card);
-	platform_set_drvdata(pdev, NULL);
 	return 0;
 }
 
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 01a03ef..cfe99ae 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -963,7 +963,6 @@
 	struct snd_card *card = platform_get_drvdata(pdev);
 
 	snd_card_free(card);
-	platform_set_drvdata(pdev, NULL);
 	return 0;
 }
 
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 5849b12..51c4ba9 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -250,6 +250,7 @@
 menuconfig SOUND_OSS
 	tristate "OSS sound modules"
 	depends on ISA_DMA_API && VIRT_TO_BUS
+	depends on !ISA_DMA_SUPPORT_BROKEN
 	help
 	  OSS is the Open Sound System suite of sound card drivers.  They make
 	  sound programming easier since they provide a common API.  Say Y or
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 6f9b647..55108b5 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -681,6 +681,9 @@
 	struct hda_bus_unsolicited *unsol;
 	unsigned int wp;
 
+	if (!bus || !bus->workq)
+		return 0;
+
 	trace_hda_unsol_event(bus, res, res_ex);
 	unsol = bus->unsol;
 	if (!unsol)
@@ -1580,7 +1583,7 @@
 		    "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
 		    nid, stream_tag, channel_id, format);
 	p = get_hda_cvt_setup(codec, nid);
-	if (!p || p->active)
+	if (!p)
 		return;
 
 	if (codec->pcm_format_first)
@@ -1627,7 +1630,7 @@
 
 	snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
 	p = get_hda_cvt_setup(codec, nid);
-	if (p && p->active) {
+	if (p) {
 		/* here we just clear the active flag when do_now isn't set;
 		 * actual clean-ups will be done later in
 		 * purify_inactive_streams() called from snd_hda_codec_prepare()
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7b213d5..de18722 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -615,7 +615,7 @@
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
 	(AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-	 AZX_DCAPS_ALIGN_BUFSIZE)
+	 AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
 
 #define AZX_DCAPS_PRESET_CTHDA \
 	(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 84b81c8..b314d3e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -64,6 +64,7 @@
 	/* extra EAPD pins */
 	unsigned int num_eapds;
 	hda_nid_t eapds[4];
+	bool dynamic_eapd;
 
 #ifdef ENABLE_CXT_STATIC_QUIRKS
 	const struct snd_kcontrol_new *mixers[5];
@@ -3155,7 +3156,7 @@
 	 * thus it might control over all pins.
 	 */
 	if (spec->num_eapds > 2)
-		spec->gen.own_eapd_ctl = 1;
+		spec->dynamic_eapd = 1;
 }
 
 static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
@@ -3194,10 +3195,19 @@
 	return 0;
 }
 
+static int cx_auto_init(struct hda_codec *codec)
+{
+	struct conexant_spec *spec = codec->spec;
+	snd_hda_gen_init(codec);
+	if (!spec->dynamic_eapd)
+		cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+	return 0;
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
 	.build_controls = cx_auto_build_controls,
 	.build_pcms = snd_hda_gen_build_pcms,
-	.init = snd_hda_gen_init,
+	.init = cx_auto_init,
 	.free = snd_hda_gen_free,
 	.unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
@@ -3348,7 +3358,8 @@
 
 	cx_auto_parse_beep(codec);
 	cx_auto_parse_eapd(codec);
-	if (spec->gen.own_eapd_ctl)
+	spec->gen.own_eapd_ctl = 1;
+	if (spec->dynamic_eapd)
 		spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
 
 	switch (codec->vendor_id) {
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 32930e6..e12f7a0 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1832,12 +1832,10 @@
 #define INTEL_EN_ALL_PIN_CVTS	0x01 /* enable 2nd & 3rd pins and converters */
 
 static void intel_haswell_enable_all_pins(struct hda_codec *codec,
-					const struct hda_fixup *fix, int action)
+					  bool update_tree)
 {
 	unsigned int vendor_param;
 
-	if (action != HDA_FIXUP_ACT_PRE_PROBE)
-		return;
 	vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0,
 				INTEL_GET_VENDOR_VERB, 0);
 	if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
@@ -1849,8 +1847,8 @@
 	if (vendor_param == -1)
 		return;
 
-	snd_hda_codec_update_widgets(codec);
-	return;
+	if (update_tree)
+		snd_hda_codec_update_widgets(codec);
 }
 
 static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
@@ -1868,30 +1866,20 @@
 				INTEL_SET_VENDOR_VERB, vendor_param);
 }
 
+/* Haswell needs to re-issue the vendor-specific verbs before turning to D0.
+ * Otherwise you may get severe h/w communication errors.
+ */
+static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
+				unsigned int power_state)
+{
+	if (power_state == AC_PWRST_D0) {
+		intel_haswell_enable_all_pins(codec, false);
+		intel_haswell_fixup_enable_dp12(codec);
+	}
 
-
-/* available models for fixup */
-enum {
-	INTEL_HASWELL,
-};
-
-static const struct hda_model_fixup hdmi_models[] = {
-	{.id = INTEL_HASWELL, .name = "Haswell"},
-	{}
-};
-
-static const struct snd_pci_quirk hdmi_fixup_tbl[] = {
-	SND_PCI_QUIRK(0x8086, 0x2010, "Haswell", INTEL_HASWELL),
-	{} /* terminator */
-};
-
-static const struct hda_fixup hdmi_fixups[] = {
-	[INTEL_HASWELL] = {
-		.type = HDA_FIXUP_FUNC,
-		.v.func = intel_haswell_enable_all_pins,
-	},
-};
-
+	snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, power_state);
+	snd_hda_codec_set_power_to_all(codec, fg, power_state);
+}
 
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
@@ -1904,11 +1892,10 @@
 	codec->spec = spec;
 	hdmi_array_init(spec, 4);
 
-	snd_hda_pick_fixup(codec, hdmi_models, hdmi_fixup_tbl, hdmi_fixups);
-	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
-
-	if (codec->vendor_id == 0x80862807)
+	if (codec->vendor_id == 0x80862807) {
+		intel_haswell_enable_all_pins(codec, true);
 		intel_haswell_fixup_enable_dp12(codec);
+	}
 
 	if (hdmi_parse_codec(codec) < 0) {
 		codec->spec = NULL;
@@ -1916,6 +1903,9 @@
 		return -EINVAL;
 	}
 	codec->patch_ops = generic_hdmi_patch_ops;
+	if (codec->vendor_id == 0x80862807)
+		codec->patch_ops.set_power_state = haswell_set_power_state;
+
 	generic_hdmi_init_per_pins(codec);
 
 	init_channel_allocations();
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 14094f5..1eb152c 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2882,6 +2882,7 @@
 		default:
 			return 0;
 		}
+		break;
 	default:
 		return 0;
 	}
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 8b85049..56ecfc7 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -505,7 +505,10 @@
 		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
 		mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX);
+		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+				ACLKX | ACLKR);
+		mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+				AFSX | AFSR);
 		break;
 	case SND_SOC_DAIFMT_CBM_CFS:
 		/* codec is clock master and frame slave */
@@ -565,7 +568,7 @@
 		mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
 		mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-		mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+		mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
 		mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
 		break;
 
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 21779a6..a80c883 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1095,9 +1095,9 @@
 
 #ifdef CONFIG_HAVE_CLK
 	if (SND_SOC_DAPM_EVENT_ON(event)) {
-		return clk_enable(w->clk);
+		return clk_prepare_enable(w->clk);
 	} else {
-		clk_disable(w->clk);
+		clk_disable_unprepare(w->clk);
 		return 0;
 	}
 #endif
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 321e066..9e9d348 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -46,6 +46,7 @@
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_c8_c9_c10;
 unsigned int has_aperf;
 unsigned int has_epb;
 unsigned int units = 1000000000;	/* GHz etc */
@@ -120,6 +121,9 @@
 	unsigned long long pc3;
 	unsigned long long pc6;
 	unsigned long long pc7;
+	unsigned long long pc8;
+	unsigned long long pc9;
+	unsigned long long pc10;
 	unsigned int package_id;
 	unsigned int energy_pkg;	/* MSR_PKG_ENERGY_STATUS */
 	unsigned int energy_dram;	/* MSR_DRAM_ENERGY_STATUS */
@@ -282,6 +286,11 @@
 		outp += sprintf(outp, "   %%pc6");
 	if (do_snb_cstates)
 		outp += sprintf(outp, "   %%pc7");
+	if (do_c8_c9_c10) {
+		outp += sprintf(outp, "   %%pc8");
+		outp += sprintf(outp, "   %%pc9");
+		outp += sprintf(outp, "  %%pc10");
+	}
 
 	if (do_rapl & RAPL_PKG)
 		outp += sprintf(outp, "  Pkg_W");
@@ -336,6 +345,9 @@
 		fprintf(stderr, "pc3: %016llX\n", p->pc3);
 		fprintf(stderr, "pc6: %016llX\n", p->pc6);
 		fprintf(stderr, "pc7: %016llX\n", p->pc7);
+		fprintf(stderr, "pc8: %016llX\n", p->pc8);
+		fprintf(stderr, "pc9: %016llX\n", p->pc9);
+		fprintf(stderr, "pc10: %016llX\n", p->pc10);
 		fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
 		fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
 		fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
@@ -493,6 +505,11 @@
 		outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
 	if (do_snb_cstates)
 		outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+	if (do_c8_c9_c10) {
+		outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
+		outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
+		outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
+	}
 
 	/*
  	 * If measurement interval exceeds minimum RAPL Joule Counter range,
@@ -569,6 +586,9 @@
 	old->pc3 = new->pc3 - old->pc3;
 	old->pc6 = new->pc6 - old->pc6;
 	old->pc7 = new->pc7 - old->pc7;
+	old->pc8 = new->pc8 - old->pc8;
+	old->pc9 = new->pc9 - old->pc9;
+	old->pc10 = new->pc10 - old->pc10;
 	old->pkg_temp_c = new->pkg_temp_c;
 
 	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -702,6 +722,9 @@
 	p->pc3 = 0;
 	p->pc6 = 0;
 	p->pc7 = 0;
+	p->pc8 = 0;
+	p->pc9 = 0;
+	p->pc10 = 0;
 
 	p->energy_pkg = 0;
 	p->energy_dram = 0;
@@ -740,6 +763,9 @@
 	average.packages.pc3 += p->pc3;
 	average.packages.pc6 += p->pc6;
 	average.packages.pc7 += p->pc7;
+	average.packages.pc8 += p->pc8;
+	average.packages.pc9 += p->pc9;
+	average.packages.pc10 += p->pc10;
 
 	average.packages.energy_pkg += p->energy_pkg;
 	average.packages.energy_dram += p->energy_dram;
@@ -781,6 +807,10 @@
 	average.packages.pc3 /= topo.num_packages;
 	average.packages.pc6 /= topo.num_packages;
 	average.packages.pc7 /= topo.num_packages;
+
+	average.packages.pc8 /= topo.num_packages;
+	average.packages.pc9 /= topo.num_packages;
+	average.packages.pc10 /= topo.num_packages;
 }
 
 static unsigned long long rdtsc(void)
@@ -880,6 +910,14 @@
 		if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
 			return -12;
 	}
+	if (do_c8_c9_c10) {
+		if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
+			return -13;
+		if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
+			return -13;
+		if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
+			return -13;
+	}
 	if (do_rapl & RAPL_PKG) {
 		if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
 			return -13;
@@ -1762,6 +1800,19 @@
 	return 0;
 }
 
+int has_c8_c9_c10(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	switch (model) {
+	case 0x45:
+		return 1;
+	}
+	return 0;
+}
+
 double discover_bclk(unsigned int family, unsigned int model)
 {
 	if (is_snb(family, model))
@@ -1918,6 +1969,7 @@
 	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
 	do_smi = do_nhm_cstates;
 	do_snb_cstates = is_snb(family, model);
+	do_c8_c9_c10 = has_c8_c9_c10(family, model);
 	bclk = discover_bclk(family, model);
 
 	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2279,7 +2331,7 @@
 	cmdline(argc, argv);
 
 	if (verbose)
-		fprintf(stderr, "turbostat v3.3 March 15, 2013"
+		fprintf(stderr, "turbostat v3.4 April 17, 2013"
 			" - Len Brown <lenb@kernel.org>\n");
 
 	turbostat_init();
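
The new %pc8/%pc9/%pc10 columns follow the same recipe as the existing package C-state columns: snapshot the residency MSR before and after the measurement interval and divide the delta by the TSC delta. A worked example with made-up snapshot values:

#include <stdio.h>

int main(void)
{
	/* made-up MSR snapshots taken at the start and end of the interval */
	unsigned long long pc8_old = 1000, pc8_new = 41000;
	unsigned long long tsc_old = 0, tsc_new = 100000;

	double pct = 100.0 * (pc8_new - pc8_old) / (tsc_new - tsc_old);

	printf("%%pc8: %6.2f\n", pct);	/* prints 40.00 */
	return 0;
}
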
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 45f0936..302681c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1978,7 +1978,7 @@
 	if (vcpu->kvm->mm != current->mm)
 		return -EIO;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
 	/*
 	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
 	 * so vcpu_load() would break it.
@@ -3105,13 +3105,21 @@
 	int r;
 	int cpu;
 
-	r = kvm_irqfd_init();
-	if (r)
-		goto out_irqfd;
 	r = kvm_arch_init(opaque);
 	if (r)
 		goto out_fail;
 
+	/*
+	 * kvm_arch_init makes sure there's at most one caller
+	 * for architectures that support multiple implementations,
+	 * like Intel and AMD on x86.
+	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+	 * conflicts in case kvm is already set up for another implementation.
+	 */
+	r = kvm_irqfd_init();
+	if (r)
+		goto out_irqfd;
+
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
 		goto out_free_0;
@@ -3186,10 +3194,10 @@
 out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
-	kvm_arch_exit();
-out_fail:
 	kvm_irqfd_exit();
 out_irqfd:
+	kvm_arch_exit();
+out_fail:
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init);
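
The kvm_init() reordering also shows the invariant behind goto-style error handling: cleanup labels must unwind in exactly the reverse order of the setup calls, so swapping kvm_arch_init() and kvm_irqfd_init() meant swapping their places in the error path too. A compact illustration with made-up step names:

#include <stdio.h>

static int step_a(void)  { puts("init A"); return 0; }
static int step_b(void)  { puts("init B"); return -1; }	/* fails */
static void undo_a(void) { puts("undo A"); }

int main(void)
{
	int r;

	r = step_a();
	if (r)
		goto out_fail;
	r = step_b();
	if (r)
		goto out_a;	/* B failed: undo A, and only A */
	return 0;

out_a:
	undo_a();
out_fail:
	return r;
}
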