Merge git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] state info wrong after resume
  [CPUFREQ] allow use of the powersave governor as the default one
  [CPUFREQ] document the currently undocumented parts of the sysfs interface
  [CPUFREQ] expose cpufreq coordination requirements regardless of coordination mechanism
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index b939ebb..80d1504 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -145,7 +145,7 @@
 int
 dma_supported(struct device *dev, u64 mask)
 int
-pci_dma_supported(struct device *dev, u64 mask)
+pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 
 Checks to see if the device can support DMA to the memory described by
 mask.
@@ -189,7 +189,7 @@
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 		      enum dma_data_direction direction)
 dma_addr_t
-pci_map_single(struct device *dev, void *cpu_addr, size_t size,
+pci_map_single(struct pci_dev *hwdev, void *cpu_addr, size_t size,
 		      int direction)
 
 Maps a piece of processor virtual memory so it can be accessed by the
@@ -395,6 +395,71 @@
 
 See also dma_map_single().
 
+dma_addr_t
+dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
+		     enum dma_data_direction dir,
+		     struct dma_attrs *attrs)
+
+void
+dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+		       size_t size, enum dma_data_direction dir,
+		       struct dma_attrs *attrs)
+
+int
+dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+		 int nents, enum dma_data_direction dir,
+		 struct dma_attrs *attrs)
+
+void
+dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+		   int nents, enum dma_data_direction dir,
+		   struct dma_attrs *attrs)
+
+The four functions above are just like the counterpart functions
+without the _attrs suffixes, except that they pass an optional
+struct dma_attrs*.
+
+struct dma_attrs encapsulates a set of "dma attributes". For the
+definition of struct dma_attrs see linux/dma-attrs.h.
+
+The interpretation of dma attributes is architecture-specific, and
+each attribute should be documented in Documentation/DMA-attributes.txt.
+
+If struct dma_attrs* is NULL, the semantics of each of these
+functions are identical to those of the corresponding function
+without the _attrs suffix. As a result dma_map_single_attrs()
+can generally replace dma_map_single(), etc.
+
+As an example of the use of the *_attrs functions, here's how
+you could pass an attribute DMA_ATTR_FOO when mapping memory
+for DMA:
+
+#include <linux/dma-attrs.h>
+/* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and
+ * documented in Documentation/DMA-attributes.txt */
+...
+
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_FOO, &attrs);
+	....
+	n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);
+	....
+
+Architectures that care about DMA_ATTR_FOO would check for its
+presence in their implementations of the mapping and unmapping
+routines, e.g.:
+
+int whizco_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			    int nents, enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
+{
+	....
+	int foo = dma_get_attr(DMA_ATTR_FOO, attrs);
+	....
+	if (foo)
+		/* twizzle the frobnozzle */
+	....
+}
 
 Part II - Advanced dma_ usage
 -----------------------------
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
new file mode 100644
index 0000000..6d772f8
--- /dev/null
+++ b/Documentation/DMA-attributes.txt
@@ -0,0 +1,24 @@
+			DMA attributes
+			==============
+
+This document describes the semantics of the DMA attributes that are
+defined in linux/dma-attrs.h.
+
+DMA_ATTR_WRITE_BARRIER
+----------------------
+
+DMA_ATTR_WRITE_BARRIER is a (write) barrier attribute for DMA.  DMA
+to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces
+all pending DMA writes to complete, and thus provides a mechanism to
+strictly order DMA from a device across all intervening busses and
+bridges.  This barrier is not specific to a particular type of
+interconnect, it applies to the system as a whole, and so its
+implementation must account for the idiosyncrasies of the system all
+the way from the DMA device to memory.
+
+As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
+useful, suppose that a device does a DMA write to indicate that data is
+ready and available in memory.  The DMA of the "completion indication"
+could race with data DMA.  Mapping the memory used for completion
+indications with DMA_ATTR_WRITE_BARRIER would prevent the race.
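+
+As a minimal sketch (following the *_attrs example in the DMA-API
+document; the device and buffer names here are hypothetical), a driver
+could request the barrier semantics when mapping the memory used for
+its completion indications:
+
+	#include <linux/dma-attrs.h>
+
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+	dma = dma_map_single_attrs(dev, completion, sizeof(*completion),
+				   DMA_FROM_DEVICE, &attrs);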
+
diff --git a/Documentation/cgroups.txt b/Documentation/cgroups.txt
index 31d12e2..c298a66 100644
--- a/Documentation/cgroups.txt
+++ b/Documentation/cgroups.txt
@@ -500,8 +500,7 @@
 
 void fork(struct cgroup_subsy *ss, struct task_struct *task)
 
-Called when a task is forked into a cgroup. Also called during
-registration for all existing tasks.
+Called when a task is forked into a cgroup.
 
 void exit(struct cgroup_subsys *ss, struct task_struct *task)
 
diff --git a/Documentation/controllers/devices.txt b/Documentation/controllers/devices.txt
new file mode 100644
index 0000000..4dcea42
--- /dev/null
+++ b/Documentation/controllers/devices.txt
@@ -0,0 +1,48 @@
+Device Whitelist Controller
+
+1. Description:
+
+Implement a cgroup to track and enforce open and mknod restrictions
+on device files.  A device cgroup associates a device access
+whitelist with each cgroup.  A whitelist entry has 4 fields.
+'type' is 'a' (all), 'c' (char), or 'b' (block).  'a' means the entry
+applies to all types and all major and minor numbers.  Major and minor
+are either an integer or * for all.  Access is a composition of r
+(read), w (write), and m (mknod).
+
+The root device cgroup starts with rwm to 'all'.  A child device
+cgroup gets a copy of the parent.  Administrators can then remove
+devices from the whitelist or add new entries.  A child cgroup can
+never receive a device access which is denied by its parent.  However,
+when a device access is removed from a parent, it will not also be
+removed from the child(ren).
+
+2. User Interface
+
+An entry is added using devices.allow, and removed using
+devices.deny.  For instance
+
+	echo 'c 1:3 mr' > /cgroups/1/devices.allow
+
+allows cgroup 1 to read and mknod the device usually known as
+/dev/null.  Doing
+
+	echo a > /cgroups/1/devices.deny
+
+will remove the default 'a *:* rwm' entry.
+
+3. Security
+
+Any task can move itself between cgroups.  This clearly won't
+suffice, but we can decide the best way to adequately restrict
+movement as people get some experience with this.  We may just want
+to require CAP_SYS_ADMIN, which at least is a separate bit from
+CAP_MKNOD.  We may want to just refuse moving to a cgroup which
+isn't a descendant of the current one.  Or we may want to use
+CAP_MAC_ADMIN, since we really are trying to lock down root.
+
+CAP_SYS_ADMIN is needed to modify the whitelist or move another
+task to a new cgroup.  (Again we'll probably want to change that).
+
+A cgroup may not be granted more permissions than the cgroup's
+parent has.
diff --git a/Documentation/controllers/resource_counter.txt b/Documentation/controllers/resource_counter.txt
new file mode 100644
index 0000000..f196ac1
--- /dev/null
+++ b/Documentation/controllers/resource_counter.txt
@@ -0,0 +1,181 @@
+
+		The Resource Counter
+
+The resource counter, declared in include/linux/res_counter.h,
+is supposed to facilitate resource management by controllers
+by providing common accounting infrastructure.
+
+This infrastructure consists of the res_counter structure and routines
+to work with it.
+
+
+
+1. Crucial parts of the res_counter structure
+
+ a. unsigned long long usage
+
+ 	The usage value shows the amount of a resource that is consumed
+	by a group at a given time. The units of measurement should be
+	determined by the controller that uses this counter. E.g. it can
+	be bytes, items or any other unit the controller operates on.
+
+ b. unsigned long long max_usage
+
+ 	The maximal value of the usage over time.
+
+ 	This value is useful when gathering statistical information about
+	the particular group, as it shows the actual resource requirements
+	for a particular group, not just some usage snapshot.
+
+ c. unsigned long long limit
+
+ 	The maximal amount of the resource that the group is allowed to
+	consume. If the group requests more resources, so that the usage
+	value would exceed the limit, the resource allocation is rejected
+	(see the next section).
+
+ d. unsigned long long failcnt
+
+ 	The failcnt stands for "failures counter". This is the number of
+	resource allocation attempts that failed.
+
+ e. spinlock_t lock
+
+ 	Protects changes of the above values.
+
+
+
+2. Basic accounting routines
+
+ a. void res_counter_init(struct res_counter *rc)
+
+ 	Initializes the resource counter. As usual, should be the first
+	routine called for a new counter.
+
+ b. int res_counter_charge[_locked]
+			(struct res_counter *rc, unsigned long val)
+
+	When a resource is about to be allocated it has to be accounted
+	with the appropriate resource counter (controller should determine
+	which one to use on its own). This operation is called "charging".
+
+	It is not very important which operation - resource allocation
+	or charging - is performed first, but
+	  * if the allocation is performed first, this may create a
+	    temporary resource over-usage by the time the resource
+	    counter is charged;
+	  * if the charging is performed first, then it should be uncharged
+	    on the error path, if that path is taken (see the sketch below).
+
+ c. void res_counter_uncharge[_locked]
+			(struct res_counter *rc, unsigned long val)
+
+	When a resource is released (freed) it should be de-accounted
+	from the resource counter it was accounted to.  This is called
+	"uncharging".
+
+    The _locked routines imply that the res_counter->lock is taken.
+
+
+ 2.1 Other accounting routines
+
+    There are more routines that may help you with common needs, like
+    checking whether the limit is reached or resetting the max_usage
+    value. They are all declared in include/linux/res_counter.h.
+
+
+
+3. Analyzing the resource counter registrations
+
+ a. If the failcnt value constantly grows, this means that the counter's
+    limit is too tight. Either the group is misbehaving and consumes too
+    many resources, or the configuration is not suitable for the group
+    and the limit should be increased.
+
+ b. The max_usage value can be used to quickly tune the group. One may
+    set the limits to maximal values and either load the container with
+    a typical workload or leave it running for a while. After this the
+    max_usage value shows the amount of the resource the container would
+    require during its normal activity.
+
+    Setting the limit a bit above this value gives a configuration that
+    works well in most cases.
+
+ c. If the max_usage is much less than the limit, but the failcnt value
+    is growing, then the group tries to allocate a big chunk of resource
+    at once.
+
+ d. If the max_usage is much less than the limit, but the failcnt value
+    is 0, then the group has been given a higher limit than it requires.
+    It is better to lower the limit a bit, leaving more resource for
+    other groups.
+
+
+
+4. Communication with the control groups subsystem (cgroups)
+
+All the resource controllers that are using cgroups and resource counters
+should provide files (in the cgroup filesystem) to work with the resource
+counter fields. They are recommended to adhere to the following rules:
+
+ a. File names
+
+ 	Field name	File name
+	---------------------------------------------------
+	usage		usage_in_<unit_of_measurement>
+	max_usage	max_usage_in_<unit_of_measurement>
+	limit		limit_in_<unit_of_measurement>
+	failcnt		failcnt
+	lock		no file :)
+
+ b. Reading from file should show the corresponding field value in the
+    appropriate format.
+
+ c. Writing to file
+
+ 	Field		Expected behavior
+	----------------------------------
+	usage		prohibited
+	max_usage	reset to usage
+	limit		set the limit
+	failcnt		reset to zero
+
+
+
+5. Usage example
+
+ a. Declare a task group (take a look at cgroups subsystem for this) and
+    fold a res_counter into it
+
+	struct my_group {
+		struct res_counter res;
+
+		<other fields>
+	};
+
+ b. Put hooks in resource allocation/release paths
+
+ 	int alloc_something(...)
+	{
+		if (res_counter_charge(res_counter_ptr, amount) < 0)
+			return -ENOMEM;
+
+		<allocate the resource and return to the caller>
+	}
+
+	void release_something(...)
+	{
+		res_counter_uncharge(res_counter_ptr, amount);
+
+		<release the resource>
+	}
+
+    In order to keep the usage value self-consistent, both the
+    "res_counter_ptr" and the "amount" in release_something() should be
+    the same as they were in alloc_something() when the resource being
+    released was allocated.
+
+ c. Provide a way to read the res_counter values and to set them (the
+    cgroups subsystem can still help with this).
+
+ d. Compile and run :)
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index aa854b9..fb7b361 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -171,6 +171,7 @@
  - memory_migrate flag: if set, move pages to cpusets nodes
  - cpu_exclusive flag: is cpu placement exclusive?
  - mem_exclusive flag: is memory placement exclusive?
+ - mem_hardwall flag:  is memory allocation hardwalled?
  - memory_pressure: measure of how much paging pressure in cpuset
 
 In addition, the root cpuset only has the following file:
@@ -222,17 +223,18 @@
 a direct ancestor or descendent, may share any of the same CPUs or
 Memory Nodes.
 
-A cpuset that is mem_exclusive restricts kernel allocations for
-page, buffer and other data commonly shared by the kernel across
-multiple users.  All cpusets, whether mem_exclusive or not, restrict
-allocations of memory for user space.  This enables configuring a
-system so that several independent jobs can share common kernel data,
-such as file system pages, while isolating each jobs user allocation in
-its own cpuset.  To do this, construct a large mem_exclusive cpuset to
-hold all the jobs, and construct child, non-mem_exclusive cpusets for
-each individual job.  Only a small amount of typical kernel memory,
-such as requests from interrupt handlers, is allowed to be taken
-outside even a mem_exclusive cpuset.
+A cpuset that is mem_exclusive *or* mem_hardwall is "hardwalled",
+i.e. it restricts kernel allocations for page, buffer and other data
+commonly shared by the kernel across multiple users.  All cpusets,
+whether hardwalled or not, restrict allocations of memory for user
+space.  This enables configuring a system so that several independent
+jobs can share common kernel data, such as file system pages, while
+isolating each job's user allocation in its own cpuset.  To do this,
+construct a large mem_exclusive cpuset to hold all the jobs, and
+construct child, non-mem_exclusive cpusets for each individual job.
+Only a small amount of typical kernel memory, such as requests from
+interrupt handlers, is allowed to be taken outside even a
+mem_exclusive cpuset.
 
 
 1.5 What is memory_pressure ?
@@ -707,7 +709,7 @@
 
 In this directory you can find several files:
 # ls
-cpus  cpu_exclusive  mems  mem_exclusive  tasks
+cpus  cpu_exclusive  mems  mem_exclusive  mem_hardwall  tasks
 
 Reading them will give you information about the state of this cpuset:
 the CPUs and Memory Nodes it can use, the processes that are using
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 354aec0..881e6dd 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -141,6 +141,7 @@
 mktables
 mktree
 modpost
+modules.order
 modversions.h*
 offset.h
 offsets.h
@@ -171,6 +172,7 @@
 split-include
 tags
 tftpboot.img
+timeconst.h
 times.h*
 tkparse
 trix_boot.h
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 649cb87..00b950d 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -104,14 +104,15 @@
   Reverse dependencies can only be used with boolean or tristate
   symbols.
   Note:
-	select is evil.... select will by brute force set a symbol
-	equal to 'y' without visiting the dependencies. So abusing
-	select you are able to select a symbol FOO even if FOO depends
-	on BAR that is not set. In general use select only for
-	non-visible symbols (no prompts anywhere) and for symbols with
-	no dependencies. That will limit the usefulness but on the
-	other hand avoid the illegal configurations all over. kconfig
-	should one day warn about such things.
+	select should be used with care. select will force
+	a symbol to a value without visiting its dependencies.
+	By abusing select you are able to select a symbol FOO even
+	if FOO depends on BAR, which is not set.
+	In general, use select only for non-visible symbols
+	(no prompts anywhere) and for symbols with no dependencies.
+	That limits its usefulness but, on the other hand, avoids
+	illegal configurations all over the place.
+	kconfig should one day warn about such things.
 
 - numerical ranges: "range" <symbol> <symbol> ["if" <expr>]
   This allows to limit the range of possible input values for int
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e5f3d91..3ce193f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -627,8 +627,7 @@
 	eata=		[HW,SCSI]
 
 	edd=		[EDD]
-			Format: {"of[f]" | "sk[ipmbr]"}
-			See comment in arch/i386/boot/edd.S
+			Format: {"off" | "on" | "skip[mbr]"}
 
 	eisa_irq_edge=	[PARISC,HW]
 			See header of drivers/parisc/eisa.c.
@@ -1389,6 +1388,13 @@
 
 	nr_uarts=	[SERIAL] maximum number of UARTs to be registered.
 
+	olpc_ec_timeout= [OLPC] ms delay when issuing EC commands
+			Rather than timing out after 20 ms if an EC
+			command is not properly ACKed, override the length
+			of the timeout.  We have interrupts disabled while
+			waiting for the ACK, so if this is set too high
+			interrupts *may* be lost!
+
 	opl3=		[HW,OSS]
 			Format: <io>
 
diff --git a/Documentation/keys-request-key.txt b/Documentation/keys-request-key.txt
index 266955d..09b55e4 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/keys-request-key.txt
@@ -11,26 +11,29 @@
 
 	struct key *request_key(const struct key_type *type,
 				const char *description,
-				const char *callout_string);
+				const char *callout_info);
 
 or:
 
 	struct key *request_key_with_auxdata(const struct key_type *type,
 					     const char *description,
-					     const char *callout_string,
+					     const char *callout_info,
+					     size_t callout_len,
 					     void *aux);
 
 or:
 
 	struct key *request_key_async(const struct key_type *type,
 				      const char *description,
-				      const char *callout_string);
+				      const char *callout_info,
+				      size_t callout_len);
 
 or:
 
 	struct key *request_key_async_with_auxdata(const struct key_type *type,
 						   const char *description,
-						   const char *callout_string,
+						   const char *callout_info,
+					     	   size_t callout_len,
 						   void *aux);
 
 Or by userspace invoking the request_key system call:
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 51652d3..d5c7a57d 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -170,7 +170,8 @@
      amount of description and payload space that can be consumed.
 
      The user can view information on this and other statistics through procfs
-     files.
+     files.  The root user may also alter the quota limits through sysctl files
+     (see the section "New procfs files").
 
      Process-specific and thread-specific keyrings are not counted towards a
      user's quota.
@@ -329,6 +330,27 @@
 	<bytes>/<max>		Key size quota
 
 
+Four new sysctl files have also been added for the purpose of controlling
+the quota limits on keys:
+
+ (*) /proc/sys/kernel/keys/root_maxkeys
+     /proc/sys/kernel/keys/root_maxbytes
+
+     These files hold the maximum number of keys that root may have and the
+     maximum total number of bytes of data that root may have stored in those
+     keys.
+
+ (*) /proc/sys/kernel/keys/maxkeys
+     /proc/sys/kernel/keys/maxbytes
+
+     These files hold the maximum number of keys that each non-root user may
+     have and the maximum total number of bytes of data that each of those
+     users may have stored in their keys.
+
+Root may alter these by writing each new limit as a decimal number string to
+the appropriate file.
+
+
 ===============================
 USERSPACE SYSTEM CALL INTERFACE
 ===============================
@@ -711,6 +733,27 @@
      The assumed authoritative key is inherited across fork and exec.
 
 
+ (*) Get the LSM security context attached to a key.
+
+	long keyctl(KEYCTL_GET_SECURITY, key_serial_t key, char *buffer,
+		    size_t buflen)
+
+     This function places in the buffer provided a string that represents
+     the LSM security context attached to the key.
+
+     Unless there's an error, it always returns the amount of data it could
+     produce, even if that's too big for the buffer, but it won't copy more
+     than requested to userspace. If the buffer pointer is NULL then no copy
+     will take place.
+
+     A NUL character is included at the end of the string if the buffer is
+     sufficiently big.  This is included in the returned count.  If no LSM is
+     in force then an empty string will be returned.
+
+     A process must have view permission on the key for this function to be
+     successful.
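+
+     As a minimal userspace sketch of the sizing behaviour described
+     above, assuming the keyctl() wrapper from the keyutils library (the
+     key variable is hypothetical):
+
+	char *buffer;
+	long len;
+
+	len = keyctl(KEYCTL_GET_SECURITY, key, NULL, 0);  /* length only */
+	if (len < 0)
+		return -1;
+	buffer = malloc(len);
+	if (!buffer)
+		return -1;
+	len = keyctl(KEYCTL_GET_SECURITY, key, buffer, len);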
+
+
 ===============
 KERNEL SERVICES
 ===============
@@ -771,7 +814,7 @@
 
 	struct key *request_key(const struct key_type *type,
 				const char *description,
-				const char *callout_string);
+				const char *callout_info);
 
     This is used to request a key or keyring with a description that matches
     the description specified according to the key type's match function. This
@@ -793,24 +836,28 @@
 
 	struct key *request_key_with_auxdata(const struct key_type *type,
 					     const char *description,
-					     const char *callout_string,
+					     const void *callout_info,
+					     size_t callout_len,
 					     void *aux);
 
     This is identical to request_key(), except that the auxiliary data is
-    passed to the key_type->request_key() op if it exists.
+    passed to the key_type->request_key() op if it exists, and the callout_info
+    is a blob of length callout_len, if given (the length may be 0).
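+
+    For instance, a caller could (hypothetically) pass a binary callout
+    blob like this - the blob structure, key type and aux pointer are
+    made-up names, not part of the API:
+
+	struct my_callout_blob blob = { .flags = 1, .unit = 42 };
+	struct key *key;
+
+	key = request_key_with_auxdata(&my_key_type, "myservice:desc",
+				       &blob, sizeof(blob), my_aux);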
 
 
 (*) A key can be requested asynchronously by calling one of:
 
 	struct key *request_key_async(const struct key_type *type,
 				      const char *description,
-				      const char *callout_string);
+				      const void *callout_info,
+				      size_t callout_len);
 
     or:
 
 	struct key *request_key_async_with_auxdata(const struct key_type *type,
 						   const char *description,
-						   const char *callout_string,
+						   const char *callout_info,
+					     	   size_t callout_len,
 					     	   void *aux);
 
     which are asynchronous equivalents of request_key() and
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index 7f60dfe..b152e81 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -253,6 +253,10 @@
 
   8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG.
 
+  9: 'A' if the ACPI table has been overridden.
+
+ 10: 'W' if a warning has previously been issued by the kernel.
+
 The primary reason for the 'Tainted: ' string is to tell kernel
 debuggers if this is a clean kernel or if anything unusual has
 occurred.  Tainting is permanent: even if an offending module is
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 10c8f69..5ce0952 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -85,6 +85,8 @@
 'k'     - Secure Access Key (SAK) Kills all programs on the current virtual
           console. NOTE: See important comments below in SAK section.
 
+'l'     - Shows a stack backtrace for all active CPUs.
+
 'm'     - Will dump current memory info to your console.
 
 'n'	- Used to make RT tasks nice-able
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 22d7e3e..d3ce295 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -31,7 +31,7 @@
 	int hwcache_align, object_size, objs_per_slab;
 	int sanity_checks, slab_size, store_user, trace;
 	int order, poison, reclaim_account, red_zone;
-	unsigned long partial, objects, slabs;
+	unsigned long partial, objects, slabs, objects_partial, objects_total;
 	unsigned long alloc_fastpath, alloc_slowpath;
 	unsigned long free_fastpath, free_slowpath;
 	unsigned long free_frozen, free_add_partial, free_remove_partial;
@@ -540,7 +540,8 @@
 		return;
 
 	store_size(size_str, slab_size(s));
-	snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs, s->partial, s->cpu_slabs);
+	snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
+						s->partial, s->cpu_slabs);
 
 	if (!line++)
 		first_line();
@@ -776,7 +777,6 @@
 		unsigned long used;
 		unsigned long long wasted;
 		unsigned long long objwaste;
-		long long objects_in_partial_slabs;
 		unsigned long percentage_partial_slabs;
 		unsigned long percentage_partial_objs;
 
@@ -790,18 +790,11 @@
 		wasted = size - used;
 		objwaste = s->slab_size - s->object_size;
 
-		objects_in_partial_slabs = s->objects -
-			(s->slabs - s->partial - s ->cpu_slabs) *
-			s->objs_per_slab;
-
-		if (objects_in_partial_slabs < 0)
-			objects_in_partial_slabs = 0;
-
 		percentage_partial_slabs = s->partial * 100 / s->slabs;
 		if (percentage_partial_slabs > 100)
 			percentage_partial_slabs = 100;
 
-		percentage_partial_objs = objects_in_partial_slabs * 100
+		percentage_partial_objs = s->objects_partial * 100
 							/ s->objects;
 
 		if (percentage_partial_objs > 100)
@@ -823,8 +816,8 @@
 			min_objects = s->objects;
 		if (used < min_used)
 			min_used = used;
-		if (objects_in_partial_slabs < min_partobj)
-			min_partobj = objects_in_partial_slabs;
+		if (s->objects_partial < min_partobj)
+			min_partobj = s->objects_partial;
 		if (percentage_partial_slabs < min_ppart)
 			min_ppart = percentage_partial_slabs;
 		if (percentage_partial_objs < min_ppartobj)
@@ -848,8 +841,8 @@
 			max_objects = s->objects;
 		if (used > max_used)
 			max_used = used;
-		if (objects_in_partial_slabs > max_partobj)
-			max_partobj = objects_in_partial_slabs;
+		if (s->objects_partial > max_partobj)
+			max_partobj = s->objects_partial;
 		if (percentage_partial_slabs > max_ppart)
 			max_ppart = percentage_partial_slabs;
 		if (percentage_partial_objs > max_ppartobj)
@@ -864,7 +857,7 @@
 
 		total_objects += s->objects;
 		total_used += used;
-		total_partobj += objects_in_partial_slabs;
+		total_partobj += s->objects_partial;
 		total_ppart += percentage_partial_slabs;
 		total_ppartobj += percentage_partial_objs;
 
@@ -1160,6 +1153,8 @@
 			slab->hwcache_align = get_obj("hwcache_align");
 			slab->object_size = get_obj("object_size");
 			slab->objects = get_obj("objects");
+			slab->objects_partial = get_obj("objects_partial");
+			slab->objects_total = get_obj("objects_total");
 			slab->objs_per_slab = get_obj("objs_per_slab");
 			slab->order = get_obj("order");
 			slab->partial = get_obj("partial");
diff --git a/MAINTAINERS b/MAINTAINERS
index c1dd1ae..d103766 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2694,7 +2694,7 @@
 M:	dhowells@redhat.com
 P:	Koichi Yasutake
 M:	yasutake.koichi@jp.panasonic.com
-L:	linux-am33-list@redhat.com
+L:	linux-am33-list@redhat.com (moderated for non-subscribers)
 W:	ftp://ftp.redhat.com/pub/redhat/gnupro/AM33/
 S:	Maintained
 
diff --git a/Makefile b/Makefile
index fc3411e..d3634cd 100644
--- a/Makefile
+++ b/Makefile
@@ -1174,8 +1174,10 @@
 # Brief documentation of the typical targets used
 # ---------------------------------------------------------------------------
 
-boards := $(wildcard $(srctree)/arch/$(ARCH)/configs/*_defconfig)
+boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig)
 boards := $(notdir $(boards))
+board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
+board-dirs := $(sort $(notdir $(board-dirs:/=)))
 
 help:
 	@echo  'Cleaning targets:'
@@ -1221,14 +1223,19 @@
 	@echo  'Documentation targets:'
 	@$(MAKE) -f $(srctree)/Documentation/DocBook/Makefile dochelp
 	@echo  ''
-	@echo  'Architecture specific targets ($(ARCH)):'
+	@echo  'Architecture specific targets ($(SRCARCH)):'
 	@$(if $(archhelp),$(archhelp),\
-		echo '  No architecture specific help defined for $(ARCH)')
+		echo '  No architecture specific help defined for $(SRCARCH)')
 	@echo  ''
 	@$(if $(boards), \
 		$(foreach b, $(boards), \
 		printf "  %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \
 		echo '')
+	@$(if $(board-dirs), \
+		$(foreach b, $(board-dirs), \
+		printf "  %-16s - Show %s-specific targets\\n" help-$(b) $(b);) \
+		printf "  %-16s - Show all of the above\\n" help-boards; \
+		echo '')
 
 	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
 	@echo  '  make V=2   [targets] 2 => give reason for rebuild of target'
@@ -1240,6 +1247,20 @@
 	@echo  'For further info see the ./README file'
 
 
+help-board-dirs := $(addprefix help-,$(board-dirs))
+
+help-boards: $(help-board-dirs)
+
+boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig))
+
+$(help-board-dirs): help-%:
+	@echo  'Architecture specific targets ($(SRCARCH) $*):'
+	@$(if $(boards-per-dir), \
+		$(foreach b, $(boards-per-dir), \
+		printf "  %-24s - Build for %s\\n" $*/$(b) $(subst _defconfig,,$(b));) \
+		echo '')
+
+
 # Documentation targets
 # ---------------------------------------------------------------------------
 %docs: scripts_basic FORCE
diff --git a/arch/Kconfig b/arch/Kconfig
index 694c9af..3ea332b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -36,3 +36,6 @@
 
 config HAVE_KRETPROBES
 	def_bool n
+
+config HAVE_DMA_ATTRS
+	def_bool n
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
index 6c56c75..4b18cd9 100644
--- a/arch/alpha/kernel/asm-offsets.c
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -8,13 +8,9 @@
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/ptrace.h>
+#include <linux/kbuild.h>
 #include <asm/io.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 void foo(void)
 {
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index baf5756..36ab22a 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -514,8 +514,8 @@
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d8d2532..b786e68 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -8,6 +8,7 @@
 config ARM
 	bool
 	default y
+	select HAVE_IDE
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
 	select HAVE_OPROFILE
@@ -223,7 +224,6 @@
 	select TIMER_ACORN
 	select ISA
 	select NO_IOPORT
-	select HAVE_IDE
 	help
 	  Support for the Cirrus Logic PS7500FE system-on-a-chip.
 
@@ -236,7 +236,6 @@
 	bool "Co-EBSA285"
 	select FOOTBRIDGE
 	select FOOTBRIDGE_ADDIN
-	select HAVE_IDE
 	help
 	  Support for Intel's EBSA285 companion chip.
 
@@ -262,7 +261,6 @@
 config ARCH_FOOTBRIDGE
 	bool "FootBridge"
 	select FOOTBRIDGE
-	select HAVE_IDE
 	help
 	  Support for systems based on the DC21285 companion chip
 	  ("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -301,7 +299,6 @@
 	depends on MMU
 	select PLAT_IOP
 	select PCI
-	select HAVE_IDE
 	help
 	  Support for Intel's 80219 and IOP32X (XScale) family of
 	  processors.
@@ -311,14 +308,12 @@
 	depends on MMU
 	select PLAT_IOP
 	select PCI
-	select HAVE_IDE
 	help
 	  Support for Intel's IOP33X (XScale) family of processors.
 
 config ARCH_IXP23XX
  	bool "IXP23XX-based"
 	depends on MMU
-	select HAVE_IDE
  	select PCI
 	help
 	  Support for Intel's IXP23xx (XScale) family of processors.
@@ -336,14 +331,12 @@
 	select GENERIC_GPIO
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
-	select HAVE_IDE
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
 
 config ARCH_L7200
 	bool "LinkUp-L7200"
 	select FIQ
-	select HAVE_IDE
 	help
 	  Say Y here if you intend to run this kernel on a LinkUp Systems
 	  L7200 Software Development Board which uses an ARM720T processor.
@@ -400,7 +393,6 @@
 	depends on MMU
 	select ARCH_MTD_XIP
 	select GENERIC_GPIO
-	select HAVE_IDE
 	select HAVE_GPIO_LIB
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
@@ -416,7 +408,6 @@
 	select ARCH_MAY_HAVE_PC_FDC
 	select ISA_DMA_API
 	select NO_IOPORT
-	select HAVE_IDE
 	help
 	  On the Acorn Risc-PC, Linux can support the internal IDE disk and
 	  CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -432,7 +423,6 @@
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
 	select TICK_ONESHOT
-	select HAVE_IDE
 	select HAVE_GPIO_LIB
 	help
 	  Support for StrongARM 11x0 based boards.
@@ -440,7 +430,6 @@
 config ARCH_S3C2410
 	bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
 	select GENERIC_GPIO
-	select HAVE_IDE
 	help
 	  Samsung S3C2410X CPU based systems, such as the Simtec Electronics
 	  BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -448,7 +437,6 @@
 
 config ARCH_SHARK
 	bool "Shark"
-	select HAVE_IDE
 	select ISA
 	select ISA_DMA
 	select PCI
@@ -458,7 +446,6 @@
 
 config ARCH_LH7A40X
 	bool "Sharp LH7A40X"
-	select HAVE_IDE
 	help
 	  Say Y here for systems based on one of the Sharp LH7A40X
 	  System on a Chip processors.  These CPUs include an ARM922T
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 0a0d247..4a88125 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/procinfo.h>
+#include <linux/kbuild.h>
 
 /*
  * Make sure that the compiler and target are compatible.
@@ -35,13 +36,6 @@
 #error    Known good compilers: 3.3
 #endif
 
-/* Use marker if you need to separate the values later */
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags.c
index e2e934c..64c4208 100644
--- a/arch/arm/kernel/atags.c
+++ b/arch/arm/kernel/atags.c
@@ -35,7 +35,7 @@
 {
 	struct proc_dir_entry* tags_entry;
 
-	tags_entry = create_proc_read_entry("atags", 0400, &proc_root, read_buffer, &tags_buffer);
+	tags_entry = create_proc_read_entry("atags", 0400, NULL, read_buffer, &tags_buffer);
 	if (!tags_entry)
 		return -ENOMEM;
 
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index f56d48c..a53c0ab 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -37,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
@@ -723,17 +724,14 @@
 	return address;
 }
 
-static int ecard_prints(char *buffer, ecard_t *ec)
+static int ecard_prints(struct seq_file *m, ecard_t *ec)
 {
-	char *start = buffer;
-
-	buffer += sprintf(buffer, "  %d: %s ", ec->slot_no,
-			  ec->easi ? "EASI" : "    ");
+	seq_printf(m, "  %d: %s ", ec->slot_no, ec->easi ? "EASI" : "    ");
 
 	if (ec->cid.id == 0) {
 		struct in_chunk_dir incd;
 
-		buffer += sprintf(buffer, "[%04X:%04X] ",
+		seq_printf(m, "[%04X:%04X] ",
 			ec->cid.manufacturer, ec->cid.product);
 
 		if (!ec->card_desc && ec->cid.cd &&
@@ -744,43 +742,43 @@
 				strcpy((char *)ec->card_desc, incd.d.string);
 		}
 
-		buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
+		seq_printf(m, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
 	} else
-		buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id);
+		seq_printf(m, "Simple card %d\n", ec->cid.id);
 
-	return buffer - start;
+	return 0;
 }
 
-static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count)
+static int ecard_devices_proc_show(struct seq_file *m, void *v)
 {
 	ecard_t *ec = cards;
-	off_t at = 0;
-	int len, cnt;
 
-	cnt = 0;
-	while (ec && count > cnt) {
-		len = ecard_prints(buf, ec);
-		at += len;
-		if (at >= pos) {
-			if (!*start) {
-				*start = buf + (pos - (at - len));
-				cnt = at - pos;
-			} else
-				cnt += len;
-			buf += len;
-		}
+	while (ec) {
+		ecard_prints(m, ec);
 		ec = ec->next;
 	}
-	return (count > cnt) ? cnt : count;
+	return 0;
 }
 
+static int ecard_devices_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ecard_devices_proc_show, NULL);
+}
+
+static const struct file_operations bus_ecard_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ecard_devices_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static struct proc_dir_entry *proc_bus_ecard_dir = NULL;
 
 static void ecard_proc_init(void)
 {
-	proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus);
-	create_proc_info_entry("devices", 0, proc_bus_ecard_dir,
-		get_ecard_dev_info);
+	proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL);
+	proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops);
 }
 
 #define ec_set_resource(ec,nr,st,sz)				\
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 4143828..c6b94f6 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -311,11 +311,7 @@
 
 static int __init davinci_ck_proc_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("davinci_clocks", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_davinci_ck_operations;
+	proc_create("davinci_clocks", 0, NULL, &proc_davinci_ck_operations);
 	return 0;
 
 }
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 62066f3..7429f8c 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -26,8 +26,8 @@
 #ifdef CONFIG_PCI
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len   = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len   = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
index 078cd33..e4796c6 100644
--- a/arch/avr32/kernel/asm-offsets.c
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -5,14 +5,7 @@
  */
 
 #include <linux/thread_info.h>
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-        DEFINE(sym, offsetof(struct str, mem));
+#include <linux/kbuild.h>
 
 void foo(void)
 {
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index b835257..cd12edb 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -369,11 +369,7 @@
 
 static int __init proctlb_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("tlb", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_tlb_operations;
+	proc_create("tlb", 0, NULL, &proc_tlb_operations);
 	return 0;
 }
 late_initcall(proctlb_init);
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index b56b274..721f15f 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -34,8 +34,7 @@
 #include <linux/hardirq.h>
 #include <linux/irq.h>
 #include <linux/thread_info.h>
-
-#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#include <linux/kbuild.h>
 
 int main(void)
 {
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index d1fa244..cb9d883 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -212,7 +212,7 @@
 
 	/* Set up registers for signal handler */
 	wrusp((unsigned long)frame);
-	if (get_personality & FDPIC_FUNCPTRS) {
+	if (current->personality & FDPIC_FUNCPTRS) {
 		struct fdpic_func_descriptor __user *funcptr =
 			(struct fdpic_func_descriptor *) ka->sa.sa_handler;
 		__get_user(regs->pc, &funcptr->text);
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c
index aad0a9e..44f7b4f 100644
--- a/arch/cris/kernel/profile.c
+++ b/arch/cris/kernel/profile.c
@@ -75,9 +75,9 @@
 
 	sample_buffer_pos = sample_buffer;
 
-	entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL);
+	entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL,
+			    &cris_proc_profile_operations);
 	if (entry) {
-		entry->proc_fops = &cris_proc_profile_operations;
 		entry->size = SAMPLE_BUFFER_SIZE;
 	}
 	prof_running = 1;
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c
index fbb19fc..9de9684 100644
--- a/arch/frv/kernel/asm-offsets.c
+++ b/arch/frv/kernel/asm-offsets.c
@@ -7,15 +7,13 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
+#include <linux/kbuild.h>
 #include <asm/registers.h>
 #include <asm/ucontext.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/gdb-stub.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
 #define DEF_PTREG(sym, reg) \
         asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \
 		     : : "i" (offsetof(struct pt_regs, reg)))
@@ -32,11 +30,6 @@
         asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \
 		     : : "i" (offsetof(struct frv_frame0, reg)))
 
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-	DEFINE(sym, offsetof(struct str, mem));
-
 void foo(void)
 {
 	/* offsets into the thread_info structure */
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index d64bcaf..3bdb368 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -297,7 +297,7 @@
 	__frame->lr   = (unsigned long) &frame->retcode;
 	__frame->gr8  = sig;
 
-	if (get_personality & FDPIC_FUNCPTRS) {
+	if (current->personality & FDPIC_FUNCPTRS) {
 		struct fdpic_func_descriptor __user *funcptr =
 			(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
 		__get_user(__frame->pc, &funcptr->text);
@@ -396,7 +396,7 @@
 	__frame->gr8 = sig;
 	__frame->gr9 = (unsigned long) &frame->info;
 
-	if (get_personality & FDPIC_FUNCPTRS) {
+	if (current->personality & FDPIC_FUNCPTRS) {
 		struct fdpic_func_descriptor __user *funcptr =
 			(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
 		__get_user(__frame->pc, &funcptr->text);
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index a40df80..1d2dfe6 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -362,11 +362,8 @@
 #ifdef CONFIG_MMU
 	unsigned long fixup;
 
-	if ((esr0 & ESRx_EC) == ESRx_EC_DATA_ACCESS)
-		if (handle_misalignment(esr0, ear0, epcr0) == 0)
-			return;
-
-	if ((fixup = search_exception_table(__frame->pc)) != 0) {
+	fixup = search_exception_table(__frame->pc);
+	if (fixup) {
 		__frame->pc = fixup;
 		return;
 	}
diff --git a/arch/frv/mb93090-mb00/pci-iomap.c b/arch/frv/mb93090-mb00/pci-iomap.c
index 068fa04..35f6df2 100644
--- a/arch/frv/mb93090-mb00/pci-iomap.c
+++ b/arch/frv/mb93090-mb00/pci-iomap.c
@@ -13,8 +13,8 @@
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c
deleted file mode 100644
index 8f0375f..0000000
--- a/arch/frv/mm/unaligned.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* unaligned.c: unalignment fixup handler for CPUs on which it is supported (FR451 only)
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#if 0
-#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
-#else
-#define kdebug(fmt, ...) do {} while(0)
-#endif
-
-#define _MA_SIGNED	0x01
-#define _MA_HALF	0x02
-#define _MA_WORD	0x04
-#define _MA_DWORD	0x08
-#define _MA_SZ_MASK	0x0e
-#define _MA_LOAD	0x10
-#define _MA_STORE	0x20
-#define _MA_UPDATE	0x40
-#define _MA_IMM		0x80
-
-#define _MA_LDxU	_MA_LOAD | _MA_UPDATE
-#define _MA_LDxI	_MA_LOAD | _MA_IMM
-#define _MA_STxU	_MA_STORE | _MA_UPDATE
-#define _MA_STxI	_MA_STORE | _MA_IMM
-
-static const uint8_t tbl_LDGRk_reg[0x40] = {
-	[0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED,	/* LDSH  @(GRi,GRj),GRk */
-	[0x03] = _MA_LOAD | _MA_HALF,			/* LDUH  @(GRi,GRj),GRk */
-	[0x04] = _MA_LOAD | _MA_WORD,			/* LD	 @(GRi,GRj),GRk */
-	[0x05] = _MA_LOAD | _MA_DWORD,			/* LDD	 @(GRi,GRj),GRk */
-	[0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED,	/* LDSHU @(GRi,GRj),GRk */
-	[0x13] = _MA_LDxU | _MA_HALF,			/* LDUHU @(GRi,GRj),GRk */
-	[0x14] = _MA_LDxU | _MA_WORD,			/* LDU	 @(GRi,GRj),GRk */
-	[0x15] = _MA_LDxU | _MA_DWORD,			/* LDDU	 @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_STGRk_reg[0x40] = {
-	[0x01] = _MA_STORE | _MA_HALF,			/* STH   @(GRi,GRj),GRk */
-	[0x02] = _MA_STORE | _MA_WORD,			/* ST	 @(GRi,GRj),GRk */
-	[0x03] = _MA_STORE | _MA_DWORD,			/* STD	 @(GRi,GRj),GRk */
-	[0x11] = _MA_STxU  | _MA_HALF,			/* STHU  @(GRi,GRj),GRk */
-	[0x12] = _MA_STxU  | _MA_WORD,			/* STU	 @(GRi,GRj),GRk */
-	[0x13] = _MA_STxU  | _MA_DWORD,			/* STDU	 @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_LDSTGRk_imm[0x80] = {
-	[0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED,	/* LDSHI @(GRi,d12),GRk */
-	[0x32] = _MA_LDxI | _MA_WORD,			/* LDI   @(GRi,d12),GRk */
-	[0x33] = _MA_LDxI | _MA_DWORD,			/* LDDI  @(GRi,d12),GRk */
-	[0x36] = _MA_LDxI | _MA_HALF,			/* LDUHI @(GRi,d12),GRk */
-	[0x51] = _MA_STxI | _MA_HALF,			/* STHI  @(GRi,d12),GRk */
-	[0x52] = _MA_STxI | _MA_WORD,			/* STI   @(GRi,d12),GRk */
-	[0x53] = _MA_STxI | _MA_DWORD,			/* STDI  @(GRi,d12),GRk */
-};
-
-
-/*****************************************************************************/
-/*
- * see if we can handle the exception by fixing up a misaligned memory access
- */
-int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0)
-{
-	unsigned long insn, addr, *greg;
-	int GRi, GRj, GRk, D12, op;
-
-	union {
-		uint64_t _64;
-		uint32_t _32[2];
-		uint16_t _16;
-		uint8_t _8[8];
-	} x;
-
-	if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7))
-		return -EAGAIN;
-
-	epcr0 &= EPCR0_PC;
-
-	if (__frame->pc != epcr0) {
-		kdebug("MISALIGN: Execution not halted on excepting instruction\n");
-		BUG();
-	}
-
-	if (__get_user(insn, (unsigned long *) epcr0) < 0)
-		return -EFAULT;
-
-	/* determine the instruction type first */
-	switch ((insn >> 18) & 0x7f) {
-	case 0x2:
-		/* LDx @(GRi,GRj),GRk */
-		op = tbl_LDGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	case 0x3:
-		/* STx GRk,@(GRi,GRj) */
-		op = tbl_STGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	default:
-		op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f];
-		break;
-	}
-
-	if (!op)
-		return -EAGAIN;
-
-	kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op);
-
-	memset(&x, 0xba, 8);
-
-	/* validate the instruction parameters */
-	greg = (unsigned long *) &__frame->tbr;
-
-	GRi = (insn >> 12) & 0x3f;
-	GRk = (insn >> 25) & 0x3f;
-
-	if (GRi > 31 || GRk > 31)
-		return -ENOENT;
-
-	if (op & _MA_DWORD && GRk & 1)
-		return -EINVAL;
-
-	if (op & _MA_IMM) {
-		D12 = insn & 0xfff;
-		asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */
-		addr = (GRi ? greg[GRi] : 0) + D12;
-	}
-	else {
-		GRj = (insn >>  0) & 0x3f;
-		if (GRj > 31)
-			return -ENOENT;
-		addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0);
-	}
-
-	if (addr != ear0) {
-		kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n",
-		       addr, ear0);
-		return -EFAULT;
-	}
-
-	/* check the address is okay */
-	if (user_mode(__frame) && ___range_ok(ear0, 8) < 0)
-		return -EFAULT;
-
-	/* perform the memory op */
-	if (op & _MA_STORE) {
-		/* perform a store */
-		x._32[0] = 0;
-		if (GRk != 0) {
-			if (op & _MA_HALF) {
-				x._16 = greg[GRk];
-			}
-			else {
-				x._32[0] = greg[GRk];
-			}
-		}
-		if (op & _MA_DWORD)
-			x._32[1] = greg[GRk + 1];
-
-		kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n",
-		       GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK);
-
-		if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-	}
-	else {
-		/* perform a load */
-		if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-
-		if (op & _MA_HALF) {
-			if (op & _MA_SIGNED)
-				asm ("slli %0,#16,%0 ! srai %0,#16,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-			else
-				asm ("sethi #0,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-		}
-
-		kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n",
-		       addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]);
-
-		if (GRk != 0)
-			greg[GRk] = x._32[0];
-		if (op & _MA_DWORD)
-			greg[GRk + 1] = x._32[1];
-	}
-
-	/* update the base pointer if required */
-	if (op & _MA_UPDATE)
-		greg[GRi] = addr;
-
-	/* well... we've done that insn */
-	__frame->pc = __frame->pc + 4;
-
-	return 0;
-} /* end handle_misalignment() */
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index fc30b4f..2042552 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -13,15 +13,11 @@
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/ptrace.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
 	/* offsets into the task struct */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3aa6c82..0df5f6f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -19,6 +19,7 @@
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
+	select HAVE_DMA_ATTRS
 	select HAVE_KVM
 	default y
 	help
@@ -47,6 +48,9 @@
 config SWIOTLB
        bool
 
+config IOMMU_HELPER
+       bool
+
 config GENERIC_LOCKBREAK
 	bool
 	default y
@@ -615,7 +619,7 @@
 	default y
 
 config IOMMU_HELPER
-	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
 
 source "arch/ia64/hp/sim/Kconfig"
 
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 8f6bcfe..1c44ec2 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -20,10 +20,10 @@
 extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
-extern ia64_mv_dma_map_single		swiotlb_map_single;
-extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg		swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_supported		swiotlb_dma_supported;
 extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
 
@@ -31,19 +31,19 @@
 
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single		sba_map_single;
-extern ia64_mv_dma_unmap_single		sba_unmap_single;
-extern ia64_mv_dma_map_sg		sba_map_sg;
-extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported		sba_dma_supported;
 extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 
 #define hwiommu_alloc_coherent		sba_alloc_coherent
 #define hwiommu_free_coherent		sba_free_coherent
-#define hwiommu_map_single		sba_map_single
-#define hwiommu_unmap_single		sba_unmap_single
-#define hwiommu_map_sg			sba_map_sg
-#define hwiommu_unmap_sg		sba_unmap_sg
+#define hwiommu_map_single_attrs	sba_map_single_attrs
+#define hwiommu_unmap_single_attrs	sba_unmap_single_attrs
+#define hwiommu_map_sg_attrs		sba_map_sg_attrs
+#define hwiommu_unmap_sg_attrs		sba_unmap_sg_attrs
 #define hwiommu_dma_supported		sba_dma_supported
 #define hwiommu_dma_mapping_error	sba_dma_mapping_error
 #define hwiommu_sync_single_for_cpu	machvec_dma_sync_single
@@ -98,41 +98,48 @@
 }
 
 dma_addr_t
-hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		       struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_single(dev, addr, size, dir);
+		return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
 	else
-		return hwiommu_map_single(dev, addr, size, dir);
+		return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_single_attrs);
 
 void
-hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_single(dev, iova, size, dir);
+		return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
 	else
-		return hwiommu_unmap_single(dev, iova, size, dir);
+		return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
 }
-
+EXPORT_SYMBOL(hwsw_unmap_single_attrs);
 
 int
-hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		   int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_map_sg(dev, sglist, nents, dir);
+		return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_map_sg(dev, sglist, nents, dir);
+		return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_sg_attrs);
 
 void
-hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	if (use_swiotlb(dev))
-		return swiotlb_unmap_sg(dev, sglist, nents, dir);
+		return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 	else
-		return hwiommu_unmap_sg(dev, sglist, nents, dir);
+		return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
 
 void
 hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
@@ -185,10 +192,6 @@
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_map_single);
-EXPORT_SYMBOL(hwsw_unmap_single);
-EXPORT_SYMBOL(hwsw_map_sg);
-EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 9409de5..34421ae 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -899,16 +899,18 @@
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+		     struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	dma_addr_t iovp;
@@ -932,7 +934,8 @@
  		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr
  		*/
-		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+			   "0x%lx/0x%lx\n",
 		           to_pci_dev(dev)->dma_mask, pci_addr);
 		return pci_addr;
 	}
@@ -953,7 +956,7 @@
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
 		panic("Sanity check failed");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -982,11 +985,12 @@
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_map_single()");
+	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 	return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1013,15 +1017,17 @@
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+			    int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1038,7 +1044,8 @@
 		/*
 		** Address does not fall w/in IOVA, must be bypassing
 		*/
-		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
+			   iova);
 
 #ifdef ENABLE_MARK_CLEAN
 		if (dir == DMA_FROM_DEVICE) {
@@ -1087,7 +1094,7 @@
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1144,7 +1151,8 @@
 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
 	 * device to map single to get an iova mapping.
 	 */
-	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+					   size, 0, NULL);
 
 	return addr;
 }
@@ -1161,7 +1169,7 @@
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-	sba_unmap_single(dev, dma_handle, size, 0);
+	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1410,10 +1418,12 @@
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	int coalesced, filled = 0;
@@ -1441,16 +1451,16 @@
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 		sglist->dma_length = sglist->length;
-		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
 		return 1;
 	}
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check before sba_map_sg()");
+		panic("Check before sba_map_sg_attrs()");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1479,10 +1489,10 @@
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
 	{
 		sba_dump_sg(ioc, sglist, nents);
-		panic("Check after sba_map_sg()\n");
+		panic("Check after sba_map_sg_attrs()\n");
 	}
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1492,18 +1502,20 @@
 
 	return filled;
 }
-
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
 	struct ioc *ioc;
@@ -1518,13 +1530,14 @@
 	ASSERT(ioc);
 
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 	while (nents && sglist->dma_length) {
 
-		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+		sba_unmap_single_attrs(dev, sglist->dma_address,
+				       sglist->dma_length, dir, attrs);
 		sglist = sg_next(sglist);
 		nents--;
 	}
@@ -1533,11 +1546,12 @@
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
-	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -1918,15 +1932,13 @@
 static void __init
 ioc_proc_init(void)
 {
-	struct proc_dir_entry *dir, *entry;
+	struct proc_dir_entry *dir;
 
 	dir = proc_mkdir("bus/mckinley", NULL);
 	if (!dir)
 		return;
 
-	entry = create_proc_entry(ioc_list->name, 0, dir);
-	if (entry)
-		entry->proc_fops = &ioc_fops;
+	proc_create(ioc_list->name, 0, dir, &ioc_fops);
 }
 #endif
 
@@ -2166,10 +2178,6 @@
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 230a6f9..c64a55a 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -9,7 +9,7 @@
 #include <linux/sched.h>
 #include <linux/pid.h>
 #include <linux/clocksource.h>
-
+#include <linux/kbuild.h>
 #include <asm-ia64/processor.h>
 #include <asm-ia64/ptrace.h>
 #include <asm-ia64/siginfo.h>
@@ -19,11 +19,6 @@
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 void foo(void)
 {
 	DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c8e4037..7fbb51e 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6695,16 +6695,12 @@
 	/*
 	 * create /proc/perfmon (mostly for debugging purposes)
 	 */
- 	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
+	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
 	if (perfmon_dir == NULL) {
 		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
 		pmu_conf = NULL;
 		return -1;
 	}
-  	/*
- 	 * install customized file operations for /proc/perfmon entry
- 	 */
- 	perfmon_dir->proc_fops = &pfm_proc_fops;
 
 	/*
 	 * create /proc/sys/kernel/perfmon (for debugging purposes)
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index b11bb50..ecb9eb7 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -648,18 +648,16 @@
 		if (!dir)
 			continue;
 
-		entry = create_proc_entry("event", S_IRUSR, dir);
+		entry = proc_create_data("event", S_IRUSR, dir,
+					 &salinfo_event_fops, data);
 		if (!entry)
 			continue;
-		entry->data = data;
-		entry->proc_fops = &salinfo_event_fops;
 		*sdir++ = entry;
 
-		entry = create_proc_entry("data", S_IRUSR | S_IWUSR, dir);
+		entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir,
+					 &salinfo_data_fops, data);
 		if (!entry)
 			continue;
-		entry->data = data;
-		entry->proc_fops = &salinfo_data_fops;
 		*sdir++ = entry;
 
 		/* we missed any events before now */
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index dfc6bf1..49d3120 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -550,11 +550,12 @@
 	if (!ia64_platform_is("sn2"))
 		return 0;
 
-	if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+	proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
+				   NULL, &proc_sn2_ptc_operations);
+	if (!proc_sn2_ptc) {
 		printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
 		return -EINVAL;
 	}
-	proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
 	spin_lock_init(&sn2_global_ptc_lock);
 	return 0;
 }
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index 62b3e9a..2526e5c 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -139,30 +139,21 @@
 void register_sn_procfs(void)
 {
 	static struct proc_dir_entry *sgi_proc_dir = NULL;
-	struct proc_dir_entry *pde;
 
 	BUG_ON(sgi_proc_dir != NULL);
 	if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
 		return;
 
-	pde = create_proc_entry("partition_id", 0444, sgi_proc_dir);
-	if (pde)
-		pde->proc_fops = &proc_partition_id_fops;
-	pde = create_proc_entry("system_serial_number", 0444, sgi_proc_dir);
-	if (pde)
-		pde->proc_fops = &proc_system_sn_fops;
-	pde = create_proc_entry("licenseID", 0444, sgi_proc_dir);
-	if (pde)
-		pde->proc_fops = &proc_license_id_fops;
-	pde = create_proc_entry("sn_force_interrupt", 0644, sgi_proc_dir);
-	if (pde)
-		pde->proc_fops = &proc_sn_force_intr_fops;
-	pde = create_proc_entry("coherence_id", 0444, sgi_proc_dir);
-	if (pde)
-		pde->proc_fops = &proc_coherence_id_fops;
-	pde = create_proc_entry("sn_topology", 0444, sgi_proc_dir);
-	if (pde)
-		pde->proc_fops = &proc_sn_topo_fops;
+	proc_create("partition_id", 0444, sgi_proc_dir,
+		    &proc_partition_id_fops);
+	proc_create("system_serial_number", 0444, sgi_proc_dir,
+		    &proc_system_sn_fops);
+	proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
+	proc_create("sn_force_interrupt", 0644, sgi_proc_dir,
+		    &proc_sn_force_intr_fops);
+	proc_create("coherence_id", 0444, sgi_proc_dir,
+		    &proc_coherence_id_fops);
+	proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
 }
 
 #endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 18b94b7..52175af 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-attrs.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -149,11 +150,12 @@
 EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
  * @dev: device to map for
  * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Map the region pointed to by @cpu_addr for DMA and return the
  * DMA address.
@@ -163,42 +165,59 @@
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-			     int direction)
+dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+				   size_t size, int direction,
+				   struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	unsigned long phys_addr;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
 	phys_addr = __pa(cpu_addr);
-	dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
+	if (dmabarr)
+		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+							size, SN_DMA_ADDR_PHYS);
+	else
+		dma_addr = provider->dma_map(pdev, phys_addr, size,
+					     SN_DMA_ADDR_PHYS);
+
 	if (!dma_addr) {
 		printk(KERN_ERR "%s: out of ATEs\n", __func__);
 		return 0;
 	}
 	return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single);
+EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unmap a DMA mapped page
  * @dev: device to sync
  * @dma_addr: DMA address to sync
  * @size: size of region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * This routine is supposed to sync the DMA region specified
  * by @dma_handle into the coherence domain.  On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-			 int direction)
+void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+			       size_t size, int direction,
+			       struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -207,19 +226,21 @@
 
 	provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single);
+EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		     int nhwentries, int direction)
+void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			   int nhwentries, int direction,
+			   struct dma_attrs *attrs)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -234,25 +255,34 @@
 		sg->dma_length = 0;
 	}
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg - map a scatterlist for DMA
+ * sn_dma_map_sg_attrs - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
  * @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
-		  int direction)
+int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+			int nhwentries, int direction, struct dma_attrs *attrs)
 {
 	unsigned long phys_addr;
 	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
+	int dmabarr;
+
+	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
@@ -260,11 +290,19 @@
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
 	for_each_sg(sgl, sg, nhwentries, i) {
+		dma_addr_t dma_addr;
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-		sg->dma_address = provider->dma_map(pdev,
-						    phys_addr, sg->length,
-						    SN_DMA_ADDR_PHYS);
+		if (dmabarr)
+			dma_addr = provider->dma_map_consistent(pdev,
+								phys_addr,
+								sg->length,
+								SN_DMA_ADDR_PHYS);
+		else
+			dma_addr = provider->dma_map(pdev, phys_addr,
+						     sg->length,
+						     SN_DMA_ADDR_PHYS);
 
+		sg->dma_address = dma_addr;
 		if (!sg->dma_address) {
 			printk(KERN_ERR "%s: out of ATEs\n", __func__);
 
@@ -272,7 +310,8 @@
 			 * Free any successfully allocated entries.
 			 */
 			if (i > 0)
-				sn_dma_unmap_sg(dev, saved_sg, i, direction);
+				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
+						      direction, attrs);
 			return 0;
 		}
 
@@ -281,7 +320,7 @@
 
 	return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg);
+EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
 void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 				size_t size, int direction)
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index 246a882..b1f012f 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -11,14 +11,12 @@
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
+#include <linux/kbuild.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/amigahw.h>
 #include <linux/font.h>
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
 int main(void)
 {
 	/* offsets into the task struct */
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 5b2799e..326fb99 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -109,7 +109,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/proc_fs.h>
 #include <linux/interrupt.h>
 
 #include <asm/bootinfo.h>
@@ -124,10 +123,6 @@
 
 int iop_scc_present,iop_ism_present;
 
-#ifdef CONFIG_PROC_FS
-static int iop_get_proc_info(char *, char **, off_t, int);
-#endif /* CONFIG_PROC_FS */
-
 /* structure for tracking channel listeners */
 
 struct listener {
@@ -299,12 +294,6 @@
 		iop_listeners[IOP_NUM_ISM][i].devname = NULL;
 		iop_listeners[IOP_NUM_ISM][i].handler = NULL;
 	}
-
-#if 0	/* Crashing in 2.4 now, not yet sure why.   --jmt */
-#ifdef CONFIG_PROC_FS
-	create_proc_info_entry("mac_iop", 0, &proc_root, iop_get_proc_info);
-#endif
-#endif
 }
 
 /*
@@ -637,77 +626,3 @@
 	}
 	return IRQ_HANDLED;
 }
-
-#ifdef CONFIG_PROC_FS
-
-char *iop_chan_state(int state)
-{
-	switch(state) {
-		case IOP_MSG_IDLE	: return "idle      ";
-		case IOP_MSG_NEW	: return "new       ";
-		case IOP_MSG_RCVD	: return "received  ";
-		case IOP_MSG_COMPLETE	: return "completed ";
-		default			: return "unknown   ";
-	}
-}
-
-int iop_dump_one_iop(char *buf, int iop_num, char *iop_name)
-{
-	int i,len = 0;
-	volatile struct mac_iop *iop = iop_base[iop_num];
-
-	len += sprintf(buf+len, "%s IOP channel states:\n\n", iop_name);
-	len += sprintf(buf+len, "##  send_state  recv_state  device\n");
-	len += sprintf(buf+len, "------------------------------------------------\n");
-	for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
-		len += sprintf(buf+len, "%2d  %10s  %10s  %s\n", i,
-			iop_chan_state(iop_readb(iop, IOP_ADDR_SEND_STATE+i)),
-			iop_chan_state(iop_readb(iop, IOP_ADDR_RECV_STATE+i)),
-			iop_listeners[iop_num][i].handler?
-				      iop_listeners[iop_num][i].devname : "");
-
-	}
-	len += sprintf(buf+len, "\n");
-	return len;
-}
-
-static int iop_get_proc_info(char *buf, char **start, off_t pos, int count)
-{
-	int len, cnt;
-
-	cnt = 0;
-	len =  sprintf(buf, "IOPs detected:\n\n");
-
-	if (iop_scc_present) {
-		len += sprintf(buf+len, "SCC IOP (%p): status %02X\n",
-				iop_base[IOP_NUM_SCC],
-				(uint) iop_base[IOP_NUM_SCC]->status_ctrl);
-	}
-	if (iop_ism_present) {
-		len += sprintf(buf+len, "ISM IOP (%p): status %02X\n\n",
-				iop_base[IOP_NUM_ISM],
-				(uint) iop_base[IOP_NUM_ISM]->status_ctrl);
-	}
-
-	if (iop_scc_present) {
-		len += iop_dump_one_iop(buf+len, IOP_NUM_SCC, "SCC");
-
-	}
-
-	if (iop_ism_present) {
-		len += iop_dump_one_iop(buf+len, IOP_NUM_ISM, "ISM");
-
-	}
-
-	if (len >= pos) {
-		if (!*start) {
-			*start = buf + pos;
-			cnt = len - pos;
-		} else {
-			cnt += len;
-		}
-	}
-	return (count > cnt) ? cnt : count;
-}
-
-#endif /* CONFIG_PROC_FS */
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index d97b89b..fd0c685 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -13,15 +13,11 @@
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/thread_info.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
 	/* offsets into the task struct */
diff --git a/arch/mips/basler/excite/excite_procfs.c b/arch/mips/basler/excite/excite_procfs.c
index 9ee67a9..08923e6 100644
--- a/arch/mips/basler/excite/excite_procfs.c
+++ b/arch/mips/basler/excite/excite_procfs.c
@@ -18,8 +18,9 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
-
+#include <linux/module.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <asm/page.h>
 #include <asm/io.h>
@@ -28,14 +29,25 @@
 
 #include <excite.h>
 
-static int excite_get_unit_id(char *buf, char **addr, off_t offs, int size)
+static int excite_unit_id_proc_show(struct seq_file *m, void *v)
 {
-	const int len = snprintf(buf, PAGE_SIZE, "%06x", unit_id);
-	const int w = len - offs;
-	*addr = buf + offs;
-	return w < size ? w : size;
+	seq_printf(m, "%06x", unit_id);
+	return 0;
 }
 
+static int excite_unit_id_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, excite_unit_id_proc_show, NULL);
+}
+
+static const struct file_operations excite_unit_id_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= excite_unit_id_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int
 excite_bootrom_read(char *page, char **start, off_t off, int count,
 		  int *eof, void *data)
@@ -65,12 +77,12 @@
 void excite_procfs_init(void)
 {
 	/* Create & populate /proc/excite */
-	struct proc_dir_entry * const pdir = proc_mkdir("excite", &proc_root);
+	struct proc_dir_entry * const pdir = proc_mkdir("excite", NULL);
 	if (pdir) {
 		struct proc_dir_entry * e;
 
-		e = create_proc_info_entry("unit_id", S_IRUGO, pdir,
-					   excite_get_unit_id);
+		e = proc_create("unit_id", S_IRUGO, pdir,
+				&excite_unit_id_proc_fops);
 		if (e) e->size = 6;
 
 		e = create_proc_read_entry("bootrom", S_IRUGO, pdir,
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 5bf03b3..7294222 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -13,327 +13,285 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-
+#include <linux/kbuild.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
-#define text(t) __asm__("\n->#" t)
-#define _offset(type, member) (&(((type *)NULL)->member))
-#define offset(string, ptr, member) \
-	__asm__("\n->" string " %0" : : "i" (_offset(ptr, member)))
-#define constant(string, member) \
-	__asm__("\n->" string " %0" : : "ri" (member))
-#define size(string, size) \
-	__asm__("\n->" string " %0" : : "i" (sizeof(size)))
-#define linefeed text("")
-
 void output_ptreg_defines(void)
 {
-	text("MIPS pt_regs offsets.");
-	offset("PT_R0", struct pt_regs, regs[0]);
-	offset("PT_R1", struct pt_regs, regs[1]);
-	offset("PT_R2", struct pt_regs, regs[2]);
-	offset("PT_R3", struct pt_regs, regs[3]);
-	offset("PT_R4", struct pt_regs, regs[4]);
-	offset("PT_R5", struct pt_regs, regs[5]);
-	offset("PT_R6", struct pt_regs, regs[6]);
-	offset("PT_R7", struct pt_regs, regs[7]);
-	offset("PT_R8", struct pt_regs, regs[8]);
-	offset("PT_R9", struct pt_regs, regs[9]);
-	offset("PT_R10", struct pt_regs, regs[10]);
-	offset("PT_R11", struct pt_regs, regs[11]);
-	offset("PT_R12", struct pt_regs, regs[12]);
-	offset("PT_R13", struct pt_regs, regs[13]);
-	offset("PT_R14", struct pt_regs, regs[14]);
-	offset("PT_R15", struct pt_regs, regs[15]);
-	offset("PT_R16", struct pt_regs, regs[16]);
-	offset("PT_R17", struct pt_regs, regs[17]);
-	offset("PT_R18", struct pt_regs, regs[18]);
-	offset("PT_R19", struct pt_regs, regs[19]);
-	offset("PT_R20", struct pt_regs, regs[20]);
-	offset("PT_R21", struct pt_regs, regs[21]);
-	offset("PT_R22", struct pt_regs, regs[22]);
-	offset("PT_R23", struct pt_regs, regs[23]);
-	offset("PT_R24", struct pt_regs, regs[24]);
-	offset("PT_R25", struct pt_regs, regs[25]);
-	offset("PT_R26", struct pt_regs, regs[26]);
-	offset("PT_R27", struct pt_regs, regs[27]);
-	offset("PT_R28", struct pt_regs, regs[28]);
-	offset("PT_R29", struct pt_regs, regs[29]);
-	offset("PT_R30", struct pt_regs, regs[30]);
-	offset("PT_R31", struct pt_regs, regs[31]);
-	offset("PT_LO", struct pt_regs, lo);
-	offset("PT_HI", struct pt_regs, hi);
+	COMMENT("MIPS pt_regs offsets.");
+	OFFSET(PT_R0, pt_regs, regs[0]);
+	OFFSET(PT_R1, pt_regs, regs[1]);
+	OFFSET(PT_R2, pt_regs, regs[2]);
+	OFFSET(PT_R3, pt_regs, regs[3]);
+	OFFSET(PT_R4, pt_regs, regs[4]);
+	OFFSET(PT_R5, pt_regs, regs[5]);
+	OFFSET(PT_R6, pt_regs, regs[6]);
+	OFFSET(PT_R7, pt_regs, regs[7]);
+	OFFSET(PT_R8, pt_regs, regs[8]);
+	OFFSET(PT_R9, pt_regs, regs[9]);
+	OFFSET(PT_R10, pt_regs, regs[10]);
+	OFFSET(PT_R11, pt_regs, regs[11]);
+	OFFSET(PT_R12, pt_regs, regs[12]);
+	OFFSET(PT_R13, pt_regs, regs[13]);
+	OFFSET(PT_R14, pt_regs, regs[14]);
+	OFFSET(PT_R15, pt_regs, regs[15]);
+	OFFSET(PT_R16, pt_regs, regs[16]);
+	OFFSET(PT_R17, pt_regs, regs[17]);
+	OFFSET(PT_R18, pt_regs, regs[18]);
+	OFFSET(PT_R19, pt_regs, regs[19]);
+	OFFSET(PT_R20, pt_regs, regs[20]);
+	OFFSET(PT_R21, pt_regs, regs[21]);
+	OFFSET(PT_R22, pt_regs, regs[22]);
+	OFFSET(PT_R23, pt_regs, regs[23]);
+	OFFSET(PT_R24, pt_regs, regs[24]);
+	OFFSET(PT_R25, pt_regs, regs[25]);
+	OFFSET(PT_R26, pt_regs, regs[26]);
+	OFFSET(PT_R27, pt_regs, regs[27]);
+	OFFSET(PT_R28, pt_regs, regs[28]);
+	OFFSET(PT_R29, pt_regs, regs[29]);
+	OFFSET(PT_R30, pt_regs, regs[30]);
+	OFFSET(PT_R31, pt_regs, regs[31]);
+	OFFSET(PT_LO, pt_regs, lo);
+	OFFSET(PT_HI, pt_regs, hi);
 #ifdef CONFIG_CPU_HAS_SMARTMIPS
-	offset("PT_ACX", struct pt_regs, acx);
+	OFFSET(PT_ACX, pt_regs, acx);
 #endif
-	offset("PT_EPC", struct pt_regs, cp0_epc);
-	offset("PT_BVADDR", struct pt_regs, cp0_badvaddr);
-	offset("PT_STATUS", struct pt_regs, cp0_status);
-	offset("PT_CAUSE", struct pt_regs, cp0_cause);
+	OFFSET(PT_EPC, pt_regs, cp0_epc);
+	OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+	OFFSET(PT_STATUS, pt_regs, cp0_status);
+	OFFSET(PT_CAUSE, pt_regs, cp0_cause);
 #ifdef CONFIG_MIPS_MT_SMTC
-	offset("PT_TCSTATUS", struct pt_regs, cp0_tcstatus);
+	OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
 #endif /* CONFIG_MIPS_MT_SMTC */
-	size("PT_SIZE", struct pt_regs);
-	linefeed;
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	BLANK();
 }
 
 void output_task_defines(void)
 {
-	text("MIPS task_struct offsets.");
-	offset("TASK_STATE", struct task_struct, state);
-	offset("TASK_THREAD_INFO", struct task_struct, stack);
-	offset("TASK_FLAGS", struct task_struct, flags);
-	offset("TASK_MM", struct task_struct, mm);
-	offset("TASK_PID", struct task_struct, pid);
-	size(  "TASK_STRUCT_SIZE", struct task_struct);
-	linefeed;
+	COMMENT("MIPS task_struct offsets.");
+	OFFSET(TASK_STATE, task_struct, state);
+	OFFSET(TASK_THREAD_INFO, task_struct, stack);
+	OFFSET(TASK_FLAGS, task_struct, flags);
+	OFFSET(TASK_MM, task_struct, mm);
+	OFFSET(TASK_PID, task_struct, pid);
+	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+	BLANK();
 }
 
 void output_thread_info_defines(void)
 {
-	text("MIPS thread_info offsets.");
-	offset("TI_TASK", struct thread_info, task);
-	offset("TI_EXEC_DOMAIN", struct thread_info, exec_domain);
-	offset("TI_FLAGS", struct thread_info, flags);
-	offset("TI_TP_VALUE", struct thread_info, tp_value);
-	offset("TI_CPU", struct thread_info, cpu);
-	offset("TI_PRE_COUNT", struct thread_info, preempt_count);
-	offset("TI_ADDR_LIMIT", struct thread_info, addr_limit);
-	offset("TI_RESTART_BLOCK", struct thread_info, restart_block);
-	offset("TI_REGS", struct thread_info, regs);
-	constant("_THREAD_SIZE", THREAD_SIZE);
-	constant("_THREAD_MASK", THREAD_MASK);
-	linefeed;
+	COMMENT("MIPS thread_info offsets.");
+	OFFSET(TI_TASK, thread_info, task);
+	OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
+	OFFSET(TI_FLAGS, thread_info, flags);
+	OFFSET(TI_TP_VALUE, thread_info, tp_value);
+	OFFSET(TI_CPU, thread_info, cpu);
+	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+	OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
+	OFFSET(TI_REGS, thread_info, regs);
+	DEFINE(_THREAD_SIZE, THREAD_SIZE);
+	DEFINE(_THREAD_MASK, THREAD_MASK);
+	BLANK();
 }
 
 void output_thread_defines(void)
 {
-	text("MIPS specific thread_struct offsets.");
-	offset("THREAD_REG16", struct task_struct, thread.reg16);
-	offset("THREAD_REG17", struct task_struct, thread.reg17);
-	offset("THREAD_REG18", struct task_struct, thread.reg18);
-	offset("THREAD_REG19", struct task_struct, thread.reg19);
-	offset("THREAD_REG20", struct task_struct, thread.reg20);
-	offset("THREAD_REG21", struct task_struct, thread.reg21);
-	offset("THREAD_REG22", struct task_struct, thread.reg22);
-	offset("THREAD_REG23", struct task_struct, thread.reg23);
-	offset("THREAD_REG29", struct task_struct, thread.reg29);
-	offset("THREAD_REG30", struct task_struct, thread.reg30);
-	offset("THREAD_REG31", struct task_struct, thread.reg31);
-	offset("THREAD_STATUS", struct task_struct,
+	COMMENT("MIPS specific thread_struct offsets.");
+	OFFSET(THREAD_REG16, task_struct, thread.reg16);
+	OFFSET(THREAD_REG17, task_struct, thread.reg17);
+	OFFSET(THREAD_REG18, task_struct, thread.reg18);
+	OFFSET(THREAD_REG19, task_struct, thread.reg19);
+	OFFSET(THREAD_REG20, task_struct, thread.reg20);
+	OFFSET(THREAD_REG21, task_struct, thread.reg21);
+	OFFSET(THREAD_REG22, task_struct, thread.reg22);
+	OFFSET(THREAD_REG23, task_struct, thread.reg23);
+	OFFSET(THREAD_REG29, task_struct, thread.reg29);
+	OFFSET(THREAD_REG30, task_struct, thread.reg30);
+	OFFSET(THREAD_REG31, task_struct, thread.reg31);
+	OFFSET(THREAD_STATUS, task_struct,
 	       thread.cp0_status);
-	offset("THREAD_FPU", struct task_struct, thread.fpu);
+	OFFSET(THREAD_FPU, task_struct, thread.fpu);
 
-	offset("THREAD_BVADDR", struct task_struct, \
+	OFFSET(THREAD_BVADDR, task_struct, \
 	       thread.cp0_badvaddr);
-	offset("THREAD_BUADDR", struct task_struct, \
+	OFFSET(THREAD_BUADDR, task_struct, \
 	       thread.cp0_baduaddr);
-	offset("THREAD_ECODE", struct task_struct, \
+	OFFSET(THREAD_ECODE, task_struct, \
 	       thread.error_code);
-	offset("THREAD_TRAPNO", struct task_struct, thread.trap_no);
-	offset("THREAD_TRAMP", struct task_struct, \
+	OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
+	OFFSET(THREAD_TRAMP, task_struct, \
 	       thread.irix_trampoline);
-	offset("THREAD_OLDCTX", struct task_struct, \
+	OFFSET(THREAD_OLDCTX, task_struct, \
 	       thread.irix_oldctx);
-	linefeed;
+	BLANK();
 }
 
 void output_thread_fpu_defines(void)
 {
-	offset("THREAD_FPR0",
-	       struct task_struct, thread.fpu.fpr[0]);
-	offset("THREAD_FPR1",
-	       struct task_struct, thread.fpu.fpr[1]);
-	offset("THREAD_FPR2",
-	       struct task_struct, thread.fpu.fpr[2]);
-	offset("THREAD_FPR3",
-	       struct task_struct, thread.fpu.fpr[3]);
-	offset("THREAD_FPR4",
-	       struct task_struct, thread.fpu.fpr[4]);
-	offset("THREAD_FPR5",
-	       struct task_struct, thread.fpu.fpr[5]);
-	offset("THREAD_FPR6",
-	       struct task_struct, thread.fpu.fpr[6]);
-	offset("THREAD_FPR7",
-	       struct task_struct, thread.fpu.fpr[7]);
-	offset("THREAD_FPR8",
-	       struct task_struct, thread.fpu.fpr[8]);
-	offset("THREAD_FPR9",
-	       struct task_struct, thread.fpu.fpr[9]);
-	offset("THREAD_FPR10",
-	       struct task_struct, thread.fpu.fpr[10]);
-	offset("THREAD_FPR11",
-	       struct task_struct, thread.fpu.fpr[11]);
-	offset("THREAD_FPR12",
-	       struct task_struct, thread.fpu.fpr[12]);
-	offset("THREAD_FPR13",
-	       struct task_struct, thread.fpu.fpr[13]);
-	offset("THREAD_FPR14",
-	       struct task_struct, thread.fpu.fpr[14]);
-	offset("THREAD_FPR15",
-	       struct task_struct, thread.fpu.fpr[15]);
-	offset("THREAD_FPR16",
-	       struct task_struct, thread.fpu.fpr[16]);
-	offset("THREAD_FPR17",
-	       struct task_struct, thread.fpu.fpr[17]);
-	offset("THREAD_FPR18",
-	       struct task_struct, thread.fpu.fpr[18]);
-	offset("THREAD_FPR19",
-	       struct task_struct, thread.fpu.fpr[19]);
-	offset("THREAD_FPR20",
-	       struct task_struct, thread.fpu.fpr[20]);
-	offset("THREAD_FPR21",
-	       struct task_struct, thread.fpu.fpr[21]);
-	offset("THREAD_FPR22",
-	       struct task_struct, thread.fpu.fpr[22]);
-	offset("THREAD_FPR23",
-	       struct task_struct, thread.fpu.fpr[23]);
-	offset("THREAD_FPR24",
-	       struct task_struct, thread.fpu.fpr[24]);
-	offset("THREAD_FPR25",
-	       struct task_struct, thread.fpu.fpr[25]);
-	offset("THREAD_FPR26",
-	       struct task_struct, thread.fpu.fpr[26]);
-	offset("THREAD_FPR27",
-	       struct task_struct, thread.fpu.fpr[27]);
-	offset("THREAD_FPR28",
-	       struct task_struct, thread.fpu.fpr[28]);
-	offset("THREAD_FPR29",
-	       struct task_struct, thread.fpu.fpr[29]);
-	offset("THREAD_FPR30",
-	       struct task_struct, thread.fpu.fpr[30]);
-	offset("THREAD_FPR31",
-	       struct task_struct, thread.fpu.fpr[31]);
+	OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
+	OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
+	OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
+	OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
+	OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
+	OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
+	OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
+	OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
+	OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
+	OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
+	OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
+	OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
+	OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
+	OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
+	OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
+	OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
+	OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
+	OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
+	OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
+	OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
+	OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
+	OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
+	OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
+	OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
+	OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
+	OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
+	OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
+	OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
+	OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
+	OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
+	OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
+	OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
 
-	offset("THREAD_FCR31",
-	       struct task_struct, thread.fpu.fcr31);
-	linefeed;
+	OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+	BLANK();
 }
 
 void output_mm_defines(void)
 {
-	text("Size of struct page");
-	size("STRUCT_PAGE_SIZE", struct page);
-	linefeed;
-	text("Linux mm_struct offsets.");
-	offset("MM_USERS", struct mm_struct, mm_users);
-	offset("MM_PGD", struct mm_struct, pgd);
-	offset("MM_CONTEXT", struct mm_struct, context);
-	linefeed;
-	constant("_PAGE_SIZE", PAGE_SIZE);
-	constant("_PAGE_SHIFT", PAGE_SHIFT);
-	linefeed;
-	constant("_PGD_T_SIZE", sizeof(pgd_t));
-	constant("_PMD_T_SIZE", sizeof(pmd_t));
-	constant("_PTE_T_SIZE", sizeof(pte_t));
-	linefeed;
-	constant("_PGD_T_LOG2", PGD_T_LOG2);
-	constant("_PMD_T_LOG2", PMD_T_LOG2);
-	constant("_PTE_T_LOG2", PTE_T_LOG2);
-	linefeed;
-	constant("_PGD_ORDER", PGD_ORDER);
-	constant("_PMD_ORDER", PMD_ORDER);
-	constant("_PTE_ORDER", PTE_ORDER);
-	linefeed;
-	constant("_PMD_SHIFT", PMD_SHIFT);
-	constant("_PGDIR_SHIFT", PGDIR_SHIFT);
-	linefeed;
-	constant("_PTRS_PER_PGD", PTRS_PER_PGD);
-	constant("_PTRS_PER_PMD", PTRS_PER_PMD);
-	constant("_PTRS_PER_PTE", PTRS_PER_PTE);
-	linefeed;
+	COMMENT("Size of struct page");
+	DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+	BLANK();
+	COMMENT("Linux mm_struct offsets.");
+	OFFSET(MM_USERS, mm_struct, mm_users);
+	OFFSET(MM_PGD, mm_struct, pgd);
+	OFFSET(MM_CONTEXT, mm_struct, context);
+	BLANK();
+	DEFINE(_PAGE_SIZE, PAGE_SIZE);
+	DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+	BLANK();
+	DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+	DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+	DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+	BLANK();
+	DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+	DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+	DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+	BLANK();
+	DEFINE(_PGD_ORDER, PGD_ORDER);
+	DEFINE(_PMD_ORDER, PMD_ORDER);
+	DEFINE(_PTE_ORDER, PTE_ORDER);
+	BLANK();
+	DEFINE(_PMD_SHIFT, PMD_SHIFT);
+	DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+	BLANK();
+	DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+	DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+	DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+	BLANK();
 }
 
 #ifdef CONFIG_32BIT
 void output_sc_defines(void)
 {
-	text("Linux sigcontext offsets.");
-	offset("SC_REGS", struct sigcontext, sc_regs);
-	offset("SC_FPREGS", struct sigcontext, sc_fpregs);
-	offset("SC_ACX", struct sigcontext, sc_acx);
-	offset("SC_MDHI", struct sigcontext, sc_mdhi);
-	offset("SC_MDLO", struct sigcontext, sc_mdlo);
-	offset("SC_PC", struct sigcontext, sc_pc);
-	offset("SC_FPC_CSR", struct sigcontext, sc_fpc_csr);
-	offset("SC_FPC_EIR", struct sigcontext, sc_fpc_eir);
-	offset("SC_HI1", struct sigcontext, sc_hi1);
-	offset("SC_LO1", struct sigcontext, sc_lo1);
-	offset("SC_HI2", struct sigcontext, sc_hi2);
-	offset("SC_LO2", struct sigcontext, sc_lo2);
-	offset("SC_HI3", struct sigcontext, sc_hi3);
-	offset("SC_LO3", struct sigcontext, sc_lo3);
-	linefeed;
+	COMMENT("Linux sigcontext offsets.");
+	OFFSET(SC_REGS, sigcontext, sc_regs);
+	OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+	OFFSET(SC_ACX, sigcontext, sc_acx);
+	OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+	OFFSET(SC_PC, sigcontext, sc_pc);
+	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+	OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
+	OFFSET(SC_HI1, sigcontext, sc_hi1);
+	OFFSET(SC_LO1, sigcontext, sc_lo1);
+	OFFSET(SC_HI2, sigcontext, sc_hi2);
+	OFFSET(SC_LO2, sigcontext, sc_lo2);
+	OFFSET(SC_HI3, sigcontext, sc_hi3);
+	OFFSET(SC_LO3, sigcontext, sc_lo3);
+	BLANK();
 }
 #endif
 
 #ifdef CONFIG_64BIT
 void output_sc_defines(void)
 {
-	text("Linux sigcontext offsets.");
-	offset("SC_REGS", struct sigcontext, sc_regs);
-	offset("SC_FPREGS", struct sigcontext, sc_fpregs);
-	offset("SC_MDHI", struct sigcontext, sc_mdhi);
-	offset("SC_MDLO", struct sigcontext, sc_mdlo);
-	offset("SC_PC", struct sigcontext, sc_pc);
-	offset("SC_FPC_CSR", struct sigcontext, sc_fpc_csr);
-	linefeed;
+	COMMENT("Linux sigcontext offsets.");
+	OFFSET(SC_REGS, sigcontext, sc_regs);
+	OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+	OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+	OFFSET(SC_PC, sigcontext, sc_pc);
+	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+	BLANK();
 }
 #endif
 
 #ifdef CONFIG_MIPS32_COMPAT
 void output_sc32_defines(void)
 {
-	text("Linux 32-bit sigcontext offsets.");
-	offset("SC32_FPREGS", struct sigcontext32, sc_fpregs);
-	offset("SC32_FPC_CSR", struct sigcontext32, sc_fpc_csr);
-	offset("SC32_FPC_EIR", struct sigcontext32, sc_fpc_eir);
-	linefeed;
+	COMMENT("Linux 32-bit sigcontext offsets.");
+	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
+	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
+	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+	BLANK();
 }
 #endif
 
 void output_signal_defined(void)
 {
-	text("Linux signal numbers.");
-	constant("_SIGHUP", SIGHUP);
-	constant("_SIGINT", SIGINT);
-	constant("_SIGQUIT", SIGQUIT);
-	constant("_SIGILL", SIGILL);
-	constant("_SIGTRAP", SIGTRAP);
-	constant("_SIGIOT", SIGIOT);
-	constant("_SIGABRT", SIGABRT);
-	constant("_SIGEMT", SIGEMT);
-	constant("_SIGFPE", SIGFPE);
-	constant("_SIGKILL", SIGKILL);
-	constant("_SIGBUS", SIGBUS);
-	constant("_SIGSEGV", SIGSEGV);
-	constant("_SIGSYS", SIGSYS);
-	constant("_SIGPIPE", SIGPIPE);
-	constant("_SIGALRM", SIGALRM);
-	constant("_SIGTERM", SIGTERM);
-	constant("_SIGUSR1", SIGUSR1);
-	constant("_SIGUSR2", SIGUSR2);
-	constant("_SIGCHLD", SIGCHLD);
-	constant("_SIGPWR", SIGPWR);
-	constant("_SIGWINCH", SIGWINCH);
-	constant("_SIGURG", SIGURG);
-	constant("_SIGIO", SIGIO);
-	constant("_SIGSTOP", SIGSTOP);
-	constant("_SIGTSTP", SIGTSTP);
-	constant("_SIGCONT", SIGCONT);
-	constant("_SIGTTIN", SIGTTIN);
-	constant("_SIGTTOU", SIGTTOU);
-	constant("_SIGVTALRM", SIGVTALRM);
-	constant("_SIGPROF", SIGPROF);
-	constant("_SIGXCPU", SIGXCPU);
-	constant("_SIGXFSZ", SIGXFSZ);
-	linefeed;
+	COMMENT("Linux signal numbers.");
+	DEFINE(_SIGHUP, SIGHUP);
+	DEFINE(_SIGINT, SIGINT);
+	DEFINE(_SIGQUIT, SIGQUIT);
+	DEFINE(_SIGILL, SIGILL);
+	DEFINE(_SIGTRAP, SIGTRAP);
+	DEFINE(_SIGIOT, SIGIOT);
+	DEFINE(_SIGABRT, SIGABRT);
+	DEFINE(_SIGEMT, SIGEMT);
+	DEFINE(_SIGFPE, SIGFPE);
+	DEFINE(_SIGKILL, SIGKILL);
+	DEFINE(_SIGBUS, SIGBUS);
+	DEFINE(_SIGSEGV, SIGSEGV);
+	DEFINE(_SIGSYS, SIGSYS);
+	DEFINE(_SIGPIPE, SIGPIPE);
+	DEFINE(_SIGALRM, SIGALRM);
+	DEFINE(_SIGTERM, SIGTERM);
+	DEFINE(_SIGUSR1, SIGUSR1);
+	DEFINE(_SIGUSR2, SIGUSR2);
+	DEFINE(_SIGCHLD, SIGCHLD);
+	DEFINE(_SIGPWR, SIGPWR);
+	DEFINE(_SIGWINCH, SIGWINCH);
+	DEFINE(_SIGURG, SIGURG);
+	DEFINE(_SIGIO, SIGIO);
+	DEFINE(_SIGSTOP, SIGSTOP);
+	DEFINE(_SIGTSTP, SIGTSTP);
+	DEFINE(_SIGCONT, SIGCONT);
+	DEFINE(_SIGTTIN, SIGTTIN);
+	DEFINE(_SIGTTOU, SIGTTOU);
+	DEFINE(_SIGVTALRM, SIGVTALRM);
+	DEFINE(_SIGPROF, SIGPROF);
+	DEFINE(_SIGXCPU, SIGXCPU);
+	DEFINE(_SIGXFSZ, SIGXFSZ);
+	BLANK();
 }
 
 void output_irq_cpustat_t_defines(void)
 {
-	text("Linux irq_cpustat_t offsets.");
-	offset("IC_SOFTIRQ_PENDING", irq_cpustat_t, __softirq_pending);
-	size("IC_IRQ_CPUSTAT_T", irq_cpustat_t);
-	linefeed;
+	COMMENT("Linux irq_cpustat_t offsets.");
+	DEFINE(IC_SOFTIRQ_PENDING,
+			offsetof(irq_cpustat_t, __softirq_pending));
+	DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t));
+	BLANK();
 }
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index c11b249..2ab899c 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -45,8 +45,8 @@
  */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 855977c..6537d90 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -143,9 +143,6 @@
 	mips_hpt_frequency = 33000000 * 3 * 5;
 }
 
-/* No other usable initialization hook than this ...  */
-extern void (*late_time_init)(void);
-
 unsigned long ocd_base;
 
 EXPORT_SYMBOL(ocd_base);
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c
index ee2d9f8..2646fcb 100644
--- a/arch/mn10300/kernel/asm-offsets.c
+++ b/arch/mn10300/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
+#include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
@@ -14,14 +15,6 @@
 #include "sigframe.h"
 #include "mn10300-serial.h"
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->")
-
-#define OFFSET(sym, str, mem) \
-	DEFINE(sym, offsetof(struct str, mem));
-
 void foo(void)
 {
 	OFFSET(SIGCONTEXT_d0, sigcontext, d0);
diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c
index dbceae4..c1a8d8f 100644
--- a/arch/mn10300/unit-asb2305/pci-iomap.c
+++ b/arch/mn10300/unit-asb2305/pci-iomap.c
@@ -16,8 +16,8 @@
  */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index eaa79bc..3efc0b7 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -32,6 +32,7 @@
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
 
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
@@ -39,11 +40,6 @@
 #include <asm/pdc.h>
 #include <asm/uaccess.h>
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 #ifdef CONFIG_64BIT
 #define FRAME_SIZE	128
 #else
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 9448d4e..ccd61b9 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -397,10 +397,9 @@
 			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
 	else {
 		struct proc_dir_entry* ent;
-		ent = create_proc_entry("pcxl_dma", 0, proc_gsc_root);
-		if (ent)
-			ent->proc_fops = &proc_pcxl_dma_ops;
-		else
+		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
+				  &proc_pcxl_dma_ops);
+		if (!ent)
 			printk(KERN_WARNING
 				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
 	}
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index f4a8116..9abed07 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -438,8 +438,8 @@
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6213484..59044e7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -30,6 +30,7 @@
 #include <linux/time.h>
 #include <linux/hardirq.h>
 #endif
+#include <linux/kbuild.h>
 
 #include <asm/io.h>
 #include <asm/page.h>
@@ -51,11 +52,6 @@
 #include <asm/iseries/alpaca.h>
 #endif
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1ffacc6..1e656b4 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -591,10 +591,8 @@
 			!firmware_has_feature(FW_FEATURE_ISERIES))
 		mode |= S_IWUSR;
 
-	ent = create_proc_entry("ppc64/lparcfg", mode, NULL);
-	if (ent) {
-		ent->proc_fops = &lparcfg_fops;
-	} else {
+	ent = proc_create("ppc64/lparcfg", mode, NULL, &lparcfg_fops);
+	if (!ent) {
 		printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
 		return -EIO;
 	}
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index f78dfce..c647dde 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -68,12 +68,11 @@
 {
 	struct proc_dir_entry *pde;
 
-	pde = create_proc_entry("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL);
+	pde = proc_create_data("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL,
+			       &page_map_fops, vdso_data);
 	if (!pde)
 		return 1;
-	pde->data = vdso_data;
 	pde->size = PAGE_SIZE;
-	pde->proc_fops = &page_map_fops;
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index f2e3bc7..f9c6abc 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -255,8 +255,6 @@
 
 static int __init proc_rtas_init(void)
 {
-	struct proc_dir_entry *entry;
-
 	if (!machine_is(pseries))
 		return -ENODEV;
 
@@ -264,35 +262,20 @@
 	if (rtas_node == NULL)
 		return -ENODEV;
 
-	entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_progress_operations;
-
-	entry = create_proc_entry("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_clock_operations;
-
-	entry = create_proc_entry("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_poweron_operations;
-
-	entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_sensors_operations;
-
-	entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO,
-				  NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_tone_freq_operations;
-
-	entry = create_proc_entry("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_tone_volume_operations;
-
-	entry = create_proc_entry("ppc64/rtas/rmo_buffer", S_IRUSR, NULL);
-	if (entry)
-		entry->proc_fops = &ppc_rtas_rmo_buf_ops;
-
+	proc_create("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL,
+		    &ppc_rtas_progress_operations);
+	proc_create("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL,
+		    &ppc_rtas_clock_operations);
+	proc_create("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL,
+		    &ppc_rtas_poweron_operations);
+	proc_create("ppc64/rtas/sensors", S_IRUGO, NULL,
+		    &ppc_rtas_sensors_operations);
+	proc_create("ppc64/rtas/frequency", S_IWUSR|S_IRUGO, NULL,
+		    &ppc_rtas_tone_freq_operations);
+	proc_create("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL,
+		    &ppc_rtas_tone_volume_operations);
+	proc_create("ppc64/rtas/rmo_buffer", S_IRUSR, NULL,
+		    &ppc_rtas_rmo_buf_ops);
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 627f126..0a5e22b 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -704,18 +704,11 @@
 static struct proc_dir_entry *create_flash_pde(const char *filename,
 					       const struct file_operations *fops)
 {
-	struct proc_dir_entry *ent = NULL;
-
-	ent = create_proc_entry(filename, S_IRUSR | S_IWUSR, NULL);
-	if (ent != NULL) {
-		ent->proc_fops = fops;
-		ent->owner = THIS_MODULE;
-	}
-
-	return ent;
+	return proc_create(filename, S_IRUSR | S_IWUSR, NULL, fops);
 }
 
 static const struct file_operations rtas_flash_operations = {
+	.owner		= THIS_MODULE,
 	.read		= rtas_flash_read,
 	.write		= rtas_flash_write,
 	.open		= rtas_excl_open,
@@ -723,6 +716,7 @@
 };
 
 static const struct file_operations manage_flash_operations = {
+	.owner		= THIS_MODULE,
 	.read		= manage_flash_read,
 	.write		= manage_flash_write,
 	.open		= rtas_excl_open,
@@ -730,6 +724,7 @@
 };
 
 static const struct file_operations validate_flash_operations = {
+	.owner		= THIS_MODULE,
 	.read		= validate_flash_read,
 	.write		= validate_flash_write,
 	.open		= rtas_excl_open,
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 00528ef..45dcd26 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1063,10 +1063,9 @@
 
 	mod_timer(&spuloadavg_timer, 0);
 
-	entry = create_proc_entry("spu_loadavg", 0, NULL);
+	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
 	if (!entry)
 		goto out_stop_kthread;
-	entry->proc_fops = &spu_loadavg_fops;
 
 	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
 			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 79aa773..aea5286 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -201,10 +201,9 @@
 	if (!sputrace_log)
 		goto out;
 
-	entry = create_proc_entry("sputrace", S_IRUSR, NULL);
+	entry = proc_create("sputrace", S_IRUSR, NULL, &sputrace_fops);
 	if (!entry)
 		goto out_free_log;
-	entry->proc_fops = &sputrace_fops;
 
 	for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
 		struct spu_probe *p = &spu_probes[i];
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index e5b40e3..b0f8a85 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -330,15 +330,11 @@
 
 static int __init proc_lpevents_init(void)
 {
-	struct proc_dir_entry *e;
-
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		return 0;
 
-	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
-	if (e)
-		e->proc_fops = &proc_lpevents_operations;
-
+	proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
+		    &proc_lpevents_operations);
 	return 0;
 }
 __initcall(proc_lpevents_init);
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index c0f2433..1dc7295 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1255,11 +1255,11 @@
 		if (i == 3)	/* no vmlinux entry for 'D' */
 			continue;
 
-		ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf);
+		ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
+				       &proc_vmlinux_operations,
+				       (void *)(long)i);
 		if (!ent)
 			return 1;
-		ent->data = (void *)(long)i;
-		ent->proc_fops = &proc_vmlinux_operations;
 	}
 
 	ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
index f2cde41..91f4c6c 100644
--- a/arch/powerpc/platforms/iseries/proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -110,15 +110,11 @@
 
 static int __init iseries_proc_init(void)
 {
-	struct proc_dir_entry *e;
-
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		return 0;
 
-	e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
-	if (e)
-		e->proc_fops = &proc_titantod_operations;
-
+	proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL,
+		    &proc_titantod_operations);
 	return 0;
 }
 __initcall(iseries_proc_init);
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index df23331..49ff4dc 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -180,15 +180,10 @@
 
 static int __init vio_proc_init(void)
 {
-	struct proc_dir_entry *e;
-
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		return 0;
 
-	e = create_proc_entry("iSeries/config", 0, NULL);
-	if (e)
-		e->proc_fops = &proc_viopath_operations;
-
+	proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
         return 0;
 }
 __initcall(vio_proc_init);
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index a3fd56b..6f544ba 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1259,14 +1259,8 @@
 
 static int __init eeh_init_proc(void)
 {
-	struct proc_dir_entry *e;
-
-	if (machine_is(pseries)) {
-		e = create_proc_entry("ppc64/eeh", 0, NULL);
-		if (e)
-			e->proc_fops = &proc_eeh_operations;
-	}
-
+	if (machine_is(pseries))
+		proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
 	return 0;
 }
 __initcall(eeh_init_proc);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index ac75c10..75769aa 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -512,12 +512,9 @@
 	if (!machine_is(pseries))
 		return 0;
 
-	ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);
-	if (ent) {
-		ent->data = NULL;
+	ent = proc_create("ppc64/ofdt", S_IWUSR, NULL, &ofdt_fops);
+	if (ent)
 		ent->size = 0;
-		ent->proc_fops = &ofdt_fops;
-	}
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index befadd4..7d3e2b0 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -468,10 +468,9 @@
 		return -ENOMEM;
 	}
 
-	entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL);
-	if (entry)
-		entry->proc_fops = &proc_rtas_log_operations;
-	else
+	entry = proc_create("ppc64/rtas/error_log", S_IRUSR, NULL,
+			    &proc_rtas_log_operations);
+	if (!entry)
 		printk(KERN_ERR "Failed to create error_log proc entry\n");
 
 	if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index a51a177..8dcbdd6 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -18,6 +18,8 @@
 #include <linux/suspend.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/kbuild.h>
+
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -26,11 +28,6 @@
 #include <asm/thread_info.h>
 #include <asm/vdso_datapage.h>
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int
 main(void)
 {
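
[Annotation: every asm-offsets.c touched in this merge drops its private copy of these macros in favour of the shared <linux/kbuild.h>. The header provides roughly the following (a sketch reconstructed from the macros being removed here, not a verbatim quote of the header):

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))
]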
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 50ce83f..df3ef6d 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -1121,8 +1121,8 @@
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len)
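
[Annotation: the pci_iomap() conversions in this merge (ppc here, sh, sparc, sparc64 and v850 below) widen the local start/len variables to resource_size_t so that BARs above 4GB are not truncated on 32-bit kernels with 64-bit resources. At this point in the tree the typedef is roughly the following (a sketch of linux/types.h, assumed rather than quoted from this patch):

	#ifdef CONFIG_RESOURCES_64BIT
	typedef u64 resource_size_t;
	#else
	typedef u32 resource_size_t;
	#endif
]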
diff --git a/arch/ppc/platforms/sbc82xx.c b/arch/ppc/platforms/sbc82xx.c
index 0df6aac..24f6e06 100644
--- a/arch/ppc/platforms/sbc82xx.c
+++ b/arch/ppc/platforms/sbc82xx.c
@@ -30,8 +30,6 @@
 
 extern unsigned char __res[sizeof(bd_t)];
 
-extern void (*late_time_init)(void);
-
 #ifdef CONFIG_GEN_RTC
 TODC_ALLOC();
 
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 1375f8a..fa28eca 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -5,44 +5,38 @@
  */
 
 #include <linux/sched.h>
-
-/* Use marker if you need to separate the values later */
-
-#define DEFINE(sym, val, marker) \
-	asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
+#include <linux/kbuild.h>
 
 int main(void)
 {
-	DEFINE(__THREAD_info, offsetof(struct task_struct, stack),);
-	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
-	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
+	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
+	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
+	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
 	DEFINE(__THREAD_mm_segment,
-	       offsetof(struct task_struct, thread.mm_segment),);
+	       offsetof(struct task_struct, thread.mm_segment));
 	BLANK();
-	DEFINE(__TASK_pid, offsetof(struct task_struct, pid),);
+	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),);
-	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),);
-	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),);
+	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
+	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
+	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
 	BLANK();
-	DEFINE(__TI_task, offsetof(struct thread_info, task),);
-	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
-	DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
-	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
-	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
+	DEFINE(__TI_task, offsetof(struct thread_info, task));
+	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
+	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
+	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
 	BLANK();
-	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
-	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
-	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
-	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
-	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),);
-	DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),);
-	DEFINE(__PT_SIZE, sizeof(struct pt_regs),);
+	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
+	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
+	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
+	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
+	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
+	DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap));
+	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
-	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),);
-	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),);
-	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),);
+	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
+	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
+	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
 	return 0;
 }
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index c36d812..c59a86d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -60,8 +60,6 @@
 /*
  * Switch to the asynchronous interrupt stack for softirq execution.
  */
-extern void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags, old, new;
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 49b435c..08d2e73 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -191,8 +191,8 @@
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (unlikely(!len || !start))
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index dc6725c..57cf0e0 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -11,13 +11,10 @@
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/kbuild.h>
+
 #include <asm/thread_info.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
 	/* offsets into the thread_info struct */
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 9bf19b0..a2a99e4 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -200,8 +200,6 @@
 	hardirq_ctx[cpu] = NULL;
 }
 
-extern asmlinkage void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index 6773ed7..cd3f769 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -12,11 +12,7 @@
 
 #include <linux/sched.h>
 // #include <linux/mm.h>
-
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
+#include <linux/kbuild.h>
 
 int foo(void)
 {
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
index 54501c1..9ef37e1 100644
--- a/arch/sparc/lib/iomap.c
+++ b/arch/sparc/lib/iomap.c
@@ -21,8 +21,8 @@
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c
index ac556db..7120ebb 100644
--- a/arch/sparc64/lib/iomap.c
+++ b/arch/sparc64/lib/iomap.c
@@ -21,8 +21,8 @@
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 984f80e..6540d2c 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -59,7 +59,7 @@
 {
 	struct proc_dir_entry *ent;
 
-	ent = create_proc_entry("exitcode", 0600, &proc_root);
+	ent = create_proc_entry("exitcode", 0600, NULL);
 	if (ent == NULL) {
 		printk(KERN_WARNING "make_proc_exitcode : Failed to register "
 		       "/proc/exitcode\n");
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index e8cb9ff..83603cf 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -364,7 +364,7 @@
 	if (!sysemu_supported)
 		return 0;
 
-	ent = create_proc_entry("sysemu", 0600, &proc_root);
+	ent = create_proc_entry("sysemu", 0600, NULL);
 
 	if (ent == NULL)
 	{
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index e066e84..0d0cea2 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/clockchips.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/threads.h>
@@ -109,8 +110,6 @@
 	clockevents_register_device(&itimer_clockevent);
 }
 
-extern void (*late_time_init)(void);
-
 void __init time_init(void)
 {
 	long long nsecs;
diff --git a/arch/v850/kernel/asm-offsets.c b/arch/v850/kernel/asm-offsets.c
index cee5c31..581e698 100644
--- a/arch/v850/kernel/asm-offsets.c
+++ b/arch/v850/kernel/asm-offsets.c
@@ -13,14 +13,11 @@
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
+
 #include <asm/irq.h>
 #include <asm/errno.h>
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main (void)
 {
 	/* offsets into the task struct */
diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c
index 7165478..687e367 100644
--- a/arch/v850/kernel/rte_mb_a_pci.c
+++ b/arch/v850/kernel/rte_mb_a_pci.c
@@ -790,8 +790,8 @@
 
 void __iomem *pci_iomap (struct pci_dev *dev, int bar, unsigned long max)
 {
-	unsigned long start = pci_resource_start (dev, bar);
-	unsigned long len = pci_resource_len (dev, bar);
+	resource_size_t start = pci_resource_start (dev, bar);
+	resource_size_t len = pci_resource_len (dev, bar);
 
 	if (!start || len == 0)
 		return 0;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a12dbb2..f70e3e3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -537,9 +537,6 @@
 	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
 	  If unsure, say Y.
 
-config IOMMU_HELPER
-	def_bool (CALGARY_IOMMU || GART_IOMMU)
-
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	bool
@@ -550,6 +547,8 @@
 	  access 32-bits of memory can be used on systems with more than
 	  3 GB of memory. If unsure, say Y.
 
+config IOMMU_HELPER
+	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB)
 
 config NR_CPUS
 	int "Maximum number of CPUs (2-255)"
@@ -1505,6 +1504,10 @@
 config PCI_GOANY
 	bool "Any"
 
+config PCI_GOOLPC
+	bool "OLPC"
+	depends on OLPC
+
 endchoice
 
 config PCI_BIOS
@@ -1514,12 +1517,17 @@
 # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
 config PCI_DIRECT
 	def_bool y
-	depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+	depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY || PCI_GOOLPC) || X86_VISWS)
 
 config PCI_MMCONFIG
 	def_bool y
 	depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
 
+config PCI_OLPC
+	bool
+	depends on PCI && PCI_GOOLPC
+	default y
+
 config PCI_DOMAINS
 	def_bool y
 	depends on PCI
@@ -1639,6 +1647,13 @@
 	  MFGPTs have a better resolution and max interval than the
 	  generic PIT, and are suitable for use as high-res timers.
 
+config OLPC
+	bool "One Laptop Per Child support"
+	default n
+	help
+	  Add support for detecting the unique features of the OLPC
+	  XO hardware.
+
 endif # X86_32
 
 config K8_NB
diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c
index d84a48e..03399d6 100644
--- a/arch/x86/boot/edd.c
+++ b/arch/x86/boot/edd.c
@@ -126,17 +126,25 @@
 {
 	char eddarg[8];
 	int do_mbr = 1;
+#ifdef CONFIG_EDD_OFF
+	int do_edd = 0;
+#else
 	int do_edd = 1;
+#endif
 	int be_quiet;
 	int devno;
 	struct edd_info ei, *edp;
 	u32 *mbrptr;
 
 	if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) {
-		if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip"))
+		if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
+			do_edd = 1;
 			do_mbr = 0;
+		}
 		else if (!strcmp(eddarg, "off"))
 			do_edd = 0;
+		else if (!strcmp(eddarg, "on"))
+			do_edd = 1;
 	}
 
 	be_quiet = cmdline_find_option_bool("quiet");
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index fa19c38..350eb1b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -91,6 +91,8 @@
 obj-$(CONFIG_SCx200)		+= scx200.o
 scx200-y			+= scx200_32.o
 
+obj-$(CONFIG_OLPC)		+= olpc.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 670c3c3..9258808 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include "sigframe.h"
 #include <asm/pgtable.h>
@@ -23,14 +24,6 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-	DEFINE(sym, offsetof(struct str, mem));
-
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
 
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 494e1e0..f126c05 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -10,6 +10,7 @@
 #include <linux/errno.h> 
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
+#include <linux/kbuild.h>
 #include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
@@ -17,14 +18,6 @@
 #include <asm/ia32.h>
 #include <asm/bootparam.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-	DEFINE(sym, offsetof(struct str, mem))
-
 #define __NO_STUBS 1
 #undef __SYSCALL
 #undef _ASM_X86_64_UNISTD_H_
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 1960f19..84c480b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -424,7 +424,7 @@
 		return -ENODEV;
 
 	proc_root_mtrr =
-		proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops);
+		proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
 
 	if (proc_root_mtrr)
 		proc_root_mtrr->owner = THIS_MODULE;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 00bda7b..147352d 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -190,8 +190,6 @@
 	hardirq_ctx[cpu] = NULL;
 }
 
-extern asmlinkage void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
new file mode 100644
index 0000000..3e66722
--- /dev/null
+++ b/arch/x86/kernel/olpc.c
@@ -0,0 +1,260 @@
+/*
+ * Support for the OLPC DCON and OLPC EC access
+ *
+ * Copyright © 2006  Advanced Micro Devices, Inc.
+ * Copyright © 2007-2008  Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/geode.h>
+#include <asm/olpc.h>
+
+#ifdef CONFIG_OPEN_FIRMWARE
+#include <asm/ofw.h>
+#endif
+
+struct olpc_platform_t olpc_platform_info;
+EXPORT_SYMBOL_GPL(olpc_platform_info);
+
+static DEFINE_SPINLOCK(ec_lock);
+
+/* what the timeout *should* be (in ms) */
+#define EC_BASE_TIMEOUT 20
+
+/* the timeout that bugs in the EC might force us to actually use */
+static int ec_timeout = EC_BASE_TIMEOUT;
+
+static int __init olpc_ec_timeout_set(char *str)
+{
+	if (get_option(&str, &ec_timeout) != 1) {
+		ec_timeout = EC_BASE_TIMEOUT;
+		printk(KERN_ERR "olpc-ec:  invalid argument to "
+				"'olpc_ec_timeout=', ignoring!\n");
+	}
+	printk(KERN_DEBUG "olpc-ec:  using %d ms delay for EC commands.\n",
+			ec_timeout);
+	return 1;
+}
+__setup("olpc_ec_timeout=", olpc_ec_timeout_set);
+
+/*
+ * These {i,o}bf_status functions return whether the buffers are full or not.
+ */
+
+static inline unsigned int ibf_status(unsigned int port)
+{
+	return !!(inb(port) & 0x02);
+}
+
+static inline unsigned int obf_status(unsigned int port)
+{
+	return inb(port) & 0x01;
+}
+
+#define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
+static int __wait_on_ibf(unsigned int line, unsigned int port, int desired)
+{
+	unsigned int timeo;
+	int state = ibf_status(port);
+
+	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+		mdelay(1);
+		state = ibf_status(port);
+	}
+
+	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+		printk(KERN_WARNING "olpc-ec:  %d: waited %u ms for IBF!\n",
+				line, ec_timeout - timeo);
+	}
+
+	return !(state == desired);
+}
+
+#define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d))
+static int __wait_on_obf(unsigned int line, unsigned int port, int desired)
+{
+	unsigned int timeo;
+	int state = obf_status(port);
+
+	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+		mdelay(1);
+		state = obf_status(port);
+	}
+
+	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+		printk(KERN_WARNING "olpc-ec:  %d: waited %u ms for OBF!\n",
+				line, ec_timeout - timeo);
+	}
+
+	return !(state == desired);
+}
+
+/*
+ * This allows the kernel to run Embedded Controller commands.  The EC is
+ * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the
+ * available EC commands are here:
+ * <http://wiki.laptop.org/go/Ec_specification>.  Unfortunately, while
+ * OpenFirmware's source is available, the EC's is not.
+ */
+int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
+		unsigned char *outbuf,  size_t outlen)
+{
+	unsigned long flags;
+	int ret = -EIO;
+	int i;
+
+	spin_lock_irqsave(&ec_lock, flags);
+
+	/* Clear OBF */
+	for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
+		inb(0x68);
+	if (i == 10) {
+		printk(KERN_ERR "olpc-ec:  timeout while attempting to "
+				"clear OBF flag!\n");
+		goto err;
+	}
+
+	if (wait_on_ibf(0x6c, 0)) {
+		printk(KERN_ERR "olpc-ec:  timeout waiting for EC to "
+				"quiesce!\n");
+		goto err;
+	}
+
+restart:
+	/*
+	 * Note that if we time out during any IBF checks, that's a failure;
+	 * we have to return.  There's no way for the kernel to clear that.
+	 *
+	 * If we time out during an OBF check, we can restart the command;
+	 * reissuing it will clear the OBF flag, and we should be alright.
+	 * The OBF flag will sometimes misbehave due to what we believe
+	 * is a hardware quirk..
+	 */
+	printk(KERN_DEBUG "olpc-ec:  running cmd 0x%x\n", cmd);
+	outb(cmd, 0x6c);
+
+	if (wait_on_ibf(0x6c, 0)) {
+		printk(KERN_ERR "olpc-ec:  timeout waiting for EC to read "
+				"command!\n");
+		goto err;
+	}
+
+	if (inbuf && inlen) {
+		/* write data to EC */
+		for (i = 0; i < inlen; i++) {
+			if (wait_on_ibf(0x6c, 0)) {
+				printk(KERN_ERR "olpc-ec:  timeout waiting for"
+						" EC accept data!\n");
+				goto err;
+			}
+			printk(KERN_DEBUG "olpc-ec:  sending cmd arg 0x%x\n",
+					inbuf[i]);
+			outb(inbuf[i], 0x68);
+		}
+	}
+	if (outbuf && outlen) {
+		/* read data from EC */
+		for (i = 0; i < outlen; i++) {
+			if (wait_on_obf(0x6c, 1)) {
+				printk(KERN_ERR "olpc-ec:  timeout waiting for"
+						" EC to provide data!\n");
+				goto restart;
+			}
+			outbuf[i] = inb(0x68);
+			printk(KERN_DEBUG "olpc-ec:  received 0x%x\n",
+					outbuf[i]);
+		}
+	}
+
+	ret = 0;
+err:
+	spin_unlock_irqrestore(&ec_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_cmd);
+
+#ifdef CONFIG_OPEN_FIRMWARE
+static void __init platform_detect(void)
+{
+	size_t propsize;
+	u32 rev;
+
+	if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
+			&propsize) || propsize != 4) {
+		printk(KERN_ERR "ofw: getprop call failed!\n");
+		rev = 0;
+	}
+	olpc_platform_info.boardrev = be32_to_cpu(rev);
+}
+#else
+static void __init platform_detect(void)
+{
+	/* stopgap until OFW support is added to the kernel */
+	olpc_platform_info.boardrev = be32_to_cpu(0xc2);
+}
+#endif
+
+static int __init olpc_init(void)
+{
+	unsigned char *romsig;
+
+	/* The ioremap check is dangerous; limit what we run it on */
+	if (!is_geode() || geode_has_vsa2())
+		return 0;
+
+	spin_lock_init(&ec_lock);
+
+	romsig = ioremap(0xffffffc0, 16);
+	if (!romsig)
+		return 0;
+
+	if (strncmp(romsig, "CL1   Q", 7))
+		goto unmap;
+	if (strncmp(romsig+6, romsig+13, 3)) {
+		printk(KERN_INFO "OLPC BIOS signature looks invalid.  "
+				"Assuming not OLPC\n");
+		goto unmap;
+	}
+
+	printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig);
+	olpc_platform_info.flags |= OLPC_F_PRESENT;
+
+	/* get the platform revision */
+	platform_detect();
+
+	/* assume B1 and above models always have a DCON */
+	if (olpc_board_at_least(olpc_board(0xb1)))
+		olpc_platform_info.flags |= OLPC_F_DCON;
+
+	/* get the EC revision */
+	olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
+			(unsigned char *) &olpc_platform_info.ecver, 1);
+
+	/* check to see if the VSA exists */
+	if (geode_has_vsa2())
+		olpc_platform_info.flags |= OLPC_F_VSA;
+
+	printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
+			((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
+			olpc_platform_info.boardrev >> 4,
+			olpc_platform_info.ecver);
+
+unmap:
+	iounmap(romsig);
+	return 0;
+}
+
+postcore_initcall(olpc_init);
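
[Annotation: olpc_ec_cmd(), exported above, is typically called the way olpc_init() itself calls it for the firmware revision; a minimal usage sketch (EC_FIRMWARE_REV comes from asm/olpc.h, the printk is illustrative only):

	unsigned char ecver;

	/* no input bytes, one output byte: the EC firmware revision */
	if (olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ecver, 1) == 0)
		printk(KERN_INFO "olpc: EC firmware revision %x\n", ecver);
]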
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 1a89e93..2ff21f3 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -115,7 +115,6 @@
 	return IRQ_HANDLED;
 }
 
-extern void (*late_time_init)(void);
 /* Duplicate of time_init() below, with hpet_enable part added */
 void __init hpet_time_init(void)
 {
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index b7ab3c3..fad3674 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -209,12 +209,6 @@
 	EXIT_DATA
   }
 
-/* vdso blob that is mapped into user space */
-  vdso_start = . ;
-  .vdso  : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
-  . = ALIGN(PAGE_SIZE);
-  vdso_end = .;
-
 #ifdef CONFIG_BLK_DEV_INITRD
   . = ALIGN(PAGE_SIZE);
   __initramfs_start = .;
diff --git a/arch/x86/pci/Makefile_32 b/arch/x86/pci/Makefile_32
index cdd6828..b859047 100644
--- a/arch/x86/pci/Makefile_32
+++ b/arch/x86/pci/Makefile_32
@@ -3,6 +3,7 @@
 obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
 obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig_32.o direct.o mmconfig-shared.o
 obj-$(CONFIG_PCI_DIRECT)	+= direct.o
+obj-$(CONFIG_PCI_OLPC)		+= olpc.o
 
 pci-y				:= fixup.o
 pci-$(CONFIG_ACPI)		+= acpi.o
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index 3de9f9b..0f5f7dd 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -14,6 +14,9 @@
 #ifdef CONFIG_PCI_MMCONFIG
 	pci_mmcfg_init(type);
 #endif
+#ifdef CONFIG_PCI_OLPC
+	pci_olpc_init();
+#endif
 	if (raw_pci_ops)
 		return 0;
 #ifdef CONFIG_PCI_BIOS
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
new file mode 100644
index 0000000..5e76365
--- /dev/null
+++ b/arch/x86/pci/olpc.c
@@ -0,0 +1,313 @@
+/*
+ * Low-level PCI config space access for OLPC systems who lack the VSA
+ * PCI virtualization software.
+ *
+ * Copyright © 2006  Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The AMD Geode chipset (ie: GX2 processor, cs5536 I/O companion device)
+ * has some I/O functions (display, southbridge, sound, USB HCIs, etc)
+ * that more or less behave like PCI devices, but the hardware doesn't
+ * directly implement the PCI configuration space headers.  AMD provides
+ * "VSA" (Virtual System Architecture) software that emulates PCI config
+ * space for these devices, by trapping I/O accesses to PCI config register
+ * (CF8/CFC) and running some code in System Management Mode interrupt state.
+ * On the OLPC platform, we don't want to use that VSA code because
+ * (a) it slows down suspend/resume, and (b) recompiling it requires special
+ * compilers that are hard to get.  So instead of letting the complex VSA
+ * code simulate the PCI config registers for the on-chip devices, we
+ * just simulate them the easy way, by inserting the code into the
+ * pci_write_config and pci_read_config path.  Most of the config registers
+ * are read-only anyway, so the bulk of the simulation is just table lookup.
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/olpc.h>
+#include <asm/geode.h>
+#include "pci.h"
+
+/*
+ * In the tables below, the first two line (8 longwords) are the
+ * size masks that are used when the higher level PCI code determines
+ * the size of the region by writing ~0 to a base address register
+ * and reading back the result.
+ *
+ * The following lines are the values that are read during normal
+ * PCI config access cycles, i.e. not after just having written
+ * ~0 to a base address register.
+ */
+
+static const uint32_t lxnb_hdr[] = {  /* dev 1 function 0 - devfn = 8 */
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x281022, 0x2200005, 0x6000021, 0x80f808,	/* AMD Vendor ID */
+	0x0,	0x0,	0x0,	0x0,   /* No virtual registers, hence no BAR */
+	0x0,	0x0,	0x0,	0x28100b,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t gxnb_hdr[] = {  /* dev 1 function 0 - devfn = 8 */
+	0xfffffffd, 0x0, 0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x28100b, 0x2200005, 0x6000021, 0x80f808,	/* NSC Vendor ID */
+	0xac1d,	0x0,	0x0,	0x0,  /* I/O BAR - base of virtual registers */
+	0x0,	0x0,	0x0,	0x28100b,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t lxfb_hdr[] = {  /* dev 1 function 1 - devfn = 9 */
+	0xff000008, 0xffffc000, 0xffffc000, 0xffffc000,
+	0xffffc000,	0x0,	0x0,	0x0,
+
+	0x20811022, 0x2200003, 0x3000000, 0x0,		/* AMD Vendor ID */
+	0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
+	0xfe00c000, 0x0, 0x0,	0x30100b,		/* VIP */
+	0x0,	0x0,	0x0,	0x10e,	   /* INTA, IRQ14 for graphics accel */
+	0x0,	0x0,	0x0,	0x0,
+	0x3d0,	0x3c0,	0xa0000, 0x0,	    /* VG IO, VG IO, EGA FB, MONO FB */
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t gxfb_hdr[] = {  /* dev 1 function 1 - devfn = 9 */
+	0xff800008, 0xffffc000, 0xffffc000, 0xffffc000,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x30100b, 0x2200003, 0x3000000, 0x0,		/* NSC Vendor ID */
+	0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000,	/* FB, GP, VG, DF */
+	0x0,	0x0,	0x0,	0x30100b,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x3d0,	0x3c0,	0xa0000, 0x0,  	    /* VG IO, VG IO, EGA FB, MONO FB */
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t aes_hdr[] = {	/* dev 1 function 2 - devfn = 0xa */
+	0xffffc000, 0x0, 0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x20821022, 0x2a00006, 0x10100000, 0x8,		/* NSC Vendor ID */
+	0xfe010000, 0x0, 0x0,	0x0,			/* AES registers */
+	0x0,	0x0,	0x0,	0x20821022,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+};
+
+
+static const uint32_t isa_hdr[] = {  /* dev f function 0 - devfn = 78 */
+	0xfffffff9, 0xffffff01, 0xffffffc1, 0xffffffe1,
+	0xffffff81, 0xffffffc1, 0x0, 0x0,
+
+	0x20901022, 0x2a00049, 0x6010003, 0x802000,
+	0x18b1,	0x1001,	0x1801,	0x1881,	/* SMB-8   GPIO-256 MFGPT-64  IRQ-32 */
+	0x1401,	0x1841,	0x0,	0x20901022,		/* PMS-128 ACPI-64 */
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0xaa5b,			/* IRQ steering */
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t ac97_hdr[] = {  /* dev f function 3 - devfn = 7b */
+	0xffffff81, 0x0, 0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x20931022, 0x2a00041, 0x4010001, 0x0,
+	0x1481,	0x0,	0x0,	0x0,			/* I/O BAR-128 */
+	0x0,	0x0,	0x0,	0x20931022,
+	0x0,	0x0,	0x0,	0x205,			/* IntB, IRQ5 */
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t ohci_hdr[] = {  /* dev f function 4 - devfn = 7c */
+	0xfffff000, 0x0, 0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x20941022, 0x2300006, 0xc031002, 0x0,
+	0xfe01a000, 0x0, 0x0,	0x0,			/* MEMBAR-1000 */
+	0x0,	0x0,	0x0,	0x20941022,
+	0x0,	0x40,	0x0,	0x40a,			/* CapPtr INT-D, IRQA */
+	0xc8020001, 0x0, 0x0,	0x0,	/* Capabilities - 40 is R/O,
+					   44 is mask 8103 (power control) */
+	0x0,	0x0,	0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+};
+
+static const uint32_t ehci_hdr[] = {  /* dev f function 4 - devfn = 7d */
+	0xfffff000, 0x0, 0x0,	0x0,
+	0x0,	0x0,	0x0,	0x0,
+
+	0x20951022, 0x2300006, 0xc032002, 0x0,
+	0xfe01b000, 0x0, 0x0,	0x0,			/* MEMBAR-1000 */
+	0x0,	0x0,	0x0,	0x20951022,
+	0x0,	0x40,	0x0,	0x40a,			/* CapPtr INT-D, IRQA */
+	0xc8020001, 0x0, 0x0,	0x0,	/* Capabilities - 40 is R/O, 44 is
+					   mask 8103 (power control) */
+#if 0
+	0x1,	0x40080000, 0x0, 0x0,	/* EECP - see EHCI spec section 2.1.7 */
+#endif
+	0x01000001, 0x0, 0x0,	0x0,	/* EECP - see EHCI spec section 2.1.7 */
+	0x2020,	0x0,	0x0,	0x0,	/* (EHCI page 8) 60 SBRN (R/O),
+					   61 FLADJ (R/W), PORTWAKECAP  */
+};
+
+static uint32_t ff_loc = ~0;
+static uint32_t zero_loc;
+static int bar_probing;		/* Set after a write of ~0 to a BAR */
+static int is_lx;
+
+#define NB_SLOT 0x1	/* Northbridge - GX chip - Device 1 */
+#define SB_SLOT 0xf	/* Southbridge - CS5536 chip - Device F */
+
+static int is_simulated(unsigned int bus, unsigned int devfn)
+{
+	return (!bus && ((PCI_SLOT(devfn) == NB_SLOT) ||
+			(PCI_SLOT(devfn) == SB_SLOT)));
+}
+
+static uint32_t *hdr_addr(const uint32_t *hdr, int reg)
+{
+	uint32_t addr;
+
+	/*
+	 * This is a little bit tricky.  The header maps consist of
+	 * 0x20 bytes of size masks, followed by 0x70 bytes of header data.
+	 * In the normal case, when not probing a BAR's size, we want
+	 * to access the header data, so we add 0x20 to the reg offset,
+	 * thus skipping the size mask area.
+	 * In the BAR probing case, we want to access the size mask for
+	 * the BAR, so we subtract 0x10 (the config header offset for
+	 * BAR0), and don't skip the size mask area.
+	 */
+
+	addr = (uint32_t)hdr + reg + (bar_probing ? -0x10 : 0x20);
+
+	bar_probing = 0;
+	return (uint32_t *)addr;
+}
+
+static int pci_olpc_read(unsigned int seg, unsigned int bus,
+		unsigned int devfn, int reg, int len, uint32_t *value)
+{
+	uint32_t *addr;
+
+	/* Use the hardware mechanism for non-simulated devices */
+	if (!is_simulated(bus, devfn))
+		return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
+
+	/*
+	 * No device has config registers past 0x70, so we save table space
+	 * by not storing entries for the nonexistent registers
+	 */
+	if (reg >= 0x70)
+		addr = &zero_loc;
+	else {
+		switch (devfn) {
+		case  0x8:
+			addr = hdr_addr(is_lx ? lxnb_hdr : gxnb_hdr, reg);
+			break;
+		case  0x9:
+			addr = hdr_addr(is_lx ? lxfb_hdr : gxfb_hdr, reg);
+			break;
+		case  0xa:
+			addr = is_lx ? hdr_addr(aes_hdr, reg) : &ff_loc;
+			break;
+		case 0x78:
+			addr = hdr_addr(isa_hdr, reg);
+			break;
+		case 0x7b:
+			addr = hdr_addr(ac97_hdr, reg);
+			break;
+		case 0x7c:
+			addr = hdr_addr(ohci_hdr, reg);
+			break;
+		case 0x7d:
+			addr = hdr_addr(ehci_hdr, reg);
+			break;
+		default:
+			addr = &ff_loc;
+			break;
+		}
+	}
+	switch (len) {
+	case 1:
+		*value = *(uint8_t *)addr;
+		break;
+	case 2:
+		*value = *(uint16_t *)addr;
+		break;
+	case 4:
+		*value = *addr;
+		break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int pci_olpc_write(unsigned int seg, unsigned int bus,
+		unsigned int devfn, int reg, int len, uint32_t value)
+{
+	/* Use the hardware mechanism for non-simulated devices */
+	if (!is_simulated(bus, devfn))
+		return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
+
+	/* XXX we may want to extend this to simulate EHCI power management */
+
+	/*
+	 * Mostly we just discard writes, but if the write is a size probe
+	 * (i.e. writing ~0 to a BAR), we remember it and arrange to return
+	 * the appropriate size mask on the next read.  This is cheating
+	 * to some extent, because it depends on the fact that the next
+	 * access after such a write will always be a read to the same BAR.
+	 */
+
+	if ((reg >= 0x10) && (reg < 0x2c)) {
+		/* write is to a BAR */
+		if (value == ~0)
+			bar_probing = 1;
+	} else {
+		/*
+		 * No warning on writes to ROM BAR, CMD, LATENCY_TIMER,
+		 * CACHE_LINE_SIZE, or PM registers.
+		 */
+		if ((reg != PCI_ROM_ADDRESS) && (reg != PCI_COMMAND_MASTER) &&
+				(reg != PCI_LATENCY_TIMER) &&
+				(reg != PCI_CACHE_LINE_SIZE) && (reg != 0x44))
+			printk(KERN_WARNING "OLPC PCI: Config write to devfn"
+				" %x reg %x value %x\n", devfn, reg, value);
+	}
+
+	return 0;
+}
+
+static struct pci_raw_ops pci_olpc_conf = {
+	.read =	pci_olpc_read,
+	.write = pci_olpc_write,
+};
+
+void __init pci_olpc_init(void)
+{
+	if (!machine_is_olpc() || olpc_has_vsa())
+		return;
+
+	printk(KERN_INFO "PCI: Using configuration type OLPC\n");
+	raw_pci_ops = &pci_olpc_conf;
+	is_lx = is_geode_lx();
+}
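
[Annotation: the bar_probing handling in pci_olpc_write()/hdr_addr() exists because the generic PCI core sizes a BAR with a write-ones/read-mask/restore sequence. Roughly the standard handshake being simulated (a sketch, not code from this patch):

	u32 orig, mask;

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &orig);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &mask);	/* size mask */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, orig);
	/* the region size is the lowest set bit of mask once the flag bits
	 * are cleared, e.g. a returned mask of 0xffffc000 means 16KB */
]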
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
index c4bddae..7d84e81 100644
--- a/arch/x86/pci/pci.h
+++ b/arch/x86/pci/pci.h
@@ -98,6 +98,7 @@
 extern void pci_direct_init(int type);
 extern void pci_pcbios_init(void);
 extern void pci_mmcfg_init(int type);
+extern void pci_olpc_init(void);
 
 /* pci-mmconfig.c */
 
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 4b1620a..1d3aa6b 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,2 +1,10 @@
-	.section ".vdso","a"
+#include <linux/init.h>
+
+__INITDATA
+
+	.globl vdso_start, vdso_end
+vdso_start:
 	.incbin "arch/x86/vdso/vdso.so"
+vdso_end:
+
+__FINIT
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index ef63ada..070ff8a 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -19,12 +19,11 @@
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
 #include <linux/mm.h>
+#include <linux/kbuild.h>
 
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
-#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
 int main(void)
 {
 	/* struct pt_regs */
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 55c5f1f..66e5528 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -53,7 +53,7 @@
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
 	if (!q->ordseq)
 		return 0;
@@ -143,10 +143,8 @@
 		end_io = post_flush_end_io;
 	}
 
+	blk_rq_init(q, rq);
 	rq->cmd_flags = REQ_HARDBARRIER;
-	rq_init(q, rq);
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
@@ -167,14 +165,11 @@
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
-	rq->cmd_flags = 0;
-	rq_init(q, rq);
+	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
 	if (q->ordered & QUEUE_ORDERED_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 2a438a9..5d09f8c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -107,41 +107,21 @@
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
+	rq->cmd = rq->__cmd;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
@@ -194,7 +174,7 @@
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO "  cdb: ");
-		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
@@ -220,7 +200,8 @@
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -235,9 +216,10 @@
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -333,15 +315,16 @@
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -395,11 +378,8 @@
  * blk_run_queue - run a single device queue
  * @q:	The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -407,15 +387,28 @@
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -428,7 +421,7 @@
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
@@ -607,6 +600,8 @@
 	if (!rq)
 		return NULL;
 
+	blk_rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +784,6 @@
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
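
[Annotation: the queue_flags manipulation above moves from atomic test_and_set_bit()/clear_bit() to the new queue_flag_set()/queue_flag_clear() helpers, which use non-atomic bit ops on the assumption that queue_lock is held -- hence the explicit spin_lock_irq() added around the elevator switch further down. Their shape is roughly the following (a sketch of the blkdev.h helpers from memory, not quoted from this diff):

	static inline void queue_flag_set(unsigned int flag,
					  struct request_queue *q)
	{
		WARN_ON_ONCE(!queue_is_locked(q));
		__set_bit(flag, &q->queue_flags);
	}

	static inline void queue_flag_clear(unsigned int flag,
					    struct request_queue *q)
	{
		WARN_ON_ONCE(!queue_is_locked(q));
		__clear_bit(flag, &q->queue_flags);
	}
]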
diff --git a/block/blk-map.c b/block/blk-map.c
index 3c942bd..0b1af5a 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -255,10 +255,18 @@
  * @kbuf:	the kernel buffer
  * @len:	length of user data
  * @gfp_mask:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
+	unsigned long kaddr;
+	unsigned int alignment;
+	int reading = rq_data_dir(rq) == READ;
+	int do_copy = 0;
 	struct bio *bio;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	kaddr = (unsigned long)kbuf;
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	do_copy = ((kaddr & alignment) || (len & alignment));
+
+	if (do_copy)
+		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	else
+		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	if (do_copy)
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	blk_rq_bio_prep(q, rq, bio);
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
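
[Annotation: the new copy path in blk_rq_map_kern() only kicks in when the buffer fails the queue's DMA alignment test. A small worked example, assuming the common default where queue_dma_alignment() returns 511 (512-byte alignment) -- the numbers are illustrative, not from this patch:

	unsigned long kaddr = (unsigned long)kbuf;
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;

	/* with alignment == 511: a buffer at ...2200 of 4096 bytes passes
	 * both tests and is mapped in place via bio_map_kern(); a buffer at
	 * ...2104, or one of 100 bytes, fails and is bounced through
	 * bio_copy_kern() with REQ_COPY_USER set on the request */
	int do_copy = (kaddr & alignment) || (len & alignment);
]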
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b5c5c4a..73b2356 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -55,7 +55,7 @@
 	if (!rq->bio)
 		return;
 
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
 	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5713f7e..6089384 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -14,7 +14,6 @@
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -288,7 +287,7 @@
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index fc41d83..e85c401 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -135,6 +135,25 @@
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+				    size_t count)
+{
+	unsigned long nm;
+	ssize_t ret = queue_var_store(&nm, page, count);
+
+	if (nm)
+	       set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+	else
+	       clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+	return ret;
+}
+
 
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@
 	.show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_nomerges_show,
+	.store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -177,6 +202,7 @@
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_nomerges_entry.attr,
 	NULL,
 };
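
[Annotation: the new attribute appears as /sys/block/<dev>/queue/nomerges; a non-zero write sets QUEUE_FLAG_NOMERGES, and the elevator consults it before attempting any merge lookup, as in the elv_merge() hunk further down:

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;
]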
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4780a46..e176ddb 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -70,7 +70,7 @@
 	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
-	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -188,7 +188,7 @@
 		rc = blk_queue_resize_tags(q, depth);
 		if (rc)
 			return rc;
-		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_QUEUED, q);
 		return 0;
 	} else
 		atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@
 	 * assign it, all done
 	 */
 	q->queue_tags = tags;
-	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	queue_flag_set(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 fail:
diff --git a/block/blk.h b/block/blk.h
index ec9120f..59776ab 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -10,7 +10,6 @@
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 
-void rq_init(struct request_queue *q, struct request *rq);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
diff --git a/block/elevator.c b/block/elevator.c
index 88318c3..ac5310e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -69,7 +69,7 @@
 /*
  * can we safely merge with this request?
  */
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
 	if (!rq_mergeable(rq))
 		return 0;
@@ -488,6 +488,9 @@
 		}
 	}
 
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
@@ -1070,7 +1073,7 @@
 	 */
 	spin_lock_irq(q->queue_lock);
 
-	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
 	elv_drain_elevator(q);
 
@@ -1104,7 +1107,10 @@
 	 * finally exit old elevator and turn off BYPASS.
 	 */
 	elevator_exit(old_elevator);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 1;
 
 fail_register:
@@ -1115,7 +1121,11 @@
 	elevator_exit(e);
 	q->elevator = old_elevator;
 	elv_register_queue(q);
-	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+	spin_lock_irq(q->queue_lock);
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	spin_unlock_irq(q->queue_lock);
+
 	return 0;
 }
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a2c3a93..ffa3720 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -217,8 +217,6 @@
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, int has_write_perm)
 {
-	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-
 	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
 	if (blk_verify_command(rq->cmd, has_write_perm))
@@ -531,7 +529,6 @@
 	rq->data_len = 0;
 	rq->extra_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd[0] = cmd;
 	rq->cmd[4] = data;
 	rq->cmd_len = 6;
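
[Annotation: the two memset()s dropped from scsi_ioctl.c are safe to drop because blk_rq_init(), introduced in the blk-core.c hunk above, now zeroes the whole request -- including the embedded __cmd[] buffer that rq->cmd points at -- before the request is handed out. So by the time these fill-in routines run, the CDB already starts out as zeroes:

	/* blk_rq_init() has already done memset(rq, 0, sizeof(*rq)) and
	 * rq->cmd = rq->__cmd, so the CDB bytes begin life as zeroes */
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;
	rq->cmd_len = 6;	/* bytes 1-3 and 5 stay zero with no memset */
]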
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 43a95e5..5b73f6a 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -92,6 +92,7 @@
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
 static const struct file_operations acpi_ac_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_ac_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -195,16 +196,11 @@
 	}
 
 	/* 'state' [R] */
-	entry = create_proc_entry(ACPI_AC_FILE_STATE,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_AC_FILE_STATE,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_ac_fops, acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_ac_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
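
[Annotation: where the old ACPI proc code set entry->owner = THIS_MODULE by hand, the converted drivers now carry the owner in the file_operations themselves (the ".owner = THIS_MODULE" lines added above), which is what keeps the module from being unloaded while one of these /proc files is open. The resulting fops shape, with hypothetical handler names:

	static const struct file_operations acpi_example_fops = {
		.owner	 = THIS_MODULE,
		.open	 = acpi_example_open_fs,	/* hypothetical */
		.read	 = seq_read,
		.llseek	 = seq_lseek,
		.release = single_release,
	};
]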
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index d5729d5..b1c723f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -741,15 +741,13 @@
 	}
 
 	for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
-		entry = create_proc_entry(acpi_battery_file[i].name,
-				  acpi_battery_file[i].mode, acpi_device_dir(device));
+		entry = proc_create_data(acpi_battery_file[i].name,
+					 acpi_battery_file[i].mode,
+					 acpi_device_dir(device),
+					 &acpi_battery_file[i].ops,
+					 acpi_driver_data(device));
 		if (!entry)
 			return -ENODEV;
-		else {
-			entry->proc_fops = &acpi_battery_file[i].ops;
-			entry->data = acpi_driver_data(device);
-			entry->owner = THIS_MODULE;
-		}
 	}
 	return 0;
 }
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6c5da83..1dfec41 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -102,6 +102,7 @@
 };
 
 static const struct file_operations acpi_button_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_button_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -109,6 +110,7 @@
 };
 
 static const struct file_operations acpi_button_state_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_button_state_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -207,27 +209,21 @@
 	acpi_device_dir(device)->owner = THIS_MODULE;
 
 	/* 'info' [R] */
-	entry = create_proc_entry(ACPI_BUTTON_FILE_INFO,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_BUTTON_FILE_INFO,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_button_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_button_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* show lid state [R] */
 	if (button->type == ACPI_BUTTON_TYPE_LID) {
-		entry = create_proc_entry(ACPI_BUTTON_FILE_STATE,
-					  S_IRUGO, acpi_device_dir(device));
+		entry = proc_create_data(ACPI_BUTTON_FILE_STATE,
+					 S_IRUGO, acpi_device_dir(device),
+					 &acpi_button_state_fops,
+					 acpi_driver_data(device));
 		if (!entry)
 			return -ENODEV;
-		else {
-			entry->proc_fops = &acpi_button_state_fops;
-			entry->data = acpi_driver_data(device);
-			entry->owner = THIS_MODULE;
-		}
 	}
 
 	return 0;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7222a18..e3f04b2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -669,16 +669,11 @@
 			return -ENODEV;
 	}
 
-	entry = create_proc_entry(ACPI_EC_FILE_INFO, S_IRUGO,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO,
+				 acpi_device_dir(device),
+				 &acpi_ec_info_ops, acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_ec_info_ops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index abec1ca..0c24bd4 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -102,6 +102,7 @@
 }
 
 static const struct file_operations acpi_system_event_ops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_open_event,
 	.read = acpi_system_read_event,
 	.release = acpi_system_close_event,
@@ -294,10 +295,9 @@
 
 #ifdef CONFIG_ACPI_PROC_EVENT
 	/* 'event' [R] */
-	entry = create_proc_entry("event", S_IRUSR, acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_event_ops;
-	else
+	entry = proc_create("event", S_IRUSR, acpi_root_dir,
+			    &acpi_system_event_ops);
+	if (!entry)
 		return -ENODEV;
 #endif
 
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index c8e3cba..194077a 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -192,17 +192,13 @@
 	}
 
 	/* 'status' [R/W] */
-	entry = create_proc_entry(ACPI_FAN_FILE_STATE,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_FAN_FILE_STATE,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_fan_state_ops,
+				 device);
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_fan_state_ops;
-		entry->data = device;
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 76bf6d9..21fc8bf 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -93,6 +93,7 @@
 static struct list_head acpi_power_resource_list;
 
 static const struct file_operations acpi_power_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_power_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -543,15 +544,11 @@
 	}
 
 	/* 'status' [R] */
-	entry = create_proc_entry(ACPI_POWER_FILE_STATUS,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_POWER_FILE_STATUS,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_power_fops, acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_power_fops;
-		entry->data = acpi_driver_data(device);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index a825b43..dd28c91 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -112,6 +112,7 @@
 #define UNINSTALL_NOTIFY_HANDLER	2
 
 static const struct file_operations acpi_processor_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -326,40 +327,30 @@
 	acpi_device_dir(device)->owner = THIS_MODULE;
 
 	/* 'info' [R] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_processor_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'throttling' [R/W] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_processor_throttling_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_throttling_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'limit' [R/W] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_processor_limit_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_limit_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0d90ff5..789d494 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1282,6 +1282,7 @@
 }
 
 static const struct file_operations acpi_processor_power_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_power_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1822,16 +1823,12 @@
 	}
 
 	/* 'power' [R] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_processor_power_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -EIO;
-	else {
-		entry->proc_fops = &acpi_processor_power_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index b477a4b..d80b2d1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -411,6 +411,7 @@
 
 static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_processor_perf_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_perf_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -456,7 +457,6 @@
 
 static void acpi_cpufreq_add_file(struct acpi_processor *pr)
 {
-	struct proc_dir_entry *entry = NULL;
 	struct acpi_device *device = NULL;
 
 
@@ -464,14 +464,9 @@
 		return;
 
 	/* add file 'performance' [R/W] */
-	entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
-				  S_IFREG | S_IRUGO,
-				  acpi_device_dir(device));
-	if (entry){
-		entry->proc_fops = &acpi_processor_perf_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
+	proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
+			 acpi_device_dir(device),
+			 &acpi_processor_perf_fops, acpi_driver_data(device));
 	return;
 }
 
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 649ae99..ef34b18 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -509,6 +509,7 @@
 }
 
 struct file_operations acpi_processor_limit_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_limit_open_fs,
 	.read = seq_read,
 	.write = acpi_processor_write_limit,
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0bba3a9..bb06738 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1252,6 +1252,7 @@
 }
 
 struct file_operations acpi_processor_throttling_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_processor_throttling_open_fs,
 	.read = seq_read,
 	.write = acpi_processor_write_throttling,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 585ae3c..10a3651 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -483,8 +483,6 @@
 		struct file_operations *state_fops,
 		struct file_operations *alarm_fops, void *data)
 {
-	struct proc_dir_entry *entry = NULL;
-
 	if (!*dir) {
 		*dir = proc_mkdir(dir_name, parent_dir);
 		if (!*dir) {
@@ -494,34 +492,19 @@
 	}
 
 	/* 'info' [R] */
-	if (info_fops) {
-		entry = create_proc_entry(ACPI_SBS_FILE_INFO, S_IRUGO, *dir);
-		if (entry) {
-			entry->proc_fops = info_fops;
-			entry->data = data;
-			entry->owner = THIS_MODULE;
-		}
-	}
+	if (info_fops)
+		proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
+				 info_fops, data);
 
 	/* 'state' [R] */
-	if (state_fops) {
-		entry = create_proc_entry(ACPI_SBS_FILE_STATE, S_IRUGO, *dir);
-		if (entry) {
-			entry->proc_fops = state_fops;
-			entry->data = data;
-			entry->owner = THIS_MODULE;
-		}
-	}
+	if (state_fops)
+		proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
+				 state_fops, data);
 
 	/* 'alarm' [R/W] */
-	if (alarm_fops) {
-		entry = create_proc_entry(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir);
-		if (entry) {
-			entry->proc_fops = alarm_fops;
-			entry->data = data;
-			entry->owner = THIS_MODULE;
-		}
-	}
+	if (alarm_fops)
+		proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
+				 alarm_fops, data);
 	return 0;
 }
 
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index f8df521..8a5fe87 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -440,6 +440,7 @@
 }
 
 static const struct file_operations acpi_system_wakeup_device_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_wakeup_device_open_fs,
 	.read = seq_read,
 	.write = acpi_system_write_wakeup_device,
@@ -449,6 +450,7 @@
 
 #ifdef	CONFIG_ACPI_PROCFS
 static const struct file_operations acpi_system_sleep_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_sleep_open_fs,
 	.read = seq_read,
 	.write = acpi_system_write_sleep,
@@ -459,6 +461,7 @@
 
 #ifdef	HAVE_ACPI_LEGACY_ALARM
 static const struct file_operations acpi_system_alarm_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_alarm_open_fs,
 	.read = seq_read,
 	.write = acpi_system_write_alarm,
@@ -477,37 +480,26 @@
 
 static int __init acpi_sleep_proc_init(void)
 {
-	struct proc_dir_entry *entry = NULL;
-
 	if (acpi_disabled)
 		return 0;
 
 #ifdef	CONFIG_ACPI_PROCFS
 	/* 'sleep' [R/W] */
-	entry =
-	    create_proc_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_sleep_fops;
+	proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_sleep_fops);
 #endif				/* CONFIG_ACPI_PROCFS */
 
 #ifdef	HAVE_ACPI_LEGACY_ALARM
 	/* 'alarm' [R/W] */
-	entry =
-	    create_proc_entry("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_alarm_fops;
+	proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_alarm_fops);
 
 	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
 #endif				/* HAVE_ACPI_LEGACY_ALARM */
 
 	/* 'wakeup device' [R/W] */
-	entry =
-	    create_proc_entry("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_wakeup_device_fops;
+	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
+		    acpi_root_dir, &acpi_system_wakeup_device_fops);
 
 	return 0;
 }
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4749f37..769f248 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -396,6 +396,7 @@
 }
 
 static const struct file_operations acpi_system_info_ops = {
+	.owner = THIS_MODULE,
 	.open = acpi_system_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -406,6 +407,7 @@
 				     loff_t *);
 
 static const struct file_operations acpi_system_dsdt_ops = {
+	.owner = THIS_MODULE,
 	.read = acpi_system_read_dsdt,
 };
 
@@ -430,6 +432,7 @@
 				     loff_t *);
 
 static const struct file_operations acpi_system_fadt_ops = {
+	.owner = THIS_MODULE,
 	.read = acpi_system_read_fadt,
 };
 
@@ -454,31 +457,23 @@
 {
 	struct proc_dir_entry *entry;
 	int error = 0;
-	char *name;
 
 	/* 'info' [R] */
-	name = ACPI_SYSTEM_FILE_INFO;
-	entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
+	entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir,
+			    &acpi_system_info_ops);
 	if (!entry)
 		goto Error;
-	else {
-		entry->proc_fops = &acpi_system_info_ops;
-	}
 
 	/* 'dsdt' [R] */
-	name = ACPI_SYSTEM_FILE_DSDT;
-	entry = create_proc_entry(name, S_IRUSR, acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_dsdt_ops;
-	else
+	entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir,
+			    &acpi_system_dsdt_ops);
+	if (!entry)
 		goto Error;
 
 	/* 'fadt' [R] */
-	name = ACPI_SYSTEM_FILE_FADT;
-	entry = create_proc_entry(name, S_IRUSR, acpi_root_dir);
-	if (entry)
-		entry->proc_fops = &acpi_system_fadt_ops;
-	else
+	entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir,
+			    &acpi_system_fadt_ops);
+	if (!entry)
 		goto Error;
 
       Done:
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 766bd25..0815ac3 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -198,6 +198,7 @@
 };
 
 static const struct file_operations acpi_thermal_state_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_state_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -205,6 +206,7 @@
 };
 
 static const struct file_operations acpi_thermal_temp_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_temp_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -212,6 +214,7 @@
 };
 
 static const struct file_operations acpi_thermal_trip_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_trip_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -219,6 +222,7 @@
 };
 
 static const struct file_operations acpi_thermal_cooling_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_cooling_open_fs,
 	.read = seq_read,
 	.write = acpi_thermal_write_cooling_mode,
@@ -227,6 +231,7 @@
 };
 
 static const struct file_operations acpi_thermal_polling_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_thermal_polling_open_fs,
 	.read = seq_read,
 	.write = acpi_thermal_write_polling,
@@ -1419,63 +1424,47 @@
 	}
 
 	/* 'state' [R] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_STATE,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_thermal_state_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_state_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'temperature' [R] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
-				  S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
+				 S_IRUGO, acpi_device_dir(device),
+				 &acpi_thermal_temp_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_temp_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'trip_points' [R] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
-				  S_IRUGO,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
+				 S_IRUGO,
+				 acpi_device_dir(device),
+				 &acpi_thermal_trip_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_trip_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'cooling_mode' [R/W] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_thermal_cooling_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_cooling_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'polling_frequency' [R/W] */
-	entry = create_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
-				  S_IFREG | S_IRUGO | S_IWUSR,
-				  acpi_device_dir(device));
+	entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
+				 S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_thermal_polling_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_thermal_polling_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 980a741..43b2283 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -192,6 +192,7 @@
 /* bus */
 static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -200,6 +201,7 @@
 
 static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_ROM_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_ROM_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -209,6 +211,7 @@
 static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
 					    struct file *file);
 static struct file_operations acpi_video_bus_POST_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_POST_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -217,6 +220,7 @@
 
 static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_POST_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_POST_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -225,6 +229,7 @@
 
 static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_DOS_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_bus_DOS_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -235,6 +240,7 @@
 static int acpi_video_device_info_open_fs(struct inode *inode,
 					  struct file *file);
 static struct file_operations acpi_video_device_info_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_info_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -244,6 +250,7 @@
 static int acpi_video_device_state_open_fs(struct inode *inode,
 					   struct file *file);
 static struct file_operations acpi_video_device_state_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_state_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -253,6 +260,7 @@
 static int acpi_video_device_brightness_open_fs(struct inode *inode,
 						struct file *file);
 static struct file_operations acpi_video_device_brightness_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_brightness_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -262,6 +270,7 @@
 static int acpi_video_device_EDID_open_fs(struct inode *inode,
 					  struct file *file);
 static struct file_operations acpi_video_device_EDID_fops = {
+	.owner = THIS_MODULE,
 	.open = acpi_video_device_EDID_open_fs,
 	.read = seq_read,
 	.llseek = seq_lseek,
@@ -1070,51 +1079,36 @@
 	}
 
 	/* 'info' [R] */
-	entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("info", S_IRUGO, acpi_device_dir(device),
+			&acpi_video_device_info_fops, acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_device_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'state' [R/W] */
-	entry =
-	    create_proc_entry("state", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_device_dir(device));
+	acpi_video_device_state_fops.write = acpi_video_device_write_state;
+	entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_device_state_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_device_state_fops.write = acpi_video_device_write_state;
-		entry->proc_fops = &acpi_video_device_state_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'brightness' [R/W] */
-	entry =
-	    create_proc_entry("brightness", S_IFREG | S_IRUGO | S_IWUSR,
-			      acpi_device_dir(device));
+	acpi_video_device_brightness_fops.write =
+		acpi_video_device_write_brightness;
+	entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_device_brightness_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_device_brightness_fops.write = acpi_video_device_write_brightness;
-		entry->proc_fops = &acpi_video_device_brightness_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'EDID' [R] */
-	entry = create_proc_entry("EDID", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("EDID", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_device_EDID_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_device_EDID_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
-
 	return 0;
 }
 
@@ -1353,61 +1347,43 @@
 	}
 
 	/* 'info' [R] */
-	entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("info", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_bus_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_bus_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'ROM' [R] */
-	entry = create_proc_entry("ROM", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("ROM", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_bus_ROM_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_bus_ROM_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'POST_info' [R] */
-	entry =
-	    create_proc_entry("POST_info", S_IRUGO, acpi_device_dir(device));
+	entry = proc_create_data("POST_info", S_IRUGO, acpi_device_dir(device),
+				 &acpi_video_bus_POST_info_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		entry->proc_fops = &acpi_video_bus_POST_info_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'POST' [R/W] */
-	entry =
-	    create_proc_entry("POST", S_IFREG | S_IRUGO | S_IRUSR,
-			      acpi_device_dir(device));
+	acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
+	entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IRUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_bus_POST_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
-		entry->proc_fops = &acpi_video_bus_POST_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	/* 'DOS' [R/W] */
-	entry =
-	    create_proc_entry("DOS", S_IFREG | S_IRUGO | S_IRUSR,
-			      acpi_device_dir(device));
+	acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
+	entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IRUSR,
+				 acpi_device_dir(device),
+				 &acpi_video_bus_DOS_fops,
+				 acpi_driver_data(device));
 	if (!entry)
 		return -ENODEV;
-	else {
-		acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
-		entry->proc_fops = &acpi_video_bus_DOS_fops;
-		entry->data = acpi_driver_data(device);
-		entry->owner = THIS_MODULE;
-	}
 
 	return 0;
 }
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 38c769f..3da804b 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -415,7 +415,7 @@
    card->pcidev = pcidev;
    membase = pci_resource_start(pcidev, 1);
    card->membase = ioremap(membase, NS_IOREMAP_SIZE);
-   if (card->membase == 0)
+   if (!card->membase)
    {
       printk("nicstar%d: can't ioremap() membase.\n",i);
       error = 3;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 1fef7df..9fd4a85 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -396,6 +396,8 @@
 	if (!firmware_p)
 		return -EINVAL;
 
+	printk(KERN_INFO "firmware: requesting %s\n", name);
+
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 280e71e..5b4c6e6 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -195,7 +195,6 @@
 struct aoedev *aoedev_by_aoeaddr(int maj, int min);
 struct aoedev *aoedev_by_sysminor_m(ulong sysminor);
 void aoedev_downdev(struct aoedev *d);
-int aoedev_isbusy(struct aoedev *d);
 int aoedev_flush(const char __user *str, size_t size);
 
 int aoenet_init(void);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d00293b..8fc429c 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -668,16 +668,16 @@
 	u16 n;
 
 	/* word 83: command set supported */
-	n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
+	n = get_unaligned_le16(&id[83 << 1]);
 
 	/* word 86: command set/feature enabled */
-	n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
+	n |= get_unaligned_le16(&id[86 << 1]);
 
 	if (n & (1<<10)) {	/* bit 10: LBA 48 */
 		d->flags |= DEVFL_EXT;
 
 		/* word 100: number lba48 sectors */
-		ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
+		ssize = get_unaligned_le64(&id[100 << 1]);
 
 		/* set as in ide-disk.c:init_idedisk_capacity */
 		d->geo.cylinders = ssize;
@@ -688,12 +688,12 @@
 		d->flags &= ~DEVFL_EXT;
 
 		/* number lba28 sectors */
-		ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
+		ssize = get_unaligned_le32(&id[60 << 1]);
 
 		/* NOTE: obsolete in ATA 6 */
-		d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
-		d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
-		d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
+		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
+		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
+		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
 	}
 
 	if (d->ssize != ssize)
@@ -779,7 +779,7 @@
 	u16 aoemajor;
 
 	hin = (struct aoe_hdr *) skb_mac_header(skb);
-	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
+	aoemajor = get_unaligned_be16(&hin->major);
 	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
 	if (d == NULL) {
 		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
@@ -791,7 +791,7 @@
 
 	spin_lock_irqsave(&d->lock, flags);
 
-	n = be32_to_cpu(get_unaligned(&hin->tag));
+	n = get_unaligned_be32(&hin->tag);
 	t = gettgt(d, hin->src);
 	if (t == NULL) {
 		printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n",
@@ -806,9 +806,9 @@
 		snprintf(ebuf, sizeof ebuf,
 			"%15s e%d.%d    tag=%08x@%08lx\n",
 			"unexpected rsp",
-			be16_to_cpu(get_unaligned(&hin->major)),
+			get_unaligned_be16(&hin->major),
 			hin->minor,
-			be32_to_cpu(get_unaligned(&hin->tag)),
+			get_unaligned_be32(&hin->tag),
 			jiffies);
 		aoechr_error(ebuf);
 		return;
@@ -873,7 +873,7 @@
 			printk(KERN_INFO
 				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
 				ahout->cmdstat,
-				be16_to_cpu(get_unaligned(&hin->major)),
+				get_unaligned_be16(&hin->major),
 				hin->minor);
 		}
 	}
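
The aoe changes swap the open-coded endian conversion wrapped around
get_unaligned() for the combined helpers: get_unaligned_le16/le32/le64() and
get_unaligned_be16/be32() (from <asm/unaligned.h>) take a plain pointer and do
the possibly-unaligned load and the byte swap in one step, dropping the
__le16/__be32 casts. Roughly:

	/* old: cast, unaligned load, then byte-swap */
	n = le16_to_cpu(get_unaligned((__le16 *) &id[83 << 1]));
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));

	/* new: one helper per width and endianness */
	n = get_unaligned_le16(&id[83 << 1]);
	aoemajor = get_unaligned_be16(&hin->major);
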
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index f9a1cd9..a1d813a 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -18,24 +18,6 @@
 static struct aoedev *devlist;
 static DEFINE_SPINLOCK(devlist_lock);
 
-int
-aoedev_isbusy(struct aoedev *d)
-{
-	struct aoetgt **t, **te;
-	struct frame *f, *e;
-
-	t = d->targets;
-	te = t + NTARGETS;
-	for (; t < te && *t; t++) {
-		f = (*t)->frames;
-		e = f + (*t)->nframes;
-		for (; f < e; f++)
-			if (f->tag != FREETAG)
-				return 1;
-	}
-	return 0;
-}
-
 struct aoedev *
 aoedev_by_aoeaddr(int maj, int min)
 {
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 18d243c..d625169 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -128,7 +128,7 @@
 	skb_push(skb, ETH_HLEN);	/* (1) */
 
 	h = (struct aoe_hdr *) skb_mac_header(skb);
-	n = be32_to_cpu(get_unaligned(&h->tag));
+	n = get_unaligned_be32(&h->tag);
 	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
 		goto exit;
 
@@ -140,7 +140,7 @@
 			printk(KERN_ERR
 				"%s%d.%d@%s; ecode=%d '%s'\n",
 				"aoe: error packet from ",
-				be16_to_cpu(get_unaligned(&h->major)),
+				get_unaligned_be16(&h->major),
 				h->minor, skb->dev->name,
 				h->err, aoe_errlist[n]);
 		goto exit;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cf6083a..e539be5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -425,7 +425,7 @@
 	struct proc_dir_entry *pde;
 
 	if (proc_cciss == NULL)
-		proc_cciss = proc_mkdir("cciss", proc_root_driver);
+		proc_cciss = proc_mkdir("driver/cciss", NULL);
 	if (!proc_cciss)
 		return;
 	pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
@@ -3700,7 +3700,7 @@
 			cciss_remove_one(hba[i]->pdev);
 		}
 	}
-	remove_proc_entry("cciss", proc_root_driver);
+	remove_proc_entry("driver/cciss", NULL);
 }
 
 static void fail_all_cmds(unsigned long ctlr)
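
cciss (and cpqarray and pktcdvd below) stop using the proc_root_driver pointer
and instead name the directory by its path relative to /proc, passing a NULL
parent; the same string has to be used on removal. The change in shape:

	/* old: explicit parent directory pointer */
	proc_cciss = proc_mkdir("cciss", proc_root_driver);
	remove_proc_entry("cciss", proc_root_driver);

	/* new: NULL parent means /proc, so spell out the driver/ prefix */
	proc_cciss = proc_mkdir("driver/cciss", NULL);
	remove_proc_entry("driver/cciss", NULL);
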
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 6919918..09c1434 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -214,7 +214,7 @@
 static void __init ida_procinit(int i)
 {
 	if (proc_array == NULL) {
-		proc_array = proc_mkdir("cpqarray", proc_root_driver);
+		proc_array = proc_mkdir("driver/cpqarray", NULL);
 		if (!proc_array) return;
 	}
 
@@ -1796,7 +1796,7 @@
 		}
 	}
 
-	remove_proc_entry("cpqarray", proc_root_driver);
+	remove_proc_entry("driver/cpqarray", NULL);
 }
 
 module_init(cpqarray_init)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 7652e87d..395f8ea 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4526,14 +4526,15 @@
 	}
 }
 
-int __init init_module(void)
+static int __init floppy_module_init(void)
 {
 	if (floppy)
 		parse_floppy_cfg_string(floppy);
 	return floppy_init();
 }
+module_init(floppy_module_init);
 
-void cleanup_module(void)
+static void __exit floppy_module_exit(void)
 {
 	int drive;
 
@@ -4562,6 +4563,7 @@
 	/* eject disk, if any */
 	fd_eject(0);
 }
+module_exit(floppy_module_exit);
 
 module_param(floppy, charp, 0);
 module_param(FLOPPY_IRQ, int, 0);
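
floppy drops the legacy init_module()/cleanup_module() entry points in favour of
static functions registered with module_init()/module_exit(), which also allows
the __init/__exit section annotations. A minimal sketch for a hypothetical foo
driver:

	static int __init foo_module_init(void)
	{
		return foo_init();	/* the real probe/registration work */
	}
	module_init(foo_module_init);

	static void __exit foo_module_exit(void)
	{
		foo_cleanup();		/* undo everything foo_init() did */
	}
	module_exit(foo_module_exit);
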
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f7f1635..d3a25b0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -546,7 +546,7 @@
 {
 	struct loop_device *lo = q->queuedata;
 
-	clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
 	blk_run_address_space(lo->lo_backing_file->f_mapping);
 }
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 60cc543..ad98dda 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <net/sock.h>
 #include <linux/net.h>
+#include <linux/kthread.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -55,6 +56,7 @@
 
 static unsigned int nbds_max = 16;
 static struct nbd_device *nbd_dev;
+static int max_part;
 
 /*
  * Use just one lock (or at most 1 per NIC). Two arguments for this:
@@ -337,7 +339,7 @@
 	}
 
 	req = nbd_find_request(lo, *(struct request **)reply.handle);
-	if (unlikely(IS_ERR(req))) {
+	if (IS_ERR(req)) {
 		result = PTR_ERR(req);
 		if (result != -ENOENT)
 			goto harderror;
@@ -441,6 +443,85 @@
 }
 
 
+static void nbd_handle_req(struct nbd_device *lo, struct request *req)
+{
+	if (!blk_fs_request(req))
+		goto error_out;
+
+	nbd_cmd(req) = NBD_CMD_READ;
+	if (rq_data_dir(req) == WRITE) {
+		nbd_cmd(req) = NBD_CMD_WRITE;
+		if (lo->flags & NBD_READ_ONLY) {
+			printk(KERN_ERR "%s: Write on read-only\n",
+					lo->disk->disk_name);
+			goto error_out;
+		}
+	}
+
+	req->errors = 0;
+
+	mutex_lock(&lo->tx_lock);
+	if (unlikely(!lo->sock)) {
+		mutex_unlock(&lo->tx_lock);
+		printk(KERN_ERR "%s: Attempted send on closed socket\n",
+		       lo->disk->disk_name);
+		req->errors++;
+		nbd_end_request(req);
+		return;
+	}
+
+	lo->active_req = req;
+
+	if (nbd_send_req(lo, req) != 0) {
+		printk(KERN_ERR "%s: Request send failed\n",
+				lo->disk->disk_name);
+		req->errors++;
+		nbd_end_request(req);
+	} else {
+		spin_lock(&lo->queue_lock);
+		list_add(&req->queuelist, &lo->queue_head);
+		spin_unlock(&lo->queue_lock);
+	}
+
+	lo->active_req = NULL;
+	mutex_unlock(&lo->tx_lock);
+	wake_up_all(&lo->active_wq);
+
+	return;
+
+error_out:
+	req->errors++;
+	nbd_end_request(req);
+}
+
+static int nbd_thread(void *data)
+{
+	struct nbd_device *lo = data;
+	struct request *req;
+
+	set_user_nice(current, -20);
+	while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
+		/* wait for something to do */
+		wait_event_interruptible(lo->waiting_wq,
+					 kthread_should_stop() ||
+					 !list_empty(&lo->waiting_queue));
+
+		/* extract request */
+		if (list_empty(&lo->waiting_queue))
+			continue;
+
+		spin_lock_irq(&lo->queue_lock);
+		req = list_entry(lo->waiting_queue.next, struct request,
+				 queuelist);
+		list_del_init(&req->queuelist);
+		spin_unlock_irq(&lo->queue_lock);
+
+		/* handle request */
+		nbd_handle_req(lo, req);
+	}
+	return 0;
+}
+
 /*
  * We always wait for result of write, for now. It would be nice to make it optional
  * in future
@@ -456,65 +537,23 @@
 		struct nbd_device *lo;
 
 		blkdev_dequeue_request(req);
+
+		spin_unlock_irq(q->queue_lock);
+
 		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
 				req->rq_disk->disk_name, req, req->cmd_type);
 
-		if (!blk_fs_request(req))
-			goto error_out;
-
 		lo = req->rq_disk->private_data;
 
 		BUG_ON(lo->magic != LO_MAGIC);
 
-		nbd_cmd(req) = NBD_CMD_READ;
-		if (rq_data_dir(req) == WRITE) {
-			nbd_cmd(req) = NBD_CMD_WRITE;
-			if (lo->flags & NBD_READ_ONLY) {
-				printk(KERN_ERR "%s: Write on read-only\n",
-						lo->disk->disk_name);
-				goto error_out;
-			}
-		}
+		spin_lock_irq(&lo->queue_lock);
+		list_add_tail(&req->queuelist, &lo->waiting_queue);
+		spin_unlock_irq(&lo->queue_lock);
 
-		req->errors = 0;
-		spin_unlock_irq(q->queue_lock);
-
-		mutex_lock(&lo->tx_lock);
-		if (unlikely(!lo->sock)) {
-			mutex_unlock(&lo->tx_lock);
-			printk(KERN_ERR "%s: Attempted send on closed socket\n",
-			       lo->disk->disk_name);
-			req->errors++;
-			nbd_end_request(req);
-			spin_lock_irq(q->queue_lock);
-			continue;
-		}
-
-		lo->active_req = req;
-
-		if (nbd_send_req(lo, req) != 0) {
-			printk(KERN_ERR "%s: Request send failed\n",
-					lo->disk->disk_name);
-			req->errors++;
-			nbd_end_request(req);
-		} else {
-			spin_lock(&lo->queue_lock);
-			list_add(&req->queuelist, &lo->queue_head);
-			spin_unlock(&lo->queue_lock);
-		}
-
-		lo->active_req = NULL;
-		mutex_unlock(&lo->tx_lock);
-		wake_up_all(&lo->active_wq);
+		wake_up(&lo->waiting_wq);
 
 		spin_lock_irq(q->queue_lock);
-		continue;
-
-error_out:
-		req->errors++;
-		spin_unlock(q->queue_lock);
-		nbd_end_request(req);
-		spin_lock(q->queue_lock);
 	}
 }
 
@@ -524,6 +563,7 @@
 	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
 	int error;
 	struct request sreq ;
+	struct task_struct *thread;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -537,6 +577,7 @@
 	switch (cmd) {
 	case NBD_DISCONNECT:
 	        printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
 		/*
@@ -571,10 +612,13 @@
 		error = -EINVAL;
 		file = fget(arg);
 		if (file) {
+			struct block_device *bdev = inode->i_bdev;
 			inode = file->f_path.dentry->d_inode;
 			if (S_ISSOCK(inode->i_mode)) {
 				lo->file = file;
 				lo->sock = SOCKET_I(inode);
+				if (max_part > 0)
+					bdev->bd_invalidated = 1;
 				error = 0;
 			} else {
 				fput(file);
@@ -606,7 +650,12 @@
 	case NBD_DO_IT:
 		if (!lo->file)
 			return -EINVAL;
+		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+		if (IS_ERR(thread))
+			return PTR_ERR(thread);
+		wake_up_process(thread);
 		error = nbd_do_it(lo);
+		kthread_stop(thread);
 		if (error)
 			return error;
 		sock_shutdown(lo, 1);
@@ -619,6 +668,8 @@
 		lo->bytesize = 0;
 		inode->i_bdev->bd_inode->i_size = 0;
 		set_capacity(lo->disk, 0);
+		if (max_part > 0)
+			ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0);
 		return lo->harderror;
 	case NBD_CLEAR_QUE:
 		/*
@@ -652,6 +703,7 @@
 {
 	int err = -ENOMEM;
 	int i;
+	int part_shift;
 
 	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
 
@@ -659,8 +711,17 @@
 	if (!nbd_dev)
 		return -ENOMEM;
 
+	if (max_part < 0) {
+		printk(KERN_CRIT "nbd: max_part must be >= 0\n");
+		return -EINVAL;
+	}
+
+	part_shift = 0;
+	if (max_part > 0)
+		part_shift = fls(max_part);
+
 	for (i = 0; i < nbds_max; i++) {
-		struct gendisk *disk = alloc_disk(1);
+		struct gendisk *disk = alloc_disk(1 << part_shift);
 		elevator_t *old_e;
 		if (!disk)
 			goto out;
@@ -695,17 +756,18 @@
 		nbd_dev[i].file = NULL;
 		nbd_dev[i].magic = LO_MAGIC;
 		nbd_dev[i].flags = 0;
+		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		mutex_init(&nbd_dev[i].tx_lock);
 		init_waitqueue_head(&nbd_dev[i].active_wq);
+		init_waitqueue_head(&nbd_dev[i].waiting_wq);
 		nbd_dev[i].blksize = 1024;
 		nbd_dev[i].bytesize = 0;
 		disk->major = NBD_MAJOR;
-		disk->first_minor = i;
+		disk->first_minor = i << part_shift;
 		disk->fops = &nbd_fops;
 		disk->private_data = &nbd_dev[i];
-		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
 		sprintf(disk->disk_name, "nbd%d", i);
 		set_capacity(disk, 0);
 		add_disk(disk);
@@ -743,7 +805,9 @@
 MODULE_LICENSE("GPL");
 
 module_param(nbds_max, int, 0444);
-MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize.");
+MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
+module_param(max_part, int, 0444);
+MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
 #ifndef NDEBUG
 module_param(debugflags, int, 0644);
 MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
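
The nbd rework moves the socket I/O out of the request function into a
per-device kernel thread: do_nbd_request() now only queues the request on
lo->waiting_queue and wakes the thread, while nbd_thread() sleeps on waiting_wq
and calls nbd_handle_req() for each queued request. The new max_part parameter
reserves minor numbers for partitions; a quick worked example of the shift
arithmetic, assuming max_part=15 at load time:

	/* fls(15) = 4, so each device spans 1 << 4 = 16 minors:
	 * nbd0 uses minors 0..15, nbd1 starts at minor 16, and so on */
	part_shift = fls(max_part);
	disk = alloc_disk(1 << part_shift);
	disk->first_minor = i << part_shift;
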
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index df819f8..570f3b7 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -716,10 +716,8 @@
 	struct request rq;
 	int err = 0;
 
-	memset(&rq, 0, sizeof(rq));
-	rq.errors = 0;
+	blk_rq_init(NULL, &rq);
 	rq.rq_disk = disk->gd;
-	rq.ref_count = 1;
 	rq.end_io_data = &wait;
 	rq.end_io = blk_end_sync_rq;
 	blk_insert_request(disk->gd->queue, &rq, 0, func);
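
pd (like the NBD_DISCONNECT path above) now initializes its on-stack request
with blk_rq_init(NULL, &rq) instead of a memset plus hand-set fields.
blk_rq_init() clears the whole request, including rq->cmd[], and requests
obtained from blk_get_request() go through the same initialization, which is
presumably why the explicit memsets of the command buffer disappear from
pktcdvd, ps3disk and cdrom below. The on-stack usage looks like:

	struct request rq;

	blk_rq_init(NULL, &rq);		/* NULL queue: request lives on the stack */
	rq.rq_disk = disk->gd;
	rq.end_io_data = &wait;
	rq.end_io = blk_end_sync_rq;
	blk_insert_request(disk->gd->queue, &rq, 0, func);
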
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 18feb1c..3ba1df9 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -776,8 +776,6 @@
 
 	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
 	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
-	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
-		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 
 	rq->timeout = 60*HZ;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -2744,7 +2742,6 @@
 	int i;
 	int ret = 0;
 	char b[BDEVNAME_SIZE];
-	struct proc_dir_entry *proc;
 	struct block_device *bdev;
 
 	if (pd->pkt_dev == dev) {
@@ -2788,11 +2785,7 @@
 		goto out_mem;
 	}
 
-	proc = create_proc_entry(pd->name, 0, pkt_proc);
-	if (proc) {
-		proc->data = pd;
-		proc->proc_fops = &pkt_proc_fops;
-	}
+	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
 	DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
 	return 0;
 
@@ -3101,7 +3094,7 @@
 		goto out_misc;
 	}
 
-	pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver);
+	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
 
 	return 0;
 
@@ -3117,7 +3110,7 @@
 
 static void __exit pkt_exit(void)
 {
-	remove_proc_entry(DRIVER_NAME, proc_root_driver);
+	remove_proc_entry("driver/"DRIVER_NAME, NULL);
 	misc_deregister(&pkt_misc);
 
 	pkt_debugfs_cleanup();
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 7483f94..d797e20 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,8 +102,7 @@
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
 			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio),
-			(unsigned long)iter.bio->bi_sector);
+			bio_sectors(iter.bio), iter.bio->bi_sector);
 
 		size = bvec->bv_len;
 		buf = bvec_kmap_irq(bvec, &flags);
@@ -406,7 +405,6 @@
 
 	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
-	memset(req->cmd, 0, sizeof(req->cmd));
 	req->cmd_type = REQ_TYPE_FLUSH;
 }
 
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 27bfe72..e322cce 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -2399,7 +2399,7 @@
 		del_gendisk(lun->disk);
 		/*
 		 * I wish I could do:
-		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+		 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
 		 * As it is, we rely on our internal poisoning and let
 		 * the upper levels to spin furiously failing all the I/O.
 		 */
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d771da8..f2fff57 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -137,7 +137,7 @@
 	schedule_work(&info->work);
 }
 
-int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
 {
 	/* We don't have real geometry info, but let's at least return
 	   values consistent with the size of the device */
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ac38290..69f26eb 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2194,7 +2194,6 @@
 		if (ret)
 			break;
 
-		memset(rq->cmd, 0, sizeof(rq->cmd));
 		rq->cmd[0] = GPCMD_READ_CD;
 		rq->cmd[1] = 1 << 2;
 		rq->cmd[2] = (lba >> 24) & 0xff;
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index b74b6c2..5245a4a 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -144,6 +144,7 @@
 }
 
 static const struct file_operations proc_viocd_operations = {
+	.owner		= THIS_MODULE,
 	.open		= proc_viocd_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -679,7 +680,6 @@
 
 static int __init viocd_init(void)
 {
-	struct proc_dir_entry *e;
 	int ret = 0;
 
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
@@ -719,12 +719,8 @@
 	if (ret)
 		goto out_free_info;
 
-	e = create_proc_entry("iSeries/viocd", S_IFREG|S_IRUGO, NULL);
-	if (e) {
-		e->owner = THIS_MODULE;
-		e->proc_fops = &proc_viocd_operations;
-	}
-
+	proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
+		    &proc_viocd_operations);
 	return 0;
 
 out_free_info:
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 929d4fa..5dce387 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -80,6 +80,15 @@
 	 information. For framebuffer console users, please refer to
 	 <file:Documentation/fb/fbcon.txt>.
 
+config DEVKMEM
+	bool "/dev/kmem virtual device support"
+	default y
+	help
+	  Say Y here if you want to support the /dev/kmem device. The
+	  /dev/kmem device is rarely used, but can be used for certain
+	  kind of kernel debugging operations.
+	  kinds of kernel debugging operations.
+
 config SERIAL_NONSTANDARD
 	bool "Non-standard serial port support"
 	depends on HAS_IOMEM
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 17d5431..cdd876d 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -14,6 +14,7 @@
 #include <linux/poll.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/miscdevice.h>
 #include <linux/apm_bios.h>
 #include <linux/capability.h>
@@ -493,11 +494,10 @@
  *	-1: Unknown
  *   8) min = minutes; sec = seconds
  */
-static int apm_get_info(char *buf, char **start, off_t fpos, int length)
+static int proc_apm_show(struct seq_file *m, void *v)
 {
 	struct apm_power_info info;
 	char *units;
-	int ret;
 
 	info.ac_line_status = 0xff;
 	info.battery_status = 0xff;
@@ -515,14 +515,27 @@
 	case 1: 	units = "sec";	break;
 	}
 
-	ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+	seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
 		     driver_version, APM_32_BIT_SUPPORT,
 		     info.ac_line_status, info.battery_status,
 		     info.battery_flag, info.battery_life,
 		     info.time, units);
 
-	return ret;
+	return 0;
 }
+
+static int proc_apm_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_apm_show, NULL);
+}
+
+static const struct file_operations apm_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_apm_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
 #endif
 
 static int kapmd(void *arg)
@@ -593,7 +606,7 @@
 	wake_up_process(kapmd_tsk);
 
 #ifdef CONFIG_PROC_FS
-	create_proc_info_entry("apm", 0, NULL, apm_get_info);
+	proc_create("apm", 0, NULL, &apm_proc_fops);
 #endif
 
 	ret = misc_register(&apm_device);
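
apm-emulation (and ip2main below) convert the old create_proc_info_entry() read
handlers, which filled a raw page buffer and had to track offsets and length
limits by hand, into seq_file show functions registered through single_open().
A generic sketch of the pattern, with foo_* as placeholder names:

	static int foo_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "state: %d\n", foo_state);	/* no length bookkeeping */
		return 0;
	}

	static int foo_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_proc_show, NULL);
	}

	static const struct file_operations foo_proc_fops = {
		.owner	 = THIS_MODULE,
		.open	 = foo_proc_open,
		.read	 = seq_read,
		.llseek	 = seq_lseek,
		.release = single_release,
	};

	proc_create("foo", 0, NULL, &foo_proc_fops);
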
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
index c2d23ca..c0a4a0b 100644
--- a/drivers/char/cs5535_gpio.c
+++ b/drivers/char/cs5535_gpio.c
@@ -215,7 +215,7 @@
 	else
 		mask = 0x0b003c66;
 
-	if (request_region(gpio_base, CS5535_GPIO_SIZE, NAME) == 0) {
+	if (!request_region(gpio_base, CS5535_GPIO_SIZE, NAME)) {
 		printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n");
 		return -ENODEV;
 	}
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index f36adbd..c31afbd 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -817,7 +817,7 @@
 	for (i = 0; i < dma->buf_count; i++) {
 		buf = dma->buflist[i];
 		buf_priv = buf->dev_private;
-		if (buf->file_priv == 0)
+		if (!buf->file_priv)
 			return buf;
 	}
 
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 8609b82..f49037b 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -82,6 +82,7 @@
 		     unsigned long);
 
 static const struct file_operations i8k_fops = {
+	.owner		= THIS_MODULE,
 	.open		= i8k_open_fs,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -554,13 +555,10 @@
 		return -ENODEV;
 
 	/* Register the proc entry */
-	proc_i8k = create_proc_entry("i8k", 0, NULL);
+	proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
 	if (!proc_i8k)
 		return -ENOENT;
 
-	proc_i8k->proc_fops = &i8k_fops;
-	proc_i8k->owner = THIS_MODULE;
-
 	printk(KERN_INFO
 	       "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
 	       I8K_VERSION);
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index b1d6cad..0a61856 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -133,8 +133,9 @@
  *****************/
 
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
-static int ip2_read_procmem(char *, char **, off_t, int);
+static const struct file_operations ip2mem_proc_fops;
 static int ip2_read_proc(char *, char **, off_t, int, int *, void * );
 
 /********************/
@@ -423,7 +424,7 @@
 	}
 	put_tty_driver(ip2_tty_driver);
 	unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
-	remove_proc_entry("ip2mem", &proc_root);
+	remove_proc_entry("ip2mem", NULL);
 
 	// free memory
 	for (i = 0; i < IP2_MAX_BOARDS; i++) {
@@ -695,7 +696,7 @@
 		}
 	}
 	/* Register the read_procmem thing */
-	if (!create_proc_info_entry("ip2mem",0,&proc_root,ip2_read_procmem)) {
+	if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) {
 		printk(KERN_ERR "IP2: failed to register read_procmem\n");
 	} else {
 
@@ -2967,65 +2968,61 @@
 	}
 	return 0;
 }
-/******************************************************************************/
-/* Function:   ip2_read_procmem                                               */
-/* Parameters:                                                                */
-/*                                                                            */
-/* Returns: Length of output                                                  */
-/*                                                                            */
-/* Description:                                                               */
-/*   Supplies some driver operating parameters                                */
-/*	Not real useful unless your debugging the fifo							  */
-/*                                                                            */
-/******************************************************************************/
-
-#define LIMIT  (PAGE_SIZE - 120)
 
 static int
-ip2_read_procmem(char *buf, char **start, off_t offset, int len)
+proc_ip2mem_show(struct seq_file *m, void *v)
 {
 	i2eBordStrPtr  pB;
 	i2ChanStrPtr  pCh;
 	PTTY tty;
 	int i;
 
-	len = 0;
-
 #define FMTLINE	"%3d: 0x%08x 0x%08x 0%011o 0%011o\n"
 #define FMTLIN2	"     0x%04x 0x%04x tx flow 0x%x\n"
 #define FMTLIN3	"     0x%04x 0x%04x rc flow\n"
 
-	len += sprintf(buf+len,"\n");
+	seq_printf(m,"\n");
 
 	for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
 		pB = i2BoardPtrTable[i];
 		if ( pB ) {
-			len += sprintf(buf+len,"board %d:\n",i);
-			len += sprintf(buf+len,"\tFifo rem: %d mty: %x outM %x\n",
+			seq_printf(m,"board %d:\n",i);
+			seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n",
 				pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting);
 		}
 	}
 
-	len += sprintf(buf+len,"#: tty flags, port flags,     cflags,     iflags\n");
+	seq_printf(m,"#: tty flags, port flags,     cflags,     iflags\n");
 	for (i=0; i < IP2_MAX_PORTS; i++) {
-		if (len > LIMIT)
-			break;
 		pCh = DevTable[i];
 		if (pCh) {
 			tty = pCh->pTTY;
 			if (tty && tty->count) {
-				len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags,
+				seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags,
 									tty->termios->c_cflag,tty->termios->c_iflag);
 
-				len += sprintf(buf+len,FMTLIN2,
+				seq_printf(m,FMTLIN2,
 						pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds);
-				len += sprintf(buf+len,FMTLIN3,pCh->infl.asof,pCh->infl.room);
+				seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room);
 			}
 		}
 	}
-	return len;
+	return 0;
 }
 
+static int proc_ip2mem_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_ip2mem_show, NULL);
+}
+
+static const struct file_operations ip2mem_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_ip2mem_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 /*
  * This is the handler for /proc/tty/driver/ip2
  *
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 553f0a4..eb8a1a8 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -9,7 +9,3 @@
 obj-$(CONFIG_IPMI_SI) += ipmi_si.o
 obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
 obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
-
-ipmi_si.o:	$(ipmi_si-objs)
-	$(LD) -r -o $@ $(ipmi_si-objs)
-
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index e736119..7b98c06 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -37,26 +37,32 @@
 #define BT_DEBUG_ENABLE	1	/* Generic messages */
 #define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
 #define BT_DEBUG_STATES	4	/* Verbose look at state changes */
-/* BT_DEBUG_OFF must be zero to correspond to the default uninitialized
-   value */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
 
 static int bt_debug; /* 0 == BT_DEBUG_OFF */
 
 module_param(bt_debug, int, 0644);
 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
-/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
-   and 64 byte buffers.  However, one HP implementation wants 255 bytes of
-   buffer (with a documented message of 160 bytes) so go for the max.
-   Since the Open IPMI architecture is single-message oriented at this
-   stage, the queue depth of BT is of no concern. */
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers.  However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
 
 #define BT_NORMAL_TIMEOUT	5	/* seconds */
 #define BT_NORMAL_RETRY_LIMIT	2
 #define BT_RESET_DELAY		6	/* seconds after warm reset */
 
-/* States are written in chronological order and usually cover
-   multiple rows of the state table discussion in the IPMI spec. */
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
 
 enum bt_states {
 	BT_STATE_IDLE = 0,	/* Order is critical in this list */
@@ -76,10 +82,12 @@
 	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
 };
 
-/* Macros seen at the end of state "case" blocks.  They help with legibility
-   and debugging. */
+/*
+ * Macros seen at the end of state "case" blocks.  They help with legibility
+ * and debugging.
+ */
 
-#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
 
 #define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
 
@@ -110,11 +118,13 @@
 #define BT_H_BUSY	0x40
 #define BT_B_BUSY	0x80
 
-/* Some bits are toggled on each write: write once to set it, once
-   more to clear it; writing a zero does nothing.  To absolutely
-   clear it, check its state and write if set.  This avoids the "get
-   current then use as mask" scheme to modify one bit.  Note that the
-   variable "bt" is hardcoded into these macros. */
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing.  To absolutely
+ * clear it, check its state and write if set.  This avoids the "get
+ * current then use as mask" scheme to modify one bit.  Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
 
 #define BT_STATUS	bt->io->inputb(bt->io, 0)
 #define BT_CONTROL(x)	bt->io->outputb(bt->io, 0, x)
@@ -125,8 +135,10 @@
 #define BT_INTMASK_R	bt->io->inputb(bt->io, 2)
 #define BT_INTMASK_W(x)	bt->io->outputb(bt->io, 2, x)
 
-/* Convenience routines for debugging.  These are not multi-open safe!
-   Note the macros have hardcoded variables in them. */
+/*
+ * Convenience routines for debugging.  These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
 
 static char *state2txt(unsigned char state)
 {
@@ -182,7 +194,8 @@
 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
 {
 	memset(bt, 0, sizeof(struct si_sm_data));
-	if (bt->io != io) {		/* external: one-time only things */
+	if (bt->io != io) {
+		/* external: one-time only things */
 		bt->io = io;
 		bt->seq = 0;
 	}
@@ -229,7 +242,7 @@
 		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
 		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
 		for (i = 0; i < size; i ++)
-			printk (" %02x", data[i]);
+			printk(" %02x", data[i]);
 		printk("\n");
 	}
 	bt->write_data[0] = size + 1;	/* all data plus seq byte */
@@ -246,8 +259,10 @@
 	return 0;
 }
 
-/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
-   it calls this.  Strip out the length and seq bytes. */
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this.  Strip out the length and seq bytes.
+ */
 
 static int bt_get_result(struct si_sm_data *bt,
 			 unsigned char *data,
@@ -269,10 +284,10 @@
 		memcpy(data + 2, bt->read_data + 4, msg_len - 2);
 
 	if (bt_debug & BT_DEBUG_MSG) {
-		printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+		printk(KERN_WARNING "BT: result %d bytes:", msg_len);
 		for (i = 0; i < msg_len; i++)
 			printk(" %02x", data[i]);
-		printk ("\n");
+		printk("\n");
 	}
 	return msg_len;
 }
@@ -292,8 +307,10 @@
 	BT_INTMASK_W(BT_BMC_HWRST);
 }
 
-/* Get rid of an unwanted/stale response.  This should only be needed for
-   BMCs that support multiple outstanding requests. */
+/*
+ * Get rid of an unwanted/stale response.  This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
 
 static void drain_BMC2HOST(struct si_sm_data *bt)
 {
@@ -326,8 +343,8 @@
 		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
 			bt->write_count, bt->seq);
 		for (i = 0; i < bt->write_count; i++)
-			printk (" %02x", bt->write_data[i]);
-		printk ("\n");
+			printk(" %02x", bt->write_data[i]);
+		printk("\n");
 	}
 	for (i = 0; i < bt->write_count; i++)
 		HOST2BMC(bt->write_data[i]);
@@ -337,8 +354,10 @@
 {
 	unsigned char i;
 
-	/* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
-	   Keep layout of first four bytes aligned with write_data[] */
+	/*
+	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+	 * Keep layout of first four bytes aligned with write_data[]
+	 */
 
 	bt->read_data[0] = BMC2HOST;
 	bt->read_count = bt->read_data[0];
@@ -362,8 +381,8 @@
 		if (max > 16)
 			max = 16;
 		for (i = 0; i < max; i++)
-			printk (" %02x", bt->read_data[i]);
-		printk ("%s\n", bt->read_count == max ? "" : " ...");
+			printk(KERN_CONT " %02x", bt->read_data[i]);
+		printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
 	}
 
 	/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
@@ -402,8 +421,10 @@
 	printk(KERN_WARNING "IPMI BT: %s in %s %s ", 	/* open-ended line */
 		reason, STATE2TXT, STATUS2TXT);
 
-	/* Per the IPMI spec, retries are based on the sequence number
-	   known only to this module, so manage a restart here. */
+	/*
+	 * Per the IPMI spec, retries are based on the sequence number
+	 * known only to this module, so manage a restart here.
+	 */
 	(bt->error_retries)++;
 	if (bt->error_retries < bt->BT_CAP_retries) {
 		printk("%d retries left\n",
@@ -412,8 +433,8 @@
 		return SI_SM_CALL_WITHOUT_DELAY;
 	}
 
-	printk("failed %d retries, sending error response\n",
-		bt->BT_CAP_retries);
+	printk(KERN_WARNING "failed %d retries, sending error response\n",
+	       bt->BT_CAP_retries);
 	if (!bt->nonzero_status)
 		printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
 
@@ -424,8 +445,10 @@
 		return SI_SM_CALL_WITHOUT_DELAY;
 	}
 
-	/* Concoct a useful error message, set up the next state, and
-	   be done with this sequence. */
+	/*
+	 * Concoct a useful error message, set up the next state, and
+	 * be done with this sequence.
+	 */
 
 	bt->state = BT_STATE_IDLE;
 	switch (cCode) {
@@ -461,10 +484,12 @@
 		last_printed = bt->state;
 	}
 
-	/* Commands that time out may still (eventually) provide a response.
-	   This stale response will get in the way of a new response so remove
-	   it if possible (hopefully during IDLE).  Even if it comes up later
-	   it will be rejected by its (now-forgotten) seq number. */
+	/*
+	 * Commands that time out may still (eventually) provide a response.
+	 * This stale response will get in the way of a new response so remove
+	 * it if possible (hopefully during IDLE).  Even if it comes up later
+	 * it will be rejected by its (now-forgotten) seq number.
+	 */
 
 	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
 		drain_BMC2HOST(bt);
@@ -472,7 +497,8 @@
 	}
 
 	if ((bt->state != BT_STATE_IDLE) &&
-	    (bt->state <  BT_STATE_PRINTME)) {		/* check timeout */
+	    (bt->state <  BT_STATE_PRINTME)) {
+		/* check timeout */
 		bt->timeout -= time;
 		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
 			return error_recovery(bt,
@@ -482,8 +508,10 @@
 
 	switch (bt->state) {
 
-	/* Idle state first checks for asynchronous messages from another
-	   channel, then does some opportunistic housekeeping. */
+	/*
+	 * Idle state first checks for asynchronous messages from another
+	 * channel, then does some opportunistic housekeeping.
+	 */
 
 	case BT_STATE_IDLE:
 		if (status & BT_SMS_ATN) {
@@ -531,16 +559,19 @@
 			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
 		BT_CONTROL(BT_H_BUSY);		/* set */
 
-		/* Uncached, ordered writes should just proceeed serially but
-		   some BMCs don't clear B2H_ATN with one hit.  Fast-path a
-		   workaround without too much penalty to the general case. */
+		/*
+		 * Uncached, ordered writes should just proceed serially but
+		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+		 * workaround without too much penalty to the general case.
+		 */
 
 		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
 		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
 				SI_SM_CALL_WITHOUT_DELAY);
 
 	case BT_STATE_CLEAR_B2H:
-		if (status & BT_B2H_ATN) {	/* keep hitting it */
+		if (status & BT_B2H_ATN) {
+			/* keep hitting it */
 			BT_CONTROL(BT_B2H_ATN);
 			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
 		}
@@ -548,7 +579,8 @@
 				SI_SM_CALL_WITHOUT_DELAY);
 
 	case BT_STATE_READ_BYTES:
-		if (!(status & BT_H_BUSY))	/* check in case of retry */
+		if (!(status & BT_H_BUSY))
+			/* check in case of retry */
 			BT_CONTROL(BT_H_BUSY);
 		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
 		i = read_all_bytes(bt);		/* true == packet seq match */
@@ -599,8 +631,10 @@
 		BT_STATE_CHANGE(BT_STATE_XACTION_START,
 				SI_SM_CALL_WITH_DELAY);
 
-	/* Get BT Capabilities, using timing of upper level state machine.
-	   Set outreqs to prevent infinite loop on timeout. */
+	/*
+	 * Get BT Capabilities, using timing of upper level state machine.
+	 * Set outreqs to prevent infinite loop on timeout.
+	 */
 	case BT_STATE_CAPABILITIES_BEGIN:
 		bt->BT_CAP_outreqs = 1;
 		{
@@ -638,10 +672,12 @@
 
 static int bt_detect(struct si_sm_data *bt)
 {
-	/* It's impossible for the BT status and interrupt registers to be
-	   all 1's, (assuming a properly functioning, self-initialized BMC)
-	   but that's what you get from reading a bogus address, so we
-	   test that first.  The calling routine uses negative logic. */
+	/*
+	 * It's impossible for the BT status and interrupt registers to be
+	 * all 1's (assuming a properly functioning, self-initialized BMC),
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.  The calling routine uses negative logic.
+	 */
 
 	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
 		return 1;
@@ -658,8 +694,7 @@
 	return sizeof(struct si_sm_data);
 }
 
-struct si_sm_handlers bt_smi_handlers =
-{
+struct si_sm_handlers bt_smi_handlers = {
 	.init_data		= bt_init_data,
 	.start_transaction	= bt_start_transaction,
 	.get_result		= bt_get_result,
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index c1b8228..8070487 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -60,37 +60,58 @@
 
 /* The states the KCS driver may be in. */
 enum kcs_states {
-	KCS_IDLE,		/* The KCS interface is currently
-                                   doing nothing. */
-	KCS_START_OP,		/* We are starting an operation.  The
-				   data is in the output buffer, but
-				   nothing has been done to the
-				   interface yet.  This was added to
-				   the state machine in the spec to
-				   wait for the initial IBF. */
-	KCS_WAIT_WRITE_START,	/* We have written a write cmd to the
-				   interface. */
-	KCS_WAIT_WRITE,		/* We are writing bytes to the
-                                   interface. */
-	KCS_WAIT_WRITE_END,	/* We have written the write end cmd
-                                   to the interface, and still need to
-                                   write the last byte. */
-	KCS_WAIT_READ,		/* We are waiting to read data from
-				   the interface. */
-	KCS_ERROR0,		/* State to transition to the error
-				   handler, this was added to the
-				   state machine in the spec to be
-				   sure IBF was there. */
-	KCS_ERROR1,		/* First stage error handler, wait for
-                                   the interface to respond. */
-	KCS_ERROR2,		/* The abort cmd has been written,
-				   wait for the interface to
-				   respond. */
-	KCS_ERROR3,		/* We wrote some data to the
-				   interface, wait for it to switch to
-				   read mode. */
-	KCS_HOSED		/* The hardware failed to follow the
-				   state machine. */
+	/* The KCS interface is currently doing nothing. */
+	KCS_IDLE,
+
+	/*
+	 * We are starting an operation.  The data is in the output
+	 * buffer, but nothing has been done to the interface yet.  This
+	 * was added to the state machine in the spec to wait for the
+	 * initial IBF.
+	 */
+	KCS_START_OP,
+
+	/* We have written a write cmd to the interface. */
+	KCS_WAIT_WRITE_START,
+
+	/* We are writing bytes to the interface. */
+	KCS_WAIT_WRITE,
+
+	/*
+	 * We have written the write end cmd to the interface, and
+	 * still need to write the last byte.
+	 */
+	KCS_WAIT_WRITE_END,
+
+	/* We are waiting to read data from the interface. */
+	KCS_WAIT_READ,
+
+	/*
+	 * State to transition to the error handler, this was added to
+	 * the state machine in the spec to be sure IBF was there.
+	 */
+	KCS_ERROR0,
+
+	/*
+	 * First stage error handler, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR1,
+
+	/*
+	 * The abort cmd has been written, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR2,
+
+	/*
+	 * We wrote some data to the interface, wait for it to switch
+	 * to read mode.
+	 */
+	KCS_ERROR3,
+
+	/* The hardware failed to follow the state machine. */
+	KCS_HOSED
 };
 
 #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
@@ -102,8 +123,7 @@
 #define MAX_ERROR_RETRIES 10
 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
 
-struct si_sm_data
-{
+struct si_sm_data {
 	enum kcs_states  state;
 	struct si_sm_io *io;
 	unsigned char    write_data[MAX_KCS_WRITE_SIZE];
@@ -187,7 +207,8 @@
 	(kcs->error_retries)++;
 	if (kcs->error_retries > MAX_ERROR_RETRIES) {
 		if (kcs_debug & KCS_DEBUG_ENABLE)
-			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason);
+			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+			       reason);
 		kcs->state = KCS_HOSED;
 	} else {
 		kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
@@ -271,10 +292,9 @@
 
 	if (kcs_debug & KCS_DEBUG_MSG) {
 		printk(KERN_DEBUG "start_kcs_transaction -");
-		for (i = 0; i < size; i ++) {
+		for (i = 0; i < size; i++)
 			printk(" %02x", (unsigned char) (data [i]));
-		}
-		printk ("\n");
+		printk("\n");
 	}
 	kcs->error_retries = 0;
 	memcpy(kcs->write_data, data, size);
@@ -305,9 +325,11 @@
 		kcs->read_pos = 3;
 	}
 	if (kcs->truncated) {
-		/* Report a truncated error.  We might overwrite
-		   another error, but that's too bad, the user needs
-		   to know it was truncated. */
+		/*
+		 * Report a truncated error.  We might overwrite
+		 * another error, but that's too bad, the user needs
+		 * to know it was truncated.
+		 */
 		data[2] = IPMI_ERR_MSG_TRUNCATED;
 		kcs->truncated = 0;
 	}
@@ -315,9 +337,11 @@
 	return kcs->read_pos;
 }
 
-/* This implements the state machine defined in the IPMI manual, see
-   that for details on how this works.  Divide that flowchart into
-   sections delimited by "Wait for IBF" and this will become clear. */
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works.  Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
 static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 {
 	unsigned char status;
@@ -388,11 +412,12 @@
 			write_next_byte(kcs);
 		}
 		break;
-		
+
 	case KCS_WAIT_WRITE_END:
 		if (state != KCS_WRITE_STATE) {
 			start_error_recovery(kcs,
-					     "Not in write state for write end");
+					     "Not in write state"
+					     " for write end");
 			break;
 		}
 		clear_obf(kcs, status);
@@ -413,13 +438,15 @@
 				return SI_SM_CALL_WITH_DELAY;
 			read_next_byte(kcs);
 		} else {
-			/* We don't implement this exactly like the state
-			   machine in the spec.  Some broken hardware
-			   does not write the final dummy byte to the
-			   read register.  Thus obf will never go high
-			   here.  We just go straight to idle, and we
-			   handle clearing out obf in idle state if it
-			   happens to come in. */
+			/*
+			 * We don't implement this exactly like the state
+			 * machine in the spec.  Some broken hardware
+			 * does not write the final dummy byte to the
+			 * read register.  Thus obf will never go high
+			 * here.  We just go straight to idle, and we
+			 * handle clearing out obf in idle state if it
+			 * happens to come in.
+			 */
 			clear_obf(kcs, status);
 			kcs->orig_write_count = 0;
 			kcs->state = KCS_IDLE;
@@ -430,7 +457,8 @@
 	case KCS_ERROR0:
 		clear_obf(kcs, status);
 		status = read_status(kcs);
-		if  (GET_STATUS_OBF(status)) /* controller isn't responding */
+		if (GET_STATUS_OBF(status))
+			/* controller isn't responding */
 			if (time_before(jiffies, kcs->error0_timeout))
 				return SI_SM_CALL_WITH_TICK_DELAY;
 		write_cmd(kcs, KCS_GET_STATUS_ABORT);
@@ -442,7 +470,7 @@
 		write_data(kcs, 0);
 		kcs->state = KCS_ERROR2;
 		break;
-		
+
 	case KCS_ERROR2:
 		if (state != KCS_READ_STATE) {
 			start_error_recovery(kcs,
@@ -456,7 +484,7 @@
 		write_data(kcs, KCS_READ_BYTE);
 		kcs->state = KCS_ERROR3;
 		break;
-		
+
 	case KCS_ERROR3:
 		if (state != KCS_IDLE_STATE) {
 			start_error_recovery(kcs,
@@ -475,7 +503,7 @@
 			return SI_SM_TRANSACTION_COMPLETE;
 		}
 		break;
-			
+
 	case KCS_HOSED:
 		break;
 	}
@@ -495,10 +523,12 @@
 
 static int kcs_detect(struct si_sm_data *kcs)
 {
-	/* It's impossible for the KCS status register to be all 1's,
-	   (assuming a properly functioning, self-initialized BMC)
-	   but that's what you get from reading a bogus address, so we
-	   test that first. */
+	/*
+	 * It's impossible for the KCS status register to be all 1's
+	 * (assuming a properly functioning, self-initialized BMC),
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
 	if (read_status(kcs) == 0xff)
 		return 1;
 
@@ -509,8 +539,7 @@
 {
 }
 
-struct si_sm_handlers kcs_smi_handlers =
-{
+struct si_sm_handlers kcs_smi_handlers = {
 	.init_data         = init_kcs_data,
 	.start_transaction = start_kcs_transaction,
 	.get_result        = get_kcs_result,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 32b2b22..8a59aaa 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -47,7 +47,7 @@
 
 #define PFX "IPMI message handler: "
 
-#define IPMI_DRIVER_VERSION "39.1"
+#define IPMI_DRIVER_VERSION "39.2"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
@@ -63,16 +63,16 @@
 
 #define MAX_EVENTS_IN_QUEUE	25
 
-/* Don't let a message sit in a queue forever, always time it with at lest
-   the max message timer.  This is in milliseconds. */
+/*
+ * Don't let a message sit in a queue forever; always time it with at least
+ * the max message timer.  This is in milliseconds.
+ */
 #define MAX_MSG_TIMEOUT		60000
 
-
 /*
  * The main "user" data structure.
  */
-struct ipmi_user
-{
+struct ipmi_user {
 	struct list_head link;
 
 	/* Set to "0" when the user is destroyed. */
@@ -91,8 +91,7 @@
 	int gets_events;
 };
 
-struct cmd_rcvr
-{
+struct cmd_rcvr {
 	struct list_head link;
 
 	ipmi_user_t   user;
@@ -106,12 +105,12 @@
 	 * or change any data until the RCU period completes.  So we
 	 * use this next variable during mass deletion so we can have
 	 * a list and don't have to wait and restart the search on
-	 * every individual deletion of a command. */
+	 * every individual deletion of a command.
+	 */
 	struct cmd_rcvr *next;
 };
 
-struct seq_table
-{
+struct seq_table {
 	unsigned int         inuse : 1;
 	unsigned int         broadcast : 1;
 
@@ -119,53 +118,60 @@
 	unsigned long        orig_timeout;
 	unsigned int         retries_left;
 
-	/* To verify on an incoming send message response that this is
-           the message that the response is for, we keep a sequence id
-           and increment it every time we send a message. */
+	/*
+	 * To verify on an incoming send message response that this is
+	 * the message that the response is for, we keep a sequence id
+	 * and increment it every time we send a message.
+	 */
 	long                 seqid;
 
-	/* This is held so we can properly respond to the message on a
-           timeout, and it is used to hold the temporary data for
-           retransmission, too. */
+	/*
+	 * This is held so we can properly respond to the message on a
+	 * timeout, and it is used to hold the temporary data for
+	 * retransmission, too.
+	 */
 	struct ipmi_recv_msg *recv_msg;
 };
 
-/* Store the information in a msgid (long) to allow us to find a
-   sequence table entry from the msgid. */
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
 
 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
 	do {								\
 		seq = ((msgid >> 26) & 0x3f);				\
 		seqid = (msgid & 0x3fffff);				\
-        } while (0)
+	} while (0)
 
 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
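
The msgid packing above can be exercised in isolation.  Below is a
hypothetical, standalone userspace sketch (not part of the patch) that
reuses the same macro bodies to pack a (seq, seqid) pair into a msgid
and unpack it again:

	#include <stdio.h>

	#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
	#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)		\
		do {						\
			seq = ((msgid >> 26) & 0x3f);		\
			seqid = (msgid & 0x3fffff);		\
		} while (0)

	int main(void)
	{
		/* seq 5, seqid 0x123 (both chosen arbitrarily) */
		long msgid = STORE_SEQ_IN_MSGID(5L, 0x123L);
		long seq, seqid;

		GET_SEQ_FROM_MSGID(msgid, seq, seqid);
		/* Prints: msgid=0x14000123 seq=5 seqid=0x123 */
		printf("msgid=%#lx seq=%ld seqid=%#lx\n", msgid, seq, seqid);
		return 0;
	}

Note that the store side masks seqid to 26 bits while the unpack side
masks it to 22; NEXT_SEQID() keeps generated seqids within 22 bits, so
the two stay consistent in practice.
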
 
-struct ipmi_channel
-{
+struct ipmi_channel {
 	unsigned char medium;
 	unsigned char protocol;
 
-	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
-	   but may be changed by the user. */
+	/*
+	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
+	 * but may be changed by the user.
+	 */
 	unsigned char address;
 
-	/* My LUN.  This should generally stay the SMS LUN, but just in
-	   case... */
+	/*
+	 * My LUN.  This should generally stay the SMS LUN, but just in
+	 * case...
+	 */
 	unsigned char lun;
 };
 
 #ifdef CONFIG_PROC_FS
-struct ipmi_proc_entry
-{
+struct ipmi_proc_entry {
 	char                   *name;
 	struct ipmi_proc_entry *next;
 };
 #endif
 
-struct bmc_device
-{
+struct bmc_device {
 	struct platform_device *dev;
 	struct ipmi_device_id  id;
 	unsigned char          guid[16];
@@ -186,10 +192,108 @@
 	struct device_attribute aux_firmware_rev_attr;
 };
 
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+	/* Commands we got from the user that were invalid. */
+	IPMI_STAT_sent_invalid_commands = 0,
+
+	/* Commands we sent to the MC. */
+	IPMI_STAT_sent_local_commands,
+
+	/* Responses from the MC that were delivered to a user. */
+	IPMI_STAT_handled_local_responses,
+
+	/* Responses from the MC that were not delivered to a user. */
+	IPMI_STAT_unhandled_local_responses,
+
+	/* Commands we sent out to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_commands,
+
+	/* Commands sent on the IPMB that had errors on the SEND CMD */
+	IPMI_STAT_sent_ipmb_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_ipmb_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_ipmb_commands,
+
+	/*
+	 * This is like above, but for broadcasts.  Broadcasts are
+	 * *not* included in the above count (they are expected to
+	 * time out).
+	 */
+	IPMI_STAT_timed_out_ipmb_broadcasts,
+
+	/* Responses I have sent to the IPMB bus. */
+	IPMI_STAT_sent_ipmb_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_ipmb_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_ipmb_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_ipmb_responses,
+
+	/* Commands we sent out on the LAN interface. */
+	IPMI_STAT_sent_lan_commands,
+
+	/* Commands sent on the LAN that had errors on the SEND CMD */
+	IPMI_STAT_sent_lan_command_errs,
+
+	/* Each retransmit increments this count. */
+	IPMI_STAT_retransmitted_lan_commands,
+
+	/*
+	 * When a message times out (runs out of retransmits) this is
+	 * incremented.
+	 */
+	IPMI_STAT_timed_out_lan_commands,
+
+	/* Responses I have sent out on the LAN interface. */
+	IPMI_STAT_sent_lan_responses,
+
+	/* The response was delivered to the user. */
+	IPMI_STAT_handled_lan_responses,
+
+	/* The response had invalid data in it. */
+	IPMI_STAT_invalid_lan_responses,
+
+	/* The response didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_lan_responses,
+
+	/* The command was delivered to the user. */
+	IPMI_STAT_handled_commands,
+
+	/* The command had invalid data in it. */
+	IPMI_STAT_invalid_commands,
+
+	/* The command didn't have anyone waiting for it. */
+	IPMI_STAT_unhandled_commands,
+
+	/* Invalid data in an event. */
+	IPMI_STAT_invalid_events,
+
+	/* Events that were received with the proper format. */
+	IPMI_STAT_events,
+
+
+	/* This *must* remain last, add new values above this. */
+	IPMI_NUM_STATS
+};
+
+
 #define IPMI_IPMB_NUM_SEQ	64
 #define IPMI_MAX_CHANNELS       16
-struct ipmi_smi
-{
+struct ipmi_smi {
 	/* What interface number are we? */
 	int intf_num;
 
@@ -198,8 +302,10 @@
 	/* Used for a list of interfaces. */
 	struct list_head link;
 
-	/* The list of upper layers that are using me.  seq_lock
-	 * protects this. */
+	/*
+	 * The list of upper layers that are using me.  seq_lock
+	 * protects this.
+	 */
 	struct list_head users;
 
 	/* Information to supply to users. */
@@ -213,10 +319,12 @@
 	char *my_dev_name;
 	char *sysfs_name;
 
-	/* This is the lower-layer's sender routine.  Note that you
+	/*
+	 * This is the lower-layer's sender routine.  Note that you
 	 * must either be holding the ipmi_interfaces_mutex or be in
 	 * an unpreemptible region to use this.  You must fetch the
-	 * value into a local variable and make sure it is not NULL. */
+	 * value into a local variable and make sure it is not NULL.
+	 */
 	struct ipmi_smi_handlers *handlers;
 	void                     *send_info;
 
@@ -229,34 +337,45 @@
 	/* Driver-model device for the system interface. */
 	struct device          *si_dev;
 
-	/* A table of sequence numbers for this interface.  We use the
-           sequence numbers for IPMB messages that go out of the
-           interface to match them up with their responses.  A routine
-           is called periodically to time the items in this list. */
+	/*
+	 * A table of sequence numbers for this interface.  We use the
+	 * sequence numbers for IPMB messages that go out of the
+	 * interface to match them up with their responses.  A routine
+	 * is called periodically to time the items in this list.
+	 */
 	spinlock_t       seq_lock;
 	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
 	int curr_seq;
 
-	/* Messages that were delayed for some reason (out of memory,
-           for instance), will go in here to be processed later in a
-           periodic timer interrupt. */
+	/*
+	 * Messages that were delayed for some reason (out of memory,
+	 * for instance) will go in here to be processed later in a
+	 * periodic timer interrupt.
+	 */
 	spinlock_t       waiting_msgs_lock;
 	struct list_head waiting_msgs;
 
-	/* The list of command receivers that are registered for commands
-	   on this interface. */
+	/*
+	 * The list of command receivers that are registered for commands
+	 * on this interface.
+	 */
 	struct mutex     cmd_rcvrs_mutex;
 	struct list_head cmd_rcvrs;
 
-	/* Events that were queues because no one was there to receive
-           them. */
+	/*
+	 * Events that were queued because no one was there to receive
+	 * them.
+	 */
 	spinlock_t       events_lock; /* For dealing with event stuff. */
 	struct list_head waiting_events;
 	unsigned int     waiting_events_count; /* How many events in queue? */
-	int              delivering_events;
+	char             delivering_events;
+	char             event_msg_printed;
 
-	/* The event receiver for my BMC, only really used at panic
-	   shutdown as a place to store this. */
+	/*
+	 * The event receiver for my BMC, only really used at panic
+	 * shutdown as a place to store this.
+	 */
 	unsigned char event_receiver;
 	unsigned char event_receiver_lun;
 	unsigned char local_sel_device;
@@ -268,14 +387,18 @@
 	int auto_maintenance_timeout;
 	spinlock_t maintenance_mode_lock; /* Used in a timer... */
 
-	/* A cheap hack, if this is non-null and a message to an
-	   interface comes in with a NULL user, call this routine with
-	   it.  Note that the message will still be freed by the
-	   caller.  This only works on the system interface. */
+	/*
+	 * A cheap hack: if this is non-null and a message to an
+	 * interface comes in with a NULL user, call this routine with
+	 * it.  Note that the message will still be freed by the
+	 * caller.  This only works on the system interface.
+	 */
 	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
 
-	/* When we are scanning the channels for an SMI, this will
-	   tell which channel we are scanning. */
+	/*
+	 * When we are scanning the channels for an SMI, this will
+	 * tell which channel we are scanning.
+	 */
 	int curr_channel;
 
 	/* Channel information */
@@ -285,74 +408,14 @@
 	struct proc_dir_entry *proc_dir;
 	char                  proc_dir_name[10];
 
-	spinlock_t   counter_lock; /* For making counters atomic. */
+	atomic_t stats[IPMI_NUM_STATS];
 
-	/* Commands we got that were invalid. */
-	unsigned int sent_invalid_commands;
-
-	/* Commands we sent to the MC. */
-	unsigned int sent_local_commands;
-	/* Responses from the MC that were delivered to a user. */
-	unsigned int handled_local_responses;
-	/* Responses from the MC that were not delivered to a user. */
-	unsigned int unhandled_local_responses;
-
-	/* Commands we sent out to the IPMB bus. */
-	unsigned int sent_ipmb_commands;
-	/* Commands sent on the IPMB that had errors on the SEND CMD */
-	unsigned int sent_ipmb_command_errs;
-	/* Each retransmit increments this count. */
-	unsigned int retransmitted_ipmb_commands;
-	/* When a message times out (runs out of retransmits) this is
-           incremented. */
-	unsigned int timed_out_ipmb_commands;
-
-	/* This is like above, but for broadcasts.  Broadcasts are
-           *not* included in the above count (they are expected to
-           time out). */
-	unsigned int timed_out_ipmb_broadcasts;
-
-	/* Responses I have sent to the IPMB bus. */
-	unsigned int sent_ipmb_responses;
-
-	/* The response was delivered to the user. */
-	unsigned int handled_ipmb_responses;
-	/* The response had invalid data in it. */
-	unsigned int invalid_ipmb_responses;
-	/* The response didn't have anyone waiting for it. */
-	unsigned int unhandled_ipmb_responses;
-
-	/* Commands we sent out to the IPMB bus. */
-	unsigned int sent_lan_commands;
-	/* Commands sent on the IPMB that had errors on the SEND CMD */
-	unsigned int sent_lan_command_errs;
-	/* Each retransmit increments this count. */
-	unsigned int retransmitted_lan_commands;
-	/* When a message times out (runs out of retransmits) this is
-           incremented. */
-	unsigned int timed_out_lan_commands;
-
-	/* Responses I have sent to the IPMB bus. */
-	unsigned int sent_lan_responses;
-
-	/* The response was delivered to the user. */
-	unsigned int handled_lan_responses;
-	/* The response had invalid data in it. */
-	unsigned int invalid_lan_responses;
-	/* The response didn't have anyone waiting for it. */
-	unsigned int unhandled_lan_responses;
-
-	/* The command was delivered to the user. */
-	unsigned int handled_commands;
-	/* The command had invalid data in it. */
-	unsigned int invalid_commands;
-	/* The command didn't have anyone waiting for it. */
-	unsigned int unhandled_commands;
-
-	/* Invalid data in an event. */
-	unsigned int invalid_events;
-	/* Events that were received with the proper format. */
-	unsigned int events;
+	/*
+	 * Duplicate of the run_to_completion flag in the smb_info,
+	 * smi_info and ipmi_serial_info structures.  Used to decrease
+	 * the number of parameters passed by the "low" level IPMI code.
+	 */
+	int run_to_completion;
 };
 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
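
to_si_intf_from_dev() above is a thin wrapper around the kernel's
container_of(), which recovers the enclosing structure from a pointer
to one of its members.  A hypothetical userspace sketch of that
pattern (not part of the patch, built directly on offsetof()):

	#include <stddef.h>
	#include <stdio.h>

	/* Same idea as the kernel's container_of(). */
	#define my_container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct demo_intf {
		int intf_num;
		struct { int id; } dev;
	};

	int main(void)
	{
		struct demo_intf intf = { .intf_num = 3, .dev = { .id = 7 } };
		void *member_ptr = &intf.dev;	/* all that gets handed back */
		struct demo_intf *back =
			my_container_of(member_ptr, struct demo_intf, dev);

		printf("intf_num=%d\n", back->intf_num);	/* prints intf_num=3 */
		return 0;
	}
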
 
@@ -368,12 +431,19 @@
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
 
-/* List of watchers that want to know when smi's are added and
-   deleted. */
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
 static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
 
+#define ipmi_inc_stat(intf, stat) \
+	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+
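
The two helpers above build the array index from the short stat name
via ## token pasting, so call sites can write
ipmi_inc_stat(intf, sent_ipmb_commands) instead of spelling out the
enum constant.  A minimal, hypothetical userspace sketch of the same
pattern (plain int instead of atomic_t, not part of the patch):

	#include <stdio.h>

	enum demo_stat_indexes { DEMO_STAT_events = 0, DEMO_NUM_STATS };

	struct demo_intf { int stats[DEMO_NUM_STATS]; };

	/* "stat" is pasted onto DEMO_STAT_ to form the enum index. */
	#define demo_inc_stat(intf, stat) ((intf)->stats[DEMO_STAT_ ## stat]++)
	#define demo_get_stat(intf, stat) ((intf)->stats[DEMO_STAT_ ## stat])

	int main(void)
	{
		struct demo_intf intf = { { 0 } };

		demo_inc_stat(&intf, events);
		printf("events=%d\n", demo_get_stat(&intf, events));	/* events=1 */
		return 0;
	}
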
 static void free_recv_msg_list(struct list_head *q)
 {
 	struct ipmi_recv_msg *msg, *msg2;
@@ -417,10 +487,8 @@
 
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
 		if ((intf->seq_table[i].inuse)
-		    && (intf->seq_table[i].recv_msg))
-		{
+					&& (intf->seq_table[i].recv_msg))
 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
-		}
 	}
 }
 
@@ -487,6 +555,7 @@
 	}
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
@@ -495,6 +564,7 @@
 	mutex_unlock(&smi_watchers_mutex);
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
 
 /*
  * Must be called with smi_watchers_mutex held.
@@ -530,8 +600,7 @@
 	}
 
 	if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
-	    || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-	{
+	    || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
 		struct ipmi_ipmb_addr *ipmb_addr1
 		    = (struct ipmi_ipmb_addr *) addr1;
 		struct ipmi_ipmb_addr *ipmb_addr2
@@ -559,9 +628,8 @@
 
 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
 {
-	if (len < sizeof(struct ipmi_system_interface_addr)) {
+	if (len < sizeof(struct ipmi_system_interface_addr))
 		return -EINVAL;
-	}
 
 	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
 		if (addr->channel != IPMI_BMC_CHANNEL)
@@ -575,23 +643,21 @@
 		return -EINVAL;
 
 	if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
-	    || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-	{
-		if (len < sizeof(struct ipmi_ipmb_addr)) {
+	    || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+		if (len < sizeof(struct ipmi_ipmb_addr))
 			return -EINVAL;
-		}
 		return 0;
 	}
 
 	if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
-		if (len < sizeof(struct ipmi_lan_addr)) {
+		if (len < sizeof(struct ipmi_lan_addr))
 			return -EINVAL;
-		}
 		return 0;
 	}
 
 	return -EINVAL;
 }
+EXPORT_SYMBOL(ipmi_validate_addr);
 
 unsigned int ipmi_addr_length(int addr_type)
 {
@@ -599,34 +665,28 @@
 		return sizeof(struct ipmi_system_interface_addr);
 
 	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
-	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-	{
+			|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
 		return sizeof(struct ipmi_ipmb_addr);
-	}
 
 	if (addr_type == IPMI_LAN_ADDR_TYPE)
 		return sizeof(struct ipmi_lan_addr);
 
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_addr_length);
 
 static void deliver_response(struct ipmi_recv_msg *msg)
 {
 	if (!msg->user) {
 		ipmi_smi_t    intf = msg->user_msg_data;
-		unsigned long flags;
 
 		/* Special handling for NULL users. */
 		if (intf->null_user_handler) {
 			intf->null_user_handler(intf, msg);
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->handled_local_responses++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, handled_local_responses);
 		} else {
 			/* No handler, so give up. */
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->unhandled_local_responses++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, unhandled_local_responses);
 		}
 		ipmi_free_recv_msg(msg);
 	} else {
@@ -646,9 +706,11 @@
 	deliver_response(msg);
 }
 
-/* Find the next sequence number not being used and add the given
-   message with the given timeout to the sequence table.  This must be
-   called with the interface's seq_lock held. */
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table.  This must be
+ * called with the interface's seq_lock held.
+ */
 static int intf_next_seq(ipmi_smi_t           intf,
 			 struct ipmi_recv_msg *recv_msg,
 			 unsigned long        timeout,
@@ -660,10 +722,8 @@
 	int          rv = 0;
 	unsigned int i;
 
-	for (i = intf->curr_seq;
-	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
-	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
-	{
+	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
 		if (!intf->seq_table[i].inuse)
 			break;
 	}
@@ -671,8 +731,10 @@
 	if (!intf->seq_table[i].inuse) {
 		intf->seq_table[i].recv_msg = recv_msg;
 
-		/* Start with the maximum timeout, when the send response
-		   comes in we will start the real timer. */
+		/*
+		 * Start with the maximum timeout; when the send response
+		 * comes in we will start the real timer.
+		 */
 		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
 		intf->seq_table[i].orig_timeout = timeout;
 		intf->seq_table[i].retries_left = retries;
@@ -685,15 +747,17 @@
 	} else {
 		rv = -EAGAIN;
 	}
-	
+
 	return rv;
 }
 
-/* Return the receive message for the given sequence number and
-   release the sequence number so it can be reused.  Some other data
-   is passed in to be sure the message matches up correctly (to help
-   guard against message coming in after their timeout and the
-   sequence number being reused). */
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused.  Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
 static int intf_find_seq(ipmi_smi_t           intf,
 			 unsigned char        seq,
 			 short                channel,
@@ -712,11 +776,9 @@
 	if (intf->seq_table[seq].inuse) {
 		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
 
-		if ((msg->addr.channel == channel)
-		    && (msg->msg.cmd == cmd)
-		    && (msg->msg.netfn == netfn)
-		    && (ipmi_addr_equal(addr, &(msg->addr))))
-		{
+		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+				&& (msg->msg.netfn == netfn)
+				&& (ipmi_addr_equal(addr, &(msg->addr)))) {
 			*recv_msg = msg;
 			intf->seq_table[seq].inuse = 0;
 			rv = 0;
@@ -741,11 +803,12 @@
 	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 
 	spin_lock_irqsave(&(intf->seq_lock), flags);
-	/* We do this verification because the user can be deleted
-           while a message is outstanding. */
+	/*
+	 * We do this verification because the user can be deleted
+	 * while a message is outstanding.
+	 */
 	if ((intf->seq_table[seq].inuse)
-	    && (intf->seq_table[seq].seqid == seqid))
-	{
+				&& (intf->seq_table[seq].seqid == seqid)) {
 		struct seq_table *ent = &(intf->seq_table[seq]);
 		ent->timeout = ent->orig_timeout;
 		rv = 0;
@@ -770,11 +833,12 @@
 	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 
 	spin_lock_irqsave(&(intf->seq_lock), flags);
-	/* We do this verification because the user can be deleted
-           while a message is outstanding. */
+	/*
+	 * We do this verification because the user can be deleted
+	 * while a message is outstanding.
+	 */
 	if ((intf->seq_table[seq].inuse)
-	    && (intf->seq_table[seq].seqid == seqid))
-	{
+				&& (intf->seq_table[seq].seqid == seqid)) {
 		struct seq_table *ent = &(intf->seq_table[seq]);
 
 		ent->inuse = 0;
@@ -800,24 +864,30 @@
 	int           rv = 0;
 	ipmi_smi_t    intf;
 
-	/* There is no module usecount here, because it's not
-           required.  Since this can only be used by and called from
-           other modules, they will implicitly use this module, and
-           thus this can't be removed unless the other modules are
-           removed. */
+	/*
+	 * There is no module usecount here, because it's not
+	 * required.  Since this can only be used by and called from
+	 * other modules, they will implicitly use this module, and
+	 * thus this can't be removed unless the other modules are
+	 * removed.
+	 */
 
 	if (handler == NULL)
 		return -EINVAL;
 
-	/* Make sure the driver is actually initialized, this handles
-	   problems with initialization order. */
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
 	if (!initialized) {
 		rv = ipmi_init_msghandler();
 		if (rv)
 			return rv;
 
-		/* The init code doesn't return an error if it was turned
-		   off, but it won't initialize.  Check that. */
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but it won't initialize.  Check that.
+		 */
 		if (!initialized)
 			return -ENODEV;
 	}
@@ -858,8 +928,10 @@
 		}
 	}
 
-	/* Hold the lock so intf->handlers is guaranteed to be good
-	 * until now */
+	/*
+	 * Hold the lock so intf->handlers is guaranteed to be good
+	 * until now
+	 */
 	mutex_unlock(&ipmi_interfaces_mutex);
 
 	new_user->valid = 1;
@@ -876,6 +948,7 @@
 	kfree(new_user);
 	return rv;
 }
+EXPORT_SYMBOL(ipmi_create_user);
 
 static void free_user(struct kref *ref)
 {
@@ -899,8 +972,7 @@
 
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
 		if (intf->seq_table[i].inuse
-		    && (intf->seq_table[i].recv_msg->user == user))
-		{
+		    && (intf->seq_table[i].recv_msg->user == user)) {
 			intf->seq_table[i].inuse = 0;
 			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
@@ -943,6 +1015,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_destroy_user);
 
 void ipmi_get_version(ipmi_user_t   user,
 		      unsigned char *major,
@@ -951,6 +1024,7 @@
 	*major = user->intf->ipmi_version_major;
 	*minor = user->intf->ipmi_version_minor;
 }
+EXPORT_SYMBOL(ipmi_get_version);
 
 int ipmi_set_my_address(ipmi_user_t   user,
 			unsigned int  channel,
@@ -961,6 +1035,7 @@
 	user->intf->channels[channel].address = address;
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_set_my_address);
 
 int ipmi_get_my_address(ipmi_user_t   user,
 			unsigned int  channel,
@@ -971,6 +1046,7 @@
 	*address = user->intf->channels[channel].address;
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_get_my_address);
 
 int ipmi_set_my_LUN(ipmi_user_t   user,
 		    unsigned int  channel,
@@ -981,6 +1057,7 @@
 	user->intf->channels[channel].lun = LUN & 0x3;
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_set_my_LUN);
 
 int ipmi_get_my_LUN(ipmi_user_t   user,
 		    unsigned int  channel,
@@ -991,6 +1068,7 @@
 	*address = user->intf->channels[channel].lun;
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_get_my_LUN);
 
 int ipmi_get_maintenance_mode(ipmi_user_t user)
 {
@@ -1075,6 +1153,11 @@
 		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
 			list_move_tail(&msg->link, &msgs);
 		intf->waiting_events_count = 0;
+		if (intf->event_msg_printed) {
+			printk(KERN_WARNING PFX "Event queue no longer"
+			       " full\n");
+			intf->event_msg_printed = 0;
+		}
 
 		intf->delivering_events = 1;
 		spin_unlock_irqrestore(&intf->events_lock, flags);
@@ -1094,6 +1177,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_set_gets_events);
 
 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
 				      unsigned char netfn,
@@ -1159,6 +1243,7 @@
 
 	return rv;
 }
+EXPORT_SYMBOL(ipmi_register_for_cmd);
 
 int ipmi_unregister_for_cmd(ipmi_user_t   user,
 			    unsigned char netfn,
@@ -1196,19 +1281,13 @@
 	}
 	return rv;
 }
-
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
-{
-	ipmi_smi_t intf = user->intf;
-	if (intf->handlers)
-		intf->handlers->set_run_to_completion(intf->send_info, val);
-}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
 
 static unsigned char
 ipmb_checksum(unsigned char *data, int size)
 {
 	unsigned char csum = 0;
-	
+
 	for (; size > 0; size--, data++)
 		csum += *data;
 
@@ -1250,8 +1329,10 @@
 		= ipmb_checksum(&(smi_msg->data[i+6]),
 				smi_msg->data_size-6);
 
-	/* Add on the checksum size and the offset from the
-	   broadcast. */
+	/*
+	 * Add on the checksum size and the offset from the
+	 * broadcast.
+	 */
 	smi_msg->data_size += 1 + i;
 
 	smi_msg->msgid = msgid;
@@ -1287,17 +1368,21 @@
 		= ipmb_checksum(&(smi_msg->data[7]),
 				smi_msg->data_size-7);
 
-	/* Add on the checksum size and the offset from the
-	   broadcast. */
+	/*
+	 * Add on the checksum size and the offset from the
+	 * broadcast.
+	 */
 	smi_msg->data_size += 1;
 
 	smi_msg->msgid = msgid;
 }
 
-/* Separate from ipmi_request so that the user does not have to be
-   supplied in certain circumstances (mainly at panic time).  If
-   messages are supplied, they will be freed, even if an error
-   occurs. */
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time).  If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
 static int i_ipmi_request(ipmi_user_t          user,
 			  ipmi_smi_t           intf,
 			  struct ipmi_addr     *addr,
@@ -1319,19 +1404,18 @@
 	struct ipmi_smi_handlers *handlers;
 
 
-	if (supplied_recv) {
+	if (supplied_recv)
 		recv_msg = supplied_recv;
-	} else {
+	else {
 		recv_msg = ipmi_alloc_recv_msg();
-		if (recv_msg == NULL) {
+		if (recv_msg == NULL)
 			return -ENOMEM;
-		}
 	}
 	recv_msg->user_msg_data = user_msg_data;
 
-	if (supplied_smi) {
+	if (supplied_smi)
 		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
-	} else {
+	else {
 		smi_msg = ipmi_alloc_smi_msg();
 		if (smi_msg == NULL) {
 			ipmi_free_recv_msg(recv_msg);
@@ -1350,8 +1434,10 @@
 	if (user)
 		kref_get(&user->refcount);
 	recv_msg->msgid = msgid;
-	/* Store the message to send in the receive message so timeout
-	   responses can get the proper response data. */
+	/*
+	 * Store the message to send in the receive message so timeout
+	 * responses can get the proper response data.
+	 */
 	recv_msg->msg = *msg;
 
 	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
@@ -1365,9 +1451,7 @@
 
 		smi_addr = (struct ipmi_system_interface_addr *) addr;
 		if (smi_addr->lun > 3) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
@@ -1377,13 +1461,12 @@
 		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
 		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
 			|| (msg->cmd == IPMI_GET_MSG_CMD)
-			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
-		{
-			/* We don't let the user do these, since we manage
-			   the sequence numbers. */
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+			/*
+			 * We don't let the user do these, since we manage
+			 * the sequence numbers.
+			 */
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
@@ -1391,14 +1474,12 @@
 		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
 		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
 			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
-		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
-		{
+		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
 			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
 			intf->auto_maintenance_timeout
 				= IPMI_MAINTENANCE_MODE_TIMEOUT;
 			if (!intf->maintenance_mode
-			    && !intf->maintenance_mode_enable)
-			{
+			    && !intf->maintenance_mode_enable) {
 				intf->maintenance_mode_enable = 1;
 				maintenance_mode_update(intf);
 			}
@@ -1407,9 +1488,7 @@
 		}
 
 		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EMSGSIZE;
 			goto out_err;
 		}
@@ -1421,31 +1500,23 @@
 		if (msg->data_len > 0)
 			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
 		smi_msg->data_size = msg->data_len + 2;
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->sent_local_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, sent_local_commands);
 	} else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
-		   || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-	{
+		   || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
 		struct ipmi_ipmb_addr *ipmb_addr;
 		unsigned char         ipmb_seq;
 		long                  seqid;
 		int                   broadcast = 0;
 
 		if (addr->channel >= IPMI_MAX_CHANNELS) {
-		        spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
 
 		if (intf->channels[addr->channel].medium
-		    != IPMI_CHANNEL_MEDIUM_IPMB)
-		{
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+					!= IPMI_CHANNEL_MEDIUM_IPMB) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
@@ -1457,9 +1528,11 @@
 			retries = 4;
 		}
 		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
-		    /* Broadcasts add a zero at the beginning of the
-		       message, but otherwise is the same as an IPMB
-		       address. */
+		    /*
+		     * Broadcasts add a zero at the beginning of the
+		     * message, but are otherwise the same as an IPMB
+		     * address.
+		     */
 		    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
 		    broadcast = 1;
 		}
@@ -1469,21 +1542,19 @@
 		if (retry_time_ms == 0)
 		    retry_time_ms = 1000;
 
-		/* 9 for the header and 1 for the checksum, plus
-                   possibly one for the broadcast. */
+		/*
+		 * 9 for the header and 1 for the checksum, plus
+		 * possibly one for the broadcast.
+		 */
 		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EMSGSIZE;
 			goto out_err;
 		}
 
 		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
 		if (ipmb_addr->lun > 3) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
@@ -1491,29 +1562,31 @@
 		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
 
 		if (recv_msg->msg.netfn & 0x1) {
-			/* It's a response, so use the user's sequence
-                           from msgid. */
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_ipmb_responses++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			/*
+			 * It's a response, so use the user's sequence
+			 * from msgid.
+			 */
+			ipmi_inc_stat(intf, sent_ipmb_responses);
 			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
 					msgid, broadcast,
 					source_address, source_lun);
 
-			/* Save the receive message so we can use it
-			   to deliver the response. */
+			/*
+			 * Save the receive message so we can use it
+			 * to deliver the response.
+			 */
 			smi_msg->user_data = recv_msg;
 		} else {
 			/* It's a command, so get a sequence for it. */
 
 			spin_lock_irqsave(&(intf->seq_lock), flags);
 
-			spin_lock(&intf->counter_lock);
-			intf->sent_ipmb_commands++;
-			spin_unlock(&intf->counter_lock);
+			ipmi_inc_stat(intf, sent_ipmb_commands);
 
-			/* Create a sequence number with a 1 second
-                           timeout and 4 retries. */
+			/*
+			 * Create a sequence number with a 1 second
+			 * timeout and 4 retries.
+			 */
 			rv = intf_next_seq(intf,
 					   recv_msg,
 					   retry_time_ms,
@@ -1522,34 +1595,42 @@
 					   &ipmb_seq,
 					   &seqid);
 			if (rv) {
-				/* We have used up all the sequence numbers,
-				   probably, so abort. */
+				/*
+				 * We have used up all the sequence numbers,
+				 * probably, so abort.
+				 */
 				spin_unlock_irqrestore(&(intf->seq_lock),
 						       flags);
 				goto out_err;
 			}
 
-			/* Store the sequence number in the message,
-                           so that when the send message response
-                           comes back we can start the timer. */
+			/*
+			 * Store the sequence number in the message,
+			 * so that when the send message response
+			 * comes back we can start the timer.
+			 */
 			format_ipmb_msg(smi_msg, msg, ipmb_addr,
 					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
 					ipmb_seq, broadcast,
 					source_address, source_lun);
 
-			/* Copy the message into the recv message data, so we
-			   can retransmit it later if necessary. */
+			/*
+			 * Copy the message into the recv message data, so we
+			 * can retransmit it later if necessary.
+			 */
 			memcpy(recv_msg->msg_data, smi_msg->data,
 			       smi_msg->data_size);
 			recv_msg->msg.data = recv_msg->msg_data;
 			recv_msg->msg.data_len = smi_msg->data_size;
 
-			/* We don't unlock until here, because we need
-                           to copy the completed message into the
-                           recv_msg before we release the lock.
-                           Otherwise, race conditions may bite us.  I
-                           know that's pretty paranoid, but I prefer
-                           to be correct. */
+			/*
+			 * We don't unlock until here, because we need
+			 * to copy the completed message into the
+			 * recv_msg before we release the lock.
+			 * Otherwise, race conditions may bite us.  I
+			 * know that's pretty paranoid, but I prefer
+			 * to be correct.
+			 */
 			spin_unlock_irqrestore(&(intf->seq_lock), flags);
 		}
 	} else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
@@ -1558,21 +1639,16 @@
 		long                  seqid;
 
 		if (addr->channel >= IPMI_MAX_CHANNELS) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
 
 		if ((intf->channels[addr->channel].medium
-		    != IPMI_CHANNEL_MEDIUM_8023LAN)
+				!= IPMI_CHANNEL_MEDIUM_8023LAN)
 		    && (intf->channels[addr->channel].medium
-			!= IPMI_CHANNEL_MEDIUM_ASYNC))
-		{
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
@@ -1585,18 +1661,14 @@
 
 		/* 11 for the header and 1 for the checksum. */
 		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EMSGSIZE;
 			goto out_err;
 		}
 
 		lan_addr = (struct ipmi_lan_addr *) addr;
 		if (lan_addr->lun > 3) {
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_invalid_commands++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			ipmi_inc_stat(intf, sent_invalid_commands);
 			rv = -EINVAL;
 			goto out_err;
 		}
@@ -1604,28 +1676,30 @@
 		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
 
 		if (recv_msg->msg.netfn & 0x1) {
-			/* It's a response, so use the user's sequence
-                           from msgid. */
-			spin_lock_irqsave(&intf->counter_lock, flags);
-			intf->sent_lan_responses++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+			/*
+			 * It's a response, so use the user's sequence
+			 * from msgid.
+			 */
+			ipmi_inc_stat(intf, sent_lan_responses);
 			format_lan_msg(smi_msg, msg, lan_addr, msgid,
 				       msgid, source_lun);
 
-			/* Save the receive message so we can use it
-			   to deliver the response. */
+			/*
+			 * Save the receive message so we can use it
+			 * to deliver the response.
+			 */
 			smi_msg->user_data = recv_msg;
 		} else {
 			/* It's a command, so get a sequence for it. */
 
 			spin_lock_irqsave(&(intf->seq_lock), flags);
 
-			spin_lock(&intf->counter_lock);
-			intf->sent_lan_commands++;
-			spin_unlock(&intf->counter_lock);
+			ipmi_inc_stat(intf, sent_lan_commands);
 
-			/* Create a sequence number with a 1 second
-                           timeout and 4 retries. */
+			/*
+			 * Create a sequence number with a 1 second
+			 * timeout and 4 retries.
+			 */
 			rv = intf_next_seq(intf,
 					   recv_msg,
 					   retry_time_ms,
@@ -1634,40 +1708,46 @@
 					   &ipmb_seq,
 					   &seqid);
 			if (rv) {
-				/* We have used up all the sequence numbers,
-				   probably, so abort. */
+				/*
+				 * We have used up all the sequence numbers,
+				 * probably, so abort.
+				 */
 				spin_unlock_irqrestore(&(intf->seq_lock),
 						       flags);
 				goto out_err;
 			}
 
-			/* Store the sequence number in the message,
-                           so that when the send message response
-                           comes back we can start the timer. */
+			/*
+			 * Store the sequence number in the message,
+			 * so that when the send message response
+			 * comes back we can start the timer.
+			 */
 			format_lan_msg(smi_msg, msg, lan_addr,
 				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
 				       ipmb_seq, source_lun);
 
-			/* Copy the message into the recv message data, so we
-			   can retransmit it later if necessary. */
+			/*
+			 * Copy the message into the recv message data, so we
+			 * can retransmit it later if necessary.
+			 */
 			memcpy(recv_msg->msg_data, smi_msg->data,
 			       smi_msg->data_size);
 			recv_msg->msg.data = recv_msg->msg_data;
 			recv_msg->msg.data_len = smi_msg->data_size;
 
-			/* We don't unlock until here, because we need
-                           to copy the completed message into the
-                           recv_msg before we release the lock.
-                           Otherwise, race conditions may bite us.  I
-                           know that's pretty paranoid, but I prefer
-                           to be correct. */
+			/*
+			 * We don't unlock until here, because we need
+			 * to copy the completed message into the
+			 * recv_msg before we release the lock.
+			 * Otherwise, race conditions may bite us.  I
+			 * know that's pretty paranoid, but I prefer
+			 * to be correct.
+			 */
 			spin_unlock_irqrestore(&(intf->seq_lock), flags);
 		}
 	} else {
 	    /* Unknown address type. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->sent_invalid_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, sent_invalid_commands);
 		rv = -EINVAL;
 		goto out_err;
 	}
@@ -1735,6 +1815,7 @@
 			      retries,
 			      retry_time_ms);
 }
+EXPORT_SYMBOL(ipmi_request_settime);
 
 int ipmi_request_supply_msgs(ipmi_user_t          user,
 			     struct ipmi_addr     *addr,
@@ -1766,6 +1847,7 @@
 			      lun,
 			      -1, 0);
 }
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
 
 #ifdef CONFIG_PROC_FS
 static int ipmb_file_read_proc(char *page, char **start, off_t off,
@@ -1790,7 +1872,7 @@
 	char       *out = (char *) page;
 	ipmi_smi_t intf = data;
 
-	return sprintf(out, "%d.%d\n",
+	return sprintf(out, "%u.%u\n",
 		       ipmi_version_major(&intf->bmc->id),
 		       ipmi_version_minor(&intf->bmc->id));
 }
@@ -1801,65 +1883,65 @@
 	char       *out = (char *) page;
 	ipmi_smi_t intf = data;
 
-	out += sprintf(out, "sent_invalid_commands:       %d\n",
-		       intf->sent_invalid_commands);
-	out += sprintf(out, "sent_local_commands:         %d\n",
-		       intf->sent_local_commands);
-	out += sprintf(out, "handled_local_responses:     %d\n",
-		       intf->handled_local_responses);
-	out += sprintf(out, "unhandled_local_responses:   %d\n",
-		       intf->unhandled_local_responses);
-	out += sprintf(out, "sent_ipmb_commands:          %d\n",
-		       intf->sent_ipmb_commands);
-	out += sprintf(out, "sent_ipmb_command_errs:      %d\n",
-		       intf->sent_ipmb_command_errs);
-	out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
-		       intf->retransmitted_ipmb_commands);
-	out += sprintf(out, "timed_out_ipmb_commands:     %d\n",
-		       intf->timed_out_ipmb_commands);
-	out += sprintf(out, "timed_out_ipmb_broadcasts:   %d\n",
-		       intf->timed_out_ipmb_broadcasts);
-	out += sprintf(out, "sent_ipmb_responses:         %d\n",
-		       intf->sent_ipmb_responses);
-	out += sprintf(out, "handled_ipmb_responses:      %d\n",
-		       intf->handled_ipmb_responses);
-	out += sprintf(out, "invalid_ipmb_responses:      %d\n",
-		       intf->invalid_ipmb_responses);
-	out += sprintf(out, "unhandled_ipmb_responses:    %d\n",
-		       intf->unhandled_ipmb_responses);
-	out += sprintf(out, "sent_lan_commands:           %d\n",
-		       intf->sent_lan_commands);
-	out += sprintf(out, "sent_lan_command_errs:       %d\n",
-		       intf->sent_lan_command_errs);
-	out += sprintf(out, "retransmitted_lan_commands:  %d\n",
-		       intf->retransmitted_lan_commands);
-	out += sprintf(out, "timed_out_lan_commands:      %d\n",
-		       intf->timed_out_lan_commands);
-	out += sprintf(out, "sent_lan_responses:          %d\n",
-		       intf->sent_lan_responses);
-	out += sprintf(out, "handled_lan_responses:       %d\n",
-		       intf->handled_lan_responses);
-	out += sprintf(out, "invalid_lan_responses:       %d\n",
-		       intf->invalid_lan_responses);
-	out += sprintf(out, "unhandled_lan_responses:     %d\n",
-		       intf->unhandled_lan_responses);
-	out += sprintf(out, "handled_commands:            %d\n",
-		       intf->handled_commands);
-	out += sprintf(out, "invalid_commands:            %d\n",
-		       intf->invalid_commands);
-	out += sprintf(out, "unhandled_commands:          %d\n",
-		       intf->unhandled_commands);
-	out += sprintf(out, "invalid_events:              %d\n",
-		       intf->invalid_events);
-	out += sprintf(out, "events:                      %d\n",
-		       intf->events);
+	out += sprintf(out, "sent_invalid_commands:       %u\n",
+		       ipmi_get_stat(intf, sent_invalid_commands));
+	out += sprintf(out, "sent_local_commands:         %u\n",
+		       ipmi_get_stat(intf, sent_local_commands));
+	out += sprintf(out, "handled_local_responses:     %u\n",
+		       ipmi_get_stat(intf, handled_local_responses));
+	out += sprintf(out, "unhandled_local_responses:   %u\n",
+		       ipmi_get_stat(intf, unhandled_local_responses));
+	out += sprintf(out, "sent_ipmb_commands:          %u\n",
+		       ipmi_get_stat(intf, sent_ipmb_commands));
+	out += sprintf(out, "sent_ipmb_command_errs:      %u\n",
+		       ipmi_get_stat(intf, sent_ipmb_command_errs));
+	out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
+		       ipmi_get_stat(intf, retransmitted_ipmb_commands));
+	out += sprintf(out, "timed_out_ipmb_commands:     %u\n",
+		       ipmi_get_stat(intf, timed_out_ipmb_commands));
+	out += sprintf(out, "timed_out_ipmb_broadcasts:   %u\n",
+		       ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
+	out += sprintf(out, "sent_ipmb_responses:         %u\n",
+		       ipmi_get_stat(intf, sent_ipmb_responses));
+	out += sprintf(out, "handled_ipmb_responses:      %u\n",
+		       ipmi_get_stat(intf, handled_ipmb_responses));
+	out += sprintf(out, "invalid_ipmb_responses:      %u\n",
+		       ipmi_get_stat(intf, invalid_ipmb_responses));
+	out += sprintf(out, "unhandled_ipmb_responses:    %u\n",
+		       ipmi_get_stat(intf, unhandled_ipmb_responses));
+	out += sprintf(out, "sent_lan_commands:           %u\n",
+		       ipmi_get_stat(intf, sent_lan_commands));
+	out += sprintf(out, "sent_lan_command_errs:       %u\n",
+		       ipmi_get_stat(intf, sent_lan_command_errs));
+	out += sprintf(out, "retransmitted_lan_commands:  %u\n",
+		       ipmi_get_stat(intf, retransmitted_lan_commands));
+	out += sprintf(out, "timed_out_lan_commands:      %u\n",
+		       ipmi_get_stat(intf, timed_out_lan_commands));
+	out += sprintf(out, "sent_lan_responses:          %u\n",
+		       ipmi_get_stat(intf, sent_lan_responses));
+	out += sprintf(out, "handled_lan_responses:       %u\n",
+		       ipmi_get_stat(intf, handled_lan_responses));
+	out += sprintf(out, "invalid_lan_responses:       %u\n",
+		       ipmi_get_stat(intf, invalid_lan_responses));
+	out += sprintf(out, "unhandled_lan_responses:     %u\n",
+		       ipmi_get_stat(intf, unhandled_lan_responses));
+	out += sprintf(out, "handled_commands:            %u\n",
+		       ipmi_get_stat(intf, handled_commands));
+	out += sprintf(out, "invalid_commands:            %u\n",
+		       ipmi_get_stat(intf, invalid_commands));
+	out += sprintf(out, "unhandled_commands:          %u\n",
+		       ipmi_get_stat(intf, unhandled_commands));
+	out += sprintf(out, "invalid_events:              %u\n",
+		       ipmi_get_stat(intf, invalid_events));
+	out += sprintf(out, "events:                      %u\n",
+		       ipmi_get_stat(intf, events));
 
 	return (out - ((char *) page));
 }
 #endif /* CONFIG_PROC_FS */
 
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
-			    read_proc_t *read_proc, write_proc_t *write_proc,
+			    read_proc_t *read_proc,
 			    void *data, struct module *owner)
 {
 	int                    rv = 0;
@@ -1886,7 +1968,6 @@
 	} else {
 		file->data = data;
 		file->read_proc = read_proc;
-		file->write_proc = write_proc;
 		file->owner = owner;
 
 		mutex_lock(&smi->proc_entry_lock);
@@ -1899,6 +1980,7 @@
 
 	return rv;
 }
+EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
 
 static int add_proc_entries(ipmi_smi_t smi, int num)
 {
@@ -1909,23 +1991,22 @@
 	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
 	if (!smi->proc_dir)
 		rv = -ENOMEM;
-	else {
+	else
 		smi->proc_dir->owner = THIS_MODULE;
-	}
 
 	if (rv == 0)
 		rv = ipmi_smi_add_proc_entry(smi, "stats",
-					     stat_file_read_proc, NULL,
+					     stat_file_read_proc,
 					     smi, THIS_MODULE);
 
 	if (rv == 0)
 		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
-					     ipmb_file_read_proc, NULL,
+					     ipmb_file_read_proc,
 					     smi, THIS_MODULE);
 
 	if (rv == 0)
 		rv = ipmi_smi_add_proc_entry(smi, "version",
-					     version_file_read_proc, NULL,
+					     version_file_read_proc,
 					     smi, THIS_MODULE);
 #endif /* CONFIG_PROC_FS */
 
@@ -2210,37 +2291,47 @@
 
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->device_id_attr);
-	if (err) goto out;
+	if (err)
+		goto out;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->provides_dev_sdrs_attr);
-	if (err) goto out_devid;
+	if (err)
+		goto out_devid;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->revision_attr);
-	if (err) goto out_sdrs;
+	if (err)
+		goto out_sdrs;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->firmware_rev_attr);
-	if (err) goto out_rev;
+	if (err)
+		goto out_rev;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->version_attr);
-	if (err) goto out_firm;
+	if (err)
+		goto out_firm;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->add_dev_support_attr);
-	if (err) goto out_version;
+	if (err)
+		goto out_version;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->manufacturer_id_attr);
-	if (err) goto out_add_dev;
+	if (err)
+		goto out_add_dev;
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->product_id_attr);
-	if (err) goto out_manu;
+	if (err)
+		goto out_manu;
 	if (bmc->id.aux_firmware_revision_set) {
 		err = device_create_file(&bmc->dev->dev,
 				   &bmc->aux_firmware_rev_attr);
-		if (err) goto out_prod_id;
+		if (err)
+			goto out_prod_id;
 	}
 	if (bmc->guid_set) {
 		err = device_create_file(&bmc->dev->dev,
 				   &bmc->guid_attr);
-		if (err) goto out_aux_firm;
+		if (err)
+			goto out_aux_firm;
 	}
 
 	return 0;
@@ -2368,8 +2459,10 @@
 			       "ipmi_msghandler:"
 			       " Unable to register bmc device: %d\n",
 			       rv);
-			/* Don't go to out_err, you can only do that if
-			   the device is registered already. */
+			/*
+			 * Don't go to out_err, you can only do that if
+			 * the device is registered already.
+			 */
 			return rv;
 		}
 
@@ -2560,17 +2653,18 @@
 
 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
-	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
-	{
+	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
 		/* It's the one we want */
 		if (msg->msg.data[0] != 0) {
 			/* Got an error from the channel, just go on. */
 
 			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
-				/* If the MC does not support this
-				   command, that is legal.  We just
-				   assume it has one IPMB at channel
-				   zero. */
+				/*
+				 * If the MC does not support this
+				 * command, that is legal.  We just
+				 * assume it has one IPMB at channel
+				 * zero.
+				 */
 				intf->channels[0].medium
 					= IPMI_CHANNEL_MEDIUM_IPMB;
 				intf->channels[0].protocol
@@ -2591,7 +2685,7 @@
 		intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
 		intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
 
-	next_channel:
+ next_channel:
 		intf->curr_channel++;
 		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
 			wake_up(&intf->waitq);
@@ -2619,6 +2713,7 @@
 	if (intf->handlers->poll)
 		intf->handlers->poll(intf->send_info);
 }
+EXPORT_SYMBOL(ipmi_poll_interface);
 
 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		      void		       *send_info,
@@ -2633,14 +2728,18 @@
 	ipmi_smi_t       tintf;
 	struct list_head *link;
 
-	/* Make sure the driver is actually initialized, this handles
-	   problems with initialization order. */
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
 	if (!initialized) {
 		rv = ipmi_init_msghandler();
 		if (rv)
 			return rv;
-		/* The init code doesn't return an error if it was turned
-		   off, but it won't initialize.  Check that. */
+		/*
+		 * The init code doesn't return an error if it was turned
+		 * off, but it won't initialize.  Check that.
+		 */
 		if (!initialized)
 			return -ENODEV;
 	}
@@ -2688,8 +2787,9 @@
 	spin_lock_init(&intf->maintenance_mode_lock);
 	INIT_LIST_HEAD(&intf->cmd_rcvrs);
 	init_waitqueue_head(&intf->waitq);
+	for (i = 0; i < IPMI_NUM_STATS; i++)
+		atomic_set(&intf->stats[i], 0);
 
-	spin_lock_init(&intf->counter_lock);
 	intf->proc_dir = NULL;
 
 	mutex_lock(&smi_watchers_mutex);
@@ -2717,11 +2817,12 @@
 	get_guid(intf);
 
 	if ((intf->ipmi_version_major > 1)
-	    || ((intf->ipmi_version_major == 1)
-		&& (intf->ipmi_version_minor >= 5)))
-	{
-		/* Start scanning the channels to see what is
-		   available. */
+			|| ((intf->ipmi_version_major == 1)
+			    && (intf->ipmi_version_minor >= 5))) {
+		/*
+		 * Start scanning the channels to see what is
+		 * available.
+		 */
 		intf->null_user_handler = channel_handler;
 		intf->curr_channel = 0;
 		rv = send_channel_info_cmd(intf, 0);
@@ -2769,6 +2870,7 @@
 
 	return rv;
 }
+EXPORT_SYMBOL(ipmi_register_smi);
 
 static void cleanup_smi_msgs(ipmi_smi_t intf)
 {
@@ -2803,8 +2905,10 @@
 
 	remove_proc_entries(intf);
 
-	/* Call all the watcher interfaces to tell them that
-	   an interface is gone. */
+	/*
+	 * Call all the watcher interfaces to tell them that
+	 * an interface is gone.
+	 */
 	list_for_each_entry(w, &smi_watchers, link)
 		w->smi_gone(intf_num);
 	mutex_unlock(&smi_watchers_mutex);
@@ -2812,22 +2916,21 @@
 	kref_put(&intf->refcount, intf_free);
 	return 0;
 }
+EXPORT_SYMBOL(ipmi_unregister_smi);
 
 static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
 				   struct ipmi_smi_msg *msg)
 {
 	struct ipmi_ipmb_addr ipmb_addr;
 	struct ipmi_recv_msg  *recv_msg;
-	unsigned long         flags;
 
-	
-	/* This is 11, not 10, because the response must contain a
-	 * completion code. */
+	/*
+	 * This is 11, not 10, because the response must contain a
+	 * completion code.
+	 */
 	if (msg->rsp_size < 11) {
 		/* Message not big enough, just ignore it. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->invalid_ipmb_responses++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, invalid_ipmb_responses);
 		return 0;
 	}
 
@@ -2841,37 +2944,38 @@
 	ipmb_addr.channel = msg->rsp[3] & 0x0f;
 	ipmb_addr.lun = msg->rsp[7] & 3;
 
-	/* It's a response from a remote entity.  Look up the sequence
-	   number and handle the response. */
+	/*
+	 * It's a response from a remote entity.  Look up the sequence
+	 * number and handle the response.
+	 */
 	if (intf_find_seq(intf,
 			  msg->rsp[7] >> 2,
 			  msg->rsp[3] & 0x0f,
 			  msg->rsp[8],
 			  (msg->rsp[4] >> 2) & (~1),
 			  (struct ipmi_addr *) &(ipmb_addr),
-			  &recv_msg))
-	{
-		/* We were unable to find the sequence number,
-		   so just nuke the message. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->unhandled_ipmb_responses++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+			  &recv_msg)) {
+		/*
+		 * We were unable to find the sequence number,
+		 * so just nuke the message.
+		 */
+		ipmi_inc_stat(intf, unhandled_ipmb_responses);
 		return 0;
 	}
 
 	memcpy(recv_msg->msg_data,
 	       &(msg->rsp[9]),
 	       msg->rsp_size - 9);
-	/* THe other fields matched, so no need to set them, except
-           for netfn, which needs to be the response that was
-           returned, not the request value. */
+	/*
+	 * The other fields matched, so no need to set them, except
+	 * for netfn, which needs to be the response that was
+	 * returned, not the request value.
+	 */
 	recv_msg->msg.netfn = msg->rsp[4] >> 2;
 	recv_msg->msg.data = recv_msg->msg_data;
 	recv_msg->msg.data_len = msg->rsp_size - 10;
 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-	spin_lock_irqsave(&intf->counter_lock, flags);
-	intf->handled_ipmb_responses++;
-	spin_unlock_irqrestore(&intf->counter_lock, flags);
+	ipmi_inc_stat(intf, handled_ipmb_responses);
 	deliver_response(recv_msg);
 
 	return 0;
@@ -2888,14 +2992,11 @@
 	ipmi_user_t              user = NULL;
 	struct ipmi_ipmb_addr    *ipmb_addr;
 	struct ipmi_recv_msg     *recv_msg;
-	unsigned long            flags;
 	struct ipmi_smi_handlers *handlers;
 
 	if (msg->rsp_size < 10) {
 		/* Message not big enough, just ignore it. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->invalid_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, invalid_commands);
 		return 0;
 	}
 
@@ -2919,19 +3020,17 @@
 
 	if (user == NULL) {
 		/* We didn't find a user, deliver an error response. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->unhandled_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, unhandled_commands);
 
 		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 		msg->data[1] = IPMI_SEND_MSG_CMD;
 		msg->data[2] = msg->rsp[3];
 		msg->data[3] = msg->rsp[6];
-                msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
 		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
 		msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
-                /* rqseq/lun */
-                msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+		/* rqseq/lun */
+		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
 		msg->data[8] = msg->rsp[8]; /* cmd */
 		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
 		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
@@ -2950,23 +3049,25 @@
 		handlers = intf->handlers;
 		if (handlers) {
 			handlers->sender(intf->send_info, msg, 0);
-			/* We used the message, so return the value
-			   that causes it to not be freed or
-			   queued. */
+			/*
+			 * We used the message, so return the value
+			 * that causes it to not be freed or
+			 * queued.
+			 */
 			rv = -1;
 		}
 		rcu_read_unlock();
 	} else {
 		/* Deliver the message to the user. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->handled_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, handled_commands);
 
 		recv_msg = ipmi_alloc_recv_msg();
 		if (!recv_msg) {
-			/* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
 			rv = 1;
 			kref_put(&user->refcount, free_user);
 		} else {
@@ -2977,8 +3078,10 @@
 			ipmb_addr->lun = msg->rsp[7] & 3;
 			ipmb_addr->channel = msg->rsp[3] & 0xf;
 
-			/* Extract the rest of the message information
-			   from the IPMB header.*/
+			/*
+			 * Extract the rest of the message information
+			 * from the IPMB header.
+			 */
 			recv_msg->user = user;
 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
 			recv_msg->msgid = msg->rsp[7] >> 2;
@@ -2986,8 +3089,10 @@
 			recv_msg->msg.cmd = msg->rsp[8];
 			recv_msg->msg.data = recv_msg->msg_data;
 
-			/* We chop off 10, not 9 bytes because the checksum
-			   at the end also needs to be removed. */
+			/*
+			 * We chop off 10, not 9 bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
 			recv_msg->msg.data_len = msg->rsp_size - 10;
 			memcpy(recv_msg->msg_data,
 			       &(msg->rsp[9]),
@@ -3004,16 +3109,15 @@
 {
 	struct ipmi_lan_addr  lan_addr;
 	struct ipmi_recv_msg  *recv_msg;
-	unsigned long         flags;
 
 
-	/* This is 13, not 12, because the response must contain a
-	 * completion code. */
+	/*
+	 * This is 13, not 12, because the response must contain a
+	 * completion code.
+	 */
 	if (msg->rsp_size < 13) {
 		/* Message not big enough, just ignore it. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->invalid_lan_responses++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, invalid_lan_responses);
 		return 0;
 	}
 
@@ -3030,37 +3134,38 @@
 	lan_addr.privilege = msg->rsp[3] >> 4;
 	lan_addr.lun = msg->rsp[9] & 3;
 
-	/* It's a response from a remote entity.  Look up the sequence
-	   number and handle the response. */
+	/*
+	 * It's a response from a remote entity.  Look up the sequence
+	 * number and handle the response.
+	 */
 	if (intf_find_seq(intf,
 			  msg->rsp[9] >> 2,
 			  msg->rsp[3] & 0x0f,
 			  msg->rsp[10],
 			  (msg->rsp[6] >> 2) & (~1),
 			  (struct ipmi_addr *) &(lan_addr),
-			  &recv_msg))
-	{
-		/* We were unable to find the sequence number,
-		   so just nuke the message. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->unhandled_lan_responses++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+			  &recv_msg)) {
+		/*
+		 * We were unable to find the sequence number,
+		 * so just nuke the message.
+		 */
+		ipmi_inc_stat(intf, unhandled_lan_responses);
 		return 0;
 	}
 
 	memcpy(recv_msg->msg_data,
 	       &(msg->rsp[11]),
 	       msg->rsp_size - 11);
-	/* The other fields matched, so no need to set them, except
-           for netfn, which needs to be the response that was
-           returned, not the request value. */
+	/*
+	 * The other fields matched, so no need to set them, except
+	 * for netfn, which needs to be the response that was
+	 * returned, not the request value.
+	 */
 	recv_msg->msg.netfn = msg->rsp[6] >> 2;
 	recv_msg->msg.data = recv_msg->msg_data;
 	recv_msg->msg.data_len = msg->rsp_size - 12;
 	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-	spin_lock_irqsave(&intf->counter_lock, flags);
-	intf->handled_lan_responses++;
-	spin_unlock_irqrestore(&intf->counter_lock, flags);
+	ipmi_inc_stat(intf, handled_lan_responses);
 	deliver_response(recv_msg);
 
 	return 0;
@@ -3077,13 +3182,10 @@
 	ipmi_user_t              user = NULL;
 	struct ipmi_lan_addr     *lan_addr;
 	struct ipmi_recv_msg     *recv_msg;
-	unsigned long            flags;
 
 	if (msg->rsp_size < 12) {
 		/* Message not big enough, just ignore it. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->invalid_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, invalid_commands);
 		return 0;
 	}
 
@@ -3107,23 +3209,23 @@
 
 	if (user == NULL) {
 		/* We didn't find a user, just give up. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->unhandled_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, unhandled_commands);
 
-		rv = 0; /* Don't do anything with these messages, just
-			   allow them to be freed. */
+		/*
+		 * Don't do anything with these messages, just allow
+		 * them to be freed.
+		 */
+		rv = 0;
 	} else {
 		/* Deliver the message to the user. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->handled_commands++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, handled_commands);
 
 		recv_msg = ipmi_alloc_recv_msg();
 		if (!recv_msg) {
-			/* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling later.
+			 */
 			rv = 1;
 			kref_put(&user->refcount, free_user);
 		} else {
@@ -3137,8 +3239,10 @@
 			lan_addr->channel = msg->rsp[3] & 0xf;
 			lan_addr->privilege = msg->rsp[3] >> 4;
 
-			/* Extract the rest of the message information
-			   from the IPMB header.*/
+			/*
+			 * Extract the rest of the message information
+			 * from the IPMB header.
+			 */
 			recv_msg->user = user;
 			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
 			recv_msg->msgid = msg->rsp[9] >> 2;
@@ -3146,8 +3250,10 @@
 			recv_msg->msg.cmd = msg->rsp[10];
 			recv_msg->msg.data = recv_msg->msg_data;
 
-			/* We chop off 12, not 11 bytes because the checksum
-			   at the end also needs to be removed. */
+			/*
+			 * We chop off 12, not 11 bytes because the checksum
+			 * at the end also needs to be removed.
+			 */
 			recv_msg->msg.data_len = msg->rsp_size - 12;
 			memcpy(recv_msg->msg_data,
 			       &(msg->rsp[11]),
@@ -3163,7 +3269,7 @@
 				     struct ipmi_smi_msg  *msg)
 {
 	struct ipmi_system_interface_addr *smi_addr;
-	
+
 	recv_msg->msgid = 0;
 	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
 	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -3189,9 +3295,7 @@
 
 	if (msg->rsp_size < 19) {
 		/* Message is too small to be an IPMB event. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->invalid_events++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, invalid_events);
 		return 0;
 	}
 
@@ -3204,12 +3308,12 @@
 
 	spin_lock_irqsave(&intf->events_lock, flags);
 
-	spin_lock(&intf->counter_lock);
-	intf->events++;
-	spin_unlock(&intf->counter_lock);
+	ipmi_inc_stat(intf, events);
 
-	/* Allocate and fill in one message for every user that is getting
-	   events. */
+	/*
+	 * Allocate and fill in one message for every user that is
+	 * getting events.
+	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(user, &intf->users, link) {
 		if (!user->gets_events)
@@ -3223,9 +3327,11 @@
 				list_del(&recv_msg->link);
 				ipmi_free_recv_msg(recv_msg);
 			}
-			/* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
 			rv = 1;
 			goto out;
 		}
@@ -3246,13 +3352,17 @@
 			deliver_response(recv_msg);
 		}
 	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
-		/* No one to receive the message, put it in queue if there's
-		   not already too many things in the queue. */
+		/*
+		 * No one to receive the message, put it in queue if there's
+		 * not already too many things in the queue.
+		 */
 		recv_msg = ipmi_alloc_recv_msg();
 		if (!recv_msg) {
-			/* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+			/*
+			 * We couldn't allocate memory for the
+			 * message, so requeue it for handling
+			 * later.
+			 */
 			rv = 1;
 			goto out;
 		}
@@ -3260,11 +3370,14 @@
 		copy_event_into_recv_msg(recv_msg, msg);
 		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
 		intf->waiting_events_count++;
-	} else {
-		/* There's too many things in the queue, discard this
-		   message. */
-		printk(KERN_WARNING PFX "Event queue full, discarding an"
-		       " incoming event\n");
+	} else if (!intf->event_msg_printed) {
+		/*
+		 * There's too many things in the queue, discard this
+		 * message.
+		 */
+		printk(KERN_WARNING PFX "Event queue full, discarding"
+		       " incoming events\n");
+		intf->event_msg_printed = 1;
 	}
 
  out:
@@ -3277,16 +3390,15 @@
 			  struct ipmi_smi_msg *msg)
 {
 	struct ipmi_recv_msg *recv_msg;
-	unsigned long        flags;
 	struct ipmi_user     *user;
 
 	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
-	if (recv_msg == NULL)
-	{
-		printk(KERN_WARNING"IPMI message received with no owner. This\n"
-			"could be because of a malformed message, or\n"
-			"because of a hardware error.  Contact your\n"
-			"hardware vender for assistance\n");
+	if (recv_msg == NULL) {
+		printk(KERN_WARNING
+		       "IPMI message received with no owner. This\n"
+		       "could be because of a malformed message, or\n"
+		       "because of a hardware error.  Contact your\n"
+		       "hardware vender for assistance\n");
 		return 0;
 	}
 
@@ -3294,16 +3406,12 @@
 	/* Make sure the user still exists. */
 	if (user && !user->valid) {
 		/* The user for the message went away, so give up. */
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->unhandled_local_responses++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, unhandled_local_responses);
 		ipmi_free_recv_msg(recv_msg);
 	} else {
 		struct ipmi_system_interface_addr *smi_addr;
 
-		spin_lock_irqsave(&intf->counter_lock, flags);
-		intf->handled_local_responses++;
-		spin_unlock_irqrestore(&intf->counter_lock, flags);
+		ipmi_inc_stat(intf, handled_local_responses);
 		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
 		recv_msg->msgid = msg->msgid;
 		smi_addr = ((struct ipmi_system_interface_addr *)
@@ -3324,9 +3432,11 @@
 	return 0;
 }
 
-/* Handle a new message.  Return 1 if the message should be requeued,
-   0 if the message should be freed, or -1 if the message should not
-   be freed or requeued. */
+/*
+ * Handle a new message.  Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
 static int handle_new_recv_msg(ipmi_smi_t          intf,
 			       struct ipmi_smi_msg *msg)
 {
@@ -3351,10 +3461,12 @@
 		msg->rsp[1] = msg->data[1];
 		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
 		msg->rsp_size = 3;
-	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
-		   || (msg->rsp[1] != msg->data[1]))		  /* Command */
-	{
-		/* The response is not even marginally correct. */
+	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+		   || (msg->rsp[1] != msg->data[1])) {
+		/*
+		 * The NetFN and Command in the response are not even
+		 * marginally correct.
+		 */
 		printk(KERN_WARNING PFX "BMC returned incorrect response,"
 		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
 		       (msg->data[0] >> 2) | 1, msg->data[1],
@@ -3369,10 +3481,11 @@
 
 	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
 	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
-	    && (msg->user_data != NULL))
-	{
-		/* It's a response to a response we sent.  For this we
-		   deliver a send message response to the user. */
+	    && (msg->user_data != NULL)) {
+		/*
+		 * It's a response to a response we sent.  For this we
+		 * deliver a send message response to the user.
+		 */
 		struct ipmi_recv_msg     *recv_msg = msg->user_data;
 
 		requeue = 0;
@@ -3398,8 +3511,7 @@
 		recv_msg->msg_data[0] = msg->rsp[2];
 		deliver_response(recv_msg);
 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
-		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
-	{
+		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
 		/* It's from the receive queue. */
 		chan = msg->rsp[3] & 0xf;
 		if (chan >= IPMI_MAX_CHANNELS) {
@@ -3411,12 +3523,16 @@
 		switch (intf->channels[chan].medium) {
 		case IPMI_CHANNEL_MEDIUM_IPMB:
 			if (msg->rsp[4] & 0x04) {
-				/* It's a response, so find the
-				   requesting message and send it up. */
+				/*
+				 * It's a response, so find the
+				 * requesting message and send it up.
+				 */
 				requeue = handle_ipmb_get_msg_rsp(intf, msg);
 			} else {
-				/* It's a command to the SMS from some other
-				   entity.  Handle that. */
+				/*
+				 * It's a command to the SMS from some other
+				 * entity.  Handle that.
+				 */
 				requeue = handle_ipmb_get_msg_cmd(intf, msg);
 			}
 			break;
@@ -3424,25 +3540,30 @@
 		case IPMI_CHANNEL_MEDIUM_8023LAN:
 		case IPMI_CHANNEL_MEDIUM_ASYNC:
 			if (msg->rsp[6] & 0x04) {
-				/* It's a response, so find the
-				   requesting message and send it up. */
+				/*
+				 * It's a response, so find the
+				 * requesting message and send it up.
+				 */
 				requeue = handle_lan_get_msg_rsp(intf, msg);
 			} else {
-				/* It's a command to the SMS from some other
-				   entity.  Handle that. */
+				/*
+				 * It's a command to the SMS from some other
+				 * entity.  Handle that.
+				 */
 				requeue = handle_lan_get_msg_cmd(intf, msg);
 			}
 			break;
 
 		default:
-			/* We don't handle the channel type, so just
-			 * free the message. */
+			/*
+			 * We don't handle the channel type, so just
+			 * free the message.
+			 */
 			requeue = 0;
 		}
 
 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
-		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
-	{
+		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
 		/* It's an asyncronous event. */
 		requeue = handle_read_event_rsp(intf, msg);
 	} else {
@@ -3458,71 +3579,82 @@
 void ipmi_smi_msg_received(ipmi_smi_t          intf,
 			   struct ipmi_smi_msg *msg)
 {
-	unsigned long flags;
+	unsigned long flags = 0; /* keep us warning-free. */
 	int           rv;
+	int           run_to_completion;
 
 
 	if ((msg->data_size >= 2)
 	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
 	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
-	    && (msg->user_data == NULL))
-	{
-		/* This is the local response to a command send, start
-                   the timer for these.  The user_data will not be
-                   NULL if this is a response send, and we will let
-                   response sends just go through. */
+	    && (msg->user_data == NULL)) {
+		/*
+		 * This is the local response to a command send, start
+		 * the timer for these.  The user_data will not be
+		 * NULL if this is a response send, and we will let
+		 * response sends just go through.
+		 */
 
-		/* Check for errors, if we get certain errors (ones
-                   that mean basically we can try again later), we
-                   ignore them and start the timer.  Otherwise we
-                   report the error immediately. */
+		/*
+		 * Check for errors, if we get certain errors (ones
+		 * that mean basically we can try again later), we
+		 * ignore them and start the timer.  Otherwise we
+		 * report the error immediately.
+		 */
 		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
 		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
 		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
 		    && (msg->rsp[2] != IPMI_BUS_ERR)
-		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
-		{
+		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
 			int chan = msg->rsp[3] & 0xf;
 
 			/* Got an error sending the message, handle it. */
-			spin_lock_irqsave(&intf->counter_lock, flags);
 			if (chan >= IPMI_MAX_CHANNELS)
 				; /* This shouldn't happen */
 			else if ((intf->channels[chan].medium
 				  == IPMI_CHANNEL_MEDIUM_8023LAN)
 				 || (intf->channels[chan].medium
 				     == IPMI_CHANNEL_MEDIUM_ASYNC))
-				intf->sent_lan_command_errs++;
+				ipmi_inc_stat(intf, sent_lan_command_errs);
 			else
-				intf->sent_ipmb_command_errs++;
-			spin_unlock_irqrestore(&intf->counter_lock, flags);
+				ipmi_inc_stat(intf, sent_ipmb_command_errs);
 			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
-		} else {
+		} else
 			/* The message was sent, start the timer. */
 			intf_start_seq_timer(intf, msg->msgid);
-		}
 
 		ipmi_free_smi_msg(msg);
 		goto out;
 	}
 
-	/* To preserve message order, if the list is not empty, we
-           tack this message onto the end of the list. */
-	spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+	/*
+	 * To preserve message order, if the list is not empty, we
+	 * tack this message onto the end of the list.
+	 */
+	run_to_completion = intf->run_to_completion;
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
 	if (!list_empty(&intf->waiting_msgs)) {
 		list_add_tail(&msg->link, &intf->waiting_msgs);
-		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 		goto out;
 	}
-	spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-		
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
 	rv = handle_new_recv_msg(intf, msg);
 	if (rv > 0) {
-		/* Could not handle the message now, just add it to a
-                   list to handle later. */
-		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+		/*
+		 * Could not handle the message now, just add it to a
+		 * list to handle later.
+		 */
+		run_to_completion = intf->run_to_completion;
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
 		list_add_tail(&msg->link, &intf->waiting_msgs);
-		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 	} else if (rv == 0) {
 		ipmi_free_smi_msg(msg);
 	}
@@ -3530,6 +3662,7 @@
  out:
 	return;
 }
+EXPORT_SYMBOL(ipmi_smi_msg_received);
 
 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 {
@@ -3544,7 +3677,7 @@
 	}
 	rcu_read_unlock();
 }
-
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3552,14 +3685,16 @@
 {
 	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
 	if (!smi_msg)
-		/* If we can't allocate the message, then just return, we
-		   get 4 retries, so this should be ok. */
+		/*
+		 * If we can't allocate the message, then just return, we
+		 * get 4 retries, so this should be ok.
+		 */
 		return NULL;
 
 	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
 	smi_msg->data_size = recv_msg->msg.data_len;
 	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
-		
+
 #ifdef DEBUG_MSGING
 	{
 		int m;
@@ -3594,28 +3729,26 @@
 		ent->inuse = 0;
 		msg = ent->recv_msg;
 		list_add_tail(&msg->link, timeouts);
-		spin_lock(&intf->counter_lock);
 		if (ent->broadcast)
-			intf->timed_out_ipmb_broadcasts++;
+			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
 		else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
-			intf->timed_out_lan_commands++;
+			ipmi_inc_stat(intf, timed_out_lan_commands);
 		else
-			intf->timed_out_ipmb_commands++;
-		spin_unlock(&intf->counter_lock);
+			ipmi_inc_stat(intf, timed_out_ipmb_commands);
 	} else {
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
-		/* Start with the max timer, set to normal
-		   timer after the message is sent. */
+		/*
+		 * Start with the max timer, set to normal timer after
+		 * the message is sent.
+		 */
 		ent->timeout = MAX_MSG_TIMEOUT;
 		ent->retries_left--;
-		spin_lock(&intf->counter_lock);
 		if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
-			intf->retransmitted_lan_commands++;
+			ipmi_inc_stat(intf, retransmitted_lan_commands);
 		else
-			intf->retransmitted_ipmb_commands++;
-		spin_unlock(&intf->counter_lock);
+			ipmi_inc_stat(intf, retransmitted_ipmb_commands);
 
 		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
 					    ent->seqid);
@@ -3624,11 +3757,13 @@
 
 		spin_unlock_irqrestore(&intf->seq_lock, *flags);
 
-		/* Send the new message.  We send with a zero
-		 * priority.  It timed out, I doubt time is
-		 * that critical now, and high priority
-		 * messages are really only for messages to the
-		 * local MC, which don't get resent. */
+		/*
+		 * Send the new message.  We send with a zero
+		 * priority.  It timed out, I doubt time is that
+		 * critical now, and high priority messages are really
+		 * only for messages to the local MC, which don't get
+		 * resent.
+		 */
 		handlers = intf->handlers;
 		if (handlers)
 			intf->handlers->sender(intf->send_info,
@@ -3659,16 +3794,20 @@
 				list_del(&smi_msg->link);
 				ipmi_free_smi_msg(smi_msg);
 			} else {
-				/* To preserve message order, quit if we
-				   can't handle a message. */
+				/*
+				 * To preserve message order, quit if we
+				 * can't handle a message.
+				 */
 				break;
 			}
 		}
 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 
-		/* Go through the seq table and find any messages that
-		   have timed out, putting them in the timeouts
-		   list. */
+		/*
+		 * Go through the seq table and find any messages that
+		 * have timed out, putting them in the timeouts
+		 * list.
+		 */
 		INIT_LIST_HEAD(&timeouts);
 		spin_lock_irqsave(&intf->seq_lock, flags);
 		for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
@@ -3694,8 +3833,7 @@
 				intf->auto_maintenance_timeout
 					-= timeout_period;
 				if (!intf->maintenance_mode
-				    && (intf->auto_maintenance_timeout <= 0))
-				{
+				    && (intf->auto_maintenance_timeout <= 0)) {
 					intf->maintenance_mode_enable = 0;
 					maintenance_mode_update(intf);
 				}
@@ -3713,8 +3851,10 @@
 	struct ipmi_smi_handlers *handlers;
 
 	rcu_read_lock();
-	/* Called from the timer, no need to check if handlers is
-	 * valid. */
+	/*
+	 * Called from the timer, no need to check if handlers is
+	 * valid.
+	 */
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		/* No event requests when in maintenance mode. */
 		if (intf->maintenance_mode_enable)
@@ -3735,10 +3875,12 @@
 /* How many jiffies does it take to get to the timeout time. */
 #define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
 
-/* Request events from the queue every second (this is the number of
-   IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
-   future, IPMI will add a way to know immediately if an event is in
-   the queue and this silliness can go away. */
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
 #define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
 
 static atomic_t stop_operation;
@@ -3782,6 +3924,7 @@
 	}
 	return rv;
 }
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
 
 static void free_recv_msg(struct ipmi_recv_msg *msg)
 {
@@ -3789,7 +3932,7 @@
 	kfree(msg);
 }
 
-struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
 {
 	struct ipmi_recv_msg *rv;
 
@@ -3808,6 +3951,7 @@
 		kref_put(&msg->user->refcount, free_user);
 	msg->done(msg);
 }
+EXPORT_SYMBOL(ipmi_free_recv_msg);
 
 #ifdef CONFIG_IPMI_PANIC_EVENT
 
@@ -3825,8 +3969,7 @@
 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
 	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
 	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
-	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
-	{
+	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
 		/* A get event receiver command, save it. */
 		intf->event_receiver = msg->msg.data[1];
 		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
@@ -3838,10 +3981,11 @@
 	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
 	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
 	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
-	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
-	{
-		/* A get device id command, save if we are an event
-		   receiver or generator. */
+	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+		/*
+		 * A get device id command, save if we are an event
+		 * receiver or generator.
+		 */
 		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
 		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
 	}
@@ -3874,8 +4018,10 @@
 	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
 	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
 
-	/* Put a few breadcrumbs in.  Hopefully later we can add more things
-	   to make the panic events more useful. */
+	/*
+	 * Put a few breadcrumbs in.  Hopefully later we can add more things
+	 * to make the panic events more useful.
+	 */
 	if (str) {
 		data[3] = str[0];
 		data[6] = str[1];
@@ -3891,6 +4037,7 @@
 			/* Interface is not ready. */
 			continue;
 
+		intf->run_to_completion = 1;
 		/* Send the event announcing the panic. */
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
 		i_ipmi_request(NULL,
@@ -3908,9 +4055,11 @@
 	}
 
 #ifdef CONFIG_IPMI_PANIC_STRING
-	/* On every interface, dump a bunch of OEM event holding the
-	   string. */
-	if (!str) 
+	/*
+	 * On every interface, dump a bunch of OEM events holding the
+	 * string.
+	 */
+	if (!str)
 		return;
 
 	/* For every registered interface, send the event. */
@@ -3931,11 +4080,13 @@
 		 */
 		smp_rmb();
 
-		/* First job here is to figure out where to send the
-		   OEM events.  There's no way in IPMI to send OEM
-		   events using an event send command, so we have to
-		   find the SEL to put them in and stick them in
-		   there. */
+		/*
+		 * First job here is to figure out where to send the
+		 * OEM events.  There's no way in IPMI to send OEM
+		 * events using an event send command, so we have to
+		 * find the SEL to put them in and stick them in
+		 * there.
+		 */
 
 		/* Get capabilities from the get device id. */
 		intf->local_sel_device = 0;
@@ -3983,24 +4134,29 @@
 		}
 		intf->null_user_handler = NULL;
 
-		/* Validate the event receiver.  The low bit must not
-		   be 1 (it must be a valid IPMB address), it cannot
-		   be zero, and it must not be my address. */
-                if (((intf->event_receiver & 1) == 0)
+		/*
+		 * Validate the event receiver.  The low bit must not
+		 * be 1 (it must be a valid IPMB address), it cannot
+		 * be zero, and it must not be my address.
+		 */
+		if (((intf->event_receiver & 1) == 0)
 		    && (intf->event_receiver != 0)
-		    && (intf->event_receiver != intf->channels[0].address))
-		{
-			/* The event receiver is valid, send an IPMB
-			   message. */
+		    && (intf->event_receiver != intf->channels[0].address)) {
+			/*
+			 * The event receiver is valid, send an IPMB
+			 * message.
+			 */
 			ipmb = (struct ipmi_ipmb_addr *) &addr;
 			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
 			ipmb->channel = 0; /* FIXME - is this right? */
 			ipmb->lun = intf->event_receiver_lun;
 			ipmb->slave_addr = intf->event_receiver;
 		} else if (intf->local_sel_device) {
-			/* The event receiver was not valid (or was
-			   me), but I am an SEL device, just dump it
-			   in my SEL. */
+			/*
+			 * The event receiver was not valid (or was
+			 * me), but I am an SEL device, just dump it
+			 * in my SEL.
+			 */
 			si = (struct ipmi_system_interface_addr *) &addr;
 			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 			si->channel = IPMI_BMC_CHANNEL;
@@ -4008,7 +4164,6 @@
 		} else
 			continue; /* No where to send the event. */
 
-		
 		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
 		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
 		msg.data = data;
@@ -4025,8 +4180,10 @@
 			data[2] = 0xf0; /* OEM event without timestamp. */
 			data[3] = intf->channels[0].address;
 			data[4] = j++; /* sequence # */
-			/* Always give 11 bytes, so strncpy will fill
-			   it with zeroes for me. */
+			/*
+			 * Always give 11 bytes, so strncpy will fill
+			 * it with zeroes for me.
+			 */
 			strncpy(data+5, p, 11);
 			p += size;
 
@@ -4043,7 +4200,7 @@
 				       intf->channels[0].lun,
 				       0, 1); /* no retry, and no wait. */
 		}
-	}	
+	}
 #endif /* CONFIG_IPMI_PANIC_STRING */
 }
 #endif /* CONFIG_IPMI_PANIC_EVENT */
@@ -4052,7 +4209,7 @@
 
 static int panic_event(struct notifier_block *this,
 		       unsigned long         event,
-                       void                  *ptr)
+		       void                  *ptr)
 {
 	ipmi_smi_t intf;
 
@@ -4066,6 +4223,7 @@
 			/* Interface is not ready. */
 			continue;
 
+		intf->run_to_completion = 1;
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
 	}
 
@@ -4133,11 +4291,16 @@
 
 	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
 
-	/* This can't be called if any interfaces exist, so no worry about
-	   shutting down the interfaces. */
+	/*
+	 * This can't be called if any interfaces exist, so no worry
+	 * about shutting down the interfaces.
+	 */
 
-	/* Tell the timer to stop, then wait for it to stop.  This avoids
-	   problems with race conditions removing the timer here. */
+	/*
+	 * Tell the timer to stop, then wait for it to stop.  This
+	 * avoids problems with race conditions removing the timer
+	 * here.
+	 */
 	atomic_inc(&stop_operation);
 	del_timer_sync(&ipmi_timer);
 
@@ -4164,31 +4327,6 @@
 module_init(ipmi_init_msghandler_mod);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
-MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
+		   " interface.");
 MODULE_VERSION(IPMI_DRIVER_VERSION);
-
-EXPORT_SYMBOL(ipmi_create_user);
-EXPORT_SYMBOL(ipmi_destroy_user);
-EXPORT_SYMBOL(ipmi_get_version);
-EXPORT_SYMBOL(ipmi_request_settime);
-EXPORT_SYMBOL(ipmi_request_supply_msgs);
-EXPORT_SYMBOL(ipmi_poll_interface);
-EXPORT_SYMBOL(ipmi_register_smi);
-EXPORT_SYMBOL(ipmi_unregister_smi);
-EXPORT_SYMBOL(ipmi_register_for_cmd);
-EXPORT_SYMBOL(ipmi_unregister_for_cmd);
-EXPORT_SYMBOL(ipmi_smi_msg_received);
-EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
-EXPORT_SYMBOL(ipmi_alloc_smi_msg);
-EXPORT_SYMBOL(ipmi_addr_length);
-EXPORT_SYMBOL(ipmi_validate_addr);
-EXPORT_SYMBOL(ipmi_set_gets_events);
-EXPORT_SYMBOL(ipmi_smi_watcher_register);
-EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-EXPORT_SYMBOL(ipmi_set_my_address);
-EXPORT_SYMBOL(ipmi_get_my_address);
-EXPORT_SYMBOL(ipmi_set_my_LUN);
-EXPORT_SYMBOL(ipmi_get_my_LUN);
-EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
-EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
-EXPORT_SYMBOL(ipmi_free_recv_msg);
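
The ipmi_msghandler.c hunks above have one recurring theme: the per-interface
counters that used to be guarded by counter_lock become an array of atomic_t
statistics, bumped with ipmi_inc_stat() and read with ipmi_get_stat(), and the
EXPORT_SYMBOL() markers move next to the functions they export.  A minimal
sketch of that lock-free counter pattern follows; the demo_* and DEMO_*
identifiers are illustrative stand-ins, not the driver's real definitions
(those live in the IPMI headers, outside these hunks), and the code assumes
the kernel's atomic_t API:

	enum demo_stat_indexes {
		DEMO_STAT_handled_commands = 0,
		DEMO_STAT_unhandled_commands,
		DEMO_NUM_STATS			/* must stay last */
	};

	struct demo_intf {
		atomic_t stats[DEMO_NUM_STATS];
	};

	/* Token pasting lets callers name a counter symbolically. */
	#define demo_inc_stat(intf, stat) \
		atomic_inc(&(intf)->stats[DEMO_STAT_ ## stat])
	#define demo_get_stat(intf, stat) \
		((unsigned int) atomic_read(&(intf)->stats[DEMO_STAT_ ## stat]))

Because each counter is a single atomic_t, increments from interrupt context
and reads from the proc handler no longer need a spinlock, which is why
counter_lock and its irqsave/irqrestore pairs disappear throughout the file.
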
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index b86186d..a261bd7 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -87,7 +87,10 @@
 
 /* parameter definition to allow user to flag power cycle */
 module_param(poweroff_powercycle, int, 0644);
-MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
+MODULE_PARM_DESC(poweroff_powercycle,
+		 " Set to non-zero to enable power cycle instead of power"
+		 " down. Power cycle is contingent on hardware support,"
+		 " otherwise it defaults back to power down.");
 
 /* Stuff from the get device id command. */
 static unsigned int mfg_id;
@@ -95,22 +98,25 @@
 static unsigned char capabilities;
 static unsigned char ipmi_version;
 
-/* We use our own messages for this operation, we don't let the system
-   allocate them, since we may be in a panic situation.  The whole
-   thing is single-threaded, anyway, so multiple messages are not
-   required. */
+/*
+ * We use our own messages for this operation, we don't let the system
+ * allocate them, since we may be in a panic situation.  The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
 static void dummy_smi_free(struct ipmi_smi_msg *msg)
 {
+	atomic_dec(&dummy_count);
 }
 static void dummy_recv_free(struct ipmi_recv_msg *msg)
 {
+	atomic_dec(&dummy_count);
 }
-static struct ipmi_smi_msg halt_smi_msg =
-{
+static struct ipmi_smi_msg halt_smi_msg = {
 	.done = dummy_smi_free
 };
-static struct ipmi_recv_msg halt_recv_msg =
-{
+static struct ipmi_recv_msg halt_recv_msg = {
 	.done = dummy_recv_free
 };
 
@@ -127,8 +133,7 @@
 		complete(comp);
 }
 
-static struct ipmi_user_hndl ipmi_poweroff_handler =
-{
+static struct ipmi_user_hndl ipmi_poweroff_handler = {
 	.ipmi_recv_hndl = receive_handler
 };
 
@@ -152,17 +157,28 @@
 	return halt_recv_msg.msg.data[0];
 }
 
-/* We are in run-to-completion mode, no completion is desired. */
+/* Wait for message to complete, spinning. */
 static int ipmi_request_in_rc_mode(ipmi_user_t            user,
 				   struct ipmi_addr       *addr,
 				   struct kernel_ipmi_msg *send_msg)
 {
 	int rv;
 
+	atomic_set(&dummy_count, 2);
 	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
 				      &halt_smi_msg, &halt_recv_msg, 0);
-	if (rv)
+	if (rv) {
+		atomic_set(&dummy_count, 0);
 		return rv;
+	}
+
+	/*
+	 * Spin until our message is done.
+	 */
+	while (atomic_read(&dummy_count) > 0) {
+		ipmi_poll_interface(user);
+		cpu_relax();
+	}
 
 	return halt_recv_msg.msg.data[0];
 }
@@ -184,47 +200,47 @@
 
 static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
 
-static void pps_poweroff_atca (ipmi_user_t user)
+static void pps_poweroff_atca(ipmi_user_t user)
 {
-        struct ipmi_system_interface_addr smi_addr;
-        struct kernel_ipmi_msg            send_msg;
-        int                               rv;
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+	struct ipmi_system_interface_addr smi_addr;
+	struct kernel_ipmi_msg            send_msg;
+	int                               rv;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
 
-        printk(KERN_INFO PFX "PPS powerdown hook used");
+	printk(KERN_INFO PFX "PPS powerdown hook used");
 
-        send_msg.netfn = IPMI_NETFN_OEM;
-        send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
-        send_msg.data = IPMI_ATCA_PPS_IANA;
-        send_msg.data_len = 3;
-        rv = ipmi_request_in_rc_mode(user,
-                                  (struct ipmi_addr *) &smi_addr,
-                                   &send_msg);
-        if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
-                printk(KERN_ERR PFX "Unable to send ATCA ,"
-                       " IPMI error 0x%x\n", rv);
-        }
+	send_msg.netfn = IPMI_NETFN_OEM;
+	send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+	send_msg.data = IPMI_ATCA_PPS_IANA;
+	send_msg.data_len = 3;
+	rv = ipmi_request_in_rc_mode(user,
+				     (struct ipmi_addr *) &smi_addr,
+				     &send_msg);
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+		printk(KERN_ERR PFX "Unable to send ATCA ,"
+		       " IPMI error 0x%x\n", rv);
+	}
 	return;
 }
 
-static int ipmi_atca_detect (ipmi_user_t user)
+static int ipmi_atca_detect(ipmi_user_t user)
 {
 	struct ipmi_system_interface_addr smi_addr;
 	struct kernel_ipmi_msg            send_msg;
 	int                               rv;
 	unsigned char                     data[1];
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
 
 	/*
 	 * Use get address info to check and see if we are ATCA
@@ -238,28 +254,30 @@
 					    (struct ipmi_addr *) &smi_addr,
 					    &send_msg);
 
-        printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
-        if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
-            && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
-		printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n");
+	printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+	       mfg_id, prod_id);
+	if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+	    && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+		printk(KERN_INFO PFX
+		       "Installing Pigeon Point Systems Poweroff Hook\n");
 		atca_oem_poweroff_hook = pps_poweroff_atca;
 	}
 	return !rv;
 }
 
-static void ipmi_poweroff_atca (ipmi_user_t user)
+static void ipmi_poweroff_atca(ipmi_user_t user)
 {
 	struct ipmi_system_interface_addr smi_addr;
 	struct kernel_ipmi_msg            send_msg;
 	int                               rv;
 	unsigned char                     data[4];
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
 
 	printk(KERN_INFO PFX "Powering down via ATCA power command\n");
 
@@ -273,23 +291,24 @@
 	data[2] = 0; /* Power Level */
 	data[3] = 0; /* Don't change saved presets */
 	send_msg.data = data;
-	send_msg.data_len = sizeof (data);
+	send_msg.data_len = sizeof(data);
 	rv = ipmi_request_in_rc_mode(user,
 				     (struct ipmi_addr *) &smi_addr,
 				     &send_msg);
-        /** At this point, the system may be shutting down, and most
-         ** serial drivers (if used) will have interrupts turned off
-         ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE
-         ** return code
-         **/
-        if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+	/*
+	 * At this point, the system may be shutting down, and most
+	 * serial drivers (if used) will have interrupts turned off, so
+	 * it may be better to ignore the IPMI_UNKNOWN_ERR_COMPLETION_CODE
+	 * return code.
+	 */
+	if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
 		printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
 		       " IPMI error 0x%x\n", rv);
 		goto out;
 	}
 
-	if(atca_oem_poweroff_hook)
-		return atca_oem_poweroff_hook(user);
+	if (atca_oem_poweroff_hook)
+		atca_oem_poweroff_hook(user);
  out:
 	return;
 }
@@ -310,13 +329,13 @@
 #define IPMI_CPI1_PRODUCT_ID		0x000157
 #define IPMI_CPI1_MANUFACTURER_ID	0x0108
 
-static int ipmi_cpi1_detect (ipmi_user_t user)
+static int ipmi_cpi1_detect(ipmi_user_t user)
 {
 	return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
 		&& (prod_id == IPMI_CPI1_PRODUCT_ID));
 }
 
-static void ipmi_poweroff_cpi1 (ipmi_user_t user)
+static void ipmi_poweroff_cpi1(ipmi_user_t user)
 {
 	struct ipmi_system_interface_addr smi_addr;
 	struct ipmi_ipmb_addr             ipmb_addr;
@@ -328,12 +347,12 @@
 	unsigned char                     aer_addr;
 	unsigned char                     aer_lun;
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
 
 	printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
 
@@ -425,7 +444,7 @@
  */
 
 #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
-static int ipmi_dell_chassis_detect (ipmi_user_t user)
+static int ipmi_dell_chassis_detect(ipmi_user_t user)
 {
 	const char ipmi_version_major = ipmi_version & 0xF;
 	const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
@@ -444,25 +463,25 @@
 #define IPMI_NETFN_CHASSIS_REQUEST	0
 #define IPMI_CHASSIS_CONTROL_CMD	0x02
 
-static int ipmi_chassis_detect (ipmi_user_t user)
+static int ipmi_chassis_detect(ipmi_user_t user)
 {
 	/* Chassis support, use it. */
 	return (capabilities & 0x80);
 }
 
-static void ipmi_poweroff_chassis (ipmi_user_t user)
+static void ipmi_poweroff_chassis(ipmi_user_t user)
 {
 	struct ipmi_system_interface_addr smi_addr;
 	struct kernel_ipmi_msg            send_msg;
 	int                               rv;
 	unsigned char                     data[1];
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+	/*
+	 * Configure IPMI address for local access
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
 
  powercyclefailed:
 	printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
@@ -525,15 +544,13 @@
 
 
 /* Called on a powerdown request. */
-static void ipmi_poweroff_function (void)
+static void ipmi_poweroff_function(void)
 {
 	if (!ready)
 		return;
 
 	/* Use run-to-completion mode, since interrupts may be off. */
-	ipmi_user_set_run_to_completion(ipmi_user, 1);
 	specific_poweroff_func(ipmi_user);
-	ipmi_user_set_run_to_completion(ipmi_user, 0);
 }
 
 /* Wait for an IPMI interface to be installed, the first one installed
@@ -561,13 +578,13 @@
 
 	ipmi_ifnum = if_num;
 
-        /*
-         * Do a get device ide and store some results, since this is
+	/*
+	 * Do a get device id and store some results, since this is
 	 * used by several functions.
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+	 */
+	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	smi_addr.channel = IPMI_BMC_CHANNEL;
+	smi_addr.lun = 0;
 
 	send_msg.netfn = IPMI_NETFN_APP_REQUEST;
 	send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
@@ -632,8 +649,7 @@
 	pm_power_off = old_poweroff_func;
 }
 
-static struct ipmi_smi_watcher smi_watcher =
-{
+static struct ipmi_smi_watcher smi_watcher = {
 	.owner    = THIS_MODULE,
 	.new_smi  = ipmi_po_new_smi,
 	.smi_gone = ipmi_po_smi_gone
@@ -675,12 +691,12 @@
 /*
  * Startup and shutdown functions.
  */
-static int ipmi_poweroff_init (void)
+static int ipmi_poweroff_init(void)
 {
 	int rv;
 
-	printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -"
-		" IPMI Powerdown via sys_reboot.\n");
+	printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+	       " IPMI Powerdown via sys_reboot.\n");
 
 	if (poweroff_powercycle)
 		printk(KERN_INFO PFX "Power cycle is enabled.\n");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4f560d0..5a54555 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -80,7 +80,7 @@
 #define SI_USEC_PER_JIFFY	(1000000/HZ)
 #define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
-                                       short timeout */
+				      short timeout */
 
 /* Bit for BMC global enables. */
 #define IPMI_BMC_RCV_MSG_INTR     0x01
@@ -114,14 +114,61 @@
 
 #define DEVICE_NAME "ipmi_si"
 
-static struct device_driver ipmi_driver =
-{
+static struct device_driver ipmi_driver = {
 	.name = DEVICE_NAME,
 	.bus = &platform_bus_type
 };
 
-struct smi_info
-{
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+	/*
+	 * Number of times the driver requested a timer while an operation
+	 * was in progress.
+	 */
+	SI_STAT_short_timeouts = 0,
+
+	/*
+	 * Number of times the driver requested a timer while nothing was in
+	 * progress.
+	 */
+	SI_STAT_long_timeouts,
+
+	/* Number of times the interface was idle while being polled. */
+	SI_STAT_idles,
+
+	/* Number of interrupts the driver handled. */
+	SI_STAT_interrupts,
+
+	/* Number of times the driver got an ATTN from the hardware. */
+	SI_STAT_attentions,
+
+	/* Number of times the driver requested flags from the hardware. */
+	SI_STAT_flag_fetches,
+
+	/* Number of times the hardware didn't follow the state machine. */
+	SI_STAT_hosed_count,
+
+	/* Number of completed messages. */
+	SI_STAT_complete_transactions,
+
+	/* Number of IPMI events received from the hardware. */
+	SI_STAT_events,
+
+	/* Number of watchdog pretimeouts. */
+	SI_STAT_watchdog_pretimeouts,
+
+	/* Number of asynchronous messages received. */
+	SI_STAT_incoming_messages,
+
+
+	/* This *must* remain last, add new values above this. */
+	SI_NUM_STATS
+};
+
+struct smi_info {
 	int                    intf_num;
 	ipmi_smi_t             intf;
 	struct si_sm_data      *si_sm;
@@ -134,8 +181,10 @@
 	struct ipmi_smi_msg    *curr_msg;
 	enum si_intf_state     si_state;
 
-	/* Used to handle the various types of I/O that can occur with
-           IPMI */
+	/*
+	 * Used to handle the various types of I/O that can occur with
+	 * IPMI
+	 */
 	struct si_sm_io io;
 	int (*io_setup)(struct smi_info *info);
 	void (*io_cleanup)(struct smi_info *info);
@@ -146,15 +195,18 @@
 	void (*addr_source_cleanup)(struct smi_info *info);
 	void *addr_source_data;
 
-	/* Per-OEM handler, called from handle_flags().
-	   Returns 1 when handle_flags() needs to be re-run
-	   or 0 indicating it set si_state itself.
-	*/
+	/*
+	 * Per-OEM handler, called from handle_flags().  Returns 1
+	 * when handle_flags() needs to be re-run or 0 indicating it
+	 * set si_state itself.
+	 */
 	int (*oem_data_avail_handler)(struct smi_info *smi_info);
 
-	/* Flags from the last GET_MSG_FLAGS command, used when an ATTN
-	   is set to hold the flags until we are done handling everything
-	   from the flags. */
+	/*
+	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+	 * is set to hold the flags until we are done handling everything
+	 * from the flags.
+	 */
 #define RECEIVE_MSG_AVAIL	0x01
 #define EVENT_MSG_BUFFER_FULL	0x02
 #define WDT_PRE_TIMEOUT_INT	0x08
@@ -162,25 +214,31 @@
 #define OEM1_DATA_AVAIL     0x40
 #define OEM2_DATA_AVAIL     0x80
 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
-                             OEM1_DATA_AVAIL | \
-                             OEM2_DATA_AVAIL)
+			     OEM1_DATA_AVAIL | \
+			     OEM2_DATA_AVAIL)
 	unsigned char       msg_flags;
 
-	/* If set to true, this will request events the next time the
-	   state machine is idle. */
+	/*
+	 * If set to true, this will request events the next time the
+	 * state machine is idle.
+	 */
 	atomic_t            req_events;
 
-	/* If true, run the state machine to completion on every send
-	   call.  Generally used after a panic to make sure stuff goes
-	   out. */
+	/*
+	 * If true, run the state machine to completion on every send
+	 * call.  Generally used after a panic to make sure stuff goes
+	 * out.
+	 */
 	int                 run_to_completion;
 
 	/* The I/O port of an SI interface. */
 	int                 port;
 
-	/* The space between start addresses of the two ports.  For
-	   instance, if the first port is 0xca2 and the spacing is 4, then
-	   the second port is 0xca6. */
+	/*
+	 * The space between start addresses of the two ports.  For
+	 * instance, if the first port is 0xca2 and the spacing is 4, then
+	 * the second port is 0xca6.
+	 */
 	unsigned int        spacing;
 
 	/* zero if no irq; */
@@ -195,10 +253,12 @@
 	/* Used to gracefully stop the timer without race conditions. */
 	atomic_t            stop_operation;
 
-	/* The driver will disable interrupts when it gets into a
-	   situation where it cannot handle messages due to lack of
-	   memory.  Once that situation clears up, it will re-enable
-	   interrupts. */
+	/*
+	 * The driver will disable interrupts when it gets into a
+	 * situation where it cannot handle messages due to lack of
+	 * memory.  Once that situation clears up, it will re-enable
+	 * interrupts.
+	 */
 	int interrupt_disabled;
 
 	/* From the get device id response... */
@@ -208,33 +268,28 @@
 	struct device *dev;
 	struct platform_device *pdev;
 
-	 /* True if we allocated the device, false if it came from
-	  * someplace else (like PCI). */
+	/*
+	 * True if we allocated the device, false if it came from
+	 * someplace else (like PCI).
+	 */
 	int dev_registered;
 
 	/* Slave address, could be reported from DMI. */
 	unsigned char slave_addr;
 
 	/* Counters and things for the proc filesystem. */
-	spinlock_t count_lock;
-	unsigned long short_timeouts;
-	unsigned long long_timeouts;
-	unsigned long timeout_restarts;
-	unsigned long idles;
-	unsigned long interrupts;
-	unsigned long attentions;
-	unsigned long flag_fetches;
-	unsigned long hosed_count;
-	unsigned long complete_transactions;
-	unsigned long events;
-	unsigned long watchdog_pretimeouts;
-	unsigned long incoming_messages;
+	atomic_t stats[SI_NUM_STATS];
 
-        struct task_struct *thread;
+	struct task_struct *thread;
 
 	struct list_head link;
 };
 
+#define smi_inc_stat(smi, stat) \
+	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+
 #define SI_MAX_PARMS 4
 
 static int force_kipmid[SI_MAX_PARMS];
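The smi_inc_stat()/smi_get_stat() macros above work by token pasting: the short
counter name is glued onto the SI_STAT_ prefix, so it has to match an entry in
enum si_stat_indexes.  A minimal illustration (a sketch, not text taken from the
patch) of how a call expands:

	smi_inc_stat(smi_info, attentions);
	/* ...expands to... */
	atomic_inc(&(smi_info)->stats[SI_STAT_attentions]);

	smi_get_stat(smi_info, attentions);
	/* ...expands to... */
	((unsigned int) atomic_read(&(smi_info)->stats[SI_STAT_attentions]));

This is also why SI_NUM_STATS must stay the last enumerator: it sizes the
stats[] array that both macros index.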
@@ -246,7 +301,7 @@
 static void cleanup_one_si(struct smi_info *to_clean);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
-static int register_xaction_notifier(struct notifier_block * nb)
+static int register_xaction_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
@@ -255,7 +310,7 @@
 			     struct ipmi_smi_msg *msg)
 {
 	/* Deliver the message to the upper layer with the lock
-           released. */
+	   released. */
 	spin_unlock(&(smi_info->si_lock));
 	ipmi_smi_msg_received(smi_info->intf, msg);
 	spin_lock(&(smi_info->si_lock));
@@ -287,9 +342,12 @@
 	struct timeval t;
 #endif
 
-	/* No need to save flags, we aleady have interrupts off and we
-	   already hold the SMI lock. */
-	spin_lock(&(smi_info->msg_lock));
+	/*
+	 * No need to save flags, we already have interrupts off and we
+	 * already hold the SMI lock.
+	 */
+	if (!smi_info->run_to_completion)
+		spin_lock(&(smi_info->msg_lock));
 
 	/* Pick the high priority queue first. */
 	if (!list_empty(&(smi_info->hp_xmit_msgs))) {
@@ -310,7 +368,7 @@
 						link);
 #ifdef DEBUG_TIMING
 		do_gettimeofday(&t);
-		printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+		printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 		err = atomic_notifier_call_chain(&xaction_notifier_list,
 				0, smi_info);
@@ -322,14 +380,14 @@
 			smi_info->si_sm,
 			smi_info->curr_msg->data,
 			smi_info->curr_msg->data_size);
-		if (err) {
+		if (err)
 			return_hosed_msg(smi_info, err);
-		}
 
 		rv = SI_SM_CALL_WITHOUT_DELAY;
 	}
-	out:
-	spin_unlock(&(smi_info->msg_lock));
+ out:
+	if (!smi_info->run_to_completion)
+		spin_unlock(&(smi_info->msg_lock));
 
 	return rv;
 }
@@ -338,8 +396,10 @@
 {
 	unsigned char msg[2];
 
-	/* If we are enabling interrupts, we have to tell the
-	   BMC to use them. */
+	/*
+	 * If we are enabling interrupts, we have to tell the
+	 * BMC to use them.
+	 */
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
@@ -371,10 +431,12 @@
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
-/* When we have a situtaion where we run out of memory and cannot
-   allocate messages, we just leave them in the BMC and run the system
-   polled until we can allocate some memory.  Once we have some
-   memory, we will re-enable the interrupt. */
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory.  Once we have some
+ * memory, we will re-enable the interrupt.
+ */
 static inline void disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
@@ -396,9 +458,7 @@
  retry:
 	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
 		/* Watchdog pre-timeout */
-		spin_lock(&smi_info->count_lock);
-		smi_info->watchdog_pretimeouts++;
-		spin_unlock(&smi_info->count_lock);
+		smi_inc_stat(smi_info, watchdog_pretimeouts);
 
 		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
@@ -444,12 +504,11 @@
 			smi_info->curr_msg->data_size);
 		smi_info->si_state = SI_GETTING_EVENTS;
 	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
-	           smi_info->oem_data_avail_handler) {
+		   smi_info->oem_data_avail_handler) {
 		if (smi_info->oem_data_avail_handler(smi_info))
 			goto retry;
-	} else {
+	} else
 		smi_info->si_state = SI_NORMAL;
-	}
 }
 
 static void handle_transaction_done(struct smi_info *smi_info)
@@ -459,7 +518,7 @@
 	struct timeval t;
 
 	do_gettimeofday(&t);
-	printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+	printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	switch (smi_info->si_state) {
 	case SI_NORMAL:
@@ -472,9 +531,11 @@
 				smi_info->curr_msg->rsp,
 				IPMI_MAX_MSG_LENGTH);
 
-		/* Do this here becase deliver_recv_msg() releases the
-		   lock, and a new message can be put in during the
-		   time the lock is released. */
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
 		msg = smi_info->curr_msg;
 		smi_info->curr_msg = NULL;
 		deliver_recv_msg(smi_info, msg);
@@ -488,12 +549,13 @@
 		/* We got the flags from the SMI, now handle them. */
 		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			/* Error fetching flags, just give up for
-			   now. */
+			/* Error fetching flags, just give up for now. */
 			smi_info->si_state = SI_NORMAL;
 		} else if (len < 4) {
-			/* Hmm, no flags.  That's technically illegal, but
-			   don't use uninitialized data. */
+			/*
+			 * Hmm, no flags.  That's technically illegal, but
+			 * don't use uninitialized data.
+			 */
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			smi_info->msg_flags = msg[3];
@@ -530,9 +592,11 @@
 				smi_info->curr_msg->rsp,
 				IPMI_MAX_MSG_LENGTH);
 
-		/* Do this here becase deliver_recv_msg() releases the
-		   lock, and a new message can be put in during the
-		   time the lock is released. */
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
 		msg = smi_info->curr_msg;
 		smi_info->curr_msg = NULL;
 		if (msg->rsp[2] != 0) {
@@ -543,14 +607,14 @@
 			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
 			handle_flags(smi_info);
 		} else {
-			spin_lock(&smi_info->count_lock);
-			smi_info->events++;
-			spin_unlock(&smi_info->count_lock);
+			smi_inc_stat(smi_info, events);
 
-			/* Do this before we deliver the message
-			   because delivering the message releases the
-			   lock and something else can mess with the
-			   state. */
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
 			handle_flags(smi_info);
 
 			deliver_recv_msg(smi_info, msg);
@@ -566,9 +630,11 @@
 				smi_info->curr_msg->rsp,
 				IPMI_MAX_MSG_LENGTH);
 
-		/* Do this here becase deliver_recv_msg() releases the
-		   lock, and a new message can be put in during the
-		   time the lock is released. */
+		/*
+		 * Do this here because deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
 		msg = smi_info->curr_msg;
 		smi_info->curr_msg = NULL;
 		if (msg->rsp[2] != 0) {
@@ -579,14 +645,14 @@
 			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
 			handle_flags(smi_info);
 		} else {
-			spin_lock(&smi_info->count_lock);
-			smi_info->incoming_messages++;
-			spin_unlock(&smi_info->count_lock);
+			smi_inc_stat(smi_info, incoming_messages);
 
-			/* Do this before we deliver the message
-			   because delivering the message releases the
-			   lock and something else can mess with the
-			   state. */
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
 			handle_flags(smi_info);
 
 			deliver_recv_msg(smi_info, msg);
@@ -674,69 +740,70 @@
 	}
 }
 
-/* Called on timeouts and events.  Timeouts should pass the elapsed
-   time, interrupts should pass in zero.  Must be called with
-   si_lock held and interrupts disabled. */
+/*
+ * Called on timeouts and events.  Timeouts should pass the elapsed
+ * time, interrupts should pass in zero.  Must be called with
+ * si_lock held and interrupts disabled.
+ */
 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 					   int time)
 {
 	enum si_sm_result si_sm_result;
 
  restart:
-	/* There used to be a loop here that waited a little while
-	   (around 25us) before giving up.  That turned out to be
-	   pointless, the minimum delays I was seeing were in the 300us
-	   range, which is far too long to wait in an interrupt.  So
-	   we just run until the state machine tells us something
-	   happened or it needs a delay. */
+	/*
+	 * There used to be a loop here that waited a little while
+	 * (around 25us) before giving up.  That turned out to be
+	 * pointless, the minimum delays I was seeing were in the 300us
+	 * range, which is far too long to wait in an interrupt.  So
+	 * we just run until the state machine tells us something
+	 * happened or it needs a delay.
+	 */
 	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
 	time = 0;
 	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
-	{
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
-	}
 
-	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
-	{
-		spin_lock(&smi_info->count_lock);
-		smi_info->complete_transactions++;
-		spin_unlock(&smi_info->count_lock);
+	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+		smi_inc_stat(smi_info, complete_transactions);
 
 		handle_transaction_done(smi_info);
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
-	}
-	else if (si_sm_result == SI_SM_HOSED)
-	{
-		spin_lock(&smi_info->count_lock);
-		smi_info->hosed_count++;
-		spin_unlock(&smi_info->count_lock);
+	} else if (si_sm_result == SI_SM_HOSED) {
+		smi_inc_stat(smi_info, hosed_count);
 
-		/* Do the before return_hosed_msg, because that
-		   releases the lock. */
+		/*
+		 * Do this before return_hosed_msg, because that
+		 * releases the lock.
+		 */
 		smi_info->si_state = SI_NORMAL;
 		if (smi_info->curr_msg != NULL) {
-			/* If we were handling a user message, format
-                           a response to send to the upper layer to
-                           tell it about the error. */
+			/*
+			 * If we were handling a user message, format
+			 * a response to send to the upper layer to
+			 * tell it about the error.
+			 */
 			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 		}
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 	}
 
-	/* We prefer handling attn over new messages. */
-	if (si_sm_result == SI_SM_ATTN)
-	{
+	/*
+	 * We prefer handling attn over new messages.  But don't do
+	 * this if there is not yet an upper layer to handle anything.
+	 */
+	if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
 		unsigned char msg[2];
 
-		spin_lock(&smi_info->count_lock);
-		smi_info->attentions++;
-		spin_unlock(&smi_info->count_lock);
+		smi_inc_stat(smi_info, attentions);
 
-		/* Got a attn, send down a get message flags to see
-                   what's causing it.  It would be better to handle
-                   this in the upper layer, but due to the way
-                   interrupts work with the SMI, that's not really
-                   possible. */
+		/*
+		 * Got an attn, send down a get message flags to see
+		 * what's causing it.  It would be better to handle
+		 * this in the upper layer, but due to the way
+		 * interrupts work with the SMI, that's not really
+		 * possible.
+		 */
 		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 		msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
@@ -748,20 +815,19 @@
 
 	/* If we are currently idle, try to start the next message. */
 	if (si_sm_result == SI_SM_IDLE) {
-		spin_lock(&smi_info->count_lock);
-		smi_info->idles++;
-		spin_unlock(&smi_info->count_lock);
+		smi_inc_stat(smi_info, idles);
 
 		si_sm_result = start_next_msg(smi_info);
 		if (si_sm_result != SI_SM_IDLE)
 			goto restart;
-        }
+	}
 
 	if ((si_sm_result == SI_SM_IDLE)
-	    && (atomic_read(&smi_info->req_events)))
-	{
-		/* We are idle and the upper layer requested that I fetch
-		   events, so do so. */
+	    && (atomic_read(&smi_info->req_events))) {
+		/*
+		 * We are idle and the upper layer requested that I fetch
+		 * events, so do so.
+		 */
 		atomic_set(&smi_info->req_events, 0);
 
 		smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -803,56 +869,50 @@
 		return;
 	}
 
-	spin_lock_irqsave(&(smi_info->msg_lock), flags);
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
 	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
 	if (smi_info->run_to_completion) {
-		/* If we are running to completion, then throw it in
-		   the list and run transactions until everything is
-		   clear.  Priority doesn't matter here. */
+		/*
+		 * If we are running to completion, then throw it in
+		 * the list and run transactions until everything is
+		 * clear.  Priority doesn't matter here.
+		 */
+
+		/*
+		 * Run to completion means we are single-threaded, no
+		 * need for locks.
+		 */
 		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 
-		/* We have to release the msg lock and claim the smi
-		   lock in this case, because of race conditions. */
-		spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
-
-		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		result = smi_event_handler(smi_info, 0);
 		while (result != SI_SM_IDLE) {
 			udelay(SI_SHORT_TIMEOUT_USEC);
 			result = smi_event_handler(smi_info,
 						   SI_SHORT_TIMEOUT_USEC);
 		}
-		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		return;
-	} else {
-		if (priority > 0) {
-			list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
-		} else {
-			list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
-		}
 	}
-	spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
 
-	spin_lock_irqsave(&(smi_info->si_lock), flags);
-	if ((smi_info->si_state == SI_NORMAL)
-	    && (smi_info->curr_msg == NULL))
-	{
+	spin_lock_irqsave(&smi_info->msg_lock, flags);
+	if (priority > 0)
+		list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
+	else
+		list_add_tail(&msg->link, &smi_info->xmit_msgs);
+	spin_unlock_irqrestore(&smi_info->msg_lock, flags);
+
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
 		start_next_msg(smi_info);
-	}
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
 static void set_run_to_completion(void *send_info, int i_run_to_completion)
 {
 	struct smi_info   *smi_info = send_info;
 	enum si_sm_result result;
-	unsigned long     flags;
-
-	spin_lock_irqsave(&(smi_info->si_lock), flags);
 
 	smi_info->run_to_completion = i_run_to_completion;
 	if (i_run_to_completion) {
@@ -863,8 +923,6 @@
 						   SI_SHORT_TIMEOUT_USEC);
 		}
 	}
-
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
 static int ipmi_thread(void *data)
@@ -878,9 +936,8 @@
 		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		smi_result = smi_event_handler(smi_info, 0);
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
-			/* do nothing */
-		}
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY)
 			schedule();
 		else
@@ -931,7 +988,7 @@
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
-	printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+	printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	jiffies_now = jiffies;
 	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
@@ -945,23 +1002,19 @@
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
 		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-		spin_lock_irqsave(&smi_info->count_lock, flags);
-		smi_info->long_timeouts++;
-		spin_unlock_irqrestore(&smi_info->count_lock, flags);
+		smi_inc_stat(smi_info, long_timeouts);
 		goto do_add_timer;
 	}
 
-	/* If the state machine asks for a short delay, then shorten
-           the timer timeout. */
+	/*
+	 * If the state machine asks for a short delay, then shorten
+	 * the timer timeout.
+	 */
 	if (smi_result == SI_SM_CALL_WITH_DELAY) {
-		spin_lock_irqsave(&smi_info->count_lock, flags);
-		smi_info->short_timeouts++;
-		spin_unlock_irqrestore(&smi_info->count_lock, flags);
+		smi_inc_stat(smi_info, short_timeouts);
 		smi_info->si_timer.expires = jiffies + 1;
 	} else {
-		spin_lock_irqsave(&smi_info->count_lock, flags);
-		smi_info->long_timeouts++;
-		spin_unlock_irqrestore(&smi_info->count_lock, flags);
+		smi_inc_stat(smi_info, long_timeouts);
 		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
 	}
 
@@ -979,13 +1032,11 @@
 
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
 
-	spin_lock(&smi_info->count_lock);
-	smi_info->interrupts++;
-	spin_unlock(&smi_info->count_lock);
+	smi_inc_stat(smi_info, interrupts);
 
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
-	printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+	printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	smi_event_handler(smi_info, 0);
 	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
@@ -1028,7 +1079,7 @@
 	 * The BT interface is efficient enough to not need a thread,
 	 * and there is no need for a thread if we have interrupts.
 	 */
- 	else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+	else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
 		enable = 1;
 
 	if (enable) {
@@ -1054,8 +1105,7 @@
 		atomic_set(&smi_info->req_events, 0);
 }
 
-static struct ipmi_smi_handlers handlers =
-{
+static struct ipmi_smi_handlers handlers = {
 	.owner                  = THIS_MODULE,
 	.start_processing       = smi_start_processing,
 	.sender			= sender,
@@ -1065,8 +1115,10 @@
 	.poll			= poll,
 };
 
-/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
-   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
+ */
 
 static LIST_HEAD(smi_infos);
 static DEFINE_MUTEX(smi_infos_lock);
@@ -1257,10 +1309,9 @@
 	int          idx;
 
 	if (addr) {
-	  	for (idx = 0; idx < info->io_size; idx++) {
+		for (idx = 0; idx < info->io_size; idx++)
 			release_region(addr + idx * info->io.regspacing,
 				       info->io.regsize);
-		}
 	}
 }
 
@@ -1274,8 +1325,10 @@
 
 	info->io_cleanup = port_cleanup;
 
-	/* Figure out the actual inb/inw/inl/etc routine to use based
-	   upon the register size. */
+	/*
+	 * Figure out the actual inb/inw/inl/etc routine to use based
+	 * upon the register size.
+	 */
 	switch (info->io.regsize) {
 	case 1:
 		info->io.inputb = port_inb;
@@ -1290,17 +1343,18 @@
 		info->io.outputb = port_outl;
 		break;
 	default:
-		printk("ipmi_si: Invalid register size: %d\n",
+		printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
 		       info->io.regsize);
 		return -EINVAL;
 	}
 
-	/* Some BIOSes reserve disjoint I/O regions in their ACPI
+	/*
+	 * Some BIOSes reserve disjoint I/O regions in their ACPI
 	 * tables.  This causes problems when trying to register the
 	 * entire I/O region.  Therefore we must register each I/O
 	 * port separately.
 	 */
-  	for (idx = 0; idx < info->io_size; idx++) {
+	for (idx = 0; idx < info->io_size; idx++) {
 		if (request_region(addr + idx * info->io.regspacing,
 				   info->io.regsize, DEVICE_NAME) == NULL) {
 			/* Undo allocations */
@@ -1388,8 +1442,10 @@
 
 	info->io_cleanup = mem_cleanup;
 
-	/* Figure out the actual readb/readw/readl/etc routine to use based
-	   upon the register size. */
+	/*
+	 * Figure out the actual readb/readw/readl/etc routine to use based
+	 * upon the register size.
+	 */
 	switch (info->io.regsize) {
 	case 1:
 		info->io.inputb = intf_mem_inb;
@@ -1410,16 +1466,18 @@
 		break;
 #endif
 	default:
-		printk("ipmi_si: Invalid register size: %d\n",
+		printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
 		       info->io.regsize);
 		return -EINVAL;
 	}
 
-	/* Calculate the total amount of memory to claim.  This is an
+	/*
+	 * Calculate the total amount of memory to claim.  This is an
 	 * unusual looking calculation, but it avoids claiming any
 	 * more memory than it has to.  It will claim everything
 	 * between the first address to the end of the last full
-	 * register. */
+	 * register.
+	 */
 	mapsize = ((info->io_size * info->io.regspacing)
 		   - (info->io.regspacing - info->io.regsize));
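For instance (purely illustrative numbers), with two registers (io_size = 2), a
register spacing of 4 and a register size of 1, the formula claims

	mapsize = (2 * 4) - (4 - 1) = 5

bytes, i.e. everything from the first register through the last byte of the
second register, but not the padding that follows it.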
 
@@ -1749,9 +1807,11 @@
 
 #include <linux/acpi.h>
 
-/* Once we get an ACPI failure, we don't try any more, because we go
-   through the tables sequentially.  Once we don't find a table, there
-   are no more. */
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially.  Once we don't find a table, there
+ * are no more.
+ */
 static int acpi_failure;
 
 /* For GPE-type interrupts. */
@@ -1765,9 +1825,7 @@
 
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
 
-	spin_lock(&smi_info->count_lock);
-	smi_info->interrupts++;
-	spin_unlock(&smi_info->count_lock);
+	smi_inc_stat(smi_info, interrupts);
 
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
@@ -1816,7 +1874,8 @@
 
 /*
  * Defined at
- * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
+ * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
+ * Docs/TechPapers/IA64/hpspmi.pdf
  */
 struct SPMITable {
 	s8	Signature[4];
@@ -1838,14 +1897,18 @@
 	 */
 	u8	InterruptType;
 
-	/* If bit 0 of InterruptType is set, then this is the SCI
-           interrupt in the GPEx_STS register. */
+	/*
+	 * If bit 0 of InterruptType is set, then this is the SCI
+	 * interrupt in the GPEx_STS register.
+	 */
 	u8	GPE;
 
 	s16	Reserved;
 
-	/* If bit 1 of InterruptType is set, then this is the I/O
-           APIC/SAPIC interrupt. */
+	/*
+	 * If bit 1 of InterruptType is set, then this is the I/O
+	 * APIC/SAPIC interrupt.
+	 */
 	u32	GlobalSystemInterrupt;
 
 	/* The actual register address. */
@@ -1863,7 +1926,7 @@
 
 	if (spmi->IPMIlegacy != 1) {
 	    printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
-  	    return -ENODEV;
+	    return -ENODEV;
 	}
 
 	if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -1880,8 +1943,7 @@
 	info->addr_source = "ACPI";
 
 	/* Figure out the interface type. */
-	switch (spmi->InterfaceType)
-	{
+	switch (spmi->InterfaceType) {
 	case 1:	/* KCS */
 		info->si_type = SI_KCS;
 		break;
@@ -1929,7 +1991,8 @@
 		info->io.addr_type = IPMI_IO_ADDR_SPACE;
 	} else {
 		kfree(info);
-		printk("ipmi_si: Unknown ACPI I/O Address type\n");
+		printk(KERN_WARNING
+		       "ipmi_si: Unknown ACPI I/O Address type\n");
 		return -EIO;
 	}
 	info->io.addr_data = spmi->addr.address;
@@ -1963,8 +2026,7 @@
 #endif
 
 #ifdef CONFIG_DMI
-struct dmi_ipmi_data
-{
+struct dmi_ipmi_data {
 	u8   		type;
 	u8   		addr_space;
 	unsigned long	base_addr;
@@ -1989,11 +2051,10 @@
 			/* I/O */
 			base_addr &= 0xFFFE;
 			dmi->addr_space = IPMI_IO_ADDR_SPACE;
-		}
-		else {
+		} else
 			/* Memory */
 			dmi->addr_space = IPMI_MEM_ADDR_SPACE;
-		}
+
 		/* If bit 4 of byte 0x10 is set, then the lsb for the address
 		   is odd. */
 		dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
@@ -2002,7 +2063,7 @@
 
 		/* The top two bits of byte 0x10 hold the register spacing. */
 		reg_spacing = (data[0x10] & 0xC0) >> 6;
-		switch(reg_spacing){
+		switch (reg_spacing) {
 		case 0x00: /* Byte boundaries */
 		    dmi->offset = 1;
 		    break;
@@ -2018,12 +2079,14 @@
 		}
 	} else {
 		/* Old DMI spec. */
-		/* Note that technically, the lower bit of the base
+		/*
+		 * Note that technically, the lower bit of the base
 		 * address should be 1 if the address is I/O and 0 if
 		 * the address is in memory.  So many systems get that
 		 * wrong (and all that I have seen are I/O) so we just
 		 * ignore that bit and assume I/O.  Systems that use
-		 * memory should use the newer spec, anyway. */
+		 * memory should use the newer spec, anyway.
+		 */
 		dmi->base_addr = base_addr & 0xfffe;
 		dmi->addr_space = IPMI_IO_ADDR_SPACE;
 		dmi->offset = 1;
@@ -2230,13 +2293,13 @@
 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
 
 static struct pci_driver ipmi_pci_driver = {
-        .name =         DEVICE_NAME,
-        .id_table =     ipmi_pci_devices,
-        .probe =        ipmi_pci_probe,
-        .remove =       __devexit_p(ipmi_pci_remove),
+	.name =         DEVICE_NAME,
+	.id_table =     ipmi_pci_devices,
+	.probe =        ipmi_pci_probe,
+	.remove =       __devexit_p(ipmi_pci_remove),
 #ifdef CONFIG_PM
-        .suspend =      ipmi_pci_suspend,
-        .resume =       ipmi_pci_resume,
+	.suspend =      ipmi_pci_suspend,
+	.resume =       ipmi_pci_resume,
 #endif
 };
 #endif /* CONFIG_PCI */
@@ -2306,7 +2369,7 @@
 		info->io.addr_data, info->io.regsize, info->io.regspacing,
 		info->irq);
 
-	dev->dev.driver_data = (void*) info;
+	dev->dev.driver_data = (void *) info;
 
 	return try_smi_init(info);
 }
@@ -2319,14 +2382,16 @@
 
 static struct of_device_id ipmi_match[] =
 {
-	{ .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
-	{ .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
-	{ .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
+	{ .type = "ipmi", .compatible = "ipmi-kcs",
+	  .data = (void *)(unsigned long) SI_KCS },
+	{ .type = "ipmi", .compatible = "ipmi-smic",
+	  .data = (void *)(unsigned long) SI_SMIC },
+	{ .type = "ipmi", .compatible = "ipmi-bt",
+	  .data = (void *)(unsigned long) SI_BT },
 	{},
 };
 
-static struct of_platform_driver ipmi_of_platform_driver =
-{
+static struct of_platform_driver ipmi_of_platform_driver = {
 	.name		= "ipmi",
 	.match_table	= ipmi_match,
 	.probe		= ipmi_of_probe,
@@ -2347,32 +2412,32 @@
 	if (!resp)
 		return -ENOMEM;
 
-	/* Do a Get Device ID command, since it comes back with some
-	   useful info. */
+	/*
+	 * Do a Get Device ID command, since it comes back with some
+	 * useful info.
+	 */
 	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
 	msg[1] = IPMI_GET_DEVICE_ID_CMD;
 	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 
 	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
-	for (;;)
-	{
+	for (;;) {
 		if (smi_result == SI_SM_CALL_WITH_DELAY ||
 		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
 			schedule_timeout_uninterruptible(1);
 			smi_result = smi_info->handlers->event(
 				smi_info->si_sm, 100);
-		}
-		else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
-		{
+		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
 			smi_result = smi_info->handlers->event(
 				smi_info->si_sm, 0);
-		}
-		else
+		} else
 			break;
 	}
 	if (smi_result == SI_SM_HOSED) {
-		/* We couldn't get the state machine to run, so whatever's at
-		   the port is probably not an IPMI SMI interface. */
+		/*
+		 * We couldn't get the state machine to run, so whatever's at
+		 * the port is probably not an IPMI SMI interface.
+		 */
 		rv = -ENODEV;
 		goto out;
 	}
@@ -2405,30 +2470,28 @@
 
 	out += sprintf(out, "interrupts_enabled:    %d\n",
 		       smi->irq && !smi->interrupt_disabled);
-	out += sprintf(out, "short_timeouts:        %ld\n",
-		       smi->short_timeouts);
-	out += sprintf(out, "long_timeouts:         %ld\n",
-		       smi->long_timeouts);
-	out += sprintf(out, "timeout_restarts:      %ld\n",
-		       smi->timeout_restarts);
-	out += sprintf(out, "idles:                 %ld\n",
-		       smi->idles);
-	out += sprintf(out, "interrupts:            %ld\n",
-		       smi->interrupts);
-	out += sprintf(out, "attentions:            %ld\n",
-		       smi->attentions);
-	out += sprintf(out, "flag_fetches:          %ld\n",
-		       smi->flag_fetches);
-	out += sprintf(out, "hosed_count:           %ld\n",
-		       smi->hosed_count);
-	out += sprintf(out, "complete_transactions: %ld\n",
-		       smi->complete_transactions);
-	out += sprintf(out, "events:                %ld\n",
-		       smi->events);
-	out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
-		       smi->watchdog_pretimeouts);
-	out += sprintf(out, "incoming_messages:     %ld\n",
-		       smi->incoming_messages);
+	out += sprintf(out, "short_timeouts:        %u\n",
+		       smi_get_stat(smi, short_timeouts));
+	out += sprintf(out, "long_timeouts:         %u\n",
+		       smi_get_stat(smi, long_timeouts));
+	out += sprintf(out, "idles:                 %u\n",
+		       smi_get_stat(smi, idles));
+	out += sprintf(out, "interrupts:            %u\n",
+		       smi_get_stat(smi, interrupts));
+	out += sprintf(out, "attentions:            %u\n",
+		       smi_get_stat(smi, attentions));
+	out += sprintf(out, "flag_fetches:          %u\n",
+		       smi_get_stat(smi, flag_fetches));
+	out += sprintf(out, "hosed_count:           %u\n",
+		       smi_get_stat(smi, hosed_count));
+	out += sprintf(out, "complete_transactions: %u\n",
+		       smi_get_stat(smi, complete_transactions));
+	out += sprintf(out, "events:                %u\n",
+		       smi_get_stat(smi, events));
+	out += sprintf(out, "watchdog_pretimeouts:  %u\n",
+		       smi_get_stat(smi, watchdog_pretimeouts));
+	out += sprintf(out, "incoming_messages:     %u\n",
+		       smi_get_stat(smi, incoming_messages));
 
 	return out - page;
 }
@@ -2460,7 +2523,7 @@
 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
 {
 	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
-			      	RECEIVE_MSG_AVAIL);
+			       RECEIVE_MSG_AVAIL);
 	return 1;
 }
 
@@ -2502,10 +2565,9 @@
 		    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
 			smi_info->oem_data_avail_handler =
 				oem_data_avail_to_receive_msg_avail;
-		}
-		else if (ipmi_version_major(id) < 1 ||
-			 (ipmi_version_major(id) == 1 &&
-			  ipmi_version_minor(id) < 5)) {
+		} else if (ipmi_version_major(id) < 1 ||
+			   (ipmi_version_major(id) == 1 &&
+			    ipmi_version_minor(id) < 5)) {
 			smi_info->oem_data_avail_handler =
 				oem_data_avail_to_receive_msg_avail;
 		}
@@ -2597,8 +2659,10 @@
 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
 {
 	if (smi_info->intf) {
-		/* The timer and thread are only running if the
-		   interface has been started up and registered. */
+		/*
+		 * The timer and thread are only running if the
+		 * interface has been started up and registered.
+		 */
 		if (smi_info->thread != NULL)
 			kthread_stop(smi_info->thread);
 		del_timer_sync(&smi_info->si_timer);
@@ -2676,6 +2740,7 @@
 static int try_smi_init(struct smi_info *new_smi)
 {
 	int rv;
+	int i;
 
 	if (new_smi->addr_source) {
 		printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
@@ -2722,7 +2787,7 @@
 	/* Allocate the state machine's data and initialize it. */
 	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
 	if (!new_smi->si_sm) {
-		printk(" Could not allocate state machine memory\n");
+		printk(KERN_ERR "Could not allocate state machine memory\n");
 		rv = -ENOMEM;
 		goto out_err;
 	}
@@ -2732,13 +2797,12 @@
 	/* Now that we know the I/O size, we can set up the I/O. */
 	rv = new_smi->io_setup(new_smi);
 	if (rv) {
-		printk(" Could not set up I/O space\n");
+		printk(KERN_ERR "Could not set up I/O space\n");
 		goto out_err;
 	}
 
 	spin_lock_init(&(new_smi->si_lock));
 	spin_lock_init(&(new_smi->msg_lock));
-	spin_lock_init(&(new_smi->count_lock));
 
 	/* Do low-level detection first. */
 	if (new_smi->handlers->detect(new_smi->si_sm)) {
@@ -2749,8 +2813,10 @@
 		goto out_err;
 	}
 
-	/* Attempt a get device id command.  If it fails, we probably
-           don't have a BMC here. */
+	/*
+	 * Attempt a get device id command.  If it fails, we probably
+	 * don't have a BMC here.
+	 */
 	rv = try_get_dev_id(new_smi);
 	if (rv) {
 		if (new_smi->addr_source)
@@ -2767,22 +2833,28 @@
 	new_smi->curr_msg = NULL;
 	atomic_set(&new_smi->req_events, 0);
 	new_smi->run_to_completion = 0;
+	for (i = 0; i < SI_NUM_STATS; i++)
+		atomic_set(&new_smi->stats[i], 0);
 
 	new_smi->interrupt_disabled = 0;
 	atomic_set(&new_smi->stop_operation, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
 
-	/* Start clearing the flags before we enable interrupts or the
-	   timer to avoid racing with the timer. */
+	/*
+	 * Start clearing the flags before we enable interrupts or the
+	 * timer to avoid racing with the timer.
+	 */
 	start_clear_flags(new_smi);
 	/* IRQ is defined to be set when non-zero. */
 	if (new_smi->irq)
 		new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
 
 	if (!new_smi->dev) {
-		/* If we don't already have a device from something
-		 * else (like PCI), then register a new one. */
+		/*
+		 * If we don't already have a device from something
+		 * else (like PCI), then register a new one.
+		 */
 		new_smi->pdev = platform_device_alloc("ipmi_si",
 						      new_smi->intf_num);
 		if (rv) {
@@ -2820,7 +2892,7 @@
 	}
 
 	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
-				     type_file_read_proc, NULL,
+				     type_file_read_proc,
 				     new_smi, THIS_MODULE);
 	if (rv) {
 		printk(KERN_ERR
@@ -2830,7 +2902,7 @@
 	}
 
 	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
-				     stat_file_read_proc, NULL,
+				     stat_file_read_proc,
 				     new_smi, THIS_MODULE);
 	if (rv) {
 		printk(KERN_ERR
@@ -2840,7 +2912,7 @@
 	}
 
 	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
-				     param_read_proc, NULL,
+				     param_read_proc,
 				     new_smi, THIS_MODULE);
 	if (rv) {
 		printk(KERN_ERR
@@ -2853,7 +2925,8 @@
 
 	mutex_unlock(&smi_infos_lock);
 
-	printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
+	printk(KERN_INFO "IPMI %s interface initialized\n",
+	       si_to_str[new_smi->si_type]);
 
 	return 0;
 
@@ -2868,9 +2941,11 @@
 	if (new_smi->irq_cleanup)
 		new_smi->irq_cleanup(new_smi);
 
-	/* Wait until we know that we are out of any interrupt
-	   handlers might have been running before we freed the
-	   interrupt. */
+	/*
+	 * Wait until we know that we are out of any interrupt
+	 * handlers might have been running before we freed the
+	 * interrupt.
+	 */
 	synchronize_sched();
 
 	if (new_smi->si_sm) {
@@ -2942,11 +3017,10 @@
 
 #ifdef CONFIG_PCI
 	rv = pci_register_driver(&ipmi_pci_driver);
-	if (rv){
+	if (rv)
 		printk(KERN_ERR
 		       "init_ipmi_si: Unable to register PCI driver: %d\n",
 		       rv);
-	}
 #endif
 
 #ifdef CONFIG_PPC_OF
@@ -2975,7 +3049,8 @@
 		of_unregister_platform_driver(&ipmi_of_platform_driver);
 #endif
 		driver_unregister(&ipmi_driver);
-		printk("ipmi_si: Unable to find any System Interface(s)\n");
+		printk(KERN_WARNING
+		       "ipmi_si: Unable to find any System Interface(s)\n");
 		return -ENODEV;
 	} else {
 		mutex_unlock(&smi_infos_lock);
@@ -2997,13 +3072,17 @@
 	/* Tell the driver that we are shutting down. */
 	atomic_inc(&to_clean->stop_operation);
 
-	/* Make sure the timer and thread are stopped and will not run
-	   again. */
+	/*
+	 * Make sure the timer and thread are stopped and will not run
+	 * again.
+	 */
 	wait_for_timer_and_thread(to_clean);
 
-	/* Timeouts are stopped, now make sure the interrupts are off
-	   for the device.  A little tricky with locks to make sure
-	   there are no races. */
+	/*
+	 * Timeouts are stopped, now make sure the interrupts are off
+	 * for the device.  A little tricky with locks to make sure
+	 * there are no races.
+	 */
 	spin_lock_irqsave(&to_clean->si_lock, flags);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		spin_unlock_irqrestore(&to_clean->si_lock, flags);
@@ -3074,4 +3153,5 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
-MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+		   " system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index 4b731b2..df89f73 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -34,22 +34,27 @@
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-/* This is defined by the state machines themselves, it is an opaque
-   data type for them to use. */
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
 struct si_sm_data;
 
-/* The structure for doing I/O in the state machine.  The state
-   machine doesn't have the actual I/O routines, they are done through
-   this interface. */
-struct si_sm_io
-{
+/*
+ * The structure for doing I/O in the state machine.  The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
 	unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
 	void (*outputb)(struct si_sm_io *io,
 			unsigned int  offset,
 			unsigned char b);
 
-	/* Generic info used by the actual handling routines, the
-           state machine shouldn't touch these. */
+	/*
+	 * Generic info used by the actual handling routines, the
+	 * state machine shouldn't touch these.
+	 */
 	void __iomem *addr;
 	int  regspacing;
 	int  regsize;
@@ -59,53 +64,67 @@
 };
 
 /* Results of SMI events. */
-enum si_sm_result
-{
+enum si_sm_result {
 	SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
 	SI_SM_CALL_WITH_DELAY,	/* Delay some before calling again. */
-	SI_SM_CALL_WITH_TICK_DELAY,	/* Delay at least 1 tick before calling again. */
+	SI_SM_CALL_WITH_TICK_DELAY, /* Delay >=1 tick before calling again. */
 	SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
 	SI_SM_IDLE,		/* The SM is in idle state. */
 	SI_SM_HOSED,		/* The hardware violated the state machine. */
-	SI_SM_ATTN		/* The hardware is asserting attn and the
-				   state machine is idle. */
+
+	/*
+	 * The hardware is asserting attn and the state machine is
+	 * idle.
+	 */
+	SI_SM_ATTN
 };
 
 /* Handlers for the SMI state machine. */
-struct si_sm_handlers
-{
-	/* Put the version number of the state machine here so the
-           upper layer can print it. */
+struct si_sm_handlers {
+	/*
+	 * Put the version number of the state machine here so the
+	 * upper layer can print it.
+	 */
 	char *version;
 
-	/* Initialize the data and return the amount of I/O space to
-           reserve for the space. */
+	/*
+	 * Initialize the data and return the amount of I/O space to
+	 * reserve for the interface.
+	 */
 	unsigned int (*init_data)(struct si_sm_data *smi,
 				  struct si_sm_io   *io);
 
-	/* Start a new transaction in the state machine.  This will
-	   return -2 if the state machine is not idle, -1 if the size
-	   is invalid (to large or too small), or 0 if the transaction
-	   is successfully completed. */
+	/*
+	 * Start a new transaction in the state machine.  This will
+	 * return -2 if the state machine is not idle, -1 if the size
+	 * is invalid (too large or too small), or 0 if the transaction
+	 * is successfully completed.
+	 */
 	int (*start_transaction)(struct si_sm_data *smi,
 				 unsigned char *data, unsigned int size);
 
-	/* Return the results after the transaction.  This will return
-	   -1 if the buffer is too small, zero if no transaction is
-	   present, or the actual length of the result data. */
+	/*
+	 * Return the results after the transaction.  This will return
+	 * -1 if the buffer is too small, zero if no transaction is
+	 * present, or the actual length of the result data.
+	 */
 	int (*get_result)(struct si_sm_data *smi,
 			  unsigned char *data, unsigned int length);
 
-	/* Call this periodically (for a polled interface) or upon
-	   receiving an interrupt (for a interrupt-driven interface).
-	   If interrupt driven, you should probably poll this
-	   periodically when not in idle state.  This should be called
-	   with the time that passed since the last call, if it is
-	   significant.  Time is in microseconds. */
+	/*
+	 * Call this periodically (for a polled interface) or upon
+	 * receiving an interrupt (for an interrupt-driven interface).
+	 * If interrupt driven, you should probably poll this
+	 * periodically when not in idle state.  This should be called
+	 * with the time that passed since the last call, if it is
+	 * significant.  Time is in microseconds.
+	 */
 	enum si_sm_result (*event)(struct si_sm_data *smi, long time);
 
-	/* Attempt to detect an SMI.  Returns 0 on success or nonzero
-           on failure. */
+	/*
+	 * Attempt to detect an SMI.  Returns 0 on success or nonzero
+	 * on failure.
+	 */
 	int (*detect)(struct si_sm_data *smi);
 
 	/* The interface is shutting down, so clean it up. */
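A minimal sketch of how a caller is expected to drive these handlers, mirroring
the polling loops in ipmi_si_intf.c; the names smi (a struct si_sm_data *),
handlers (a struct si_sm_handlers *), rv, res, len and the 250us polling
interval are illustrative assumptions, not part of this interface:

	unsigned char     msg[2], rsp[IPMI_MAX_MSG_LENGTH];
	enum si_sm_result res;
	int               rv, len;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	rv = handlers->start_transaction(smi, msg, 2);	/* 0, -2 if busy, -1 if bad size */

	res = handlers->event(smi, 0);
	while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED) {
		udelay(250);			/* honour the *_WITH_DELAY results */
		res = handlers->event(smi, 250);	/* elapsed time in microseconds */
	}

	if (res == SI_SM_TRANSACTION_COMPLETE)
		len = handlers->get_result(smi, rsp, sizeof(rsp));	/* -1 if rsp too small */

Real callers sleep or reschedule instead of busy-waiting, as the driver's own
polling loops do.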
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index e64ea7d..faed929 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -85,6 +85,7 @@
 /* SMIC Flags Register Bits */
 #define SMIC_RX_DATA_READY	0x80
 #define SMIC_TX_DATA_READY	0x40
+
 /*
  * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
  * a few systems, and then only by Systems Management
@@ -104,23 +105,22 @@
 #define	EC_ILLEGAL_COMMAND	0x04
 #define	EC_BUFFER_FULL		0x05
 
-struct si_sm_data
-{
+struct si_sm_data {
 	enum smic_states state;
 	struct si_sm_io *io;
-        unsigned char	 write_data[MAX_SMIC_WRITE_SIZE];
-        int		 write_pos;
-        int		 write_count;
-        int		 orig_write_count;
-        unsigned char	 read_data[MAX_SMIC_READ_SIZE];
-        int		 read_pos;
-        int		 truncated;
-        unsigned int	 error_retries;
-        long		 smic_timeout;
+	unsigned char	 write_data[MAX_SMIC_WRITE_SIZE];
+	int		 write_pos;
+	int		 write_count;
+	int		 orig_write_count;
+	unsigned char	 read_data[MAX_SMIC_READ_SIZE];
+	int		 read_pos;
+	int		 truncated;
+	unsigned int	 error_retries;
+	long		 smic_timeout;
 };
 
-static unsigned int init_smic_data (struct si_sm_data *smic,
-				    struct si_sm_io *io)
+static unsigned int init_smic_data(struct si_sm_data *smic,
+				   struct si_sm_io *io)
 {
 	smic->state = SMIC_IDLE;
 	smic->io = io;
@@ -150,11 +150,10 @@
 		return IPMI_NOT_IN_MY_STATE_ERR;
 
 	if (smic_debug & SMIC_DEBUG_MSG) {
-		printk(KERN_INFO "start_smic_transaction -");
-		for (i = 0; i < size; i ++) {
-			printk (" %02x", (unsigned char) (data [i]));
-		}
-		printk ("\n");
+		printk(KERN_DEBUG "start_smic_transaction -");
+		for (i = 0; i < size; i++)
+			printk(" %02x", (unsigned char) data[i]);
+		printk("\n");
 	}
 	smic->error_retries = 0;
 	memcpy(smic->write_data, data, size);
@@ -173,11 +172,10 @@
 	int i;
 
 	if (smic_debug & SMIC_DEBUG_MSG) {
-		printk (KERN_INFO "smic_get result -");
-		for (i = 0; i < smic->read_pos; i ++) {
-			printk (" %02x", (smic->read_data [i]));
-		}
-		printk ("\n");
+		printk(KERN_DEBUG "smic_get result -");
+		for (i = 0; i < smic->read_pos; i++)
+			printk(" %02x", smic->read_data[i]);
+		printk("\n");
 	}
 	if (length < smic->read_pos) {
 		smic->read_pos = length;
@@ -223,8 +221,8 @@
 	smic->io->outputb(smic->io, 1, control);
 }
 
-static inline void write_si_sm_data (struct si_sm_data *smic,
-				   unsigned char   data)
+static inline void write_si_sm_data(struct si_sm_data *smic,
+				    unsigned char   data)
 {
 	smic->io->outputb(smic->io, 0, data);
 }
@@ -233,10 +231,9 @@
 {
 	(smic->error_retries)++;
 	if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
-		if (smic_debug & SMIC_DEBUG_ENABLE) {
+		if (smic_debug & SMIC_DEBUG_ENABLE)
 			printk(KERN_WARNING
 			       "ipmi_smic_drv: smic hosed: %s\n", reason);
-		}
 		smic->state = SMIC_HOSED;
 	} else {
 		smic->write_count = smic->orig_write_count;
@@ -254,14 +251,14 @@
 	(smic->write_count)--;
 }
 
-static inline void read_next_byte (struct si_sm_data *smic)
+static inline void read_next_byte(struct si_sm_data *smic)
 {
 	if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
-		read_smic_data (smic);
+		read_smic_data(smic);
 		smic->truncated = 1;
 	} else {
 		smic->read_data[smic->read_pos] = read_smic_data(smic);
-		(smic->read_pos)++;
+		smic->read_pos++;
 	}
 }
 
@@ -336,7 +333,7 @@
 	SMIC_SC_SMS_RD_END	0xC6
 */
 
-static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
 {
 	unsigned char status;
 	unsigned char flags;
@@ -347,13 +344,15 @@
 		return SI_SM_HOSED;
 	}
 	if (smic->state != SMIC_IDLE) {
-		if (smic_debug & SMIC_DEBUG_STATES) {
-			printk(KERN_INFO
+		if (smic_debug & SMIC_DEBUG_STATES)
+			printk(KERN_DEBUG
 			       "smic_event - smic->smic_timeout = %ld,"
 			       " time = %ld\n",
 			       smic->smic_timeout, time);
-		}
-/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */
+		/*
+		 * FIXME: smic_event is sometimes called with time >
+		 * SMIC_RETRY_TIMEOUT
+		 */
 		if (time < SMIC_RETRY_TIMEOUT) {
 			smic->smic_timeout -= time;
 			if (smic->smic_timeout < 0) {
@@ -366,9 +365,9 @@
 	if (flags & SMIC_FLAG_BSY)
 		return SI_SM_CALL_WITH_DELAY;
 
-	status = read_smic_status (smic);
+	status = read_smic_status(smic);
 	if (smic_debug & SMIC_DEBUG_STATES)
-		printk(KERN_INFO
+		printk(KERN_DEBUG
 		       "smic_event - state = %d, flags = 0x%02x,"
 		       " status = 0x%02x\n",
 		       smic->state, flags, status);
@@ -377,9 +376,7 @@
 	case SMIC_IDLE:
 		/* in IDLE we check for available messages */
 		if (flags & SMIC_SMS_DATA_AVAIL)
-		{
 			return SI_SM_ATTN;
-		}
 		return SI_SM_IDLE;
 
 	case SMIC_START_OP:
@@ -391,7 +388,7 @@
 
 	case SMIC_OP_OK:
 		if (status != SMIC_SC_SMS_READY) {
-				/* this should not happen */
+			/* this should not happen */
 			start_error_recovery(smic,
 					     "state = SMIC_OP_OK,"
 					     " status != SMIC_SC_SMS_READY");
@@ -411,8 +408,10 @@
 					     "status != SMIC_SC_SMS_WR_START");
 			return SI_SM_CALL_WITH_DELAY;
 		}
-		/* we must not issue WR_(NEXT|END) unless
-                   TX_DATA_READY is set */
+		/*
+		 * we must not issue WR_(NEXT|END) unless
+		 * TX_DATA_READY is set
+		 */
 		if (flags & SMIC_TX_DATA_READY) {
 			if (smic->write_count == 1) {
 				/* last byte */
@@ -424,10 +423,8 @@
 			}
 			write_next_byte(smic);
 			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
-		}
-		else {
+		} else
 			return SI_SM_CALL_WITH_DELAY;
-		}
 		break;
 
 	case SMIC_WRITE_NEXT:
@@ -442,52 +439,48 @@
 			if (smic->write_count == 1) {
 				write_smic_control(smic, SMIC_CC_SMS_WR_END);
 				smic->state = SMIC_WRITE_END;
-			}
-			else {
+			} else {
 				write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
 				smic->state = SMIC_WRITE_NEXT;
 			}
 			write_next_byte(smic);
 			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
-		}
-		else {
+		} else
 			return SI_SM_CALL_WITH_DELAY;
-		}
 		break;
 
 	case SMIC_WRITE_END:
 		if (status != SMIC_SC_SMS_WR_END) {
-			start_error_recovery (smic,
-					      "state = SMIC_WRITE_END, "
-					      "status != SMIC_SC_SMS_WR_END");
+			start_error_recovery(smic,
+					     "state = SMIC_WRITE_END, "
+					     "status != SMIC_SC_SMS_WR_END");
 			return SI_SM_CALL_WITH_DELAY;
 		}
 		/* data register holds an error code */
 		data = read_smic_data(smic);
 		if (data != 0) {
-			if (smic_debug & SMIC_DEBUG_ENABLE) {
-				printk(KERN_INFO
+			if (smic_debug & SMIC_DEBUG_ENABLE)
+				printk(KERN_DEBUG
 				       "SMIC_WRITE_END: data = %02x\n", data);
-			}
 			start_error_recovery(smic,
 					     "state = SMIC_WRITE_END, "
 					     "data != SUCCESS");
 			return SI_SM_CALL_WITH_DELAY;
-		} else {
+		} else
 			smic->state = SMIC_WRITE2READ;
-		}
 		break;
 
 	case SMIC_WRITE2READ:
-		/* we must wait for RX_DATA_READY to be set before we
-                   can continue */
+		/*
+		 * we must wait for RX_DATA_READY to be set before we
+		 * can continue
+		 */
 		if (flags & SMIC_RX_DATA_READY) {
 			write_smic_control(smic, SMIC_CC_SMS_RD_START);
 			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
 			smic->state = SMIC_READ_START;
-		} else {
+		} else
 			return SI_SM_CALL_WITH_DELAY;
-		}
 		break;
 
 	case SMIC_READ_START:
@@ -502,15 +495,16 @@
 			write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
 			write_smic_flags(smic, flags | SMIC_FLAG_BSY);
 			smic->state = SMIC_READ_NEXT;
-		} else {
+		} else
 			return SI_SM_CALL_WITH_DELAY;
-		}
 		break;
 
 	case SMIC_READ_NEXT:
 		switch (status) {
-		/* smic tells us that this is the last byte to be read
-                   --> clean up */
+		/*
+		 * smic tells us that this is the last byte to be read
+		 * --> clean up
+		 */
 		case SMIC_SC_SMS_RD_END:
 			read_next_byte(smic);
 			write_smic_control(smic, SMIC_CC_SMS_RD_END);
@@ -523,9 +517,8 @@
 				write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
 				write_smic_flags(smic, flags | SMIC_FLAG_BSY);
 				smic->state = SMIC_READ_NEXT;
-			} else {
+			} else
 				return SI_SM_CALL_WITH_DELAY;
-			}
 			break;
 		default:
 			start_error_recovery(
@@ -546,10 +539,9 @@
 		data = read_smic_data(smic);
 		/* data register holds an error code */
 		if (data != 0) {
-			if (smic_debug & SMIC_DEBUG_ENABLE) {
-				printk(KERN_INFO
+			if (smic_debug & SMIC_DEBUG_ENABLE)
+				printk(KERN_DEBUG
 				       "SMIC_READ_END: data = %02x\n", data);
-			}
 			start_error_recovery(smic,
 					     "state = SMIC_READ_END, "
 					     "data != SUCCESS");
@@ -565,7 +557,7 @@
 
 	default:
 		if (smic_debug & SMIC_DEBUG_ENABLE) {
-			printk(KERN_WARNING "smic->state = %d\n", smic->state);
+			printk(KERN_DEBUG "smic->state = %d\n", smic->state);
 			start_error_recovery(smic, "state = UNKNOWN");
 			return SI_SM_CALL_WITH_DELAY;
 		}
@@ -576,10 +568,12 @@
 
 static int smic_detect(struct si_sm_data *smic)
 {
-	/* It's impossible for the SMIC fnags register to be all 1's,
-	   (assuming a properly functioning, self-initialized BMC)
-	   but that's what you get from reading a bogus address, so we
-	   test that first. */
+	/*
+	 * It's impossible for the SMIC flags register to be all 1's,
+	 * (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
 	if (read_smic_flags(smic) == 0xff)
 		return 1;
 
@@ -595,8 +589,7 @@
 	return sizeof(struct si_sm_data);
 }
 
-struct si_sm_handlers smic_smi_handlers =
-{
+struct si_sm_handlers smic_smi_handlers = {
 	.init_data         = init_smic_data,
 	.start_transaction = start_smic_transaction,
 	.get_result        = smic_get_result,
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 8f45ca9..1b9a870 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -54,13 +54,15 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_X86
-/* This is ugly, but I've determined that x86 is the only architecture
-   that can reasonably support the IPMI NMI watchdog timeout at this
-   time.  If another architecture adds this capability somehow, it
-   will have to be a somewhat different mechanism and I have no idea
-   how it will work.  So in the unlikely event that another
-   architecture supports this, we can figure out a good generic
-   mechanism for it at that time. */
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time.  If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work.  So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
 #include <asm/kdebug.h>
 #define HAVE_DIE_NMI
 #endif
@@ -95,9 +97,8 @@
 /* Operations that can be performed on a pretimout. */
 #define WDOG_PREOP_NONE		0
 #define WDOG_PREOP_PANIC	1
-#define WDOG_PREOP_GIVE_DATA	2 /* Cause data to be available to
-                                     read.  Doesn't work in NMI
-                                     mode. */
+/* Cause data to be available to read.  Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA	2
 
 /* Actions to perform on a full timeout. */
 #define WDOG_SET_TIMEOUT_ACT(byte, use) \
@@ -108,8 +109,10 @@
 #define WDOG_TIMEOUT_POWER_DOWN		2
 #define WDOG_TIMEOUT_POWER_CYCLE	3
 
-/* Byte 3 of the get command, byte 4 of the get response is the
-   pre-timeout in seconds. */
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
 
 /* Bits for setting byte 4 of the set command, byte 5 of the get response. */
 #define WDOG_EXPIRE_CLEAR_BIOS_FRB2	(1 << 1)
@@ -118,11 +121,13 @@
 #define WDOG_EXPIRE_CLEAR_SMS_OS	(1 << 4)
 #define WDOG_EXPIRE_CLEAR_OEM		(1 << 5)
 
-/* Setting/getting the watchdog timer value.  This is for bytes 5 and
-   6 (the timeout time) of the set command, and bytes 6 and 7 (the
-   timeout time) and 8 and 9 (the current countdown value) of the
-   response.  The timeout value is given in seconds (in the command it
-   is 100ms intervals). */
+/*
+ * Setting/getting the watchdog timer value.  This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response.  The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
 #define WDOG_SET_TIMEOUT(byte1, byte2, val) \
 	(byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
 #define WDOG_GET_TIMEOUT(byte1, byte2) \
@@ -184,8 +189,10 @@
 static void ipmi_register_watchdog(int ipmi_intf);
 static void ipmi_unregister_watchdog(int ipmi_intf);
 
-/* If true, the driver will start running as soon as it is configured
-   and ready. */
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
 static int start_now;
 
 static int set_param_int(const char *val, struct kernel_param *kp)
@@ -309,10 +316,12 @@
 /* Is someone using the watchdog?  Only one user is allowed. */
 static unsigned long ipmi_wdog_open;
 
-/* If set to 1, the heartbeat command will set the state to reset and
-   start the timer.  The timer doesn't normally run when the driver is
-   first opened until the heartbeat is set the first time, this
-   variable is used to accomplish this. */
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer.  The timer doesn't normally run when the driver is
+ * first opened until the heartbeat is set the first time; this
+ * variable is used to accomplish this.
+ */
 static int ipmi_start_timer_on_heartbeat;
 
 /* IPMI version of the BMC. */
@@ -329,10 +338,12 @@
 
 static int ipmi_heartbeat(void);
 
-/* We use a mutex to make sure that only one thing can send a set
-   timeout at one time, because we only have one copy of the data.
-   The mutex is claimed when the set_timeout is sent and freed
-   when both messages are free. */
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout at one time, because we only have one copy of the data.
+ * The mutex is claimed when the set_timeout is sent and freed
+ * when both messages are free.
+ */
 static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
 static DEFINE_MUTEX(set_timeout_lock);
 static DECLARE_COMPLETION(set_timeout_wait);
@@ -346,15 +357,13 @@
     if (atomic_dec_and_test(&set_timeout_tofree))
 	    complete(&set_timeout_wait);
 }
-static struct ipmi_smi_msg set_timeout_smi_msg =
-{
+static struct ipmi_smi_msg set_timeout_smi_msg = {
 	.done = set_timeout_free_smi
 };
-static struct ipmi_recv_msg set_timeout_recv_msg =
-{
+static struct ipmi_recv_msg set_timeout_recv_msg = {
 	.done = set_timeout_free_recv
 };
- 
+
 static int i_ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
 			      struct ipmi_recv_msg *recv_msg,
 			      int                  *send_heartbeat_now)
@@ -373,13 +382,14 @@
 	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
 
 	if ((ipmi_version_major > 1)
-	    || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5)))
-	{
+	    || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
 		/* This is an IPMI 1.5-only feature. */
 		data[0] |= WDOG_DONT_STOP_ON_SET;
 	} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
-		/* In ipmi 1.0, setting the timer stops the watchdog, we
-		   need to start it back up again. */
+		/*
+		 * In ipmi 1.0, setting the timer stops the watchdog; we
+		 * need to start it back up again.
+		 */
 		hbnow = 1;
 	}
 
@@ -465,12 +475,10 @@
 	atomic_dec(&panic_done_count);
 }
 
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
-{
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
 	.done = panic_smi_free
 };
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
-{
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
 	.done = panic_recv_free
 };
 
@@ -480,8 +488,10 @@
 	struct ipmi_system_interface_addr addr;
 	int rv;
 
-	/* Don't reset the timer if we have the timer turned off, that
-           re-enables the watchdog. */
+	/*
+	 * Don't reset the timer if we have the timer turned off; that
+	 * re-enables the watchdog.
+	 */
 	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
 		return;
 
@@ -505,19 +515,19 @@
 		atomic_add(2, &panic_done_count);
 }
 
-static struct ipmi_smi_msg panic_halt_smi_msg =
-{
+static struct ipmi_smi_msg panic_halt_smi_msg = {
 	.done = panic_smi_free
 };
-static struct ipmi_recv_msg panic_halt_recv_msg =
-{
+static struct ipmi_recv_msg panic_halt_recv_msg = {
 	.done = panic_recv_free
 };
 
-/* Special call, doesn't claim any locks.  This is only to be called
-   at panic or halt time, in run-to-completion mode, when the caller
-   is the only CPU and the only thing that will be going is these IPMI
-   calls. */
+/*
+ * Special call, doesn't claim any locks.  This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be going is these IPMI
+ * calls.
+ */
 static void panic_halt_ipmi_set_timeout(void)
 {
 	int send_heartbeat_now;
@@ -540,10 +550,12 @@
 		ipmi_poll_interface(watchdog_user);
 }
 
-/* We use a semaphore to make sure that only one thing can send a
-   heartbeat at one time, because we only have one copy of the data.
-   The semaphore is claimed when the set_timeout is sent and freed
-   when both messages are free. */
+/*
+ * We use a mutex to make sure that only one thing can send a
+ * heartbeat at one time, because we only have one copy of the data.
+ * The mutex is claimed when the heartbeat is sent and freed
+ * when both messages are free.
+ */
 static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
 static DEFINE_MUTEX(heartbeat_lock);
 static DECLARE_COMPLETION(heartbeat_wait);
@@ -557,15 +569,13 @@
     if (atomic_dec_and_test(&heartbeat_tofree))
 	    complete(&heartbeat_wait);
 }
-static struct ipmi_smi_msg heartbeat_smi_msg =
-{
+static struct ipmi_smi_msg heartbeat_smi_msg = {
 	.done = heartbeat_free_smi
 };
-static struct ipmi_recv_msg heartbeat_recv_msg =
-{
+static struct ipmi_recv_msg heartbeat_recv_msg = {
 	.done = heartbeat_free_recv
 };
- 
+
 static int ipmi_heartbeat(void)
 {
 	struct kernel_ipmi_msg            msg;
@@ -580,10 +590,12 @@
 		ipmi_watchdog_state = action_val;
 		return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
 	} else if (pretimeout_since_last_heartbeat) {
-		/* A pretimeout occurred, make sure we set the timeout.
-		   We don't want to set the action, though, we want to
-		   leave that alone (thus it can't be combined with the
-		   above operation. */
+		/*
+		 * A pretimeout occurred; make sure we set the timeout.
+		 * We don't want to set the action, though; we want to
+		 * leave that alone (thus it can't be combined with the
+		 * above operation).
+		 */
 		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
 	}
 
@@ -591,8 +603,10 @@
 
 	atomic_set(&heartbeat_tofree, 2);
 
-	/* Don't reset the timer if we have the timer turned off, that
-           re-enables the watchdog. */
+	/*
+	 * Don't reset the timer if we have the timer turned off; that
+	 * re-enables the watchdog.
+	 */
 	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
 		mutex_unlock(&heartbeat_lock);
 		return 0;
@@ -625,10 +639,12 @@
 	wait_for_completion(&heartbeat_wait);
 
 	if (heartbeat_recv_msg.msg.data[0] != 0) {
-	    /* Got an error in the heartbeat response.  It was already
-	       reported in ipmi_wdog_msg_handler, but we should return
-	       an error here. */
-	    rv = -EINVAL;
+		/*
+		 * Got an error in the heartbeat response.  It was already
+		 * reported in ipmi_wdog_msg_handler, but we should return
+		 * an error here.
+		 */
+		rv = -EINVAL;
 	}
 
 	mutex_unlock(&heartbeat_lock);
@@ -636,8 +652,7 @@
 	return rv;
 }
 
-static struct watchdog_info ident =
-{
+static struct watchdog_info ident = {
 	.options	= 0,	/* WDIOF_SETTIMEOUT, */
 	.firmware_version = 1,
 	.identity	= "IPMI"
@@ -650,7 +665,7 @@
 	int i;
 	int val;
 
-	switch(cmd) {
+	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		i = copy_to_user(argp, &ident, sizeof(ident));
 		return i ? -EFAULT : 0;
@@ -690,15 +705,13 @@
 		i = copy_from_user(&val, argp, sizeof(int));
 		if (i)
 			return -EFAULT;
-		if (val & WDIOS_DISABLECARD)
-		{
+		if (val & WDIOS_DISABLECARD) {
 			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
 			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
 			ipmi_start_timer_on_heartbeat = 0;
 		}
 
-		if (val & WDIOS_ENABLECARD)
-		{
+		if (val & WDIOS_ENABLECARD) {
 			ipmi_watchdog_state = action_val;
 			ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
 		}
@@ -724,13 +737,13 @@
 	int rv;
 
 	if (len) {
-	    	if (!nowayout) {
-		    	size_t i;
+		if (!nowayout) {
+			size_t i;
 
 			/* In case it was set long ago */
 			expect_close = 0;
 
-    			for (i = 0; i != len; i++) {
+			for (i = 0; i != len; i++) {
 				char c;
 
 				if (get_user(c, buf + i))
@@ -758,15 +771,17 @@
 	if (count <= 0)
 		return 0;
 
-	/* Reading returns if the pretimeout has gone off, and it only does
-	   it once per pretimeout. */
+	/*
+	 * Reading returns if the pretimeout has gone off, and it only does
+	 * it once per pretimeout.
+	 */
 	spin_lock(&ipmi_read_lock);
 	if (!data_to_read) {
 		if (file->f_flags & O_NONBLOCK) {
 			rv = -EAGAIN;
 			goto out;
 		}
-		
+
 		init_waitqueue_entry(&wait, current);
 		add_wait_queue(&read_q, &wait);
 		while (!data_to_read) {
@@ -776,7 +791,7 @@
 			spin_lock(&ipmi_read_lock);
 		}
 		remove_wait_queue(&read_q, &wait);
-	    
+
 		if (signal_pending(current)) {
 			rv = -ERESTARTSYS;
 			goto out;
@@ -799,25 +814,27 @@
 
 static int ipmi_open(struct inode *ino, struct file *filep)
 {
-        switch (iminor(ino)) {
-        case WATCHDOG_MINOR:
+	switch (iminor(ino)) {
+	case WATCHDOG_MINOR:
 		if (test_and_set_bit(0, &ipmi_wdog_open))
-                        return -EBUSY;
+			return -EBUSY;
 
-		/* Don't start the timer now, let it start on the
-		   first heartbeat. */
+		/*
+		 * Don't start the timer now, let it start on the
+		 * first heartbeat.
+		 */
 		ipmi_start_timer_on_heartbeat = 1;
 		return nonseekable_open(ino, filep);
 
 	default:
 		return (-ENODEV);
-        }
+	}
 }
 
 static unsigned int ipmi_poll(struct file *file, poll_table *wait)
 {
 	unsigned int mask = 0;
-	
+
 	poll_wait(file, &read_q, wait);
 
 	spin_lock(&ipmi_read_lock);
@@ -851,7 +868,7 @@
 		clear_bit(0, &ipmi_wdog_open);
 	}
 
-	ipmi_fasync (-1, filep, 0);
+	ipmi_fasync(-1, filep, 0);
 	expect_close = 0;
 
 	return 0;
@@ -882,7 +899,7 @@
 		       msg->msg.data[0],
 		       msg->msg.cmd);
 	}
-	
+
 	ipmi_free_recv_msg(msg);
 }
 
@@ -902,14 +919,14 @@
 		}
 	}
 
-	/* On some machines, the heartbeat will give
-	   an error and not work unless we re-enable
-	   the timer.   So do so. */
+	/*
+	 * On some machines, the heartbeat will give an error and not
+	 * work unless we re-enable the timer.  So do so.
+	 */
 	pretimeout_since_last_heartbeat = 1;
 }
 
-static struct ipmi_user_hndl ipmi_hndlrs =
-{
+static struct ipmi_user_hndl ipmi_hndlrs = {
 	.ipmi_recv_hndl           = ipmi_wdog_msg_handler,
 	.ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
 };
@@ -949,8 +966,10 @@
 		int old_timeout = timeout;
 		int old_preop_val = preop_val;
 
-		/* Set the pretimeout to go off in a second and give
-		   ourselves plenty of time to stop the timer. */
+		/*
+		 * Set the pretimeout to go off in a second and give
+		 * ourselves plenty of time to stop the timer.
+		 */
 		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
 		preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
 		pretimeout = 99;
@@ -974,7 +993,7 @@
 			       " occur.  The NMI pretimeout will"
 			       " likely not work\n");
 		}
-	out_restore:
+ out_restore:
 		testing_nmi = 0;
 		preop_val = old_preop_val;
 		pretimeout = old_pretimeout;
@@ -1009,9 +1028,11 @@
 	/* Make sure no one can call us any more. */
 	misc_deregister(&ipmi_wdog_miscdev);
 
-	/* Wait to make sure the message makes it out.  The lower layer has
-	   pointers to our buffers, we want to make sure they are done before
-	   we release our memory. */
+	/*
+	 * Wait to make sure the message makes it out.  The lower layer has
+	 * pointers to our buffers; we want to make sure they are done before
+	 * we release our memory.
+	 */
 	while (atomic_read(&set_timeout_tofree))
 		schedule_timeout_uninterruptible(1);
 
@@ -1052,15 +1073,17 @@
 		return NOTIFY_STOP;
 	}
 
-        /* If we are not expecting a timeout, ignore it. */
+	/* If we are not expecting a timeout, ignore it. */
 	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
 		return NOTIFY_OK;
 
 	if (preaction_val != WDOG_PRETIMEOUT_NMI)
 		return NOTIFY_OK;
 
-	/* If no one else handled the NMI, we assume it was the IPMI
-           watchdog. */
+	/*
+	 * If no one else handled the NMI, we assume it was the IPMI
+	 * watchdog.
+	 */
 	if (preop_val == WDOG_PREOP_PANIC) {
 		/* On some machines, the heartbeat will give
 		   an error and not work unless we re-enable
@@ -1082,7 +1105,7 @@
 			       unsigned long         code,
 			       void                  *unused)
 {
-	static int reboot_event_handled = 0;
+	static int reboot_event_handled;
 
 	if ((watchdog_user) && (!reboot_event_handled)) {
 		/* Make sure we only do this once. */
@@ -1115,7 +1138,7 @@
 			      unsigned long         event,
 			      void                  *unused)
 {
-	static int panic_event_handled = 0;
+	static int panic_event_handled;
 
 	/* On a panic, if we have a panic timeout, make sure to extend
 	   the watchdog timer to a reasonable value to complete the
@@ -1125,7 +1148,7 @@
 	    ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
 		/* Make sure we do this only once. */
 		panic_event_handled = 1;
-	    
+
 		timeout = 255;
 		pretimeout = 0;
 		panic_halt_ipmi_set_timeout();
@@ -1151,8 +1174,7 @@
 	ipmi_unregister_watchdog(if_num);
 }
 
-static struct ipmi_smi_watcher smi_watcher =
-{
+static struct ipmi_smi_watcher smi_watcher = {
 	.owner    = THIS_MODULE,
 	.new_smi  = ipmi_new_smi,
 	.smi_gone = ipmi_smi_gone
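
The reflowed comment about bytes 5 and 6 of the set command documents that the
timeout travels on the wire in 100ms intervals while the driver works in
seconds; the WDOG_SET_TIMEOUT/WDOG_GET_TIMEOUT macros do that conversion.  A
minimal sketch of their use (byte positions and values picked for
illustration only, assuming WDOG_GET_TIMEOUT simply reverses the scaling):

	unsigned char data[6];
	int timeout = 300;			/* seconds */

	/* 300 s -> 3000 ticks of 100 ms = 0x0bb8, split into low/high bytes */
	WDOG_SET_TIMEOUT(data[4], data[5], timeout);
	/* data[4] == 0xb8, data[5] == 0x0b */

	/* reading the pair back should give 300 again */
	timeout = WDOG_GET_TIMEOUT(data[4], data[5]);
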
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index e83623e..934ffaf 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -364,6 +364,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
 	unsigned long pfn;
@@ -384,6 +385,7 @@
 	vma->vm_pgoff = pfn;
 	return mmap_mem(file, vma);
 }
+#endif
 
 #ifdef CONFIG_CRASH_DUMP
 /*
@@ -422,6 +424,7 @@
 extern long vread(char *buf, char *addr, unsigned long count);
 extern long vwrite(char *buf, char *addr, unsigned long count);
 
+#ifdef CONFIG_DEVKMEM
 /*
  * This function reads the *virtual* memory as seen by the kernel.
  */
@@ -626,6 +629,7 @@
  	*ppos = p;
  	return virtr + wrote;
 }
+#endif
 
 #ifdef CONFIG_DEVPORT
 static ssize_t read_port(struct file * file, char __user * buf,
@@ -803,6 +807,7 @@
 	.get_unmapped_area = get_unmapped_area_mem,
 };
 
+#ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
 	.llseek		= memory_lseek,
 	.read		= read_kmem,
@@ -811,6 +816,7 @@
 	.open		= open_kmem,
 	.get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 static const struct file_operations null_fops = {
 	.llseek		= null_lseek,
@@ -889,11 +895,13 @@
 			filp->f_mapping->backing_dev_info =
 				&directly_mappable_cdev_bdi;
 			break;
+#ifdef CONFIG_DEVKMEM
 		case 2:
 			filp->f_op = &kmem_fops;
 			filp->f_mapping->backing_dev_info =
 				&directly_mappable_cdev_bdi;
 			break;
+#endif
 		case 3:
 			filp->f_op = &null_fops;
 			break;
@@ -942,7 +950,9 @@
 	const struct file_operations	*fops;
 } devlist[] = { /* list of minor devices */
 	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+#ifdef CONFIG_DEVKMEM
 	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+#endif
 	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
 #ifdef CONFIG_DEVPORT
 	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 4d058da..eaace0d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -263,23 +263,26 @@
 
 static int __init misc_init(void)
 {
-#ifdef CONFIG_PROC_FS
-	struct proc_dir_entry *ent;
+	int err;
 
-	ent = create_proc_entry("misc", 0, NULL);
-	if (ent)
-		ent->proc_fops = &misc_proc_fops;
+#ifdef CONFIG_PROC_FS
+	proc_create("misc", 0, NULL, &misc_proc_fops);
 #endif
 	misc_class = class_create(THIS_MODULE, "misc");
+	err = PTR_ERR(misc_class);
 	if (IS_ERR(misc_class))
-		return PTR_ERR(misc_class);
+		goto fail_remove;
 
-	if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
-		printk("unable to get major %d for misc devices\n",
-		       MISC_MAJOR);
-		class_destroy(misc_class);
-		return -EIO;
-	}
+	err = -EIO;
+	if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
+		goto fail_printk;
 	return 0;
+
+fail_printk:
+	printk("unable to get major %d for misc devices\n", MISC_MAJOR);
+	class_destroy(misc_class);
+fail_remove:
+	remove_proc_entry("misc", NULL);
+	return err;
 }
 subsys_initcall(misc_init);
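
The misc_init() rework above is one instance of the two-step
create_proc_entry() + proc_fops assignment being collapsed into a single
proc_create() call; the rtc, toshiba and viotape hunks below do the same.  A
minimal sketch of the conversion for a hypothetical "foo" entry (the name and
file operations are assumptions for illustration):

	/* before: allocate the entry, then attach the file operations */
	struct proc_dir_entry *ent;

	ent = create_proc_entry("foo", 0, NULL);
	if (ent)
		ent->proc_fops = &foo_proc_fops;

	/* after: one call; a NULL return means the entry was not created */
	if (!proc_create("foo", 0, NULL, &foo_proc_fops))
		printk(KERN_WARNING "foo: failed to register /proc entry\n");
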
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 82bcfb9..06803ed 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -501,7 +501,7 @@
 			__FILE__,__LINE__, count);
 		
 	/* This can happen if stuff comes in on the backup tty */
-	if (n_hdlc == 0 || tty != n_hdlc->tty)
+	if (!n_hdlc || tty != n_hdlc->tty)
 		return;
 		
 	/* verify line is using HDLC discipline */
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
index 1f978ff..fa9d3c9 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.c
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -354,32 +354,6 @@
 	unsigned int channel_idx;
 };
 
-#ifdef IPWIRELESS_STATE_DEBUG
-int ipwireless_dump_hardware_state(char *p, size_t limit,
-				   struct ipw_hardware *hw)
-{
-	return snprintf(p, limit,
-			"debug: initializing=%d\n"
-			"debug: tx_ready=%d\n"
-			"debug: tx_queued=%d\n"
-			"debug: rx_ready=%d\n"
-			"debug: rx_bytes_queued=%d\n"
-			"debug: blocking_rx=%d\n"
-			"debug: removed=%d\n"
-			"debug: hardware.shutting_down=%d\n"
-			"debug: to_setup=%d\n",
-			hw->initializing,
-			hw->tx_ready,
-			hw->tx_queued,
-			hw->rx_ready,
-			hw->rx_bytes_queued,
-			hw->blocking_rx,
-			hw->removed,
-			hw->shutting_down,
-			hw->to_setup);
-}
-#endif
-
 static char *data_type(const unsigned char *buf, unsigned length)
 {
 	struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h
index c83190f..19ce5eb 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.h
+++ b/drivers/char/pcmcia/ipwireless/hardware.h
@@ -58,7 +58,5 @@
 				 void *reboot_cb_data);
 void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
 void ipwireless_sleep(unsigned int tenths);
-int ipwireless_dump_hardware_state(char *p, size_t limit,
-				   struct ipw_hardware *hw);
 
 #endif
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
index d793e68..fe914d3 100644
--- a/drivers/char/pcmcia/ipwireless/network.c
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -63,21 +63,6 @@
 	struct work_struct work_go_offline;
 };
 
-
-#ifdef IPWIRELESS_STATE_DEBUG
-int ipwireless_dump_network_state(char *p, size_t limit,
-				  struct ipw_network *network)
-{
-	return snprintf(p, limit,
-			"debug: ppp_blocked=%d\n"
-			"debug: outgoing_packets_queued=%d\n"
-			"debug: network.shutting_down=%d\n",
-			network->ppp_blocked,
-			network->outgoing_packets_queued,
-			network->shutting_down);
-}
-#endif
-
 static void notify_packet_sent(void *callback_data, unsigned int packet_length)
 {
 	struct ipw_network *network = callback_data;
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h
index b0e1e95..ccacd26 100644
--- a/drivers/char/pcmcia/ipwireless/network.h
+++ b/drivers/char/pcmcia/ipwireless/network.h
@@ -49,7 +49,4 @@
 int ipwireless_ppp_channel_index(struct ipw_network *net);
 int ipwireless_ppp_unit_number(struct ipw_network *net);
 
-int ipwireless_dump_network_state(char *p, size_t limit,
-				  struct ipw_network *net);
-
 #endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index f43c89f..0cf98bd 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -272,7 +272,7 @@
 
 static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
 
-static DEFINE_PER_CPU(int, trickle_count) = 0;
+static DEFINE_PER_CPU(int, trickle_count);
 
 /*
  * A pool of size .poolwords is stirred with a primitive polynomial
@@ -370,17 +370,19 @@
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
 
 #if 0
-static int debug = 0;
+static int debug;
 module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { if (debug) \
-	printk(KERN_DEBUG "random %04d %04d %04d: " \
-	fmt,\
-	input_pool.entropy_count,\
-	blocking_pool.entropy_count,\
-	nonblocking_pool.entropy_count,\
-	## arg); } while (0)
+#define DEBUG_ENT(fmt, arg...) do { \
+	if (debug) \
+		printk(KERN_DEBUG "random %04d %04d %04d: " \
+		fmt,\
+		input_pool.entropy_count,\
+		blocking_pool.entropy_count,\
+		nonblocking_pool.entropy_count,\
+		## arg); } while (0)
 #else
 #define DEBUG_ENT(fmt, arg...) do {} while (0)
 #endif
@@ -394,7 +396,7 @@
 
 struct entropy_store;
 struct entropy_store {
-	/* mostly-read data: */
+	/* read-only data: */
 	struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
@@ -402,7 +404,7 @@
 	struct entropy_store *pull;
 
 	/* read-write data: */
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	spinlock_t lock;
 	unsigned add_ptr;
 	int entropy_count;
 	int input_rotate;
@@ -438,25 +440,26 @@
 };
 
 /*
- * This function adds a byte into the entropy "pool".  It does not
+ * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
- * credit_entropy_store if this is appropriate.
+ * credit_entropy_bits if this is appropriate.
  *
  * The pool is stirred with a primitive polynomial of the appropriate
  * degree, and then twisted.  We twist by three bits at a time because
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
-				int nwords, __u32 out[16])
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+				   int nbytes, __u8 out[64])
 {
 	static __u32 const twist_table[8] = {
 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
-	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
-	int new_rotate, input_rotate;
+	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
-	__u32 w, next_w;
+	const char *bytes = in;
+	__u32 w;
 	unsigned long flags;
 
 	/* Taps are constant, so we can load them without holding r->lock.  */
@@ -465,78 +468,76 @@
 	tap3 = r->poolinfo->tap3;
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
-	next_w = *in++;
 
 	spin_lock_irqsave(&r->lock, flags);
-	prefetch_range(r->pool, wordmask);
 	input_rotate = r->input_rotate;
-	add_ptr = r->add_ptr;
+	i = r->add_ptr;
 
-	while (nwords--) {
-		w = rol32(next_w, input_rotate);
-		if (nwords > 0)
-			next_w = *in++;
-		i = add_ptr = (add_ptr - 1) & wordmask;
+	/* mix one byte at a time to simplify size handling and churn faster */
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31);
+		i = (i - 1) & wordmask;
+
+		/* XOR in the various taps */
+		w ^= r->pool[i];
+		w ^= r->pool[(i + tap1) & wordmask];
+		w ^= r->pool[(i + tap2) & wordmask];
+		w ^= r->pool[(i + tap3) & wordmask];
+		w ^= r->pool[(i + tap4) & wordmask];
+		w ^= r->pool[(i + tap5) & wordmask];
+
+		/* Mix the result back in with a twist */
+		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
 		/*
 		 * Normally, we add 7 bits of rotation to the pool.
 		 * At the beginning of the pool, add an extra 7 bits
 		 * rotation, so that successive passes spread the
 		 * input bits across the pool evenly.
 		 */
-		new_rotate = input_rotate + 14;
-		if (i)
-			new_rotate = input_rotate + 7;
-		input_rotate = new_rotate & 31;
-
-		/* XOR in the various taps */
-		w ^= r->pool[(i + tap1) & wordmask];
-		w ^= r->pool[(i + tap2) & wordmask];
-		w ^= r->pool[(i + tap3) & wordmask];
-		w ^= r->pool[(i + tap4) & wordmask];
-		w ^= r->pool[(i + tap5) & wordmask];
-		w ^= r->pool[i];
-		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+		input_rotate += i ? 7 : 14;
 	}
 
 	r->input_rotate = input_rotate;
-	r->add_ptr = add_ptr;
+	r->add_ptr = i;
 
-	if (out) {
-		for (i = 0; i < 16; i++) {
-			out[i] = r->pool[add_ptr];
-			add_ptr = (add_ptr - 1) & wordmask;
-		}
-	}
+	if (out)
+		for (j = 0; j < 16; j++)
+			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
 
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static inline void add_entropy_words(struct entropy_store *r, const __u32 *in,
-				     int nwords)
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 {
-	__add_entropy_words(r, in, nwords, NULL);
+	mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
-static void credit_entropy_store(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
 	unsigned long flags;
 
+	if (!nbits)
+		return;
+
 	spin_lock_irqsave(&r->lock, flags);
 
-	if (r->entropy_count + nbits < 0) {
-		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
-			  r->entropy_count, nbits);
+	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+	r->entropy_count += nbits;
+	if (r->entropy_count < 0) {
+		DEBUG_ENT("negative entropy/overflow\n");
 		r->entropy_count = 0;
-	} else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+	} else if (r->entropy_count > r->poolinfo->POOLBITS)
 		r->entropy_count = r->poolinfo->POOLBITS;
-	} else {
-		r->entropy_count += nbits;
-		if (nbits)
-			DEBUG_ENT("added %d entropy credits to %s\n",
-				  nbits, r->name);
+
+	/* should we wake readers? */
+	if (r == &input_pool &&
+	    r->entropy_count >= random_read_wakeup_thresh) {
+		wake_up_interruptible(&random_read_wait);
+		kill_fasync(&fasync, SIGIO, POLL_IN);
 	}
 
 	spin_unlock_irqrestore(&r->lock, flags);
@@ -551,7 +552,7 @@
 /* There is one of these per entropy source */
 struct timer_rand_state {
 	cycles_t last_time;
-	long last_delta,last_delta2;
+	long last_delta, last_delta2;
 	unsigned dont_count_entropy:1;
 };
 
@@ -586,7 +587,7 @@
 	sample.jiffies = jiffies;
 	sample.cycles = get_cycles();
 	sample.num = num;
-	add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+	mix_pool_bytes(&input_pool, &sample, sizeof(sample));
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -620,13 +621,9 @@
 		 * Round down by 1 bit on general principles,
 		 * and limit entropy entimate to 12 bits.
 		 */
-		credit_entropy_store(&input_pool,
-				     min_t(int, fls(delta>>1), 11));
+		credit_entropy_bits(&input_pool,
+				    min_t(int, fls(delta>>1), 11));
 	}
-
-	if(input_pool.entropy_count >= random_read_wakeup_thresh)
-		wake_up_interruptible(&random_read_wait);
-
 out:
 	preempt_enable();
 }
@@ -677,7 +674,7 @@
  *
  *********************************************************************/
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int rsvd);
 
 /*
@@ -704,10 +701,10 @@
 			  "(%d of %d requested)\n",
 			  r->name, bytes * 8, nbytes * 8, r->entropy_count);
 
-		bytes=extract_entropy(r->pull, tmp, bytes,
-				      random_read_wakeup_thresh / 8, rsvd);
-		add_entropy_words(r, tmp, (bytes + 3) / 4);
-		credit_entropy_store(r, bytes*8);
+		bytes = extract_entropy(r->pull, tmp, bytes,
+					random_read_wakeup_thresh / 8, rsvd);
+		mix_pool_bytes(r, tmp, bytes);
+		credit_entropy_bits(r, bytes*8);
 	}
 }
 
@@ -744,13 +741,15 @@
 		if (r->limit && nbytes + reserved >= r->entropy_count / 8)
 			nbytes = r->entropy_count/8 - reserved;
 
-		if(r->entropy_count / 8 >= nbytes + reserved)
+		if (r->entropy_count / 8 >= nbytes + reserved)
 			r->entropy_count -= nbytes*8;
 		else
 			r->entropy_count = reserved;
 
-		if (r->entropy_count < random_write_wakeup_thresh)
+		if (r->entropy_count < random_write_wakeup_thresh) {
 			wake_up_interruptible(&random_write_wait);
+			kill_fasync(&fasync, SIGIO, POLL_OUT);
+		}
 	}
 
 	DEBUG_ENT("debiting %d entropy credits from %s%s\n",
@@ -764,45 +763,46 @@
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+	__u8 extract[64];
 
-	sha_init(buf);
-	/*
-	 * As we hash the pool, we mix intermediate values of
-	 * the hash back into the pool.  This eliminates
-	 * backtracking attacks (where the attacker knows
-	 * the state of the pool plus the current outputs, and
-	 * attempts to find previous ouputs), unless the hash
-	 * function can be inverted.
-	 */
-	for (i = 0; i < r->poolinfo->poolwords; i += 16) {
-		/* hash blocks of 16 words = 512 bits */
-		sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
-		/* feed back portion of the resulting hash */
-		add_entropy_words(r, &buf[i % 5], 1);
-	}
+	/* Generate a hash across the pool, 16 words (512 bits) at a time */
+	sha_init(hash);
+	for (i = 0; i < r->poolinfo->poolwords; i += 16)
+		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
 
 	/*
-	 * To avoid duplicates, we atomically extract a
-	 * portion of the pool while mixing, and hash one
-	 * final time.
+	 * We mix the hash back into the pool to prevent backtracking
+	 * attacks (where the attacker knows the state of the pool
+	 * plus the current outputs, and attempts to find previous
+	 * outputs), unless the hash function can be inverted. By
+	 * mixing at least a SHA1 worth of hash data back, we make
+	 * brute-forcing the feedback as hard as brute-forcing the
+	 * hash.
 	 */
-	__add_entropy_words(r, &buf[i % 5], 1, data);
-	sha_transform(buf, (__u8 *)data, buf + 5);
+	mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
 
 	/*
-	 * In case the hash function has some recognizable
-	 * output pattern, we fold it in half.
+	 * To avoid duplicates, we atomically extract a portion of the
+	 * pool while mixing, and hash one final time.
 	 */
+	sha_transform(hash, extract, workspace);
+	memset(extract, 0, sizeof(extract));
+	memset(workspace, 0, sizeof(workspace));
 
-	buf[0] ^= buf[3];
-	buf[1] ^= buf[4];
-	buf[2] ^= rol32(buf[2], 16);
-	memcpy(out, buf, EXTRACT_SIZE);
-	memset(buf, 0, sizeof(buf));
+	/*
+	 * In case the hash function has some recognizable output
+	 * pattern, we fold it in half. Thus, we always feed back
+	 * twice as much data as we output.
+	 */
+	hash[0] ^= hash[3];
+	hash[1] ^= hash[4];
+	hash[2] ^= rol32(hash[2], 16);
+	memcpy(out, hash, EXTRACT_SIZE);
+	memset(hash, 0, sizeof(hash));
 }
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int reserved)
 {
 	ssize_t ret = 0, i;
@@ -872,7 +872,6 @@
 {
 	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
 }
-
 EXPORT_SYMBOL(get_random_bytes);
 
 /*
@@ -894,12 +893,11 @@
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	now = ktime_get_real();
-	add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
-	add_entropy_words(r, (__u32 *)utsname(),
-			  sizeof(*(utsname()))/4);
+	mix_pool_bytes(r, &now, sizeof(now));
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
-static int __init rand_initialize(void)
+static int rand_initialize(void)
 {
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
@@ -940,7 +938,7 @@
 #endif
 
 static ssize_t
-random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	ssize_t n, retval = 0, count = 0;
 
@@ -1002,8 +1000,7 @@
 }
 
 static ssize_t
-urandom_read(struct file * file, char __user * buf,
-		      size_t nbytes, loff_t *ppos)
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	return extract_entropy_user(&nonblocking_pool, buf, nbytes);
 }
@@ -1038,16 +1035,15 @@
 		count -= bytes;
 		p += bytes;
 
-		add_entropy_words(r, buf, (bytes + 3) / 4);
+		mix_pool_bytes(r, buf, bytes);
 		cond_resched();
 	}
 
 	return 0;
 }
 
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
-	     size_t count, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+			    size_t count, loff_t *ppos)
 {
 	size_t ret;
 	struct inode *inode = file->f_path.dentry->d_inode;
@@ -1064,9 +1060,7 @@
 	return (ssize_t)count;
 }
 
-static int
-random_ioctl(struct inode * inode, struct file * file,
-	     unsigned int cmd, unsigned long arg)
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
 	int size, ent_count;
 	int __user *p = (int __user *)arg;
@@ -1074,8 +1068,8 @@
 
 	switch (cmd) {
 	case RNDGETENTCNT:
-		ent_count = input_pool.entropy_count;
-		if (put_user(ent_count, p))
+		/* inherently racy, no point locking */
+		if (put_user(input_pool.entropy_count, p))
 			return -EFAULT;
 		return 0;
 	case RNDADDTOENTCNT:
@@ -1083,13 +1077,7 @@
 			return -EPERM;
 		if (get_user(ent_count, p))
 			return -EFAULT;
-		credit_entropy_store(&input_pool, ent_count);
-		/*
-		 * Wake up waiting processes if we have enough
-		 * entropy.
-		 */
-		if (input_pool.entropy_count >= random_read_wakeup_thresh)
-			wake_up_interruptible(&random_read_wait);
+		credit_entropy_bits(&input_pool, ent_count);
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))
@@ -1104,39 +1092,45 @@
 				    size);
 		if (retval < 0)
 			return retval;
-		credit_entropy_store(&input_pool, ent_count);
-		/*
-		 * Wake up waiting processes if we have enough
-		 * entropy.
-		 */
-		if (input_pool.entropy_count >= random_read_wakeup_thresh)
-			wake_up_interruptible(&random_read_wait);
+		credit_entropy_bits(&input_pool, ent_count);
 		return 0;
 	case RNDZAPENTCNT:
 	case RNDCLEARPOOL:
 		/* Clear the entropy pool counters. */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		init_std_data(&input_pool);
-		init_std_data(&blocking_pool);
-		init_std_data(&nonblocking_pool);
+		rand_initialize();
 		return 0;
 	default:
 		return -EINVAL;
 	}
 }
 
+static int random_fasync(int fd, struct file *filp, int on)
+{
+	return fasync_helper(fd, filp, on, &fasync);
+}
+
+static int random_release(struct inode *inode, struct file *filp)
+{
+	return fasync_helper(-1, filp, 0, &fasync);
+}
+
 const struct file_operations random_fops = {
 	.read  = random_read,
 	.write = random_write,
 	.poll  = random_poll,
-	.ioctl = random_ioctl,
+	.unlocked_ioctl = random_ioctl,
+	.fasync = random_fasync,
+	.release = random_release,
 };
 
 const struct file_operations urandom_fops = {
 	.read  = urandom_read,
 	.write = random_write,
-	.ioctl = random_ioctl,
+	.unlocked_ioctl = random_ioctl,
+	.fasync = random_fasync,
+	.release = random_release,
 };
 
 /***************************************************************
@@ -1157,7 +1151,6 @@
 	/* Set the UUID variant to DCE */
 	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
 }
-
 EXPORT_SYMBOL(generate_random_uuid);
 
 /********************************************************************
@@ -1339,7 +1332,7 @@
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 
-static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
 {
 	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
 
@@ -1487,8 +1480,8 @@
 	 */
 
 	memcpy(hash, saddr, 16);
-	hash[4]=((__force u16)sport << 16) + (__force u16)dport;
-	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+	hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
 	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
 	seq += keyptr->count;
@@ -1538,10 +1531,10 @@
 	 *  Note that the words are placed into the starting vector, which is
 	 *  then mixed with a partial MD4 over random data.
 	 */
-	hash[0]=(__force u32)saddr;
-	hash[1]=(__force u32)daddr;
-	hash[2]=((__force u16)sport << 16) + (__force u16)dport;
-	hash[3]=keyptr->secret[11];
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+	hash[3] = keyptr->secret[11];
 
 	seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
 	seq += keyptr->count;
@@ -1556,10 +1549,7 @@
 	 *	Choosing a clock of 64 ns period is OK. (period of 274 s)
 	 */
 	seq += ktime_to_ns(ktime_get_real()) >> 6;
-#if 0
-	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
-	       saddr, daddr, sport, dport, seq);
-#endif
+
 	return seq;
 }
 
@@ -1582,14 +1572,15 @@
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+			       __be16 dport)
 {
 	struct keydata *keyptr = get_keyptr();
 	u32 hash[12];
 
 	memcpy(hash, saddr, 16);
 	hash[4] = (__force u32)dport;
-	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
 	return twothirdsMD4Transform((const __u32 *)daddr, hash);
 }
@@ -1617,13 +1608,9 @@
 
 	seq += ktime_to_ns(ktime_get_real());
 	seq &= (1ull << 48) - 1;
-#if 0
-	printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
-	       saddr, daddr, sport, dport, seq);
-#endif
+
 	return seq;
 }
-
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 #endif
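
With random_fasync()/random_release() in place, credit_entropy_bits() and the
extraction accounting now call kill_fasync(), so a process that registers for
asynchronous notification on /dev/random receives SIGIO when the input pool
crosses the read or write wakeup threshold.  A minimal userspace sketch of
consuming that signal (hypothetical program, not part of the patch; a
production version would block SIGIO and use sigsuspend() to avoid the
check/pause race):

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
	got_sigio = 1;
}

int main(void)
{
	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());		/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	while (!got_sigio)
		pause();			/* woken when the pool becomes readable */

	printf("entropy available, SIGIO received\n");
	close(fd);
	return 0;
}
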
 
diff --git a/drivers/char/rio/rioroute.c b/drivers/char/rio/rioroute.c
index 85091ff..7a9df7d 100644
--- a/drivers/char/rio/rioroute.c
+++ b/drivers/char/rio/rioroute.c
@@ -526,7 +526,7 @@
 			 ** If RTA is not powered on, the tx packets will be
 			 ** unset, so go no further.
 			 */
-			if (PortP->TxStart == 0) {
+			if (!PortP->TxStart) {
 				rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
 				rio_spin_unlock_irqrestore(&PortP->portSem, flags);
 				break;
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h
index b01d381..143cc43 100644
--- a/drivers/char/rocket_int.h
+++ b/drivers/char/rocket_int.h
@@ -55,7 +55,7 @@
 
 static inline void out32(unsigned short port, Byte_t *p)
 {
-	u32 value = le32_to_cpu(get_unaligned((__le32 *)p));
+	u32 value = get_unaligned_le32(p);
 #ifdef ROCKET_DEBUG_IO
 	printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value);
 #endif
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index e2ec2ee..5f80a9d 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -1069,10 +1069,8 @@
 	}
 
 #ifdef CONFIG_PROC_FS
-	ent = create_proc_entry("driver/rtc", 0, NULL);
-	if (ent)
-		ent->proc_fops = &rtc_proc_fops;
-	else
+	ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops);
+	if (!ent)
 		printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
 #endif
 
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index 1b75b0b..31a7765 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -63,16 +63,13 @@
 scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
 {
 	char *desc_end;
-	__be32 from_buf;
 
 	/* record event source address */
-	from_buf = get_unaligned((__be32 *)event);
-	*src = be32_to_cpup(&from_buf);
+	*src = get_unaligned_be32(event);
 	event += 4; 			/* move on to event code */
 
 	/* record the system controller's event code */
-	from_buf = get_unaligned((__be32 *)event);
-	*code = be32_to_cpup(&from_buf);
+	*code = get_unaligned_be32(event);
 	event += 4;			/* move on to event arguments */
 
 	/* how many arguments are in the packet? */
@@ -86,8 +83,7 @@
 		/* not an integer argument, so give up */
 		return -1;
 	}
-	from_buf = get_unaligned((__be32 *)event);
-	*esp_code = be32_to_cpup(&from_buf);
+	*esp_code = get_unaligned_be32(event);
 	event += 4;
 
 	/* parse out the event description */
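
The snsc_event and rocket changes replace the open-coded get_unaligned() plus
be32_to_cpup()/le32_to_cpu() pairs with the combined
get_unaligned_be32()/get_unaligned_le32() helpers.  A minimal sketch of the
idiom, assuming an unaligned big-endian 32-bit field at the start of a buffer
(the field and function are invented for illustration):

#include <asm/unaligned.h>

static u32 read_be32_field(const unsigned char *buf)
{
	/* old style: load through a temporary, then byte-swap */
	__be32 raw = get_unaligned((__be32 *)buf);
	u32 old_way = be32_to_cpup(&raw);

	/* new style: one helper does the unaligned load and the swap */
	u32 new_way = get_unaligned_be32(buf);

	return old_way == new_way ? new_way : 0;	/* both paths agree */
}
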
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 1ade193..9e9bad8b 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -196,6 +196,48 @@
 #define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
 #endif
 
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(show_lock);
+
+static void showacpu(void *dummy)
+{
+	unsigned long flags;
+
+	/* Idle CPUs have no interesting backtrace. */
+	if (idle_cpu(smp_processor_id()))
+		return;
+
+	spin_lock_irqsave(&show_lock, flags);
+	printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+	show_stack(NULL, NULL);
+	spin_unlock_irqrestore(&show_lock, flags);
+}
+
+static void sysrq_showregs_othercpus(struct work_struct *dummy)
+{
+	smp_call_function(showacpu, NULL, 0, 0);
+}
+
+static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
+
+static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+{
+	struct pt_regs *regs = get_irq_regs();
+	if (regs) {
+		printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+		show_regs(regs);
+	}
+	schedule_work(&sysrq_showallcpus);
+}
+
+static struct sysrq_key_op sysrq_showallcpus_op = {
+	.handler	= sysrq_handle_showallcpus,
+	.help_msg	= "aLlcpus",
+	.action_msg	= "Show backtrace of all active CPUs",
+	.enable_mask	= SYSRQ_ENABLE_DUMP,
+};
+#endif
+
 static void sysrq_handle_showregs(int key, struct tty_struct *tty)
 {
 	struct pt_regs *regs = get_irq_regs();
@@ -340,7 +382,11 @@
 	&sysrq_kill_op,			/* i */
 	NULL,				/* j */
 	&sysrq_SAK_op,			/* k */
+#ifdef CONFIG_SMP
+	&sysrq_showallcpus_op,		/* l */
+#else
 	NULL,				/* l */
+#endif
 	&sysrq_showmem_op,		/* m */
 	&sysrq_unrt_op,			/* n */
 	/* o: This will often be registered as 'Off' at init time */
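
The new sysrq_showallcpus_op is wired directly into the static key table
above, but the same struct sysrq_key_op layout is what modules pass to
register_sysrq_key() when they want to claim a key at runtime.  A minimal
sketch of that usage (the key choice, messages and handler are assumptions
for illustration; the handler signature matches the ones in this file):

#include <linux/sysrq.h>

static void sysrq_handle_hello(int key, struct tty_struct *tty)
{
	printk(KERN_INFO "sysrq: hello from key '%c'\n", key);
}

static struct sysrq_key_op sysrq_hello_op = {
	.handler	= sysrq_handle_hello,
	.help_msg	= "hello(y)",
	.action_msg	= "Print a greeting",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};

	/* in module init/exit, picking an otherwise unused key: */
	err = register_sysrq_key('y', &sysrq_hello_op);
	...
	unregister_sysrq_key('y', &sysrq_hello_op);
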
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index ce5ebe3..64f1cee 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -520,12 +520,11 @@
 	{
 		struct proc_dir_entry *pde;
 
-		pde = create_proc_entry("toshiba", 0, NULL);
+		pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops);
 		if (!pde) {
 			misc_deregister(&tosh_device);
 			return -ENOMEM;
 		}
-		pde->proc_fops = &proc_toshiba_fops;
 	}
 #endif
 
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 8f3f762..3738cfa 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -23,7 +23,7 @@
 
 config TCG_TIS
 	tristate "TPM Interface Specification 1.2 Interface"
-	depends on PNPACPI
+	depends on PNP
 	---help---
 	  If you have a TPM security chip that is compliant with the
 	  TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -32,7 +32,6 @@
 
 config TCG_NSC
 	tristate "National Semiconductor TPM Interface"
-	depends on PNPACPI
 	---help---
 	  If you have a TPM security chip from National Semiconductor 
 	  say Yes and it will be accessible from within Linux.  To 
@@ -48,7 +47,7 @@
 
 config TCG_INFINEON
 	tristate "Infineon Technologies TPM Interface"
-	depends on PNPACPI
+	depends on PNP
 	---help---
 	  If you have a TPM security chip from Infineon Technologies
 	  (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 6313326..ab18c1e 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -264,7 +264,7 @@
 
 static struct platform_device *pdev = NULL;
 
-static void __devexit tpm_nsc_remove(struct device *dev)
+static void tpm_nsc_remove(struct device *dev)
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 	if ( chip ) {
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 4d3c701..98b65a2 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1180,7 +1180,7 @@
 		if (*str == ',')
 			str++;
 		if (*str == '\0')
-			str = 0;
+			str = NULL;
 
 		if (tty_line >= 0 && tty_line <= p->num && p->poll_init &&
 				!p->poll_init(p, tty_line, str)) {
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index db7a731..58aad638 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -249,6 +249,7 @@
 }
 
 static const struct file_operations proc_viotape_operations = {
+	.owner		= THIS_MODULE,
 	.open		= proc_viotape_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -915,7 +916,6 @@
 int __init viotap_init(void)
 {
 	int ret;
-	struct proc_dir_entry *e;
 
 	if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		return -ENODEV;
@@ -968,11 +968,8 @@
 	if (ret)
 		goto unreg_class;
 
-	e = create_proc_entry("iSeries/viotape", S_IFREG|S_IRUGO, NULL);
-	if (e) {
-		e->owner = THIS_MODULE;
-		e->proc_fops = &proc_viotape_operations;
-	}
+	proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
+		    &proc_viotape_operations);
 
 	return 0;
 
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index df4c3ea..1c26604 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -301,7 +301,7 @@
 	d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
 	s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
 	scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
-	scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
+	scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_scrl_erase_char,
 		    vc->vc_size_row * nr);
 }
 
@@ -319,7 +319,7 @@
 	s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
 	step = vc->vc_cols * nr;
 	scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
-	scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+	scr_memsetw(s, vc->vc_scrl_erase_char, 2 * step);
 }
 
 static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -400,7 +400,7 @@
  *  Bit 7   : blink
  */
 	{
-	u8 a = vc->vc_color;
+	u8 a = _color;
 	if (!vc->vc_can_do_color)
 		return _intensity |
 		       (_italic ? 2 : 0) |
@@ -434,6 +434,7 @@
 	              vc->vc_blink, vc->vc_underline,
 	              vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic);
 	vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' ';
+	vc->vc_scrl_erase_char = (build_attr(vc, vc->vc_def_color, 1, false, false, false, false) << 8) | ' ';
 }
 
 /* Note: inverting the screen twice should revert to the original state */
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 2b38299..6e6c3c4 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -67,7 +67,7 @@
 	  E7205, E7500, E7501 and E7505 server chipsets.
 
 config EDAC_E752X
-	tristate "Intel e752x (e7520, e7525, e7320)"
+	tristate "Intel e752x (e7520, e7525, e7320) and 3100"
 	depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
 	help
 	  Support for error detection and correction on the Intel
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f220754..2b95f1a 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define AMD76X_REVISION	" Ver: 2.0.2 "  __DATE__
@@ -344,6 +345,9 @@
 
 static int __init amd76x_init(void)
 {
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	return pci_register_driver(&amd76x_driver);
 }
 
@@ -358,3 +362,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
 MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
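
The amd76x change above, and the e752x/e7xxx changes that follow, standardize
on a shared opstate_init() call made before the PCI driver registers (the
per-driver "make sure error reporting method is sane" switch goes away where
it existed) and expose edac_op_state as a read-only module parameter.  A
minimal sketch of the boilerplate an EDAC driver now carries (the driver name
is hypothetical):

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/edac.h>
#include "edac_core.h"

static struct pci_driver example_edac_driver;	/* hypothetical driver */

static int __init example_edac_init(void)
{
	/* pick up edac_op_state=0 (poll) or =1 (NMI) before registering */
	opstate_init();

	return pci_register_driver(&example_edac_driver);
}
module_init(example_edac_init);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
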
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 6eb4347..c94a0eb 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -29,6 +29,7 @@
 #define EDAC_MOD_STR	"e752x_edac"
 
 static int force_function_unhide;
+static int sysbus_parity = -1;
 
 static struct edac_pci_ctl_info *e752x_pci;
 
@@ -62,6 +63,14 @@
 #define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
 #endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */
 
+#ifndef PCI_DEVICE_ID_INTEL_3100_0
+#define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
+#endif				/* PCI_DEVICE_ID_INTEL_3100_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
+#define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
+#endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */
+
 #define E752X_NR_CSROWS		8	/* number of csrows */
 
 /* E752X register addresses - device 0 function 0 */
@@ -152,6 +161,12 @@
 					/*     error syndrome register (16b) */
 #define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */
 
+/* 3100 IMCH specific register addresses - device 0 function 1 */
+#define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
+#define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
+#define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
+#define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */
+
 /* ICH5R register addresses - device 30 function 0 */
 #define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
 #define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
@@ -160,7 +175,8 @@
 enum e752x_chips {
 	E7520 = 0,
 	E7525 = 1,
-	E7320 = 2
+	E7320 = 2,
+	I3100 = 3
 };
 
 struct e752x_pvt {
@@ -185,8 +201,10 @@
 struct e752x_error_info {
 	u32 ferr_global;
 	u32 nerr_global;
-	u8 hi_ferr;
-	u8 hi_nerr;
+	u32 nsi_ferr;	/* 3100 only */
+	u32 nsi_nerr;	/* 3100 only */
+	u8 hi_ferr;	/* all but 3100 */
+	u8 hi_nerr;	/* all but 3100 */
 	u16 sysbus_ferr;
 	u16 sysbus_nerr;
 	u8 buf_ferr;
@@ -215,6 +233,10 @@
 		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
 		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
 		.ctl_name = "E7320"},
+	[I3100] = {
+		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
+		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
+		.ctl_name = "3100"},
 };
 
 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
@@ -402,7 +424,7 @@
 static char *global_message[11] = {
 	"PCI Express C1", "PCI Express C", "PCI Express B1",
 	"PCI Express B", "PCI Express A1", "PCI Express A",
-	"DMA Controler", "HUB Interface", "System Bus",
+	"DMA Controler", "HUB or NS Interface", "System Bus",
 	"DRAM Controler", "Internal Buffer"
 };
 
@@ -455,6 +477,63 @@
 		do_hub_error(fatal, errors);
 }
 
+#define NSI_FATAL_MASK		0x0c080081
+#define NSI_NON_FATAL_MASK	0x23a0ba64
+#define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
+
+static char *nsi_message[30] = {
+	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
+	"",						/* reserved */
+	"NSI Parity Error",				/* bit 2, non-fatal */
+	"",						/* reserved */
+	"",						/* reserved */
+	"Correctable Error Message",			/* bit 5, non-fatal */
+	"Non-Fatal Error Message",			/* bit 6, non-fatal */
+	"Fatal Error Message",				/* bit 7, fatal */
+	"",						/* reserved */
+	"Receiver Error",				/* bit 9, non-fatal */
+	"",						/* reserved */
+	"Bad TLP",					/* bit 11, non-fatal */
+	"Bad DLLP",					/* bit 12, non-fatal */
+	"REPLAY_NUM Rollover",				/* bit 13, non-fatal */
+	"",						/* reserved */
+	"Replay Timer Timeout",				/* bit 15, non-fatal */
+	"",						/* reserved */
+	"",						/* reserved */
+	"",						/* reserved */
+	"Data Link Protocol Error",			/* bit 19, fatal */
+	"",						/* reserved */
+	"Poisoned TLP",					/* bit 21, non-fatal */
+	"",						/* reserved */
+	"Completion Timeout",				/* bit 23, non-fatal */
+	"Completer Abort",				/* bit 24, non-fatal */
+	"Unexpected Completion",			/* bit 25, non-fatal */
+	"Receiver Overflow",				/* bit 26, fatal */
+	"Malformed TLP",				/* bit 27, fatal */
+	"",						/* reserved */
+	"Unsupported Request"				/* bit 29, non-fatal */
+};
+
+static void do_nsi_error(int fatal, u32 errors)
+{
+	int i;
+
+	for (i = 0; i < 30; i++) {
+		if (errors & (1 << i))
+			printk(KERN_WARNING "%sError %s\n",
+			       fatal_message[fatal], nsi_message[i]);
+	}
+}
+
+static inline void nsi_error(int fatal, u32 errors, int *error_found,
+		int handle_error)
+{
+	*error_found = 1;
+
+	if (handle_error)
+		do_nsi_error(fatal, errors);
+}
+
 static char *membuf_message[4] = {
 	"Internal PMWB to DRAM parity",
 	"Internal PMWB to System Bus Parity",
@@ -546,6 +625,31 @@
 	}
 }
 
+static void e752x_check_ns_interface(struct e752x_error_info *info,
+				int *error_found, int handle_error)
+{
+	u32 stat32;
+
+	stat32 = info->nsi_ferr;
+	if (stat32 & NSI_ERR_MASK) { /* Error, so process */
+		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
+			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+				  handle_error);
+		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
+			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+				  handle_error);
+	}
+	stat32 = info->nsi_nerr;
+	if (stat32 & NSI_ERR_MASK) {
+		if (stat32 & NSI_FATAL_MASK)
+			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+				  handle_error);
+		if (stat32 & NSI_NON_FATAL_MASK)
+			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+				  handle_error);
+	}
+}
+
 static void e752x_check_sysbus(struct e752x_error_info *info,
 			int *error_found, int handle_error)
 {
@@ -653,7 +757,15 @@
 	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
 
 	if (info->ferr_global) {
-		pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
+		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+			pci_read_config_dword(dev, I3100_NSI_FERR,
+					     &info->nsi_ferr);
+			info->hi_ferr = 0;
+		} else {
+			pci_read_config_byte(dev, E752X_HI_FERR,
+					     &info->hi_ferr);
+			info->nsi_ferr = 0;
+		}
 		pci_read_config_word(dev, E752X_SYSBUS_FERR,
 				&info->sysbus_ferr);
 		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
@@ -669,10 +781,15 @@
 		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
 				&info->dram_retr_add);
 
+		/* ignore the reserved bits just in case */
 		if (info->hi_ferr & 0x7f)
 			pci_write_config_byte(dev, E752X_HI_FERR,
 					info->hi_ferr);
 
+		if (info->nsi_ferr & NSI_ERR_MASK)
+			pci_write_config_dword(dev, I3100_NSI_FERR,
+					info->nsi_ferr);
+
 		if (info->sysbus_ferr)
 			pci_write_config_word(dev, E752X_SYSBUS_FERR,
 					info->sysbus_ferr);
@@ -692,7 +809,15 @@
 	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
 
 	if (info->nerr_global) {
-		pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
+		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+			pci_read_config_dword(dev, I3100_NSI_NERR,
+					     &info->nsi_nerr);
+			info->hi_nerr = 0;
+		} else {
+			pci_read_config_byte(dev, E752X_HI_NERR,
+					     &info->hi_nerr);
+			info->nsi_nerr = 0;
+		}
 		pci_read_config_word(dev, E752X_SYSBUS_NERR,
 				&info->sysbus_nerr);
 		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
@@ -706,6 +831,10 @@
 			pci_write_config_byte(dev, E752X_HI_NERR,
 					info->hi_nerr);
 
+		if (info->nsi_nerr & NSI_ERR_MASK)
+			pci_write_config_dword(dev, I3100_NSI_NERR,
+					info->nsi_nerr);
+
 		if (info->sysbus_nerr)
 			pci_write_config_word(dev, E752X_SYSBUS_NERR,
 					info->sysbus_nerr);
@@ -750,6 +879,7 @@
 		global_error(0, stat32, &error_found, handle_errors);
 
 	e752x_check_hub_interface(info, &error_found, handle_errors);
+	e752x_check_ns_interface(info, &error_found, handle_errors);
 	e752x_check_sysbus(info, &error_found, handle_errors);
 	e752x_check_membuf(info, &error_found, handle_errors);
 	e752x_check_dram(mci, info, &error_found, handle_errors);
@@ -920,15 +1050,53 @@
 	return 1;
 }
 
+/* Setup system bus parity mask register.
+ * Sysbus parity supported on:
+ *   e7320/e7520/e7525 + Xeon
+ *   i3100 + Xeon/Celeron
+ * Sysbus parity not supported on:
+ *   i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo
+ */
+static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
+{
+	char *cpu_id = cpu_data(0).x86_model_id;
+	struct pci_dev *dev = pvt->dev_d0f1;
+	int enable = 1;
+
+	/* Allow module parameter override, else see if CPU supports parity */
+	if (sysbus_parity != -1) {
+		enable = sysbus_parity;
+	} else if (cpu_id[0] &&
+		   ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
+		    (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
+		    (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) {
+		e752x_printk(KERN_INFO, "System Bus Parity not "
+			     "supported by CPU, disabling\n");
+		enable = 0;
+	}
+
+	if (enable)
+		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
+	else
+		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
+}
+
 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
 {
 	struct pci_dev *dev;
 
 	dev = pvt->dev_d0f1;
 	/* Turn off error disable & SMI in case the BIOS turned it on */
-	pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
-	pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
-	pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
+	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
+		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
+	} else {
+		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+	}
+
+	e752x_init_sysbus_parity_mask(pvt);
+
 	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
 	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
 	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
@@ -949,16 +1117,6 @@
 	debugf0("%s(): mci\n", __func__);
 	debugf0("Starting Probe1\n");
 
-	/* make sure error reporting method is sane */
-	switch (edac_op_state) {
-	case EDAC_OPSTATE_POLL:
-	case EDAC_OPSTATE_NMI:
-		break;
-	default:
-		edac_op_state = EDAC_OPSTATE_POLL;
-		break;
-	}
-
 	/* check to see if device 0 function 1 is enabled; if it isn't, we
 	 * assume the BIOS has reserved it for a reason and is expecting
 	 * exclusive access, we take care not to violate that assumption and
@@ -985,8 +1143,9 @@
 
 	debugf3("%s(): init mci\n", __func__);
 	mci->mtype_cap = MEM_FLAG_RDDR;
-	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
-		EDAC_FLAG_S4ECD4ED;
+	/* 3100 IMCH supports SECDEC only */
+	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
+		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
 	/* FIXME - what if different memory types are in different csrows? */
 	mci->mod_name = EDAC_MOD_STR;
 	mci->mod_ver = E752X_REVISION;
@@ -1018,7 +1177,10 @@
 	e752x_init_csrows(mci, pdev, ddrcsr);
 	e752x_init_mem_map_table(pdev, pvt);
 
-	mci->edac_cap |= EDAC_FLAG_NONE;
+	if (dev_idx == I3100)
+		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
+	else
+		mci->edac_cap |= EDAC_FLAG_NONE;
 	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
 
 	/* load the top of low memory, remap base, and remap limit vars */
@@ -1110,6 +1272,9 @@
 	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 	 E7320},
 	{
+	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+	 I3100},
+	{
 	 0,
 	 }			/* 0 terminated list. */
 };
@@ -1128,6 +1293,10 @@
 	int pci_rc;
 
 	debugf3("%s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	pci_rc = pci_register_driver(&e752x_driver);
 	return (pci_rc < 0) ? pci_rc : 0;
 }
@@ -1143,10 +1312,15 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
-MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
+MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
 
 module_param(force_function_unhide, int, 0444);
 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
 		 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+
 module_param(edac_op_state, int, 0444);
 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+module_param(sysbus_parity, int, 0444);
+MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
+		" 1=enable system bus parity checking, default=auto-detect");
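
The open-coded edac_op_state sanity checks removed from the probe routines in this patch are consolidated into the opstate_init() helper that each driver now calls from its module init. Judging from the removed switch statements, the helper's logic is roughly the sketch below; the actual implementation lives in the EDAC core and may differ in detail:

	void opstate_init(void)
	{
		switch (edac_op_state) {
		case EDAC_OPSTATE_POLL:
		case EDAC_OPSTATE_NMI:
			break;
		default:
			edac_op_state = EDAC_OPSTATE_POLL;
			break;
		}
	}

The new sysbus_parity parameter lets the auto-detection in e752x_init_sysbus_parity_mask() be overridden at load time, for example with something like "modprobe e752x_edac sysbus_parity=0" (illustrative invocation).
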
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 96ecc49..c7d11cc 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -414,16 +414,6 @@
 
 	debugf0("%s(): mci\n", __func__);
 
-	/* make sure error reporting method is sane */
-	switch (edac_op_state) {
-	case EDAC_OPSTATE_POLL:
-	case EDAC_OPSTATE_NMI:
-		break;
-	default:
-		edac_op_state = EDAC_OPSTATE_POLL;
-		break;
-	}
-
 	pci_read_config_dword(pdev, E7XXX_DRC, &drc);
 
 	drc_chan = dual_channel_active(drc, dev_idx);
@@ -565,6 +555,9 @@
 
 static int __init e7xxx_init(void)
 {
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	return pci_register_driver(&e7xxx_driver);
 }
 
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index b9552bc..63372fa 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -36,7 +36,7 @@
  * is protected by the 'device_ctls_mutex' lock
  */
 static DEFINE_MUTEX(device_ctls_mutex);
-static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list);
+static LIST_HEAD(edac_device_list);
 
 #ifdef CONFIG_EDAC_DEBUG
 static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
@@ -375,37 +375,6 @@
 	wait_for_completion(&edac_device->removal_complete);
 }
 
-/**
- * edac_device_find
- *	Search for a edac_device_ctl_info structure whose index is 'idx'.
- *
- * If found, return a pointer to the structure.
- * Else return NULL.
- *
- * Caller must hold device_ctls_mutex.
- */
-struct edac_device_ctl_info *edac_device_find(int idx)
-{
-	struct list_head *item;
-	struct edac_device_ctl_info *edac_dev;
-
-	/* Iterate over list, looking for exact match of ID */
-	list_for_each(item, &edac_device_list) {
-		edac_dev = list_entry(item, struct edac_device_ctl_info, link);
-
-		if (edac_dev->dev_idx >= idx) {
-			if (edac_dev->dev_idx == idx)
-				return edac_dev;
-
-			/* not on list, so terminate early */
-			break;
-		}
-	}
-
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(edac_device_find);
-
 /*
  * edac_device_workq_function
  *	performs the operation scheduled by a workq request
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 063a1bf..a4cf164 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -36,7 +36,7 @@
 
 /* lock to memory controller's control array */
 static DEFINE_MUTEX(mem_ctls_mutex);
-static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
+static LIST_HEAD(mc_devices);
 
 #ifdef CONFIG_EDAC_DEBUG
 
@@ -886,24 +886,3 @@
 	mci->csrows[csrow].channels[channel].ce_count++;
 }
 EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
-
-/*
- * Iterate over all MC instances and check for ECC, et al, errors
- */
-void edac_check_mc_devices(void)
-{
-	struct list_head *item;
-	struct mem_ctl_info *mci;
-
-	debugf3("%s()\n", __func__);
-	mutex_lock(&mem_ctls_mutex);
-
-	list_for_each(item, &mc_devices) {
-		mci = list_entry(item, struct mem_ctl_info, link);
-
-		if (mci->edac_check != NULL)
-			mci->edac_check(mci);
-	}
-
-	mutex_unlock(&mem_ctls_mutex);
-}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index cbc419c..233d479 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -27,7 +27,6 @@
 extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
 extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
 extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
-extern void edac_check_mc_devices(void);
 extern int edac_get_log_ue(void);
 extern int edac_get_log_ce(void);
 extern int edac_get_panic_on_ue(void);
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 32be435..9b24340 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -29,7 +29,7 @@
 #include "edac_module.h"
 
 static DEFINE_MUTEX(edac_pci_ctls_mutex);
-static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list);
+static LIST_HEAD(edac_pci_list);
 
 /*
  * edac_pci_alloc_ctl_info
@@ -189,6 +189,9 @@
 	wait_for_completion(&pci->complete);
 }
 
+#if 0
+/* Older code, but might use in the future */
+
 /*
  * edac_pci_find()
  * 	Search for an edac_pci_ctl_info structure whose index is 'idx'
@@ -219,6 +222,7 @@
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(edac_pci_find);
+#endif
 
 /*
  * edac_pci_workq_function()
@@ -422,7 +426,7 @@
  *
  *	a Generic parity check API
  */
-void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
+static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
 {
 	debugf4("%s()\n", __func__);
 	edac_pci_do_parity_check();
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 71c3195..2c1fa1b 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -37,17 +37,17 @@
 	return check_pci_errors;
 }
 
-int edac_pci_get_log_pe(void)
+static int edac_pci_get_log_pe(void)
 {
 	return edac_pci_log_pe;
 }
 
-int edac_pci_get_log_npe(void)
+static int edac_pci_get_log_npe(void)
 {
 	return edac_pci_log_npe;
 }
 
-int edac_pci_get_panic_on_pe(void)
+static int edac_pci_get_panic_on_pe(void)
 {
 	return edac_pci_panic_on_pe;
 }
@@ -197,7 +197,8 @@
  *
  *	unregister the kobj for the EDAC PCI instance
  */
-void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci)
+static void edac_pci_unregister_sysfs_instance_kobj(
+			struct edac_pci_ctl_info *pci)
 {
 	debugf0("%s()\n", __func__);
 
@@ -337,7 +338,7 @@
  *	setup the sysfs for EDAC PCI attributes
  *	assumes edac_class has already been initialized
  */
-int edac_pci_main_kobj_setup(void)
+static int edac_pci_main_kobj_setup(void)
 {
 	int err;
 	struct sysdev_class *edac_class;
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5d42928..6c9a0f2 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -326,15 +326,6 @@
 		return -ENODEV;
 	}
 
-	switch (edac_op_state) {
-	case EDAC_OPSTATE_POLL:
-	case EDAC_OPSTATE_NMI:
-		break;
-	default:
-		edac_op_state = EDAC_OPSTATE_POLL;
-		break;
-	}
-
 	c0dra[0] = readb(window + I3000_C0DRA + 0);	/* ranks 0,1 */
 	c0dra[1] = readb(window + I3000_C0DRA + 1);	/* ranks 2,3 */
 	c1dra[0] = readb(window + I3000_C1DRA + 0);	/* ranks 0,1 */
@@ -503,6 +494,10 @@
 	int pci_rc;
 
 	debugf3("MC: %s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	pci_rc = pci_register_driver(&i3000_driver);
 	if (pci_rc < 0)
 		goto fail0;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 5a85201..4a16b5b 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1286,16 +1286,6 @@
 	if (PCI_FUNC(pdev->devfn) != 0)
 		return -ENODEV;
 
-	/* make sure error reporting method is sane */
-	switch (edac_op_state) {
-	case EDAC_OPSTATE_POLL:
-	case EDAC_OPSTATE_NMI:
-		break;
-	default:
-		edac_op_state = EDAC_OPSTATE_POLL;
-		break;
-	}
-
 	/* Ask the devices for the number of CSROWS and CHANNELS so
 	 * that we can calculate the memory resources, etc
 	 *
@@ -1478,6 +1468,9 @@
 
 	debugf2("MC: " __FILE__ ": %s()\n", __func__);
 
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	pci_rc = pci_register_driver(&i5000_driver);
 
 	return (pci_rc < 0) ? pci_rc : 0;
@@ -1501,5 +1494,6 @@
     ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
 MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
 		I5000_REVISION);
+
 module_param(edac_op_state, int, 0444);
 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 83bfe37..c5305e3 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -29,6 +29,7 @@
 
 #include <linux/slab.h>
 
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define I82443_REVISION	"0.1"
@@ -386,6 +387,9 @@
 
 static int __init i82443bxgx_edacmc_init(void)
 {
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	return pci_register_driver(&i82443bxgx_edacmc_driver);
 }
 
@@ -400,3 +404,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
 MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index f5ecd2c..c0088ba 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define  I82860_REVISION " Ver: 2.0.2 " __DATE__
@@ -294,6 +295,9 @@
 
 	debugf3("%s()\n", __func__);
 
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
 		goto fail0;
 
@@ -345,3 +349,6 @@
 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
 		"Ben Woodard <woodard@redhat.com>");
 MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 031abad..e43bdc4 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define I82875P_REVISION	" Ver: 2.0.2 " __DATE__
@@ -393,6 +394,7 @@
 	struct i82875p_error_info discard;
 
 	debugf0("%s()\n", __func__);
+
 	ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
 
 	if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
@@ -532,6 +534,10 @@
 	int pci_rc;
 
 	debugf3("%s()\n", __func__);
+
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	pci_rc = pci_register_driver(&i82875p_driver);
 
 	if (pci_rc < 0)
@@ -586,3 +592,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
 MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0ee8884..2eed3ea 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -14,7 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define I82975X_REVISION	" Ver: 1.0.0 " __DATE__
@@ -611,6 +611,9 @@
 
 	debugf3("%s()\n", __func__);
 
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	pci_rc = pci_register_driver(&i82975x_driver);
 	if (pci_rc < 0)
 		goto fail0;
@@ -664,3 +667,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 9032091..3fd65a5 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -284,6 +284,9 @@
 
 static int __init pasemi_edac_init(void)
 {
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	return pci_register_driver(&pasemi_edac_driver);
 }
 
@@ -298,3 +301,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
 MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e25f712..9900675 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -20,6 +20,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define R82600_REVISION	" Ver: 2.0.2 " __DATE__
@@ -393,6 +394,9 @@
 
 static int __init r82600_init(void)
 {
+	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
+	opstate_init();
+
 	return pci_register_driver(&r82600_driver);
 }
 
@@ -412,3 +416,6 @@
 module_param(disable_hardware_scrub, bool, 0644);
 MODULE_PARM_DESC(disable_hardware_scrub,
 		 "If set, disable the chipset's automatic scrub for CEs");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 40ffd76..dc2cec6 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -17,6 +17,15 @@
           obscure configurations. Most disk controller BIOS vendors do
           not yet implement this feature.
 
+config EDD_OFF
+	bool "Sets default behavior for EDD detection to off"
+	depends on EDD
+	default n
+	help
+	  Say Y if you want EDD disabled by default, even though it is compiled into the
+	  kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
+	  using the kernel parameter 'edd={on|skipmbr|off}'.
+
 config EFI_VARS
 	tristate "EFI Variable Support via sysfs"
 	depends on EFI
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index f235940..25918f7 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -63,7 +63,7 @@
 		return;
 
 	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
-		__FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
+		__func__, smi_data_buf_phys_addr, smi_data_buf_size);
 
 	dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
 			  smi_data_buf_handle);
@@ -92,7 +92,7 @@
 	if (!buf) {
 		dev_dbg(&dcdbas_pdev->dev,
 			"%s: failed to allocate memory size %lu\n",
-			__FUNCTION__, size);
+			__func__, size);
 		return -ENOMEM;
 	}
 	/* memory zeroed by dma_alloc_coherent */
@@ -110,7 +110,7 @@
 	smi_data_buf_size = size;
 
 	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
-		__FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
+		__func__, smi_data_buf_phys_addr, smi_data_buf_size);
 
 	return 0;
 }
@@ -258,7 +258,7 @@
 
 	if (smi_cmd->magic != SMI_CMD_MAGIC) {
 		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
-			 __FUNCTION__);
+			 __func__);
 		return -EBADR;
 	}
 
@@ -267,7 +267,7 @@
 	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
 	if (smp_processor_id() != 0) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
-			__FUNCTION__);
+			__func__);
 		ret = -EBUSY;
 		goto out;
 	}
@@ -428,7 +428,7 @@
 
 	default:
 		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
-			__FUNCTION__, host_control_smi_type);
+			__func__, host_control_smi_type);
 		return -ENOSYS;
 	}
 
@@ -456,13 +456,13 @@
 	host_control_action = HC_ACTION_NONE;
 
 	if (!smi_data_buf) {
-		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__);
+		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
 		return;
 	}
 
 	if (smi_data_buf_size < sizeof(struct apm_cmd)) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
-			__FUNCTION__);
+			__func__);
 		return;
 	}
 
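
The __FUNCTION__ to __func__ conversions in dcdbas.c above (and the matching ones in dell_rbu.c below) replace the GCC-specific spelling with the C99 predefined identifier; both expand to the name of the enclosing function. A minimal illustration, not taken from the patch:

	static void example(void)
	{
		/* __func__ is standard C99; __FUNCTION__ is a GCC
		 * extension that yields the same string, "example" */
		printk(KERN_DEBUG "%s: entered\n", __func__);
	}
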
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 477a3d0..6a8b1e0 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -123,7 +123,7 @@
 	if (!newpacket) {
 		printk(KERN_WARNING
 			"dell_rbu:%s: failed to allocate new "
-			"packet\n", __FUNCTION__);
+			"packet\n", __func__);
 		retval = -ENOMEM;
 		spin_lock(&rbu_data.lock);
 		goto out_noalloc;
@@ -152,7 +152,7 @@
 		printk(KERN_WARNING
 			"dell_rbu:%s: failed to allocate "
 			"invalid_addr_packet_array \n",
-			__FUNCTION__);
+			__func__);
 		retval = -ENOMEM;
 		spin_lock(&rbu_data.lock);
 		goto out_alloc_packet;
@@ -164,7 +164,7 @@
 		if (!packet_data_temp_buf) {
 			printk(KERN_WARNING
 				"dell_rbu:%s: failed to allocate new "
-				"packet\n", __FUNCTION__);
+				"packet\n", __func__);
 			retval = -ENOMEM;
 			spin_lock(&rbu_data.lock);
 			goto out_alloc_packet_array;
@@ -416,7 +416,7 @@
 		 */
 		if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
 			printk(KERN_ERR "dell_rbu:%s: corruption "
-				"check failed\n", __FUNCTION__);
+				"check failed\n", __func__);
 			return -EINVAL;
 		}
 		/*
@@ -642,7 +642,7 @@
 			if (req_firm_rc) {
 				printk(KERN_ERR
 					"dell_rbu:%s request_firmware_nowait"
-					" failed %d\n", __FUNCTION__, rc);
+					" failed %d\n", __func__, rc);
 				rc = -EIO;
 			} else
 				rbu_data.entry_created = 1;
@@ -718,7 +718,7 @@
 	if (IS_ERR(rbu_device)) {
 		printk(KERN_ERR
 			"dell_rbu:%s:platform_device_register_simple "
-			"failed\n", __FUNCTION__);
+			"failed\n", __func__);
 		return PTR_ERR(rbu_device);
 	}
 
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d0e5fa4..11f1744 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -58,7 +58,7 @@
 	unsigned int len = 0;
 	void *virt;
 
-	ibft_addr = 0;
+	ibft_addr = NULL;
 
 	for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
 		/* The table can't be inside the VGA BIOS reserved space,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e03c67d..f43d6d3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -606,7 +606,7 @@
 		case 2:
 			if ((end - start) < 2)
 				return NULL;
-			item->data.u16 = le16_to_cpu(get_unaligned((__le16*)start));
+			item->data.u16 = get_unaligned_le16(start);
 			start = (__u8 *)((__le16 *)start + 1);
 			return start;
 
@@ -614,7 +614,7 @@
 			item->size++;
 			if ((end - start) < 4)
 				return NULL;
-			item->data.u32 = le32_to_cpu(get_unaligned((__le32*)start));
+			item->data.u32 = get_unaligned_le32(start);
 			start = (__u8 *)((__le32 *)start + 1);
 			return start;
 	}
@@ -765,7 +765,7 @@
 
 	report += offset >> 3;  /* adjust byte index */
 	offset &= 7;            /* now only need bit offset into one byte */
-	x = le64_to_cpu(get_unaligned((__le64 *) report));
+	x = get_unaligned_le64(report);
 	x = (x >> offset) & ((1ULL << n) - 1);  /* extract bit field */
 	return (u32) x;
 }
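
The hid-core.c hunks above replace the open-coded le*_to_cpu(get_unaligned(...)) pattern with the combined get_unaligned_le16/32/64() helpers, which read a little-endian value from a possibly misaligned byte pointer in one step. Both spellings produce the same result; a side-by-side sketch (illustrative only, not part of the patch):

	__u8 *start;	/* points into the HID report descriptor */
	....
	/* old style */
	item->data.u16 = le16_to_cpu(get_unaligned((__le16 *)start));
	/* new style, same value */
	item->data.u16 = get_unaligned_le16(start);
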
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3f9e100..f702f91 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -862,40 +862,6 @@
 	  Say Y here if you want to support the onboard IDE channels on the
 	  Simtec BAST or the Thorcom VR1000
 
-config ETRAX_IDE
-	tristate "ETRAX IDE support"
-	depends on CRIS && BROKEN
-	select BLK_DEV_IDEDMA
-	help
-	  Enables the ETRAX IDE driver.
-
-	  You can't use parallel ports or SCSI ports at the same time.
-
-config ETRAX_IDE_DELAY
-	int "Delay for drives to regain consciousness"
-	depends on ETRAX_IDE && ETRAX_ARCH_V10
-	default 15
-	help
-	  Number of seconds to wait for IDE drives to spin up after an IDE
-	  reset.
-
-choice
-	prompt "IDE reset pin"
-	depends on ETRAX_IDE && ETRAX_ARCH_V10
-	default ETRAX_IDE_PB7_RESET
-
-config ETRAX_IDE_PB7_RESET
-	bool "Port_PB_Bit_7"
-	help
-	  IDE reset on pin 7 on port B
-
-config ETRAX_IDE_G27_RESET
-	bool "Port_G_Bit_27"
-	help
-	  IDE reset on pin 27 on port G
-
-endchoice
-
 config IDE_H8300
 	tristate "H8300 IDE support"
 	depends on H8300
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 571544c..f94b679 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -35,7 +35,7 @@
 	obj-y += cmd640-core.o
 endif
 
-obj-$(CONFIG_BLK_DEV_IDE)		+= cris/ ppc/
+obj-$(CONFIG_BLK_DEV_IDE)		+= ppc/
 obj-$(CONFIG_IDE_H8300)			+= h8300/
 obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
 obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 65038ca..0614569 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -483,7 +483,7 @@
 	.init_dma		= icside_dma_off_init,
 	.port_ops		= &icside_v6_no_dma_port_ops,
 	.dma_ops		= &icside_v6_dma_ops,
-	.host_flags		= IDE_HFLAG_SERIALIZE,
+	.host_flags		= IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
 	.mwdma_mask		= ATA_MWDMA2,
 	.swdma_mask		= ATA_SWDMA2,
 };
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index aaf3254..96378eb 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -342,6 +342,7 @@
 static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
 	.init_dma		= palm_bk3710_init_dma,
 	.port_ops		= &palm_bk3710_ports_ops,
+	.host_flags		= IDE_HFLAG_MMIO,
 	.pio_mask		= ATA_PIO4,
 	.udma_mask		= ATA_UDMA4,	/* (input clk 99MHz) */
 	.mwdma_mask		= ATA_MWDMA2,
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index babc1a5..1747b23 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -53,6 +53,7 @@
 
 		ide_init_port_hw(hwif, &hw);
 
+		hwif->host_flags = IDE_HFLAG_MMIO;
 		default_hwif_mmiops(hwif);
 
 		idx[0] = hwif->index;
diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
deleted file mode 100644
index 20b9596..0000000
--- a/drivers/ide/cris/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-EXTRA_CFLAGS				+= -Idrivers/ide
-
-obj-$(CONFIG_IDE_ETRAX)			+= ide-cris.o
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
deleted file mode 100644
index 9df2685..0000000
--- a/drivers/ide/cris/ide-cris.c
+++ /dev/null
@@ -1,1086 +0,0 @@
-/*
- * Etrax specific IDE functions, like init and PIO-mode setting etc.
- * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
- * Copyright (c) 2000-2005 Axis Communications AB
- *
- * Authors:    Bjorn Wesen        (initial version)
- *             Mikael Starvik     (crisv32 port)
- */
-
-/* Regarding DMA:
- *
- * There are two forms of DMA - "DMA handshaking" between the interface and the drive,
- * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's
- * something built-in in the Etrax. However only some drives support the DMA-mode handshaking
- * on the ATA-bus. The normal PC driver and Triton interface disables memory-if DMA when the
- * device can't do DMA handshaking for some stupid reason. We don't need to do that.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/hdreg.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-/* number of DMA descriptors */
-#define MAX_DMA_DESCRS 64
-
-/* number of times to retry busy-flags when reading/writing IDE-registers
- * this can't be too high because a hung harddisk might cause the watchdog
- * to trigger (sometimes INB and OUTB are called with irq's disabled)
- */
-
-#define IDE_REGISTER_TIMEOUT 300
-
-#define LOWDB(x)
-#define D(x)
-
-enum /* Transfer types */
-{
-	TYPE_PIO,
-	TYPE_DMA,
-	TYPE_UDMA
-};
-
-/* CRISv32 specifics */
-#ifdef CONFIG_ETRAX_ARCH_V32
-#include <asm/arch/hwregs/ata_defs.h>
-#include <asm/arch/hwregs/dma_defs.h>
-#include <asm/arch/hwregs/dma.h>
-#include <asm/arch/pinmux.h>
-
-#define ATA_UDMA2_CYC    2
-#define ATA_UDMA2_DVS    3
-#define ATA_UDMA1_CYC    2
-#define ATA_UDMA1_DVS    4
-#define ATA_UDMA0_CYC    4
-#define ATA_UDMA0_DVS    6
-#define ATA_DMA2_STROBE  7
-#define ATA_DMA2_HOLD    1
-#define ATA_DMA1_STROBE  8
-#define ATA_DMA1_HOLD    3
-#define ATA_DMA0_STROBE 25
-#define ATA_DMA0_HOLD   19
-#define ATA_PIO4_SETUP   3
-#define ATA_PIO4_STROBE  7
-#define ATA_PIO4_HOLD    1
-#define ATA_PIO3_SETUP   3
-#define ATA_PIO3_STROBE  9
-#define ATA_PIO3_HOLD    3
-#define ATA_PIO2_SETUP   3
-#define ATA_PIO2_STROBE 13
-#define ATA_PIO2_HOLD    5
-#define ATA_PIO1_SETUP   5
-#define ATA_PIO1_STROBE 23
-#define ATA_PIO1_HOLD    9
-#define ATA_PIO0_SETUP   9
-#define ATA_PIO0_STROBE 39
-#define ATA_PIO0_HOLD    9
-
-int
-cris_ide_ack_intr(ide_hwif_t* hwif)
-{
-	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-					       hwif->io_ports.data_addr);
-	REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
-	return 1;
-}
-
-static inline int
-cris_ide_busy(void)
-{
-	reg_ata_rs_stat_data stat_data;
-	stat_data = REG_RD(ata, regi_ata, rs_stat_data);
-	return stat_data.busy;
-}
-
-static inline int
-cris_ide_ready(void)
-{
-	return !cris_ide_busy();
-}
-
-static inline int
-cris_ide_data_available(unsigned short* data)
-{
-	reg_ata_rs_stat_data stat_data;
-	stat_data = REG_RD(ata, regi_ata, rs_stat_data);
-	*data = stat_data.data;
-	return stat_data.dav;
-}
-
-static void
-cris_ide_write_command(unsigned long command)
-{
-	REG_WR_INT(ata, regi_ata, rw_ctrl2, command); /* write data to the drive's register */
-}
-
-static void
-cris_ide_set_speed(int type, int setup, int strobe, int hold)
-{
-	reg_ata_rw_ctrl0 ctrl0 = REG_RD(ata, regi_ata, rw_ctrl0);
-	reg_ata_rw_ctrl1 ctrl1 = REG_RD(ata, regi_ata, rw_ctrl1);
-
-	if (type == TYPE_PIO) {
-		ctrl0.pio_setup = setup;
-		ctrl0.pio_strb = strobe;
-		ctrl0.pio_hold = hold;
-	} else if (type == TYPE_DMA) {
-		ctrl0.dma_strb = strobe;
-		ctrl0.dma_hold = hold;
-	} else if (type == TYPE_UDMA) {
-		ctrl1.udma_tcyc = setup;
-		ctrl1.udma_tdvs = strobe;
-	}
-	REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
-	REG_WR(ata, regi_ata, rw_ctrl1, ctrl1);
-}
-
-static unsigned long
-cris_ide_base_address(int bus)
-{
-	reg_ata_rw_ctrl2 ctrl2 = {0};
-	ctrl2.sel = bus;
-	return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
-}
-
-static unsigned long
-cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
-{
-	reg_ata_rw_ctrl2 ctrl2 = {0};
-	ctrl2.addr = addr;
-	ctrl2.cs1 = cs1;
-	ctrl2.cs0 = cs0;
-	return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
-}
-
-static __init void
-cris_ide_reset(unsigned val)
-{
-	reg_ata_rw_ctrl0 ctrl0 = {0};
-	ctrl0.rst = val ? regk_ata_active : regk_ata_inactive;
-	REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
-}
-
-static __init void
-cris_ide_init(void)
-{
-	reg_ata_rw_ctrl0 ctrl0 = {0};
-	reg_ata_rw_intr_mask intr_mask = {0};
-
-	ctrl0.en = regk_ata_yes;
-	REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
-
-	intr_mask.bus0 = regk_ata_yes;
-	intr_mask.bus1 = regk_ata_yes;
-	intr_mask.bus2 = regk_ata_yes;
-	intr_mask.bus3 = regk_ata_yes;
-
-	REG_WR(ata, regi_ata, rw_intr_mask, intr_mask);
-
-	crisv32_request_dma(2, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
-	crisv32_request_dma(3, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
-
-	crisv32_pinmux_alloc_fixed(pinmux_ata);
-	crisv32_pinmux_alloc_fixed(pinmux_ata0);
-	crisv32_pinmux_alloc_fixed(pinmux_ata1);
-	crisv32_pinmux_alloc_fixed(pinmux_ata2);
-	crisv32_pinmux_alloc_fixed(pinmux_ata3);
-
-	DMA_RESET(regi_dma2);
-	DMA_ENABLE(regi_dma2);
-	DMA_RESET(regi_dma3);
-	DMA_ENABLE(regi_dma3);
-
-	DMA_WR_CMD (regi_dma2, regk_dma_set_w_size2);
-	DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
-}
-
-static dma_descr_context mycontext __attribute__ ((__aligned__(32)));
-
-#define cris_dma_descr_type dma_descr_data
-#define cris_pio_read regk_ata_rd
-#define cris_ultra_mask 0x7
-#define MAX_DESCR_SIZE 0xffffffffUL
-
-static unsigned long
-cris_ide_get_reg(unsigned long reg)
-{
-	return (reg & 0x0e000000) >> 25;
-}
-
-static void
-cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
-{
-	d->buf = (char*)virt_to_phys(buf);
-	d->after = d->buf + len;
-	d->eol = last;
-}
-
-static void
-cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,int len)
-{
-	ide_hwif_t *hwif = drive->hwif;
-
-	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-					       hwif->io_ports.data_addr);
-	reg_ata_rw_trf_cnt trf_cnt = {0};
-
-	mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
-	mycontext.saved_data_buf = d->buf;
-	/* start the dma channel */
-	DMA_START_CONTEXT(dir ? regi_dma3 : regi_dma2, virt_to_phys(&mycontext));
-
-	/* initiate a multi word dma read using PIO handshaking */
-	trf_cnt.cnt = len >> 1;
-	/* Due to a "feature" the transfer count has to be one extra word for UDMA. */
-	if (type == TYPE_UDMA)
-		trf_cnt.cnt++;
-	REG_WR(ata, regi_ata, rw_trf_cnt, trf_cnt);
-
-	ctrl2.rw = dir ? regk_ata_rd : regk_ata_wr;
-	ctrl2.trf_mode = regk_ata_dma;
-	ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
-	            type == TYPE_DMA ? regk_ata_dma : regk_ata_udma;
-	ctrl2.multi = regk_ata_yes;
-	ctrl2.dma_size = regk_ata_word;
-	REG_WR(ata, regi_ata, rw_ctrl2, ctrl2);
-}
-
-static void
-cris_ide_wait_dma(int dir)
-{
-	reg_dma_rw_stat status;
-	do
-	{
-		status = REG_RD(dma, dir ? regi_dma3 : regi_dma2, rw_stat);
-	} while(status.list_state != regk_dma_data_at_eol);
-}
-
-static int cris_dma_test_irq(ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	int intr = REG_RD_INT(ata, regi_ata, r_intr);
-
-	reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-					       hwif->io_ports.data_addr);
-
-	return intr & (1 << ctrl2.sel) ? 1 : 0;
-}
-
-static void cris_ide_initialize_dma(int dir)
-{
-}
-
-#else
-/* CRISv10 specifics */
-#include <asm/arch/svinto.h>
-#include <asm/arch/io_interface_mux.h>
-
-/* PIO timing (in R_ATA_CONFIG)
- *
- *                        _____________________________
- * ADDRESS :     ________/
- *
- *                            _______________
- * DIOR    :     ____________/               \__________
- *
- *                               _______________
- * DATA    :     XXXXXXXXXXXXXXXX_______________XXXXXXXX
- *
- *
- * DIOR is unbuffered while address and data is buffered.
- * This creates two problems:
- * 1. The DIOR pulse is to early (because it is unbuffered)
- * 2. The rise time of DIOR is long
- *
- * There are at least three different plausible solutions
- * 1. Use a pad capable of larger currents in Etrax
- * 2. Use an external buffer
- * 3. Make the strobe pulse longer
- *
- * Some of the strobe timings below are modified to compensate
- * for this. This implies a slight performance decrease.
- *
- * THIS SHOULD NEVER BE CHANGED!
- *
- * TODO: Is this true for the latest LX boards still ?
- */
-
-#define ATA_UDMA2_CYC    0 /* No UDMA supported, just to make it compile. */
-#define ATA_UDMA2_DVS    0
-#define ATA_UDMA1_CYC    0
-#define ATA_UDMA1_DVS    0
-#define ATA_UDMA0_CYC    0
-#define ATA_UDMA0_DVS    0
-#define ATA_DMA2_STROBE  4
-#define ATA_DMA2_HOLD    0
-#define ATA_DMA1_STROBE  4
-#define ATA_DMA1_HOLD    1
-#define ATA_DMA0_STROBE 12
-#define ATA_DMA0_HOLD    9
-#define ATA_PIO4_SETUP   1
-#define ATA_PIO4_STROBE  5
-#define ATA_PIO4_HOLD    0
-#define ATA_PIO3_SETUP   1
-#define ATA_PIO3_STROBE  5
-#define ATA_PIO3_HOLD    1
-#define ATA_PIO2_SETUP   1
-#define ATA_PIO2_STROBE  6
-#define ATA_PIO2_HOLD    2
-#define ATA_PIO1_SETUP   2
-#define ATA_PIO1_STROBE 11
-#define ATA_PIO1_HOLD    4
-#define ATA_PIO0_SETUP   4
-#define ATA_PIO0_STROBE 19
-#define ATA_PIO0_HOLD    4
-
-int
-cris_ide_ack_intr(ide_hwif_t* hwif)
-{
-	return 1;
-}
-
-static inline int
-cris_ide_busy(void)
-{
-	return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
-}
-
-static inline int
-cris_ide_ready(void)
-{
-	return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
-}
-
-static inline int
-cris_ide_data_available(unsigned short* data)
-{
-	unsigned long status = *R_ATA_STATUS_DATA;
-	*data = (unsigned short)status;
-	return status & IO_MASK(R_ATA_STATUS_DATA, dav);
-}
-
-static void
-cris_ide_write_command(unsigned long command)
-{
-	*R_ATA_CTRL_DATA = command;
-}
-
-static void
-cris_ide_set_speed(int type, int setup, int strobe, int hold)
-{
-	static int pio_setup = ATA_PIO4_SETUP;
-	static int pio_strobe = ATA_PIO4_STROBE;
-	static int pio_hold = ATA_PIO4_HOLD;
-	static int dma_strobe = ATA_DMA2_STROBE;
-	static int dma_hold = ATA_DMA2_HOLD;
-
-	if (type == TYPE_PIO) {
-		pio_setup = setup;
-		pio_strobe = strobe;
-		pio_hold = hold;
-	} else if (type == TYPE_DMA) {
-		dma_strobe = strobe;
-	  dma_hold = hold;
-	}
-	*R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
-	  IO_FIELD( R_ATA_CONFIG, dma_strobe, dma_strobe ) |
-		IO_FIELD( R_ATA_CONFIG, dma_hold,   dma_hold ) |
-		IO_FIELD( R_ATA_CONFIG, pio_setup,  pio_setup ) |
-		IO_FIELD( R_ATA_CONFIG, pio_strobe, pio_strobe ) |
-		IO_FIELD( R_ATA_CONFIG, pio_hold,   pio_hold ) );
-}
-
-static unsigned long
-cris_ide_base_address(int bus)
-{
-	return IO_FIELD(R_ATA_CTRL_DATA, sel, bus);
-}
-
-static unsigned long
-cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
-{
-	return IO_FIELD(R_ATA_CTRL_DATA, addr, addr) |
-	       IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0) |
-	       IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1);
-}
-
-static __init void
-cris_ide_reset(unsigned val)
-{
-#ifdef CONFIG_ETRAX_IDE_G27_RESET
-	REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, val);
-#endif
-#ifdef CONFIG_ETRAX_IDE_PB7_RESET
-	port_pb_dir_shadow = port_pb_dir_shadow |
-		IO_STATE(R_PORT_PB_DIR, dir7, output);
-	*R_PORT_PB_DIR = port_pb_dir_shadow;
-	REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, val);
-#endif
-}
-
-static __init void
-cris_ide_init(void)
-{
-	volatile unsigned int dummy;
-
-	*R_ATA_CTRL_DATA = 0;
-	*R_ATA_TRANSFER_CNT = 0;
-	*R_ATA_CONFIG = 0;
-
-	if (cris_request_io_interface(if_ata, "ETRAX100LX IDE")) {
-		printk(KERN_CRIT "ide: Failed to get IO interface\n");
-		return;
-	} else if (cris_request_dma(ATA_TX_DMA_NBR,
-		                          "ETRAX100LX IDE TX",
-		                          DMA_VERBOSE_ON_ERROR,
-		                          dma_ata)) {
-		cris_free_io_interface(if_ata);
-		printk(KERN_CRIT "ide: Failed to get Tx DMA channel\n");
-		return;
-	} else if (cris_request_dma(ATA_RX_DMA_NBR,
-		                          "ETRAX100LX IDE RX",
-		                          DMA_VERBOSE_ON_ERROR,
-		                          dma_ata)) {
-		cris_free_dma(ATA_TX_DMA_NBR, "ETRAX100LX IDE Tx");
-		cris_free_io_interface(if_ata);
-		printk(KERN_CRIT "ide: Failed to get Rx DMA channel\n");
-		return;
-	}
-
-	/* make a dummy read to set the ata controller in a proper state */
-	dummy = *R_ATA_STATUS_DATA;
-
-	*R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ));
-	*R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw,   read) |
-	                     IO_FIELD( R_ATA_CTRL_DATA, addr, 1   ) );
-
-	while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag*/
-
-	*R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) |
-	                     IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) |
-	                     IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) |
-	                     IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) );
-
-	/* reset the dma channels we will use */
-
-	RESET_DMA(ATA_TX_DMA_NBR);
-	RESET_DMA(ATA_RX_DMA_NBR);
-	WAIT_DMA(ATA_TX_DMA_NBR);
-	WAIT_DMA(ATA_RX_DMA_NBR);
-}
-
-#define cris_dma_descr_type etrax_dma_descr
-#define cris_pio_read IO_STATE(R_ATA_CTRL_DATA, rw, read)
-#define cris_ultra_mask 0x0
-#define MAX_DESCR_SIZE 0x10000UL
-
-static unsigned long
-cris_ide_get_reg(unsigned long reg)
-{
-	return (reg & 0x0e000000) >> 25;
-}
-
-static void
-cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
-{
-	d->buf = virt_to_phys(buf);
-	d->sw_len = len == MAX_DESCR_SIZE ? 0 : len;
-	if (last)
-		d->ctrl |= d_eol;
-}
-
-static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir, int type, int len)
-{
-	unsigned long cmd;
-
-	if (dir) {
-		/* need to do this before RX DMA due to a chip bug
-		 * it is enough to just flush the part of the cache that
-		 * corresponds to the buffers we start, but since HD transfers
-		 * usually are more than 8 kB, it is easier to optimize for the
-		 * normal case and just flush the entire cache. its the only
-		 * way to be sure! (OB movie quote)
-		 */
-		flush_etrax_cache();
-		*R_DMA_CH3_FIRST = virt_to_phys(d);
-		*R_DMA_CH3_CMD   = IO_STATE(R_DMA_CH3_CMD, cmd, start);
-
-	} else {
-		*R_DMA_CH2_FIRST = virt_to_phys(d);
-		*R_DMA_CH2_CMD   = IO_STATE(R_DMA_CH2_CMD, cmd, start);
-	}
-
-	/* initiate a multi word dma read using DMA handshaking */
-
-	*R_ATA_TRANSFER_CNT =
-		IO_FIELD(R_ATA_TRANSFER_CNT, count, len >> 1);
-
-	cmd = dir ? IO_STATE(R_ATA_CTRL_DATA, rw, read) : IO_STATE(R_ATA_CTRL_DATA, rw, write);
-	cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
-	                          IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
-	*R_ATA_CTRL_DATA =
-		cmd |
-		IO_FIELD(R_ATA_CTRL_DATA, data,
-			 drive->hwif->io_ports.data_addr) |
-		IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma)  |
-		IO_STATE(R_ATA_CTRL_DATA, multi,    on)   |
-		IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
-}
-
-static void
-cris_ide_wait_dma(int dir)
-{
-	if (dir)
-		WAIT_DMA(ATA_RX_DMA_NBR);
-	else
-		WAIT_DMA(ATA_TX_DMA_NBR);
-}
-
-static int cris_dma_test_irq(ide_drive_t *drive)
-{
-	int intr = *R_IRQ_MASK0_RD;
-	int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
-			     drive->hwif->io_ports.data_addr);
-
-	return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
-}
-
-
-static void cris_ide_initialize_dma(int dir)
-{
-	if (dir)
-	{
-		RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */
-		WAIT_DMA(ATA_RX_DMA_NBR);
-	}
-	else
-	{
-		RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */
-		WAIT_DMA(ATA_TX_DMA_NBR);
-	}
-}
-
-#endif
-
-void
-cris_ide_outw(unsigned short data, unsigned long reg) {
-	int timeleft;
-
-	LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg));
-
-	/* note the lack of handling any timeouts. we stop waiting, but we don't
-	 * really notify anybody.
-	 */
-
-	timeleft = IDE_REGISTER_TIMEOUT;
-	/* wait for busy flag */
-	do {
-		timeleft--;
-	} while(timeleft && cris_ide_busy());
-
-	/*
-	 * Fall through at a timeout, so the ongoing command will be
-	 * aborted by the write below, which is expected to be a dummy
-	 * command to the command register.  This happens when a faulty
-	 * drive times out on a command.  See comment on timeout in
-	 * INB.
-	 */
-	if(!timeleft)
-		printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
-
-	cris_ide_write_command(reg|data); /* write data to the drive's register */
-
-	timeleft = IDE_REGISTER_TIMEOUT;
-	/* wait for transmitter ready */
-	do {
-		timeleft--;
-	} while(timeleft && !cris_ide_ready());
-}
-
-void
-cris_ide_outb(unsigned char data, unsigned long reg)
-{
-	cris_ide_outw(data, reg);
-}
-
-void
-cris_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
-{
-	cris_ide_outw(addr, port);
-}
-
-unsigned short
-cris_ide_inw(unsigned long reg) {
-	int timeleft;
-	unsigned short val;
-
-	timeleft = IDE_REGISTER_TIMEOUT;
-	/* wait for busy flag */
-	do {
-		timeleft--;
-	} while(timeleft && cris_ide_busy());
-
-	if(!timeleft) {
-		/*
-		 * If we're asked to read the status register, like for
-		 * example when a command does not complete for an
-		 * extended time, but the ATA interface is stuck in a
-		 * busy state at the *ETRAX* ATA interface level (as has
-		 * happened repeatedly with at least one bad disk), then
-		 * the best thing to do is to pretend that we read
-		 * "busy" in the status register, so the IDE driver will
-		 * time-out, abort the ongoing command and perform a
-		 * reset sequence.  Note that the subsequent OUT_BYTE
-		 * call will also timeout on busy, but as long as the
-		 * write is still performed, everything will be fine.
-		 */
-		if (cris_ide_get_reg(reg) == 7)
-			return BUSY_STAT;
-		else
-			/* For other rare cases we assume 0 is good enough.  */
-			return 0;
-	}
-
-	cris_ide_write_command(reg | cris_pio_read);
-
-	timeleft = IDE_REGISTER_TIMEOUT;
-	/* wait for available */
-	do {
-		timeleft--;
-	} while(timeleft && !cris_ide_data_available(&val));
-
-	if(!timeleft)
-		return 0;
-
-	LOWDB(printk("inb: 0x%x from reg 0x%x\n", val & 0xff, reg));
-
-	return val;
-}
-
-unsigned char
-cris_ide_inb(unsigned long reg)
-{
-	return (unsigned char)cris_ide_inw(reg);
-}
-
-static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
-static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
-static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
-static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
-
-static void cris_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void cris_set_pio_mode(ide_drive_t *drive, const u8 pio)
-{
-	int setup, strobe, hold;
-
-	switch(pio)
-	{
-		case 0:
-			setup = ATA_PIO0_SETUP;
-			strobe = ATA_PIO0_STROBE;
-			hold = ATA_PIO0_HOLD;
-			break;
-		case 1:
-			setup = ATA_PIO1_SETUP;
-			strobe = ATA_PIO1_STROBE;
-			hold = ATA_PIO1_HOLD;
-			break;
-		case 2:
-			setup = ATA_PIO2_SETUP;
-			strobe = ATA_PIO2_STROBE;
-			hold = ATA_PIO2_HOLD;
-			break;
-		case 3:
-			setup = ATA_PIO3_SETUP;
-			strobe = ATA_PIO3_STROBE;
-			hold = ATA_PIO3_HOLD;
-			break;
-		case 4:
-			setup = ATA_PIO4_SETUP;
-			strobe = ATA_PIO4_STROBE;
-			hold = ATA_PIO4_HOLD;
-			break;
-		default:
-			return;
-	}
-
-	cris_ide_set_speed(TYPE_PIO, setup, strobe, hold);
-}
-
-static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
-{
-	int cyc = 0, dvs = 0, strobe = 0, hold = 0;
-
-	switch(speed)
-	{
-		case XFER_UDMA_0:
-			cyc = ATA_UDMA0_CYC;
-			dvs = ATA_UDMA0_DVS;
-			break;
-		case XFER_UDMA_1:
-			cyc = ATA_UDMA1_CYC;
-			dvs = ATA_UDMA1_DVS;
-			break;
-		case XFER_UDMA_2:
-			cyc = ATA_UDMA2_CYC;
-			dvs = ATA_UDMA2_DVS;
-			break;
-		case XFER_MW_DMA_0:
-			strobe = ATA_DMA0_STROBE;
-			hold = ATA_DMA0_HOLD;
-			break;
-		case XFER_MW_DMA_1:
-			strobe = ATA_DMA1_STROBE;
-			hold = ATA_DMA1_HOLD;
-			break;
-		case XFER_MW_DMA_2:
-			strobe = ATA_DMA2_STROBE;
-			hold = ATA_DMA2_HOLD;
-			break;
-	}
-
-	if (speed >= XFER_UDMA_0)
-		cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
-	else
-		cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
-}
-
-static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
-{
-	int i;
-
-	memset(hw, 0, sizeof(*hw));
-
-	for (i = 0; i <= 7; i++)
-		hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1);
-
-	/*
-	 * the IDE control register is at ATA address 6,
-	 * with CS1 active instead of CS0
-	 */
-	hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0);
-
-	hw->irq = ide_default_irq(0);
-	hw->ack_intr = cris_ide_ack_intr;
-}
-
-static const struct ide_port_ops cris_port_ops = {
-	.set_pio_mode		= cris_set_pio_mode,
-	.set_dma_mode		= cris_set_dma_mode,
-};
-
-static const struct ide_dma_ops cris_dma_ops;
-
-static const struct ide_port_info cris_port_info __initdata = {
-	.chipset		= ide_etrax100,
-	.port_ops		= &cris_port_ops,
-	.dma_ops		= &cris_dma_ops,
-	.host_flags		= IDE_HFLAG_NO_ATAPI_DMA |
-				  IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
-	.pio_mask		= ATA_PIO4,
-	.udma_mask		= cris_ultra_mask,
-	.mwdma_mask		= ATA_MWDMA2,
-};
-
-static int __init init_e100_ide(void)
-{
-	hw_regs_t hw;
-	int h;
-	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-
-	printk("ide: ETRAX FS built-in ATA DMA controller\n");
-
-	for (h = 0; h < 4; h++) {
-		ide_hwif_t *hwif = NULL;
-
-		cris_setup_ports(&hw, cris_ide_base_address(h));
-
-		hwif = ide_find_port();
-		if (hwif == NULL)
-			continue;
-		ide_init_port_data(hwif, hwif->index);
-		ide_init_port_hw(hwif, &hw);
-
-		hwif->ata_input_data = &cris_ide_input_data;
-		hwif->ata_output_data = &cris_ide_output_data;
-		hwif->atapi_input_bytes = &cris_atapi_input_bytes;
-		hwif->atapi_output_bytes = &cris_atapi_output_bytes;
-		hwif->OUTB = &cris_ide_outb;
-		hwif->OUTW = &cris_ide_outw;
-		hwif->OUTBSYNC = &cris_ide_outbsync;
-		hwif->INB = &cris_ide_inb;
-		hwif->INW = &cris_ide_inw;
-		hwif->cbl = ATA_CBL_PATA40;
-
-		idx[h] = hwif->index;
-	}
-
-	/* Reset pulse */
-	cris_ide_reset(0);
-	udelay(25);
-	cris_ide_reset(1);
-
-	cris_ide_init();
-
-	cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
-	cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
-	cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
-
-	ide_device_add(idx, &cris_port_info);
-
-	return 0;
-}
-
-static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
-
-/*
- * The following routines are mainly used by the ATAPI drivers.
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd bytecount is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
-static void
-cris_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
-{
-	D(printk("atapi_input_bytes, buffer 0x%x, count %d\n",
-	         buffer, bytecount));
-
-	if(bytecount & 1) {
-		printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount);
-		bytecount++; /* to round off */
-	}
-
-	/* setup DMA and start transfer */
-
-	cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
-	cris_ide_start_dma(drive, &mydescr, 1, TYPE_PIO, bytecount);
-
-	/* wait for completion */
-	LED_DISK_READ(1);
-	cris_ide_wait_dma(1);
-	LED_DISK_READ(0);
-}
-
-static void
-cris_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
-{
-	D(printk("atapi_output_bytes, buffer 0x%x, count %d\n",
-	         buffer, bytecount));
-
-	if(bytecount & 1) {
-		printk("odd bytecount %d in atapi_out_bytes!\n", bytecount);
-		bytecount++;
-	}
-
-	cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
-	cris_ide_start_dma(drive, &mydescr, 0, TYPE_PIO, bytecount);
-
-	/* wait for completion */
-
-	LED_DISK_WRITE(1);
-	LED_DISK_READ(1);
-	cris_ide_wait_dma(0);
-	LED_DISK_WRITE(0);
-}
-
-/*
- * This is used for most PIO data transfers *from* the IDE interface
- */
-static void
-cris_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
-{
-	cris_atapi_input_bytes(drive, buffer, wcount << 2);
-}
-
-/*
- * This is used for most PIO data transfers *to* the IDE interface
- */
-static void
-cris_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
-{
-	cris_atapi_output_bytes(drive, buffer, wcount << 2);
-}
-
-/* we only have one DMA channel on the chip for ATA, so we can keep these statically */
-static cris_dma_descr_type ata_descrs[MAX_DMA_DESCRS] __attribute__ ((__aligned__(16)));
-static unsigned int ata_tot_size;
-
-/*
- * cris_ide_build_dmatable() prepares a dma request.
- * Returns 0 if all went okay, returns 1 otherwise.
- */
-static int cris_ide_build_dmatable (ide_drive_t *drive)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct scatterlist* sg;
-	struct request *rq  = drive->hwif->hwgroup->rq;
-	unsigned long size, addr;
-	unsigned int count = 0;
-	int i = 0;
-
-	sg = hwif->sg_table;
-
-	ata_tot_size = 0;
-
-	ide_map_sg(drive, rq);
-	i = hwif->sg_nents;
-
-	while(i) {
-		/*
-		 * Determine addr and size of next buffer area.  We assume that
-		 * individual virtual buffers are always composed linearly in
-		 * physical memory.  For example, we assume that any 8kB buffer
-		 * is always composed of two adjacent physical 4kB pages rather
-		 * than two possibly non-adjacent physical 4kB pages.
-		 */
-		/* group sequential buffers into one large buffer */
-		addr = sg_phys(sg);
-		size = sg_dma_len(sg);
-		while (--i) {
-			sg = sg_next(sg);
-			if ((addr + size) != sg_phys(sg))
-				break;
-			size += sg_dma_len(sg);
-		}
-
-		/* did we run out of descriptors? */
-
-		if(count >= MAX_DMA_DESCRS) {
-			printk("%s: too few DMA descriptors\n", drive->name);
-			return 1;
-		}
-
-		/* however, this case is more difficult - rw_trf_cnt cannot be more
-		   than 65536 words per transfer, so in that case we need to either
-		   1) use a DMA interrupt to re-trigger rw_trf_cnt and continue with
-		      the descriptors, or
-		   2) simply do the request here, and get dma_intr to only ide_end_request on
-		      those blocks that were actually set-up for transfer.
-		*/
-
-		if(ata_tot_size + size > 131072) {
-			printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
-			return 1;
-		}
-
-		/* If size > MAX_DESCR_SIZE it has to be splitted into new descriptors. Since we
-                   don't handle size > 131072 only one split is necessary */
-
-		if(size > MAX_DESCR_SIZE) {
-			cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, MAX_DESCR_SIZE, 0);
-			count++;
-			ata_tot_size += MAX_DESCR_SIZE;
-			size -= MAX_DESCR_SIZE;
-			addr += MAX_DESCR_SIZE;
-		}
-
-		cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, size,i ? 0 : 1);
-		count++;
-		ata_tot_size += size;
-	}
-
-	if (count) {
-		/* return and say all is ok */
-		return 0;
-	}
-
-	printk("%s: empty DMA table?\n", drive->name);
-	return 1;	/* let the PIO routines handle this weirdness */
-}
-
-/*
- * cris_dma_intr() is the handler for disk read/write DMA interrupts
- */
-static ide_startstop_t cris_dma_intr (ide_drive_t *drive)
-{
-	LED_DISK_READ(0);
-	LED_DISK_WRITE(0);
-
-	return ide_dma_intr(drive);
-}
-
-/*
- * Functions below initiates/aborts DMA read/write operations on a drive.
- *
- * The caller is assumed to have selected the drive and programmed the drive's
- * sector address using CHS or LBA.  All that remains is to prepare for DMA
- * and then issue the actual read/write DMA/PIO command to the drive.
- *
- * For ATAPI devices, we just prepare for DMA and return. The caller should
- * then issue the packet command to the drive and call us again with
- * cris_dma_start afterwards.
- *
- * Returns 0 if all went well.
- * Returns 1 if DMA read/write could not be started, in which case
- * the caller should revert to PIO for the current request.
- */
-
-static int cris_dma_end(ide_drive_t *drive)
-{
-	drive->waiting_for_dma = 0;
-	return 0;
-}
-
-static int cris_dma_setup(ide_drive_t *drive)
-{
-	struct request *rq = drive->hwif->hwgroup->rq;
-
-	cris_ide_initialize_dma(!rq_data_dir(rq));
-	if (cris_ide_build_dmatable (drive)) {
-		ide_map_sg(drive, rq);
-		return 1;
-	}
-
-	drive->waiting_for_dma = 1;
-	return 0;
-}
-
-static void cris_dma_exec_cmd(ide_drive_t *drive, u8 command)
-{
-	ide_execute_command(drive, command, &cris_dma_intr, WAIT_CMD, NULL);
-}
-
-static void cris_dma_start(ide_drive_t *drive)
-{
-	struct request *rq = drive->hwif->hwgroup->rq;
-	int writing = rq_data_dir(rq);
-	int type = TYPE_DMA;
-
-	if (drive->current_speed >= XFER_UDMA_0)
-		type = TYPE_UDMA;
-
-	cris_ide_start_dma(drive, &ata_descrs[0], writing ? 0 : 1, type, ata_tot_size);
-
-	if (writing) {
-		LED_DISK_WRITE(1);
-	} else {
-		LED_DISK_READ(1);
-	}
-}
-
-static const struct ide_dma_ops cris_dma_ops = {
-	.dma_host_set		= cris_dma_host_set,
-	.dma_setup		= cris_dma_setup,
-	.dma_exec_cmd		= cris_dma_exec_cmd,
-	.dma_start		= cris_dma_start,
-	.dma_end		= cris_dma_end,
-	.dma_test_irq		= cris_dma_test_irq,
-};
-
-module_init(init_e100_ide);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index fd23f12..ecf53bb 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -42,6 +42,91 @@
 	return r;
 }
 
+static void h8300_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+	if (task->tf_flags & IDE_TFLAG_FLAGGED)
+		HIHI = 0xFF;
+
+	ide_set_irq(drive, 1);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
+		mm_outw((tf->hob_data << 8) | tf->data, io_ports->data_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+		outb(tf->hob_feature, io_ports->feature_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+		outb(tf->hob_nsect, io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+		outb(tf->hob_lbal, io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+		outb(tf->hob_lbam, io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+		outb(tf->hob_lbah, io_ports->lbah_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+		outb(tf->feature, io_ports->feature_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+		outb(tf->nsect, io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+		outb(tf->lbal, io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+		outb(tf->lbam, io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+		outb(tf->lbah, io_ports->lbah_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+		outb((tf->device & HIHI) | drive->select.all,
+		     io_ports->device_addr);
+}
+
+static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+
+	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+		u16 data = mm_inw(io_ports->data_addr);
+
+		tf->data = data & 0xff;
+		tf->hob_data = (data >> 8) & 0xff;
+	}
+
+	/* be sure we're looking at the low order bits */
+	outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+		tf->nsect  = inb(io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+		tf->lbal   = inb(io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+		tf->lbam   = inb(io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+		tf->lbah   = inb(io_ports->lbah_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+		tf->device = inb(io_ports->device_addr);
+
+	if (task->tf_flags & IDE_TFLAG_LBA48) {
+		outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+			tf->hob_feature = inb(io_ports->feature_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+			tf->hob_nsect   = inb(io_ports->nsect_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+			tf->hob_lbal    = inb(io_ports->lbal_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+			tf->hob_lbam    = inb(io_ports->lbam_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+			tf->hob_lbah    = inb(io_ports->lbah_addr);
+	}
+}
+
 static void mm_outsw(unsigned long addr, void *buf, u32 len)
 {
 	unsigned short *bp = (unsigned short *)buf;
@@ -56,6 +141,18 @@
 		*bp = bswap(*(volatile u16 *)addr);
 }
 
+static void h8300_input_data(ide_drive_t *drive, struct request *rq,
+			     void *buf, unsigned int len)
+{
+	mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
+
+static void h8300_output_data(ide_drive_t *drive, struct request *rq,
+			      void *buf, unsigned int len)
+{
+	mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
+
 #define H8300_IDE_GAP (2)
 
 static inline void hw_setup(hw_regs_t *hw)
@@ -74,12 +171,11 @@
 {
 	default_hwif_iops(hwif);
 
-	hwif->OUTW  = mm_outw;
-	hwif->OUTSW = mm_outsw;
-	hwif->INW   = mm_inw;
-	hwif->INSW  = mm_insw;
-	hwif->OUTSL = NULL;
-	hwif->INSL  = NULL;
+	hwif->tf_load = h8300_tf_load;
+	hwif->tf_read = h8300_tf_read;
+
+	hwif->input_data  = h8300_input_data;
+	hwif->output_data = h8300_output_data;
 }
 
 static int __init h8300_ide_init(void)
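
The new h8300_input_data()/h8300_output_data() wrappers above take a length in bytes, while mm_insw()/mm_outsw() move 16-bit words, so the (len + 1) / 2 conversion rounds an odd byte count up to a whole word (one extra byte is transferred in that case). For example:

	/* len = 512 bytes -> (512 + 1) / 2 = 256 words */
	/* len =  13 bytes -> ( 13 + 1) / 2 =   7 words */
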
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b34fd2b..68e7f19 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -142,7 +142,6 @@
 {
 	unsigned long sector;
 	unsigned long bio_sectors;
-	unsigned long valid;
 	struct cdrom_info *info = drive->driver_data;
 
 	if (!cdrom_log_sense(drive, failed_command, sense))
@@ -173,17 +172,13 @@
 				 (sense->information[2] <<  8) |
 				 (sense->information[3]);
 
-			bio_sectors = bio_sectors(failed_command->bio);
-			if (bio_sectors < 4)
-				bio_sectors = 4;
 			if (drive->queue->hardsect_size == 2048)
 				/* device sector size is 2K */
 				sector <<= 2;
-			sector &= ~(bio_sectors - 1);
-			valid = (sector - failed_command->sector) << 9;
 
-			if (valid < 0)
-				valid = 0;
+			bio_sectors = max(bio_sectors(failed_command->bio), 4U);
+			sector &= ~(bio_sectors - 1);
+
 			if (sector < get_capacity(info->disk) &&
 			    drive->probed_capacity - sector < 4 * 75)
 				set_capacity(info->disk, sector);
@@ -555,14 +550,7 @@
 				    ATAPI_WAIT_PC, cdrom_timer_expiry);
 		return ide_started;
 	} else {
-		unsigned long flags;
-
-		/* packet command */
-		spin_lock_irqsave(&ide_lock, flags);
-		hwif->OUTBSYNC(drive, WIN_PACKETCMD,
-			       hwif->io_ports.command_addr);
-		ndelay(400);
-		spin_unlock_irqrestore(&ide_lock, flags);
+		ide_execute_pkt_cmd(drive);
 
 		return (*handler) (drive);
 	}
@@ -613,7 +601,7 @@
 		cmd_len = ATAPI_MIN_CDB_BYTES;
 
 	/* send the command to the device */
-	HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len);
+	hwif->output_data(drive, NULL, rq->cmd, cmd_len);
 
 	/* start the DMA if need be */
 	if (info->dma)
@@ -629,7 +617,7 @@
 {
 	while (len > 0) {
 		int dum = 0;
-		xf(drive, &dum, sizeof(dum));
+		xf(drive, NULL, &dum, sizeof(dum));
 		len -= sizeof(dum);
 	}
 }
@@ -639,7 +627,7 @@
 	while (nsects > 0) {
 		static char dum[SECTOR_SIZE];
 
-		drive->hwif->atapi_input_bytes(drive, dum, sizeof(dum));
+		drive->hwif->input_data(drive, NULL, dum, sizeof(dum));
 		nsects--;
 	}
 }
@@ -666,7 +654,7 @@
 		printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
 				drive->name, __func__);
 
-		xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes;
+		xf = rw ? hwif->output_data : hwif->input_data;
 		ide_cd_pad_transfer(drive, xf, len);
 	} else  if (rw == 0 && ireason == 1) {
 		/*
@@ -794,7 +782,7 @@
 
 	sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
 
-	memset(rq->cmd, 0, sizeof(rq->cmd));
+	memset(rq->cmd, 0, BLK_MAX_CDB);
 	rq->cmd[0] = GPCMD_SEEK;
 	put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
 
@@ -1019,10 +1007,10 @@
 
 	if (ireason == 0) {
 		write = 1;
-		xferfunc = HWIF(drive)->atapi_output_bytes;
+		xferfunc = hwif->output_data;
 	} else {
 		write = 0;
-		xferfunc = HWIF(drive)->atapi_input_bytes;
+		xferfunc = hwif->input_data;
 	}
 
 	/* transfer data */
@@ -1061,7 +1049,7 @@
 		if (blen > thislen)
 			blen = thislen;
 
-		xferfunc(drive, ptr, blen);
+		xferfunc(drive, NULL, ptr, blen);
 
 		thislen -= blen;
 		len -= blen;
@@ -1706,7 +1694,7 @@
 	long block = (long)rq->hard_sector / (hard_sect >> 9);
 	unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
 
-	memset(rq->cmd, 0, sizeof(rq->cmd));
+	memset(rq->cmd, 0, BLK_MAX_CDB);
 
 	if (rq_data_dir(rq) == READ)
 		rq->cmd[0] = GPCMD_READ_10;
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 6ed7ca0..6490a2d 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -326,7 +326,7 @@
 
 		printk(KERN_ERR "  The failed \"%s\" packet command "
 				"was: \n  \"", s);
-		for (i = 0; i < sizeof(failed_command->cmd); i++)
+		for (i = 0; i < BLK_MAX_CDB; i++)
 			printk(KERN_CONT "%02x ", failed_command->cmd[i]);
 		printk(KERN_CONT "\"\n");
 	}
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index c352cf2..653b1ad 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -464,9 +464,10 @@
 
 	/* PRD table */
 	if (hwif->mmio)
-		writel(hwif->dmatable_dma, (void __iomem *)hwif->dma_prdtable);
+		writel(hwif->dmatable_dma,
+		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
 	else
-		outl(hwif->dmatable_dma, hwif->dma_prdtable);
+		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
 
 	/* specify r/w */
 	hwif->OUTB(reading, hwif->dma_command);
@@ -858,14 +859,8 @@
 
 	if (!hwif->dma_command)
 		hwif->dma_command	= hwif->dma_base + 0;
-	if (!hwif->dma_vendor1)
-		hwif->dma_vendor1	= hwif->dma_base + 1;
 	if (!hwif->dma_status)
 		hwif->dma_status	= hwif->dma_base + 2;
-	if (!hwif->dma_vendor3)
-		hwif->dma_vendor3	= hwif->dma_base + 3;
-	if (!hwif->dma_prdtable)
-		hwif->dma_prdtable	= hwif->dma_base + 4;
 
 	hwif->dma_ops = &sff_dma_ops;
 }
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 489079b..f05fbc2 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -231,6 +231,7 @@
 static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 				  unsigned int bcount, int direction)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = pc->rq;
 	struct req_iterator iter;
 	struct bio_vec *bvec;
@@ -246,9 +247,9 @@
 
 		data = bvec_kmap_irq(bvec, &flags);
 		if (direction)
-			drive->hwif->atapi_output_bytes(drive, data, count);
+			hwif->output_data(drive, NULL, data, count);
 		else
-			drive->hwif->atapi_input_bytes(drive, data, count);
+			hwif->input_data(drive, NULL, data, count);
 		bvec_kunmap_irq(data, &flags);
 
 		bcount -= count;
@@ -261,10 +262,7 @@
 	if (bcount) {
 		printk(KERN_ERR "%s: leftover data in %s, bcount == %d\n",
 				drive->name, __func__, bcount);
-		if (direction)
-			ide_atapi_write_zeros(drive, bcount);
-		else
-			ide_atapi_discard_data(drive, bcount);
+		ide_pad_transfer(drive, direction, bcount);
 	}
 }
 
@@ -490,7 +488,7 @@
 				printk(KERN_ERR "ide-floppy: The floppy wants "
 					"to send us more data than expected "
 					"- discarding data\n");
-				ide_atapi_discard_data(drive, bcount);
+				ide_pad_transfer(drive, 0, bcount);
 
 				ide_set_handler(drive,
 						&idefloppy_pc_intr,
@@ -503,12 +501,12 @@
 		}
 	}
 	if (pc->flags & PC_FLAG_WRITING)
-		xferfunc = hwif->atapi_output_bytes;
+		xferfunc = hwif->output_data;
 	else
-		xferfunc = hwif->atapi_input_bytes;
+		xferfunc = hwif->input_data;
 
 	if (pc->buf)
-		xferfunc(drive, pc->cur_pos, bcount);
+		xferfunc(drive, NULL, pc->cur_pos, bcount);
 	else
 		ide_floppy_io_buffers(drive, pc, bcount,
 				      !!(pc->flags & PC_FLAG_WRITING));
@@ -548,8 +546,10 @@
 
 	/* Set the interrupt routine */
 	ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);
+
 	/* Send the actual packet */
-	HWIF(drive)->atapi_output_bytes(drive, floppy->pc->c, 12);
+	hwif->output_data(drive, NULL, floppy->pc->c, 12);
+
 	return ide_started;
 }
 
@@ -569,7 +569,8 @@
 	idefloppy_floppy_t *floppy = drive->driver_data;
 
 	/* Send the actual packet */
-	HWIF(drive)->atapi_output_bytes(drive, floppy->pc->c, 12);
+	drive->hwif->output_data(drive, NULL, floppy->pc->c, 12);
+
 	/* Timeout for the packet command */
 	return IDEFLOPPY_WAIT_CMD;
 }
@@ -692,7 +693,7 @@
 		return ide_started;
 	} else {
 		/* Issue the packet command */
-		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
+		ide_execute_pkt_cmd(drive);
 		return (*pkt_xfer_routine) (drive);
 	}
 }
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 3a2d893..6965253 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -295,49 +295,6 @@
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
-void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct ide_io_ports *io_ports = &hwif->io_ports;
-	struct ide_taskfile *tf = &task->tf;
-
-	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
-		u16 data = hwif->INW(io_ports->data_addr);
-
-		tf->data = data & 0xff;
-		tf->hob_data = (data >> 8) & 0xff;
-	}
-
-	/* be sure we're looking at the low order bits */
-	hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr);
-
-	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
-		tf->nsect  = hwif->INB(io_ports->nsect_addr);
-	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
-		tf->lbal   = hwif->INB(io_ports->lbal_addr);
-	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
-		tf->lbam   = hwif->INB(io_ports->lbam_addr);
-	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
-		tf->lbah   = hwif->INB(io_ports->lbah_addr);
-	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
-		tf->device = hwif->INB(io_ports->device_addr);
-
-	if (task->tf_flags & IDE_TFLAG_LBA48) {
-		hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr);
-
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
-			tf->hob_feature = hwif->INB(io_ports->feature_addr);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
-			tf->hob_nsect   = hwif->INB(io_ports->nsect_addr);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
-			tf->hob_lbal    = hwif->INB(io_ports->lbal_addr);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
-			tf->hob_lbam    = hwif->INB(io_ports->lbam_addr);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
-			tf->hob_lbah    = hwif->INB(io_ports->lbah_addr);
-	}
-}
-
 /**
  *	ide_end_drive_cmd	-	end an explicit drive command
  *	@drive: command 
@@ -373,7 +330,7 @@
 			tf->error = err;
 			tf->status = stat;
 
-			ide_tf_read(drive, task);
+			drive->hwif->tf_read(drive, task);
 
 			if (task->tf_flags & IDE_TFLAG_DYN)
 				kfree(task);
@@ -422,7 +379,7 @@
 		u32 wcount = (i > 16) ? 16 : i;
 
 		i -= wcount;
-		HWIF(drive)->ata_input_data(drive, buffer, wcount);
+		drive->hwif->input_data(drive, NULL, buffer, wcount * 4);
 	}
 }
 
@@ -502,7 +459,8 @@
 
 	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
 		/* force an abort */
-		hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
+		hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
+			       hwif->io_ports.command_addr);
 
 	if (rq->errors >= ERROR_MAX) {
 		ide_kill_rq(drive, rq);
@@ -1592,8 +1550,7 @@
 
 void ide_init_drive_cmd (struct request *rq)
 {
-	memset(rq, 0, sizeof(*rq));
-	rq->ref_count = 1;
+	blk_rq_init(NULL, rq);
 }
 
 EXPORT_SYMBOL(ide_init_drive_cmd);
@@ -1679,7 +1636,23 @@
 	task.tf.lbam    = bcount & 0xff;
 	task.tf.lbah    = (bcount >> 8) & 0xff;
 
-	ide_tf_load(drive, &task);
+	ide_tf_dump(drive->name, &task.tf);
+	drive->hwif->tf_load(drive, &task);
 }
 
 EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
+
+void ide_pad_transfer(ide_drive_t *drive, int write, int len)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	u8 buf[4] = { 0 };
+
+	while (len > 0) {
+		if (write)
+			hwif->output_data(drive, NULL, buf, min(4, len));
+		else
+			hwif->input_data(drive, NULL, buf, min(4, len));
+		len -= 4;
+	}
+}
+EXPORT_SYMBOL_GPL(ide_pad_transfer);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 5425d30..57d9a9a 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -37,21 +37,6 @@
 	return (u8) inb(port);
 }
 
-static u16 ide_inw (unsigned long port)
-{
-	return (u16) inw(port);
-}
-
-static void ide_insw (unsigned long port, void *addr, u32 count)
-{
-	insw(port, addr, count);
-}
-
-static void ide_insl (unsigned long port, void *addr, u32 count)
-{
-	insl(port, addr, count);
-}
-
 static void ide_outb (u8 val, unsigned long port)
 {
 	outb(val, port);
@@ -62,32 +47,11 @@
 	outb(addr, port);
 }
 
-static void ide_outw (u16 val, unsigned long port)
-{
-	outw(val, port);
-}
-
-static void ide_outsw (unsigned long port, void *addr, u32 count)
-{
-	outsw(port, addr, count);
-}
-
-static void ide_outsl (unsigned long port, void *addr, u32 count)
-{
-	outsl(port, addr, count);
-}
-
 void default_hwif_iops (ide_hwif_t *hwif)
 {
 	hwif->OUTB	= ide_outb;
 	hwif->OUTBSYNC	= ide_outbsync;
-	hwif->OUTW	= ide_outw;
-	hwif->OUTSW	= ide_outsw;
-	hwif->OUTSL	= ide_outsl;
 	hwif->INB	= ide_inb;
-	hwif->INW	= ide_inw;
-	hwif->INSW	= ide_insw;
-	hwif->INSL	= ide_insl;
 }
 
 /*
@@ -99,21 +63,6 @@
 	return (u8) readb((void __iomem *) port);
 }
 
-static u16 ide_mm_inw (unsigned long port)
-{
-	return (u16) readw((void __iomem *) port);
-}
-
-static void ide_mm_insw (unsigned long port, void *addr, u32 count)
-{
-	__ide_mm_insw((void __iomem *) port, addr, count);
-}
-
-static void ide_mm_insl (unsigned long port, void *addr, u32 count)
-{
-	__ide_mm_insl((void __iomem *) port, addr, count);
-}
-
 static void ide_mm_outb (u8 value, unsigned long port)
 {
 	writeb(value, (void __iomem *) port);
@@ -124,34 +73,13 @@
 	writeb(value, (void __iomem *) port);
 }
 
-static void ide_mm_outw (u16 value, unsigned long port)
-{
-	writew(value, (void __iomem *) port);
-}
-
-static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
-{
-	__ide_mm_outsw((void __iomem *) port, addr, count);
-}
-
-static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
-{
-	__ide_mm_outsl((void __iomem *) port, addr, count);
-}
-
 void default_hwif_mmiops (ide_hwif_t *hwif)
 {
 	hwif->OUTB	= ide_mm_outb;
 	/* Most systems will need to override OUTBSYNC, alas however
 	   this one is controller specific! */
 	hwif->OUTBSYNC	= ide_mm_outbsync;
-	hwif->OUTW	= ide_mm_outw;
-	hwif->OUTSW	= ide_mm_outsw;
-	hwif->OUTSL	= ide_mm_outsl;
 	hwif->INB	= ide_mm_inb;
-	hwif->INW	= ide_mm_inw;
-	hwif->INSW	= ide_mm_insw;
-	hwif->INSL	= ide_mm_insl;
 }
 
 EXPORT_SYMBOL(default_hwif_mmiops);
@@ -175,6 +103,123 @@
 		port_ops->maskproc(drive, mask);
 }
 
+static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+	void (*tf_outb)(u8 addr, unsigned long port);
+	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+	if (mmio)
+		tf_outb = ide_mm_outb;
+	else
+		tf_outb = ide_outb;
+
+	if (task->tf_flags & IDE_TFLAG_FLAGGED)
+		HIHI = 0xFF;
+
+	ide_set_irq(drive, 1);
+
+	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
+		SELECT_MASK(drive, 0);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
+		u16 data = (tf->hob_data << 8) | tf->data;
+
+		if (mmio)
+			writew(data, (void __iomem *)io_ports->data_addr);
+		else
+			outw(data, io_ports->data_addr);
+	}
+
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+		tf_outb(tf->hob_feature, io_ports->feature_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+		tf_outb(tf->hob_nsect, io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+		tf_outb(tf->hob_lbal, io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+		tf_outb(tf->hob_lbam, io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+		tf_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+		tf_outb(tf->feature, io_ports->feature_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+		tf_outb(tf->nsect, io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+		tf_outb(tf->lbal, io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+		tf_outb(tf->lbam, io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+		tf_outb(tf->lbah, io_ports->lbah_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+		tf_outb((tf->device & HIHI) | drive->select.all,
+			 io_ports->device_addr);
+}
+
+static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+	void (*tf_outb)(u8 addr, unsigned long port);
+	u8 (*tf_inb)(unsigned long port);
+	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+	if (mmio) {
+		tf_outb = ide_mm_outb;
+		tf_inb  = ide_mm_inb;
+	} else {
+		tf_outb = ide_outb;
+		tf_inb  = ide_inb;
+	}
+
+	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+		u16 data;
+
+		if (mmio)
+			data = readw((void __iomem *)io_ports->data_addr);
+		else
+			data = inw(io_ports->data_addr);
+
+		tf->data = data & 0xff;
+		tf->hob_data = (data >> 8) & 0xff;
+	}
+
+	/* be sure we're looking at the low order bits */
+	tf_outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+		tf->nsect  = tf_inb(io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+		tf->lbal   = tf_inb(io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+		tf->lbam   = tf_inb(io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+		tf->lbah   = tf_inb(io_ports->lbah_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+		tf->device = tf_inb(io_ports->device_addr);
+
+	if (task->tf_flags & IDE_TFLAG_LBA48) {
+		tf_outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+			tf->hob_feature = tf_inb(io_ports->feature_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+			tf->hob_nsect   = tf_inb(io_ports->nsect_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+			tf->hob_lbal    = tf_inb(io_ports->lbal_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+			tf->hob_lbam    = tf_inb(io_ports->lbam_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+			tf->hob_lbah    = tf_inb(io_ports->lbah_addr);
+	}
+}
+
 /*
  * Some localbus EIDE interfaces require a special access sequence
  * when using 32-bit I/O instructions to transfer data.  We call this
@@ -182,109 +227,112 @@
  * of the sector count register location, with interrupts disabled
  * to ensure that the reads all happen together.
  */
-static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
+static void ata_vlb_sync(unsigned long port)
 {
-	(void) HWIF(drive)->INB(port);
-	(void) HWIF(drive)->INB(port);
-	(void) HWIF(drive)->INB(port);
+	(void)inb(port);
+	(void)inb(port);
+	(void)inb(port);
 }
 
 /*
  * This is used for most PIO data transfers *from* the IDE interface
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd len is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
  */
-static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
+static void ata_input_data(ide_drive_t *drive, struct request *rq,
+			   void *buf, unsigned int len)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct ide_io_ports *io_ports = &hwif->io_ports;
+	unsigned long data_addr = io_ports->data_addr;
 	u8 io_32bit = drive->io_32bit;
+	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+	len++;
 
 	if (io_32bit) {
-		if (io_32bit & 2) {
-			unsigned long flags;
+		unsigned long uninitialized_var(flags);
 
+		if ((io_32bit & 2) && !mmio) {
 			local_irq_save(flags);
-			ata_vlb_sync(drive, io_ports->nsect_addr);
-			hwif->INSL(io_ports->data_addr, buffer, wcount);
+			ata_vlb_sync(io_ports->nsect_addr);
+		}
+
+		if (mmio)
+			__ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
+		else
+			insl(data_addr, buf, len / 4);
+
+		if ((io_32bit & 2) && !mmio)
 			local_irq_restore(flags);
-		} else
-			hwif->INSL(io_ports->data_addr, buffer, wcount);
-	} else
-		hwif->INSW(io_ports->data_addr, buffer, wcount << 1);
+
+		if ((len & 3) >= 2) {
+			if (mmio)
+				__ide_mm_insw((void __iomem *)data_addr,
+						(u8 *)buf + (len & ~3), 1);
+			else
+				insw(data_addr, (u8 *)buf + (len & ~3), 1);
+		}
+	} else {
+		if (mmio)
+			__ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
+		else
+			insw(data_addr, buf, len / 2);
+	}
 }
 
 /*
  * This is used for most PIO data transfers *to* the IDE interface
  */
-static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
+static void ata_output_data(ide_drive_t *drive, struct request *rq,
+			    void *buf, unsigned int len)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct ide_io_ports *io_ports = &hwif->io_ports;
+	unsigned long data_addr = io_ports->data_addr;
 	u8 io_32bit = drive->io_32bit;
+	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
 
 	if (io_32bit) {
-		if (io_32bit & 2) {
-			unsigned long flags;
+		unsigned long uninitialized_var(flags);
 
+		if ((io_32bit & 2) && !mmio) {
 			local_irq_save(flags);
-			ata_vlb_sync(drive, io_ports->nsect_addr);
-			hwif->OUTSL(io_ports->data_addr, buffer, wcount);
+			ata_vlb_sync(io_ports->nsect_addr);
+		}
+
+		if (mmio)
+			__ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
+		else
+			outsl(data_addr, buf, len / 4);
+
+		if ((io_32bit & 2) && !mmio)
 			local_irq_restore(flags);
-		} else
-			hwif->OUTSL(io_ports->data_addr, buffer, wcount);
-	} else
-		hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1);
-}
 
-/*
- * The following routines are mainly used by the ATAPI drivers.
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd bytecount is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
-
-static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-
-	++bytecount;
-#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
-	if (MACH_IS_ATARI || MACH_IS_Q40) {
-		/* Atari has a byte-swapped IDE interface */
-		insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
-		return;
+		if ((len & 3) >= 2) {
+			if (mmio)
+				__ide_mm_outsw((void __iomem *)data_addr,
+						 (u8 *)buf + (len & ~3), 1);
+			else
+				outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+		}
+	} else {
+		if (mmio)
+			__ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
+		else
+			outsw(data_addr, buf, len / 2);
 	}
-#endif /* CONFIG_ATARI || CONFIG_Q40 */
-	hwif->ata_input_data(drive, buffer, bytecount / 4);
-	if ((bytecount & 0x03) >= 2)
-		hwif->INSW(hwif->io_ports.data_addr,
-			   (u8 *)buffer + (bytecount & ~0x03), 1);
-}
-
-static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
-{
-	ide_hwif_t *hwif = HWIF(drive);
-
-	++bytecount;
-#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
-	if (MACH_IS_ATARI || MACH_IS_Q40) {
-		/* Atari has a byte-swapped IDE interface */
-		outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2);
-		return;
-	}
-#endif /* CONFIG_ATARI || CONFIG_Q40 */
-	hwif->ata_output_data(drive, buffer, bytecount / 4);
-	if ((bytecount & 0x03) >= 2)
-		hwif->OUTSW(hwif->io_ports.data_addr,
-			    (u8 *)buffer + (bytecount & ~0x03), 1);
 }
 
 void default_hwif_transport(ide_hwif_t *hwif)
 {
-	hwif->ata_input_data		= ata_input_data;
-	hwif->ata_output_data		= ata_output_data;
-	hwif->atapi_input_bytes		= atapi_input_bytes;
-	hwif->atapi_output_bytes	= atapi_output_bytes;
+	hwif->tf_load	  = ide_tf_load;
+	hwif->tf_read	  = ide_tf_read;
+
+	hwif->input_data  = ata_input_data;
+	hwif->output_data = ata_output_data;
 }
 
 void ide_fix_driveid (struct hd_driveid *id)
@@ -577,6 +625,8 @@
 	{ "TSSTcorp CDDVDW SH-S202J"	, "SB01"	},
 	{ "TSSTcorp CDDVDW SH-S202N"	, "SB00"	},
 	{ "TSSTcorp CDDVDW SH-S202N"	, "SB01"	},
+	{ "TSSTcorp CDDVDW SH-S202H"	, "SB00"	},
+	{ "TSSTcorp CDDVDW SH-S202H"	, "SB01"	},
 	{ NULL				, NULL		}
 };
 
@@ -641,7 +691,7 @@
 	SELECT_MASK(drive, 1);
 	ide_set_irq(drive, 1);
 	msleep(50);
-	hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr);
+	hwif->OUTBSYNC(drive, WIN_IDENTIFY, hwif->io_ports.command_addr);
 	timeout = jiffies + WAIT_WORSTCASE;
 	do {
 		if (time_after(jiffies, timeout)) {
@@ -668,7 +718,7 @@
 		local_irq_restore(flags);
 		return 0;
 	}
-	hwif->ata_input_data(drive, id, SECTOR_WORDS);
+	hwif->input_data(drive, NULL, id, SECTOR_SIZE);
 	(void)ide_read_status(drive);	/* clear drive IRQ */
 	local_irq_enable();
 	local_irq_restore(flags);
@@ -849,9 +899,19 @@
 	ndelay(400);
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
-
 EXPORT_SYMBOL(ide_execute_command);
 
+void ide_execute_pkt_cmd(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ide_lock, flags);
+	hwif->OUTBSYNC(drive, WIN_PACKETCMD, hwif->io_ports.command_addr);
+	ndelay(400);
+	spin_unlock_irqrestore(&ide_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
 
 /* needed below */
 static ide_startstop_t do_reset1 (ide_drive_t *, int);
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 6f04ea3..47af80d 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -487,7 +487,7 @@
 	else
 		task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
 
-	ide_tf_read(drive, &task);
+	drive->hwif->tf_read(drive, &task);
 
 	if (lba48 || (tf->device & ATA_LBA))
 		printk(", LBAsect=%llu",
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 862f026..099a0fe 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -124,7 +124,7 @@
 
 	id = drive->id;
 	/* read 512 bytes of id info */
-	hwif->ata_input_data(drive, id, SECTOR_WORDS);
+	hwif->input_data(drive, NULL, id, SECTOR_SIZE);
 
 	drive->id_read = 1;
 	local_irq_enable();
@@ -293,7 +293,7 @@
 		hwif->OUTB(0, io_ports->feature_addr);
 
 	/* ask drive for ID */
-	hwif->OUTB(cmd, io_ports->command_addr);
+	hwif->OUTBSYNC(drive, cmd, io_ports->command_addr);
 
 	timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
 	timeout += jiffies;
@@ -480,7 +480,7 @@
 			msleep(50);
 			hwif->OUTB(drive->select.all, io_ports->device_addr);
 			msleep(50);
-			hwif->OUTB(WIN_SRST, io_ports->command_addr);
+			hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
 			(void)ide_busy_sleep(hwif);
 			rc = try_to_identify(drive, cmd);
 		}
@@ -516,7 +516,7 @@
 	printk("%s: enabling %s -- ", hwif->name, drive->id->model);
 	SELECT_DRIVE(drive);
 	msleep(50);
-	hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
+	hwif->OUTBSYNC(drive, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
 
 	if (ide_busy_sleep(hwif)) {
 		printk(KERN_CONT "failed (timeout)\n");
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 7b2f381..8d6ad81 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -822,6 +822,7 @@
 }
 
 static const struct file_operations ide_drivers_operations = {
+	.owner		= THIS_MODULE,
 	.open		= ide_drivers_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -830,16 +831,12 @@
 
 void proc_ide_create(void)
 {
-	struct proc_dir_entry *entry;
-
 	proc_ide_root = proc_mkdir("ide", NULL);
 
 	if (!proc_ide_root)
 		return;
 
-	entry = create_proc_entry("drivers", 0, proc_ide_root);
-	if (entry)
-		entry->proc_fops = &ide_drivers_operations;
+	proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
 }
 
 void proc_ide_destroy(void)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 29870c4..1e1f263 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -395,13 +395,13 @@
 		if (bh == NULL) {
 			printk(KERN_ERR "ide-tape: bh == NULL in "
 				"idetape_input_buffers\n");
-			ide_atapi_discard_data(drive, bcount);
+			ide_pad_transfer(drive, 0, bcount);
 			return;
 		}
 		count = min(
 			(unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
 			bcount);
-		HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
+		drive->hwif->input_data(drive, NULL, bh->b_data +
 					atomic_read(&bh->b_count), count);
 		bcount -= count;
 		atomic_add(count, &bh->b_count);
@@ -427,7 +427,7 @@
 			return;
 		}
 		count = min((unsigned int)pc->b_count, (unsigned int)bcount);
-		HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
+		drive->hwif->output_data(drive, NULL, pc->b_data, count);
 		bcount -= count;
 		pc->b_data += count;
 		pc->b_count -= count;
@@ -662,7 +662,7 @@
 
 static void idetape_init_rq(struct request *rq, u8 cmd)
 {
-	memset(rq, 0, sizeof(*rq));
+	blk_rq_init(NULL, rq);
 	rq->cmd_type = REQ_TYPE_SPECIAL;
 	rq->cmd[0] = cmd;
 }
@@ -871,7 +871,7 @@
 				printk(KERN_ERR "ide-tape: The tape wants to "
 					"send us more data than expected "
 					"- discarding data\n");
-				ide_atapi_discard_data(drive, bcount);
+				ide_pad_transfer(drive, 0, bcount);
 				ide_set_handler(drive, &idetape_pc_intr,
 						IDETAPE_WAIT_CMD, NULL);
 				return ide_started;
@@ -880,16 +880,16 @@
 				"data than expected - allowing transfer\n");
 		}
 		iobuf = &idetape_input_buffers;
-		xferfunc = hwif->atapi_input_bytes;
+		xferfunc = hwif->input_data;
 	} else {
 		iobuf = &idetape_output_buffers;
-		xferfunc = hwif->atapi_output_bytes;
+		xferfunc = hwif->output_data;
 	}
 
 	if (pc->bh)
 		iobuf(drive, pc, bcount);
 	else
-		xferfunc(drive, pc->cur_pos, bcount);
+		xferfunc(drive, NULL, pc->cur_pos, bcount);
 
 	/* Update the current position */
 	pc->xferred += bcount;
@@ -979,7 +979,8 @@
 		hwif->dma_ops->dma_start(drive);
 #endif
 	/* Send the actual packet */
-	HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
+	hwif->output_data(drive, NULL, pc->c, 12);
+
 	return ide_started;
 }
 
@@ -1055,7 +1056,7 @@
 				    IDETAPE_WAIT_CMD, NULL);
 		return ide_started;
 	} else {
-		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
+		ide_execute_pkt_cmd(drive);
 		return idetape_transfer_pc(drive);
 	}
 }
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9f9ad9f..0c908ca 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -33,60 +33,18 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+void ide_tf_dump(const char *s, struct ide_taskfile *tf)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	struct ide_io_ports *io_ports = &hwif->io_ports;
-	struct ide_taskfile *tf = &task->tf;
-	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
-
-	if (task->tf_flags & IDE_TFLAG_FLAGGED)
-		HIHI = 0xFF;
-
 #ifdef DEBUG
 	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
 		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
-		drive->name, tf->feature, tf->nsect, tf->lbal,
+		s, tf->feature, tf->nsect, tf->lbal,
 		tf->lbam, tf->lbah, tf->device, tf->command);
 	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
 		"lbam 0x%02x lbah 0x%02x\n",
-		drive->name, tf->hob_nsect, tf->hob_lbal,
+		s, tf->hob_nsect, tf->hob_lbal,
 		tf->hob_lbam, tf->hob_lbah);
 #endif
-
-	ide_set_irq(drive, 1);
-
-	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
-		SELECT_MASK(drive, 0);
-
-	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
-		hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);
-
-	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
-		hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
-		hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
-		hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
-		hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
-		hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);
-
-	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
-		hwif->OUTB(tf->feature, io_ports->feature_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
-		hwif->OUTB(tf->nsect, io_ports->nsect_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
-		hwif->OUTB(tf->lbal, io_ports->lbal_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
-		hwif->OUTB(tf->lbam, io_ports->lbam_addr);
-	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
-		hwif->OUTB(tf->lbah, io_ports->lbah_addr);
-
-	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
-		hwif->OUTB((tf->device & HIHI) | drive->select.all,
-			   io_ports->device_addr);
 }
 
 int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -149,8 +107,10 @@
 	if (task->tf_flags & IDE_TFLAG_FLAGGED)
 		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
 
-	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0)
-		ide_tf_load(drive, task);
+	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
+		ide_tf_dump(drive->name, tf);
+		hwif->tf_load(drive, task);
+	}
 
 	switch (task->data_phase) {
 	case TASKFILE_MULTI_OUT:
@@ -283,7 +243,8 @@
 	return stat;
 }
 
-static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
+static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
+			   unsigned int write)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct scatterlist *sg = hwif->sg_table;
@@ -323,9 +284,9 @@
 
 	/* do the actual data transfer */
 	if (write)
-		hwif->ata_output_data(drive, buf, SECTOR_WORDS);
+		hwif->output_data(drive, rq, buf, SECTOR_SIZE);
 	else
-		hwif->ata_input_data(drive, buf, SECTOR_WORDS);
+		hwif->input_data(drive, rq, buf, SECTOR_SIZE);
 
 	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
 #ifdef CONFIG_HIGHMEM
@@ -333,13 +294,14 @@
 #endif
 }
 
-static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
+static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
+			  unsigned int write)
 {
 	unsigned int nsect;
 
 	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
 	while (nsect--)
-		ide_pio_sector(drive, write);
+		ide_pio_sector(drive, rq, write);
 }
 
 static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
@@ -362,10 +324,10 @@
 	switch (drive->hwif->data_phase) {
 	case TASKFILE_MULTI_IN:
 	case TASKFILE_MULTI_OUT:
-		ide_pio_multi(drive, write);
+		ide_pio_multi(drive, rq, write);
 		break;
 	default:
-		ide_pio_sector(drive, write);
+		ide_pio_sector(drive, rq, write);
 		break;
 	}
 
@@ -532,8 +494,7 @@
 {
 	struct request rq;
 
-	memset(&rq, 0, sizeof(rq));
-	rq.ref_count = 1;
+	blk_rq_init(NULL, &rq);
 	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
 	rq.buffer = buf;
 
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 999584c..c758dcb 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -564,7 +564,7 @@
 	if (!(drive->dn % 2))
 		ide_acpi_get_timing(hwif);
 
-	memset(&rq, 0, sizeof(rq));
+	blk_rq_init(NULL, &rq);
 	memset(&rqpm, 0, sizeof(rqpm));
 	memset(&args, 0, sizeof(args));
 	rq.cmd_type = REQ_TYPE_PM_SUSPEND;
@@ -602,7 +602,7 @@
 
 	ide_acpi_exec_tfs(drive);
 
-	memset(&rq, 0, sizeof(rq));
+	blk_rq_init(NULL, &rq);
 	memset(&rqpm, 0, sizeof(rqpm));
 	memset(&args, 0, sizeof(args));
 	rq.cmd_type = REQ_TYPE_PM_RESUME;
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 56cdaa0..83555ca 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -44,6 +44,28 @@
 int falconide_intr_lock;
 EXPORT_SYMBOL(falconide_intr_lock);
 
+static void falconide_input_data(ide_drive_t *drive, struct request *rq,
+				 void *buf, unsigned int len)
+{
+	unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+	if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+		return insw(data_addr, buf, (len + 1) / 2);
+
+	insw_swapw(data_addr, buf, (len + 1) / 2);
+}
+
+static void falconide_output_data(ide_drive_t *drive, struct request *rq,
+				  void *buf, unsigned int len)
+{
+	unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+	if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+		return outsw(data_addr, buf, (len + 1) / 2);
+
+	outsw_swapw(data_addr, buf, (len + 1) / 2);
+}
+
 static void __init falconide_setup_ports(hw_regs_t *hw)
 {
 	int i;
@@ -90,6 +112,10 @@
 		ide_init_port_data(hwif, index);
 		ide_init_port_hw(hwif, &hw);
 
+		/* Atari has a byte-swapped IDE interface */
+		hwif->input_data  = falconide_input_data;
+		hwif->output_data = falconide_output_data;
+
 		ide_get_lock(NULL, NULL);
 		ide_device_add(idx, NULL);
 		ide_release_lock();
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 8279dc7..d3bc3f2 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -101,8 +101,10 @@
 
 	ide_init_port_hw(hwif, &hw);
 
-	if (mmio)
+	if (mmio) {
+		hwif->host_flags = IDE_HFLAG_MMIO;
 		default_hwif_mmiops(hwif);
+	}
 
 	idx[0] = hwif->index;
 
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index f921045..6f535d0 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -72,7 +72,27 @@
 	hw->ack_intr = ack_intr;
 }
 
+static void q40ide_input_data(ide_drive_t *drive, struct request *rq,
+			      void *buf, unsigned int len)
+{
+	unsigned long data_addr = drive->hwif->io_ports.data_addr;
 
+	if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+		return insw(data_addr, buf, (len + 1) / 2);
+
+	insw_swapw(data_addr, buf, (len + 1) / 2);
+}
+
+static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
+			       void *buf, unsigned int len)
+{
+	unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+	if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+		return outsw(data_addr, buf, (len + 1) / 2);
+
+	outsw_swapw(data_addr, buf, (len + 1) / 2);
+}
 
 /* 
  * the static array is needed to have the name reported in /proc/ioports,
@@ -123,6 +143,10 @@
 		ide_init_port_data(hwif, hwif->index);
 		ide_init_port_hw(hwif, &hw);
 
+		/* Q40 has a byte-swapped IDE interface */
+		hwif->input_data  = q40ide_input_data;
+		hwif->output_data = q40ide_output_data;
+
 		idx[i] = hwif->index;
 	}
     }
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index e0cf5e2..1a6c27b 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -48,8 +48,6 @@
 
 static _auide_hwif auide_hwif;
 
-static int auide_ddma_init(_auide_hwif *auide);
-
 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
 
 void auide_insw(unsigned long port, void *addr, u32 count)
@@ -88,6 +86,17 @@
 	ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
 }
 
+static void au1xxx_input_data(ide_drive_t *drive, struct request *rq,
+			      void *buf, unsigned int len)
+{
+	auide_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
+
+static void au1xxx_output_data(ide_drive_t *drive, struct request *rq,
+			       void *buf, unsigned int len)
+{
+	auide_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
 #endif
 
 static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -598,8 +607,8 @@
 	*/
 
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA	
-	hwif->INSW                      = auide_insw;
-	hwif->OUTSW                     = auide_outsw;
+	hwif->input_data  = au1xxx_input_data;
+	hwif->output_data = au1xxx_output_data;
 #endif
 	hwif->select_data               = 0;    /* no chipset-specific code */
 	hwif->config_data               = 0;    /* no chipset-specific code */
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 6894762..712d17b 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -109,6 +109,7 @@
 	base = ioremap(offset, size);
 
 	/* Setup MMIO ops.  */
+	hwif->host_flags = IDE_HFLAG_MMIO;
 	default_hwif_mmiops(hwif);
 
 	hwif->chipset = ide_generic;
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index c13e299..fec4955 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -63,6 +63,48 @@
 	return inb(port);
 }
 
+static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+
+	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+		u16 data = inw(io_ports->data_addr);
+
+		tf->data = data & 0xff;
+		tf->hob_data = (data >> 8) & 0xff;
+	}
+
+	/* be sure we're looking at the low order bits */
+	outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+		tf->nsect  = inb(io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+		tf->lbal   = inb(io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+		tf->lbam   = inb(io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+		tf->lbah   = inb(io_ports->lbah_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+		tf->device = superio_ide_inb(io_ports->device_addr);
+
+	if (task->tf_flags & IDE_TFLAG_LBA48) {
+		outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+			tf->hob_feature = inb(io_ports->feature_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+			tf->hob_nsect   = inb(io_ports->nsect_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+			tf->hob_lbal    = inb(io_ports->lbal_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+			tf->hob_lbam    = inb(io_ports->lbam_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+			tf->hob_lbah    = inb(io_ports->lbah_addr);
+	}
+}
+
 static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
 {
 	struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -80,6 +122,8 @@
 	tmp = superio_ide_inb(superio_ide_dma_status[port]);
 	outb(tmp | 0x66, superio_ide_dma_status[port]);
 
+	hwif->tf_read = superio_tf_read;
+
 	/* We need to override inb to workaround a SuperIO errata */
 	hwif->INB = superio_ide_inb;
 }
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index ec9bd7b..070df8a 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -83,8 +83,8 @@
 {
 	u8 value;
 
-	outb(index, hwif->dma_vendor1);
-	value = inb(hwif->dma_vendor3);
+	outb(index, hwif->dma_base + 1);
+	value = inb(hwif->dma_base + 3);
 
 	DBG("index[%02X] value[%02X]\n", index, value);
 	return value;
@@ -97,8 +97,8 @@
  */
 static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
 {
-	outb(index, hwif->dma_vendor1);
-	outb(value, hwif->dma_vendor3);
+	outb(index, hwif->dma_base + 1);
+	outb(value, hwif->dma_base + 3);
 	DBG("index[%02X] value[%02X]\n", index, value);
 }
 
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 21c5dd2..f04738d 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -250,6 +250,7 @@
 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
 	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on Acer Aspire 2023WLMi */
+	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
 	/* end marker */
 	{ 0, }
 };
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index ad7cdf9..910fb00 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -126,12 +126,6 @@
 	return (u8)data;
 }
 
-static u16 scc_ide_inw(unsigned long port)
-{
-	u32 data = in_be32((void*)port);
-	return (u16)data;
-}
-
 static void scc_ide_insw(unsigned long port, void *addr, u32 count)
 {
 	u16 *ptr = (u16 *)addr;
@@ -154,11 +148,6 @@
 	out_be32((void*)port, addr);
 }
 
-static void scc_ide_outw(u16 addr, unsigned long port)
-{
-	out_be32((void*)port, addr);
-}
-
 static void
 scc_ide_outbsync(ide_drive_t * drive, u8 addr, unsigned long port)
 {
@@ -271,6 +260,20 @@
 	out_be32((void __iomem *)udenvt_port, reg);
 }
 
+static void scc_dma_host_set(ide_drive_t *drive, int on)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	u8 unit = (drive->select.b.unit & 0x01);
+	u8 dma_stat = scc_ide_inb(hwif->dma_status);
+
+	if (on)
+		dma_stat |= (1 << (5 + unit));
+	else
+		dma_stat &= ~(1 << (5 + unit));
+
+	scc_ide_outb(dma_stat, hwif->dma_status);
+}
+
 /**
  *	scc_ide_dma_setup	-	begin a DMA phase
  *	@drive: target device
@@ -301,7 +304,7 @@
 	}
 
 	/* PRD table */
-	out_be32((void __iomem *)hwif->dma_prdtable, hwif->dmatable_dma);
+	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
 
 	/* specify r/w */
 	out_be32((void __iomem *)hwif->dma_command, reading);
@@ -315,13 +318,45 @@
 	return 0;
 }
 
+static void scc_dma_start(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	u8 dma_cmd = scc_ide_inb(hwif->dma_command);
+
+	/* start DMA */
+	scc_ide_outb(dma_cmd | 1, hwif->dma_command);
+	hwif->dma = 1;
+	wmb();
+}
+
+static int __scc_dma_end(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	u8 dma_stat, dma_cmd;
+
+	drive->waiting_for_dma = 0;
+	/* get DMA command mode */
+	dma_cmd = scc_ide_inb(hwif->dma_command);
+	/* stop DMA */
+	scc_ide_outb(dma_cmd & ~1, hwif->dma_command);
+	/* get DMA status */
+	dma_stat = scc_ide_inb(hwif->dma_status);
+	/* clear the INTR & ERROR bits */
+	scc_ide_outb(dma_stat | 6, hwif->dma_status);
+	/* purge DMA mappings */
+	ide_destroy_dmatable(drive);
+	/* verify good DMA status */
+	hwif->dma = 0;
+	wmb();
+	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
+}
 
 /**
  *	scc_dma_end	-	Stop DMA
  *	@drive: IDE drive
  *
  *	Check and clear INT Status register.
- *      Then call __ide_dma_end().
+ *	Then call __scc_dma_end().
  */
 
 static int scc_dma_end(ide_drive_t *drive)
@@ -425,7 +460,7 @@
 		break;
 	}
 
-	dma_stat = __ide_dma_end(drive);
+	dma_stat = __scc_dma_end(drive);
 	if (data_loss)
 		dma_stat |= 2; /* emulate DMA error (to retry command) */
 	return dma_stat;
@@ -618,6 +653,122 @@
 	return rc;
 }
 
+static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+	if (task->tf_flags & IDE_TFLAG_FLAGGED)
+		HIHI = 0xFF;
+
+	ide_set_irq(drive, 1);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
+		out_be32((void *)io_ports->data_addr,
+			 (tf->hob_data << 8) | tf->data);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+		scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+		scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+		scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+		scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+		scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+		scc_ide_outb(tf->feature, io_ports->feature_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+		scc_ide_outb(tf->nsect, io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+		scc_ide_outb(tf->lbal, io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+		scc_ide_outb(tf->lbam, io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+		scc_ide_outb(tf->lbah, io_ports->lbah_addr);
+
+	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+		scc_ide_outb((tf->device & HIHI) | drive->select.all,
+			     io_ports->device_addr);
+}
+
+static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+	struct ide_io_ports *io_ports = &drive->hwif->io_ports;
+	struct ide_taskfile *tf = &task->tf;
+
+	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+		u16 data = (u16)in_be32((void *)io_ports->data_addr);
+
+		tf->data = data & 0xff;
+		tf->hob_data = (data >> 8) & 0xff;
+	}
+
+	/* be sure we're looking at the low order bits */
+	scc_ide_outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+		tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+		tf->lbal   = scc_ide_inb(io_ports->lbal_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+		tf->lbam   = scc_ide_inb(io_ports->lbam_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+		tf->lbah   = scc_ide_inb(io_ports->lbah_addr);
+	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+		tf->device = scc_ide_inb(io_ports->device_addr);
+
+	if (task->tf_flags & IDE_TFLAG_LBA48) {
+		scc_ide_outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+			tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+			tf->hob_nsect   = scc_ide_inb(io_ports->nsect_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+			tf->hob_lbal    = scc_ide_inb(io_ports->lbal_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+			tf->hob_lbam    = scc_ide_inb(io_ports->lbam_addr);
+		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+			tf->hob_lbah    = scc_ide_inb(io_ports->lbah_addr);
+	}
+}
+
+static void scc_input_data(ide_drive_t *drive, struct request *rq,
+			   void *buf, unsigned int len)
+{
+	unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+	len++;
+
+	if (drive->io_32bit) {
+		scc_ide_insl(data_addr, buf, len / 4);
+
+		if ((len & 3) >= 2)
+			scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
+	} else
+		scc_ide_insw(data_addr, buf, len / 2);
+}
+
+static void scc_output_data(ide_drive_t *drive, struct request *rq,
+			    void *buf, unsigned int len)
+{
+	unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+	len++;
+
+	if (drive->io_32bit) {
+		scc_ide_outsl(data_addr, buf, len / 4);
+
+		if ((len & 3) >= 2)
+			scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+	} else
+		scc_ide_outsw(data_addr, buf, len / 2);
+}
+
 /**
  *	init_mmio_iops_scc	-	set up the iops for MMIO
  *	@hwif: interface to set up
@@ -632,15 +783,15 @@
 
 	ide_set_hwifdata(hwif, ports);
 
+	hwif->tf_load = scc_tf_load;
+	hwif->tf_read = scc_tf_read;
+
+	hwif->input_data  = scc_input_data;
+	hwif->output_data = scc_output_data;
+
 	hwif->INB = scc_ide_inb;
-	hwif->INW = scc_ide_inw;
-	hwif->INSW = scc_ide_insw;
-	hwif->INSL = scc_ide_insl;
 	hwif->OUTB = scc_ide_outb;
 	hwif->OUTBSYNC = scc_ide_outbsync;
-	hwif->OUTW = scc_ide_outw;
-	hwif->OUTSW = scc_ide_outsw;
-	hwif->OUTSL = scc_ide_outsl;
 
 	hwif->dma_base = dma_base;
 	hwif->config_data = ports->ctl;
@@ -687,7 +838,6 @@
 
 	hwif->dma_command = hwif->dma_base;
 	hwif->dma_status = hwif->dma_base + 0x04;
-	hwif->dma_prdtable = hwif->dma_base + 0x08;
 
 	/* PTERADD */
 	out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
@@ -706,10 +856,10 @@
 };
 
 static const struct ide_dma_ops scc_dma_ops = {
-	.dma_host_set		= ide_dma_host_set,
+	.dma_host_set		= scc_dma_host_set,
 	.dma_setup		= scc_dma_setup,
 	.dma_exec_cmd		= ide_dma_exec_cmd,
-	.dma_start		= ide_dma_start,
+	.dma_start		= scc_dma_start,
 	.dma_end		= scc_dma_end,
 	.dma_test_irq		= scc_dma_test_irq,
 	.dma_lost_irq		= ide_dma_lost_irq,
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 63e28f4..16a0bce 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -573,6 +573,7 @@
 	.init_dma		= ide_dma_sgiioc4,
 	.port_ops		= &sgiioc4_port_ops,
 	.dma_ops		= &sgiioc4_dma_ops,
+	.host_flags		= IDE_HFLAG_MMIO,
 	.mwdma_mask		= ATA_MWDMA2_ONLY,
 };
 
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index c2040a0..4cf8fc5 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -1,8 +1,8 @@
 /*
  * Copyright (C) 2001-2002	Andre Hedrick <andre@linux-ide.org>
  * Copyright (C) 2003		Red Hat <alan@redhat.com>
- * Copyright (C) 2007		MontaVista Software, Inc.
- * Copyright (C) 2007		Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2008	MontaVista Software, Inc.
+ * Copyright (C) 2007-2008	Bartlomiej Zolnierkiewicz
  *
  *  May be copied or modified under the terms of the GNU General Public License
  *
@@ -17,10 +17,10 @@
  *
  *  FAQ Items:
  *	If you are using Marvell SATA-IDE adapters with Maxtor drives
- *	ensure the system is set up for ATA100/UDMA5 not UDMA6.
+ *	ensure the system is set up for ATA100/UDMA5, not UDMA6.
  *
  *	If you are using WD drives with SATA bridges you must set the
- *	drive to "Single". "Master" will hang
+ *	drive to "Single". "Master" will hang.
  *
  *	If you have strange problems with nVidia chipset systems please
  *	see the SI support documentation and update your system BIOS
@@ -42,25 +42,24 @@
 #include <linux/hdreg.h>
 #include <linux/ide.h>
 #include <linux/init.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 /**
  *	pdev_is_sata		-	check if device is SATA
  *	@pdev:	PCI device to check
- *	
+ *
  *	Returns true if this is a SATA controller
  */
- 
+
 static int pdev_is_sata(struct pci_dev *pdev)
 {
 #ifdef CONFIG_BLK_DEV_IDE_SATA
-	switch(pdev->device) {
-		case PCI_DEVICE_ID_SII_3112:
-		case PCI_DEVICE_ID_SII_1210SA:
-			return 1;
-		case PCI_DEVICE_ID_SII_680:
-			return 0;
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_SII_3112:
+	case PCI_DEVICE_ID_SII_1210SA:
+		return 1;
+	case PCI_DEVICE_ID_SII_680:
+		return 0;
 	}
 	BUG();
 #endif
@@ -70,10 +69,10 @@
 /**
  *	is_sata			-	check if hwif is SATA
  *	@hwif:	interface to check
- *	
+ *
  *	Returns true if this is a SATA controller
  */
- 
+
 static inline int is_sata(ide_hwif_t *hwif)
 {
 	return pdev_is_sata(to_pci_dev(hwif->dev));
@@ -86,21 +85,22 @@
  *
  *	Turn a config register offset into the right address in either
  *	PCI space or MMIO space to access the control register in question
- *	Thankfully this is a configuration operation so isnt performance
- *	criticial. 
+ *	Thankfully this is a configuration operation, so isn't performance
+ *	critical.
  */
- 
+
 static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
 {
 	unsigned long base = (unsigned long)hwif->hwif_data;
+
 	base += 0xA0 + r;
-	if(hwif->mmio)
-		base += (hwif->channel << 6);
+	if (hwif->mmio)
+		base += hwif->channel << 6;
 	else
-		base += (hwif->channel << 4);
+		base += hwif->channel << 4;
 	return base;
 }
-	
+
 /**
  *	siimage_seldev		-	return register base
  *	@hwif: interface
@@ -110,20 +110,69 @@
  *	PCI space or MMIO space to access the control register in question
  *	including accounting for the unit shift.
  */
- 
+
 static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
 {
 	ide_hwif_t *hwif	= HWIF(drive);
-	unsigned long base = (unsigned long)hwif->hwif_data;
+	unsigned long base	= (unsigned long)hwif->hwif_data;
+
 	base += 0xA0 + r;
-	if(hwif->mmio)
-		base += (hwif->channel << 6);
+	if (hwif->mmio)
+		base += hwif->channel << 6;
 	else
-		base += (hwif->channel << 4);
+		base += hwif->channel << 4;
 	base |= drive->select.b.unit << drive->select.b.unit;
 	return base;
 }
 
+static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
+{
+	u8 tmp = 0;
+
+	if (pci_get_drvdata(dev))
+		tmp = readb((void __iomem *)addr);
+	else
+		pci_read_config_byte(dev, addr, &tmp);
+
+	return tmp;
+}
+
+static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
+{
+	u16 tmp = 0;
+
+	if (pci_get_drvdata(dev))
+		tmp = readw((void __iomem *)addr);
+	else
+		pci_read_config_word(dev, addr, &tmp);
+
+	return tmp;
+}
+
+static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
+{
+	if (pci_get_drvdata(dev))
+		writeb(val, (void __iomem *)addr);
+	else
+		pci_write_config_byte(dev, addr, val);
+}
+
+static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
+{
+	if (pci_get_drvdata(dev))
+		writew(val, (void __iomem *)addr);
+	else
+		pci_write_config_word(dev, addr, val);
+}
+
+static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
+{
+	if (pci_get_drvdata(dev))
+		writel(val, (void __iomem *)addr);
+	else
+		pci_write_config_dword(dev, addr, val);
+}
+
 /**
  *	sil_udma_filter		-	compute UDMA mask
  *	@drive: IDE device
@@ -136,24 +185,26 @@
 
 static u8 sil_pata_udma_filter(ide_drive_t *drive)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-	unsigned long base = (unsigned long) hwif->hwif_data;
-	u8 mask = 0, scsc = 0;
+	ide_hwif_t *hwif	= drive->hwif;
+	struct pci_dev *dev	= to_pci_dev(hwif->dev);
+	unsigned long base	= (unsigned long)hwif->hwif_data;
+	u8 scsc, mask		= 0;
 
-	if (hwif->mmio)
-		scsc = hwif->INB(base + 0x4A);
-	else
-		pci_read_config_byte(dev, 0x8A, &scsc);
+	scsc = sil_ioread8(dev, base + (hwif->mmio ? 0x4A : 0x8A));
 
-	if ((scsc & 0x30) == 0x10)	/* 133 */
+	switch (scsc & 0x30) {
+	case 0x10:	/* 133 */
 		mask = ATA_UDMA6;
-	else if ((scsc & 0x30) == 0x20)	/* 2xPCI */
+		break;
+	case 0x20:	/* 2xPCI */
 		mask = ATA_UDMA6;
-	else if ((scsc & 0x30) == 0x00)	/* 100 */
+		break;
+	case 0x00:	/* 100 */
 		mask = ATA_UDMA5;
-	else 	/* Disabled ? */
+		break;
+	default: 	/* Disabled ? */
 		BUG();
+	}
 
 	return mask;
 }
@@ -175,15 +226,16 @@
 
 static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
 {
-	const u16 tf_speed[]	= { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
-	const u16 data_speed[]	= { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
+	static const u16 tf_speed[]   = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
+	static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
 
 	ide_hwif_t *hwif	= HWIF(drive);
+	struct pci_dev *dev	= to_pci_dev(hwif->dev);
 	ide_drive_t *pair	= ide_get_paired_drive(drive);
 	u32 speedt		= 0;
 	u16 speedp		= 0;
 	unsigned long addr	= siimage_seldev(drive, 0x04);
-	unsigned long tfaddr	= siimage_selreg(hwif, 0x02);
+	unsigned long tfaddr	= siimage_selreg(hwif,	0x02);
 	unsigned long base	= (unsigned long)hwif->hwif_data;
 	u8 tf_pio		= pio;
 	u8 addr_mask		= hwif->channel ? (hwif->mmio ? 0xF4 : 0x84)
@@ -203,36 +255,20 @@
 	speedp = data_speed[pio];
 	speedt = tf_speed[tf_pio];
 
-	if (hwif->mmio) {
-		hwif->OUTW(speedp, addr);
-		hwif->OUTW(speedt, tfaddr);
-		/* Now set up IORDY */
-		if (pio > 2)
-			hwif->OUTW(hwif->INW(tfaddr-2)|0x200, tfaddr-2);
-		else
-			hwif->OUTW(hwif->INW(tfaddr-2)&~0x200, tfaddr-2);
+	sil_iowrite16(dev, speedp, addr);
+	sil_iowrite16(dev, speedt, tfaddr);
 
-		mode = hwif->INB(base + addr_mask);
-		mode &= ~(unit ? 0x30 : 0x03);
-		mode |= (unit ? 0x10 : 0x01);
-		hwif->OUTB(mode, base + addr_mask);
-	} else {
-		struct pci_dev *dev = to_pci_dev(hwif->dev);
+	/* now set up IORDY */
+	speedp = sil_ioread16(dev, tfaddr - 2);
+	speedp &= ~0x200;
+	if (pio > 2)
+		speedp |= 0x200;
+	sil_iowrite16(dev, speedp, tfaddr - 2);
 
-		pci_write_config_word(dev, addr, speedp);
-		pci_write_config_word(dev, tfaddr, speedt);
-		pci_read_config_word(dev, tfaddr - 2, &speedp);
-		speedp &= ~0x200;
-		/* Set IORDY for mode 3 or 4 */
-		if (pio > 2)
-			speedp |= 0x200;
-		pci_write_config_word(dev, tfaddr - 2, speedp);
-
-		pci_read_config_byte(dev, addr_mask, &mode);
-		mode &= ~(unit ? 0x30 : 0x03);
-		mode |= (unit ? 0x10 : 0x01);
-		pci_write_config_byte(dev, addr_mask, mode);
-	}
+	mode = sil_ioread8(dev, base + addr_mask);
+	mode &= ~(unit ? 0x30 : 0x03);
+	mode |= unit ? 0x10 : 0x01;
+	sil_iowrite8(dev, mode, base + addr_mask);
 }
 
 /**
@@ -245,59 +281,45 @@
 
 static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
 {
-	u8 ultra6[]		= { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
-	u8 ultra5[]		= { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
-	u16 dma[]		= { 0x2208, 0x10C2, 0x10C1 };
+	static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
+	static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
+	static const u16 dma[]	 = { 0x2208, 0x10C2, 0x10C1 };
 
 	ide_hwif_t *hwif	= HWIF(drive);
 	struct pci_dev *dev	= to_pci_dev(hwif->dev);
 	u16 ultra = 0, multi	= 0;
 	u8 mode = 0, unit	= drive->select.b.unit;
 	unsigned long base	= (unsigned long)hwif->hwif_data;
-	u8 scsc = 0, addr_mask	= ((hwif->channel) ?
-				    ((hwif->mmio) ? 0xF4 : 0x84) :
-				    ((hwif->mmio) ? 0xB4 : 0x80));
-				    
+	u8 scsc = 0, addr_mask	= hwif->channel ?
+					(hwif->mmio ? 0xF4 : 0x84) :
+					(hwif->mmio ? 0xB4 : 0x80);
 	unsigned long ma	= siimage_seldev(drive, 0x08);
 	unsigned long ua	= siimage_seldev(drive, 0x0C);
 
-	if (hwif->mmio) {
-		scsc = hwif->INB(base + 0x4A);
-		mode = hwif->INB(base + addr_mask);
-		multi = hwif->INW(ma);
-		ultra = hwif->INW(ua);
-	} else {
-		pci_read_config_byte(dev, 0x8A, &scsc);
-		pci_read_config_byte(dev, addr_mask, &mode);
-		pci_read_config_word(dev, ma, &multi);
-		pci_read_config_word(dev, ua, &ultra);
-	}
+	scsc  = sil_ioread8 (dev, base + (hwif->mmio ? 0x4A : 0x8A));
+	mode  = sil_ioread8 (dev, base + addr_mask);
+	multi = sil_ioread16(dev, ma);
+	ultra = sil_ioread16(dev, ua);
 
-	mode &= ~((unit) ? 0x30 : 0x03);
+	mode  &= ~(unit ? 0x30 : 0x03);
 	ultra &= ~0x3F;
 	scsc = ((scsc & 0x30) == 0x00) ? 0 : 1;
 
 	scsc = is_sata(hwif) ? 1 : scsc;
 
 	if (speed >= XFER_UDMA_0) {
-		multi = dma[2];
-		ultra |= (scsc ? ultra6[speed - XFER_UDMA_0] :
-				 ultra5[speed - XFER_UDMA_0]);
-		mode |= (unit ? 0x30 : 0x03);
+		multi  = dma[2];
+		ultra |= scsc ? ultra6[speed - XFER_UDMA_0] :
+				ultra5[speed - XFER_UDMA_0];
+		mode  |= unit ? 0x30 : 0x03;
 	} else {
 		multi = dma[speed - XFER_MW_DMA_0];
-		mode |= (unit ? 0x20 : 0x02);
+		mode |= unit ? 0x20 : 0x02;
 	}
 
-	if (hwif->mmio) {
-		hwif->OUTB(mode, base + addr_mask);
-		hwif->OUTW(multi, ma);
-		hwif->OUTW(ultra, ua);
-	} else {
-		pci_write_config_byte(dev, addr_mask, mode);
-		pci_write_config_word(dev, ma, multi);
-		pci_write_config_word(dev, ua, ultra);
-	}
+	sil_iowrite8 (dev, mode, base + addr_mask);
+	sil_iowrite16(dev, multi, ma);
+	sil_iowrite16(dev, ultra, ua);
 }
 
 /* returns 1 if dma irq issued, 0 otherwise */
@@ -309,13 +331,14 @@
 	unsigned long addr	= siimage_selreg(hwif, 1);
 
 	/* return 1 if INTR asserted */
-	if ((hwif->INB(hwif->dma_status) & 4) == 4)
+	if (hwif->INB(hwif->dma_status) & 4)
 		return 1;
 
 	/* return 1 if Device INTR asserted */
 	pci_read_config_byte(dev, addr, &dma_altstat);
 	if (dma_altstat & 8)
-		return 0;	//return 1;
+		return 0;	/* return 1; */
+
 	return 0;
 }
 
@@ -335,9 +358,9 @@
 		= (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET];
 
 	if (sata_error_addr) {
-		unsigned long base = (unsigned long)hwif->hwif_data;
-		u32 ext_stat = readl((void __iomem *)(base + 0x10));
-		u8 watchdog = 0;
+		unsigned long base	= (unsigned long)hwif->hwif_data;
+		u32 ext_stat		= readl((void __iomem *)(base + 0x10));
+		u8 watchdog		= 0;
 
 		if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) {
 			u32 sata_error = readl(sata_error_addr);
@@ -346,25 +369,22 @@
 			watchdog = (sata_error & 0x00680000) ? 1 : 0;
 			printk(KERN_WARNING "%s: sata_error = 0x%08x, "
 				"watchdog = %d, %s\n",
-				drive->name, sata_error, watchdog,
-				__func__);
-
-		} else {
+				drive->name, sata_error, watchdog, __func__);
+		} else
 			watchdog = (ext_stat & 0x8000) ? 1 : 0;
-		}
-		ext_stat >>= 16;
 
+		ext_stat >>= 16;
 		if (!(ext_stat & 0x0404) && !watchdog)
 			return 0;
 	}
 
 	/* return 1 if INTR asserted */
-	if ((readb((void __iomem *)hwif->dma_status) & 0x04) == 0x04)
+	if (readb((void __iomem *)hwif->dma_status) & 0x04)
 		return 1;
 
 	/* return 1 if Device INTR asserted */
-	if ((readb((void __iomem *)addr) & 8) == 8)
-		return 0;	//return 1;
+	if (readb((void __iomem *)addr) & 8)
+		return 0;	/* return 1; */
 
 	return 0;
 }
@@ -423,63 +443,33 @@
 }
 
 /**
- *	proc_reports_siimage		-	add siimage controller to proc
- *	@dev: PCI device
- *	@clocking: SCSC value
- *	@name: controller name
- *
- *	Report the clocking mode of the controller and add it to
- *	the /proc interface layer
- */
- 
-static void proc_reports_siimage (struct pci_dev *dev, u8 clocking, const char *name)
-{
-	if (!pdev_is_sata(dev)) {
-		printk(KERN_INFO "%s: BASE CLOCK ", name);
-		clocking &= 0x03;
-		switch (clocking) {
-			case 0x03: printk("DISABLED!\n"); break;
-			case 0x02: printk("== 2X PCI\n"); break;
-			case 0x01: printk("== 133\n"); break;
-			case 0x00: printk("== 100\n"); break;
-		}
-	}
-}
-
-/**
- *	setup_mmio_siimage	-	switch an SI controller into MMIO
+ *	setup_mmio_siimage	-	switch controller into MMIO mode
  *	@dev: PCI device we are configuring
  *	@name: device name
  *
- *	Attempt to put the device into mmio mode. There are some slight
- *	complications here with certain systems where the mmio bar isnt
- *	mapped so we have to be sure we can fall back to I/O.
+ *	Attempt to put the device into MMIO mode. There are some slight
+ *	complications here with certain systems where the MMIO BAR isn't
+ *	mapped, so we have to be sure that we can fall back to I/O.
  */
- 
-static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
+
+static unsigned int setup_mmio_siimage(struct pci_dev *dev, const char *name)
 {
 	resource_size_t bar5	= pci_resource_start(dev, 5);
 	unsigned long barsize	= pci_resource_len(dev, 5);
-	u8 tmpbyte	= 0;
 	void __iomem *ioaddr;
-	u32 tmp, irq_mask;
 
 	/*
-	 *	Drop back to PIO if we can't map the mmio. Some
-	 *	systems seem to get terminally confused in the PCI
-	 *	spaces.
+	 *	Drop back to PIO if we can't map the MMIO. Some systems
+	 *	seem to get terminally confused in the PCI spaces.
 	 */
-	 
-	if(!request_mem_region(bar5, barsize, name))
-	{
-		printk(KERN_WARNING "siimage: IDE controller MMIO ports not available.\n");
+	if (!request_mem_region(bar5, barsize, name)) {
+		printk(KERN_WARNING "siimage: IDE controller MMIO ports not "
+				    "available.\n");
 		return 0;
 	}
-		
-	ioaddr = ioremap(bar5, barsize);
 
-	if (ioaddr == NULL)
-	{
+	ioaddr = ioremap(bar5, barsize);
+	if (ioaddr == NULL) {
 		release_mem_region(bar5, barsize);
 		return 0;
 	}
@@ -487,62 +477,6 @@
 	pci_set_master(dev);
 	pci_set_drvdata(dev, (void *) ioaddr);
 
-	if (pdev_is_sata(dev)) {
-		/* make sure IDE0/1 interrupts are not masked */
-		irq_mask = (1 << 22) | (1 << 23);
-		tmp = readl(ioaddr + 0x48);
-		if (tmp & irq_mask) {
-			tmp &= ~irq_mask;
-			writel(tmp, ioaddr + 0x48);
-			readl(ioaddr + 0x48); /* flush */
-		}
-		writel(0, ioaddr + 0x148);
-		writel(0, ioaddr + 0x1C8);
-	}
-
-	writeb(0, ioaddr + 0xB4);
-	writeb(0, ioaddr + 0xF4);
-	tmpbyte = readb(ioaddr + 0x4A);
-
-	switch(tmpbyte & 0x30) {
-		case 0x00:
-			/* In 100 MHz clocking, try and switch to 133 */
-			writeb(tmpbyte|0x10, ioaddr + 0x4A);
-			break;
-		case 0x10:
-			/* On 133Mhz clocking */
-			break;
-		case 0x20:
-			/* On PCIx2 clocking */
-			break;
-		case 0x30:
-			/* Clocking is disabled */
-			/* 133 clock attempt to force it on */
-			writeb(tmpbyte & ~0x20, ioaddr + 0x4A);
-			break;
-	}
-	
-	writeb(      0x72, ioaddr + 0xA1);
-	writew(    0x328A, ioaddr + 0xA2);
-	writel(0x62DD62DD, ioaddr + 0xA4);
-	writel(0x43924392, ioaddr + 0xA8);
-	writel(0x40094009, ioaddr + 0xAC);
-	writeb(      0x72, ioaddr + 0xE1);
-	writew(    0x328A, ioaddr + 0xE2);
-	writel(0x62DD62DD, ioaddr + 0xE4);
-	writel(0x43924392, ioaddr + 0xE8);
-	writel(0x40094009, ioaddr + 0xEC);
-
-	if (pdev_is_sata(dev)) {
-		writel(0xFFFF0000, ioaddr + 0x108);
-		writel(0xFFFF0000, ioaddr + 0x188);
-		writel(0x00680000, ioaddr + 0x148);
-		writel(0x00680000, ioaddr + 0x1C8);
-	}
-
-	tmpbyte = readb(ioaddr + 0x4A);
-
-	proc_reports_siimage(dev, (tmpbyte>>4), name);
 	return 1;
 }
 
@@ -552,55 +486,92 @@
  *	@name: device name
  *
  *	Perform the initial PCI set up for this device. Attempt to switch
- *	to 133MHz clocking if the system isn't already set up to do it.
+ *	to 133 MHz clocking if the system isn't already set up to do it.
  */
 
-static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev,
+						   const char *name)
 {
-	u8 rev = dev->revision, tmpbyte = 0, BA5_EN = 0;
+	unsigned long base, scsc_addr;
+	void __iomem *ioaddr = NULL;
+	u8 rev = dev->revision, tmp, BA5_EN;
 
 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
 
 	pci_read_config_byte(dev, 0x8A, &BA5_EN);
-	if ((BA5_EN & 0x01) || (pci_resource_start(dev, 5))) {
-		if (setup_mmio_siimage(dev, name)) {
-			return 0;
+
+	if ((BA5_EN & 0x01) || pci_resource_start(dev, 5))
+		if (setup_mmio_siimage(dev, name))
+			ioaddr = pci_get_drvdata(dev);
+
+	base = (unsigned long)ioaddr;
+
+	if (ioaddr && pdev_is_sata(dev)) {
+		u32 tmp32, irq_mask;
+
+		/* make sure IDE0/1 interrupts are not masked */
+		irq_mask = (1 << 22) | (1 << 23);
+		tmp32 = readl(ioaddr + 0x48);
+		if (tmp32 & irq_mask) {
+			tmp32 &= ~irq_mask;
+			writel(tmp32, ioaddr + 0x48);
+			readl(ioaddr + 0x48); /* flush */
 		}
+		writel(0, ioaddr + 0x148);
+		writel(0, ioaddr + 0x1C8);
 	}
 
-	pci_write_config_byte(dev, 0x80, 0x00);
-	pci_write_config_byte(dev, 0x84, 0x00);
-	pci_read_config_byte(dev, 0x8A, &tmpbyte);
-	switch(tmpbyte & 0x30) {
-		case 0x00:
-			/* 133 clock attempt to force it on */
-			pci_write_config_byte(dev, 0x8A, tmpbyte|0x10);
-		case 0x30:
-			/* if clocking is disabled */
-			/* 133 clock attempt to force it on */
-			pci_write_config_byte(dev, 0x8A, tmpbyte & ~0x20);
-		case 0x10:
-			/* 133 already */
-			break;
-		case 0x20:
-			/* BIOS set PCI x2 clocking */
-			break;
+	sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
+	sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);
+
+	scsc_addr = base ? (base + 0x4A) : 0x8A;
+	tmp = sil_ioread8(dev, scsc_addr);
+
+	switch (tmp & 0x30) {
+	case 0x00:
+		/* On 100 MHz clocking, try and switch to 133 MHz */
+		sil_iowrite8(dev, tmp | 0x10, scsc_addr);
+		break;
+	case 0x30:
+		/* Clocking is disabled, attempt to force 133 MHz clocking. */
+		sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
+	case 0x10:
+		/* On 133 MHz clocking. */
+		break;
+	case 0x20:
+		/* On PCIx2 clocking. */
+		break;
 	}
 
-	pci_read_config_byte(dev,   0x8A, &tmpbyte);
+	tmp = sil_ioread8(dev, scsc_addr);
 
-	pci_write_config_byte(dev,  0xA1, 0x72);
-	pci_write_config_word(dev,  0xA2, 0x328A);
-	pci_write_config_dword(dev, 0xA4, 0x62DD62DD);
-	pci_write_config_dword(dev, 0xA8, 0x43924392);
-	pci_write_config_dword(dev, 0xAC, 0x40094009);
-	pci_write_config_byte(dev,  0xB1, 0x72);
-	pci_write_config_word(dev,  0xB2, 0x328A);
-	pci_write_config_dword(dev, 0xB4, 0x62DD62DD);
-	pci_write_config_dword(dev, 0xB8, 0x43924392);
-	pci_write_config_dword(dev, 0xBC, 0x40094009);
+	sil_iowrite8 (dev,       0x72, base + 0xA1);
+	sil_iowrite16(dev,     0x328A, base + 0xA2);
+	sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
+	sil_iowrite32(dev, 0x43924392, base + 0xA8);
+	sil_iowrite32(dev, 0x40094009, base + 0xAC);
+	sil_iowrite8 (dev,       0x72, base ? (base + 0xE1) : 0xB1);
+	sil_iowrite16(dev,     0x328A, base ? (base + 0xE2) : 0xB2);
+	sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
+	sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
+	sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);
 
-	proc_reports_siimage(dev, (tmpbyte>>4), name);
+	if (base && pdev_is_sata(dev)) {
+		writel(0xFFFF0000, ioaddr + 0x108);
+		writel(0xFFFF0000, ioaddr + 0x188);
+		writel(0x00680000, ioaddr + 0x148);
+		writel(0x00680000, ioaddr + 0x1C8);
+	}
+
+	/* report the clocking mode of the controller */
+	if (!pdev_is_sata(dev)) {
+		static const char *clk_str[] =
+			{ "== 100", "== 133", "== 2X PCI", "DISABLED!" };
+
+		tmp >>= 4;
+		printk(KERN_INFO "%s: BASE CLOCK %s\n", name, clk_str[tmp & 3]);
+	}
+
 	return 0;
 }
 
@@ -610,8 +581,7 @@
  *
  *	The basic setup here is fairly simple, we can use standard MMIO
  *	operations. However we do have to set the taskfile register offsets
- *	by hand as there isnt a standard defined layout for them this
- *	time.
+ *	by hand as there isn't a standard defined layout for them this time.
  *
  *	The hardware supports buffered taskfiles and also some rather nice
  *	extended PRD tables. For better SI3112 support use the libata driver
@@ -622,23 +592,20 @@
 	struct pci_dev *dev	= to_pci_dev(hwif->dev);
 	void *addr		= pci_get_drvdata(dev);
 	u8 ch			= hwif->channel;
-	unsigned long		base;
-
 	struct ide_io_ports *io_ports = &hwif->io_ports;
+	unsigned long base;
 
 	/*
-	 *	Fill in the basic HWIF bits
+	 *	Fill in the basic hwif bits
 	 */
-
+	hwif->host_flags |= IDE_HFLAG_MMIO;
 	default_hwif_mmiops(hwif);
-	hwif->hwif_data			= addr;
+	hwif->hwif_data	= addr;
 
 	/*
-	 *	Now set up the hw. We have to do this ourselves as
-	 *	the MMIO layout isnt the same as the standard port
-	 *	based I/O
+	 *	Now set up the hw. We have to do this ourselves as the
+	 *	MMIO layout isn't the same as the standard port based I/O.
 	 */
-
 	memset(io_ports, 0, sizeof(*io_ports));
 
 	base = (unsigned long)addr;
@@ -648,10 +615,9 @@
 		base += 0x80;
 
 	/*
-	 *	The buffered task file doesn't have status/control
-	 *	so we can't currently use it sanely since we want to
-	 *	use LBA48 mode.
-	 */	
+	 *	The buffered task file doesn't have status/control, so we
+	 *	can't currently use it sanely since we want to use LBA48 mode.
+	 */
 	io_ports->data_addr	= base;
 	io_ports->error_addr	= base + 1;
 	io_ports->nsect_addr	= base + 2;
@@ -680,19 +646,17 @@
 
 static int is_dev_seagate_sata(ide_drive_t *drive)
 {
-	const char *s = &drive->id->model[0];
-	unsigned len;
+	const char *s	= &drive->id->model[0];
+	unsigned len	= strnlen(s, sizeof(drive->id->model));
 
-	len = strnlen(s, sizeof(drive->id->model));
-
-	if ((len > 4) && (!memcmp(s, "ST", 2))) {
+	if ((len > 4) && (!memcmp(s, "ST", 2)))
 		if ((!memcmp(s + len - 2, "AS", 2)) ||
 		    (!memcmp(s + len - 3, "ASL", 3))) {
 			printk(KERN_INFO "%s: applying pessimistic Seagate "
 					 "errata fix\n", drive->name);
 			return 1;
 		}
-	}
+
 	return 0;
 }
 
@@ -709,7 +673,7 @@
 {
 	ide_hwif_t *hwif = drive->hwif;
 
-	/* Try and raise the rqsize */
+	/* Try and raise the rqsize */
 	if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
 		hwif->rqsize = 128;
 }
@@ -743,20 +707,14 @@
  *	sil_cable_detect	-	cable detection
  *	@hwif: interface to check
  *
- *	Check for the presence of an ATA66 capable cable on the
- *	interface.
+ *	Check for the presence of an ATA66 capable cable on the interface.
  */
 
 static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
 {
-	struct pci_dev *dev = to_pci_dev(hwif->dev);
-	unsigned long addr = siimage_selreg(hwif, 0);
-	u8 ata66 = 0;
-
-	if (pci_get_drvdata(dev) == NULL)
-		pci_read_config_byte(dev, addr, &ata66);
-	else
-		ata66 = hwif->INB(addr);
+	struct pci_dev *dev	= to_pci_dev(hwif->dev);
+	unsigned long addr	= siimage_selreg(hwif, 0);
+	u8 ata66		= sil_ioread8(dev, addr);
 
 	return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
@@ -802,15 +760,16 @@
 };
 
 /**
- *	siimage_init_one	-	pci layer discovery entry
+ *	siimage_init_one	-	PCI layer discovery entry
  *	@dev: PCI device
  *	@id: ident table entry
  *
- *	Called by the PCI code when it finds an SI680 or SI3112 controller.
+ *	Called by the PCI code when it finds an SiI680 or SiI3112 controller.
  *	We then use the IDE PCI generic helper to do most of the work.
  */
- 
-static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+
+static int __devinit siimage_init_one(struct pci_dev *dev,
+				      const struct pci_device_id *id)
 {
 	struct ide_port_info d;
 	u8 idx = id->driver_data;
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 3cac6b2..48aa019 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -941,6 +941,7 @@
 	.port_ops		= &pmac_ide_port_ops,
 	.host_flags		= IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
 				  IDE_HFLAG_POST_SET_MODE |
+				  IDE_HFLAG_MMIO |
 				  IDE_HFLAG_UNMASK_IRQS,
 	.pio_mask		= ATA_PIO4,
 	.mwdma_mask		= ATA_MWDMA2,
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 4e3128f..fe78f7d 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
 
 #include "uverbs.h"
 
@@ -72,9 +73,10 @@
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-			    size_t size, int access)
+			    size_t size, int access, int dmasync)
 {
 	struct ib_umem *umem;
 	struct page **page_list;
@@ -87,6 +89,10 @@
 	int ret;
 	int off;
 	int i;
+	DEFINE_DMA_ATTRS(attrs);
+
+	if (dmasync)
+		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
 	if (!can_do_mlock())
 		return ERR_PTR(-EPERM);
@@ -174,10 +180,11 @@
 				sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
 			}
 
-			chunk->nmap = ib_dma_map_sg(context->device,
-						    &chunk->page_list[0],
-						    chunk->nents,
-						    DMA_BIDIRECTIONAL);
+			chunk->nmap = ib_dma_map_sg_attrs(context->device,
+							  &chunk->page_list[0],
+							  chunk->nents,
+							  DMA_BIDIRECTIONAL,
+							  &attrs);
 			if (chunk->nmap <= 0) {
 				for (i = 0; i < chunk->nents; ++i)
 					put_page(sg_page(&chunk->page_list[i]));
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 6af2c0f..2acf9b6 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -452,7 +452,7 @@
 		return ERR_PTR(-ENOMEM);
 	c2mr->pd = c2pd;
 
-	c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+	c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(c2mr->umem)) {
 		err = PTR_ERR(c2mr->umem);
 		kfree(c2mr);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ab4695c..e343e9e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -602,7 +602,7 @@
 	if (!mhp)
 		return ERR_PTR(-ENOMEM);
 
-	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(mhp->umem)) {
 		err = PTR_ERR(mhp->umem);
 		kfree(mhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 46ae4eb..f974367 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -323,7 +323,7 @@
 	}
 
 	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-				 mr_access_flags);
+				 mr_access_flags, 0);
 	if (IS_ERR(e_mr->umem)) {
 		ib_mr = (void *)e_mr->umem;
 		goto reg_user_mr_exit1;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index db4ba92..9d343b7 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -195,7 +195,8 @@
 		goto bail;
 	}
 
-	umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags);
+	umem = ib_umem_get(pd->uobject->context, start, length,
+			   mr_access_flags, 0);
 	if (IS_ERR(umem))
 		return (void *) umem;
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5e570bb..e3dddfc 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -137,7 +137,7 @@
 	int err;
 
 	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
-			    IB_ACCESS_LOCAL_WRITE);
+			    IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 8e342cc..8aee423 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -63,7 +63,7 @@
 	page->user_virt = (virt & PAGE_MASK);
 	page->refcnt    = 0;
 	page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
-				      PAGE_SIZE, 0);
+				      PAGE_SIZE, 0, 0);
 	if (IS_ERR(page->umem)) {
 		err = PTR_ERR(page->umem);
 		kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index fe2c2e9..68e9248 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -132,7 +132,8 @@
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
+	mr->umem = ib_umem_get(pd->uobject->context, start, length,
+			       access_flags, 0);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 80ea8b9..8e02ecf 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -482,7 +482,7 @@
 			goto err;
 
 		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-				       qp->buf_size, 0);
+				       qp->buf_size, 0, 0);
 		if (IS_ERR(qp->umem)) {
 			err = PTR_ERR(qp->umem);
 			goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 2046197..12d6bc6 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -109,7 +109,7 @@
 		}
 
 		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-					buf_size, 0);
+					buf_size, 0, 0);
 		if (IS_ERR(srq->umem)) {
 			err = PTR_ERR(srq->umem);
 			goto err_srq;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 696e1f3..2a9f460 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1006,17 +1006,23 @@
 	struct mthca_dev *dev = to_mdev(pd->device);
 	struct ib_umem_chunk *chunk;
 	struct mthca_mr *mr;
+	struct mthca_reg_mr ucmd;
 	u64 *pages;
 	int shift, n, len;
 	int i, j, k;
 	int err = 0;
 	int write_mtt_size;
 
+	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+		return ERR_PTR(-EFAULT);
+
 	mr = kmalloc(sizeof *mr, GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
+			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err;
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 02cc0a7..f8cb3b6 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -41,7 +41,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MTHCA_UVERBS_ABI_VERSION	1
+#define MTHCA_UVERBS_ABI_VERSION	2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -61,6 +61,14 @@
 	__u32 reserved;
 };
 
+struct mthca_reg_mr {
+	__u32 mr_attrs;
+#define MTHCA_MR_DMASYNC 0x1
+/* mark the memory region with a DMA attribute that causes
+ * in-flight DMA to be flushed when the region is written to */
+	__u32 reserved;
+};
+
 struct mthca_create_cq {
 	__u32 lkey;
 	__u32 pdn;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ee74f7c..9ae397a 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2377,7 +2377,7 @@
 	u8 single_page = 1;
 	u8 stag_key;
 
-	region = ib_umem_get(pd->uobject->context, start, length, acc);
+	region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(region)) {
 		return (struct ib_mr *)region;
 	}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index f02c242..27006fc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -898,30 +898,26 @@
 {
 	struct proc_dir_entry *entry;
 
-	proc_bus_input_dir = proc_mkdir("input", proc_bus);
+	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
 	if (!proc_bus_input_dir)
 		return -ENOMEM;
 
 	proc_bus_input_dir->owner = THIS_MODULE;
 
-	entry = create_proc_entry("devices", 0, proc_bus_input_dir);
+	entry = proc_create("devices", 0, proc_bus_input_dir,
+			    &input_devices_fileops);
 	if (!entry)
 		goto fail1;
 
-	entry->owner = THIS_MODULE;
-	entry->proc_fops = &input_devices_fileops;
-
-	entry = create_proc_entry("handlers", 0, proc_bus_input_dir);
+	entry = proc_create("handlers", 0, proc_bus_input_dir,
+			    &input_handlers_fileops);
 	if (!entry)
 		goto fail2;
 
-	entry->owner = THIS_MODULE;
-	entry->proc_fops = &input_handlers_fileops;
-
 	return 0;
 
  fail2:	remove_proc_entry("devices", proc_bus_input_dir);
- fail1: remove_proc_entry("input", proc_bus);
+ fail1: remove_proc_entry("bus/input", NULL);
 	return -ENOMEM;
 }
 
@@ -929,7 +925,7 @@
 {
 	remove_proc_entry("devices", proc_bus_input_dir);
 	remove_proc_entry("handlers", proc_bus_input_dir);
-	remove_proc_entry("input", proc_bus);
+	remove_proc_entry("bus/input", NULL);
 }
 
 #else /* !CONFIG_PROC_FS */
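
The input.c conversion above, like the ISDN, media, misc, i2o, bonding and md conversions further down, replaces create_proc_entry() plus a manual proc_fops assignment with a single proc_create() call (proc_create_data() where a ->data pointer is also needed), so the entry is never visible to userspace before its file_operations are set. A minimal sketch of the pattern, assuming the 2.6.26-era procfs and seq_file APIs; the "foo" entry and its handlers are hypothetical:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical read-only /proc/foo entry backed by seq_file. */
static int foo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example\n");
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, NULL);
}

static const struct file_operations foo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init foo_proc_init(void)
{
	/*
	 * Racy pre-conversion style; the entry is live before proc_fops
	 * is assigned:
	 *
	 *	entry = create_proc_entry("foo", 0, NULL);
	 *	if (entry)
	 *		entry->proc_fops = &foo_proc_fops;
	 *
	 * Non-racy replacement used by the hunks in this patch:
	 */
	if (!proc_create("foo", 0, NULL, &foo_proc_fops))
		return -ENOMEM;
	return 0;
}
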
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 1d759f6..55c1134 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -528,9 +528,9 @@
 			    (aiptek->curSetting.pointerMode)) {
 				aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED;
 		} else {
-			x = le16_to_cpu(get_unaligned((__le16 *) (data + 1)));
-			y = le16_to_cpu(get_unaligned((__le16 *) (data + 3)));
-			z = le16_to_cpu(get_unaligned((__le16 *) (data + 6)));
+			x = get_unaligned_le16(data + 1);
+			y = get_unaligned_le16(data + 3);
+			z = get_unaligned_le16(data + 6);
 
 			dv = (data[5] & 0x01) != 0 ? 1 : 0;
 			p = (data[5] & 0x02) != 0 ? 1 : 0;
@@ -613,8 +613,8 @@
 			(aiptek->curSetting.pointerMode)) {
 			aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED;
 		} else {
-			x = le16_to_cpu(get_unaligned((__le16 *) (data + 1)));
-			y = le16_to_cpu(get_unaligned((__le16 *) (data + 3)));
+			x = get_unaligned_le16(data + 1);
+			y = get_unaligned_le16(data + 3);
 
 			jitterable = data[5] & 0x1c;
 
@@ -679,7 +679,7 @@
 		pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0;
 
 		macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1;
-		z = le16_to_cpu(get_unaligned((__le16 *) (data + 4)));
+		z = get_unaligned_le16(data + 4);
 
 		if (dv) {
 		        /* If the selected tool changed, reset the old
@@ -757,7 +757,7 @@
 	 * hat switches (which just so happen to be the macroKeys.)
 	 */
 	else if (data[0] == 6) {
-		macro = le16_to_cpu(get_unaligned((__le16 *) (data + 1)));
+		macro = get_unaligned_le16(data + 1);
 		if (macro > 0) {
 			input_report_key(inputdev, macroKeyEvents[macro - 1],
 					 0);
@@ -952,7 +952,7 @@
 		    buf[0], buf[1], buf[2]);
 		ret = -EIO;
 	} else {
-		ret = le16_to_cpu(get_unaligned((__le16 *) (buf + 1)));
+		ret = get_unaligned_le16(buf + 1);
 	}
 	kfree(buf);
 	return ret;
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index f66ca21..c5a8661 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -245,11 +245,11 @@
 			data = report[i];
 			break;
 		case 2:
-			data16 = le16_to_cpu(get_unaligned((__le16 *)&report[i]));
+			data16 = get_unaligned_le16(&report[i]);
 			break;
 		case 3:
 			size = 4;
-			data32 = le32_to_cpu(get_unaligned((__le32 *)&report[i]));
+			data32 = get_unaligned_le32(&report[i]);
 			break;
 		}
 
@@ -695,10 +695,10 @@
 			/*  Fall thru */
 		case 1:
 			/* All reports have X and Y coords in the same place */
-			val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1]));
+			val = get_unaligned_le16(&device->buffer[1]);
 			input_report_abs(inputdev, ABS_X, val);
 
-			val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3]));
+			val = get_unaligned_le16(&device->buffer[3]);
 			input_report_abs(inputdev, ABS_Y, val);
 
 			/* Ditto for proximity bit */
@@ -762,7 +762,7 @@
 				le_buffer[1]  = (u8)(device->buffer[4] >> 1);
 				le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7);
 
-				val = le16_to_cpu(get_unaligned((__le16 *)le_buffer));
+				val = get_unaligned_le16(le_buffer);
 				input_report_abs(inputdev, ABS_Y, val);
 
 				/*
@@ -772,10 +772,10 @@
 				buttonbyte = device->buffer[5] >> 1;
 			} else {
 
-				val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1]));
+				val = get_unaligned_le16(&device->buffer[1]);
 				input_report_abs(inputdev, ABS_X, val);
 
-				val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3]));
+				val = get_unaligned_le16(&device->buffer[3]);
 				input_report_abs(inputdev, ABS_Y, val);
 
 				buttonbyte = device->buffer[5];
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 1182fc1..f23f5a9 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -63,8 +63,8 @@
 		goto exit;
 	}
 
-	kbtab->x = le16_to_cpu(get_unaligned((__le16 *) &data[1]));
-	kbtab->y = le16_to_cpu(get_unaligned((__le16 *) &data[3]));
+	kbtab->x = get_unaligned_le16(&data[1]);
+	kbtab->y = get_unaligned_le16(&data[3]);
 
 	kbtab->pressure = (data[5]);
 
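
The aiptek, gtco and kbtab hunks above, and the e100 and mmc_spi hunks later on, all switch to the get/put_unaligned_le16/le32/be32() helpers, which fold the pointer cast and the byte-order conversion into one call. A small sketch of the equivalence, assuming <asm/unaligned.h> as used by these drivers; the report layout is made up:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical USB report: little-endian X at offset 1, Y at offset 3. */
static void parse_report(const u8 *data, int *x, int *y)
{
	/* Old spelling: cast, unaligned fetch, then byte-order conversion. */
	*x = le16_to_cpu(get_unaligned((__le16 *)(data + 1)));

	/* New helper: same result, no cast, byte order named in the call. */
	*y = get_unaligned_le16(data + 3);
}
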
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 845a797..c29208b 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -114,6 +114,7 @@
 }
 
 static const struct file_operations proc_controller_ops = {
+	.owner		= THIS_MODULE,
 	.open		= seq_controller_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -121,6 +122,7 @@
 };
 
 static const struct file_operations proc_contrstats_ops = {
+	.owner		= THIS_MODULE,
 	.open		= seq_contrstats_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -219,6 +221,7 @@
 }
 
 static const struct file_operations proc_applications_ops = {
+	.owner		= THIS_MODULE,
 	.open		= seq_applications_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -226,21 +229,13 @@
 };
 
 static const struct file_operations proc_applstats_ops = {
+	.owner		= THIS_MODULE,
 	.open		= seq_applstats_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
 };
 
-static void
-create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
-{
-	struct proc_dir_entry *entry;
-	entry = create_proc_entry(name, mode, NULL);
-	if (entry)
-		entry->proc_fops = f;
-}
-
 // ---------------------------------------------------------------------------
 
 static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
@@ -283,6 +278,7 @@
 }
 
 static const struct file_operations proc_driver_ops = {
+	.owner		= THIS_MODULE,
 	.open		= seq_capi_driver_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -296,11 +292,11 @@
 {
 	proc_mkdir("capi",             NULL);
 	proc_mkdir("capi/controllers", NULL);
-	create_seq_entry("capi/controller",   0, &proc_controller_ops);
-	create_seq_entry("capi/contrstats",   0, &proc_contrstats_ops);
-	create_seq_entry("capi/applications", 0, &proc_applications_ops);
-	create_seq_entry("capi/applstats",    0, &proc_applstats_ops);
-	create_seq_entry("capi/driver",       0, &proc_driver_ops);
+	proc_create("capi/controller",   0, NULL, &proc_controller_ops);
+	proc_create("capi/contrstats",   0, NULL, &proc_contrstats_ops);
+	proc_create("capi/applications", 0, NULL, &proc_applications_ops);
+	proc_create("capi/applstats",    0, NULL, &proc_applstats_ops);
+	proc_create("capi/driver",       0, NULL, &proc_driver_ops);
 }
 
 void __exit
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 4fd4c46..8b256a6 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -288,13 +288,12 @@
 	isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net);
 	if (!isdn_proc_entry)
 		return (-1);
-	isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry);
+	isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO,
+					isdn_proc_entry, &isdn_fops);
 	if (!isdn_divert_entry) {
 		remove_proc_entry("isdn", init_net.proc_net);
 		return (-1);
 	}
-	isdn_divert_entry->proc_fops = &isdn_fops; 
-	isdn_divert_entry->owner = THIS_MODULE; 
 #endif	/* CONFIG_PROC_FS */
 
 	return (0);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 0632a26..fae8958 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -125,15 +125,11 @@
 
 int create_divas_proc(void)
 {
-	divas_proc_entry = create_proc_entry(divas_proc_name,
-					     S_IFREG | S_IRUGO,
-					     proc_net_eicon);
+	divas_proc_entry = proc_create(divas_proc_name, S_IFREG | S_IRUGO,
+				       proc_net_eicon, &divas_fops);
 	if (!divas_proc_entry)
 		return (0);
 
-	divas_proc_entry->proc_fops = &divas_fops;
-	divas_proc_entry->owner = THIS_MODULE;
-
 	return (1);
 }
 
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 27d890b..877be99 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -370,6 +370,7 @@
 /******************************************************/
 static const struct file_operations conf_fops =
 {
+	.owner		= THIS_MODULE,
 	.llseek         = no_llseek,
 	.read           = hysdn_conf_read,
 	.write          = hysdn_conf_write,
@@ -402,11 +403,9 @@
 	while (card) {
 
 		sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
-		if ((card->procconf = (void *) create_proc_entry(conf_name,
-					     S_IFREG | S_IRUGO | S_IWUSR,
-					    hysdn_proc_entry)) != NULL) {
-			((struct proc_dir_entry *) card->procconf)->proc_fops = &conf_fops;
-			((struct proc_dir_entry *) card->procconf)->owner = THIS_MODULE;
+		if ((card->procconf = (void *) proc_create(conf_name,
+						S_IFREG | S_IRUGO | S_IWUSR,
+						hysdn_proc_entry, &conf_fops)) != NULL) {
 			hysdn_proclog_init(card);	/* init the log file entry */
 		}
 		card = card->next;	/* next entry */
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 27b3991..8991d2c 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -380,6 +380,7 @@
 /**************************************************/
 static const struct file_operations log_fops =
 {
+	.owner		= THIS_MODULE,
 	.llseek         = no_llseek,
 	.read           = hysdn_log_read,
 	.write          = hysdn_log_write,
@@ -402,10 +403,9 @@
 
 	if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
 		sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
-		if ((pd->log = create_proc_entry(pd->log_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry)) != NULL) {
-		        pd->log->proc_fops = &log_fops; 
-		        pd->log->owner = THIS_MODULE;
-		}
+		pd->log = proc_create(pd->log_name,
+				S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
+				&log_fops);
 
 		init_waitqueue_head(&(pd->rd_queue));
 
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index ac05a92..b3c54be 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -105,7 +105,7 @@
 
 	led_cdev->dev = device_create(leds_class, parent, 0, "%s",
 					    led_cdev->name);
-	if (unlikely(IS_ERR(led_cdev->dev)))
+	if (IS_ERR(led_cdev->dev))
 		return PTR_ERR(led_cdev->dev);
 
 	dev_set_drvdata(led_cdev->dev, led_cdev);
diff --git a/drivers/mca/mca-legacy.c b/drivers/mca/mca-legacy.c
index 0c7bfa7..494f0c2 100644
--- a/drivers/mca/mca-legacy.c
+++ b/drivers/mca/mca-legacy.c
@@ -282,24 +282,6 @@
 EXPORT_SYMBOL(mca_set_adapter_name);
 
 /**
- *	mca_is_adapter_used - check if claimed by driver
- *	@slot:	slot to check
- *
- *	Returns 1 if the slot has been claimed by a driver
- */
-
-int mca_is_adapter_used(int slot)
-{
-	struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
-	if(!mca_dev)
-		return 0;
-
-	return mca_device_claimed(mca_dev);
-}
-EXPORT_SYMBOL(mca_is_adapter_used);
-
-/**
  *	mca_mark_as_used - claim an MCA device
  *	@slot:	slot to claim
  *	FIXME:  should we make this threadsafe
diff --git a/drivers/mca/mca-proc.c b/drivers/mca/mca-proc.c
index 33d5e08..81ea0d3 100644
--- a/drivers/mca/mca-proc.c
+++ b/drivers/mca/mca-proc.c
@@ -183,7 +183,7 @@
 	struct proc_dir_entry* node = NULL;
 	struct mca_device *mca_dev;
 
-	proc_mca = proc_mkdir("mca", &proc_root);
+	proc_mca = proc_mkdir("mca", NULL);
 	create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL);
 	create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL);
 
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 6b91b9a..3ea5ad4 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -110,8 +110,6 @@
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = 0;
 
-	memset(&rq->cmd, 0, BLK_MAX_CDB);
-
 	rq->timeout = EMC_FAILOVER_TIMEOUT;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
diff --git a/drivers/md/dm-mpath-hp-sw.c b/drivers/md/dm-mpath-hp-sw.c
index 204bf42..b63a0ab 100644
--- a/drivers/md/dm-mpath-hp-sw.c
+++ b/drivers/md/dm-mpath-hp-sw.c
@@ -137,7 +137,6 @@
 	req->sense = h->sense;
 	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
 
-	memset(&req->cmd, 0, BLK_MAX_CDB);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;
 	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
index e04eb5c..95e7773 100644
--- a/drivers/md/dm-mpath-rdac.c
+++ b/drivers/md/dm-mpath-rdac.c
@@ -284,7 +284,6 @@
 		return NULL;
 	}
 
- 	memset(&rq->cmd, 0, BLK_MAX_CDB);
 	rq->sense = h->sense;
 	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
 	rq->sense_len = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 51be533..73326e7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -873,10 +873,13 @@
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
+	/* XXX: the below will probably trigger a bug; must ensure there can be
+	 * no concurrency on queue_flags, and use the unlocked versions...
+	 */
 	if (t->limits.no_cluster)
-		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, q);
 	else
-		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+		queue_flag_set(QUEUE_FLAG_CLUSTER, q);
 
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 87620b7..bb3e4b1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -282,7 +282,8 @@
 		kfree(new);
 		return NULL;
 	}
-	set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
+	/* Can be unlocked because the queue is new: no concurrency */
+	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
 
 	blk_queue_make_request(new->queue, md_fail_request);
 
@@ -5947,13 +5948,9 @@
 
 static void md_geninit(void)
 {
-	struct proc_dir_entry *p;
-
 	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
 
-	p = create_proc_entry("mdstat", S_IRUGO, NULL);
-	if (p)
-		p->proc_fops = &md_seq_fops;
+	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
 }
 
 static int __init md_init(void)
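
The queue_flags hunks in dm-table.c and md.c above move from open-coded bit operations to the queue_flag helpers: queue_flag_set()/queue_flag_clear() expect the queue lock to be held, while the *_unlocked variants are meant for queues nothing else can see yet, which is why md uses the unlocked form on its freshly allocated queue and dm-table carries the XXX note. A rough sketch of the distinction, assuming the 2.6.26-era block API; the surrounding function is hypothetical:

#include <linux/blkdev.h>

/* Hypothetical helper: mark a request queue as clustering-capable. */
static void foo_mark_clustered(struct request_queue *q, int queue_is_live)
{
	if (queue_is_live) {
		/* The queue is visible to I/O: serialize the
		 * read-modify-write of queue_flags via the queue lock. */
		spin_lock_irq(q->queue_lock);
		queue_flag_set(QUEUE_FLAG_CLUSTER, q);
		spin_unlock_irq(q->queue_lock);
	} else {
		/* Freshly allocated queue: nothing can race with us yet. */
		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
	}
}
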
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 5040d7f..1371b4e 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -119,7 +119,7 @@
 		purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
 		if (!purb->transfer_buffer) {
 			usb_free_urb(purb);
-			dev->urbs[i] = 0;
+			dev->urbs[i] = NULL;
 			goto err;
 		}
 
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index 393d917..62f70bd 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -1098,8 +1098,8 @@
 	ivtv_yuv_next_free(itv);
 
 	/* Copy V4L2 parameters to an ivtv_dma_frame struct... */
-	dma_args.y_source = 0L;
-	dma_args.uv_source = 0L;
+	dma_args.y_source = NULL;
+	dma_args.uv_source = NULL;
 	dma_args.src.left = 0;
 	dma_args.src.top = 0;
 	dma_args.src.width = yi->v4l2_src_w;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
index 9a7c8e9..8d859cc 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -75,7 +75,7 @@
 	pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo");
 
 	if ((sid < ARRAY_SIZE(routing_schemes)) &&
-	    ((sp = routing_schemes + sid) != 0) &&
+	    ((sp = routing_schemes + sid) != NULL) &&
 	    (hdw->input_val >= 0) &&
 	    (hdw->input_val < sp->cnt)) {
 		route.input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
index b5db6a5..73dcb1c 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -195,7 +195,7 @@
 int pvr2_context_global_init(void)
 {
 	pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func,
-					      0,
+					      NULL,
 					      "pvrusb2-context");
 	return (pvr2_context_thread_ptr ? 0 : -ENOMEM);
 }
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index 97350b0..29d5059 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -123,7 +123,7 @@
 	memset(&route,0,sizeof(route));
 
 	if ((sid < ARRAY_SIZE(routing_schemes)) &&
-	    ((sp = routing_schemes + sid) != 0) &&
+	    ((sp = routing_schemes + sid) != NULL) &&
 	    (hdw->input_val >= 0) &&
 	    (hdw->input_val < sp->cnt)) {
 		vid_input = sp->def[hdw->input_val].vid;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-dvb.c b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
index 2e64f98..6504c97 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
@@ -130,7 +130,7 @@
 		for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) {
 			if (!(adap->buffer_storage[idx])) continue;
 			kfree(adap->buffer_storage[idx]);
-			adap->buffer_storage[idx] = 0;
+			adap->buffer_storage[idx] = NULL;
 		}
 		adap->stream_run = 0;
 	}
@@ -142,7 +142,7 @@
 	unsigned int idx;
 	int ret;
 	struct pvr2_buffer *bp;
-	struct pvr2_stream *stream = 0;
+	struct pvr2_stream *stream = NULL;
 
 	if (adap->stream_run) return -EIO;
 
@@ -174,7 +174,7 @@
 	ret = pvr2_hdw_set_streaming(adap->channel.hdw, 1);
 	if (ret < 0) return ret;
 
-	while ((bp = pvr2_stream_get_idle_buffer(stream)) != 0) {
+	while ((bp = pvr2_stream_get_idle_buffer(stream)) != NULL) {
 		ret = pvr2_buffer_queue(bp);
 		if (ret < 0) return ret;
 	}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 087a182..e9b5d4e 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -1261,7 +1261,7 @@
  fail:
 	pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp);
 	pvr2_v4l2_destroy_no_lock(vp);
-	return 0;
+	return NULL;
 }
 
 /*
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
index 7c47345..2433a31 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -81,7 +81,7 @@
 	pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val);
 
 	if ((sid < ARRAY_SIZE(routing_schemes)) &&
-	    ((sp = routing_schemes + sid) != 0) &&
+	    ((sp = routing_schemes + sid) != NULL) &&
 	    (hdw->input_val >= 0) &&
 	    (hdw->input_val < sp->cnt)) {
 		route.input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 34deb68..7cc42c1 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -716,7 +716,7 @@
 	int err;
 
 	client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-	if (client == 0)
+	if (!client)
 		return -ENOMEM;
 
 	client->addr = address;
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index fc51e49..982f446 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -97,7 +97,10 @@
 void *videobuf_queue_to_vmalloc (struct videobuf_queue *q,
 			   struct videobuf_buffer *buf)
 {
-	return CALL(q, vmalloc, buf);
+	if (q->int_ops->vmalloc)
+		return q->int_ops->vmalloc(buf);
+	else
+		return NULL;
 }
 EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc);
 
diff --git a/drivers/media/video/zoran_procfs.c b/drivers/media/video/zoran_procfs.c
index 328ed6e..870bc5a 100644
--- a/drivers/media/video/zoran_procfs.c
+++ b/drivers/media/video/zoran_procfs.c
@@ -180,6 +180,7 @@
 }
 
 static const struct file_operations zoran_operations = {
+	.owner		= THIS_MODULE,
 	.open		= zoran_open,
 	.read		= seq_read,
 	.write		= zoran_write,
@@ -195,10 +196,8 @@
 	char name[8];
 
 	snprintf(name, 7, "zoran%d", zr->id);
-	if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) {
-		zr->zoran_proc->data = zr;
-		zr->zoran_proc->owner = THIS_MODULE;
-		zr->zoran_proc->proc_fops = &zoran_operations;
+	zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr);
+	if (zr->zoran_proc != NULL) {
 		dprintk(2,
 			KERN_INFO
 			"%s: procfs entry /proc/%s allocated. data=%p\n",
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a953148..81483de 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -371,7 +371,7 @@
 	/* connect the i2o_block_request to the request */
 	if (!req->special) {
 		ireq = i2o_block_request_alloc();
-		if (unlikely(IS_ERR(ireq))) {
+		if (IS_ERR(ireq)) {
 			osm_debug("unable to allocate i2o_block_request!\n");
 			return BLKPREP_DEFER;
 		}
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 6fdd072..54a3016 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -1893,13 +1893,11 @@
 	struct proc_dir_entry *tmp;
 
 	while (i2o_pe->name) {
-		tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir);
+		tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
+				       i2o_pe->fops, data);
 		if (!tmp)
 			return -1;
 
-		tmp->data = data;
-		tmp->proc_fops = i2o_pe->fops;
-
 		i2o_pe++;
 	}
 
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c
index 302e924..ff51ab6 100644
--- a/drivers/misc/hdpuftrs/hdpu_cpustate.c
+++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c
@@ -210,13 +210,10 @@
 		return ret;
 	}
 
-	proc_de = create_proc_entry("sky_cpustate", 0666, &proc_root);
+	proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
 	if (!proc_de) {
 		printk(KERN_WARNING "sky_cpustate: "
 		       "Unable to create proc entry\n");
-	} else {
-		proc_de->proc_fops = &proc_cpustate;
-		proc_de->owner = THIS_MODULE;
 	}
 
 	printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c
index 2fa36f7..08e26be 100644
--- a/drivers/misc/hdpuftrs/hdpu_nexus.c
+++ b/drivers/misc/hdpuftrs/hdpu_nexus.c
@@ -102,22 +102,17 @@
 		printk(KERN_ERR "sky_nexus: Could not map slot id\n");
 	}
 
-	hdpu_slot_id = create_proc_entry("sky_slot_id", 0666, &proc_root);
+	hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
 	if (!hdpu_slot_id) {
 		printk(KERN_WARNING "sky_nexus: "
 		       "Unable to create proc dir entry: sky_slot_id\n");
-	} else {
-		hdpu_slot_id->proc_fops = &proc_slot_id;
-		hdpu_slot_id->owner = THIS_MODULE;
 	}
 
-	hdpu_chassis_id = create_proc_entry("sky_chassis_id", 0666, &proc_root);
-	if (!hdpu_chassis_id) {
+	hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
+				      &proc_chassis_id);
+	if (!hdpu_chassis_id) {
 		printk(KERN_WARNING "sky_nexus: "
 		       "Unable to create proc dir entry: sky_chassis_id\n");
-	} else {
-		hdpu_chassis_id->proc_fops = &proc_chassis_id;
-		hdpu_chassis_id->owner = THIS_MODULE;
 	}
 
 	return 0;
@@ -128,8 +123,8 @@
 	slot_id = -1;
 	chassis_id = -1;
 
-	remove_proc_entry("sky_slot_id", &proc_root);
-	remove_proc_entry("sky_chassis_id", &proc_root);
+	remove_proc_entry("sky_slot_id", NULL);
+	remove_proc_entry("sky_chassis_id", NULL);
 
 	hdpu_slot_id = 0;
 	hdpu_chassis_id = 0;
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 1a0e797..276d3fb 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -96,7 +96,7 @@
 {
 	char tsbuf[32];
 
-	dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 
 	if (ibmasm_send_i2o_message(sp)) {
 		sp->current_command->status = IBMASM_CMD_FAILED;
@@ -119,7 +119,7 @@
 	unsigned long flags;
 	char tsbuf[32];
 
-	dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 
 	spin_lock_irqsave(&sp->lock, flags);
 
@@ -139,7 +139,7 @@
 	unsigned long flags;
 	char tsbuf[32];
 
-	dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 
 	spin_lock_irqsave(&sp->lock, flags);
 	sp->current_command = dequeue_command(sp);
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 3036e78..1bc4306 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -75,9 +75,9 @@
 {
 	char tsbuf[32];
 
-	dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 	ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
-	dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 	suspend_heartbeats = 1;
 	command_put(sp->heartbeat);
 }
@@ -88,7 +88,7 @@
 	struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
 	char tsbuf[32];
 
-	dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+	dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 	if (suspend_heartbeats)
 		return;
 
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
index 0c0bb30..80a1363 100644
--- a/drivers/misc/intel_menlow.c
+++ b/drivers/misc/intel_menlow.c
@@ -175,19 +175,17 @@
 		goto end;
 	}
 
-	if (cdev) {
-		acpi_driver_data(device) = cdev;
-		result = sysfs_create_link(&device->dev.kobj,
-					&cdev->device.kobj, "thermal_cooling");
-		if (result)
-			goto unregister;
+	acpi_driver_data(device) = cdev;
+	result = sysfs_create_link(&device->dev.kobj,
+				&cdev->device.kobj, "thermal_cooling");
+	if (result)
+		goto unregister;
 
-		result = sysfs_create_link(&cdev->device.kobj,
-					&device->dev.kobj, "device");
-		if (result) {
-			sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-			goto unregister;
-		}
+	result = sysfs_create_link(&cdev->device.kobj,
+				&device->dev.kobj, "device");
+	if (result) {
+		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+		goto unregister;
 	}
 
  end:
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 05172d2..6f76573 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -75,7 +75,7 @@
 			printk(KERN_WARNING
 			       "%s: IOC4 submodule %s probe failed "
 			       "for pci_dev %s",
-			       __FUNCTION__, module_name(is->is_owner),
+			       __func__, module_name(is->is_owner),
 			       pci_name(idd->idd_pdev));
 		}
 	}
@@ -102,7 +102,7 @@
 			printk(KERN_WARNING
 			       "%s: IOC4 submodule %s remove failed "
 			       "for pci_dev %s.\n",
-			       __FUNCTION__, module_name(is->is_owner),
+			       __func__, module_name(is->is_owner),
 			       pci_name(idd->idd_pdev));
 		}
 	}
@@ -282,7 +282,7 @@
 	if ((ret = pci_enable_device(pdev))) {
 		printk(KERN_WARNING
 		       "%s: Failed to enable IOC4 device for pci_dev %s.\n",
-		       __FUNCTION__, pci_name(pdev));
+		       __func__, pci_name(pdev));
 		goto out;
 	}
 	pci_set_master(pdev);
@@ -292,7 +292,7 @@
 	if (!idd) {
 		printk(KERN_WARNING
 		       "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
-		       __FUNCTION__, pci_name(pdev));
+		       __func__, pci_name(pdev));
 		ret = -ENODEV;
 		goto out_idd;
 	}
@@ -307,7 +307,7 @@
 		printk(KERN_WARNING
 		       "%s: Unable to find IOC4 misc resource "
 		       "for pci_dev %s.\n",
-		       __FUNCTION__, pci_name(idd->idd_pdev));
+		       __func__, pci_name(idd->idd_pdev));
 		ret = -ENODEV;
 		goto out_pci;
 	}
@@ -316,7 +316,7 @@
 		printk(KERN_WARNING
 		       "%s: Unable to request IOC4 misc region "
 		       "for pci_dev %s.\n",
-		       __FUNCTION__, pci_name(idd->idd_pdev));
+		       __func__, pci_name(idd->idd_pdev));
 		ret = -ENODEV;
 		goto out_pci;
 	}
@@ -326,7 +326,7 @@
 		printk(KERN_WARNING
 		       "%s: Unable to remap IOC4 misc region "
 		       "for pci_dev %s.\n",
-		       __FUNCTION__, pci_name(idd->idd_pdev));
+		       __func__, pci_name(idd->idd_pdev));
 		ret = -ENODEV;
 		goto out_misc_region;
 	}
@@ -372,7 +372,7 @@
 			printk(KERN_WARNING
 			       "%s: IOC4 submodule 0x%s probe failed "
 			       "for pci_dev %s.\n",
-			       __FUNCTION__, module_name(is->is_owner),
+			       __func__, module_name(is->is_owner),
 			       pci_name(idd->idd_pdev));
 		}
 	}
@@ -406,7 +406,7 @@
 			printk(KERN_WARNING
 			       "%s: IOC4 submodule 0x%s remove failed "
 			       "for pci_dev %s.\n",
-			       __FUNCTION__, module_name(is->is_owner),
+			       __func__, module_name(is->is_owner),
 			       pci_name(idd->idd_pdev));
 		}
 	}
@@ -418,7 +418,7 @@
 		printk(KERN_WARNING
 		       "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
 		       "Device removal may be incomplete.\n",
-		       __FUNCTION__, pci_name(idd->idd_pdev));
+		       __func__, pci_name(idd->idd_pdev));
 	}
 	release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
 
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 7fa61e9..71d1c84 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -12,6 +12,7 @@
  *  or alternatively, you might use OpenHaptics provided by Sensable.
  */
 
+#include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/device.h>
@@ -91,11 +92,8 @@
 	unsigned long flags;
 	unsigned int i;
 
-	if (_IOC_TYPE(cmd) != PH_IOC_MAGIC ||
-			_IOC_NR(cmd) > PH_IOC_MAXNR)
-		return -ENOTTY;
-
 	switch (cmd) {
+	case PHN_SETREG:
 	case PHN_SET_REG:
 		if (copy_from_user(&r, argp, sizeof(r)))
 			return -EFAULT;
@@ -126,6 +124,7 @@
 			phantom_status(dev, dev->status & ~PHB_RUNNING);
 		spin_unlock_irqrestore(&dev->regs_lock, flags);
 		break;
+	case PHN_SETREGS:
 	case PHN_SET_REGS:
 		if (copy_from_user(&rs, argp, sizeof(rs)))
 			return -EFAULT;
@@ -143,6 +142,7 @@
 		}
 		spin_unlock_irqrestore(&dev->regs_lock, flags);
 		break;
+	case PHN_GETREG:
 	case PHN_GET_REG:
 		if (copy_from_user(&r, argp, sizeof(r)))
 			return -EFAULT;
@@ -155,6 +155,7 @@
 		if (copy_to_user(argp, &r, sizeof(r)))
 			return -EFAULT;
 		break;
+	case PHN_GETREGS:
 	case PHN_GET_REGS: {
 		u32 m;
 
@@ -168,6 +169,7 @@
 		for (i = 0; i < m; i++)
 			if (rs.mask & BIT(i))
 				rs.values[i] = ioread32(dev->iaddr + i);
+		atomic_set(&dev->counter, 0);
 		spin_unlock_irqrestore(&dev->regs_lock, flags);
 
 		if (copy_to_user(argp, &rs, sizeof(rs)))
@@ -191,6 +193,20 @@
 	return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long phantom_compat_ioctl(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+		cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+		cmd |= sizeof(void *) << _IOC_SIZESHIFT;
+	}
+	return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define phantom_compat_ioctl NULL
+#endif
+
 static int phantom_open(struct inode *inode, struct file *file)
 {
 	struct phantom_device *dev = container_of(inode->i_cdev,
@@ -239,11 +255,12 @@
 
 	pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
 	poll_wait(file, &dev->wait, wait);
-	if (atomic_read(&dev->counter)) {
+
+	if (!(dev->status & PHB_RUNNING))
+		mask = POLLERR;
+	else if (atomic_read(&dev->counter))
 		mask = POLLIN | POLLRDNORM;
-		atomic_dec(&dev->counter);
-	} else if ((dev->status & PHB_RUNNING) == 0)
-		mask = POLLIN | POLLRDNORM | POLLERR;
+
 	pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter));
 
 	return mask;
@@ -253,6 +270,7 @@
 	.open = phantom_open,
 	.release = phantom_release,
 	.unlocked_ioctl = phantom_ioctl,
+	.compat_ioctl = phantom_compat_ioctl,
 	.poll = phantom_poll,
 };
 
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 02ff3d1..00e48e2 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -961,7 +961,7 @@
 		ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
 				       item->value, NULL);
 		if (ret < 0) {
-			printk("%s: %d\n", __FUNCTION__, ret);
+			printk("%s: %d\n", __func__, ret);
 			break;
 		}
 	}
@@ -1453,7 +1453,7 @@
 		udelay(1);						\
 	if (!n)								\
 		dprintk("command failed at %s : %s (line %d)\n",	\
-				__FILE__, __FUNCTION__, __LINE__);	\
+				__FILE__, __func__, __LINE__);	\
 }
 
 static u8 sony_pic_call1(u8 dev)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 365024b8..3550858 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -340,7 +340,7 @@
 
 	/* SPI R3, R4, or R7 == R1 + 4 bytes */
 	case MMC_RSP_SPI_R3:
-		cmd->resp[1] = be32_to_cpu(get_unaligned((u32 *)cp));
+		cmd->resp[1] = get_unaligned_be32(cp);
 		break;
 
 	/* SPI R1 == just one status byte */
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index f0b10ca..3eb2643 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -209,7 +209,7 @@
 	/* probe for the right mtd map driver
 	 * supplied by the platform_data struct */
 
-	if (pdata->map_probes != 0) {
+	if (pdata->map_probes) {
 		const char **map_probes = pdata->map_probes;
 
 		for ( ; !info->mtd && *map_probes; map_probes++)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6e91b4b..6425603 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3282,17 +3282,14 @@
 	struct net_device *bond_dev = bond->dev;
 
 	if (bond_proc_dir) {
-		bond->proc_entry = create_proc_entry(bond_dev->name,
-						     S_IRUGO,
-						     bond_proc_dir);
+		bond->proc_entry = proc_create_data(bond_dev->name,
+						    S_IRUGO, bond_proc_dir,
+						    &bond_info_fops, bond);
 		if (bond->proc_entry == NULL) {
 			printk(KERN_WARNING DRV_NAME
 			       ": Warning: Cannot create /proc/net/%s/%s\n",
 			       DRV_NAME, bond_dev->name);
 		} else {
-			bond->proc_entry->data = bond;
-			bond->proc_entry->proc_fops = &bond_info_fops;
-			bond->proc_entry->owner = THIS_MODULE;
 			memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
 		}
 	}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 2d139ec..f3cba5e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1802,7 +1802,7 @@
 	 * it is protected by the before last buffer's el bit being set */
 	if (rx->prev->skb) {
 		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
-		put_unaligned(cpu_to_le32(rx->dma_addr), &prev_rfd->link);
+		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
 	}
 
 	return 0;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index b53f6b6..e5c2380 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1508,7 +1508,7 @@
 					    hmp->rx_buf_sz,
 					    PCI_DMA_FROMDEVICE);
 		buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
-		frame_status = le32_to_cpu(get_unaligned((__le32*)&(buf_addr[data_size - 12])));
+		frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
 		if (hamachi_debug > 4)
 			printk(KERN_DEBUG "  hamachi_rx() status was %8.8x.\n",
 				frame_status);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ce4fc2e..0052780 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1302,13 +1302,10 @@
 	if (ibmveth_proc_dir) {
 		char u_addr[10];
 		sprintf(u_addr, "%x", adapter->vdev->unit_address);
-		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
-		if (!entry) {
+		entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
+					 &ibmveth_proc_fops, adapter);
+		if (!entry)
 			ibmveth_error_printk("Cannot create adapter proc entry");
-		} else {
-			entry->data = (void *) adapter;
-			entry->proc_fops = &ibmveth_proc_fops;
-		}
 	}
 	return;
 }
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 93916cf..ad92d3f 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -464,7 +464,7 @@
 	}
 
 	fcs = ~(crc32_le(~0, buf, new_len));
-	if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) {
+	if(fcs != get_unaligned_le32(buf + new_len)) {
 		IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
 		mcs->stats.rx_errors++;
 		mcs->stats.rx_crc_errors++;
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index e59c485..0519637 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -329,7 +329,7 @@
 	}
 
 	fcs = ~(crc32_le(~0, rx_buff->data, len));
-	if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) {
+	if (fcs != get_unaligned_le32(rx_buff->data + len)) {
 		pr_debug("crc error calc 0x%x len %d\n", fcs, len);
 		stir->stats.rx_errors++;
 		stir->stats.rx_crc_errors++;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index acd082a..d15e00b 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1674,13 +1674,12 @@
 	if (vlsi_proc_root != NULL) {
 		struct proc_dir_entry *ent;
 
-		ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
+		ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
+				       vlsi_proc_root, VLSI_PROC_FOPS, ndev);
 		if (!ent) {
 			IRDA_WARNING("%s: failed to create proc entry\n",
 				     __FUNCTION__);
 		} else {
-			ent->data = ndev;
-			ent->proc_fops = VLSI_PROC_FOPS;
 			ent->size = 0;
 		}
 		idev->proc_entry = ent;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index cead81e..ef63c8d 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2437,7 +2437,7 @@
 	int status;
 
 	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		goto drop;
 
 	while (segs) {
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 4fad4dd..58a26a4 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1052,11 +1052,9 @@
 {
 	struct proc_dir_entry *p;
 
-	p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net);
+	p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops);
 	if (!p)
 		return -ENOMEM;
-
-	p->proc_fops = &pppoe_seq_fops;
 	return 0;
 }
 #else /* CONFIG_PROC_FS */
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 3d10ca0..244d783 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2469,12 +2469,12 @@
 		goto out_unregister_pppol2tp_proto;
 
 #ifdef CONFIG_PROC_FS
-	pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net);
+	pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0,
+					     &pppol2tp_proc_fops);
 	if (!pppol2tp_proc) {
 		err = -ENOMEM;
 		goto out_unregister_pppox_proto;
 	}
-	pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
 #endif /* CONFIG_PROC_FS */
 	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
 	       PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e3f74c9..b66c75e 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4361,7 +4361,7 @@
 	}
 
 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		goto tg3_tso_bug_end;
 
 	do {
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 6c6fc32..bc30c6e 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -482,7 +482,6 @@
 static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
 
 #define c_char const char
-#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a)))
 
 /*
 ** MII Information
@@ -4405,7 +4404,7 @@
 	}
     }
 
-    lp->infoleaf_offset = TWIDDLE(p+1);
+    lp->infoleaf_offset = get_unaligned_le16(p + 1);
 
     return 0;
 }
@@ -4476,7 +4475,7 @@
 
     while (count--) {
 	gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
-		                                   *p++ : TWIDDLE(w++)), dev);
+		                                   *p++ : get_unaligned_le16(w++)), dev);
 	mdelay(2);                          /* 2ms per action */
     }
 
@@ -4711,10 +4710,10 @@
 	lp->active = *p++;
 	lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
 	lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
-	lp->phy[lp->active].mc  = TWIDDLE(p); p += 2;
-	lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
-	lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
-	lp->phy[lp->active].ttm = TWIDDLE(p);
+	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
+	lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
+	lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
+	lp->phy[lp->active].ttm = get_unaligned_le16(p);
 	return 0;
     } else if ((lp->media == INIT) && (lp->timeout < 0)) {
         lp->ibn = 1;
@@ -4751,16 +4750,16 @@
 	lp->infoblock_media = (*p) & MEDIA_CODE;
 
         if ((*p++) & EXT_FIELD) {
-	    lp->cache.csr13 = TWIDDLE(p); p += 2;
-	    lp->cache.csr14 = TWIDDLE(p); p += 2;
-	    lp->cache.csr15 = TWIDDLE(p); p += 2;
+	    lp->cache.csr13 = get_unaligned_le16(p); p += 2;
+	    lp->cache.csr14 = get_unaligned_le16(p); p += 2;
+	    lp->cache.csr15 = get_unaligned_le16(p); p += 2;
 	} else {
 	    lp->cache.csr13 = CSR13;
 	    lp->cache.csr14 = CSR14;
 	    lp->cache.csr15 = CSR15;
 	}
-        lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
-        lp->cache.gep  = ((s32)(TWIDDLE(p)) << 16);
+        lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+        lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16);
 	lp->infoblock_csr6 = OMR_SIA;
 	lp->useMII = false;
 
@@ -4792,10 +4791,10 @@
 	if (MOTO_SROM_BUG) lp->active = 0;
 	lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
 	lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
-	lp->phy[lp->active].mc  = TWIDDLE(p); p += 2;
-	lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
-	lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
-	lp->phy[lp->active].ttm = TWIDDLE(p); p += 2;
+	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
+	lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
+	lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
+	lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
 	lp->phy[lp->active].mci = *p;
 	return 0;
     } else if ((lp->media == INIT) && (lp->timeout < 0)) {
@@ -4835,8 +4834,8 @@
         lp->cache.csr13 = CSR13;              /* Hard coded defaults */
 	lp->cache.csr14 = CSR14;
 	lp->cache.csr15 = CSR15;
-        lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
-        lp->cache.gep  = ((s32)(TWIDDLE(p)) << 16); p += 2;
+        lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+        lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
 	csr6 = *p++;
 	flags = *p++;
 
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index 9fb8d7f..f5f33b3 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -1017,4 +1017,4 @@
 #define DE4X5_SET_OMR           0x0d /* Set the OMR Register contents */
 #define DE4X5_GET_REG           0x0e /* Get the DE4X5 Registers */
 
-#define MOTO_SROM_BUG    ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((__le32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008))
+#define MOTO_SROM_BUG    (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 908422f..92c68a2 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -25,6 +25,7 @@
 #include <linux/pci.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/unaligned.h>
 
 
 
@@ -304,11 +305,7 @@
 
 #define RUN_AT(x) (jiffies + (x))
 
-#if defined(__i386__)			/* AKA get_unaligned() */
-#define get_u16(ptr) (*(u16 *)(ptr))
-#else
-#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
-#endif
+#define get_u16(ptr) get_unaligned_le16((ptr))
 
 struct medialeaf {
 	u8 type;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index fa1c1c3..f9d13fa 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -327,8 +327,8 @@
 	tp->dirty_rx = tp->dirty_tx = 0;
 
 	if (tp->flags & MC_HASH_ONLY) {
-		u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr));
-		u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4)));
+		u32 addr_low = get_unaligned_le32(dev->dev_addr);
+		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
 		if (tp->chip_id == AX88140) {
 			iowrite32(0, ioaddr + CSR13);
 			iowrite32(addr_low,  ioaddr + CSR14);
@@ -1437,13 +1437,13 @@
 			do
 				value = ioread32(ioaddr + CSR9);
 			while (value < 0  && --boguscnt > 0);
-			put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i);
+			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
 			sum += value & 0xffff;
 		}
 	} else if (chip_idx == COMET) {
 		/* No need to read the EEPROM. */
-		put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr);
-		put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4));
+		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
+		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
 		for (i = 0; i < 6; i ++)
 			sum += dev->dev_addr[i];
 	} else {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 932d6b1..45f47c1 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3657,7 +3657,7 @@
 	ptr += hdrlen;
 	if (hdrlen == 24)
 		ptr += 6;
-	gap = le16_to_cpu(get_unaligned((__le16 *)ptr));
+	gap = get_unaligned_le16(ptr);
 	ptr += sizeof(__le16);
 	if (gap) {
 		if (gap <= 8)
@@ -4347,24 +4347,28 @@
 static int proc_wepkey_open( struct inode *inode, struct file *file );
 
 static const struct file_operations proc_statsdelta_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.open		= proc_statsdelta_open,
 	.release	= proc_close
 };
 
 static const struct file_operations proc_stats_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.open		= proc_stats_open,
 	.release	= proc_close
 };
 
 static const struct file_operations proc_status_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.open		= proc_status_open,
 	.release	= proc_close
 };
 
 static const struct file_operations proc_SSID_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.write		= proc_write,
 	.open		= proc_SSID_open,
@@ -4372,6 +4376,7 @@
 };
 
 static const struct file_operations proc_BSSList_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.write		= proc_write,
 	.open		= proc_BSSList_open,
@@ -4379,6 +4384,7 @@
 };
 
 static const struct file_operations proc_APList_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.write		= proc_write,
 	.open		= proc_APList_open,
@@ -4386,6 +4392,7 @@
 };
 
 static const struct file_operations proc_config_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.write		= proc_write,
 	.open		= proc_config_open,
@@ -4393,6 +4400,7 @@
 };
 
 static const struct file_operations proc_wepkey_ops = {
+	.owner		= THIS_MODULE,
 	.read		= proc_read,
 	.write		= proc_write,
 	.open		= proc_wepkey_open,
@@ -4411,10 +4419,6 @@
 	void (*on_close) (struct inode *, struct file *);
 };
 
-#ifndef SETPROC_OPS
-#define SETPROC_OPS(entry, ops) (entry)->proc_fops = &(ops)
-#endif
-
 static int setup_proc_entry( struct net_device *dev,
 			     struct airo_info *apriv ) {
 	struct proc_dir_entry *entry;
@@ -4430,100 +4434,76 @@
 	apriv->proc_entry->owner = THIS_MODULE;
 
 	/* Setup the StatsDelta */
-	entry = create_proc_entry("StatsDelta",
-				  S_IFREG | (S_IRUGO&proc_perm),
-				  apriv->proc_entry);
+	entry = proc_create_data("StatsDelta",
+				 S_IFREG | (S_IRUGO&proc_perm),
+				 apriv->proc_entry, &proc_statsdelta_ops, dev);
 	if (!entry)
 		goto fail_stats_delta;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_statsdelta_ops);
 
 	/* Setup the Stats */
-	entry = create_proc_entry("Stats",
-				  S_IFREG | (S_IRUGO&proc_perm),
-				  apriv->proc_entry);
+	entry = proc_create_data("Stats",
+				 S_IFREG | (S_IRUGO&proc_perm),
+				 apriv->proc_entry, &proc_stats_ops, dev);
 	if (!entry)
 		goto fail_stats;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_stats_ops);
 
 	/* Setup the Status */
-	entry = create_proc_entry("Status",
-				  S_IFREG | (S_IRUGO&proc_perm),
-				  apriv->proc_entry);
+	entry = proc_create_data("Status",
+				 S_IFREG | (S_IRUGO&proc_perm),
+				 apriv->proc_entry, &proc_status_ops, dev);
 	if (!entry)
 		goto fail_status;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_status_ops);
 
 	/* Setup the Config */
-	entry = create_proc_entry("Config",
-				  S_IFREG | proc_perm,
-				  apriv->proc_entry);
+	entry = proc_create_data("Config",
+				 S_IFREG | proc_perm,
+				 apriv->proc_entry, &proc_config_ops, dev);
 	if (!entry)
 		goto fail_config;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_config_ops);
 
 	/* Setup the SSID */
-	entry = create_proc_entry("SSID",
-				  S_IFREG | proc_perm,
-				  apriv->proc_entry);
+	entry = proc_create_data("SSID",
+				 S_IFREG | proc_perm,
+				 apriv->proc_entry, &proc_SSID_ops, dev);
 	if (!entry)
 		goto fail_ssid;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_SSID_ops);
 
 	/* Setup the APList */
-	entry = create_proc_entry("APList",
-				  S_IFREG | proc_perm,
-				  apriv->proc_entry);
+	entry = proc_create_data("APList",
+				 S_IFREG | proc_perm,
+				 apriv->proc_entry, &proc_APList_ops, dev);
 	if (!entry)
 		goto fail_aplist;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_APList_ops);
 
 	/* Setup the BSSList */
-	entry = create_proc_entry("BSSList",
-				  S_IFREG | proc_perm,
-				  apriv->proc_entry);
+	entry = proc_create_data("BSSList",
+				 S_IFREG | proc_perm,
+				 apriv->proc_entry, &proc_BSSList_ops, dev);
 	if (!entry)
 		goto fail_bsslist;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_BSSList_ops);
 
 	/* Setup the WepKey */
-	entry = create_proc_entry("WepKey",
-				  S_IFREG | proc_perm,
-				  apriv->proc_entry);
+	entry = proc_create_data("WepKey",
+				 S_IFREG | proc_perm,
+				 apriv->proc_entry, &proc_wepkey_ops, dev);
 	if (!entry)
 		goto fail_wepkey;
 	entry->uid = proc_uid;
 	entry->gid = proc_gid;
-	entry->data = dev;
-	entry->owner = THIS_MODULE;
-	SETPROC_OPS(entry, proc_wepkey_ops);
 
 	return 0;
 
@@ -5625,9 +5605,9 @@
 	int have_isa_dev = 0;
 #endif
 
-	airo_entry = create_proc_entry("aironet",
+	airo_entry = create_proc_entry("driver/aironet",
 				       S_IFDIR | airo_perm,
-				       proc_root_driver);
+				       NULL);
 
 	if (airo_entry) {
 		airo_entry->uid = proc_uid;
@@ -5651,7 +5631,7 @@
 	airo_print_info("", "Finished probing for PCI adapters");
 
 	if (i) {
-		remove_proc_entry("aironet", proc_root_driver);
+		remove_proc_entry("driver/aironet", NULL);
 		return i;
 	}
 #endif
@@ -5673,7 +5653,7 @@
 #ifdef CONFIG_PCI
 	pci_unregister_driver(&airo_driver);
 #endif
-	remove_proc_entry("aironet", proc_root_driver);
+	remove_proc_entry("driver/aironet", NULL);
 }
 
 /*
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index e18305b..4e5c8fc 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -58,10 +58,6 @@
 #include "reg.h"
 #include "debug.h"
 
-/* unaligned little endian access */
-#define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p))))
-#define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p))))
-
 enum {
 	ATH_LED_TX,
 	ATH_LED_RX,
@@ -2909,9 +2905,9 @@
 			if (!mclist)
 				break;
 			/* calculate XOR of eight 6-bit values */
-			val = LE_READ_4(mclist->dmi_addr + 0);
+			val = get_unaligned_le32(mclist->dmi_addr + 0);
 			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
-			val = LE_READ_4(mclist->dmi_addr + 3);
+			val = get_unaligned_le32(mclist->dmi_addr + 3);
 			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
 			pos &= 0x3f;
 			mfilt[pos / 32] |= (1 << (pos % 32));
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4bf8a99..8c24cd7 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2171,7 +2171,7 @@
 				goto err_format;
 			array_size -= sizeof(iv->data.d32);
 
-			value = be32_to_cpu(get_unaligned(&iv->data.d32));
+			value = get_unaligned_be32(&iv->data.d32);
 			b43_write32(dev, offset, value);
 
 			iv = (const struct b43_iv *)((const uint8_t *)iv +
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index ef829ee..14a5eea 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1720,7 +1720,7 @@
 				goto err_format;
 			array_size -= sizeof(iv->data.d32);
 
-			value = be32_to_cpu(get_unaligned(&iv->data.d32));
+			value = get_unaligned_be32(&iv->data.d32);
 			b43legacy_write32(dev, offset, value);
 
 			iv = (const struct b43legacy_iv *)((const uint8_t *)iv +
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 598e4ee..d340683 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -554,40 +554,36 @@
 	iwl3945_rt->rt_hdr.it_pad = 0;
 
 	/* total header + data */
-	put_unaligned(cpu_to_le16(sizeof(*iwl3945_rt)),
-		      &iwl3945_rt->rt_hdr.it_len);
+	put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len);
 
 	/* Indicate all the fields we add to the radiotap header */
-	put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
-				  (1 << IEEE80211_RADIOTAP_FLAGS) |
-				  (1 << IEEE80211_RADIOTAP_RATE) |
-				  (1 << IEEE80211_RADIOTAP_CHANNEL) |
-				  (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
-				  (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
-				  (1 << IEEE80211_RADIOTAP_ANTENNA)),
-		      &iwl3945_rt->rt_hdr.it_present);
+	put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
+			   (1 << IEEE80211_RADIOTAP_FLAGS) |
+			   (1 << IEEE80211_RADIOTAP_RATE) |
+			   (1 << IEEE80211_RADIOTAP_CHANNEL) |
+			   (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+			   (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
+			   (1 << IEEE80211_RADIOTAP_ANTENNA),
+			&iwl3945_rt->rt_hdr.it_present);
 
 	/* Zero the flags, we'll add to them as we go */
 	iwl3945_rt->rt_flags = 0;
 
-	put_unaligned(cpu_to_le64(tsf), &iwl3945_rt->rt_tsf);
+	put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf);
 
 	iwl3945_rt->rt_dbmsignal = signal;
 	iwl3945_rt->rt_dbmnoise = noise;
 
 	/* Convert the channel frequency and set the flags */
-	put_unaligned(cpu_to_le16(stats->freq), &iwl3945_rt->rt_channelMHz);
+	put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz);
 	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
-		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
-					  IEEE80211_CHAN_5GHZ),
+		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
 			      &iwl3945_rt->rt_chbitmask);
 	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
-		put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
-					  IEEE80211_CHAN_2GHZ),
+		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
 			      &iwl3945_rt->rt_chbitmask);
 	else	/* 802.11g */
-		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
-					  IEEE80211_CHAN_2GHZ),
+		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
 			      &iwl3945_rt->rt_chbitmask);
 
 	if (rate == -1)
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index e72c97a..1a409fc 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -522,7 +522,7 @@
 
 	if (*bytesleft >= sizeof(beaconsize)) {
 		/* Extract & convert beacon size from the command buffer */
-		beaconsize = le16_to_cpu(get_unaligned((__le16 *)*pbeaconinfo));
+		beaconsize = get_unaligned_le16(*pbeaconinfo);
 		*bytesleft -= sizeof(beaconsize);
 		*pbeaconinfo += sizeof(beaconsize);
 	}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index e34675c..5316074 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -545,11 +545,11 @@
 	 * be padded. Unaligned access might also happen if the length_info
 	 * structure is not present.
 	 */
-	if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG))
+	if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG)
 	{
 		unsigned int l, k, n;
 		for (i = 0, l = 0;; i++) {
-			k = le16_to_cpu(get_unaligned(&length_info->length[i]));
+			k = get_unaligned_le16(&length_info->length[i]);
 			if (k == 0)
 				return;
 			n = l+k;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 2464072..57e1f49 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1062,7 +1062,7 @@
 		buf_addr = rx_skb->data;
 		data_size = (le32_to_cpu(desc->dbdma_cmd) -
 			le32_to_cpu(desc->result_status)) & 0xffff;
-		frame_status = le16_to_cpu(get_unaligned((__le16*)&(buf_addr[data_size - 2])));
+		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
 		if (yellowfin_debug > 4)
 			printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
 				   frame_status);
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index e07492b..208dd12 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/nubus.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/module.h>
 
@@ -28,38 +29,36 @@
 #include <asm/byteorder.h>
 
 static int
-get_nubus_dev_info(char *buf, char **start, off_t pos, int count)
+nubus_devices_proc_show(struct seq_file *m, void *v)
 {
 	struct nubus_dev *dev = nubus_devices;
-	off_t at = 0;
-	int len, cnt;
 
-	cnt = 0;
-	while (dev && count > cnt) {
-		len = sprintf(buf, "%x\t%04x %04x %04x %04x",
+	while (dev) {
+		seq_printf(m, "%x\t%04x %04x %04x %04x",
 			      dev->board->slot,
 			      dev->category,
 			      dev->type,
 			      dev->dr_sw,
 			      dev->dr_hw);
-		len += sprintf(buf+len,
-			       "\t%08lx",
-			       dev->board->slot_addr);
-		buf[len++] = '\n';
-		at += len;
-		if (at >= pos) {
-			if (!*start) {
-				*start = buf + (pos - (at - len));
-				cnt = at - pos;
-			} else
-				cnt += len;
-			buf += len;
-		}
+		seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
 		dev = dev->next;
 	}
-	return (count > cnt) ? cnt : count;
+	return 0;
 }
 
+static int nubus_devices_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nubus_devices_proc_show, NULL);
+}
+
+static const struct file_operations nubus_devices_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nubus_devices_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static struct proc_dir_entry *proc_bus_nubus_dir;
 
 static void nubus_proc_subdir(struct nubus_dev* dev,
@@ -171,8 +170,7 @@
 {
 	if (!MACH_IS_MAC)
 		return;
-	proc_bus_nubus_dir = proc_mkdir("nubus", proc_bus);
-	create_proc_info_entry("devices", 0, proc_bus_nubus_dir,
-				get_nubus_dev_info);
+	proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+	proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
 	proc_bus_nubus_add_devices();
 }
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 62db3c3..07d2a8d 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1551,8 +1551,7 @@
 {
 	int i;
 	struct ioc *ioc, **ioc_p = &ioc_list;
-	struct proc_dir_entry *info_entry, *bitmap_entry;
-	
+
 	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
 	if (ioc == NULL) {
 		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
@@ -1580,13 +1579,10 @@
 	HBA_DATA(dev->dev.platform_data)->iommu = ioc;
 	
 	if (ioc_count == 0) {
-		info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root);
-		if (info_entry)
-			info_entry->proc_fops = &ccio_proc_info_fops;
-
-		bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root);
-		if (bitmap_entry)
-			bitmap_entry->proc_fops = &ccio_proc_bitmap_fops;
+		proc_create(MODULE_NAME, 0, proc_runway_root,
+			    &ccio_proc_info_fops);
+		proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
+			    &ccio_proc_bitmap_fops);
 	}
 
 	ioc_count++;
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8c4d2c1..afc849b 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1895,7 +1895,9 @@
 	int i;
 	char *version;
 	void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
-	struct proc_dir_entry *info_entry, *bitmap_entry, *root;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *root;
+#endif
 
 	sba_dump_ranges(sba_addr);
 
@@ -1973,14 +1975,8 @@
 		break;
 	}
 
-	info_entry = create_proc_entry("sba_iommu", 0, root);
-	bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root);
-
-	if (info_entry)
-		info_entry->proc_fops = &sba_proc_fops;
-
-	if (bitmap_entry)
-		bitmap_entry->proc_fops = &sba_proc_bitmap_fops;
+	proc_create("sba_iommu", 0, root, &sba_proc_fops);
+	proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
 #endif
 
 	parisc_vmerge_boundary = IOVP_SIZE;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index a858089..e71092e 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3082,6 +3082,7 @@
 static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;}
 #endif /* CONFIG_PCI */
 
+#ifdef CONFIG_PNP
 
 static const struct pnp_device_id parport_pc_pnp_tbl[] = {
 	/* Standard LPT Printer Port */
@@ -3148,6 +3149,9 @@
 	.remove		= parport_pc_pnp_remove,
 };
 
+#else
+static struct pnp_driver parport_pc_pnp_driver;
+#endif /* CONFIG_PNP */
 
 static int __devinit parport_pc_platform_probe(struct platform_device *pdev)
 {
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 96ac540..d39a78d 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -31,7 +31,7 @@
 {
 	acpi_status status = AE_NOT_FOUND;
 	struct pci_dev *pdev = pciedev->port;
-	acpi_handle handle = 0;
+	acpi_handle handle = NULL;
 
 	if (acpi_pci_disabled)
 		return -1;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index ef18fcd..963a976 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -293,6 +293,7 @@
 #endif /* HAVE_PCI_MMAP */
 
 static const struct file_operations proc_bus_pci_operations = {
+	.owner		= THIS_MODULE,
 	.llseek		= proc_bus_pci_lseek,
 	.read		= proc_bus_pci_read,
 	.write		= proc_bus_pci_write,
@@ -406,11 +407,10 @@
 	}
 
 	sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
-	e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir);
+	e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
+			     &proc_bus_pci_operations, dev);
 	if (!e)
 		return -ENOMEM;
-	e->proc_fops = &proc_bus_pci_operations;
-	e->data = dev;
 	e->size = dev->cfg_size;
 	dev->procent = e;
 
@@ -462,6 +462,7 @@
 	return seq_open(file, &proc_bus_pci_devices_op);
 }
 static const struct file_operations proc_bus_pci_dev_operations = {
+	.owner		= THIS_MODULE,
 	.open		= proc_bus_pci_dev_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -470,12 +471,10 @@
 
 static int __init pci_proc_init(void)
 {
-	struct proc_dir_entry *entry;
 	struct pci_dev *dev = NULL;
-	proc_bus_pci_dir = proc_mkdir("pci", proc_bus);
-	entry = create_proc_entry("devices", 0, proc_bus_pci_dir);
-	if (entry)
-		entry->proc_fops = &proc_bus_pci_dev_operations;
+	proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
+	proc_create("devices", 0, proc_bus_pci_dir,
+		    &proc_bus_pci_dev_operations);
 	proc_initialized = 1;
 	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 		pci_proc_attach_device(dev);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 8d88526..1b0eb5a 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -38,7 +38,6 @@
 config PCMCIA
 	tristate "16-bit PCMCIA support"
 	select CRC32
-	select HAVE_IDE
 	default y
 	---help---
 	   This option enables support for 16-bit PCMCIA cards. Most older
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 06a85d7..3637953 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -402,15 +402,6 @@
     
 ======================================================================*/
 
-static inline u16 cis_get_u16(void *ptr)
-{
-	return le16_to_cpu(get_unaligned((__le16 *) ptr));
-}
-static inline u32 cis_get_u32(void *ptr)
-{
-	return le32_to_cpu(get_unaligned((__le32 *) ptr));
-}
-
 typedef struct tuple_flags {
     u_int		link_space:4;
     u_int		has_link:1;
@@ -471,7 +462,7 @@
 	/* Get indirect link from the MFC tuple */
 	read_cis_cache(s, LINK_SPACE(tuple->Flags),
 		       tuple->LinkOffset, 5, link);
-	ofs = cis_get_u32(link + 1);
+	ofs = get_unaligned_le32(link + 1);
 	SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
 	/* Move to the next indirect link */
 	tuple->LinkOffset += 5;
@@ -679,8 +670,8 @@
     if (tuple->TupleDataLen < 5)
 	return CS_BAD_TUPLE;
     p = (u_char *) tuple->TupleData;
-    csum->addr = tuple->CISOffset + cis_get_u16(p) - 2;
-    csum->len = cis_get_u16(p + 2);
+    csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
+    csum->len = get_unaligned_le16(p + 2);
     csum->sum = *(p + 4);
     return CS_SUCCESS;
 }
@@ -691,7 +682,7 @@
 {
     if (tuple->TupleDataLen < 4)
 	return CS_BAD_TUPLE;
-    link->addr = cis_get_u32(tuple->TupleData);
+    link->addr = get_unaligned_le32(tuple->TupleData);
     return CS_SUCCESS;
 }
 
@@ -710,7 +701,7 @@
 	return CS_BAD_TUPLE;
     for (i = 0; i < link->nfn; i++) {
 	link->fn[i].space = *p; p++;
-	link->fn[i].addr = cis_get_u32(p);
+	link->fn[i].addr = get_unaligned_le32(p);
 	p += 4;
     }
     return CS_SUCCESS;
@@ -800,8 +791,8 @@
 {
     if (tuple->TupleDataLen < 4)
 	return CS_BAD_TUPLE;
-    m->manf = cis_get_u16(tuple->TupleData);
-    m->card = cis_get_u16(tuple->TupleData + 2);
+    m->manf = get_unaligned_le16(tuple->TupleData);
+    m->card = get_unaligned_le16(tuple->TupleData + 2);
     return CS_SUCCESS;
 }
 
@@ -1100,7 +1091,7 @@
 	break;
     case 0x20:
 	entry->mem.nwin = 1;
-	entry->mem.win[0].len = cis_get_u16(p) << 8;
+	entry->mem.win[0].len = get_unaligned_le16(p) << 8;
 	entry->mem.win[0].card_addr = 0;
 	entry->mem.win[0].host_addr = 0;
 	p += 2;
@@ -1108,8 +1099,8 @@
 	break;
     case 0x40:
 	entry->mem.nwin = 1;
-	entry->mem.win[0].len = cis_get_u16(p) << 8;
-	entry->mem.win[0].card_addr = cis_get_u16(p + 2) << 8;
+	entry->mem.win[0].len = get_unaligned_le16(p) << 8;
+	entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
 	entry->mem.win[0].host_addr = 0;
 	p += 4;
 	if (p > q) return CS_BAD_TUPLE;
@@ -1146,7 +1137,7 @@
     p = (u_char *)tuple->TupleData;
     bar->attr = *p;
     p += 2;
-    bar->size = cis_get_u32(p);
+    bar->size = get_unaligned_le32(p);
     return CS_SUCCESS;
 }
 
@@ -1159,7 +1150,7 @@
 	return CS_BAD_TUPLE;
     config->last_idx = *(++p);
     p++;
-    config->base = cis_get_u32(p);
+    config->base = get_unaligned_le32(p);
     config->subtuples = tuple->TupleDataLen - 6;
     return CS_SUCCESS;
 }
@@ -1275,7 +1266,7 @@
 
     v2->vers = p[0];
     v2->comply = p[1];
-    v2->dindex = cis_get_u16(p +2 );
+    v2->dindex = get_unaligned_le16(p +2 );
     v2->vspec8 = p[6];
     v2->vspec9 = p[7];
     v2->nhdr = p[8];
@@ -1316,8 +1307,8 @@
 
     fmt->type = p[0];
     fmt->edc = p[1];
-    fmt->offset = cis_get_u32(p + 2);
-    fmt->length = cis_get_u32(p + 6);
+    fmt->offset = get_unaligned_le32(p + 2);
+    fmt->length = get_unaligned_le32(p + 6);
 
     return CS_SUCCESS;
 }
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 2b8266c..3f94eda 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -85,6 +85,7 @@
 }
 
 static const struct file_operations isapnp_proc_bus_file_operations = {
+	.owner	= THIS_MODULE,
 	.llseek = isapnp_proc_bus_lseek,
 	.read = isapnp_proc_bus_read,
 };
@@ -102,12 +103,10 @@
 			return -ENOMEM;
 	}
 	sprintf(name, "%02x", dev->number);
-	e = dev->procent = create_proc_entry(name, S_IFREG | S_IRUGO, de);
+	e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
+			&isapnp_proc_bus_file_operations, dev);
 	if (!e)
 		return -ENOMEM;
-	e->proc_fops = &isapnp_proc_bus_file_operations;
-	e->owner = THIS_MODULE;
-	e->data = dev;
 	e->size = 256;
 	return 0;
 }
@@ -116,7 +115,7 @@
 {
 	struct pnp_dev *dev;
 
-	isapnp_proc_bus_dir = proc_mkdir("isapnp", proc_bus);
+	isapnp_proc_bus_dir = proc_mkdir("bus/isapnp", NULL);
 	protocol_for_each_dev(&isapnp_protocol, dev) {
 		isapnp_proc_attach_device(dev);
 	}
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index bb19bc9..46d506f 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -256,7 +256,7 @@
  */
 int __init pnpbios_proc_init(void)
 {
-	proc_pnp = proc_mkdir("pnp", proc_bus);
+	proc_pnp = proc_mkdir("bus/pnp", NULL);
 	if (!proc_pnp)
 		return -EIO;
 	proc_pnp_boot = proc_mkdir("boot", proc_pnp);
@@ -294,5 +294,5 @@
 	remove_proc_entry("configuration_info", proc_pnp);
 	remove_proc_entry("devices", proc_pnp);
 	remove_proc_entry("boot", proc_pnp);
-	remove_proc_entry("pnp", proc_bus);
+	remove_proc_entry("bus/pnp", NULL);
 }
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index bdb9b72..71be36f 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -262,7 +262,7 @@
 		struct ds2760_device_info, monitor_work.work);
 	const int interval = HZ * 60;
 
-	dev_dbg(di->dev, "%s\n", __FUNCTION__);
+	dev_dbg(di->dev, "%s\n", __func__);
 
 	ds2760_battery_update_status(di);
 	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval);
@@ -275,7 +275,7 @@
 {
 	struct ds2760_device_info *di = to_ds2760_device_info(psy);
 
-	dev_dbg(di->dev, "%s\n", __FUNCTION__);
+	dev_dbg(di->dev, "%s\n", __func__);
 
 	cancel_delayed_work(&di->monitor_work);
 	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index af7a231..ab1e828 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -315,7 +315,6 @@
 	if (ret)
 		goto battery_failed;
 
-	olpc_register_battery_callback(&olpc_battery_trigger_uevent);
 	goto success;
 
 battery_failed:
@@ -328,7 +327,6 @@
 
 static void __exit olpc_bat_exit(void)
 {
-	olpc_deregister_battery_callback();
 	power_supply_unregister(&olpc_bat);
 	power_supply_unregister(&olpc_ac);
 	platform_device_unregister(bat_pdev);
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 03d6a38..138dd76 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -39,7 +39,7 @@
 	struct power_supply *psy = container_of(work, struct power_supply,
 						changed_work);
 
-	dev_dbg(psy->dev, "%s\n", __FUNCTION__);
+	dev_dbg(psy->dev, "%s\n", __func__);
 
 	class_for_each_device(power_supply_class, psy,
 			      __power_supply_changed_work);
@@ -51,7 +51,7 @@
 
 void power_supply_changed(struct power_supply *psy)
 {
-	dev_dbg(psy->dev, "%s\n", __FUNCTION__);
+	dev_dbg(psy->dev, "%s\n", __func__);
 
 	schedule_work(&psy->changed_work);
 }
@@ -82,7 +82,7 @@
 	error = class_for_each_device(power_supply_class, psy,
 				      __power_supply_am_i_supplied);
 
-	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error);
+	dev_dbg(psy->dev, "%s %d\n", __func__, error);
 
 	return error;
 }
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c
index fa3034f..2dece40 100644
--- a/drivers/power/power_supply_leds.c
+++ b/drivers/power/power_supply_leds.c
@@ -24,7 +24,7 @@
 	if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
 		return;
 
-	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, status.intval);
+	dev_dbg(psy->dev, "%s %d\n", __func__, status.intval);
 
 	switch (status.intval) {
 	case POWER_SUPPLY_STATUS_FULL:
@@ -101,7 +101,7 @@
 	if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online))
 		return;
 
-	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, online.intval);
+	dev_dbg(psy->dev, "%s %d\n", __func__, online.intval);
 
 	if (online.intval)
 		led_trigger_event(psy->online_trig, LED_FULL);
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 4f28045..8624f55 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -419,7 +419,7 @@
 		return -ENOMEM;
 
 	rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE);
-	if (unlikely(IS_ERR(rtc))) {
+	if (IS_ERR(rtc)) {
 		ret = PTR_ERR(rtc->rtc_dev);
 		goto err;
 	}
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index 8d300e6..0c6257a 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -108,12 +108,10 @@
 	if (rtc->id == 0) {
 		struct proc_dir_entry *ent;
 
-		ent = create_proc_entry("driver/rtc", 0, NULL);
-		if (ent) {
-			ent->proc_fops = &rtc_proc_fops;
+		ent = proc_create_data("driver/rtc", 0, NULL,
+				       &rtc_proc_fops, rtc);
+		if (ent)
 			ent->owner = rtc->owner;
-			ent->data = rtc;
-		}
 	}
 }
 
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 556063e..03c0e40 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -157,6 +157,7 @@
 }
 
 static const struct file_operations dasd_devices_file_ops = {
+	.owner		= THIS_MODULE,
 	.open		= dasd_devices_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -311,17 +312,16 @@
 int
 dasd_proc_init(void)
 {
-	dasd_proc_root_entry = proc_mkdir("dasd", &proc_root);
+	dasd_proc_root_entry = proc_mkdir("dasd", NULL);
 	if (!dasd_proc_root_entry)
 		goto out_nodasd;
 	dasd_proc_root_entry->owner = THIS_MODULE;
-	dasd_devices_entry = create_proc_entry("devices",
-					       S_IFREG | S_IRUGO | S_IWUSR,
-					       dasd_proc_root_entry);
+	dasd_devices_entry = proc_create("devices",
+					 S_IFREG | S_IRUGO | S_IWUSR,
+					 dasd_proc_root_entry,
+					 &dasd_devices_file_ops);
 	if (!dasd_devices_entry)
 		goto out_nodevices;
-	dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
-	dasd_devices_entry->owner = THIS_MODULE;
 	dasd_statistics_entry = create_proc_entry("statistics",
 						  S_IFREG | S_IRUGO | S_IWUSR,
 						  dasd_proc_root_entry);
@@ -335,7 +335,7 @@
  out_nostatistics:
 	remove_proc_entry("devices", dasd_proc_root_entry);
  out_nodevices:
-	remove_proc_entry("dasd", &proc_root);
+	remove_proc_entry("dasd", NULL);
  out_nodasd:
 	return -ENOENT;
 }
@@ -345,5 +345,5 @@
 {
 	remove_proc_entry("devices", dasd_proc_root_entry);
 	remove_proc_entry("statistics", dasd_proc_root_entry);
-	remove_proc_entry("dasd", &proc_root);
+	remove_proc_entry("dasd", NULL);
 }
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index c9b96d5..e7c888c 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -111,6 +111,7 @@
 
 static const struct file_operations tape_proc_ops =
 {
+	.owner		= THIS_MODULE,
 	.open		= tape_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -124,14 +125,12 @@
 tape_proc_init(void)
 {
 	tape_proc_devices =
-		create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
-				   &proc_root);
+		proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &tape_proc_ops);
 	if (tape_proc_devices == NULL) {
 		PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
 		return;
 	}
-	tape_proc_devices->proc_fops = &tape_proc_ops;
-	tape_proc_devices->owner = THIS_MODULE;
 }
 
 /*
@@ -141,5 +140,5 @@
 tape_proc_cleanup(void)
 {
 	if (tape_proc_devices != NULL)
-		remove_proc_entry ("tapedevices", &proc_root);
+		remove_proc_entry ("tapedevices", NULL);
 }
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index e8597ec..40ef948 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -374,13 +374,10 @@
 {
 	struct proc_dir_entry *entry;
 
-	entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
-				   &proc_root);
+	entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+			    &cio_ignore_proc_fops);
 	if (!entry)
 		return -ENOENT;
-
-	entry->proc_fops = &cio_ignore_proc_fops;
-
 	return 0;
 }
 
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 10aa1e7..43876e2 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -3632,7 +3632,7 @@
 {
         proc_perf_file_registration=0;
 	qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
-					      S_IFREG|0444,&proc_root);
+					      S_IFREG|0444,NULL);
 	if (qdio_perf_proc_file) {
 		qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
 	} else proc_perf_file_registration=-1;
@@ -3647,7 +3647,7 @@
 qdio_remove_procfs_entry(void)
 {
         if (!proc_perf_file_registration) /* means if it went ok earlier */
-		remove_proc_entry(QDIO_PERF,&proc_root);
+		remove_proc_entry(QDIO_PERF,NULL);
 }
 
 /**
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index a09b2d3..f5215fd 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -994,13 +994,13 @@
 	SCpnt->SCp.sent_command	= 0;
 
 	if(SCpnt->SCp.phase & (resetting|check_condition)) {
-		if(SCpnt->host_scribble==0 || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
+		if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
 			printk(ERR_LEAD "cannot reuse command\n", CMDINFO(SCpnt));
 			return FAILED;
 		}
 	} else {
 		SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
-		if(SCpnt->host_scribble==0) {
+		if(!SCpnt->host_scribble) {
 			printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
 			return FAILED;
 		}
@@ -1162,7 +1162,7 @@
 	}
 
 	DO_LOCK(flags);
-	issued       = remove_SC(&ISSUE_SC, SCpnt)==0;
+	issued       = remove_SC(&ISSUE_SC, SCpnt) == NULL;
 	disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt);
 	DO_UNLOCK(flags);
 
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 1ac1197..f220e5e 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -50,7 +50,7 @@
 	/*
 	 * Lock out other contenders for our i/o space.
 	 */
-	if (request_region(port, AHC_EISA_IOSIZE, "aic7xxx") == 0)
+	if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx"))
 		return (ENOMEM);
 	ahc->tag = BUS_SPACE_PIO;
 	ahc->bsh.ioport = port;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d7628f..00f5b98 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -352,7 +352,7 @@
 	*base = pci_resource_start(ahc->dev_softc, 0);
 	if (*base == 0)
 		return (ENOMEM);
-	if (request_region(*base, 256, "aic7xxx") == 0)
+	if (!request_region(*base, 256, "aic7xxx"))
 		return (ENOMEM);
 	return (0);
 }
@@ -369,7 +369,7 @@
 	start = pci_resource_start(ahc->dev_softc, 1);
 	if (start != 0) {
 		*bus_addr = start;
-		if (request_mem_region(start, 0x1000, "aic7xxx") == 0)
+		if (!request_mem_region(start, 0x1000, "aic7xxx"))
 			error = ENOMEM;
 		if (error == 0) {
 			*maddr = ioremap_nocache(start, 256);
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index c9dd839..ac92ac1 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -153,7 +153,7 @@
 
 static u8 adpt_read_blink_led(adpt_hba* host)
 {
-	if(host->FwDebugBLEDflag_P != 0) {
+	if (host->FwDebugBLEDflag_P) {
 		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
 			return readb(host->FwDebugBLEDvalue_P);
 		}
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 2cd6b49..c33bcb2 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1443,7 +1443,7 @@
 	   current_SC->SCp.this_residual    = current_SC->SCp.buffer->length;
 	   current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
    } else {
-	   current_SC->SCp.ptr              = 0;
+	   current_SC->SCp.ptr              = NULL;
 	   current_SC->SCp.this_residual    = 0;
 	   current_SC->SCp.buffer           = NULL;
 	   current_SC->SCp.buffers_residual = 0;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 3255363..44d8d51 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -134,6 +134,7 @@
 static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 		unsigned int bcount)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	int count;
 	char *buf;
 
@@ -145,14 +146,12 @@
 			local_irq_save(flags);
 			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
 					pc->sg->offset;
-			drive->hwif->atapi_input_bytes(drive,
-						buf + pc->b_count, count);
+			hwif->input_data(drive, NULL, buf + pc->b_count, count);
 			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
 			local_irq_restore(flags);
 		} else {
 			buf = sg_virt(pc->sg);
-			drive->hwif->atapi_input_bytes(drive,
-						buf + pc->b_count, count);
+			hwif->input_data(drive, NULL, buf + pc->b_count, count);
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
@@ -165,13 +164,14 @@
 
 	if (bcount) {
 		printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-		ide_atapi_discard_data(drive, bcount);
+		ide_pad_transfer(drive, 0, bcount);
 	}
 }
 
 static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 		unsigned int bcount)
 {
+	ide_hwif_t *hwif = drive->hwif;
 	int count;
 	char *buf;
 
@@ -183,14 +183,12 @@
 			local_irq_save(flags);
 			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
 						pc->sg->offset;
-			drive->hwif->atapi_output_bytes(drive,
-						buf + pc->b_count, count);
+			hwif->output_data(drive, NULL, buf + pc->b_count, count);
 			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
 			local_irq_restore(flags);
 		} else {
 			buf = sg_virt(pc->sg);
-			drive->hwif->atapi_output_bytes(drive,
-						buf + pc->b_count, count);
+			hwif->output_data(drive, NULL, buf + pc->b_count, count);
 		}
 		bcount -= count; pc->b_count += count;
 		if (pc->b_count == pc->sg->length) {
@@ -203,7 +201,7 @@
 
 	if (bcount) {
 		printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-		ide_atapi_write_zeros(drive, bcount);
+		ide_pad_transfer(drive, 1, bcount);
 	}
 }
 
@@ -258,7 +256,8 @@
 
 	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
 		/* force an abort */
-		hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr);
+		hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
+			       hwif->io_ports.command_addr);
 
 	rq->errors++;
 
@@ -431,14 +430,15 @@
 						idescsi_input_buffers(drive, pc,
 									temp);
 					else
-						drive->hwif->atapi_input_bytes(drive, pc->cur_pos, temp);
+						hwif->input_data(drive, NULL,
+							pc->cur_pos, temp);
 					printk(KERN_ERR "ide-scsi: transferred"
 							" %d of %d bytes\n",
 							temp, bcount);
 				}
 				pc->xferred += temp;
 				pc->cur_pos += temp;
-				ide_atapi_discard_data(drive, bcount - temp);
+				ide_pad_transfer(drive, 0, bcount - temp);
 				ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
 				return ide_started;
 			}
@@ -452,15 +452,13 @@
 		if (pc->sg)
 			idescsi_input_buffers(drive, pc, bcount);
 		else
-			hwif->atapi_input_bytes(drive, pc->cur_pos,
-						bcount);
+			hwif->input_data(drive, NULL, pc->cur_pos, bcount);
 	} else {
 		pc->flags |= PC_FLAG_WRITING;
 		if (pc->sg)
 			idescsi_output_buffers(drive, pc, bcount);
 		else
-			hwif->atapi_output_bytes(drive, pc->cur_pos,
-						 bcount);
+			hwif->output_data(drive, NULL, pc->cur_pos, bcount);
 	}
 	/* Update the current position */
 	pc->xferred += bcount;
@@ -493,8 +491,10 @@
 	BUG_ON(HWGROUP(drive)->handler != NULL);
 	/* Set the interrupt routine */
 	ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
+
 	/* Send the actual packet */
-	drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
+	hwif->output_data(drive, NULL, scsi->pc->c, 12);
+
 	if (pc->flags & PC_FLAG_DMA_OK) {
 		pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
 		hwif->dma_ops->dma_start(drive);
@@ -574,7 +574,7 @@
 		return ide_started;
 	} else {
 		/* Issue the packet command */
-		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr);
+		ide_execute_pkt_cmd(drive);
 		return idescsi_transfer_pc(drive);
 	}
 }
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index b135a1e..18551aa 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4996,7 +4996,7 @@
 		max_mbox_busy_wait = MBOX_BUSY_WAIT;
 
 #ifdef CONFIG_PROC_FS
-	mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root);
+	mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
 	if (!mega_proc_dir_entry) {
 		printk(KERN_WARNING
 				"megaraid: failed to create megaraid root\n");
@@ -5005,7 +5005,7 @@
 	error = pci_register_driver(&megaraid_pci_driver);
 	if (error) {
 #ifdef CONFIG_PROC_FS
-		remove_proc_entry("megaraid", &proc_root);
+		remove_proc_entry("megaraid", NULL);
 #endif
 		return error;
 	}
@@ -5035,7 +5035,7 @@
 	pci_unregister_driver(&megaraid_pci_driver);
 
 #ifdef CONFIG_PROC_FS
-	remove_proc_entry("megaraid", &proc_root);
+	remove_proc_entry("megaraid", NULL);
 #endif
 }
 
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index d892894..ceab4f7 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8186,7 +8186,7 @@
 	cmd->next_wcmd = NULL;
 	if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
 	else {
-		while ((wcmd->next_wcmd) != 0)
+		while (wcmd->next_wcmd)
 			wcmd = (struct scsi_cmnd *) wcmd->next_wcmd;
 		wcmd->next_wcmd = (char *) cmd;
 	}
@@ -8222,7 +8222,7 @@
 #ifdef DEBUG_WAITING_LIST
 	if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
 #endif
-	while ((wcmd = waiting_list) != 0) {
+	while ((wcmd = waiting_list) != NULL) {
 		waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
 		wcmd->next_wcmd = NULL;
 		if (sts == DID_OK) {
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 07103c3..f6600bf 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1773,7 +1773,7 @@
 	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-	set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index b8de041..a235802 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -449,37 +449,40 @@
 }
 
 #ifdef CONFIG_SCSI_PROC_FS
-/* 
- * proc_scsi_dev_info_read: dump the scsi_dev_info_list via
- * /proc/scsi/device_info
- */
-static int proc_scsi_devinfo_read(char *buffer, char **start,
-				  off_t offset, int length)
+static int devinfo_seq_show(struct seq_file *m, void *v)
 {
-	struct scsi_dev_info_list *devinfo;
-	int size, len = 0;
-	off_t begin = 0;
-	off_t pos = 0;
+	struct scsi_dev_info_list *devinfo =
+		list_entry(v, struct scsi_dev_info_list, dev_info_list);
 
-	list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) {
-		size = sprintf(buffer + len, "'%.8s' '%.16s' 0x%x\n",
+	seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
 			devinfo->vendor, devinfo->model, devinfo->flags);
-		len += size;
-		pos = begin + len;
-		if (pos < offset) {
-			len = 0;
-			begin = pos;
-		}
-		if (pos > offset + length)
-			goto stop_output;
-	}
+	return 0;
+}
 
-stop_output:
-	*start = buffer + (offset - begin);	/* Start of wanted data */
-	len -= (offset - begin);	/* Start slop */
-	if (len > length)
-		len = length;	/* Ending slop */
-	return (len);
+static void * devinfo_seq_start(struct seq_file *m, loff_t *pos)
+{
+	return seq_list_start(&scsi_dev_info_list, *pos);
+}
+
+static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &scsi_dev_info_list, pos);
+}
+
+static void devinfo_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations scsi_devinfo_seq_ops = {
+	.start	= devinfo_seq_start,
+	.next	= devinfo_seq_next,
+	.stop	= devinfo_seq_stop,
+	.show	= devinfo_seq_show,
+};
+
+static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_devinfo_seq_ops);
 }
 
 /* 
@@ -489,11 +492,12 @@
  * integer value of flag to the scsi device info list.
  * To use, echo "vendor:model:flag" > /proc/scsi/device_info
  */
-static int proc_scsi_devinfo_write(struct file *file, const char __user *buf,
-				   unsigned long length, void *data)
+static ssize_t proc_scsi_devinfo_write(struct file *file,
+				       const char __user *buf,
+				       size_t length, loff_t *ppos)
 {
 	char *buffer;
-	int err = length;
+	ssize_t err = length;
 
 	if (!buf || length>PAGE_SIZE)
 		return -EINVAL;
@@ -517,6 +521,15 @@
 	free_page((unsigned long)buffer);
 	return err;
 }
+
+static const struct file_operations scsi_devinfo_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_scsi_devinfo_open,
+	.read		= seq_read,
+	.write		= proc_scsi_devinfo_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
 #endif /* CONFIG_SCSI_PROC_FS */
 
 module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0);
@@ -577,15 +590,13 @@
 	}
 
 #ifdef CONFIG_SCSI_PROC_FS
-	p = create_proc_entry("scsi/device_info", 0, NULL);
+	p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
 	if (!p) {
 		error = -ENOMEM;
 		goto out;
 	}
 
 	p->owner = THIS_MODULE;
-	p->get_info = proc_scsi_devinfo_read;
-	p->write_proc = proc_scsi_devinfo_write;
 #endif /* CONFIG_SCSI_PROC_FS */
 
  out:
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 221f31e..1eaba6c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1771,6 +1771,7 @@
 	unsigned long flags;
 	int rtn;
 
+	blk_rq_init(NULL, &req);
 	scmd->request = &req;
 	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 67f412b..d545ad1 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -536,6 +536,9 @@
 	       !shost->host_blocked && !shost->host_self_blocked &&
 		!((shost->can_queue > 0) &&
 		  (shost->host_busy >= shost->can_queue))) {
+
+		int flagset;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -549,19 +552,20 @@
 		sdev = list_entry(shost->starved_list.next,
 					  struct scsi_device, starved_entry);
 		list_del_init(&sdev->starved_entry);
-		spin_unlock_irqrestore(shost->host_lock, flags);
+		spin_unlock(shost->host_lock);
 
+		spin_lock(sdev->request_queue->queue_lock);
+		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
+				!test_bit(QUEUE_FLAG_REENTER,
+					&sdev->request_queue->queue_flags);
+		if (flagset)
+			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
+		__blk_run_queue(sdev->request_queue);
+		if (flagset)
+			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
+		spin_unlock(sdev->request_queue->queue_lock);
 
-		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-		    !test_and_set_bit(QUEUE_FLAG_REENTER,
-				      &sdev->request_queue->queue_flags)) {
-			blk_run_queue(sdev->request_queue);
-			clear_bit(QUEUE_FLAG_REENTER,
-				  &sdev->request_queue->queue_flags);
-		} else
-			blk_run_queue(sdev->request_queue);
-
-		spin_lock_irqsave(shost->host_lock, flags);
+		spin_lock(shost->host_lock);
 		if (unlikely(!list_empty(&sdev->starved_entry)))
 			/*
 			 * sdev lost a race, and was put back on the
@@ -1585,8 +1589,9 @@
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
+	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 3a1c99d..e4a0d2f 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -413,6 +413,7 @@
 }
 
 static const struct file_operations proc_scsi_operations = {
+	.owner		= THIS_MODULE,
 	.open		= proc_scsi_open,
 	.read		= seq_read,
 	.write		= proc_scsi_write,
@@ -431,10 +432,9 @@
 	if (!proc_scsi)
 		goto err1;
 
-	pde = create_proc_entry("scsi/scsi", 0, NULL);
+	pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
 	if (!pde)
 		goto err2;
-	pde->proc_fops = &proc_scsi_operations;
 
 	return 0;
 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fcd7455..a00eee6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1828,7 +1828,7 @@
 	}
 
 	p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		do_scan_async(data);
 }
 EXPORT_SYMBOL(scsi_scan_host);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 7899e3d..f4461d3 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -248,8 +248,7 @@
 	else
 		q->queuedata = shost;
 
-	set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
-
+	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	return 0;
 }
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3cea17d..01cefbb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -860,7 +860,6 @@
 
 static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
-	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->timeout = SD_TIMEOUT;
 	rq->cmd[0] = SYNCHRONIZE_CACHE;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 2029422..c9d7f72 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2667,7 +2667,6 @@
 {
 	int k, mask;
 	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
-	struct proc_dir_entry *pdep;
 	struct sg_proc_leaf * leaf;
 
 	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
@@ -2676,13 +2675,10 @@
 	for (k = 0; k < num_leaves; ++k) {
 		leaf = &sg_proc_leaf_arr[k];
 		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
-		pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
-		if (pdep) {
-			leaf->fops->owner = THIS_MODULE,
-			leaf->fops->read = seq_read,
-			leaf->fops->llseek = seq_lseek,
-			pdep->proc_fops = leaf->fops;
-		}
+		leaf->fops->owner = THIS_MODULE;
+		leaf->fops->read = seq_read;
+		leaf->fops->llseek = seq_lseek;
+		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
 	}
 	return 0;
 }
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 35142b5..22a6aae 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -1647,7 +1647,7 @@
 	SYM_QUEHEAD *qp;
 	struct sym_ccb *cp;
 
-	while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
+	while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
 		struct scsi_cmnd *cmd;
 		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
 		sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
@@ -3168,7 +3168,7 @@
 	 *  the COMP queue and put back other ones into 
 	 *  the BUSY queue.
 	 */
-	while ((qp = sym_remque_head(&qtmp)) != 0) {
+	while ((qp = sym_remque_head(&qtmp)) != NULL) {
 		struct scsi_cmnd *cmd;
 		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
 		cmd = cp->cmd;
@@ -5729,7 +5729,7 @@
 		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
 
 	if (np->actccbs) {
-		while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
+		while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
 			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
 			sym_mfree_dma(cp, sizeof(*cp), "CCB");
 		}
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index f385dce..27aa40f 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -951,7 +951,7 @@
 	printk("abort: command mismatch, %p != %p\n",
 	       config.mscp[mscp_index].SCint, SCpnt);
 #endif
-    if (config.mscp[mscp_index].SCint == 0)
+    if (config.mscp[mscp_index].SCint == NULL)
 	return FAILED;
 
     if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
@@ -1101,7 +1101,7 @@
     SCtmp = mscp->SCint;
     mscp->SCint = NULL;
 
-    if (SCtmp == 0)
+    if (!SCtmp)
       {
 #if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
 	printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4220f22..5f71ff3 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -305,8 +305,6 @@
  */
 
 #define FW_GET_BYTE(p)	*((__u8 *) (p))
-#define FW_GET_WORD(p)	le16_to_cpu(get_unaligned((__le16 *) (p)))
-#define FW_GET_LONG(p)	le32_to_cpu(get_unaligned((__le32 *) (p)))
 
 #define FW_DIR "ueagle-atm/"
 #define NB_MODEM 4
@@ -621,7 +619,7 @@
 	if (size < 4)
 		goto err_fw_corrupted;
 
-	crc = FW_GET_LONG(pfw);
+	crc = get_unaligned_le32(pfw);
 	pfw += 4;
 	size -= 4;
 	if (crc32_be(0, pfw, size) != crc)
@@ -640,7 +638,7 @@
 
 	while (size > 3) {
 		u8 len = FW_GET_BYTE(pfw);
-		u16 add = FW_GET_WORD(pfw + 1);
+		u16 add = get_unaligned_le16(pfw + 1);
 
 		size -= len + 3;
 		if (size < 0)
@@ -738,7 +736,7 @@
 
 	for (i = 0; i < pagecount; i++) {
 
-		pageoffset = FW_GET_LONG(dsp + p);
+		pageoffset = get_unaligned_le32(dsp + p);
 		p += 4;
 
 		if (pageoffset == 0)
@@ -759,7 +757,7 @@
 				return 1;
 
 			pp += 2;	/* skip blockaddr */
-			blocksize = FW_GET_WORD(dsp + pp);
+			blocksize = get_unaligned_le16(dsp + pp);
 			pp += 2;
 
 			/* enough space for block data? */
@@ -928,7 +926,7 @@
 		goto bad1;
 
 	p += 4 * pageno;
-	pageoffset = FW_GET_LONG(p);
+	pageoffset = get_unaligned_le32(p);
 
 	if (pageoffset == 0)
 		goto bad1;
@@ -945,10 +943,10 @@
 	bi.wOvlOffset = cpu_to_le16(ovl | 0x8000);
 
 	for (i = 0; i < blockcount; i++) {
-		blockaddr = FW_GET_WORD(p);
+		blockaddr = get_unaligned_le16(p);
 		p += 2;
 
-		blocksize = FW_GET_WORD(p);
+		blocksize = get_unaligned_le16(p);
 		p += 2;
 
 		bi.wSize = cpu_to_le16(blocksize);
@@ -1152,9 +1150,9 @@
 	cmv.bDirection = E1_HOSTTOMODEM;
 	cmv.bFunction = function;
 	cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
-	put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress);
+	put_unaligned_le32(address, &cmv.dwSymbolicAddress);
 	cmv.wOffsetAddress = cpu_to_le16(offset);
-	put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData);
+	put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
 
 	ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
 	if (ret < 0)
@@ -1646,7 +1644,7 @@
 	if (size < 5)
 		goto err_fw_corrupted;
 
-	crc = FW_GET_LONG(data);
+	crc = get_unaligned_le32(data);
 	data += 4;
 	size -= 4;
 	if (crc32_be(0, data, size) != crc)
@@ -1696,9 +1694,9 @@
 			"please update your firmware\n");
 
 		for (i = 0; i < len; i++) {
-			ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v1[i].address),
-						FW_GET_WORD(&cmvs_v1[i].offset),
-						FW_GET_LONG(&cmvs_v1[i].data));
+			ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address),
+						get_unaligned_le16(&cmvs_v1[i].offset),
+						get_unaligned_le32(&cmvs_v1[i].data));
 			if (ret < 0)
 				goto out;
 		}
@@ -1706,9 +1704,9 @@
 		struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
 
 		for (i = 0; i < len; i++) {
-			ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v2[i].address),
-						(u16) FW_GET_LONG(&cmvs_v2[i].offset),
-						FW_GET_LONG(&cmvs_v2[i].data));
+			ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address),
+						(u16) get_unaligned_le32(&cmvs_v2[i].offset),
+						get_unaligned_le32(&cmvs_v2[i].data));
 			if (ret < 0)
 				goto out;
 		}
@@ -1759,10 +1757,10 @@
 
 		for (i = 0; i < len; i++) {
 			ret = uea_write_cmv_e4(sc, 1,
-						FW_GET_LONG(&cmvs_v2[i].group),
-						FW_GET_LONG(&cmvs_v2[i].address),
-						FW_GET_LONG(&cmvs_v2[i].offset),
-						FW_GET_LONG(&cmvs_v2[i].data));
+						get_unaligned_le32(&cmvs_v2[i].group),
+						get_unaligned_le32(&cmvs_v2[i].address),
+						get_unaligned_le32(&cmvs_v2[i].offset),
+						get_unaligned_le32(&cmvs_v2[i].data));
 			if (ret < 0)
 				goto out;
 		}
@@ -1964,7 +1962,7 @@
 		if (UEA_CHIP_VERSION(sc) == ADI930
 				&& cmv->bFunction ==  E1_MAKEFUNCTION(2, 2)) {
 			cmv->wIndex = cpu_to_le16(dsc->idx);
-			put_unaligned(cpu_to_le32(dsc->address), &cmv->dwSymbolicAddress);
+			put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress);
 			cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
 		} else
 			goto bad2;
@@ -1978,11 +1976,11 @@
 
 	/* in case of MEMACCESS */
 	if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
-	    le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != dsc->address ||
+	    get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
 	    le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
 		goto bad2;
 
-	sc->data = le32_to_cpu(get_unaligned(&cmv->dwData));
+	sc->data = get_unaligned_le32(&cmv->dwData);
 	sc->data = sc->data << 16 | sc->data >> 16;
 
 	wake_up_cmv_ack(sc);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b572e7..cefe7f2 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -280,7 +280,7 @@
 
 		case USB_CDC_NOTIFY_SERIAL_STATE:
 
-			newctrl = le16_to_cpu(get_unaligned((__le16 *) data));
+			newctrl = get_unaligned_le16(data);
 
 			if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
 				dbg("calling hangup");
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 8607846..1d253dd 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -773,7 +773,7 @@
 	usb_register_notify(&usbfs_nb);
 
 	/* create mount point for usbfs */
-	usbdir = proc_mkdir("usb", proc_bus);
+	usbdir = proc_mkdir("bus/usb", NULL);
 
 	return 0;
 }
@@ -783,6 +783,6 @@
 	usb_unregister_notify(&usbfs_nb);
 	unregister_filesystem(&usb_fs_type);
 	if (usbdir)
-		remove_proc_entry("usb", proc_bus);
+		remove_proc_entry("bus/usb", NULL);
 }
 
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 9b913af..274c60a 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -231,6 +231,7 @@
 }
 
 static const struct file_operations proc_ops = {
+	.owner		= THIS_MODULE,
 	.open		= proc_udc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -239,15 +240,7 @@
 
 static void create_debug_file(struct at91_udc *udc)
 {
-	struct proc_dir_entry *pde;
-
-	pde = create_proc_entry (debug_filename, 0, NULL);
-	udc->pde = pde;
-	if (pde == NULL)
-		return;
-
-	pde->proc_fops = &proc_ops;
-	pde->data = udc;
+	udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc);
 }
 
 static void remove_debug_file(struct at91_udc *udc)
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 64a592c..be6613a 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -127,7 +127,7 @@
 
 	/* enabling the no-toggle interrupt mode would need an api hook */
 	mode = 0;
-	max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
+	max = get_unaligned_le16(&desc->wMaxPacketSize);
 	switch (max) {
 	case 64:	mode++;
 	case 32:	mode++;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 95f7662..881d74c 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2504,6 +2504,7 @@
 }
 
 static const struct file_operations proc_ops = {
+	.owner		= THIS_MODULE,
 	.open		= proc_udc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -2512,11 +2513,7 @@
 
 static void create_proc_file(void)
 {
-	struct proc_dir_entry *pde;
-
-	pde = create_proc_entry (proc_filename, 0, NULL);
-	if (pde)
-		pde->proc_fops = &proc_ops;
+	proc_create(proc_filename, 0, NULL, &proc_ops);
 }
 
 static void remove_proc_file(void)
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index bd58dd5..d0677f5 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -183,14 +183,10 @@
 		DBG("query OID %08x value, len %d:\n", OID, buf_len);
 		for (i = 0; i < buf_len; i += 16) {
 			DBG("%03d: %08x %08x %08x %08x\n", i,
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i])),
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i + 4])),
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i + 8])),
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i + 12])));
+				get_unaligned_le32(&buf[i]),
+				get_unaligned_le32(&buf[i + 4]),
+				get_unaligned_le32(&buf[i + 8]),
+				get_unaligned_le32(&buf[i + 12]));
 		}
 	}
 
@@ -666,7 +662,7 @@
 		break;
 	case OID_PNP_QUERY_POWER:
 		DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__,
-				le32_to_cpu(get_unaligned((__le32 *)buf)) - 1);
+				get_unaligned_le32(buf) - 1);
 		/* only suspend is a real power state, and
 		 * it can't be entered by OID_PNP_SET_POWER...
 		 */
@@ -705,14 +701,10 @@
 		DBG("set OID %08x value, len %d:\n", OID, buf_len);
 		for (i = 0; i < buf_len; i += 16) {
 			DBG("%03d: %08x %08x %08x %08x\n", i,
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i])),
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i + 4])),
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i + 8])),
-				le32_to_cpu(get_unaligned((__le32 *)
-					&buf[i + 12])));
+				get_unaligned_le32(&buf[i]),
+				get_unaligned_le32(&buf[i + 4]),
+				get_unaligned_le32(&buf[i + 8]),
+				get_unaligned_le32(&buf[i + 12]));
 		}
 	}
 
@@ -726,8 +718,7 @@
 		 *	PROMISCUOUS, DIRECTED,
 		 *	MULTICAST, ALL_MULTICAST, BROADCAST
 		 */
-		*params->filter = (u16) le32_to_cpu(get_unaligned(
-				(__le32 *)buf));
+		*params->filter = (u16)get_unaligned_le32(buf);
 		DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
 			__func__, *params->filter);
 
@@ -777,7 +768,7 @@
 		 * resuming, Windows forces a reset, and then SET_POWER D0.
 		 * FIXME ... then things go batty; Windows wedges itself.
 		 */
-		i = le32_to_cpu(get_unaligned((__le32 *)buf));
+		i = get_unaligned_le32(buf);
 		DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1);
 		switch (i) {
 		case NdisDeviceStateD0:
@@ -1064,8 +1055,8 @@
 		return -ENOMEM;
 
 	tmp = (__le32 *) buf;
-	MsgType   = le32_to_cpu(get_unaligned(tmp++));
-	MsgLength = le32_to_cpu(get_unaligned(tmp++));
+	MsgType   = get_unaligned_le32(tmp++);
+	MsgLength = get_unaligned_le32(tmp++);
 
 	if (configNr >= RNDIS_MAX_CONFIGS)
 		return -ENOTSUPP;
@@ -1296,10 +1287,9 @@
 	tmp++;
 
 	/* DataOffset, DataLength */
-	if (!skb_pull(skb, le32_to_cpu(get_unaligned(tmp++))
-			+ 8 /* offset of DataOffset */))
+	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8))
 		return -EOVERFLOW;
-	skb_trim(skb, le32_to_cpu(get_unaligned(tmp++)));
+	skb_trim(skb, get_unaligned_le32(tmp++));
 
 	return 0;
 }
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 878e428..4154be3 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -74,7 +74,7 @@
 				goto fail;
 		} else
 			uchar = c;
-		put_unaligned (cpu_to_le16 (uchar), cp++);
+		put_unaligned_le16(uchar, cp++);
 		count++;
 		len--;
 	}
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index f13d102..382587c 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -770,7 +770,7 @@
 	if (status & ~0xffff)	/* only if wPortChange is interesting */
 #endif
 		dbg_port (ehci, "GetStatus", wIndex + 1, temp);
-		put_unaligned(cpu_to_le32 (status), (__le32 *) buf);
+		put_unaligned_le32(status, buf);
 		break;
 	case SetHubFeature:
 		switch (wValue) {
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 5be3bb3..17dc2ec 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -736,14 +736,14 @@
 		break;
 	case GetHubStatus:
 		temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE);
-		put_unaligned(cpu_to_le32 (temp), (__le32 *) buf);
+		put_unaligned_le32(temp, buf);
 		break;
 	case GetPortStatus:
 		if (!wIndex || wIndex > ports)
 			goto error;
 		wIndex--;
 		temp = roothub_portstatus (ohci, wIndex);
-		put_unaligned(cpu_to_le32 (temp), (__le32 *) buf);
+		put_unaligned_le32(temp, buf);
 
 #ifndef	OHCI_VERBOSE_DEBUG
 	if (*(u16*)(buf+2))	/* only if wPortChange is interesting */
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 3fd7a0c..4265752 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1506,15 +1506,7 @@
 
 static void create_debug_file(struct sl811 *sl811)
 {
-	struct proc_dir_entry *pde;
-
-	pde = create_proc_entry(proc_filename, 0, NULL);
-	if (pde == NULL)
-		return;
-
-	pde->proc_fops = &proc_ops;
-	pde->data = sl811;
-	sl811->pde = pde;
+	sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811);
 }
 
 static void remove_debug_file(struct sl811 *sl811)
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 17b5267..9f8a389 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -381,7 +381,7 @@
 
 	/* Register the /proc entries. */
 	clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
-		&proc_root);
+		NULL);
 	if (clps7111fb_backlight_proc_entry == NULL) {
 		printk("Couldn't create the /proc entry for the backlight.\n");
 		return -EINVAL;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 8eda7b6..ad31983 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1881,7 +1881,7 @@
 			scr_memsetw((unsigned short *) (vc->vc_origin +
 							vc->vc_size_row *
 							(b - count)),
-				    vc->vc_video_erase_char,
+				    vc->vc_scrl_erase_char,
 				    vc->vc_size_row * count);
 			return 1;
 			break;
@@ -1953,7 +1953,7 @@
 			scr_memsetw((unsigned short *) (vc->vc_origin +
 							vc->vc_size_row *
 							(b - count)),
-				    vc->vc_video_erase_char,
+				    vc->vc_scrl_erase_char,
 				    vc->vc_size_row * count);
 			return 1;
 		}
@@ -1972,7 +1972,7 @@
 			scr_memsetw((unsigned short *) (vc->vc_origin +
 							vc->vc_size_row *
 							t),
-				    vc->vc_video_erase_char,
+				    vc->vc_scrl_erase_char,
 				    vc->vc_size_row * count);
 			return 1;
 			break;
@@ -2042,7 +2042,7 @@
 			scr_memsetw((unsigned short *) (vc->vc_origin +
 							vc->vc_size_row *
 							t),
-				    vc->vc_video_erase_char,
+				    vc->vc_scrl_erase_char,
 				    vc->vc_size_row * count);
 			return 1;
 		}
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index bd8d995..38a296b 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -531,7 +531,7 @@
 
 static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
 {
-	u16 eattr = mda_convert_attr(c->vc_video_erase_char);
+	u16 eattr = mda_convert_attr(c->vc_scrl_erase_char);
 
 	if (!lines)
 		return 0;
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 67a682d..a11cc2f 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -170,12 +170,12 @@
     switch (dir) {
     case SM_UP:
 	sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols);
-	sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_video_erase_char);
+	sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_scrl_erase_char);
 	break;
 
     case SM_DOWN:
 	sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols);
-	sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char);
+	sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_scrl_erase_char);
 	break;
     }
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 6df29a6..bd1f57b 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1350,7 +1350,7 @@
 		} else
 			c->vc_origin += delta;
 		scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size -
-				     delta), c->vc_video_erase_char,
+				     delta), c->vc_scrl_erase_char,
 			    delta);
 	} else {
 		if (oldo - delta < vga_vram_base) {
@@ -1363,7 +1363,7 @@
 		} else
 			c->vc_origin -= delta;
 		c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
-		scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
+		scr_memsetw((u16 *) (c->vc_origin), c->vc_scrl_erase_char,
 			    delta);
 	}
 	c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index aaa3e53..5b5f072 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -522,8 +522,6 @@
 #endif
 }
 
-#define get_u16(x) (le16_to_cpu(get_unaligned((__u16*)(x))))
-#define get_u32(x) (le32_to_cpu(get_unaligned((__u32*)(x))))
 static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
 	unsigned int maxdac;
 
@@ -532,11 +530,12 @@
 		case 1:		maxdac = 220000; break;
 		default:	maxdac = 240000; break;
 	}
-	if (get_u16(bd->pins + 24)) {
-		maxdac = get_u16(bd->pins + 24) * 10;
+	if (get_unaligned_le16(bd->pins + 24)) {
+		maxdac = get_unaligned_le16(bd->pins + 24) * 10;
 	}
 	MINFO->limits.pixel.vcomax = maxdac;
-	MINFO->values.pll.system = get_u16(bd->pins + 28) ? get_u16(bd->pins + 28) * 10 : 50000;
+	MINFO->values.pll.system = get_unaligned_le16(bd->pins + 28) ?
+		get_unaligned_le16(bd->pins + 28) * 10 : 50000;
 	/* ignore 4MB, 8MB, module clocks */
 	MINFO->features.pll.ref_freq = 14318;
 	MINFO->values.reg.mctlwtst	= 0x00030101;
@@ -575,7 +574,8 @@
 static int parse_pins3(WPMINFO const struct matrox_bios* bd) {
 	MINFO->limits.pixel.vcomax	=
 	MINFO->limits.system.vcomax	= (bd->pins[36] == 0xFF) ? 230000			: ((bd->pins[36] + 100) * 1000);
-	MINFO->values.reg.mctlwtst	= get_u32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21	: get_u32(bd->pins + 48);
+	MINFO->values.reg.mctlwtst	= get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ?
+		0x01250A21 : get_unaligned_le32(bd->pins + 48);
 	/* memory config */
 	MINFO->values.reg.memrdbk	= ((bd->pins[57] << 21) & 0x1E000000) |
 					  ((bd->pins[57] << 22) & 0x00C00000) |
@@ -601,7 +601,7 @@
 static int parse_pins4(WPMINFO const struct matrox_bios* bd) {
 	MINFO->limits.pixel.vcomax	= (bd->pins[ 39] == 0xFF) ? 230000			: bd->pins[ 39] * 4000;
 	MINFO->limits.system.vcomax	= (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax	: bd->pins[ 38] * 4000;
-	MINFO->values.reg.mctlwtst	= get_u32(bd->pins + 71);
+	MINFO->values.reg.mctlwtst	= get_unaligned_le32(bd->pins + 71);
 	MINFO->values.reg.memrdbk	= ((bd->pins[87] << 21) & 0x1E000000) |
 					  ((bd->pins[87] << 22) & 0x00C00000) |
 					  ((bd->pins[86] <<  1) & 0x000001E0) |
@@ -609,7 +609,7 @@
 	MINFO->values.reg.opt		= ((bd->pins[53] << 15) & 0x00400000) |
 					  ((bd->pins[53] << 22) & 0x10000000) |
 					  ((bd->pins[53] <<  7) & 0x00001C00);
-	MINFO->values.reg.opt3		= get_u32(bd->pins + 67);
+	MINFO->values.reg.opt3		= get_unaligned_le32(bd->pins + 67);
 	MINFO->values.pll.system	= (bd->pins[ 65] == 0xFF) ? 200000 			: bd->pins[ 65] * 4000;
 	MINFO->features.pll.ref_freq	= (bd->pins[ 92] & 0x01) ? 14318 : 27000;
 	return 0;
@@ -640,12 +640,12 @@
 	MINFO->limits.video.vcomin	= (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin	: bd->pins[122] * mult;
 	MINFO->values.pll.system	=
 	MINFO->values.pll.video		= (bd->pins[ 92] == 0xFF) ? 284000			: bd->pins[ 92] * 4000;
-	MINFO->values.reg.opt		= get_u32(bd->pins+ 48);
-	MINFO->values.reg.opt2		= get_u32(bd->pins+ 52);
-	MINFO->values.reg.opt3		= get_u32(bd->pins+ 94);
-	MINFO->values.reg.mctlwtst	= get_u32(bd->pins+ 98);
-	MINFO->values.reg.memmisc	= get_u32(bd->pins+102);
-	MINFO->values.reg.memrdbk	= get_u32(bd->pins+106);
+	MINFO->values.reg.opt		= get_unaligned_le32(bd->pins + 48);
+	MINFO->values.reg.opt2		= get_unaligned_le32(bd->pins + 52);
+	MINFO->values.reg.opt3		= get_unaligned_le32(bd->pins + 94);
+	MINFO->values.reg.mctlwtst	= get_unaligned_le32(bd->pins + 98);
+	MINFO->values.reg.memmisc	= get_unaligned_le32(bd->pins + 102);
+	MINFO->values.reg.memrdbk	= get_unaligned_le32(bd->pins + 106);
 	MINFO->features.pll.ref_freq	= (bd->pins[110] & 0x01) ? 14318 : 27000;
 	MINFO->values.memory.ddr	= (bd->pins[114] & 0x60) == 0x20;
 	MINFO->values.memory.dll	= (bd->pins[115] & 0x02) != 0;
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index 2497912..cc4c038 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -206,8 +206,7 @@
 	}
 
 	/* check waveform mode table address checksum */
-	wmta = le32_to_cpu(get_unaligned((__le32 *) wfm_hdr->wmta));
-	wmta &= 0x00FFFFFF;
+	wmta = get_unaligned_le32(wfm_hdr->wmta) & 0x00FFFFFF;
 	cksum_idx = wmta + m*4 + 3;
 	if (cksum_idx > size)
 		return -EINVAL;
@@ -219,8 +218,7 @@
 	}
 
 	/* check waveform temperature table address checksum */
-	tta = le32_to_cpu(get_unaligned((int *) (mem + wmta + m*4)));
-	tta &= 0x00FFFFFF;
+	tta = get_unaligned_le32(mem + wmta + m * 4) & 0x00FFFFFF;
 	cksum_idx = tta + trn*4 + 3;
 	if (cksum_idx > size)
 		return -EINVAL;
@@ -233,8 +231,7 @@
 
 	/* here we do the real work of putting the waveform into the
 	metromem buffer. this does runlength decoding of the waveform */
-	wfm_idx = le32_to_cpu(get_unaligned((__le32 *) (mem + tta + trn*4)));
-	wfm_idx &= 0x00FFFFFF;
+	wfm_idx = get_unaligned_le32(mem + tta + trn * 4) & 0x00FFFFFF;
 	owfm_idx = wfm_idx;
 	if (wfm_idx > size)
 		return -EINVAL;
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 2ce4ceb..099b6fb 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 #include <linux/zorro.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <asm/uaccess.h>
@@ -76,36 +77,58 @@
 }
 
 static const struct file_operations proc_bus_zorro_operations = {
+	.owner		= THIS_MODULE,
 	.llseek		= proc_bus_zorro_lseek,
 	.read		= proc_bus_zorro_read,
 };
 
-static int
-get_zorro_dev_info(char *buf, char **start, off_t pos, int count)
+static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
 {
-	u_int slot;
-	off_t at = 0;
-	int len, cnt;
-
-	for (slot = cnt = 0; slot < zorro_num_autocon && count > cnt; slot++) {
-		struct zorro_dev *z = &zorro_autocon[slot];
-		len = sprintf(buf, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot,
-			      z->id, (unsigned long)zorro_resource_start(z),
-			      (unsigned long)zorro_resource_len(z),
-			      z->rom.er_Type);
-		at += len;
-		if (at >= pos) {
-			if (!*start) {
-				*start = buf + (pos - (at - len));
-				cnt = at - pos;
-			} else
-				cnt += len;
-			buf += len;
-		}
-	}
-	return (count > cnt) ? cnt : count;
+	return (*pos < zorro_num_autocon) ? pos : NULL;
 }
 
+static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return (*pos < zorro_num_autocon) ? pos : NULL;
+}
+
+static void zorro_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int zorro_seq_show(struct seq_file *m, void *v)
+{
+	u_int slot = *(loff_t *)v;
+	struct zorro_dev *z = &zorro_autocon[slot];
+
+	seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
+		   (unsigned long)zorro_resource_start(z),
+		   (unsigned long)zorro_resource_len(z),
+		   z->rom.er_Type);
+	return 0;
+}
+
+static const struct seq_operations zorro_devices_seq_ops = {
+	.start = zorro_seq_start,
+	.next  = zorro_seq_next,
+	.stop  = zorro_seq_stop,
+	.show  = zorro_seq_show,
+};
+
+static int zorro_devices_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &zorro_devices_seq_ops);
+}
+
+static const struct file_operations zorro_devices_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= zorro_devices_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static struct proc_dir_entry *proc_bus_zorro_dir;
 
 static int __init zorro_proc_attach_device(u_int slot)
@@ -114,11 +137,11 @@
 	char name[4];
 
 	sprintf(name, "%02x", slot);
-	entry = create_proc_entry(name, 0, proc_bus_zorro_dir);
+	entry = proc_create_data(name, 0, proc_bus_zorro_dir,
+				 &proc_bus_zorro_operations,
+				 &zorro_autocon[slot]);
 	if (!entry)
 		return -ENOMEM;
-	entry->proc_fops = &proc_bus_zorro_operations;
-	entry->data = &zorro_autocon[slot];
 	entry->size = sizeof(struct zorro_dev);
 	return 0;
 }
@@ -128,9 +151,9 @@
 	u_int slot;
 
 	if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
-		proc_bus_zorro_dir = proc_mkdir("zorro", proc_bus);
-		create_proc_info_entry("devices", 0, proc_bus_zorro_dir,
-				       get_zorro_dev_info);
+		proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
+		proc_create("devices", 0, proc_bus_zorro_dir,
+			    &zorro_devices_proc_fops);
 		for (slot = 0; slot < zorro_num_autocon; slot++)
 			zorro_proc_attach_device(slot);
 	}
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 853845a..55e8ee1 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -41,7 +41,7 @@
 	  It is also possible to run FDPIC ELF binaries on MMU linux also.
 
 config BINFMT_FLAT
-	tristate "Kernel support for flat binaries"
+	bool "Kernel support for flat binaries"
 	depends on !MMU
 	help
 	  Support uClinux FLAT format binaries.
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index b9b2b27..ea7df21 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -122,9 +122,9 @@
 		ptr.ptr8 = bufoff(bh, i);
 		end.ptr8 = ptr.ptr8 + last - i;
 
-		do
+		do {
 			dircheck = *ptr.ptr8++ ^ ror13(dircheck);
-		while (ptr.ptr8 < end.ptr8);
+		} while (ptr.ptr8 < end.ptr8);
 	}
 
 	/*
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 6e0c939..e87ede6 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -325,8 +325,7 @@
 	pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
 
 
-	if (block > (sector_t)0x7fffffffUL)
-		BUG();
+	BUG_ON(block > (sector_t)0x7fffffffUL);
 
 	if (block >= AFFS_I(inode)->i_blkcnt) {
 		if (block > AFFS_I(inode)->i_blkcnt || !create)
@@ -493,8 +492,7 @@
 	u32 tmp;
 
 	pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
-	if (from > to || to > PAGE_CACHE_SIZE)
-		BUG();
+	BUG_ON(from > to || to > PAGE_CACHE_SIZE);
 	kmap(page);
 	data = page_address(page);
 	bsize = AFFS_SB(sb)->s_data_blksize;
@@ -507,8 +505,7 @@
 		if (IS_ERR(bh))
 			return PTR_ERR(bh);
 		tmp = min(bsize - boff, to - from);
-		if (from + tmp > to || tmp > bsize)
-			BUG();
+		BUG_ON(from + tmp > to || tmp > bsize);
 		memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
 		affs_brelse(bh);
 		bidx++;
@@ -540,8 +537,7 @@
 		if (IS_ERR(bh))
 			return PTR_ERR(bh);
 		tmp = min(bsize - boff, newsize - size);
-		if (boff + tmp > bsize || tmp > bsize)
-			BUG();
+		BUG_ON(boff + tmp > bsize || tmp > bsize);
 		memset(AFFS_DATA(bh) + boff, 0, tmp);
 		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
 		affs_fix_checksum(sb, bh);
@@ -560,8 +556,7 @@
 		if (IS_ERR(bh))
 			goto out;
 		tmp = min(bsize, newsize - size);
-		if (tmp > bsize)
-			BUG();
+		BUG_ON(tmp > bsize);
 		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
 		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
 		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
@@ -683,8 +678,7 @@
 		if (IS_ERR(bh))
 			return PTR_ERR(bh);
 		tmp = min(bsize - boff, to - from);
-		if (boff + tmp > bsize || tmp > bsize)
-			BUG();
+		BUG_ON(boff + tmp > bsize || tmp > bsize);
 		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
 		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
 		affs_fix_checksum(sb, bh);
@@ -732,8 +726,7 @@
 		if (IS_ERR(bh))
 			goto out;
 		tmp = min(bsize, to - from);
-		if (tmp > bsize)
-			BUG();
+		BUG_ON(tmp > bsize);
 		memcpy(AFFS_DATA(bh), data + from, tmp);
 		if (buffer_new(bh)) {
 			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index d2dc047..01d25d532 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -199,7 +199,6 @@
 		case Opt_prefix:
 			/* Free any previous prefix */
 			kfree(*prefix);
-			*prefix = NULL;
 			*prefix = match_strdup(&args[0]);
 			if (!*prefix)
 				return 0;
@@ -233,6 +232,8 @@
 			break;
 		case Opt_volume: {
 			char *vol = match_strdup(&args[0]);
+			if (!vol)
+				return 0;
 			strlcpy(volume, vol, 32);
 			kfree(vol);
 			break;
diff --git a/fs/afs/afs_cm.h b/fs/afs/afs_cm.h
index 7b4d4fa..255f5dd 100644
--- a/fs/afs/afs_cm.h
+++ b/fs/afs/afs_cm.h
@@ -24,7 +24,8 @@
 	CBGetXStatsVersion	= 209,	/* get version of extended statistics */
 	CBGetXStats		= 210,	/* get contents of extended statistics data */
 	CBInitCallBackState3	= 213,	/* initialise callback state, version 3 */
-	CBGetCapabilities	= 65538, /* get client capabilities */
+	CBProbeUuid		= 214,	/* check the client hasn't rebooted */
+	CBTellMeAboutYourself	= 65538, /* get client capabilities */
 };
 
 #define AFS_CAP_ERROR_TRANSLATION	0x1
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 584bb0f..5e1df14 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -20,7 +20,7 @@
 DECLARE_RWSEM(afs_proc_cells_sem);
 LIST_HEAD(afs_proc_cells);
 
-static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
+static LIST_HEAD(afs_cells);
 static DEFINE_RWLOCK(afs_cells_lock);
 static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
 static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 47b71c8..eb76548 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -26,8 +26,9 @@
 						struct sk_buff *, bool);
 static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool);
 static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool);
-static int afs_deliver_cb_get_capabilities(struct afs_call *, struct sk_buff *,
-					   bool);
+static int afs_deliver_cb_probe_uuid(struct afs_call *, struct sk_buff *, bool);
+static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *,
+						 struct sk_buff *, bool);
 static void afs_cm_destructor(struct afs_call *);
 
 /*
@@ -71,11 +72,21 @@
 };
 
 /*
- * CB.GetCapabilities operation type
+ * CB.ProbeUuid operation type
  */
-static const struct afs_call_type afs_SRXCBGetCapabilites = {
-	.name		= "CB.GetCapabilities",
-	.deliver	= afs_deliver_cb_get_capabilities,
+static const struct afs_call_type afs_SRXCBProbeUuid = {
+	.name		= "CB.ProbeUuid",
+	.deliver	= afs_deliver_cb_probe_uuid,
+	.abort_to_error	= afs_abort_to_error,
+	.destructor	= afs_cm_destructor,
+};
+
+/*
+ * CB.TellMeAboutYourself operation type
+ */
+static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
+	.name		= "CB.TellMeAboutYourself",
+	.deliver	= afs_deliver_cb_tell_me_about_yourself,
 	.abort_to_error	= afs_abort_to_error,
 	.destructor	= afs_cm_destructor,
 };
@@ -103,8 +114,8 @@
 	case CBProbe:
 		call->type = &afs_SRXCBProbe;
 		return true;
-	case CBGetCapabilities:
-		call->type = &afs_SRXCBGetCapabilites;
+	case CBTellMeAboutYourself:
+		call->type = &afs_SRXCBTellMeAboutYourself;
 		return true;
 	default:
 		return false;
@@ -393,9 +404,105 @@
 }
 
 /*
+ * allow the fileserver to quickly find out if the cache manager has been rebooted
+ */
+static void SRXAFSCB_ProbeUuid(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, work);
+	struct afs_uuid *r = call->request;
+
+	struct {
+		__be32	match;
+	} reply;
+
+	_enter("");
+
+
+	if (memcmp(r, &afs_uuid, sizeof(afs_uuid)) == 0)
+		reply.match = htonl(0);
+	else
+		reply.match = htonl(1);
+
+	afs_send_simple_reply(call, &reply, sizeof(reply));
+	_leave("");
+}
+
+/*
+ * deliver request data to a CB.ProbeUuid call
+ */
+static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
+				     bool last)
+{
+	struct afs_uuid *r;
+	unsigned loop;
+	__be32 *b;
+	int ret;
+
+	_enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+
+	if (skb->len > 0)
+		return -EBADMSG;
+	if (!last)
+		return 0;
+
+	switch (call->unmarshall) {
+	case 0:
+		call->offset = 0;
+		call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL);
+		if (!call->buffer)
+			return -ENOMEM;
+		call->unmarshall++;
+
+	case 1:
+		_debug("extract UUID");
+		ret = afs_extract_data(call, skb, last, call->buffer,
+				       11 * sizeof(__be32));
+		switch (ret) {
+		case 0:		break;
+		case -EAGAIN:	return 0;
+		default:	return ret;
+		}
+
+		_debug("unmarshall UUID");
+		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+		if (!call->request)
+			return -ENOMEM;
+
+		b = call->buffer;
+		r = call->request;
+		r->time_low			= ntohl(b[0]);
+		r->time_mid			= ntohl(b[1]);
+		r->time_hi_and_version		= ntohl(b[2]);
+		r->clock_seq_hi_and_reserved 	= ntohl(b[3]);
+		r->clock_seq_low		= ntohl(b[4]);
+
+		for (loop = 0; loop < 6; loop++)
+			r->node[loop] = ntohl(b[loop + 5]);
+
+		call->offset = 0;
+		call->unmarshall++;
+
+	case 2:
+		_debug("trailer");
+		if (skb->len != 0)
+			return -EBADMSG;
+		break;
+	}
+
+	if (!last)
+		return 0;
+
+	call->state = AFS_CALL_REPLYING;
+
+	INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
+	schedule_work(&call->work);
+	return 0;
+}
+
+/*
  * allow the fileserver to ask about the cache manager's capabilities
  */
-static void SRXAFSCB_GetCapabilities(struct work_struct *work)
+static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
 {
 	struct afs_interface *ifs;
 	struct afs_call *call = container_of(work, struct afs_call, work);
@@ -456,10 +563,10 @@
 }
 
 /*
- * deliver request data to a CB.GetCapabilities call
+ * deliver request data to a CB.TellMeAboutYourself call
  */
-static int afs_deliver_cb_get_capabilities(struct afs_call *call,
-					   struct sk_buff *skb, bool last)
+static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
+						 struct sk_buff *skb, bool last)
 {
 	_enter(",{%u},%d", skb->len, last);
 
@@ -471,7 +578,7 @@
 	/* no unmarshalling required */
 	call->state = AFS_CALL_REPLYING;
 
-	INIT_WORK(&call->work, SRXAFSCB_GetCapabilities);
+	INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself);
 	schedule_work(&call->work);
 	return 0;
 }
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 846c761..9f7d1ae 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -41,6 +41,7 @@
 	.write		= afs_proc_cells_write,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
+	.owner		= THIS_MODULE,
 };
 
 static int afs_proc_rootcell_open(struct inode *inode, struct file *file);
@@ -56,7 +57,8 @@
 	.read		= afs_proc_rootcell_read,
 	.write		= afs_proc_rootcell_write,
 	.llseek		= no_llseek,
-	.release	= afs_proc_rootcell_release
+	.release	= afs_proc_rootcell_release,
+	.owner		= THIS_MODULE,
 };
 
 static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file);
@@ -80,6 +82,7 @@
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= afs_proc_cell_volumes_release,
+	.owner		= THIS_MODULE,
 };
 
 static int afs_proc_cell_vlservers_open(struct inode *inode,
@@ -104,6 +107,7 @@
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= afs_proc_cell_vlservers_release,
+	.owner		= THIS_MODULE,
 };
 
 static int afs_proc_cell_servers_open(struct inode *inode, struct file *file);
@@ -127,6 +131,7 @@
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= afs_proc_cell_servers_release,
+	.owner		= THIS_MODULE,
 };
 
 /*
@@ -143,17 +148,13 @@
 		goto error_dir;
 	proc_afs->owner = THIS_MODULE;
 
-	p = create_proc_entry("cells", 0, proc_afs);
+	p = proc_create("cells", 0, proc_afs, &afs_proc_cells_fops);
 	if (!p)
 		goto error_cells;
-	p->proc_fops = &afs_proc_cells_fops;
-	p->owner = THIS_MODULE;
 
-	p = create_proc_entry("rootcell", 0, proc_afs);
+	p = proc_create("rootcell", 0, proc_afs, &afs_proc_rootcell_fops);
 	if (!p)
 		goto error_rootcell;
-	p->proc_fops = &afs_proc_rootcell_fops;
-	p->owner = THIS_MODULE;
 
 	_leave(" = 0");
 	return 0;
@@ -395,26 +396,20 @@
 	if (!cell->proc_dir)
 		goto error_dir;
 
-	p = create_proc_entry("servers", 0, cell->proc_dir);
+	p = proc_create_data("servers", 0, cell->proc_dir,
+			     &afs_proc_cell_servers_fops, cell);
 	if (!p)
 		goto error_servers;
-	p->proc_fops = &afs_proc_cell_servers_fops;
-	p->owner = THIS_MODULE;
-	p->data = cell;
 
-	p = create_proc_entry("vlservers", 0, cell->proc_dir);
+	p = proc_create_data("vlservers", 0, cell->proc_dir,
+			     &afs_proc_cell_vlservers_fops, cell);
 	if (!p)
 		goto error_vlservers;
-	p->proc_fops = &afs_proc_cell_vlservers_fops;
-	p->owner = THIS_MODULE;
-	p->data = cell;
 
-	p = create_proc_entry("volumes", 0, cell->proc_dir);
+	p = proc_create_data("volumes", 0, cell->proc_dir,
+			     &afs_proc_cell_volumes_fops, cell);
 	if (!p)
 		goto error_volumes;
-	p->proc_fops = &afs_proc_cell_volumes_fops;
-	p->owner = THIS_MODULE;
-	p->data = cell;
 
 	_leave(" = 0");
 	return 0;
diff --git a/fs/aio.c b/fs/aio.c
index ae94e1dea..99c2352 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -191,6 +191,43 @@
 	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *	Called when the last user of an aio context has gone away,
+ *	and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+	unsigned nr_events = ctx->max_reqs;
+
+	BUG_ON(ctx->reqs_active);
+
+	cancel_delayed_work(&ctx->wq);
+	cancel_work_sync(&ctx->wq.work);
+	aio_free_ring(ctx);
+	mmdrop(ctx->mm);
+	ctx->mm = NULL;
+	pr_debug("__put_ioctx: freeing %p\n", ctx);
+	kmem_cache_free(kioctx_cachep, ctx);
+
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
+}
+
+#define get_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	atomic_inc(&(kioctx)->users);					\
+} while (0)
+#define put_ioctx(kioctx) do {						\
+	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
+	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) 		\
+		__put_ioctx(kioctx);					\
+} while (0)
+
 /* ioctx_alloc
  *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
@@ -240,7 +277,7 @@
 	if (ctx->max_reqs == 0)
 		goto out_cleanup;
 
-	/* now link into global list.  kludge.  FIXME */
+	/* now link into global list. */
 	write_lock(&mm->ioctx_list_lock);
 	ctx->next = mm->ioctx_list;
 	mm->ioctx_list = ctx;
@@ -361,32 +398,6 @@
 	}
 }
 
-/* __put_ioctx
- *	Called when the last user of an aio context has gone away,
- *	and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
-	unsigned nr_events = ctx->max_reqs;
-
-	BUG_ON(ctx->reqs_active);
-
-	cancel_delayed_work(&ctx->wq);
-	cancel_work_sync(&ctx->wq.work);
-	aio_free_ring(ctx);
-	mmdrop(ctx->mm);
-	ctx->mm = NULL;
-	pr_debug("__put_ioctx: freeing %p\n", ctx);
-	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
-}
-
 /* aio_get_req
  *	Allocate a slot for an aio request.  Increments the users count
  * of the kioctx so that the kioctx stays around until all requests are
@@ -542,10 +553,7 @@
 	return ret;
 }
 
-/*	Lookup an ioctx id.  ioctx_list is lockless for reads.
- *	FIXME: this is O(n) and is only suitable for development.
- */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
 	struct kioctx *ioctx;
 	struct mm_struct *mm;
@@ -1552,7 +1560,7 @@
 	return 1;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 struct iocb *iocb)
 {
 	struct kiocb *req;
@@ -1593,7 +1601,7 @@
 		 * event using the eventfd_signal() function.
 		 */
 		req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
-		if (unlikely(IS_ERR(req->ki_eventfd))) {
+		if (IS_ERR(req->ki_eventfd)) {
 			ret = PTR_ERR(req->ki_eventfd);
 			goto out_put_req;
 		}
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index a54a946..aa4c5ff 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -533,9 +533,9 @@
 			goto next;
 
 		if (d_unhashed(dentry)) {
-			struct autofs_info *ino = autofs4_dentry_ino(dentry);
 			struct inode *inode = dentry->d_inode;
 
+			ino = autofs4_dentry_ino(dentry);
 			list_del_init(&ino->rehash);
 			dget(dentry);
 			/*
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 82123ff..e8717de 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -489,9 +489,9 @@
 {
 	befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
 	if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-		char *p = nd_get_link(nd);
-		if (!IS_ERR(p))
-			kfree(p);
+		char *link = nd_get_link(nd);
+		if (!IS_ERR(link))
+			kfree(link);
 	}
 }
 
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index a1bb224..ba4cddb 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -372,21 +372,17 @@
 			 
 		flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
 	} else {
-		static unsigned long error_time, error_time2;
 		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
-		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
+		    (N_MAGIC(ex) != NMAGIC) && printk_ratelimit())
 		{
 			printk(KERN_NOTICE "executable not page aligned\n");
-			error_time2 = jiffies;
 		}
 
-		if ((fd_offset & ~PAGE_MASK) != 0 &&
-		    (jiffies-error_time) > 5*HZ)
+		if ((fd_offset & ~PAGE_MASK) != 0 && printk_ratelimit())
 		{
 			printk(KERN_WARNING 
 			       "fd_offset is not page aligned. Please convert program: %s\n",
 			       bprm->file->f_path.dentry->d_name.name);
-			error_time = jiffies;
 		}
 
 		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
@@ -495,15 +491,13 @@
 	start_addr =  ex.a_entry & 0xfffff000;
 
 	if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
-		static unsigned long error_time;
 		loff_t pos = N_TXTOFF(ex);
 
-		if ((jiffies-error_time) > 5*HZ)
+		if (printk_ratelimit())
 		{
 			printk(KERN_WARNING 
 			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
 			       file->f_path.dentry->d_name.name);
-			error_time = jiffies;
 		}
 		down_write(&current->mm->mmap_sem);
 		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9924581..b25707f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1255,26 +1255,23 @@
 static void fill_elf_header(struct elfhdr *elf, int segs,
 			    u16 machine, u32 flags, u8 osabi)
 {
+	memset(elf, 0, sizeof(*elf));
+
 	memcpy(elf->e_ident, ELFMAG, SELFMAG);
 	elf->e_ident[EI_CLASS] = ELF_CLASS;
 	elf->e_ident[EI_DATA] = ELF_DATA;
 	elf->e_ident[EI_VERSION] = EV_CURRENT;
 	elf->e_ident[EI_OSABI] = ELF_OSABI;
-	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
 
 	elf->e_type = ET_CORE;
 	elf->e_machine = machine;
 	elf->e_version = EV_CURRENT;
-	elf->e_entry = 0;
 	elf->e_phoff = sizeof(struct elfhdr);
-	elf->e_shoff = 0;
 	elf->e_flags = flags;
 	elf->e_ehsize = sizeof(struct elfhdr);
 	elf->e_phentsize = sizeof(struct elf_phdr);
 	elf->e_phnum = segs;
-	elf->e_shentsize = 0;
-	elf->e_shnum = 0;
-	elf->e_shstrndx = 0;
+
 	return;
 }
 
@@ -1725,26 +1722,25 @@
 
 	info->thread_status_size = 0;
 	if (signr) {
-		struct elf_thread_status *tmp;
+		struct elf_thread_status *ets;
 		rcu_read_lock();
 		do_each_thread(g, p)
 			if (current->mm == p->mm && current != p) {
-				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
-				if (!tmp) {
+				ets = kzalloc(sizeof(*ets), GFP_ATOMIC);
+				if (!ets) {
 					rcu_read_unlock();
 					return 0;
 				}
-				tmp->thread = p;
-				list_add(&tmp->list, &info->thread_list);
+				ets->thread = p;
+				list_add(&ets->list, &info->thread_list);
 			}
 		while_each_thread(g, p);
 		rcu_read_unlock();
 		list_for_each(t, &info->thread_list) {
-			struct elf_thread_status *tmp;
 			int sz;
 
-			tmp = list_entry(t, struct elf_thread_status, list);
-			sz = elf_dump_thread_status(signr, tmp);
+			ets = list_entry(t, struct elf_thread_status, list);
+			sz = elf_dump_thread_status(signr, ets);
 			info->thread_status_size += sz;
 		}
 	}
@@ -2000,10 +1996,10 @@
 
 		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
 			struct page *page;
-			struct vm_area_struct *vma;
+			struct vm_area_struct *tmp_vma;
 
 			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-						&page, &vma) <= 0) {
+						&page, &tmp_vma) <= 0) {
 				DUMP_SEEK(PAGE_SIZE);
 			} else {
 				if (page == ZERO_PAGE(0)) {
@@ -2013,7 +2009,7 @@
 					}
 				} else {
 					void *kaddr;
-					flush_cache_page(vma, addr,
+					flush_cache_page(tmp_vma, addr,
 							 page_to_pfn(page));
 					kaddr = kmap(page);
 					if ((size += PAGE_SIZE) > limit ||
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 32649f2..ddd35d8 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -136,8 +136,8 @@
 
 	retval = kernel_read(file, params->hdr.e_phoff,
 			     (char *) params->phdrs, size);
-	if (retval < 0)
-		return retval;
+	if (unlikely(retval != size))
+		return retval < 0 ? retval : -ENOEXEC;
 
 	/* determine stack size for this binary */
 	phdr = params->phdrs;
@@ -218,8 +218,11 @@
 					     phdr->p_offset,
 					     interpreter_name,
 					     phdr->p_filesz);
-			if (retval < 0)
+			if (unlikely(retval != phdr->p_filesz)) {
+				if (retval >= 0)
+					retval = -ENOEXEC;
 				goto error;
+			}
 
 			retval = -ENOENT;
 			if (interpreter_name[phdr->p_filesz - 1] != '\0')
@@ -245,8 +248,11 @@
 
 			retval = kernel_read(interpreter, 0, bprm->buf,
 					     BINPRM_BUF_SIZE);
-			if (retval < 0)
+			if (unlikely(retval != BINPRM_BUF_SIZE)) {
+				if (retval >= 0)
+					retval = -ENOEXEC;
 				goto error;
+			}
 
 			interp_params.hdr = *((struct elfhdr *) bprm->buf);
 			break;
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index f95ae97..f9c88d0 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -43,7 +43,7 @@
 			return -ENOEXEC;
 	}
 
-	bprm->sh_bang++;	/* Well, the bang-shell is implicit... */
+	bprm->sh_bang = 1;	/* Well, the bang-shell is implicit... */
 	allow_write_access(bprm->file);
 	fput(bprm->file);
 	bprm->file = NULL;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 0498b18..3b40d45 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -531,7 +531,8 @@
 		DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
 
 		down_write(&current->mm->mmap_sem);
-		textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_PRIVATE, 0);
+		textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
+				  MAP_PRIVATE|MAP_EXECUTABLE, 0);
 		up_write(&current->mm->mmap_sem);
 		if (!textpos  || textpos >= (unsigned long) -4096) {
 			if (!textpos)
@@ -932,14 +933,8 @@
 	return register_binfmt(&flat_format);
 }
 
-static void __exit exit_flat_binfmt(void)
-{
-	unregister_binfmt(&flat_format);
-}
-
 /****************************************************************************/
 
 core_initcall(init_flat_binfmt);
-module_exit(exit_flat_binfmt);
 
 /****************************************************************************/
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index dbf0ac0..7191306 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -115,6 +115,12 @@
 	if (!enabled)
 		goto _ret;
 
+	retval = -ENOEXEC;
+	if (bprm->misc_bang)
+		goto _ret;
+
+	bprm->misc_bang = 1;
+
 	/* to keep locking time low, we copy the interpreter string */
 	read_lock(&entries_lock);
 	fmt = check_file(bprm);
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index ab33939..9e3963f 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -29,7 +29,7 @@
 	 * Sorta complicated, but hopefully it will work.  -TYT
 	 */
 
-	bprm->sh_bang++;
+	bprm->sh_bang = 1;
 	allow_write_access(bprm->file);
 	fput(bprm->file);
 	bprm->file = NULL;
diff --git a/fs/bio.c b/fs/bio.c
index 6e0b6f6..799f86d 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -937,6 +937,95 @@
 	return ERR_PTR(-EINVAL);
 }
 
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+	struct bio_vec *bvec;
+	const int read = bio_data_dir(bio) == READ;
+	char *p = bio->bi_private;
+	int i;
+
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		char *addr = page_address(bvec->bv_page);
+
+		if (read && !err)
+			memcpy(p, addr, bvec->bv_len);
+
+		__free_page(bvec->bv_page);
+		p += bvec->bv_len;
+	}
+
+	bio_put(bio);
+}
+
+/**
+ *	bio_copy_kern	-	copy kernel address into bio
+ *	@q: the struct request_queue for the bio
+ *	@data: pointer to buffer to copy
+ *	@len: length in bytes
+ *	@gfp_mask: allocation flags for bio and page allocation
+ *
+ *	copy the kernel address into a bio suitable for io to a block
+ *	device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+			  gfp_t gfp_mask, int reading)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	struct bio *bio;
+	struct bio_vec *bvec;
+	int i, ret;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	while (len) {
+		struct page *page;
+		unsigned int bytes = PAGE_SIZE;
+
+		if (bytes > len)
+			bytes = len;
+
+		page = alloc_page(q->bounce_gfp | gfp_mask);
+		if (!page) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
+		len -= bytes;
+	}
+
+	if (!reading) {
+		void *p = data;
+
+		bio_for_each_segment(bvec, bio, i) {
+			char *addr = page_address(bvec->bv_page);
+
+			memcpy(addr, p, bvec->bv_len);
+			p += bvec->bv_len;
+		}
+	}
+
+	bio->bi_private = data;
+	bio->bi_end_io = bio_copy_kern_endio;
+	return bio;
+cleanup:
+	bio_for_each_segment(bvec, bio, i)
+		__free_page(bvec->bv_page);
+
+	bio_put(bio);
+
+	return ERR_PTR(ret);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
@@ -1273,6 +1362,7 @@
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
 EXPORT_SYMBOL(bio_map_kern);
+EXPORT_SYMBOL(bio_copy_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
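
For orientation, the newly exported bio_copy_kern() above is intended for
callers that need to drive block I/O from a plain kernel buffer rather than
from user memory. A minimal, hypothetical use looks like the sketch below
(not part of this patch); q, buf, len and reading are assumed to come from
the caller's context, and submission of the returned bio is elided:

	struct bio *bio;

	bio = bio_copy_kern(q, buf, len, GFP_KERNEL, reading);
	if (IS_ERR(bio))
		return PTR_ERR(bio);
	/*
	 * bio_copy_kern() has already set bi_end_io to
	 * bio_copy_kern_endio(), which copies the bounce pages back into
	 * buf (for reads) and frees them once the I/O completes.
	 */
	...
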
diff --git a/fs/buffer.c b/fs/buffer.c
index 3db4a26..189efa4 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2211,8 +2211,8 @@
 	return err;
 }
 
-int cont_expand_zero(struct file *file, struct address_space *mapping,
-			loff_t pos, loff_t *bytes)
+static int cont_expand_zero(struct file *file, struct address_space *mapping,
+			    loff_t pos, loff_t *bytes)
 {
 	struct inode *inode = mapping->host;
 	unsigned blocksize = 1 << inode->i_blkbits;
@@ -2328,23 +2328,6 @@
 	return 0;
 }
 
-int generic_commit_write(struct file *file, struct page *page,
-		unsigned from, unsigned to)
-{
-	struct inode *inode = page->mapping->host;
-	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-	__block_commit_write(inode,page,from,to);
-	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_mutex.
-	 */
-	if (pos > inode->i_size) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-	return 0;
-}
-
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
  * called from a page fault handler when a page is first dirtied. Hence we must
@@ -3315,7 +3298,6 @@
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 038674a..68e510b 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -55,7 +55,6 @@
 	unsigned int baseminor;
 	int minorct;
 	char name[64];
-	struct file_operations *fops;
 	struct cdev *cdev;		/* will die */
 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
 
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 0228ed0..cc950f6 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -468,7 +468,7 @@
 {
 	struct proc_dir_entry *pde;
 
-	proc_fs_cifs = proc_mkdir("cifs", proc_root_fs);
+	proc_fs_cifs = proc_mkdir("fs/cifs", NULL);
 	if (proc_fs_cifs == NULL)
 		return;
 
@@ -559,7 +559,7 @@
 	remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
 	remove_proc_entry("Experimental", proc_fs_cifs);
 	remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
-	remove_proc_entry("cifs", proc_root_fs);
+	remove_proc_entry("fs/cifs", NULL);
 }
 
 static int
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index 95a5425..e1c8548 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -134,7 +134,7 @@
         unsigned int valid;
 
         /* clean out */        
-        vattr->va_mode = (umode_t) -1;
+	vattr->va_mode = -1;
         vattr->va_uid = (vuid_t) -1; 
         vattr->va_gid = (vgid_t) -1;
         vattr->va_size = (off_t) -1;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index f89ff08..3d2580e 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -345,7 +345,7 @@
 }
 
 /* destruction routines: unlink, rmdir */
-int coda_unlink(struct inode *dir, struct dentry *de)
+static int coda_unlink(struct inode *dir, struct dentry *de)
 {
         int error;
 	const char *name = de->d_name.name;
@@ -365,7 +365,7 @@
 	return 0;
 }
 
-int coda_rmdir(struct inode *dir, struct dentry *de)
+static int coda_rmdir(struct inode *dir, struct dentry *de)
 {
 	const char *name = de->d_name.name;
 	int len = de->d_name.len;
@@ -424,7 +424,7 @@
 
 
 /* file operations for directories */
-int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
+static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
 {
 	struct coda_file_info *cfi;
 	struct file *host_file;
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 59375ef..3e5637f 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -14,18 +14,26 @@
 
 static void drop_pagecache_sb(struct super_block *sb)
 {
-	struct inode *inode;
+	struct inode *inode, *toput_inode = NULL;
 
 	spin_lock(&inode_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		if (inode->i_state & (I_FREEING|I_WILL_FREE))
 			continue;
+		if (inode->i_mapping->nrpages == 0)
+			continue;
+		__iget(inode);
+		spin_unlock(&inode_lock);
 		__invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
+		iput(toput_inode);
+		toput_inode = inode;
+		spin_lock(&inode_lock);
 	}
 	spin_unlock(&inode_lock);
+	iput(toput_inode);
 }
 
-void drop_pagecache(void)
+static void drop_pagecache(void)
 {
 	struct super_block *sb;
 
@@ -45,7 +53,7 @@
 	spin_unlock(&sb_lock);
 }
 
-void drop_slab(void)
+static void drop_slab(void)
 {
 	int nr_objects;
 
diff --git a/fs/ecryptfs/Makefile b/fs/ecryptfs/Makefile
index 7688570..1e34a7f 100644
--- a/fs/ecryptfs/Makefile
+++ b/fs/ecryptfs/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o
 
-ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o debug.o
+ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o miscdev.o debug.o
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index a066e10..cd62d75 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -119,21 +119,21 @@
 	if (rc) {
 		printk(KERN_ERR
 		       "%s: Error initializing crypto hash; rc = [%d]\n",
-		       __FUNCTION__, rc);
+		       __func__, rc);
 		goto out;
 	}
 	rc = crypto_hash_update(&desc, &sg, len);
 	if (rc) {
 		printk(KERN_ERR
 		       "%s: Error updating crypto hash; rc = [%d]\n",
-		       __FUNCTION__, rc);
+		       __func__, rc);
 		goto out;
 	}
 	rc = crypto_hash_final(&desc, dst);
 	if (rc) {
 		printk(KERN_ERR
 		       "%s: Error finalizing crypto hash; rc = [%d]\n",
-		       __FUNCTION__, rc);
+		       __func__, rc);
 		goto out;
 	}
 out:
@@ -437,7 +437,7 @@
 	if (rc < 0) {
 		printk(KERN_ERR "%s: Error attempting to encrypt page with "
 		       "page->index = [%ld], extent_offset = [%ld]; "
-		       "rc = [%d]\n", __FUNCTION__, page->index, extent_offset,
+		       "rc = [%d]\n", __func__, page->index, extent_offset,
 		       rc);
 		goto out;
 	}
@@ -487,7 +487,7 @@
 						       0, PAGE_CACHE_SIZE);
 		if (rc)
 			printk(KERN_ERR "%s: Error attempting to copy "
-			       "page at index [%ld]\n", __FUNCTION__,
+			       "page at index [%ld]\n", __func__,
 			       page->index);
 		goto out;
 	}
@@ -508,7 +508,7 @@
 					     extent_offset);
 		if (rc) {
 			printk(KERN_ERR "%s: Error encrypting extent; "
-			       "rc = [%d]\n", __FUNCTION__, rc);
+			       "rc = [%d]\n", __func__, rc);
 			goto out;
 		}
 		ecryptfs_lower_offset_for_extent(
@@ -569,7 +569,7 @@
 	if (rc < 0) {
 		printk(KERN_ERR "%s: Error attempting to decrypt to page with "
 		       "page->index = [%ld], extent_offset = [%ld]; "
-		       "rc = [%d]\n", __FUNCTION__, page->index, extent_offset,
+		       "rc = [%d]\n", __func__, page->index, extent_offset,
 		       rc);
 		goto out;
 	}
@@ -622,7 +622,7 @@
 						      ecryptfs_inode);
 		if (rc)
 			printk(KERN_ERR "%s: Error attempting to copy "
-			       "page at index [%ld]\n", __FUNCTION__,
+			       "page at index [%ld]\n", __func__,
 			       page->index);
 		goto out;
 	}
@@ -656,7 +656,7 @@
 					     extent_offset);
 		if (rc) {
 			printk(KERN_ERR "%s: Error encrypting extent; "
-			       "rc = [%d]\n", __FUNCTION__, rc);
+			       "rc = [%d]\n", __func__, rc);
 			goto out;
 		}
 	}
@@ -1215,7 +1215,7 @@
 				 ecryptfs_inode);
 	if (rc) {
 		printk(KERN_ERR "%s: Error reading header region; rc = [%d]\n",
-		       __FUNCTION__, rc);
+		       __func__, rc);
 		goto out;
 	}
 	if (!contains_ecryptfs_marker(data + ECRYPTFS_FILE_SIZE_BYTES)) {
@@ -1246,7 +1246,6 @@
 	(*written) = 6;
 }
 
-struct kmem_cache *ecryptfs_header_cache_0;
 struct kmem_cache *ecryptfs_header_cache_1;
 struct kmem_cache *ecryptfs_header_cache_2;
 
@@ -1320,7 +1319,7 @@
 				  0, crypt_stat->num_header_bytes_at_front);
 	if (rc)
 		printk(KERN_ERR "%s: Error attempting to write header "
-		       "information to lower file; rc = [%d]\n", __FUNCTION__,
+		       "information to lower file; rc = [%d]\n", __func__,
 		       rc);
 	return rc;
 }
@@ -1365,14 +1364,14 @@
 		}
 	} else {
 		printk(KERN_WARNING "%s: Encrypted flag not set\n",
-		       __FUNCTION__);
+		       __func__);
 		rc = -EINVAL;
 		goto out;
 	}
 	/* Released in this function */
 	virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL);
 	if (!virt) {
-		printk(KERN_ERR "%s: Out of memory\n", __FUNCTION__);
+		printk(KERN_ERR "%s: Out of memory\n", __func__);
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1380,7 +1379,7 @@
 					 ecryptfs_dentry);
 	if (unlikely(rc)) {
 		printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
-		       __FUNCTION__, rc);
+		       __func__, rc);
 		goto out_free;
 	}
 	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
@@ -1391,7 +1390,7 @@
 							 ecryptfs_dentry, virt);
 	if (rc) {
 		printk(KERN_ERR "%s: Error writing metadata out to lower file; "
-		       "rc = [%d]\n", __FUNCTION__, rc);
+		       "rc = [%d]\n", __func__, rc);
 		goto out_free;
 	}
 out_free:
@@ -1585,7 +1584,7 @@
 	if (!page_virt) {
 		rc = -ENOMEM;
 		printk(KERN_ERR "%s: Unable to allocate page_virt\n",
-		       __FUNCTION__);
+		       __func__);
 		goto out;
 	}
 	rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size,
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 5007f78..951ee33 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 1997-2003 Erez Zadok
  * Copyright (C) 2001-2003 Stony Brook University
- * Copyright (C) 2004-2007 International Business Machines Corp.
+ * Copyright (C) 2004-2008 International Business Machines Corp.
  *   Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
  *              Trevor S. Highland <trevor.highland@gmail.com>
  *              Tyler Hicks <tyhicks@ou.edu>
@@ -34,6 +34,7 @@
 #include <linux/namei.h>
 #include <linux/scatterlist.h>
 #include <linux/hash.h>
+#include <linux/nsproxy.h>
 
 /* Version verification for shared data structures w/ userspace */
 #define ECRYPTFS_VERSION_MAJOR 0x00
@@ -49,11 +50,13 @@
 #define ECRYPTFS_VERSIONING_POLICY                0x00000008
 #define ECRYPTFS_VERSIONING_XATTR                 0x00000010
 #define ECRYPTFS_VERSIONING_MULTKEY               0x00000020
+#define ECRYPTFS_VERSIONING_DEVMISC               0x00000040
 #define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \
 				  | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \
 				  | ECRYPTFS_VERSIONING_PUBKEY \
 				  | ECRYPTFS_VERSIONING_XATTR \
-				  | ECRYPTFS_VERSIONING_MULTKEY)
+				  | ECRYPTFS_VERSIONING_MULTKEY \
+				  | ECRYPTFS_VERSIONING_DEVMISC)
 #define ECRYPTFS_MAX_PASSWORD_LENGTH 64
 #define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH
 #define ECRYPTFS_SALT_SIZE 8
@@ -73,17 +76,14 @@
 #define ECRYPTFS_DEFAULT_MSG_CTX_ELEMS 32
 #define ECRYPTFS_DEFAULT_SEND_TIMEOUT HZ
 #define ECRYPTFS_MAX_MSG_CTX_TTL (HZ*3)
-#define ECRYPTFS_NLMSG_HELO 100
-#define ECRYPTFS_NLMSG_QUIT 101
-#define ECRYPTFS_NLMSG_REQUEST 102
-#define ECRYPTFS_NLMSG_RESPONSE 103
 #define ECRYPTFS_MAX_PKI_NAME_BYTES 16
 #define ECRYPTFS_DEFAULT_NUM_USERS 4
 #define ECRYPTFS_MAX_NUM_USERS 32768
 #define ECRYPTFS_TRANSPORT_NETLINK 0
 #define ECRYPTFS_TRANSPORT_CONNECTOR 1
 #define ECRYPTFS_TRANSPORT_RELAYFS 2
-#define ECRYPTFS_DEFAULT_TRANSPORT ECRYPTFS_TRANSPORT_NETLINK
+#define ECRYPTFS_TRANSPORT_MISCDEV 3
+#define ECRYPTFS_DEFAULT_TRANSPORT ECRYPTFS_TRANSPORT_MISCDEV
 #define ECRYPTFS_XATTR_NAME "user.ecryptfs"
 
 #define RFC2440_CIPHER_DES3_EDE 0x02
@@ -366,32 +366,63 @@
 };
 
 struct ecryptfs_message {
+	/* Can never be greater than ecryptfs_message_buf_len */
+	/* Used to find the parent msg_ctx */
+	/* Inherits from msg_ctx->index */
 	u32 index;
 	u32 data_len;
 	u8 data[];
 };
 
 struct ecryptfs_msg_ctx {
-#define ECRYPTFS_MSG_CTX_STATE_FREE      0x0001
-#define ECRYPTFS_MSG_CTX_STATE_PENDING   0x0002
-#define ECRYPTFS_MSG_CTX_STATE_DONE      0x0003
-	u32 state;
-	unsigned int index;
-	unsigned int counter;
+#define ECRYPTFS_MSG_CTX_STATE_FREE     0x01
+#define ECRYPTFS_MSG_CTX_STATE_PENDING  0x02
+#define ECRYPTFS_MSG_CTX_STATE_DONE     0x03
+#define ECRYPTFS_MSG_CTX_STATE_NO_REPLY 0x04
+	u8 state;
+#define ECRYPTFS_MSG_HELO 100
+#define ECRYPTFS_MSG_QUIT 101
+#define ECRYPTFS_MSG_REQUEST 102
+#define ECRYPTFS_MSG_RESPONSE 103
+	u8 type;
+	u32 index;
+	/* Counter converts to a sequence number. Each message sent
+	 * out for which we expect a response has an associated
+	 * sequence number. The response must have the same sequence
+	 * number as the counter for the msg_ctx for the message to be
+	 * valid. */
+	u32 counter;
+	size_t msg_size;
 	struct ecryptfs_message *msg;
 	struct task_struct *task;
 	struct list_head node;
+	struct list_head daemon_out_list;
 	struct mutex mux;
 };
 
 extern unsigned int ecryptfs_transport;
 
-struct ecryptfs_daemon_id {
-	pid_t pid;
-	uid_t uid;
-	struct hlist_node id_chain;
+struct ecryptfs_daemon;
+
+struct ecryptfs_daemon {
+#define ECRYPTFS_DAEMON_IN_READ      0x00000001
+#define ECRYPTFS_DAEMON_IN_POLL      0x00000002
+#define ECRYPTFS_DAEMON_ZOMBIE       0x00000004
+#define ECRYPTFS_DAEMON_MISCDEV_OPEN 0x00000008
+	u32 flags;
+	u32 num_queued_msg_ctx;
+	struct pid *pid;
+	uid_t euid;
+	struct user_namespace *user_ns;
+	struct task_struct *task;
+	struct mutex mux;
+	struct list_head msg_ctx_out_queue;
+	wait_queue_head_t wait;
+	struct hlist_node euid_chain;
 };
 
+extern struct mutex ecryptfs_daemon_hash_mux;
+
 static inline struct ecryptfs_file_info *
 ecryptfs_file_to_private(struct file *file)
 {
@@ -500,7 +531,7 @@
 }
 
 #define ecryptfs_printk(type, fmt, arg...) \
-        __ecryptfs_printk(type "%s: " fmt, __FUNCTION__, ## arg);
+        __ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
 void __ecryptfs_printk(const char *fmt, ...);
 
 extern const struct file_operations ecryptfs_main_fops;
@@ -581,10 +612,13 @@
 ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 		  size_t size, int flags);
 int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
-int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid);
-int ecryptfs_process_quit(uid_t uid, pid_t pid);
-int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t uid,
-			      pid_t pid, u32 seq);
+int ecryptfs_process_helo(unsigned int transport, uid_t euid,
+			  struct user_namespace *user_ns, struct pid *pid);
+int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns,
+			  struct pid *pid);
+int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
+			      struct user_namespace *user_ns, struct pid *pid,
+			      u32 seq);
 int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
 			  struct ecryptfs_msg_ctx **msg_ctx);
 int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
@@ -593,14 +627,14 @@
 void ecryptfs_release_messaging(unsigned int transport);
 
 int ecryptfs_send_netlink(char *data, int data_len,
-			  struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type,
-			  u16 msg_flags, pid_t daemon_pid);
+			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+			  u16 msg_flags, struct pid *daemon_pid);
 int ecryptfs_init_netlink(void);
 void ecryptfs_release_netlink(void);
 
 int ecryptfs_send_connector(char *data, int data_len,
-			    struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type,
-			    u16 msg_flags, pid_t daemon_pid);
+			    struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+			    u16 msg_flags, struct pid *daemon_pid);
 int ecryptfs_init_connector(void);
 void ecryptfs_release_connector(void);
 void
@@ -642,5 +676,21 @@
 				     size_t offset_in_page, size_t size,
 				     struct inode *ecryptfs_inode);
 struct page *ecryptfs_get_locked_page(struct file *file, loff_t index);
+int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon);
+int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
+				 struct user_namespace *user_ns);
+int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
+				 size_t *length_size);
+int ecryptfs_write_packet_length(char *dest, size_t size,
+				 size_t *packet_size_length);
+int ecryptfs_init_ecryptfs_miscdev(void);
+void ecryptfs_destroy_ecryptfs_miscdev(void);
+int ecryptfs_send_miscdev(char *data, size_t data_size,
+			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+			  u16 msg_flags, struct ecryptfs_daemon *daemon);
+void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx);
+int
+ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
+		      struct user_namespace *user_ns, struct pid *pid);
 
 #endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 2b8f5ed..2258b8f 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -195,7 +195,9 @@
 		file, ecryptfs_inode_to_private(inode)->lower_file);
 	if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
 		ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
+		mutex_lock(&crypt_stat->cs_mutex);
 		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+		mutex_unlock(&crypt_stat->cs_mutex);
 		rc = 0;
 		goto out;
 	}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e238611..0a13973 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -111,7 +111,7 @@
 
 	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
 	lower_dir_dentry = lock_parent(lower_dentry);
-	if (unlikely(IS_ERR(lower_dir_dentry))) {
+	if (IS_ERR(lower_dir_dentry)) {
 		ecryptfs_printk(KERN_ERR, "Error locking directory of "
 				"dentry\n");
 		rc = PTR_ERR(lower_dir_dentry);
@@ -121,7 +121,7 @@
 					     ecryptfs_dentry, mode, nd);
 	if (rc) {
 		printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
-		       "rc = [%d]\n", __FUNCTION__, rc);
+		       "rc = [%d]\n", __func__, rc);
 		goto out_lock;
 	}
 	rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
@@ -908,7 +908,9 @@
 	if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
 		ia->ia_valid &= ~ATTR_MODE;
 
+	mutex_lock(&lower_dentry->d_inode->i_mutex);
 	rc = notify_change(lower_dentry, ia);
+	mutex_unlock(&lower_dentry->d_inode->i_mutex);
 out:
 	fsstack_copy_attr_all(inode, lower_inode, NULL);
 	return rc;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 682b1b2..e82b457 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -65,7 +65,7 @@
 }
 
 /**
- * parse_packet_length
+ * ecryptfs_parse_packet_length
  * @data: Pointer to memory containing length at offset
  * @size: This function writes the decoded size to this memory
  *        address; zero on error
@@ -73,8 +73,8 @@
  *
  * Returns zero on success; non-zero on error
  */
-static int parse_packet_length(unsigned char *data, size_t *size,
-			       size_t *length_size)
+int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
+				 size_t *length_size)
 {
 	int rc = 0;
 
@@ -105,7 +105,7 @@
 }
 
 /**
- * write_packet_length
+ * ecryptfs_write_packet_length
  * @dest: The byte array target into which to write the length. Must
  *        have at least 5 bytes allocated.
  * @size: The length to write.
@@ -114,8 +114,8 @@
  *
  * Returns zero on success; non-zero on error.
  */
-static int write_packet_length(char *dest, size_t size,
-			       size_t *packet_size_length)
+int ecryptfs_write_packet_length(char *dest, size_t size,
+				 size_t *packet_size_length)
 {
 	int rc = 0;
 
@@ -162,8 +162,8 @@
 		goto out;
 	}
 	message[i++] = ECRYPTFS_TAG_64_PACKET_TYPE;
-	rc = write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
-				 &packet_size_len);
+	rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
+					  &packet_size_len);
 	if (rc) {
 		ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet "
 				"header; cannot generate packet length\n");
@@ -172,8 +172,9 @@
 	i += packet_size_len;
 	memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX);
 	i += ECRYPTFS_SIG_SIZE_HEX;
-	rc = write_packet_length(&message[i], session_key->encrypted_key_size,
-				 &packet_size_len);
+	rc = ecryptfs_write_packet_length(&message[i],
+					  session_key->encrypted_key_size,
+					  &packet_size_len);
 	if (rc) {
 		ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet "
 				"header; cannot generate packet length\n");
@@ -225,7 +226,7 @@
 		rc = -EIO;
 		goto out;
 	}
-	rc = parse_packet_length(&data[i], &m_size, &data_len);
+	rc = ecryptfs_parse_packet_length(&data[i], &m_size, &data_len);
 	if (rc) {
 		ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
 				"rc = [%d]\n", rc);
@@ -304,8 +305,8 @@
 		goto out;
 	}
 	message[i++] = ECRYPTFS_TAG_66_PACKET_TYPE;
-	rc = write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
-				 &packet_size_len);
+	rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
+					  &packet_size_len);
 	if (rc) {
 		ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet "
 				"header; cannot generate packet length\n");
@@ -315,8 +316,8 @@
 	memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX);
 	i += ECRYPTFS_SIG_SIZE_HEX;
 	/* The encrypted key includes 1 byte cipher code and 2 byte checksum */
-	rc = write_packet_length(&message[i], crypt_stat->key_size + 3,
-				 &packet_size_len);
+	rc = ecryptfs_write_packet_length(&message[i], crypt_stat->key_size + 3,
+					  &packet_size_len);
 	if (rc) {
 		ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet "
 				"header; cannot generate packet length\n");
@@ -357,20 +358,25 @@
 	/* verify that everything through the encrypted FEK size is present */
 	if (message_len < 4) {
 		rc = -EIO;
+		printk(KERN_ERR "%s: message_len is [%Zd]; minimum acceptable "
+		       "message length is [%d]\n", __func__, message_len, 4);
 		goto out;
 	}
 	if (data[i++] != ECRYPTFS_TAG_67_PACKET_TYPE) {
-		ecryptfs_printk(KERN_ERR, "Type should be ECRYPTFS_TAG_67\n");
 		rc = -EIO;
+		printk(KERN_ERR "%s: Type should be ECRYPTFS_TAG_67\n",
+		       __func__);
 		goto out;
 	}
 	if (data[i++]) {
-		ecryptfs_printk(KERN_ERR, "Status indicator has non zero value"
-				" [%d]\n", data[i-1]);
 		rc = -EIO;
+		printk(KERN_ERR "%s: Status indicator has non zero "
+		       "value [%d]\n", __func__, data[i-1]);
+
 		goto out;
 	}
-	rc = parse_packet_length(&data[i], &key_rec->enc_key_size, &data_len);
+	rc = ecryptfs_parse_packet_length(&data[i], &key_rec->enc_key_size,
+					  &data_len);
 	if (rc) {
 		ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
 				"rc = [%d]\n", rc);
@@ -378,17 +384,17 @@
 	}
 	i += data_len;
 	if (message_len < (i + key_rec->enc_key_size)) {
-		ecryptfs_printk(KERN_ERR, "message_len [%d]; max len is [%d]\n",
-				message_len, (i + key_rec->enc_key_size));
 		rc = -EIO;
+		printk(KERN_ERR "%s: message_len [%Zd]; max len is [%Zd]\n",
+		       __func__, message_len, (i + key_rec->enc_key_size));
 		goto out;
 	}
 	if (key_rec->enc_key_size > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
-		ecryptfs_printk(KERN_ERR, "Encrypted key_size [%d] larger than "
-				"the maximum key size [%d]\n",
-				key_rec->enc_key_size,
-				ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES);
 		rc = -EIO;
+		printk(KERN_ERR "%s: Encrypted key_size [%Zd] larger than "
+		       "the maximum key size [%d]\n", __func__,
+		       key_rec->enc_key_size,
+		       ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES);
 		goto out;
 	}
 	memcpy(key_rec->enc_key, &data[i], key_rec->enc_key_size);
@@ -445,7 +451,7 @@
 	rc = write_tag_64_packet(auth_tok_sig, &(auth_tok->session_key),
 				 &netlink_message, &netlink_message_length);
 	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet");
+		ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet\n");
 		goto out;
 	}
 	rc = ecryptfs_send_message(ecryptfs_transport, netlink_message,
@@ -570,8 +576,8 @@
 		goto out;
 	}
 	(*new_auth_tok) = &auth_tok_list_item->auth_tok;
-	rc = parse_packet_length(&data[(*packet_size)], &body_size,
-				 &length_size);
+	rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
+					  &length_size);
 	if (rc) {
 		printk(KERN_WARNING "Error parsing packet length; "
 		       "rc = [%d]\n", rc);
@@ -704,8 +710,8 @@
 		goto out;
 	}
 	(*new_auth_tok) = &auth_tok_list_item->auth_tok;
-	rc = parse_packet_length(&data[(*packet_size)], &body_size,
-				 &length_size);
+	rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
+					  &length_size);
 	if (rc) {
 		printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n",
 		       rc);
@@ -852,8 +858,8 @@
 		rc = -EINVAL;
 		goto out;
 	}
-	rc = parse_packet_length(&data[(*packet_size)], &body_size,
-				 &length_size);
+	rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
+					  &length_size);
 	if (rc) {
 		printk(KERN_WARNING "Invalid tag 11 packet format\n");
 		goto out;
@@ -1405,8 +1411,8 @@
 			auth_tok->token.private_key.key_size;
 	rc = pki_encrypt_session_key(auth_tok, crypt_stat, key_rec);
 	if (rc) {
-		ecryptfs_printk(KERN_ERR, "Failed to encrypt session key "
-				"via a pki");
+		printk(KERN_ERR "Failed to encrypt session key via a key "
+		       "module; rc = [%d]\n", rc);
 		goto out;
 	}
 	if (ecryptfs_verbosity > 0) {
@@ -1430,8 +1436,9 @@
 		goto out;
 	}
 	dest[(*packet_size)++] = ECRYPTFS_TAG_1_PACKET_TYPE;
-	rc = write_packet_length(&dest[(*packet_size)], (max_packet_size - 4),
-				 &packet_size_length);
+	rc = ecryptfs_write_packet_length(&dest[(*packet_size)],
+					  (max_packet_size - 4),
+					  &packet_size_length);
 	if (rc) {
 		ecryptfs_printk(KERN_ERR, "Error generating tag 1 packet "
 				"header; cannot generate packet length\n");
@@ -1489,8 +1496,9 @@
 		goto out;
 	}
 	dest[(*packet_length)++] = ECRYPTFS_TAG_11_PACKET_TYPE;
-	rc = write_packet_length(&dest[(*packet_length)],
-				 (max_packet_size - 4), &packet_size_length);
+	rc = ecryptfs_write_packet_length(&dest[(*packet_length)],
+					  (max_packet_size - 4),
+					  &packet_size_length);
 	if (rc) {
 		printk(KERN_ERR "Error generating tag 11 packet header; cannot "
 		       "generate packet length. rc = [%d]\n", rc);
@@ -1682,8 +1690,9 @@
 	dest[(*packet_size)++] = ECRYPTFS_TAG_3_PACKET_TYPE;
 	/* Chop off the Tag 3 identifier(1) and Tag 3 packet size(3)
 	 * to get the number of octets in the actual Tag 3 packet */
-	rc = write_packet_length(&dest[(*packet_size)], (max_packet_size - 4),
-				 &packet_size_length);
+	rc = ecryptfs_write_packet_length(&dest[(*packet_size)],
+					  (max_packet_size - 4),
+					  &packet_size_length);
 	if (rc) {
 		printk(KERN_ERR "Error generating tag 3 packet header; cannot "
 		       "generate packet length. rc = [%d]\n", rc);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index d25ac95..d603631 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -219,7 +219,7 @@
 	if (rc) {
 		printk(KERN_ERR "%s: Error attempting to initialize the "
 		       "persistent file for the dentry with name [%s]; "
-		       "rc = [%d]\n", __FUNCTION__, dentry->d_name.name, rc);
+		       "rc = [%d]\n", __func__, dentry->d_name.name, rc);
 		goto out;
 	}
 out:
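
The packet-length helpers renamed in keystore.c above gain the ecryptfs_
prefix because they are now declared in the shared header for use elsewhere
in eCryptfs (such as the new miscdev transport). A rough sketch of how a
caller encodes a length-prefixed field with them follows; dest, i, payload
and payload_size are illustrative names, not taken from this patch:

	size_t written;
	int rc;

	/* Emit the encoded length at &dest[i]; dest must have at least
	 * 5 bytes free. 'written' receives the number of length octets. */
	rc = ecryptfs_write_packet_length(&dest[i], payload_size, &written);
	if (rc)
		goto out;
	i += written;
	memcpy(&dest[i], payload, payload_size);
	i += payload_size;

ecryptfs_parse_packet_length() performs the inverse on the receive side,
returning both the decoded size and the number of length octets consumed.
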
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 9cc2aec..1b5c200 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -1,7 +1,7 @@
 /**
  * eCryptfs: Linux filesystem encryption layer
  *
- * Copyright (C) 2004-2006 International Business Machines Corp.
+ * Copyright (C) 2004-2008 International Business Machines Corp.
  *   Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
  *		Tyler Hicks <tyhicks@ou.edu>
  *
@@ -20,19 +20,21 @@
  * 02111-1307, USA.
  */
 #include <linux/sched.h>
+#include <linux/user_namespace.h>
+#include <linux/nsproxy.h>
 #include "ecryptfs_kernel.h"
 
 static LIST_HEAD(ecryptfs_msg_ctx_free_list);
 static LIST_HEAD(ecryptfs_msg_ctx_alloc_list);
 static struct mutex ecryptfs_msg_ctx_lists_mux;
 
-static struct hlist_head *ecryptfs_daemon_id_hash;
-static struct mutex ecryptfs_daemon_id_hash_mux;
+static struct hlist_head *ecryptfs_daemon_hash;
+struct mutex ecryptfs_daemon_hash_mux;
 static int ecryptfs_hash_buckets;
 #define ecryptfs_uid_hash(uid) \
         hash_long((unsigned long)uid, ecryptfs_hash_buckets)
 
-static unsigned int ecryptfs_msg_counter;
+static u32 ecryptfs_msg_counter;
 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
 
 /**
@@ -40,9 +42,10 @@
  * @msg_ctx: The context that was acquired from the free list
  *
  * Acquires a context element from the free list and locks the mutex
- * on the context.  Returns zero on success; non-zero on error or upon
- * failure to acquire a free context element.  Be sure to lock the
- * list mutex before calling.
+ * on the context.  Sets the msg_ctx task to current.  Returns zero on
+ * success; non-zero on error or upon failure to acquire a free
+ * context element.  Must be called with ecryptfs_msg_ctx_lists_mux
+ * held.
  */
 static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx)
 {
@@ -50,11 +53,11 @@
 	int rc;
 
 	if (list_empty(&ecryptfs_msg_ctx_free_list)) {
-		ecryptfs_printk(KERN_WARNING, "The eCryptfs free "
-				"context list is empty.  It may be helpful to "
-				"specify the ecryptfs_message_buf_len "
-				"parameter to be greater than the current "
-				"value of [%d]\n", ecryptfs_message_buf_len);
+		printk(KERN_WARNING "%s: The eCryptfs free "
+		       "context list is empty.  It may be helpful to "
+		       "specify the ecryptfs_message_buf_len "
+		       "parameter to be greater than the current "
+		       "value of [%d]\n", __func__, ecryptfs_message_buf_len);
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -75,8 +78,7 @@
  * ecryptfs_msg_ctx_free_to_alloc
  * @msg_ctx: The context to move from the free list to the alloc list
  *
- * Be sure to lock the list mutex and the context mutex before
- * calling.
+ * Must be called with ecryptfs_msg_ctx_lists_mux held.
  */
 static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx)
 {
@@ -89,36 +91,39 @@
  * ecryptfs_msg_ctx_alloc_to_free
  * @msg_ctx: The context to move from the alloc list to the free list
  *
- * Be sure to lock the list mutex and the context mutex before
- * calling.
+ * Must be called with ecryptfs_msg_ctx_lists_mux held.
  */
-static void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
+void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
 {
 	list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list);
 	if (msg_ctx->msg)
 		kfree(msg_ctx->msg);
+	msg_ctx->msg = NULL;
 	msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE;
 }
 
 /**
- * ecryptfs_find_daemon_id
- * @uid: The user id which maps to the desired daemon id
- * @id: If return value is zero, points to the desired daemon id
- *      pointer
+ * ecryptfs_find_daemon_by_euid
+ * @euid: The effective user id which maps to the desired daemon id
+ * @user_ns: The namespace in which @euid applies
+ * @daemon: If return value is zero, points to the desired daemon pointer
  *
- * Search the hash list for the given user id.  Returns zero if the
- * user id exists in the list; non-zero otherwise.  The daemon id hash
- * mutex should be held before calling this function.
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Search the hash list for the given user id.
+ *
+ * Returns zero if the user id exists in the list; non-zero otherwise.
  */
-static int ecryptfs_find_daemon_id(uid_t uid, struct ecryptfs_daemon_id **id)
+int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
+				 struct user_namespace *user_ns)
 {
 	struct hlist_node *elem;
 	int rc;
 
-	hlist_for_each_entry(*id, elem,
-			     &ecryptfs_daemon_id_hash[ecryptfs_uid_hash(uid)],
-			     id_chain) {
-		if ((*id)->uid == uid) {
+	hlist_for_each_entry(*daemon, elem,
+			     &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)],
+			     euid_chain) {
+		if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) {
 			rc = 0;
 			goto out;
 		}
@@ -128,181 +133,325 @@
 	return rc;
 }
 
-static int ecryptfs_send_raw_message(unsigned int transport, u16 msg_type,
-				     pid_t pid)
+static int
+ecryptfs_send_message_locked(unsigned int transport, char *data, int data_len,
+			     u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx);
+
+/**
+ * ecryptfs_send_raw_message
+ * @transport: Transport type
+ * @msg_type: Message type
+ * @daemon: Daemon struct for recipient of message
+ *
+ * A raw message is one that does not include an ecryptfs_message
+ * struct. It simply has a type.
+ *
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_send_raw_message(unsigned int transport, u8 msg_type,
+				     struct ecryptfs_daemon *daemon)
 {
+	struct ecryptfs_msg_ctx *msg_ctx;
 	int rc;
 
 	switch(transport) {
 	case ECRYPTFS_TRANSPORT_NETLINK:
-		rc = ecryptfs_send_netlink(NULL, 0, NULL, msg_type, 0, pid);
+		rc = ecryptfs_send_netlink(NULL, 0, NULL, msg_type, 0,
+					   daemon->pid);
+		break;
+	case ECRYPTFS_TRANSPORT_MISCDEV:
+		rc = ecryptfs_send_message_locked(transport, NULL, 0, msg_type,
+						  &msg_ctx);
+		if (rc) {
+			printk(KERN_ERR "%s: Error whilst attempting to send "
+			       "message via the miscdev; rc = [%d]\n", __func__, rc);
+			goto out;
+		}
+		/* Raw messages are logically context-free (e.g., no
+		 * reply is expected), so we set the state of the
+		 * ecryptfs_msg_ctx object to indicate that it should
+		 * be freed as soon as the transport sends out the message. */
+		mutex_lock(&msg_ctx->mux);
+		msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_NO_REPLY;
+		mutex_unlock(&msg_ctx->mux);
 		break;
 	case ECRYPTFS_TRANSPORT_CONNECTOR:
 	case ECRYPTFS_TRANSPORT_RELAYFS:
 	default:
 		rc = -ENOSYS;
 	}
+out:
+	return rc;
+}
+
+/**
+ * ecryptfs_spawn_daemon - Create and initialize a new daemon struct
+ * @daemon: Pointer to set to newly allocated daemon struct
+ * @euid: Effective user id for the daemon
+ * @user_ns: The namespace in which @euid applies
+ * @pid: Process id for the daemon
+ *
+ * Must be called ceremoniously while in possession of
+ * ecryptfs_sacred_daemon_hash_mux
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int
+ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
+		      struct user_namespace *user_ns, struct pid *pid)
+{
+	int rc = 0;
+
+	(*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL);
+	if (!(*daemon)) {
+		rc = -ENOMEM;
+		printk(KERN_ERR "%s: Failed to allocate [%Zd] bytes of "
+		       "GFP_KERNEL memory\n", __func__, sizeof(**daemon));
+		goto out;
+	}
+	(*daemon)->euid = euid;
+	(*daemon)->user_ns = get_user_ns(user_ns);
+	(*daemon)->pid = get_pid(pid);
+	(*daemon)->task = current;
+	mutex_init(&(*daemon)->mux);
+	INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue);
+	init_waitqueue_head(&(*daemon)->wait);
+	(*daemon)->num_queued_msg_ctx = 0;
+	hlist_add_head(&(*daemon)->euid_chain,
+		       &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]);
+out:
 	return rc;
 }
 
 /**
  * ecryptfs_process_helo
  * @transport: The underlying transport (netlink, etc.)
- * @uid: The user ID owner of the message
+ * @euid: The user ID owner of the message
+ * @user_ns: The namespace in which @euid applies
  * @pid: The process ID for the userspace program that sent the
  *       message
  *
- * Adds the uid and pid values to the daemon id hash.  If a uid
+ * Adds the euid and pid values to the daemon euid hash.  If an euid
  * already has a daemon pid registered, the daemon will be
- * unregistered before the new daemon id is put into the hash list.
- * Returns zero after adding a new daemon id to the hash list;
+ * unregistered before the new daemon is put into the hash list.
+ * Returns zero after adding a new daemon to the hash list;
  * non-zero otherwise.
  */
-int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid)
+int ecryptfs_process_helo(unsigned int transport, uid_t euid,
+			  struct user_namespace *user_ns, struct pid *pid)
 {
-	struct ecryptfs_daemon_id *new_id;
-	struct ecryptfs_daemon_id *old_id;
+	struct ecryptfs_daemon *new_daemon;
+	struct ecryptfs_daemon *old_daemon;
 	int rc;
 
-	mutex_lock(&ecryptfs_daemon_id_hash_mux);
-	new_id = kmalloc(sizeof(*new_id), GFP_KERNEL);
-	if (!new_id) {
-		rc = -ENOMEM;
-		ecryptfs_printk(KERN_ERR, "Failed to allocate memory; unable "
-				"to register daemon [%d] for user [%d]\n",
-				pid, uid);
-		goto unlock;
-	}
-	if (!ecryptfs_find_daemon_id(uid, &old_id)) {
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rc = ecryptfs_find_daemon_by_euid(&old_daemon, euid, user_ns);
+	if (rc == 0) {
 		printk(KERN_WARNING "Received request from user [%d] "
-		       "to register daemon [%d]; unregistering daemon "
-		       "[%d]\n", uid, pid, old_id->pid);
-		hlist_del(&old_id->id_chain);
-		rc = ecryptfs_send_raw_message(transport, ECRYPTFS_NLMSG_QUIT,
-					       old_id->pid);
+		       "to register daemon [0x%p]; unregistering daemon "
+		       "[0x%p]\n", euid, pid, old_daemon->pid);
+		rc = ecryptfs_send_raw_message(transport, ECRYPTFS_MSG_QUIT,
+					       old_daemon);
 		if (rc)
 			printk(KERN_WARNING "Failed to send QUIT "
-			       "message to daemon [%d]; rc = [%d]\n",
-			       old_id->pid, rc);
-		kfree(old_id);
+			       "message to daemon [0x%p]; rc = [%d]\n",
+			       old_daemon->pid, rc);
+		hlist_del(&old_daemon->euid_chain);
+		kfree(old_daemon);
 	}
-	new_id->uid = uid;
-	new_id->pid = pid;
-	hlist_add_head(&new_id->id_chain,
-		       &ecryptfs_daemon_id_hash[ecryptfs_uid_hash(uid)]);
-	rc = 0;
-unlock:
-	mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+	rc = ecryptfs_spawn_daemon(&new_daemon, euid, user_ns, pid);
+	if (rc)
+		printk(KERN_ERR "%s: The gods are displeased with this attempt "
+		       "to create a new daemon object for euid [%d]; pid "
+		       "[0x%p]; rc = [%d]\n", __func__, euid, pid, rc);
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	return rc;
+}
+
+/**
+ * ecryptfs_exorcise_daemon - Destroy the daemon struct
+ *
+ * Must be called ceremoniously while in possession of
+ * ecryptfs_daemon_hash_mux and the daemon's own mux.
+ */
+int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon)
+{
+	struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp;
+	int rc = 0;
+
+	mutex_lock(&daemon->mux);
+	if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ)
+	    || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) {
+		rc = -EBUSY;
+		printk(KERN_WARNING "%s: Attempt to destroy daemon with pid "
+		       "[0x%p], but it is in the midst of a read or a poll\n",
+		       __func__, daemon->pid);
+		mutex_unlock(&daemon->mux);
+		goto out;
+	}
+	list_for_each_entry_safe(msg_ctx, msg_ctx_tmp,
+				 &daemon->msg_ctx_out_queue, daemon_out_list) {
+		list_del(&msg_ctx->daemon_out_list);
+		daemon->num_queued_msg_ctx--;
+		printk(KERN_WARNING "%s: Warning: dropping message that is in "
+		       "the out queue of a dying daemon\n", __func__);
+		ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
+	}
+	hlist_del(&daemon->euid_chain);
+	if (daemon->task)
+		wake_up_process(daemon->task);
+	if (daemon->pid)
+		put_pid(daemon->pid);
+	if (daemon->user_ns)
+		put_user_ns(daemon->user_ns);
+	mutex_unlock(&daemon->mux);
+	memset(daemon, 0, sizeof(*daemon));
+	kfree(daemon);
+out:
 	return rc;
 }
 
 /**
  * ecryptfs_process_quit
- * @uid: The user ID owner of the message
+ * @euid: The user ID owner of the message
+ * @user_ns: The namespace in which @euid applies
  * @pid: The process ID for the userspace program that sent the
  *       message
  *
- * Deletes the corresponding daemon id for the given uid and pid, if
+ * Deletes the corresponding daemon for the given euid and pid, if
  * it is the registered that is requesting the deletion. Returns zero
- * after deleting the desired daemon id; non-zero otherwise.
+ * after deleting the desired daemon; non-zero otherwise.
  */
-int ecryptfs_process_quit(uid_t uid, pid_t pid)
+int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns,
+			  struct pid *pid)
 {
-	struct ecryptfs_daemon_id *id;
+	struct ecryptfs_daemon *daemon;
 	int rc;
 
-	mutex_lock(&ecryptfs_daemon_id_hash_mux);
-	if (ecryptfs_find_daemon_id(uid, &id)) {
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns);
+	if (rc || !daemon) {
 		rc = -EINVAL;
-		ecryptfs_printk(KERN_ERR, "Received request from user [%d] to "
-				"unregister unrecognized daemon [%d]\n", uid,
-				pid);
-		goto unlock;
+		printk(KERN_ERR "Received request from user [%d] to "
+		       "unregister unrecognized daemon [0x%p]\n", euid, pid);
+		goto out_unlock;
 	}
-	if (id->pid != pid) {
-		rc = -EINVAL;
-		ecryptfs_printk(KERN_WARNING, "Received request from user [%d] "
-				"with pid [%d] to unregister daemon [%d]\n",
-				uid, pid, id->pid);
-		goto unlock;
-	}
-	hlist_del(&id->id_chain);
-	kfree(id);
-	rc = 0;
-unlock:
-	mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+	rc = ecryptfs_exorcise_daemon(daemon);
+out_unlock:
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
 	return rc;
 }
 
 /**
  * ecryptfs_process_reponse
  * @msg: The ecryptfs message received; the caller should sanity check
- *       msg->data_len
+ *       msg->data_len and free the memory
  * @pid: The process ID of the userspace application that sent the
  *       message
- * @seq: The sequence number of the message
+ * @seq: The sequence number of the message; must match the sequence
+ *       number for the existing message context waiting for this
+ *       response
  *
- * Processes a response message after sending a operation request to
- * userspace. Returns zero upon delivery to desired context element;
- * non-zero upon delivery failure or error.
+ * Processes a response message after sending an operation request to
+ * userspace. Some other process is awaiting this response. Before
+ * sending out its first communications, the other process allocated a
+ * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The
+ * response message contains this index so that we can copy over the
+ * response message into the msg_ctx that the process holds a
+ * reference to. The other process is going to wake up, check to see
+ * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then
+ * proceed to read off and process the response message.
+ *
+ * Returns zero upon delivery of the response to the desired context
+ * element; non-zero upon delivery failure or error (for example, an
+ * unregistered daemon or a sequence-number mismatch).
  */
-int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t uid,
-			      pid_t pid, u32 seq)
+int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
+			      struct user_namespace *user_ns, struct pid *pid,
+			      u32 seq)
 {
-	struct ecryptfs_daemon_id *id;
+	struct ecryptfs_daemon *daemon;
 	struct ecryptfs_msg_ctx *msg_ctx;
-	int msg_size;
+	size_t msg_size;
+	struct nsproxy *nsproxy;
+	struct user_namespace *current_user_ns;
 	int rc;
 
 	if (msg->index >= ecryptfs_message_buf_len) {
 		rc = -EINVAL;
-		ecryptfs_printk(KERN_ERR, "Attempt to reference "
-				"context buffer at index [%d]; maximum "
-				"allowable is [%d]\n", msg->index,
-				(ecryptfs_message_buf_len - 1));
+		printk(KERN_ERR "%s: Attempt to reference "
+		       "context buffer at index [%d]; maximum "
+		       "allowable is [%d]\n", __func__, msg->index,
+		       (ecryptfs_message_buf_len - 1));
 		goto out;
 	}
 	msg_ctx = &ecryptfs_msg_ctx_arr[msg->index];
 	mutex_lock(&msg_ctx->mux);
-	if (ecryptfs_find_daemon_id(msg_ctx->task->euid, &id)) {
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rcu_read_lock();
+	nsproxy = task_nsproxy(msg_ctx->task);
+	if (nsproxy == NULL) {
 		rc = -EBADMSG;
-		ecryptfs_printk(KERN_WARNING, "User [%d] received a "
-				"message response from process [%d] but does "
-				"not have a registered daemon\n",
-				msg_ctx->task->euid, pid);
+		printk(KERN_ERR "%s: Receiving process is a zombie. Dropping "
+		       "message.\n", __func__);
+		rcu_read_unlock();
+		mutex_unlock(&ecryptfs_daemon_hash_mux);
 		goto wake_up;
 	}
-	if (msg_ctx->task->euid != uid) {
+	current_user_ns = nsproxy->user_ns;
+	rc = ecryptfs_find_daemon_by_euid(&daemon, msg_ctx->task->euid,
+					  current_user_ns);
+	rcu_read_unlock();
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	if (rc) {
 		rc = -EBADMSG;
-		ecryptfs_printk(KERN_WARNING, "Received message from user "
-				"[%d]; expected message from user [%d]\n",
-				uid, msg_ctx->task->euid);
+		printk(KERN_WARNING "%s: User [%d] received a "
+		       "message response from process [0x%p] but does "
+		       "not have a registered daemon\n", __func__,
+		       msg_ctx->task->euid, pid);
+		goto wake_up;
+	}
+	if (msg_ctx->task->euid != euid) {
+		rc = -EBADMSG;
+		printk(KERN_WARNING "%s: Received message from user "
+		       "[%d]; expected message from user [%d]\n", __func__,
+		       euid, msg_ctx->task->euid);
 		goto unlock;
 	}
-	if (id->pid != pid) {
+	if (current_user_ns != user_ns) {
 		rc = -EBADMSG;
-		ecryptfs_printk(KERN_ERR, "User [%d] received a "
-				"message response from an unrecognized "
-				"process [%d]\n", msg_ctx->task->euid, pid);
+		printk(KERN_WARNING "%s: Received message from user_ns "
+		       "[0x%p]; expected message from user_ns [0x%p]\n",
+		       __func__, user_ns, nsproxy->user_ns);
+		goto unlock;
+	}
+	if (daemon->pid != pid) {
+		rc = -EBADMSG;
+		printk(KERN_ERR "%s: User [%d] sent a message response "
+		       "from an unrecognized process [0x%p]\n",
+		       __func__, msg_ctx->task->euid, pid);
 		goto unlock;
 	}
 	if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) {
 		rc = -EINVAL;
-		ecryptfs_printk(KERN_WARNING, "Desired context element is not "
-				"pending a response\n");
+		printk(KERN_WARNING "%s: Desired context element is not "
+		       "pending a response\n", __func__);
 		goto unlock;
 	} else if (msg_ctx->counter != seq) {
 		rc = -EINVAL;
-		ecryptfs_printk(KERN_WARNING, "Invalid message sequence; "
-				"expected [%d]; received [%d]\n",
-				msg_ctx->counter, seq);
+		printk(KERN_WARNING "%s: Invalid message sequence; "
+		       "expected [%d]; received [%d]\n", __func__,
+		       msg_ctx->counter, seq);
 		goto unlock;
 	}
-	msg_size = sizeof(*msg) + msg->data_len;
+	msg_size = (sizeof(*msg) + msg->data_len);
 	msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL);
 	if (!msg_ctx->msg) {
 		rc = -ENOMEM;
-		ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n");
+		printk(KERN_ERR "%s: Failed to allocate [%Zd] bytes of "
+		       "GFP_KERNEL memory\n", __func__, msg_size);
 		goto unlock;
 	}
 	memcpy(msg_ctx->msg, msg, msg_size);
@@ -317,34 +466,38 @@
 }
 
 /**
- * ecryptfs_send_message
+ * ecryptfs_send_message_locked
  * @transport: The transport over which to send the message (i.e.,
  *             netlink)
  * @data: The data to send
  * @data_len: The length of data
  * @msg_ctx: The message context allocated for the send
+ *
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
-			  struct ecryptfs_msg_ctx **msg_ctx)
+static int
+ecryptfs_send_message_locked(unsigned int transport, char *data, int data_len,
+			     u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx)
 {
-	struct ecryptfs_daemon_id *id;
+	struct ecryptfs_daemon *daemon;
 	int rc;
 
-	mutex_lock(&ecryptfs_daemon_id_hash_mux);
-	if (ecryptfs_find_daemon_id(current->euid, &id)) {
-		mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+	rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+					  current->nsproxy->user_ns);
+	if (rc || !daemon) {
 		rc = -ENOTCONN;
-		ecryptfs_printk(KERN_ERR, "User [%d] does not have a daemon "
-				"registered\n", current->euid);
+		printk(KERN_ERR "%s: User [%d] does not have a daemon "
+		       "registered\n", __func__, current->euid);
 		goto out;
 	}
-	mutex_unlock(&ecryptfs_daemon_id_hash_mux);
 	mutex_lock(&ecryptfs_msg_ctx_lists_mux);
 	rc = ecryptfs_acquire_free_msg_ctx(msg_ctx);
 	if (rc) {
 		mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
-		ecryptfs_printk(KERN_WARNING, "Could not claim a free "
-				"context element\n");
+		printk(KERN_WARNING "%s: Could not claim a free "
+		       "context element\n", __func__);
 		goto out;
 	}
 	ecryptfs_msg_ctx_free_to_alloc(*msg_ctx);
@@ -352,23 +505,50 @@
 	mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
 	switch (transport) {
 	case ECRYPTFS_TRANSPORT_NETLINK:
-		rc = ecryptfs_send_netlink(data, data_len, *msg_ctx,
-					   ECRYPTFS_NLMSG_REQUEST, 0, id->pid);
+		rc = ecryptfs_send_netlink(data, data_len, *msg_ctx, msg_type,
+					   0, daemon->pid);
+		break;
+	case ECRYPTFS_TRANSPORT_MISCDEV:
+		rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type,
+					   0, daemon);
 		break;
 	case ECRYPTFS_TRANSPORT_CONNECTOR:
 	case ECRYPTFS_TRANSPORT_RELAYFS:
 	default:
 		rc = -ENOSYS;
 	}
-	if (rc) {
-		printk(KERN_ERR "Error attempting to send message to userspace "
-		       "daemon; rc = [%d]\n", rc);
-	}
+	if (rc)
+		printk(KERN_ERR "%s: Error attempting to send message to "
+		       "userspace daemon; rc = [%d]\n", __func__, rc);
 out:
 	return rc;
 }
 
 /**
+ * ecryptfs_send_message
+ * @transport: The transport over which to send the message (i.e.,
+ *             netlink)
+ * @data: The data to send
+ * @data_len: The length of data
+ * @msg_ctx: The message context allocated for the send
+ *
+ * Grabs ecryptfs_daemon_hash_mux.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
+			  struct ecryptfs_msg_ctx **msg_ctx)
+{
+	int rc;
+
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rc = ecryptfs_send_message_locked(transport, data, data_len,
+					  ECRYPTFS_MSG_REQUEST, msg_ctx);
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	return rc;
+}
+
+/**
  * ecryptfs_wait_for_response
  * @msg_ctx: The context that was assigned when sending a message
  * @msg: The incoming message from userspace; not set if rc != 0
@@ -377,7 +557,7 @@
  * of time exceeds ecryptfs_message_wait_timeout.  If zero is
  * returned, msg will point to a valid message from userspace; a
  * non-zero value is returned upon failure to receive a message or an
- * error occurs.
+ * error occurs. The caller must free @msg on success.
  */
 int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
 			       struct ecryptfs_message **msg)
@@ -413,32 +593,32 @@
 
 	if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) {
 		ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS;
-		ecryptfs_printk(KERN_WARNING, "Specified number of users is "
-				"too large, defaulting to [%d] users\n",
-				ecryptfs_number_of_users);
+		printk(KERN_WARNING "%s: Specified number of users is "
+		       "too large, defaulting to [%d] users\n", __func__,
+		       ecryptfs_number_of_users);
 	}
-	mutex_init(&ecryptfs_daemon_id_hash_mux);
-	mutex_lock(&ecryptfs_daemon_id_hash_mux);
+	mutex_init(&ecryptfs_daemon_hash_mux);
+	mutex_lock(&ecryptfs_daemon_hash_mux);
 	ecryptfs_hash_buckets = 1;
 	while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
 		ecryptfs_hash_buckets++;
-	ecryptfs_daemon_id_hash = kmalloc(sizeof(struct hlist_head)
-					  * ecryptfs_hash_buckets, GFP_KERNEL);
-	if (!ecryptfs_daemon_id_hash) {
+	ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
+					* ecryptfs_hash_buckets), GFP_KERNEL);
+	if (!ecryptfs_daemon_hash) {
 		rc = -ENOMEM;
-		ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n");
-		mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+		printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
+		mutex_unlock(&ecryptfs_daemon_hash_mux);
 		goto out;
 	}
 	for (i = 0; i < ecryptfs_hash_buckets; i++)
-		INIT_HLIST_HEAD(&ecryptfs_daemon_id_hash[i]);
-	mutex_unlock(&ecryptfs_daemon_id_hash_mux);
-
+		INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
 	ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
-				      * ecryptfs_message_buf_len), GFP_KERNEL);
+					* ecryptfs_message_buf_len),
+				       GFP_KERNEL);
 	if (!ecryptfs_msg_ctx_arr) {
 		rc = -ENOMEM;
-		ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n");
+		printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
 		goto out;
 	}
 	mutex_init(&ecryptfs_msg_ctx_lists_mux);
@@ -446,6 +626,7 @@
 	ecryptfs_msg_counter = 0;
 	for (i = 0; i < ecryptfs_message_buf_len; i++) {
 		INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node);
+		INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list);
 		mutex_init(&ecryptfs_msg_ctx_arr[i].mux);
 		mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
 		ecryptfs_msg_ctx_arr[i].index = i;
@@ -464,6 +645,11 @@
 		if (rc)
 			ecryptfs_release_messaging(transport);
 		break;
+	case ECRYPTFS_TRANSPORT_MISCDEV:
+		rc = ecryptfs_init_ecryptfs_miscdev();
+		if (rc)
+			ecryptfs_release_messaging(transport);
+		break;
 	case ECRYPTFS_TRANSPORT_CONNECTOR:
 	case ECRYPTFS_TRANSPORT_RELAYFS:
 	default:
@@ -488,27 +674,37 @@
 		kfree(ecryptfs_msg_ctx_arr);
 		mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
 	}
-	if (ecryptfs_daemon_id_hash) {
+	if (ecryptfs_daemon_hash) {
 		struct hlist_node *elem;
-		struct ecryptfs_daemon_id *id;
+		struct ecryptfs_daemon *daemon;
 		int i;
 
-		mutex_lock(&ecryptfs_daemon_id_hash_mux);
+		mutex_lock(&ecryptfs_daemon_hash_mux);
 		for (i = 0; i < ecryptfs_hash_buckets; i++) {
-			hlist_for_each_entry(id, elem,
-					     &ecryptfs_daemon_id_hash[i],
-					     id_chain) {
-				hlist_del(elem);
-				kfree(id);
+			int rc;
+
+			hlist_for_each_entry(daemon, elem,
+					     &ecryptfs_daemon_hash[i],
+					     euid_chain) {
+				rc = ecryptfs_exorcise_daemon(daemon);
+				if (rc)
+					printk(KERN_ERR "%s: Error whilst "
+					       "attempting to destroy daemon; "
+					       "rc = [%d]. Dazed and confused, "
+					       "but trying to continue.\n",
+					       __func__, rc);
 			}
 		}
-		kfree(ecryptfs_daemon_id_hash);
-		mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+		kfree(ecryptfs_daemon_hash);
+		mutex_unlock(&ecryptfs_daemon_hash_mux);
 	}
 	switch(transport) {
 	case ECRYPTFS_TRANSPORT_NETLINK:
 		ecryptfs_release_netlink();
 		break;
+	case ECRYPTFS_TRANSPORT_MISCDEV:
+		ecryptfs_destroy_ecryptfs_miscdev();
+		break;
 	case ECRYPTFS_TRANSPORT_CONNECTOR:
 	case ECRYPTFS_TRANSPORT_RELAYFS:
 	default:
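
To put the reworked request/response machinery in messaging.c above in
context, a kernel-side round trip (as keystore.c already performs for
key-module operations) follows roughly this pattern; data and data_len
stand in for a serialized request, and error handling is abbreviated:

	struct ecryptfs_msg_ctx *msg_ctx;
	struct ecryptfs_message *msg = NULL;
	int rc;

	rc = ecryptfs_send_message(ecryptfs_transport, data, data_len,
				   &msg_ctx);
	if (rc)
		goto out;
	/* Sleeps until the daemon's reply with the matching sequence
	 * number arrives or ecryptfs_message_wait_timeout expires. */
	rc = ecryptfs_wait_for_response(msg_ctx, &msg);
	if (rc)
		goto out;
	/* ... interpret msg->data over msg->data_len bytes ... */
	kfree(msg);	/* the caller owns msg on success */
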
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
new file mode 100644
index 0000000..788995e
--- /dev/null
+++ b/fs/ecryptfs/miscdev.c
@@ -0,0 +1,598 @@
+/**
+ * eCryptfs: Linux filesystem encryption layer
+ *
+ * Copyright (C) 2008 International Business Machines Corp.
+ *   Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include "ecryptfs_kernel.h"
+
+static atomic_t ecryptfs_num_miscdev_opens;
+
+/**
+ * ecryptfs_miscdev_poll
+ * @file: dev file (ignored)
+ * @pt: dev poll table (ignored)
+ *
+ * Returns the poll mask
+ */
+static unsigned int
+ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
+{
+	struct ecryptfs_daemon *daemon;
+	unsigned int mask = 0;
+	int rc;
+
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	/* TODO: Just use file->private_data? */
+	rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+					  current->nsproxy->user_ns);
+	BUG_ON(rc || !daemon);
+	mutex_lock(&daemon->mux);
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+		printk(KERN_WARNING "%s: Attempt to poll on zombified "
+		       "daemon\n", __func__);
+		goto out_unlock_daemon;
+	}
+	if (daemon->flags & ECRYPTFS_DAEMON_IN_READ)
+		goto out_unlock_daemon;
+	if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)
+		goto out_unlock_daemon;
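+	/* Mark this daemon as having a poll in flight, then drop its mutex
+	 * while registering on the wait queue; the flag is cleared again on
+	 * the way out below */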
+	daemon->flags |= ECRYPTFS_DAEMON_IN_POLL;
+	mutex_unlock(&daemon->mux);
+	poll_wait(file, &daemon->wait, pt);
+	mutex_lock(&daemon->mux);
+	if (!list_empty(&daemon->msg_ctx_out_queue))
+		mask |= POLLIN | POLLRDNORM;
+out_unlock_daemon:
+	daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
+	mutex_unlock(&daemon->mux);
+	return mask;
+}
+
+/**
+ * ecryptfs_miscdev_open
+ * @inode: inode of miscdev handle (ignored)
+ * @file: file for miscdev handle (ignored)
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int
+ecryptfs_miscdev_open(struct inode *inode, struct file *file)
+{
+	struct ecryptfs_daemon *daemon = NULL;
+	int rc;
+
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rc = try_module_get(THIS_MODULE);
+	if (rc == 0) {
+		rc = -EIO;
+		printk(KERN_ERR "%s: Error attempting to increment module use "
+		       "count; rc = [%d]\n", __func__, rc);
+		goto out_unlock_daemon_list;
+	}
+	rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+					  current->nsproxy->user_ns);
+	if (rc || !daemon) {
+		rc = ecryptfs_spawn_daemon(&daemon, current->euid,
+					   current->nsproxy->user_ns,
+					   task_pid(current));
+		if (rc) {
+			printk(KERN_ERR "%s: Error attempting to spawn daemon; "
+			       "rc = [%d]\n", __func__, rc);
+			goto out_module_put_unlock_daemon_list;
+		}
+	}
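+	/* A daemon record now exists for this euid; verify that the opener
+	 * is the registered process and that the handle is not already open */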
+	mutex_lock(&daemon->mux);
+	if (daemon->pid != task_pid(current)) {
+		rc = -EINVAL;
+		printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], "
+		       "but pid [0x%p] has attempted to open the handle "
+		       "instead\n", __func__, daemon->pid, daemon->euid,
+		       task_pid(current));
+		goto out_unlock_daemon;
+	}
+	if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
+		rc = -EBUSY;
+		printk(KERN_ERR "%s: Miscellaneous device handle may only be "
+		       "opened once per daemon; pid [0x%p] already has this "
+		       "handle open\n", __func__, daemon->pid);
+		goto out_unlock_daemon;
+	}
+	daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+	atomic_inc(&ecryptfs_num_miscdev_opens);
+out_unlock_daemon:
+	mutex_unlock(&daemon->mux);
+out_module_put_unlock_daemon_list:
+	if (rc)
+		module_put(THIS_MODULE);
+out_unlock_daemon_list:
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	return rc;
+}
+
+/**
+ * ecryptfs_miscdev_release
+ * @inode: inode of fs/ecryptfs/euid handle (ignored)
+ * @file: file for fs/ecryptfs/euid handle (ignored)
+ *
+ * This shuts down and unregisters the daemon associated with the
+ * handle when the handle is closed.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int
+ecryptfs_miscdev_release(struct inode *inode, struct file *file)
+{
+	struct ecryptfs_daemon *daemon = NULL;
+	int rc;
+
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+					  current->nsproxy->user_ns);
+	BUG_ON(rc || !daemon);
+	mutex_lock(&daemon->mux);
+	BUG_ON(daemon->pid != task_pid(current));
+	BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
+	daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
+	atomic_dec(&ecryptfs_num_miscdev_opens);
+	mutex_unlock(&daemon->mux);
+	rc = ecryptfs_exorcise_daemon(daemon);
+	if (rc) {
+		printk(KERN_CRIT "%s: Fatal error whilst attempting to "
+		       "shut down daemon; rc = [%d]. Please report this "
+		       "bug.\n", __func__, rc);
+		BUG();
+	}
+	module_put(THIS_MODULE);
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	return rc;
+}
+
+/**
+ * ecryptfs_send_miscdev
+ * @data: Data to send to daemon; may be NULL
+ * @data_size: Amount of data to send to daemon
+ * @msg_ctx: Message context, which is used to handle the reply. If
+ *           this is NULL, then we do not expect a reply.
+ * @msg_type: Type of message
+ * @msg_flags: Flags for message
+ * @daemon: eCryptfs daemon object
+ *
+ * Add msg_ctx to the queue and then, if a daemon is blocked waiting on
+ * it, notify the process that data is available. Must be called with
+ * ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_send_miscdev(char *data, size_t data_size,
+			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+			  u16 msg_flags, struct ecryptfs_daemon *daemon)
+{
+	int rc = 0;
+
+	mutex_lock(&msg_ctx->mux);
+	if (data) {
+		msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
+				       GFP_KERNEL);
+		if (!msg_ctx->msg) {
+			rc = -ENOMEM;
+			printk(KERN_ERR "%s: Out of memory whilst attempting "
+			       "to kmalloc(%Zd, GFP_KERNEL)\n", __func__,
+			       (sizeof(*msg_ctx->msg) + data_size));
+			goto out_unlock;
+		}
+	} else
+		msg_ctx->msg = NULL;
+	msg_ctx->type = msg_type;
+	if (data) {
+		msg_ctx->msg->index = msg_ctx->index;
+		msg_ctx->msg->data_len = data_size;
+		memcpy(msg_ctx->msg->data, data, data_size);
+		msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
+	} else
+		msg_ctx->msg_size = 0;
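+	/* Queue the message context on the daemon's outbound list and wake
+	 * up any reader blocked in ecryptfs_miscdev_read() */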
+	mutex_lock(&daemon->mux);
+	list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+	daemon->num_queued_msg_ctx++;
+	wake_up_interruptible(&daemon->wait);
+	mutex_unlock(&daemon->mux);
+out_unlock:
+	mutex_unlock(&msg_ctx->mux);
+	return rc;
+}
+
+/**
+ * ecryptfs_miscdev_read - format and send message from queue
+ * @file: fs/ecryptfs/euid miscdevfs handle (ignored)
+ * @buf: User buffer into which to copy the next message on the daemon queue
+ * @count: Amount of space available in @buf
+ * @ppos: Offset in file (ignored)
+ *
+ * Pulls the most recent message from the daemon queue, formats it for
+ * being sent via a miscdevfs handle, and copies it into @buf
+ *
+ * Returns the number of bytes copied into the user buffer
+ */
+static ssize_t
+ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
+		      loff_t *ppos)
+{
+	struct ecryptfs_daemon *daemon;
+	struct ecryptfs_msg_ctx *msg_ctx;
+	size_t packet_length_size;
+	u32 counter_nbo;
+	char packet_length[3];
+	size_t i;
+	size_t total_length;
+	int rc;
+
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	/* TODO: Just use file->private_data? */
+	rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+					  current->nsproxy->user_ns);
+	BUG_ON(rc || !daemon);
+	mutex_lock(&daemon->mux);
+	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+		rc = 0;
+		printk(KERN_WARNING "%s: Attempt to read from zombified "
+		       "daemon\n", __func__);
+		goto out_unlock_daemon;
+	}
+	if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) {
+		rc = 0;
+		goto out_unlock_daemon;
+	}
+	/* This daemon will not go away so long as this flag is set */
+	daemon->flags |= ECRYPTFS_DAEMON_IN_READ;
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+check_list:
+	if (list_empty(&daemon->msg_ctx_out_queue)) {
+		mutex_unlock(&daemon->mux);
+		rc = wait_event_interruptible(
+			daemon->wait, !list_empty(&daemon->msg_ctx_out_queue));
+		mutex_lock(&daemon->mux);
+		if (rc < 0) {
+			rc = 0;
+			goto out_unlock_daemon;
+		}
+	}
+	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+		rc = 0;
+		goto out_unlock_daemon;
+	}
+	if (list_empty(&daemon->msg_ctx_out_queue)) {
+		/* Something else jumped in since the
+		 * wait_event_interruptible() and removed the
+		 * message from the queue; try again */
+		goto check_list;
+	}
+	BUG_ON(current->euid != daemon->euid);
+	BUG_ON(current->nsproxy->user_ns != daemon->user_ns);
+	BUG_ON(task_pid(current) != daemon->pid);
+	msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
+				   struct ecryptfs_msg_ctx, daemon_out_list);
+	BUG_ON(!msg_ctx);
+	mutex_lock(&msg_ctx->mux);
+	if (msg_ctx->msg) {
+		rc = ecryptfs_write_packet_length(packet_length,
+						  msg_ctx->msg_size,
+						  &packet_length_size);
+		if (rc) {
+			printk(KERN_WARNING "%s: Error writing packet length; "
+			       "rc = [%d]\n", __func__, rc);
+			rc = 0;
+			goto out_unlock_msg_ctx;
+		}
+	} else {
+		packet_length_size = 0;
+		msg_ctx->msg_size = 0;
+	}
+	/* miscdevfs packet format:
+	 *  Octet 0: Type
+	 *  Octets 1-4: network byte order msg_ctx->counter
+	 *  Octets 5-N0: Size of struct ecryptfs_message to follow
+	 *  Octets N0-N1: struct ecryptfs_message (including data)
+	 *
+	 *  Octets 5-N1 not written if the packet type does not
+	 *  include a message */
+	total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size);
+	if (count < total_length) {
+		rc = 0;
+		printk(KERN_WARNING "%s: Only given user buffer of "
+		       "size [%Zd], but we need [%Zd] to read the "
+		       "pending message\n", __func__, count, total_length);
+		goto out_unlock_msg_ctx;
+	}
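+	/* Build the header described above: the type octet, the message
+	 * counter in network byte order, then the optional packet length
+	 * and message body */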
+	i = 0;
+	buf[i++] = msg_ctx->type;
+	counter_nbo = cpu_to_be32(msg_ctx->counter);
+	memcpy(&buf[i], (char *)&counter_nbo, 4);
+	i += 4;
+	if (msg_ctx->msg) {
+		memcpy(&buf[i], packet_length, packet_length_size);
+		i += packet_length_size;
+		rc = copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size);
+		if (rc) {
+			printk(KERN_ERR "%s: copy_to_user returned error "
+			       "[%d]\n", __func__, rc);
+			goto out_unlock_msg_ctx;
+		}
+		i += msg_ctx->msg_size;
+	}
+	rc = i;
+	list_del(&msg_ctx->daemon_out_list);
+	kfree(msg_ctx->msg);
+	msg_ctx->msg = NULL;
+	/* We do not expect a reply from the userspace daemon for any
+	 * message type other than ECRYPTFS_MSG_REQUEST */
+	if (msg_ctx->type != ECRYPTFS_MSG_REQUEST)
+		ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
+out_unlock_msg_ctx:
+	mutex_unlock(&msg_ctx->mux);
+out_unlock_daemon:
+	daemon->flags &= ~ECRYPTFS_DAEMON_IN_READ;
+	mutex_unlock(&daemon->mux);
+	return rc;
+}
+
+/**
+ * ecryptfs_miscdev_helo
+ * @euid: effective user id of the process sending the helo packet
+ * @user_ns: The namespace in which @euid applies
+ * @pid: process id of the process sending the helo packet
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_miscdev_helo(uid_t euid, struct user_namespace *user_ns,
+				 struct pid *pid)
+{
+	int rc;
+
+	rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_MISCDEV, euid, user_ns,
+				   pid);
+	if (rc)
+		printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc);
+	return rc;
+}
+
+/**
+ * ecryptfs_miscdev_quit
+ * @euid: effective user id of the process sending the quit packet
+ * @user_ns: The namespace in which @euid applies
+ * @pid: process id of the process sending the quit packet
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_miscdev_quit(uid_t euid, struct user_namespace *user_ns,
+				 struct pid *pid)
+{
+	int rc;
+
+	rc = ecryptfs_process_quit(euid, user_ns, pid);
+	if (rc)
+		printk(KERN_WARNING
+		       "Error processing QUIT message; rc = [%d]\n", rc);
+	return rc;
+}
+
+/**
+ * ecryptfs_miscdev_response - process response to message previously sent to daemon
+ * @data: Bytes comprising struct ecryptfs_message
+ * @data_size: sizeof(struct ecryptfs_message) + data len
+ * @euid: Effective user id of the process sending the miscdev response
+ * @user_ns: The namespace in which @euid applies
+ * @pid: Process id of the process sending the miscdev response
+ * @seq: Sequence number for miscdev response packet
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_miscdev_response(char *data, size_t data_size,
+				     uid_t euid, struct user_namespace *user_ns,
+				     struct pid *pid, u32 seq)
+{
+	struct ecryptfs_message *msg = (struct ecryptfs_message *)data;
+	int rc;
+
+	if ((sizeof(*msg) + msg->data_len) != data_size) {
+		printk(KERN_WARNING "%s: (sizeof(*msg) + msg->data_len) = "
+		       "[%Zd]; data_size = [%Zd]. Invalid packet.\n", __func__,
+		       (sizeof(*msg) + msg->data_len), data_size);
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = ecryptfs_process_response(msg, euid, user_ns, pid, seq);
+	if (rc)
+		printk(KERN_ERR
+		       "Error processing response message; rc = [%d]\n", rc);
+out:
+	return rc;
+}
+
+/**
+ * ecryptfs_miscdev_write - handle write to daemon miscdev handle
+ * @file: File for misc dev handle (ignored)
+ * @buf: Buffer containing user data
+ * @count: Amount of data in @buf
+ * @ppos: Pointer to offset in file (ignored)
+ *
+ * miscdevfs packet format:
+ *  Octet 0: Type
+ *  Octets 1-4: network byte order msg_ctx->counter (0's for non-response)
+ *  Octets 5-N0: Size of struct ecryptfs_message to follow
+ *  Octets N0-N1: struct ecryptfs_message (including data)
+ *
+ * Returns the number of bytes read from @buf
+ */
+static ssize_t
+ecryptfs_miscdev_write(struct file *file, const char __user *buf,
+		       size_t count, loff_t *ppos)
+{
+	u32 counter_nbo, seq;
+	size_t packet_size, packet_size_length, i;
+	ssize_t sz = 0;
+	char *data;
+	int rc;
+
+	if (count == 0)
+		goto out;
+	data = kmalloc(count, GFP_KERNEL);
+	if (!data) {
+		printk(KERN_ERR "%s: Out of memory whilst attempting to "
+		       "kmalloc([%Zd], GFP_KERNEL)\n", __func__, count);
+		goto out;
+	}
+	rc = copy_from_user(data, buf, count);
+	if (rc) {
+		printk(KERN_ERR "%s: copy_from_user returned error [%d]\n",
+		       __func__, rc);
+		goto out_free;
+	}
+	sz = count;
+	i = 0;
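+	/* Octet 0 selects the packet type; the remaining octets are
+	 * interpreted according to the format in the header above */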
+	switch (data[i++]) {
+	case ECRYPTFS_MSG_RESPONSE:
+		if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) {
+			printk(KERN_WARNING "%s: Minimum acceptable packet "
+			       "size is [%Zd], but amount of data written is "
+			       "only [%Zd]. Discarding response packet.\n",
+			       __func__,
+			       (1 + 4 + 1 + sizeof(struct ecryptfs_message)),
+			       count);
+			goto out_free;
+		}
+		memcpy((char *)&counter_nbo, &data[i], 4);
+		seq = be32_to_cpu(counter_nbo);
+		i += 4;
+		rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
+						  &packet_size_length);
+		if (rc) {
+			printk(KERN_WARNING "%s: Error parsing packet length; "
+			       "rc = [%d]\n", __func__, rc);
+			goto out_free;
+		}
+		i += packet_size_length;
+		if ((1 + 4 + packet_size_length + packet_size) != count) {
+			printk(KERN_WARNING "%s: (1 + 4 + packet_size_length"
+			       "([%Zd]) + packet_size([%Zd]))([%Zd]) != "
+			       "count([%Zd]). Invalid packet format.\n",
+			       __func__, packet_size_length, packet_size,
+			       (1 + 4 + packet_size_length + packet_size), count);
+			goto out_free;
+		}
+		rc = ecryptfs_miscdev_response(&data[i], packet_size,
+					       current->euid,
+					       current->nsproxy->user_ns,
+					       task_pid(current), seq);
+		if (rc)
+			printk(KERN_WARNING "%s: Failed to deliver miscdev "
+			       "response to requesting operation; rc = [%d]\n",
+			       __func__, rc);
+		break;
+	case ECRYPTFS_MSG_HELO:
+		rc = ecryptfs_miscdev_helo(current->euid,
+					   current->nsproxy->user_ns,
+					   task_pid(current));
+		if (rc) {
+			printk(KERN_ERR "%s: Error attempting to process "
+			       "helo from pid [0x%p]; rc = [%d]\n", __func__,
+			       task_pid(current), rc);
+			goto out_free;
+		}
+		break;
+	case ECRYPTFS_MSG_QUIT:
+		rc = ecryptfs_miscdev_quit(current->euid,
+					   current->nsproxy->user_ns,
+					   task_pid(current));
+		if (rc) {
+			printk(KERN_ERR "%s: Error attempting to process "
+			       "quit from pid [0x%p]; rc = [%d]\n", __func__,
+			       task_pid(current), rc);
+			goto out_free;
+		}
+		break;
+	default:
+		ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
+				"message of unrecognized type [%d]\n",
+				data[0]);
+		break;
+	}
+out_free:
+	kfree(data);
+out:
+	return sz;
+}
+
+
+static const struct file_operations ecryptfs_miscdev_fops = {
+	.open    = ecryptfs_miscdev_open,
+	.poll    = ecryptfs_miscdev_poll,
+	.read    = ecryptfs_miscdev_read,
+	.write   = ecryptfs_miscdev_write,
+	.release = ecryptfs_miscdev_release,
+};
+
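+/* Registered with a dynamically assigned minor; the node typically
+ * appears to userspace as /dev/ecryptfs, which the daemon opens in
+ * order to exchange the packets described above. */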
+static struct miscdevice ecryptfs_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name  = "ecryptfs",
+	.fops  = &ecryptfs_miscdev_fops
+};
+
+/**
+ * ecryptfs_init_ecryptfs_miscdev
+ *
+ * Messages sent to the userspace daemon from the kernel are placed on
+ * a queue associated with the daemon. The next read against the
+ * miscdev handle by that daemon will return the oldest message placed
+ * on the message queue for the daemon.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_init_ecryptfs_miscdev(void)
+{
+	int rc;
+
+	atomic_set(&ecryptfs_num_miscdev_opens, 0);
+	mutex_lock(&ecryptfs_daemon_hash_mux);
+	rc = misc_register(&ecryptfs_miscdev);
+	if (rc)
+		printk(KERN_ERR "%s: Failed to register miscellaneous device "
+		       "for communications with userspace daemons; rc = [%d]\n",
+		       __func__, rc);
+	mutex_unlock(&ecryptfs_daemon_hash_mux);
+	return rc;
+}
+
+/**
+ * ecryptfs_destroy_ecryptfs_miscdev
+ *
+ * All of the daemons must be exorcised prior to calling this
+ * function.
+ */
+void ecryptfs_destroy_ecryptfs_miscdev(void)
+{
+	BUG_ON(atomic_read(&ecryptfs_num_miscdev_opens) != 0);
+	misc_deregister(&ecryptfs_miscdev);
+}
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 6df1deb..2b6fe1e 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -153,7 +153,7 @@
 			flush_dcache_page(page);
 			if (rc) {
 				printk(KERN_ERR "%s: Error reading xattr "
-				       "region; rc = [%d]\n", __FUNCTION__, rc);
+				       "region; rc = [%d]\n", __func__, rc);
 				goto out;
 			}
 		} else {
@@ -169,7 +169,7 @@
 			if (rc) {
 				printk(KERN_ERR "%s: Error attempting to read "
 				       "extent at offset [%lld] in the lower "
-				       "file; rc = [%d]\n", __FUNCTION__,
+				       "file; rc = [%d]\n", __func__,
 				       lower_offset, rc);
 				goto out;
 			}
@@ -212,7 +212,7 @@
 				       "the encrypted content from the lower "
 				       "file whilst inserting the metadata "
 				       "from the xattr into the header; rc = "
-				       "[%d]\n", __FUNCTION__, rc);
+				       "[%d]\n", __func__, rc);
 				goto out;
 			}
 
@@ -293,7 +293,7 @@
 			if (rc) {
 				printk(KERN_ERR "%s: Error attemping to read "
 				       "lower page segment; rc = [%d]\n",
-				       __FUNCTION__, rc);
+				       __func__, rc);
 				ClearPageUptodate(page);
 				goto out;
 			} else
@@ -308,7 +308,7 @@
 					       "from the lower file whilst "
 					       "inserting the metadata from "
 					       "the xattr into the header; rc "
-					       "= [%d]\n", __FUNCTION__, rc);
+					       "= [%d]\n", __func__, rc);
 					ClearPageUptodate(page);
 					goto out;
 				}
@@ -320,7 +320,7 @@
 				if (rc) {
 					printk(KERN_ERR "%s: Error reading "
 					       "page; rc = [%d]\n",
-					       __FUNCTION__, rc);
+					       __func__, rc);
 					ClearPageUptodate(page);
 					goto out;
 				}
@@ -331,7 +331,7 @@
 			if (rc) {
 				printk(KERN_ERR "%s: Error decrypting page "
 				       "at index [%ld]; rc = [%d]\n",
-				       __FUNCTION__, page->index, rc);
+				       __func__, page->index, rc);
 				ClearPageUptodate(page);
 				goto out;
 			}
@@ -348,7 +348,7 @@
 			if (rc) {
 				printk(KERN_ERR "%s: Error on attempt to "
 				       "truncate to (higher) offset [%lld];"
-				       " rc = [%d]\n", __FUNCTION__,
+				       " rc = [%d]\n", __func__,
 				       prev_page_end_size, rc);
 				goto out;
 			}
@@ -389,7 +389,7 @@
 	kfree(file_size_virt);
 	if (rc)
 		printk(KERN_ERR "%s: Error writing file size to header; "
-		       "rc = [%d]\n", __FUNCTION__, rc);
+		       "rc = [%d]\n", __func__, rc);
 out:
 	return rc;
 }
diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c
index f638a69..e0abad6 100644
--- a/fs/ecryptfs/netlink.c
+++ b/fs/ecryptfs/netlink.c
@@ -44,8 +44,8 @@
  * upon sending the message; non-zero upon error.
  */
 int ecryptfs_send_netlink(char *data, int data_len,
-			  struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type,
-			  u16 msg_flags, pid_t daemon_pid)
+			  struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+			  u16 msg_flags, struct pid *daemon_pid)
 {
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
@@ -60,7 +60,7 @@
 		ecryptfs_printk(KERN_ERR, "Failed to allocate socket buffer\n");
 		goto out;
 	}
-	nlh = NLMSG_PUT(skb, daemon_pid, msg_ctx ? msg_ctx->counter : 0,
+	nlh = NLMSG_PUT(skb, pid_nr(daemon_pid), msg_ctx ? msg_ctx->counter : 0,
 			msg_type, payload_len);
 	nlh->nlmsg_flags = msg_flags;
 	if (msg_ctx && payload_len) {
@@ -69,7 +69,7 @@
 		msg->data_len = data_len;
 		memcpy(msg->data, data, data_len);
 	}
-	rc = netlink_unicast(ecryptfs_nl_sock, skb, daemon_pid, 0);
+	rc = netlink_unicast(ecryptfs_nl_sock, skb, pid_nr(daemon_pid), 0);
 	if (rc < 0) {
 		ecryptfs_printk(KERN_ERR, "Failed to send eCryptfs netlink "
 				"message; rc = [%d]\n", rc);
@@ -99,6 +99,7 @@
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 	struct ecryptfs_message *msg = NLMSG_DATA(nlh);
+	struct pid *pid;
 	int rc;
 
 	if (skb->len - NLMSG_HDRLEN - sizeof(*msg) != msg->data_len) {
@@ -107,8 +108,10 @@
 				"incorrectly specified data length\n");
 		goto out;
 	}
-	rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid,
-				       NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq);
+	pid = find_get_pid(NETLINK_CREDS(skb)->pid);
+	rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid, NULL,
+				       pid, nlh->nlmsg_seq);
+	put_pid(pid);
 	if (rc)
 		printk(KERN_ERR
 		       "Error processing response message; rc = [%d]\n", rc);
@@ -126,11 +129,13 @@
  */
 static int ecryptfs_process_nl_helo(struct sk_buff *skb)
 {
+	struct pid *pid;
 	int rc;
 
+	pid = find_get_pid(NETLINK_CREDS(skb)->pid);
 	rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_NETLINK,
-				   NETLINK_CREDS(skb)->uid,
-				   NETLINK_CREDS(skb)->pid);
+				   NETLINK_CREDS(skb)->uid, NULL, pid);
+	put_pid(pid);
 	if (rc)
 		printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc);
 	return rc;
@@ -147,10 +152,12 @@
  */
 static int ecryptfs_process_nl_quit(struct sk_buff *skb)
 {
+	struct pid *pid;
 	int rc;
 
-	rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid,
-				   NETLINK_CREDS(skb)->pid);
+	pid = find_get_pid(NETLINK_CREDS(skb)->pid);
+	rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid, NULL, pid);
+	put_pid(pid);
 	if (rc)
 		printk(KERN_WARNING
 		       "Error processing QUIT message; rc = [%d]\n", rc);
@@ -176,20 +183,20 @@
 		goto free;
 	}
 	switch (nlh->nlmsg_type) {
-		case ECRYPTFS_NLMSG_RESPONSE:
+		case ECRYPTFS_MSG_RESPONSE:
 			if (ecryptfs_process_nl_response(skb)) {
 				ecryptfs_printk(KERN_WARNING, "Failed to "
 						"deliver netlink response to "
 						"requesting operation\n");
 			}
 			break;
-		case ECRYPTFS_NLMSG_HELO:
+		case ECRYPTFS_MSG_HELO:
 			if (ecryptfs_process_nl_helo(skb)) {
 				ecryptfs_printk(KERN_WARNING, "Failed to "
 						"fulfill HELO request\n");
 			}
 			break;
-		case ECRYPTFS_NLMSG_QUIT:
+		case ECRYPTFS_MSG_QUIT:
 			if (ecryptfs_process_nl_quit(skb)) {
 				ecryptfs_printk(KERN_WARNING, "Failed to "
 						"fulfill QUIT request\n");
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 0c49286..ebf5515 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -55,7 +55,7 @@
 	set_fs(fs_save);
 	if (octets_written < 0) {
 		printk(KERN_ERR "%s: octets_written = [%td]; "
-		       "expected [%td]\n", __FUNCTION__, octets_written, size);
+		       "expected [%td]\n", __func__, octets_written, size);
 		rc = -EINVAL;
 	}
 	mutex_unlock(&inode_info->lower_file_mutex);
@@ -153,7 +153,7 @@
 			rc = PTR_ERR(ecryptfs_page);
 			printk(KERN_ERR "%s: Error getting page at "
 			       "index [%ld] from eCryptfs inode "
-			       "mapping; rc = [%d]\n", __FUNCTION__,
+			       "mapping; rc = [%d]\n", __func__,
 			       ecryptfs_page_idx, rc);
 			goto out;
 		}
@@ -165,7 +165,7 @@
 			if (rc) {
 				printk(KERN_ERR "%s: Error decrypting "
 				       "page; rc = [%d]\n",
-				       __FUNCTION__, rc);
+				       __func__, rc);
 				ClearPageUptodate(ecryptfs_page);
 				page_cache_release(ecryptfs_page);
 				goto out;
@@ -202,7 +202,7 @@
 		page_cache_release(ecryptfs_page);
 		if (rc) {
 			printk(KERN_ERR "%s: Error encrypting "
-			       "page; rc = [%d]\n", __FUNCTION__, rc);
+			       "page; rc = [%d]\n", __func__, rc);
 			goto out;
 		}
 		pos += num_bytes;
@@ -254,7 +254,7 @@
 	set_fs(fs_save);
 	if (octets_read < 0) {
 		printk(KERN_ERR "%s: octets_read = [%td]; "
-		       "expected [%td]\n", __FUNCTION__, octets_read, size);
+		       "expected [%td]\n", __func__, octets_read, size);
 		rc = -EINVAL;
 	}
 	mutex_unlock(&inode_info->lower_file_mutex);
@@ -327,7 +327,7 @@
 		printk(KERN_ERR "%s: Attempt to read data past the end of the "
 			"file; offset = [%lld]; size = [%td]; "
 		       "ecryptfs_file_size = [%lld]\n",
-		       __FUNCTION__, offset, size, ecryptfs_file_size);
+		       __func__, offset, size, ecryptfs_file_size);
 		goto out;
 	}
 	pos = offset;
@@ -345,14 +345,14 @@
 			rc = PTR_ERR(ecryptfs_page);
 			printk(KERN_ERR "%s: Error getting page at "
 			       "index [%ld] from eCryptfs inode "
-			       "mapping; rc = [%d]\n", __FUNCTION__,
+			       "mapping; rc = [%d]\n", __func__,
 			       ecryptfs_page_idx, rc);
 			goto out;
 		}
 		rc = ecryptfs_decrypt_page(ecryptfs_page);
 		if (rc) {
 			printk(KERN_ERR "%s: Error decrypting "
-			       "page; rc = [%d]\n", __FUNCTION__, rc);
+			       "page; rc = [%d]\n", __func__, rc);
 			ClearPageUptodate(ecryptfs_page);
 			page_cache_release(ecryptfs_page);
 			goto out;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a415f42..0d23718 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -257,25 +257,6 @@
 	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
 }
 
-/* Special initialization for the RB tree node to detect linkage */
-static inline void ep_rb_initnode(struct rb_node *n)
-{
-	rb_set_parent(n, n);
-}
-
-/* Removes a node from the RB tree and marks it for a fast is-linked check */
-static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
-{
-	rb_erase(n, r);
-	rb_set_parent(n, n);
-}
-
-/* Fast check to verify that the item is linked to the main RB tree */
-static inline int ep_rb_linked(struct rb_node *n)
-{
-	return rb_parent(n) != n;
-}
-
 /* Tells us if the item is currently linked */
 static inline int ep_is_linked(struct list_head *p)
 {
@@ -283,13 +264,13 @@
 }
 
 /* Get the "struct epitem" from a wait queue pointer */
-static inline struct epitem * ep_item_from_wait(wait_queue_t *p)
+static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
 {
 	return container_of(p, struct eppoll_entry, wait)->base;
 }
 
 /* Get the "struct epitem" from an epoll queue wrapper */
-static inline struct epitem * ep_item_from_epqueue(poll_table *p)
+static inline struct epitem *ep_item_from_epqueue(poll_table *p)
 {
 	return container_of(p, struct ep_pqueue, pt)->epi;
 }
@@ -411,8 +392,7 @@
 		list_del_init(&epi->fllink);
 	spin_unlock(&file->f_ep_lock);
 
-	if (ep_rb_linked(&epi->rbn))
-		ep_rb_erase(&epi->rbn, &ep->rbr);
+	rb_erase(&epi->rbn, &ep->rbr);
 
 	spin_lock_irqsave(&ep->lock, flags);
 	if (ep_is_linked(&epi->rdllink))
@@ -728,7 +708,6 @@
 		goto error_return;
 
 	/* Item initialization follow here ... */
-	ep_rb_initnode(&epi->rbn);
 	INIT_LIST_HEAD(&epi->rdllink);
 	INIT_LIST_HEAD(&epi->fllink);
 	INIT_LIST_HEAD(&epi->pwqlist);
diff --git a/fs/exec.c b/fs/exec.c
index b152029..a138839 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -735,6 +735,7 @@
 	tsk->active_mm = mm;
 	activate_mm(active_mm, mm);
 	task_unlock(tsk);
+	mm_update_next_owner(mm);
 	arch_pick_mmap_layout(mm);
 	if (old_mm) {
 		up_read(&old_mm->mmap_sem);
@@ -963,6 +964,8 @@
 	if (retval)
 		goto out;
 
+	set_mm_exe_file(bprm->mm, bprm->file);
+
 	/*
 	 * Release all of the old mmap stuff
 	 */
@@ -1268,7 +1271,6 @@
 {
 	struct linux_binprm *bprm;
 	struct file *file;
-	unsigned long env_p;
 	struct files_struct *displaced;
 	int retval;
 
@@ -1321,11 +1323,9 @@
 	if (retval < 0)
 		goto out;
 
-	env_p = bprm->p;
 	retval = copy_strings(bprm->argc, argv, bprm);
 	if (retval < 0)
 		goto out;
-	bprm->argv_len = env_p - bprm->p;
 
 	retval = search_binary_handler(bprm,regs);
 	if (retval >= 0) {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index ef97f19..9d57695 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2449,17 +2449,10 @@
 	int i;
 
 	if (sbi->s_mb_proc != NULL) {
-		struct proc_dir_entry *p;
-		p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-		if (p) {
-			p->proc_fops = &ext4_mb_seq_history_fops;
-			p->data = sb;
-		}
-		p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-		if (p) {
-			p->proc_fops = &ext4_mb_seq_groups_fops;
-			p->data = sb;
-		}
+		proc_create_data("mb_history", S_IRUGO, sbi->s_mb_proc,
+				 &ext4_mb_seq_history_fops, sb);
+		proc_create_data("mb_groups", S_IRUGO, sbi->s_mb_proc,
+				 &ext4_mb_seq_groups_fops, sb);
 	}
 
 	sbi->s_mb_history_max = 1000;
@@ -2867,7 +2860,6 @@
 	mb_debug("freed %u blocks in %u structures\n", count, count2);
 }
 
-#define EXT4_ROOT			"ext4"
 #define EXT4_MB_STATS_NAME		"stats"
 #define EXT4_MB_MAX_TO_SCAN_NAME	"max_to_scan"
 #define EXT4_MB_MIN_TO_SCAN_NAME	"min_to_scan"
@@ -3007,9 +2999,9 @@
 		return -ENOMEM;
 	}
 #ifdef CONFIG_PROC_FS
-	proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs);
+	proc_root_ext4 = proc_mkdir("fs/ext4", NULL);
 	if (proc_root_ext4 == NULL)
-		printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT);
+		printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n");
 #endif
 	return 0;
 }
@@ -3020,7 +3012,7 @@
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 #ifdef CONFIG_PROC_FS
-	remove_proc_entry(EXT4_ROOT, proc_root_fs);
+	remove_proc_entry("fs/ext4", NULL);
 #endif
 }
 
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 5f522a5..4e0a3dd 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1222,8 +1222,7 @@
 		brelse(bh);
 		goto out_invalid;
 	}
-	logical_sector_size =
-		le16_to_cpu(get_unaligned((__le16 *)&b->sector_size));
+	logical_sector_size = get_unaligned_le16(&b->sector_size);
 	if (!is_power_of_2(logical_sector_size)
 	    || (logical_sector_size < 512)
 	    || (logical_sector_size > 4096)) {
@@ -1322,8 +1321,7 @@
 	sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
 
 	sbi->dir_start = sbi->fat_start + sbi->fats * sbi->fat_length;
-	sbi->dir_entries =
-		le16_to_cpu(get_unaligned((__le16 *)&b->dir_entries));
+	sbi->dir_entries = get_unaligned_le16(&b->dir_entries);
 	if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
 		if (!silent)
 			printk(KERN_ERR "FAT: bogus directroy-entries per block"
@@ -1335,7 +1333,7 @@
 	rootdir_sectors = sbi->dir_entries
 		* sizeof(struct msdos_dir_entry) / sb->s_blocksize;
 	sbi->data_start = sbi->dir_start + rootdir_sectors;
-	total_sectors = le16_to_cpu(get_unaligned((__le16 *)&b->sectors));
+	total_sectors = get_unaligned_le16(&b->sectors);
 	if (total_sectors == 0)
 		total_sectors = le32_to_cpu(b->total_sect);
 
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index 2b46064..50ab5ee 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -50,7 +50,11 @@
 /* vxfs_fshead.c */
 extern int			vxfs_read_fshead(struct super_block *);
 
+/* vxfs_immed.c */
+extern const struct inode_operations vxfs_immed_symlink_iops;
+
 /* vxfs_inode.c */
+extern const struct address_space_operations vxfs_immed_aops;
 extern struct kmem_cache	*vxfs_inode_cachep;
 extern void			vxfs_dumpi(struct vxfs_inode_info *, ino_t);
 extern struct inode *		vxfs_get_fake_inode(struct super_block *,
@@ -69,6 +73,7 @@
 extern int			vxfs_read_olt(struct super_block *, u_long);
 
 /* vxfs_subr.c */
+extern const struct address_space_operations vxfs_aops;
 extern struct page *		vxfs_get_page(struct address_space *, u_long);
 extern void			vxfs_put_page(struct page *);
 extern struct buffer_head *	vxfs_bread(struct inode *, int);
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index 8a5959a..c36aeaf 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -35,6 +35,7 @@
 #include <linux/namei.h>
 
 #include "vxfs.h"
+#include "vxfs_extern.h"
 #include "vxfs_inode.h"
 
 
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index ad88d23..9f3f2ce 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -41,11 +41,6 @@
 #include "vxfs_extern.h"
 
 
-extern const struct address_space_operations vxfs_aops;
-extern const struct address_space_operations vxfs_immed_aops;
-
-extern const struct inode_operations vxfs_immed_symlink_iops;
-
 struct kmem_cache		*vxfs_inode_cachep;
 
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 0655767..ae45f77 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -25,6 +25,45 @@
 #include <linux/buffer_head.h>
 #include "internal.h"
 
+
+/**
+ * writeback_acquire - attempt to get exclusive writeback access to a device
+ * @bdi: the device's backing_dev_info structure
+ *
+ * It is a waste of resources to have more than one pdflush thread blocked on
+ * a single request queue.  Exclusion at the request_queue level is obtained
+ * via a flag in the request_queue's backing_dev_info.state.
+ *
+ * Non-request_queue-backed address_spaces will share default_backing_dev_info,
+ * unless they implement their own.  Which is somewhat inefficient, as this
+ * may prevent concurrent writeback against multiple devices.
+ */
+static int writeback_acquire(struct backing_dev_info *bdi)
+{
+	return !test_and_set_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @bdi: the device's backing_dev_info structure.
+ *
+ * Determine whether there is writeback in progress against a backing device.
+ */
+int writeback_in_progress(struct backing_dev_info *bdi)
+{
+	return test_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_release - relinquish exclusive writeback access against a device.
+ * @bdi: the device's backing_dev_info structure
+ */
+static void writeback_release(struct backing_dev_info *bdi)
+{
+	BUG_ON(!writeback_in_progress(bdi));
+	clear_bit(BDI_pdflush, &bdi->state);
+}
+
 /**
  *	__mark_inode_dirty -	internal function
  *	@inode: inode to mark
@@ -747,43 +786,4 @@
 
 	return err;
 }
-
 EXPORT_SYMBOL(generic_osync_inode);
-
-/**
- * writeback_acquire - attempt to get exclusive writeback access to a device
- * @bdi: the device's backing_dev_info structure
- *
- * It is a waste of resources to have more than one pdflush thread blocked on
- * a single request queue.  Exclusion at the request_queue level is obtained
- * via a flag in the request_queue's backing_dev_info.state.
- *
- * Non-request_queue-backed address_spaces will share default_backing_dev_info,
- * unless they implement their own.  Which is somewhat inefficient, as this
- * may prevent concurrent writeback against multiple devices.
- */
-int writeback_acquire(struct backing_dev_info *bdi)
-{
-	return !test_and_set_bit(BDI_pdflush, &bdi->state);
-}
-
-/**
- * writeback_in_progress - determine whether there is writeback in progress
- * @bdi: the device's backing_dev_info structure.
- *
- * Determine whether there is writeback in progress against a backing device.
- */
-int writeback_in_progress(struct backing_dev_info *bdi)
-{
-	return test_bit(BDI_pdflush, &bdi->state);
-}
-
-/**
- * writeback_release - relinquish exclusive writeback access against a device.
- * @bdi: the device's backing_dev_info structure
- */
-void writeback_release(struct backing_dev_info *bdi)
-{
-	BUG_ON(!writeback_in_progress(bdi));
-	clear_bit(BDI_pdflush, &bdi->state);
-}
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 32de44e..8cf6797 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -297,7 +297,8 @@
 				return 0;
 			}
 			p = match_strdup(&args[0]);
-			hsb->nls_disk = load_nls(p);
+			if (p)
+				hsb->nls_disk = load_nls(p);
 			if (!hsb->nls_disk) {
 				printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p);
 				kfree(p);
@@ -311,7 +312,8 @@
 				return 0;
 			}
 			p = match_strdup(&args[0]);
-			hsb->nls_io = load_nls(p);
+			if (p)
+				hsb->nls_io = load_nls(p);
 			if (!hsb->nls_io) {
 				printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p);
 				kfree(p);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d72d0a8..9e59537 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -311,6 +311,10 @@
 int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
 		       struct inode *, struct qstr *);
 
+/* dir.c */
+extern const struct inode_operations hfsplus_dir_inode_operations;
+extern const struct file_operations hfsplus_dir_operations;
+
 /* extents.c */
 int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
 void hfsplus_ext_write_extent(struct inode *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 37744cf3..d53b2af 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -278,9 +278,6 @@
 	return 0;
 }
 
-extern const struct inode_operations hfsplus_dir_inode_operations;
-extern struct file_operations hfsplus_dir_operations;
-
 static const struct inode_operations hfsplus_file_inode_operations = {
 	.lookup		= hfsplus_file_lookup,
 	.truncate	= hfsplus_file_truncate,
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index dc64fac..9997cbf 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -132,7 +132,8 @@
 				return 0;
 			}
 			p = match_strdup(&args[0]);
-			sbi->nls = load_nls(p);
+			if (p)
+				sbi->nls = load_nls(p);
 			if (!sbi->nls) {
 				printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p);
 				kfree(p);
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 72cab78..175d08e 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -47,7 +47,7 @@
 		return 0;
 	wd->ablk_start = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));
 
-	extent = be32_to_cpu(get_unaligned((__be32 *)(bufptr + HFSP_WRAPOFF_EMBEDEXT)));
+	extent = get_unaligned_be32(bufptr + HFSP_WRAPOFF_EMBEDEXT);
 	wd->embed_start = (extent >> 16) & 0xFFFF;
 	wd->embed_count = extent & 0xFFFF;
 
diff --git a/fs/inode.c b/fs/inode.c
index 27ee1af..bf64781 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -495,8 +495,7 @@
 	struct inode * inode = NULL;
 
 repeat:
-	hlist_for_each (node, head) { 
-		inode = hlist_entry(node, struct inode, i_hash);
+	hlist_for_each_entry(inode, node, head, i_hash) {
 		if (inode->i_sb != sb)
 			continue;
 		if (!test(inode, data))
@@ -520,8 +519,7 @@
 	struct inode * inode = NULL;
 
 repeat:
-	hlist_for_each (node, head) {
-		inode = hlist_entry(node, struct inode, i_hash);
+	hlist_for_each_entry(inode, node, head, i_hash) {
 		if (inode->i_ino != ino)
 			continue;
 		if (inode->i_sb != sb)
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 7b94a1e..6676c06 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -598,7 +598,7 @@
 	}
 
 	ih = inotify_init(&inotify_user_ops);
-	if (unlikely(IS_ERR(ih))) {
+	if (IS_ERR(ih)) {
 		ret = PTR_ERR(ih);
 		goto out_free_dev;
 	}
diff --git a/fs/ioctl.c b/fs/ioctl.c
index f32fbde..7db32b3 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -28,8 +28,8 @@
  *
  * Returns 0 on success, -errno on error.
  */
-long vfs_ioctl(struct file *filp, unsigned int cmd,
-	       unsigned long arg)
+static long vfs_ioctl(struct file *filp, unsigned int cmd,
+		      unsigned long arg)
 {
 	int error = -ENOTTY;
 
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index d1bdf8ad..ccbf72f 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -78,29 +78,29 @@
 }
 static inline unsigned int isonum_721(char *p)
 {
-	return le16_to_cpu(get_unaligned((__le16 *)p));
+	return get_unaligned_le16(p);
 }
 static inline unsigned int isonum_722(char *p)
 {
-	return be16_to_cpu(get_unaligned((__le16 *)p));
+	return get_unaligned_be16(p);
 }
 static inline unsigned int isonum_723(char *p)
 {
 	/* Ignore bigendian datum due to broken mastering programs */
-	return le16_to_cpu(get_unaligned((__le16 *)p));
+	return get_unaligned_le16(p);
 }
 static inline unsigned int isonum_731(char *p)
 {
-	return le32_to_cpu(get_unaligned((__le32 *)p));
+	return get_unaligned_le32(p);
 }
 static inline unsigned int isonum_732(char *p)
 {
-	return be32_to_cpu(get_unaligned((__le32 *)p));
+	return get_unaligned_be32(p);
 }
 static inline unsigned int isonum_733(char *p)
 {
 	/* Ignore bigendian datum due to broken mastering programs */
-	return le32_to_cpu(get_unaligned((__le32 *)p));
+	return get_unaligned_le32(p);
 }
 extern int iso_date(char *, int);
 
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 954cff0..eb7eb6c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -904,19 +904,10 @@
 	snprintf(name, sizeof(name) - 1, "%s", bdevname(journal->j_dev, name));
 	journal->j_proc_entry = proc_mkdir(name, proc_jbd2_stats);
 	if (journal->j_proc_entry) {
-		struct proc_dir_entry *p;
-		p = create_proc_entry("history", S_IRUGO,
-				journal->j_proc_entry);
-		if (p) {
-			p->proc_fops = &jbd2_seq_history_fops;
-			p->data = journal;
-			p = create_proc_entry("info", S_IRUGO,
-						journal->j_proc_entry);
-			if (p) {
-				p->proc_fops = &jbd2_seq_info_fops;
-				p->data = journal;
-			}
-		}
+		proc_create_data("history", S_IRUGO, journal->j_proc_entry,
+				 &jbd2_seq_history_fops, journal);
+		proc_create_data("info", S_IRUGO, journal->j_proc_entry,
+				 &jbd2_seq_info_fops, journal);
 	}
 }
 
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index 887f575..bf6ab19 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -89,7 +89,7 @@
 {
 	int i;
 
-	if (!(base = proc_mkdir("jfs", proc_root_fs)))
+	if (!(base = proc_mkdir("fs/jfs", NULL)))
 		return;
 	base->owner = THIS_MODULE;
 
@@ -109,7 +109,7 @@
 	if (base) {
 		for (i = 0; i < NPROCENT; i++)
 			remove_proc_entry(Entries[i].name, base);
-		remove_proc_entry("jfs", proc_root_fs);
+		remove_proc_entry("fs/jfs", NULL);
 	}
 }
 
diff --git a/fs/namei.c b/fs/namei.c
index e179f71..32fd965 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -30,6 +30,7 @@
 #include <linux/capability.h>
 #include <linux/file.h>
 #include <linux/fcntl.h>
+#include <linux/device_cgroup.h>
 #include <asm/namei.h>
 #include <asm/uaccess.h>
 
@@ -281,6 +282,10 @@
 	if (retval)
 		return retval;
 
+	retval = devcgroup_inode_permission(inode, mask);
+	if (retval)
+		return retval;
+
 	return security_inode_permission(inode, mask, nd);
 }
 
@@ -2028,6 +2033,10 @@
 	if (!dir->i_op || !dir->i_op->mknod)
 		return -EPERM;
 
+	error = devcgroup_inode_mknod(mode, dev);
+	if (error)
+		return error;
+
 	error = security_inode_mknod(dir, dentry, mode, dev);
 	if (error)
 		return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index fe37680..061e5ed 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1176,17 +1176,6 @@
 #endif
 }
 
-static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
-{
-	while (1) {
-		if (d == dentry)
-			return 1;
-		if (d == NULL || d == d->d_parent)
-			return 0;
-		d = d->d_parent;
-	}
-}
-
 struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
 					int flag)
 {
@@ -1203,7 +1192,7 @@
 
 	p = mnt;
 	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
-		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
+		if (!is_subdir(r->mnt_mountpoint, dentry))
 			continue;
 
 		for (s = r; s; s = next_mnt(s, r)) {
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index df6d60b..97645f1 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -102,48 +102,47 @@
 }
 
 static inline char *
- ncp_reply_data(struct ncp_server *server, int offset)
+ncp_reply_data(struct ncp_server *server, int offset)
 {
 	return &(server->packet[sizeof(struct ncp_reply_header) + offset]);
 }
 
-static inline __u8 BVAL(void* data)
+static inline u8 BVAL(void *data)
 {
-	return get_unaligned((__u8*)data);
+	return *(u8 *)data;
 }
 
-static __u8
- ncp_reply_byte(struct ncp_server *server, int offset)
+static u8 ncp_reply_byte(struct ncp_server *server, int offset)
 {
-	return get_unaligned((__u8 *) ncp_reply_data(server, offset));
+	return *(u8 *)ncp_reply_data(server, offset);
 }
 
-static inline __u16 WVAL_LH(void* data)
+static inline u16 WVAL_LH(void *data)
 {
-	return le16_to_cpu(get_unaligned((__le16*)data));
+	return get_unaligned_le16(data);
 }
 
-static __u16
- ncp_reply_le16(struct ncp_server *server, int offset)
+static u16
+ncp_reply_le16(struct ncp_server *server, int offset)
 {
-	return le16_to_cpu(get_unaligned((__le16 *) ncp_reply_data(server, offset)));
+	return get_unaligned_le16(ncp_reply_data(server, offset));
 }
 
-static __u16
- ncp_reply_be16(struct ncp_server *server, int offset)
+static u16
+ncp_reply_be16(struct ncp_server *server, int offset)
 {
-	return be16_to_cpu(get_unaligned((__be16 *) ncp_reply_data(server, offset)));
+	return get_unaligned_be16(ncp_reply_data(server, offset));
 }
 
-static inline __u32 DVAL_LH(void* data)
+static inline u32 DVAL_LH(void *data)
 {
-	return le32_to_cpu(get_unaligned((__le32*)data));
+	return get_unaligned_le32(data);
 }
 
 static __le32
- ncp_reply_dword(struct ncp_server *server, int offset)
+ncp_reply_dword(struct ncp_server *server, int offset)
 {
-	return get_unaligned((__le32 *) ncp_reply_data(server, offset));
+	return get_unaligned((__le32 *)ncp_reply_data(server, offset));
 }
 
 static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) {
@@ -1006,8 +1005,8 @@
 	result = ncp_request2(server, 72, bounce, bufsize);
 	ncp_unlock_server(server);
 	if (!result) {
-		int len = be16_to_cpu(get_unaligned((__be16*)((char*)bounce + 
-			  sizeof(struct ncp_reply_header))));
+		int len = get_unaligned_be16((char *)bounce +
+			  sizeof(struct ncp_reply_header));
 		result = -EIO;
 		if (len <= to_read) {
 			char* source;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index f2f3b28..89ac5bb 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1321,6 +1321,7 @@
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
+	.owner		= THIS_MODULE,
 };
 
 static int nfs_volume_list_open(struct inode *inode, struct file *file);
@@ -1341,6 +1342,7 @@
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
+	.owner		= THIS_MODULE,
 };
 
 /*
@@ -1500,33 +1502,29 @@
 {
 	struct proc_dir_entry *p;
 
-	proc_fs_nfs = proc_mkdir("nfsfs", proc_root_fs);
+	proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL);
 	if (!proc_fs_nfs)
 		goto error_0;
 
 	proc_fs_nfs->owner = THIS_MODULE;
 
 	/* a file of servers with which we're dealing */
-	p = create_proc_entry("servers", S_IFREG|S_IRUGO, proc_fs_nfs);
+	p = proc_create("servers", S_IFREG|S_IRUGO,
+			proc_fs_nfs, &nfs_server_list_fops);
 	if (!p)
 		goto error_1;
 
-	p->proc_fops = &nfs_server_list_fops;
-	p->owner = THIS_MODULE;
-
 	/* a file of volumes that we have mounted */
-	p = create_proc_entry("volumes", S_IFREG|S_IRUGO, proc_fs_nfs);
+	p = proc_create("volumes", S_IFREG|S_IRUGO,
+			proc_fs_nfs, &nfs_volume_list_fops);
 	if (!p)
 		goto error_2;
-
-	p->proc_fops = &nfs_volume_list_fops;
-	p->owner = THIS_MODULE;
 	return 0;
 
 error_2:
 	remove_proc_entry("servers", proc_fs_nfs);
 error_1:
-	remove_proc_entry("nfsfs", proc_root_fs);
+	remove_proc_entry("fs/nfsfs", NULL);
 error_0:
 	return -ENOMEM;
 }
@@ -1538,7 +1536,7 @@
 {
 	remove_proc_entry("volumes", proc_fs_nfs);
 	remove_proc_entry("servers", proc_fs_nfs);
-	remove_proc_entry("nfsfs", proc_root_fs);
+	remove_proc_entry("fs/nfsfs", NULL);
 }
 
 #endif /* CONFIG_PROC_FS */
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 42f3820..5ac00c4 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -169,6 +169,7 @@
 	.read		= seq_read,
 	.llseek		= seq_lseek,
 	.release	= seq_release,
+	.owner		= THIS_MODULE,
 };
 
 /*----------------------------------------------------------------------------*/
@@ -801,10 +802,9 @@
 	entry = proc_mkdir("fs/nfs", NULL);
 	if (!entry)
 		return -ENOMEM;
-	entry = create_proc_entry("fs/nfs/exports", 0, NULL);
+	entry = proc_create("exports", 0, entry, &exports_operations);
 	if (!entry)
 		return -ENOMEM;
-	entry->proc_fops =  &exports_operations;
 	return 0;
 }
 #else /* CONFIG_PROC_FS */
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 2ad5c8b..790defb 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -1191,7 +1191,7 @@
 		if (size) {
 			page = ntfs_map_page(mftbmp_mapping,
 					ofs >> PAGE_CACHE_SHIFT);
-			if (unlikely(IS_ERR(page))) {
+			if (IS_ERR(page)) {
 				ntfs_error(vol->sb, "Failed to read mft "
 						"bitmap, aborting.");
 				return PTR_ERR(page);
@@ -2118,7 +2118,7 @@
 	}
 	/* Read, map, and pin the page containing the mft record. */
 	page = ntfs_map_page(mft_vi->i_mapping, index);
-	if (unlikely(IS_ERR(page))) {
+	if (IS_ERR(page)) {
 		ntfs_error(vol->sb, "Failed to map page containing mft record "
 				"to format 0x%llx.", (long long)mft_no);
 		return PTR_ERR(page);
@@ -2519,7 +2519,7 @@
 	ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
 	/* Read, map, and pin the page containing the mft record. */
 	page = ntfs_map_page(vol->mft_ino->i_mapping, index);
-	if (unlikely(IS_ERR(page))) {
+	if (IS_ERR(page)) {
 		ntfs_error(vol->sb, "Failed to map page containing allocated "
 				"mft record 0x%llx.", (long long)bit);
 		err = PTR_ERR(page);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c5e412a..fcf02f2 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -195,12 +195,32 @@
 	return result;
 }
 
-#define MAY_PTRACE(task) \
-	(task == current || \
-	(task->parent == current && \
-	(task->ptrace & PT_PTRACED) && \
-	 (task_is_stopped_or_traced(task)) && \
-	 security_ptrace(current,task) == 0))
+/*
+ * Return zero if current may access user memory in @task, -error if not.
+ */
+static int check_mem_permission(struct task_struct *task)
+{
+	/*
+	 * A task can always look at itself, in case it chooses
+	 * to use system calls instead of load instructions.
+	 */
+	if (task == current)
+		return 0;
+
+	/*
+	 * If current is actively ptrace'ing, and would also be
+	 * permitted to freshly attach with ptrace now, permit it.
+	 */
+	if (task->parent == current && (task->ptrace & PT_PTRACED) &&
+	    task_is_stopped_or_traced(task) &&
+	    ptrace_may_attach(task))
+		return 0;
+
+	/*
+	 * No one else is allowed.
+	 */
+	return -EPERM;
+}
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
@@ -722,7 +742,7 @@
 	if (!task)
 		goto out_no_task;
 
-	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+	if (check_mem_permission(task))
 		goto out;
 
 	ret = -ENOMEM;
@@ -748,7 +768,7 @@
 
 		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
 		retval = access_process_vm(task, src, page, this_len, 0);
-		if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
+		if (!retval || check_mem_permission(task)) {
 			if (!ret)
 				ret = -EIO;
 			break;
@@ -792,7 +812,7 @@
 	if (!task)
 		goto out_no_task;
 
-	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+	if (check_mem_permission(task))
 		goto out;
 
 	copied = -ENOMEM;
@@ -1181,6 +1201,81 @@
 
 #endif
 
+/*
+ * We added or removed a vma mapping the executable. The vmas are only mapped
+ * during exec and are not mapped with the mmap system call.
+ * Callers must hold down_write() on the mm's mmap_sem for these
+ */
+void added_exe_file_vma(struct mm_struct *mm)
+{
+	mm->num_exe_file_vmas++;
+}
+
+void removed_exe_file_vma(struct mm_struct *mm)
+{
+	mm->num_exe_file_vmas--;
+	if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
+		fput(mm->exe_file);
+		mm->exe_file = NULL;
+	}
+
+}
+
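+/* Record @new_exe_file as the file backing the executable mapped into
+ * @mm, taking a reference on it and dropping the reference to any file
+ * recorded previously. */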
+void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+{
+	if (new_exe_file)
+		get_file(new_exe_file);
+	if (mm->exe_file)
+		fput(mm->exe_file);
+	mm->exe_file = new_exe_file;
+	mm->num_exe_file_vmas = 0;
+}
+
+struct file *get_mm_exe_file(struct mm_struct *mm)
+{
+	struct file *exe_file;
+
+	/* We need mmap_sem to protect against races with removal of
+	 * VM_EXECUTABLE vmas */
+	down_read(&mm->mmap_sem);
+	exe_file = mm->exe_file;
+	if (exe_file)
+		get_file(exe_file);
+	up_read(&mm->mmap_sem);
+	return exe_file;
+}
+
+void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+	/* It's safe to write the exe_file pointer without exe_file_lock because
+	 * this is called during fork when the task is not yet in /proc */
+	newmm->exe_file = get_mm_exe_file(oldmm);
+}
+
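+/* Resolve the /proc/<pid>/exe symlink by returning the path of the mm's
+ * recorded executable file, if one is available. */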
+static int proc_exe_link(struct inode *inode, struct path *exe_path)
+{
+	struct task_struct *task;
+	struct mm_struct *mm;
+	struct file *exe_file;
+
+	task = get_proc_task(inode);
+	if (!task)
+		return -ENOENT;
+	mm = get_task_mm(task);
+	put_task_struct(task);
+	if (!mm)
+		return -ENOENT;
+	exe_file = get_mm_exe_file(mm);
+	mmput(mm);
+	if (exe_file) {
+		*exe_path = exe_file->f_path;
+		path_get(&exe_file->f_path);
+		fput(exe_file);
+		return 0;
+	} else
+		return -ENOENT;
+}
+
 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *inode = dentry->d_inode;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index a36ad3c..9d53b39 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -69,12 +69,7 @@
 		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);
 
 		start = NULL;
-		if (dp->get_info) {
-			/* Handle old net routines */
-			n = dp->get_info(page, &start, *ppos, count);
-			if (n < count)
-				eof = 1;
-		} else if (dp->read_proc) {
+		if (dp->read_proc) {
 			/*
 			 * How to be a proc read function
 			 * ------------------------------
@@ -277,8 +272,11 @@
 	int			len;
 	int 			rtn = 0;
 
+	de = *ret;
+	if (!de)
+		de = &proc_root;
+
 	spin_lock(&proc_subdir_lock);
-	de = &proc_root;
 	while (1) {
 		next = strchr(cp, '/');
 		if (!next)
@@ -385,20 +383,18 @@
 
 	lock_kernel();
 	spin_lock(&proc_subdir_lock);
-	if (de) {
-		for (de = de->subdir; de ; de = de->next) {
-			if (de->namelen != dentry->d_name.len)
-				continue;
-			if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
-				unsigned int ino;
+	for (de = de->subdir; de ; de = de->next) {
+		if (de->namelen != dentry->d_name.len)
+			continue;
+		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
+			unsigned int ino;
 
-				ino = de->low_ino;
-				de_get(de);
-				spin_unlock(&proc_subdir_lock);
-				error = -EINVAL;
-				inode = proc_get_inode(dir->i_sb, ino, de);
-				goto out_unlock;
-			}
+			ino = de->low_ino;
+			de_get(de);
+			spin_unlock(&proc_subdir_lock);
+			error = -EINVAL;
+			inode = proc_get_inode(dir->i_sb, ino, de);
+			goto out_unlock;
 		}
 	}
 	spin_unlock(&proc_subdir_lock);
@@ -410,7 +406,8 @@
 		d_add(dentry, inode);
 		return NULL;
 	}
-	de_put(de);
+	if (de)
+		de_put(de);
 	return ERR_PTR(error);
 }
 
@@ -440,10 +437,6 @@
 	lock_kernel();
 
 	ino = inode->i_ino;
-	if (!de) {
-		ret = -EINVAL;
-		goto out;
-	}
 	i = filp->f_pos;
 	switch (i) {
 		case 0:
@@ -582,7 +575,7 @@
 	/* make sure name is valid */
 	if (!name || !strlen(name)) goto out;
 
-	if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
+	if (xlate_proc_name(name, parent, &fn) != 0)
 		goto out;
 
 	/* At this point there must not be any '/' characters beyond *fn */
@@ -682,9 +675,10 @@
 	return ent;
 }
 
-struct proc_dir_entry *proc_create(const char *name, mode_t mode,
-				   struct proc_dir_entry *parent,
-				   const struct file_operations *proc_fops)
+struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+					struct proc_dir_entry *parent,
+					const struct file_operations *proc_fops,
+					void *data)
 {
 	struct proc_dir_entry *pde;
 	nlink_t nlink;
@@ -705,6 +699,7 @@
 	if (!pde)
 		goto out;
 	pde->proc_fops = proc_fops;
+	pde->data = data;
 	if (proc_register(parent, pde) < 0)
 		goto out_free;
 	return pde;
@@ -734,55 +729,58 @@
 void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 {
 	struct proc_dir_entry **p;
-	struct proc_dir_entry *de;
+	struct proc_dir_entry *de = NULL;
 	const char *fn = name;
 	int len;
 
-	if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
-		goto out;
+	if (xlate_proc_name(name, &parent, &fn) != 0)
+		return;
 	len = strlen(fn);
 
 	spin_lock(&proc_subdir_lock);
 	for (p = &parent->subdir; *p; p=&(*p)->next ) {
-		if (!proc_match(len, fn, *p))
-			continue;
-		de = *p;
-		*p = de->next;
-		de->next = NULL;
-
-		spin_lock(&de->pde_unload_lock);
-		/*
-		 * Stop accepting new callers into module. If you're
-		 * dynamically allocating ->proc_fops, save a pointer somewhere.
-		 */
-		de->proc_fops = NULL;
-		/* Wait until all existing callers into module are done. */
-		if (de->pde_users > 0) {
-			DECLARE_COMPLETION_ONSTACK(c);
-
-			if (!de->pde_unload_completion)
-				de->pde_unload_completion = &c;
-
-			spin_unlock(&de->pde_unload_lock);
-			spin_unlock(&proc_subdir_lock);
-
-			wait_for_completion(de->pde_unload_completion);
-
-			spin_lock(&proc_subdir_lock);
-			goto continue_removing;
+		if (proc_match(len, fn, *p)) {
+			de = *p;
+			*p = de->next;
+			de->next = NULL;
+			break;
 		}
-		spin_unlock(&de->pde_unload_lock);
-
-continue_removing:
-		if (S_ISDIR(de->mode))
-			parent->nlink--;
-		de->nlink = 0;
-		WARN_ON(de->subdir);
-		if (atomic_dec_and_test(&de->count))
-			free_proc_entry(de);
-		break;
 	}
 	spin_unlock(&proc_subdir_lock);
-out:
-	return;
+	if (!de)
+		return;
+
+	spin_lock(&de->pde_unload_lock);
+	/*
+	 * Stop accepting new callers into module. If you're
+	 * dynamically allocating ->proc_fops, save a pointer somewhere.
+	 */
+	de->proc_fops = NULL;
+	/* Wait until all existing callers into module are done. */
+	if (de->pde_users > 0) {
+		DECLARE_COMPLETION_ONSTACK(c);
+
+		if (!de->pde_unload_completion)
+			de->pde_unload_completion = &c;
+
+		spin_unlock(&de->pde_unload_lock);
+
+		wait_for_completion(de->pde_unload_completion);
+
+		goto continue_removing;
+	}
+	spin_unlock(&de->pde_unload_lock);
+
+continue_removing:
+	if (S_ISDIR(de->mode))
+		parent->nlink--;
+	de->nlink = 0;
+	if (de->subdir) {
+		printk(KERN_WARNING "%s: removing non-empty directory "
+			"'%s/%s', leaking at least '%s'\n", __func__,
+			de->parent->name, de->name, de->subdir->name);
+		WARN_ON(1);
+	}
+	if (atomic_dec_and_test(&de->count))
+		free_proc_entry(de);
 }
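
The proc_create_data() introduced above registers a /proc entry and stores the caller's pointer in pde->data in a single step, so callers no longer have to create the entry and then patch ->proc_fops and ->data afterwards. A minimal usage sketch follows; it is a hypothetical module, not part of this patch, and names such as foo_show, foo_proc_fops and foo_value are made up for illustration.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_value = 42;

static int foo_show(struct seq_file *m, void *v)
{
	/* m->private is the pointer handed to single_open() below */
	seq_printf(m, "%d\n", *(int *)m->private);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the last argument of proc_create_data() */
	return single_open(file, foo_show, PDE(inode)->data);
}

static const struct file_operations foo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init foo_init(void)
{
	if (!proc_create_data("foo", 0, NULL, &foo_proc_fops, &foo_value))
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	remove_proc_entry("foo", NULL);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

single_open() passes its third argument through to the show routine as m->private, so the per-entry payload does not need a global lookup table; the reiserfs conversion later in this patch uses the same mechanism to hand a function pointer to its seq_file handler.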
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 82b3a1b..6f4e8dc 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -25,8 +25,7 @@
 
 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
 {
-	if (de)
-		atomic_inc(&de->count);
+	atomic_inc(&de->count);
 	return de;
 }
 
@@ -35,18 +34,16 @@
  */
 void de_put(struct proc_dir_entry *de)
 {
-	if (de) {	
-		lock_kernel();		
-		if (!atomic_read(&de->count)) {
-			printk("de_put: entry %s already free!\n", de->name);
-			unlock_kernel();
-			return;
-		}
-
-		if (atomic_dec_and_test(&de->count))
-			free_proc_entry(de);
+	lock_kernel();
+	if (!atomic_read(&de->count)) {
+		printk("de_put: entry %s already free!\n", de->name);
 		unlock_kernel();
+		return;
 	}
+
+	if (atomic_dec_and_test(&de->count))
+		free_proc_entry(de);
+	unlock_kernel();
 }
 
 /*
@@ -392,7 +389,7 @@
 {
 	struct inode * inode;
 
-	if (de != NULL && !try_module_get(de->owner))
+	if (!try_module_get(de->owner))
 		goto out_mod;
 
 	inode = iget_locked(sb, ino);
@@ -402,30 +399,29 @@
 		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 		PROC_I(inode)->fd = 0;
 		PROC_I(inode)->pde = de;
-		if (de) {
-			if (de->mode) {
-				inode->i_mode = de->mode;
-				inode->i_uid = de->uid;
-				inode->i_gid = de->gid;
-			}
-			if (de->size)
-				inode->i_size = de->size;
-			if (de->nlink)
-				inode->i_nlink = de->nlink;
-			if (de->proc_iops)
-				inode->i_op = de->proc_iops;
-			if (de->proc_fops) {
-				if (S_ISREG(inode->i_mode)) {
+
+		if (de->mode) {
+			inode->i_mode = de->mode;
+			inode->i_uid = de->uid;
+			inode->i_gid = de->gid;
+		}
+		if (de->size)
+			inode->i_size = de->size;
+		if (de->nlink)
+			inode->i_nlink = de->nlink;
+		if (de->proc_iops)
+			inode->i_op = de->proc_iops;
+		if (de->proc_fops) {
+			if (S_ISREG(inode->i_mode)) {
 #ifdef CONFIG_COMPAT
-					if (!de->proc_fops->compat_ioctl)
-						inode->i_fop =
-							&proc_reg_file_ops_no_compat;
-					else
+				if (!de->proc_fops->compat_ioctl)
+					inode->i_fop =
+						&proc_reg_file_ops_no_compat;
+				else
 #endif
-						inode->i_fop = &proc_reg_file_ops;
-				} else {
-					inode->i_fop = de->proc_fops;
-				}
+					inode->i_fop = &proc_reg_file_ops;
+			} else {
+				inode->i_fop = de->proc_fops;
 			}
 		}
 		unlock_new_inode(inode);
@@ -433,8 +429,7 @@
 	return inode;
 
 out_ino:
-	if (de != NULL)
-		module_put(de->owner);
+	module_put(de->owner);
 out_mod:
 	return NULL;
 }			
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index bc72f5c..28cbca8 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -11,6 +11,7 @@
 
 #include <linux/proc_fs.h>
 
+extern struct proc_dir_entry proc_root;
 #ifdef CONFIG_PROC_SYSCTL
 extern int proc_sys_init(void);
 #else
@@ -46,9 +47,6 @@
 
 extern int maps_protect;
 
-extern void create_seq_entry(char *name, mode_t mode,
-				const struct file_operations *f);
-extern int proc_exe_link(struct inode *, struct path *);
 extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
 				struct pid *pid, struct task_struct *task);
 extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 941e951..79ecd28 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -137,7 +137,7 @@
 
 static int __init proc_nommu_init(void)
 {
-	create_seq_entry("maps", S_IRUGO, &proc_nommu_vma_list_operations);
+	proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations);
 	return 0;
 }
 
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 441a32f..48bcf20 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -826,14 +826,6 @@
 
 struct proc_dir_entry *proc_root_kcore;
 
-void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
-{
-	struct proc_dir_entry *entry;
-	entry = create_proc_entry(name, mode, NULL);
-	if (entry)
-		entry->proc_fops = f;
-}
-
 void __init proc_misc_init(void)
 {
 	static struct {
@@ -862,66 +854,52 @@
 
 	/* And now for trickier ones */
 #ifdef CONFIG_PRINTK
-	{
-		struct proc_dir_entry *entry;
-		entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
-		if (entry)
-			entry->proc_fops = &proc_kmsg_operations;
-	}
+	proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations);
 #endif
-	create_seq_entry("locks", 0, &proc_locks_operations);
-	create_seq_entry("devices", 0, &proc_devinfo_operations);
-	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
+	proc_create("locks", 0, NULL, &proc_locks_operations);
+	proc_create("devices", 0, NULL, &proc_devinfo_operations);
+	proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations);
 #ifdef CONFIG_BLOCK
-	create_seq_entry("partitions", 0, &proc_partitions_operations);
+	proc_create("partitions", 0, NULL, &proc_partitions_operations);
 #endif
-	create_seq_entry("stat", 0, &proc_stat_operations);
-	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+	proc_create("stat", 0, NULL, &proc_stat_operations);
+	proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
 #ifdef CONFIG_SLABINFO
-	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
 #ifdef CONFIG_DEBUG_SLAB_LEAK
-	create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations);
+	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
 #endif
 #endif
 #ifdef CONFIG_MMU
 	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
 #endif
-	create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
-	create_seq_entry("pagetypeinfo", S_IRUGO, &pagetypeinfo_file_ops);
-	create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
-	create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations);
+	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #ifdef CONFIG_BLOCK
-	create_seq_entry("diskstats", 0, &proc_diskstats_operations);
+	proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
 #endif
 #ifdef CONFIG_MODULES
-	create_seq_entry("modules", 0, &proc_modules_operations);
+	proc_create("modules", 0, NULL, &proc_modules_operations);
 #endif
 #ifdef CONFIG_SCHEDSTATS
-	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
+	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
 #endif
 #ifdef CONFIG_PROC_KCORE
-	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
-	if (proc_root_kcore) {
-		proc_root_kcore->proc_fops = &proc_kcore_operations;
+	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
+	if (proc_root_kcore)
 		proc_root_kcore->size =
 				(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
-	}
 #endif
 #ifdef CONFIG_PROC_PAGE_MONITOR
-	create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations);
-	create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations);
+	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
+	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
 #endif
 #ifdef CONFIG_PROC_VMCORE
-	proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
-	if (proc_vmcore)
-		proc_vmcore->proc_fops = &proc_vmcore_operations;
+	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
 #endif
 #ifdef CONFIG_MAGIC_SYSRQ
-	{
-		struct proc_dir_entry *entry;
-		entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
-		if (entry)
-			entry->proc_fops = &proc_sysrq_trigger_operations;
-	}
+	proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations);
 #endif
 }
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 614c34b..5acc001 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -165,8 +165,8 @@
 	return err;
 }
 
-static ssize_t proc_sys_read(struct file *filp, char __user *buf,
-				size_t count, loff_t *ppos)
+static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+		size_t count, loff_t *ppos, int write)
 {
 	struct dentry *dentry = filp->f_dentry;
 	struct ctl_table_header *head;
@@ -190,12 +190,12 @@
 	 * and won't be until we finish.
 	 */
 	error = -EPERM;
-	if (sysctl_perm(table, MAY_READ))
+	if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
 		goto out;
 
 	/* careful: calling conventions are nasty here */
 	res = count;
-	error = table->proc_handler(table, 0, filp, buf, &res, ppos);
+	error = table->proc_handler(table, write, filp, buf, &res, ppos);
 	if (!error)
 		error = res;
 out:
@@ -204,44 +204,16 @@
 	return error;
 }
 
+static ssize_t proc_sys_read(struct file *filp, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 0);
+}
+
 static ssize_t proc_sys_write(struct file *filp, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	struct dentry *dentry = filp->f_dentry;
-	struct ctl_table_header *head;
-	struct ctl_table *table;
-	ssize_t error;
-	size_t res;
-
-	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
-	/* Has the sysctl entry disappeared on us? */
-	error = -ENOENT;
-	if (!table)
-		goto out;
-
-	/* Has the sysctl entry been replaced by a directory? */
-	error = -EISDIR;
-	if (!table->proc_handler)
-		goto out;
-
-	/*
-	 * At this point we know that the sysctl was not unregistered
-	 * and won't be until we finish.
-	 */
-	error = -EPERM;
-	if (sysctl_perm(table, MAY_WRITE))
-		goto out;
-
-	/* careful: calling conventions are nasty here */
-	res = count;
-	error = table->proc_handler(table, 1, filp, (char __user *)buf,
-				    &res, ppos);
-	if (!error)
-		error = res;
-out:
-	sysctl_head_finish(head);
-
-	return error;
+	return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1);
 }
 
 
@@ -416,7 +388,7 @@
 		goto out;
 
 	/* Use the permissions on the sysctl table entry */
-	error = sysctl_perm(table, mask);
+	error = sysctl_perm(head->root, table, mask);
 out:
 	sysctl_head_finish(head);
 	return error;
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 49816e0..ac26ccc 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -5,7 +5,7 @@
  */
 
 #include <asm/uaccess.h>
-
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/time.h>
@@ -136,39 +136,54 @@
 	.release	= seq_release,
 };
 
-/*
- * This is the handler for /proc/tty/ldiscs
- */
-static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
-				int count, int *eof, void *data)
+static void * tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
 {
-	int	i;
-	int	len = 0;
-	off_t	begin = 0;
+	return (*pos < NR_LDISCS) ? pos : NULL;
+}
+
+static void * tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return (*pos < NR_LDISCS) ? pos : NULL;
+}
+
+static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
+{
+	int i = *(loff_t *)v;
 	struct tty_ldisc *ld;
 	
-	for (i=0; i < NR_LDISCS; i++) {
-		ld = tty_ldisc_get(i);
-		if (ld == NULL)
-			continue;
-		len += sprintf(page+len, "%-10s %2d\n",
-			       ld->name ? ld->name : "???", i);
-		tty_ldisc_put(i);
-		if (len+begin > off+count)
-			break;
-		if (len+begin < off) {
-			begin += len;
-			len = 0;
-		}
-	}
-	if (i >= NR_LDISCS)
-		*eof = 1;
-	if (off >= len+begin)
+	ld = tty_ldisc_get(i);
+	if (ld == NULL)
 		return 0;
-	*start = page + (off-begin);
-	return ((count < begin+len-off) ? count : begin+len-off);
+	seq_printf(m, "%-10s %2d\n", ld->name ? ld->name : "???", i);
+	tty_ldisc_put(i);
+	return 0;
 }
 
+static const struct seq_operations tty_ldiscs_seq_ops = {
+	.start	= tty_ldiscs_seq_start,
+	.next	= tty_ldiscs_seq_next,
+	.stop	= tty_ldiscs_seq_stop,
+	.show	= tty_ldiscs_seq_show,
+};
+
+static int proc_tty_ldiscs_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &tty_ldiscs_seq_ops);
+}
+
+static const struct file_operations tty_ldiscs_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_tty_ldiscs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 /*
  * This function is called by tty_register_driver() to handle
  * registering the driver's /proc handler into /proc/tty/driver/<foo>
@@ -214,7 +229,6 @@
  */
 void __init proc_tty_init(void)
 {
-	struct proc_dir_entry *entry;
 	if (!proc_mkdir("tty", NULL))
 		return;
 	proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
@@ -224,10 +238,7 @@
 	 * password lengths and inter-keystroke timings during password
 	 * entry.
 	 */
-	proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR | S_IXUSR, NULL);
-
-	create_proc_read_entry("tty/ldiscs", 0, NULL, tty_ldiscs_read_proc, NULL);
-	entry = create_proc_entry("tty/drivers", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_tty_drivers_operations;
+	proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL);
+	proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops);
+	proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations);
 }
diff --git a/fs/proc/root.c b/fs/proc/root.c
index ef0fb57..9511753 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -22,8 +22,6 @@
 
 #include "internal.h"
 
-struct proc_dir_entry *proc_bus, *proc_root_fs, *proc_root_driver;
-
 static int proc_test_super(struct super_block *sb, void *data)
 {
 	return sb->s_fs_info == data;
@@ -126,8 +124,8 @@
 #ifdef CONFIG_SYSVIPC
 	proc_mkdir("sysvipc", NULL);
 #endif
-	proc_root_fs = proc_mkdir("fs", NULL);
-	proc_root_driver = proc_mkdir("driver", NULL);
+	proc_mkdir("fs", NULL);
+	proc_mkdir("driver", NULL);
 	proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
 #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
 	/* just give it a mountpoint */
@@ -137,7 +135,7 @@
 #ifdef CONFIG_PROC_DEVICETREE
 	proc_device_tree_init();
 #endif
-	proc_bus = proc_mkdir("bus", NULL);
+	proc_mkdir("bus", NULL);
 	proc_sys_init();
 }
 
@@ -232,9 +230,5 @@
 EXPORT_SYMBOL(proc_symlink);
 EXPORT_SYMBOL(proc_mkdir);
 EXPORT_SYMBOL(create_proc_entry);
-EXPORT_SYMBOL(proc_create);
+EXPORT_SYMBOL(proc_create_data);
 EXPORT_SYMBOL(remove_proc_entry);
-EXPORT_SYMBOL(proc_root);
-EXPORT_SYMBOL(proc_root_fs);
-EXPORT_SYMBOL(proc_bus);
-EXPORT_SYMBOL(proc_root_driver);
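
With proc_bus, proc_root_fs and proc_root_driver no longer exported, code that used to hang entries off those directory pointers can instead pass a path relative to /proc together with a NULL parent; xlate_proc_name() resolves the intermediate components, exactly as the "tty/ldiscs" and "tty/drivers" registrations above do. A hedged sketch with invented names ("bus/foo", foo_devices_proc_fops), not taken from this patch:

#include <linux/proc_fs.h>

/* assumed to be defined elsewhere, e.g. as a seq_file interface */
extern const struct file_operations foo_devices_proc_fops;

static int __init foo_bus_proc_init(void)
{
	if (!proc_mkdir("bus/foo", NULL))
		return -ENOMEM;
	if (!proc_create("bus/foo/devices", 0, NULL, &foo_devices_proc_fops)) {
		remove_proc_entry("bus/foo", NULL);
		return -ENOMEM;
	}
	return 0;
}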
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7415eeb..e2b8e76 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -75,40 +75,6 @@
 	return mm->total_vm;
 }
 
-int proc_exe_link(struct inode *inode, struct path *path)
-{
-	struct vm_area_struct * vma;
-	int result = -ENOENT;
-	struct task_struct *task = get_proc_task(inode);
-	struct mm_struct * mm = NULL;
-
-	if (task) {
-		mm = get_task_mm(task);
-		put_task_struct(task);
-	}
-	if (!mm)
-		goto out;
-	down_read(&mm->mmap_sem);
-
-	vma = mm->mmap;
-	while (vma) {
-		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
-			break;
-		vma = vma->vm_next;
-	}
-
-	if (vma) {
-		*path = vma->vm_file->f_path;
-		path_get(&vma->vm_file->f_path);
-		result = 0;
-	}
-
-	up_read(&mm->mmap_sem);
-	mmput(mm);
-out:
-	return result;
-}
-
 static void pad_len_spaces(struct seq_file *m, int len)
 {
 	len = 25 + sizeof(void*) * 6 - len;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8011528..4b733f1 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -103,40 +103,6 @@
 	return size;
 }
 
-int proc_exe_link(struct inode *inode, struct path *path)
-{
-	struct vm_list_struct *vml;
-	struct vm_area_struct *vma;
-	struct task_struct *task = get_proc_task(inode);
-	struct mm_struct *mm = get_task_mm(task);
-	int result = -ENOENT;
-
-	if (!mm)
-		goto out;
-	down_read(&mm->mmap_sem);
-
-	vml = mm->context.vmlist;
-	vma = NULL;
-	while (vml) {
-		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
-			vma = vml->vma;
-			break;
-		}
-		vml = vml->next;
-	}
-
-	if (vma) {
-		*path = vma->vm_file->f_path;
-		path_get(&vma->vm_file->f_path);
-		result = 0;
-	}
-
-	up_read(&mm->mmap_sem);
-	mmput(mm);
-out:
-	return result;
-}
-
 /*
  * display mapping lines for a particular process's /proc/pid/maps
  */
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index b41a514..9590b90 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -26,6 +26,9 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/ramfs.h>
+
+#include "internal.h"
 
 const struct address_space_operations ramfs_aops = {
 	.readpage	= simple_readpage,
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h
index af7cc07..6b33063 100644
--- a/fs/ramfs/internal.h
+++ b/fs/ramfs/internal.h
@@ -11,5 +11,4 @@
 
 
 extern const struct address_space_operations ramfs_aops;
-extern const struct file_operations ramfs_file_operations;
 extern const struct inode_operations ramfs_file_inode_operations;
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 8f86c52..b9dbeec 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -467,6 +467,7 @@
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = seq_release,
+	.owner = THIS_MODULE,
 };
 
 static struct proc_dir_entry *proc_info_root = NULL;
@@ -475,12 +476,8 @@
 static void add_file(struct super_block *sb, char *name,
 		     int (*func) (struct seq_file *, struct super_block *))
 {
-	struct proc_dir_entry *de;
-	de = create_proc_entry(name, 0, REISERFS_SB(sb)->procdir);
-	if (de) {
-		de->data = func;
-		de->proc_fops = &r_file_operations;
-	}
+	proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
+			 &r_file_operations, func);
 }
 
 int reiserfs_proc_info_init(struct super_block *sb)
diff --git a/fs/splice.c b/fs/splice.c
index eeb1a86..633f58e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1075,7 +1075,7 @@
 
 	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
 	if (ret > 0)
-		*ppos += ret;
+		*ppos = sd.pos;
 
 	return ret;
 }
diff --git a/fs/super.c b/fs/super.c
index a5a4aca..453877c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -117,7 +117,7 @@
  * Drop a superblock's refcount.  Returns non-zero if the superblock was
  * destroyed.  The caller must hold sb_lock.
  */
-int __put_super(struct super_block *sb)
+static int __put_super(struct super_block *sb)
 {
 	int ret = 0;
 
diff --git a/fs/sync.c b/fs/sync.c
index 7cd005e..228e17b 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -64,7 +64,7 @@
 	/* sync the superblock to buffers */
 	sb = inode->i_sb;
 	lock_super(sb);
-	if (sb->s_op->write_super)
+	if (sb->s_dirt && sb->s_op->write_super)
 		sb->s_op->write_super(sb);
 	unlock_super(sb);
 
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 10c80b5..5400524 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -20,6 +20,7 @@
 #include <linux/hrtimer.h>
 #include <linux/anon_inodes.h>
 #include <linux/timerfd.h>
+#include <linux/syscalls.h>
 
 struct timerfd_ctx {
 	struct hrtimer tmr;
diff --git a/fs/xattr.c b/fs/xattr.c
index 89a942f..4706a8b 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -67,7 +67,7 @@
 }
 
 int
-vfs_setxattr(struct dentry *dentry, char *name, void *value,
+vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
 		size_t size, int flags)
 {
 	struct inode *inode = dentry->d_inode;
@@ -131,7 +131,7 @@
 EXPORT_SYMBOL_GPL(xattr_getsecurity);
 
 ssize_t
-vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
+vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
 {
 	struct inode *inode = dentry->d_inode;
 	int error;
@@ -187,7 +187,7 @@
 EXPORT_SYMBOL_GPL(vfs_listxattr);
 
 int
-vfs_removexattr(struct dentry *dentry, char *name)
+vfs_removexattr(struct dentry *dentry, const char *name)
 {
 	struct inode *inode = dentry->d_inode;
 	int error;
@@ -218,7 +218,7 @@
  * Extended attribute SET operations
  */
 static long
-setxattr(struct dentry *d, char __user *name, void __user *value,
+setxattr(struct dentry *d, const char __user *name, const void __user *value,
 	 size_t size, int flags)
 {
 	int error;
@@ -252,8 +252,8 @@
 }
 
 asmlinkage long
-sys_setxattr(char __user *path, char __user *name, void __user *value,
-	     size_t size, int flags)
+sys_setxattr(const char __user *path, const char __user *name,
+	     const void __user *value, size_t size, int flags)
 {
 	struct nameidata nd;
 	int error;
@@ -271,8 +271,8 @@
 }
 
 asmlinkage long
-sys_lsetxattr(char __user *path, char __user *name, void __user *value,
-	      size_t size, int flags)
+sys_lsetxattr(const char __user *path, const char __user *name,
+	      const void __user *value, size_t size, int flags)
 {
 	struct nameidata nd;
 	int error;
@@ -290,7 +290,7 @@
 }
 
 asmlinkage long
-sys_fsetxattr(int fd, char __user *name, void __user *value,
+sys_fsetxattr(int fd, const char __user *name, const void __user *value,
 	      size_t size, int flags)
 {
 	struct file *f;
@@ -315,7 +315,8 @@
  * Extended attribute GET operations
  */
 static ssize_t
-getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
+getxattr(struct dentry *d, const char __user *name, void __user *value,
+	 size_t size)
 {
 	ssize_t error;
 	void *kvalue = NULL;
@@ -349,8 +350,8 @@
 }
 
 asmlinkage ssize_t
-sys_getxattr(char __user *path, char __user *name, void __user *value,
-	     size_t size)
+sys_getxattr(const char __user *path, const char __user *name,
+	     void __user *value, size_t size)
 {
 	struct nameidata nd;
 	ssize_t error;
@@ -364,7 +365,7 @@
 }
 
 asmlinkage ssize_t
-sys_lgetxattr(char __user *path, char __user *name, void __user *value,
+sys_lgetxattr(const char __user *path, const char __user *name, void __user *value,
 	      size_t size)
 {
 	struct nameidata nd;
@@ -379,7 +380,7 @@
 }
 
 asmlinkage ssize_t
-sys_fgetxattr(int fd, char __user *name, void __user *value, size_t size)
+sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size)
 {
 	struct file *f;
 	ssize_t error = -EBADF;
@@ -424,7 +425,7 @@
 }
 
 asmlinkage ssize_t
-sys_listxattr(char __user *path, char __user *list, size_t size)
+sys_listxattr(const char __user *path, char __user *list, size_t size)
 {
 	struct nameidata nd;
 	ssize_t error;
@@ -438,7 +439,7 @@
 }
 
 asmlinkage ssize_t
-sys_llistxattr(char __user *path, char __user *list, size_t size)
+sys_llistxattr(const char __user *path, char __user *list, size_t size)
 {
 	struct nameidata nd;
 	ssize_t error;
@@ -470,7 +471,7 @@
  * Extended attribute REMOVE operations
  */
 static long
-removexattr(struct dentry *d, char __user *name)
+removexattr(struct dentry *d, const char __user *name)
 {
 	int error;
 	char kname[XATTR_NAME_MAX + 1];
@@ -485,7 +486,7 @@
 }
 
 asmlinkage long
-sys_removexattr(char __user *path, char __user *name)
+sys_removexattr(const char __user *path, const char __user *name)
 {
 	struct nameidata nd;
 	int error;
@@ -503,7 +504,7 @@
 }
 
 asmlinkage long
-sys_lremovexattr(char __user *path, char __user *name)
+sys_lremovexattr(const char __user *path, const char __user *name)
 {
 	struct nameidata nd;
 	int error;
@@ -521,7 +522,7 @@
 }
 
 asmlinkage long
-sys_fremovexattr(int fd, char __user *name)
+sys_fremovexattr(int fd, const char __user *name)
 {
 	struct file *f;
 	struct dentry *dentry;
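
With the xattr prototypes made const-correct above, in-kernel callers can pass string literals and read-only buffers without casting away const. A minimal sketch, assuming a caller that already holds a valid dentry; the "user.tagged" attribute name is illustrative only:

#include <linux/fs.h>
#include <linux/xattr.h>

static int tag_dentry(struct dentry *dentry)
{
	static const char value[] = "1";

	/* both name and value may now point at read-only data */
	return vfs_setxattr(dentry, "user.tagged", value, sizeof(value) - 1, 0);
}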
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
index a1d7284..3787c60 100644
--- a/include/asm-alpha/unaligned.h
+++ b/include/asm-alpha/unaligned.h
@@ -1,6 +1,11 @@
-#ifndef __ALPHA_UNALIGNED_H
-#define __ALPHA_UNALIGNED_H
+#ifndef _ASM_ALPHA_UNALIGNED_H
+#define _ASM_ALPHA_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+
+#endif /* _ASM_ALPHA_UNALIGNED_H */
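
After this rework each architecture's <asm/unaligned.h> merely selects a byte-order flavour of the shared linux/unaligned/ helpers and maps get_unaligned()/put_unaligned() onto it, so the type-generic accessors behave the same everywhere. A usage sketch; read_pkt_len(), write_pkt_len() and the buffer layout are made up for illustration:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_pkt_len(const u8 *buf)
{
	/* buf + 1 need not be 4-byte aligned; get_unaligned() copes */
	return get_unaligned((const u32 *)(buf + 1));
}

static void write_pkt_len(u8 *buf, u32 len)
{
	put_unaligned(len, (u32 *)(buf + 1));
}

When the on-wire byte order is fixed rather than native, the included headers are assumed to also expose explicit-endian accessors such as get_unaligned_le32(), which avoid a separate cpu_to_le32()/le32_to_cpu() step.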
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
index 5db03cf..44593a8 100644
--- a/include/asm-arm/unaligned.h
+++ b/include/asm-arm/unaligned.h
@@ -1,171 +1,9 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
+#ifndef _ASM_ARM_UNALIGNED_H
+#define _ASM_ARM_UNALIGNED_H
 
-#include <asm/types.h>
-
-extern int __bug_unaligned_x(const void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1		1	1		1	2
- *	2		3	2		3	2
- *	4		7	3		7	3
- *	8		20	6		16	6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1		1	1		1	2
- *	2		3	2		3	2
- *	4		7	4		7	4
- *	8		19	8		15	6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2_le(__p)					\
-	(unsigned int)(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_2_be(__p)					\
-	(unsigned int)(__p[0] << 8 | __p[1])
-
-#define __get_unaligned_4_le(__p)					\
-	(unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define __get_unaligned_4_be(__p)					\
-	(unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])
-
-#define __get_unaligned_8_le(__p)					\
-	((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 |	\
-		__get_unaligned_4_le(__p))
-
-#define __get_unaligned_8_be(__p)					\
-	((unsigned long long)__get_unaligned_4_be(__p) << 32 |		\
-		__get_unaligned_4_be((__p+4)))
-
-#define __get_unaligned_le(ptr)						\
-	((__force typeof(*(ptr)))({					\
-		const __u8 *__p = (const __u8 *)(ptr);			\
-		__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p,	\
-		  __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p),	\
-		  __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p),	\
-		  __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p),	\
-		    (void)__bug_unaligned_x(__p)))));			\
-	}))
-
-#define __get_unaligned_be(ptr)						\
-	((__force typeof(*(ptr)))({					\
-		const __u8 *__p = (const __u8 *)(ptr);			\
-		__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p,	\
-		  __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p),	\
-		  __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p),	\
-		  __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p),	\
-		    (void)__bug_unaligned_x(__p)))));			\
-	}))
-
-
-static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v >> 8;
-	*__p++ = __v;
-}
-
-static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2_le(__v >> 16, __p + 2);
-	__put_unaligned_2_le(__v, __p);
-}
-
-static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2_be(__v >> 16, __p);
-	__put_unaligned_2_be(__v, __p + 2);
-}
-
-static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4_le(__v >> 32, __p+4);
-	__put_unaligned_4_le(__v, __p);
-}
-
-static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4_be(__v >> 32, __p);
-	__put_unaligned_4_be(__v, __p+4);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define __put_unaligned_le(val,ptr)					\
-	({							\
-		(void)sizeof(*(ptr) = (val));			\
-		switch (sizeof(*(ptr))) {			\
-		case 1:						\
-			*(ptr) = (val);				\
-			break;					\
-		case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr));	\
-			break;					\
-		case 4:	__put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr));	\
-			break;					\
-		case 8:	__put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr)); \
-			break;					\
-		default: __bug_unaligned_x(ptr);		\
-			break;					\
-		}						\
-		(void) 0;					\
-	})
-
-#define __put_unaligned_be(val,ptr)					\
-	({							\
-		(void)sizeof(*(ptr) = (val));			\
-		switch (sizeof(*(ptr))) {			\
-		case 1:						\
-			*(ptr) = (val);				\
-			break;					\
-		case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr));	\
-			break;					\
-		case 4:	__put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr));	\
-			break;					\
-		case 8:	__put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr)); \
-			break;					\
-		default: __bug_unaligned_x(ptr);		\
-			break;					\
-		}						\
-		(void) 0;					\
-	})
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
 /*
  * Select endianness
@@ -178,4 +16,4 @@
 #define put_unaligned	__put_unaligned_be
 #endif
 
-#endif
+#endif /* _ASM_ARM_UNALIGNED_H */
diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h
index 36f5fd4..0418772 100644
--- a/include/asm-avr32/unaligned.h
+++ b/include/asm-avr32/unaligned.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_UNALIGNED_H
-#define __ASM_AVR32_UNALIGNED_H
+#ifndef _ASM_AVR32_UNALIGNED_H
+#define _ASM_AVR32_UNALIGNED_H
 
 /*
  * AVR32 can handle some unaligned accesses, depending on the
@@ -11,6 +11,11 @@
  * optimize word loads in general.
  */
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif /* __ASM_AVR32_UNALIGNED_H */
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
+
+#endif /* _ASM_AVR32_UNALIGNED_H */
diff --git a/include/asm-blackfin/unaligned.h b/include/asm-blackfin/unaligned.h
index 10081dc..fd8a1d6 100644
--- a/include/asm-blackfin/unaligned.h
+++ b/include/asm-blackfin/unaligned.h
@@ -1,6 +1,11 @@
-#ifndef __BFIN_UNALIGNED_H
-#define __BFIN_UNALIGNED_H
+#ifndef _ASM_BLACKFIN_UNALIGNED_H
+#define _ASM_BLACKFIN_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif				/* __BFIN_UNALIGNED_H */
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+
+#endif /* _ASM_BLACKFIN_UNALIGNED_H */
diff --git a/include/asm-cris/unaligned.h b/include/asm-cris/unaligned.h
index 7fbbb39..7b3f3fe 100644
--- a/include/asm-cris/unaligned.h
+++ b/include/asm-cris/unaligned.h
@@ -1,16 +1,13 @@
-#ifndef __CRIS_UNALIGNED_H
-#define __CRIS_UNALIGNED_H
+#ifndef _ASM_CRIS_UNALIGNED_H
+#define _ASM_CRIS_UNALIGNED_H
 
 /*
  * CRIS can do unaligned accesses itself. 
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_CRIS_UNALIGNED_H */
diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h
index dc8e9c9..64ccc73 100644
--- a/include/asm-frv/unaligned.h
+++ b/include/asm-frv/unaligned.h
@@ -9,194 +9,14 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _ASM_UNALIGNED_H
-#define _ASM_UNALIGNED_H
+#ifndef _ASM_FRV_UNALIGNED_H
+#define _ASM_FRV_UNALIGNED_H
 
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/*
- * Unaligned accesses on uClinux can't be performed in a fault handler - the
- * CPU detects them as imprecise exceptions making this impossible.
- *
- * With the FR451, however, they are precise, and so we used to fix them up in
- * the memory access fault handler.  However, instruction bundling make this
- * impractical.  So, now we fall back to using memcpy.
- */
-#ifdef CONFIG_MMU
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
-/*
- * The asm statement in the macros below is a way to get GCC to copy a
- * value from one variable to another without having any clue it's
- * actually doing so, so that it won't have any idea that the values
- * in the two variables are related.
- */
-
-#define get_unaligned(ptr) ({				\
-	typeof((*(ptr))) __x;				\
-	void *__ptrcopy;				\
-	asm("" : "=r" (__ptrcopy) : "0" (ptr));		\
-	memcpy(&__x, __ptrcopy, sizeof(*(ptr)));	\
-	__x;						\
-})
-
-#define put_unaligned(val, ptr) ({			\
-	typeof((*(ptr))) __x = (val);			\
-	void *__ptrcopy;				\
-	asm("" : "=r" (__ptrcopy) : "0" (ptr));		\
-	memcpy(__ptrcopy, &__x, sizeof(*(ptr)));	\
-})
-
-extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);
-
-#else
-
-#define get_unaligned(ptr)							\
-({										\
-	typeof(*(ptr)) x;							\
-	const char *__p = (const char *) (ptr);					\
-										\
-	switch (sizeof(x)) {							\
-	case 1:									\
-		x = *(ptr);							\
-		break;								\
-	case 2:									\
-	{									\
-		uint8_t a;							\
-		asm("	ldub%I2		%M2,%0		\n"			\
-		    "	ldub%I3.p	%M3,%1		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%1,%0	\n"			\
-		    : "=&r"(x), "=&r"(a)					\
-		    : "m"(__p[0]),  "m"(__p[1])					\
-		    );								\
-		break;								\
-	}									\
-										\
-	case 4:									\
-	{									\
-		uint8_t a;							\
-		asm("	ldub%I2		%M2,%0		\n"			\
-		    "	ldub%I3.p	%M3,%1		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%1,%0	\n"			\
-		    "	ldub%I4.p	%M4,%1		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%1,%0	\n"			\
-		    "	ldub%I5.p	%M5,%1		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%1,%0	\n"			\
-		    : "=&r"(x), "=&r"(a)					\
-		    : "m"(__p[0]),  "m"(__p[1]), "m"(__p[2]), "m"(__p[3])	\
-		    );								\
-		break;								\
-	}									\
-										\
-	case 8:									\
-	{									\
-		union { uint64_t x; u32 y[2]; } z;				\
-		uint8_t a;							\
-		asm("	ldub%I3		%M3,%0		\n"			\
-		    "	ldub%I4.p	%M4,%2		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%2,%0	\n"			\
-		    "	ldub%I5.p	%M5,%2		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%2,%0	\n"			\
-		    "	ldub%I6.p	%M6,%2		\n"			\
-		    "	slli		%0,#8,%0	\n"			\
-		    "	or		%0,%2,%0	\n"			\
-		    "	ldub%I7		%M7,%1		\n"			\
-		    "	ldub%I8.p	%M8,%2		\n"			\
-		    "	slli		%1,#8,%1	\n"			\
-		    "	or		%1,%2,%1	\n"			\
-		    "	ldub%I9.p	%M9,%2		\n"			\
-		    "	slli		%1,#8,%1	\n"			\
-		    "	or		%1,%2,%1	\n"			\
-		    "	ldub%I10.p	%M10,%2		\n"			\
-		    "	slli		%1,#8,%1	\n"			\
-		    "	or		%1,%2,%1	\n"			\
-		    : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a)			\
-		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]),	\
-		      "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7])	\
-		    );								\
-		x = z.x;							\
-		break;								\
-	}									\
-										\
-	default:								\
-		x = 0;								\
-		BUG();								\
-		break;								\
-	}									\
-										\
-	x;									\
-})
-
-#define put_unaligned(val, ptr)								\
-do {											\
-	char *__p = (char *) (ptr);							\
-	int x;										\
-											\
-	switch (sizeof(*ptr)) {								\
-	case 2:										\
-	{										\
-		asm("	stb%I1.p	%0,%M1		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I2		%0,%M2		\n"				\
-		    : "=r"(x), "=m"(__p[1]),  "=m"(__p[0])				\
-		    : "0"(val)								\
-		    );									\
-		break;									\
-	}										\
-											\
-	case 4:										\
-	{										\
-		asm("	stb%I1.p	%0,%M1		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I2.p	%0,%M2		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I3.p	%0,%M3		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I4		%0,%M4		\n"				\
-		    : "=r"(x), "=m"(__p[3]),  "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0])	\
-		    : "0"(val)								\
-		    );									\
-		break;									\
-	}										\
-											\
-	case 8:										\
-	{										\
-		uint32_t __high, __low;							\
-		__high = (uint64_t)val >> 32;						\
-		__low = val & 0xffffffff;						\
-		asm("	stb%I2.p	%0,%M2		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I3.p	%0,%M3		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I4.p	%0,%M4		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I5.p	%0,%M5		\n"				\
-		    "	srli		%0,#8,%0	\n"				\
-		    "	stb%I6.p	%1,%M6		\n"				\
-		    "	srli		%1,#8,%1	\n"				\
-		    "	stb%I7.p	%1,%M7		\n"				\
-		    "	srli		%1,#8,%1	\n"				\
-		    "	stb%I8.p	%1,%M8		\n"				\
-		    "	srli		%1,#8,%1	\n"				\
-		    "	stb%I9		%1,%M9		\n"				\
-		    : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]), 		\
-		      "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]), 		\
-		      "=m"(__p[1]), "=m"(__p[0])					\
-		    : "0"(__low), "1"(__high)						\
-		    );									\
-		break;									\
-	}										\
-											\
-        default:									\
-		*(ptr) = (val);								\
-		break;									\
-	}										\
-} while(0)
-
-#endif
-
-#endif
+#endif /* _ASM_FRV_UNALIGNED_H */
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
index cd02729..8641813 100644
--- a/include/asm-generic/ioctl.h
+++ b/include/asm-generic/ioctl.h
@@ -21,8 +21,19 @@
  */
 #define _IOC_NRBITS	8
 #define _IOC_TYPEBITS	8
-#define _IOC_SIZEBITS	14
-#define _IOC_DIRBITS	2
+
+/*
+ * Let any architecture override either of the following before
+ * including this file.
+ */
+
+#ifndef _IOC_SIZEBITS
+# define _IOC_SIZEBITS	14
+#endif
+
+#ifndef _IOC_DIRBITS
+# define _IOC_DIRBITS	2
+#endif
 
 #define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
 #define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
@@ -35,11 +46,21 @@
 #define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
 
 /*
- * Direction bits.
+ * Direction bits, which any architecture can choose to override
+ * before including this file.
  */
-#define _IOC_NONE	0U
-#define _IOC_WRITE	1U
-#define _IOC_READ	2U
+
+#ifndef _IOC_NONE
+# define _IOC_NONE	0U
+#endif
+
+#ifndef _IOC_WRITE
+# define _IOC_WRITE	1U
+#endif
+
+#ifndef _IOC_READ
+# define _IOC_READ	2U
+#endif
 
 #define _IOC(dir,type,nr,size) \
 	(((dir)  << _IOC_DIRSHIFT) | \
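
The #ifndef guards added above let an architecture override the ioctl size/direction field widths and the direction encoding before pulling in the generic header. A hypothetical <asm/ioctl.h> doing so might look like the following; the values are illustrative, not taken from any real port:

#ifndef _ASM_FOO_IOCTL_H
#define _ASM_FOO_IOCTL_H

/* narrower size field, one extra direction bit */
#define _IOC_SIZEBITS	13
#define _IOC_DIRBITS	3

/* non-default direction encoding */
#define _IOC_NONE	1U
#define _IOC_READ	2U
#define _IOC_WRITE	4U

#include <asm-generic/ioctl.h>

#endif /* _ASM_FOO_IOCTL_H */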
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
deleted file mode 100644
index 2fe1b2e..0000000
--- a/include/asm-generic/unaligned.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef _ASM_GENERIC_UNALIGNED_H_
-#define _ASM_GENERIC_UNALIGNED_H_
-
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents. 
- *
- * This is based almost entirely upon Richard Henderson's
- * asm-alpha/unaligned.h implementation.  Some comments were
- * taken from David Mosberger's asm-ia64/unaligned.h header.
- */
-
-#include <linux/types.h>
-
-/* 
- * The main single-value unaligned transfer routines.
- */
-#define get_unaligned(ptr) \
-	__get_unaligned((ptr), sizeof(*(ptr)))
-#define put_unaligned(x,ptr) \
-	((void)sizeof(*(ptr)=(x)),\
-	__put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
-
-/*
- * This function doesn't actually exist.  The idea is that when
- * someone uses the macros below with an unsupported size (datatype),
- * the linker will alert us to the problem via an unresolved reference
- * error.
- */
-extern void bad_unaligned_access_length(void) __attribute__((noreturn));
-
-struct __una_u64 { __u64 x __attribute__((packed)); };
-struct __una_u32 { __u32 x __attribute__((packed)); };
-struct __una_u16 { __u16 x __attribute__((packed)); };
-
-/*
- * Elemental unaligned loads 
- */
-
-static inline __u64 __uldq(const __u64 *addr)
-{
-	const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
-	return ptr->x;
-}
-
-static inline __u32 __uldl(const __u32 *addr)
-{
-	const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
-	return ptr->x;
-}
-
-static inline __u16 __uldw(const __u16 *addr)
-{
-	const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
-	return ptr->x;
-}
-
-/*
- * Elemental unaligned stores 
- */
-
-static inline void __ustq(__u64 val, __u64 *addr)
-{
-	struct __una_u64 *ptr = (struct __una_u64 *) addr;
-	ptr->x = val;
-}
-
-static inline void __ustl(__u32 val, __u32 *addr)
-{
-	struct __una_u32 *ptr = (struct __una_u32 *) addr;
-	ptr->x = val;
-}
-
-static inline void __ustw(__u16 val, __u16 *addr)
-{
-	struct __una_u16 *ptr = (struct __una_u16 *) addr;
-	ptr->x = val;
-}
-
-#define __get_unaligned(ptr, size) ({		\
-	const void *__gu_p = ptr;		\
-	__u64 __val;				\
-	switch (size) {				\
-	case 1:					\
-		__val = *(const __u8 *)__gu_p;	\
-		break;				\
-	case 2:					\
-		__val = __uldw(__gu_p);		\
-		break;				\
-	case 4:					\
-		__val = __uldl(__gu_p);		\
-		break;				\
-	case 8:					\
-		__val = __uldq(__gu_p);		\
-		break;				\
-	default:				\
-		bad_unaligned_access_length();	\
-	};					\
-	(__force __typeof__(*(ptr)))__val;	\
-})
-
-#define __put_unaligned(val, ptr, size)		\
-({						\
-	void *__gu_p = ptr;			\
-	switch (size) {				\
-	case 1:					\
-		*(__u8 *)__gu_p = (__force __u8)val;		\
-	        break;				\
-	case 2:					\
-		__ustw((__force __u16)val, __gu_p);		\
-		break;				\
-	case 4:					\
-		__ustl((__force __u32)val, __gu_p);		\
-		break;				\
-	case 8:					\
-		__ustq(val, __gu_p);		\
-		break;				\
-	default:				\
-	    	bad_unaligned_access_length();	\
-	};					\
-	(void)0;				\
-})
-
-#endif /* _ASM_GENERIC_UNALIGNED_H */
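
The deleted asm-generic/unaligned.h relied on packed single-member structs to make the compiler emit safe accesses, and the new linux/unaligned/*_struct.h headers keep the same trick, just split per byte order. Reduced to a sketch, with load_unaligned_u32()/store_unaligned_u32() as illustrative names (only __una_u32 comes from the removed file):

#include <linux/types.h>

struct __una_u32 { u32 x __attribute__((packed)); };

static inline u32 load_unaligned_u32(const void *p)
{
	/*
	 * The packed struct tells the compiler to assume no alignment,
	 * so it emits whatever byte/word loads the CPU actually allows.
	 */
	return ((const struct __una_u32 *)p)->x;
}

static inline void store_unaligned_u32(u32 val, void *p)
{
	((struct __una_u32 *)p)->x = val;
}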
diff --git a/include/asm-h8300/unaligned.h b/include/asm-h8300/unaligned.h
index ffb67f4..b8d06c7 100644
--- a/include/asm-h8300/unaligned.h
+++ b/include/asm-h8300/unaligned.h
@@ -1,15 +1,11 @@
-#ifndef __H8300_UNALIGNED_H
-#define __H8300_UNALIGNED_H
+#ifndef _ASM_H8300_UNALIGNED_H
+#define _ASM_H8300_UNALIGNED_H
 
+#include <linux/unaligned/be_memmove.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)				\
-  ({ __typeof__(*(ptr)) __tmp = (val);			\
-     memmove((ptr), &__tmp, sizeof(*(ptr)));		\
-     (void)0; })
-
-#endif
+#endif /* _ASM_H8300_UNALIGNED_H */
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index f1735a2..9f0df9b 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -23,10 +23,30 @@
 {
 	dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
-#define dma_map_single		platform_dma_map_single
-#define dma_map_sg		platform_dma_map_sg
-#define dma_unmap_single	platform_dma_unmap_single
-#define dma_unmap_sg		platform_dma_unmap_sg
+#define dma_map_single_attrs	platform_dma_map_single_attrs
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size, int dir)
+{
+	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_map_sg_attrs	platform_dma_map_sg_attrs
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, int dir)
+{
+	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
+static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
+				    size_t size, int dir)
+{
+	return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+				int nents, int dir)
+{
+	return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+}
 #define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
 #define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device platform_dma_sync_single_for_device
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index c201a20..9f020eb 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -22,6 +22,7 @@
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
+struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -56,6 +57,11 @@
 typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
+typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
+typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
  * expected to follow this architected model (see Section 10.7 in the
@@ -136,10 +142,10 @@
 #  define platform_dma_init		ia64_mv.dma_init
 #  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
 #  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
-#  define platform_dma_map_single	ia64_mv.dma_map_single
-#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
-#  define platform_dma_map_sg		ia64_mv.dma_map_sg
-#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
+#  define platform_dma_map_single_attrs	ia64_mv.dma_map_single_attrs
+#  define platform_dma_unmap_single_attrs	ia64_mv.dma_unmap_single_attrs
+#  define platform_dma_map_sg_attrs	ia64_mv.dma_map_sg_attrs
+#  define platform_dma_unmap_sg_attrs	ia64_mv.dma_unmap_sg_attrs
 #  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
 #  define platform_dma_sync_sg_for_cpu	ia64_mv.dma_sync_sg_for_cpu
 #  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
@@ -190,10 +196,10 @@
 	ia64_mv_dma_init *dma_init;
 	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
 	ia64_mv_dma_free_coherent *dma_free_coherent;
-	ia64_mv_dma_map_single *dma_map_single;
-	ia64_mv_dma_unmap_single *dma_unmap_single;
-	ia64_mv_dma_map_sg *dma_map_sg;
-	ia64_mv_dma_unmap_sg *dma_unmap_sg;
+	ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
+	ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
+	ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
+	ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
 	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
 	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
 	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
@@ -240,10 +246,10 @@
 	platform_dma_init,			\
 	platform_dma_alloc_coherent,		\
 	platform_dma_free_coherent,		\
-	platform_dma_map_single,		\
-	platform_dma_unmap_single,		\
-	platform_dma_map_sg,			\
-	platform_dma_unmap_sg,			\
+	platform_dma_map_single_attrs,		\
+	platform_dma_unmap_single_attrs,	\
+	platform_dma_map_sg_attrs,		\
+	platform_dma_unmap_sg_attrs,		\
 	platform_dma_sync_single_for_cpu,	\
 	platform_dma_sync_sg_for_cpu,		\
 	platform_dma_sync_single_for_device,	\
@@ -292,9 +298,13 @@
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
 extern ia64_mv_dma_map_single		swiotlb_map_single;
+extern ia64_mv_dma_map_single_attrs	swiotlb_map_single_attrs;
 extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
+extern ia64_mv_dma_unmap_single_attrs	swiotlb_unmap_single_attrs;
 extern ia64_mv_dma_map_sg		swiotlb_map_sg;
+extern ia64_mv_dma_map_sg_attrs		swiotlb_map_sg_attrs;
 extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
@@ -340,17 +350,17 @@
 #ifndef platform_dma_free_coherent
 # define platform_dma_free_coherent	swiotlb_free_coherent
 #endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single	swiotlb_map_single
+#ifndef platform_dma_map_single_attrs
+# define platform_dma_map_single_attrs	swiotlb_map_single_attrs
 #endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single	swiotlb_unmap_single
+#ifndef platform_dma_unmap_single_attrs
+# define platform_dma_unmap_single_attrs	swiotlb_unmap_single_attrs
 #endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg		swiotlb_map_sg
+#ifndef platform_dma_map_sg_attrs
+# define platform_dma_map_sg_attrs	swiotlb_map_sg_attrs
 #endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg		swiotlb_unmap_sg
+#ifndef platform_dma_unmap_sg_attrs
+# define platform_dma_unmap_sg_attrs	swiotlb_unmap_sg_attrs
 #endif
 #ifndef platform_dma_sync_single_for_cpu
 # define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h
index e90daf9..2f57f51 100644
--- a/include/asm-ia64/machvec_hpzx1.h
+++ b/include/asm-ia64/machvec_hpzx1.h
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t			dig_setup;
 extern ia64_mv_dma_alloc_coherent	sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sba_free_coherent;
-extern ia64_mv_dma_map_single		sba_map_single;
-extern ia64_mv_dma_unmap_single		sba_unmap_single;
-extern ia64_mv_dma_map_sg		sba_map_sg;
-extern ia64_mv_dma_unmap_sg		sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported		sba_dma_supported;
 extern ia64_mv_dma_mapping_error	sba_dma_mapping_error;
 
@@ -23,10 +23,10 @@
 #define platform_dma_init			machvec_noop
 #define platform_dma_alloc_coherent		sba_alloc_coherent
 #define platform_dma_free_coherent		sba_free_coherent
-#define platform_dma_map_single			sba_map_single
-#define platform_dma_unmap_single		sba_unmap_single
-#define platform_dma_map_sg			sba_map_sg
-#define platform_dma_unmap_sg			sba_unmap_sg
+#define platform_dma_map_single_attrs		sba_map_single_attrs
+#define platform_dma_unmap_single_attrs		sba_unmap_single_attrs
+#define platform_dma_map_sg_attrs		sba_map_sg_attrs
+#define platform_dma_unmap_sg_attrs		sba_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu	machvec_dma_sync_single
 #define platform_dma_sync_sg_for_cpu		machvec_dma_sync_sg
 #define platform_dma_sync_single_for_device	machvec_dma_sync_single
diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h
index f00a34a..a842cdd 100644
--- a/include/asm-ia64/machvec_hpzx1_swiotlb.h
+++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t				dig_setup;
 extern ia64_mv_dma_alloc_coherent		hwsw_alloc_coherent;
 extern ia64_mv_dma_free_coherent		hwsw_free_coherent;
-extern ia64_mv_dma_map_single			hwsw_map_single;
-extern ia64_mv_dma_unmap_single			hwsw_unmap_single;
-extern ia64_mv_dma_map_sg			hwsw_map_sg;
-extern ia64_mv_dma_unmap_sg			hwsw_unmap_sg;
+extern ia64_mv_dma_map_single_attrs		hwsw_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs		hwsw_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs			hwsw_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs;
 extern ia64_mv_dma_supported			hwsw_dma_supported;
 extern ia64_mv_dma_mapping_error		hwsw_dma_mapping_error;
 extern ia64_mv_dma_sync_single_for_cpu		hwsw_sync_single_for_cpu;
@@ -28,10 +28,10 @@
 #define platform_dma_init			machvec_noop
 #define platform_dma_alloc_coherent		hwsw_alloc_coherent
 #define platform_dma_free_coherent		hwsw_free_coherent
-#define platform_dma_map_single			hwsw_map_single
-#define platform_dma_unmap_single		hwsw_unmap_single
-#define platform_dma_map_sg			hwsw_map_sg
-#define platform_dma_unmap_sg			hwsw_unmap_sg
+#define platform_dma_map_single_attrs		hwsw_map_single_attrs
+#define platform_dma_unmap_single_attrs		hwsw_unmap_single_attrs
+#define platform_dma_map_sg_attrs		hwsw_map_sg_attrs
+#define platform_dma_unmap_sg_attrs		hwsw_unmap_sg_attrs
 #define platform_dma_supported			hwsw_dma_supported
 #define platform_dma_mapping_error		hwsw_dma_mapping_error
 #define platform_dma_sync_single_for_cpu	hwsw_sync_single_for_cpu
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index 61439a7..781308e 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -57,10 +57,10 @@
 extern ia64_mv_readq_t __sn_readq_relaxed;
 extern ia64_mv_dma_alloc_coherent	sn_dma_alloc_coherent;
 extern ia64_mv_dma_free_coherent	sn_dma_free_coherent;
-extern ia64_mv_dma_map_single		sn_dma_map_single;
-extern ia64_mv_dma_unmap_single		sn_dma_unmap_single;
-extern ia64_mv_dma_map_sg		sn_dma_map_sg;
-extern ia64_mv_dma_unmap_sg		sn_dma_unmap_sg;
+extern ia64_mv_dma_map_single_attrs	sn_dma_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs	sn_dma_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs		sn_dma_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu	sn_dma_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
@@ -113,10 +113,10 @@
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	sn_dma_alloc_coherent
 #define platform_dma_free_coherent	sn_dma_free_coherent
-#define platform_dma_map_single		sn_dma_map_single
-#define platform_dma_unmap_single	sn_dma_unmap_single
-#define platform_dma_map_sg		sn_dma_map_sg
-#define platform_dma_unmap_sg		sn_dma_unmap_sg
+#define platform_dma_map_single_attrs	sn_dma_map_single_attrs
+#define platform_dma_unmap_single_attrs	sn_dma_unmap_single_attrs
+#define platform_dma_map_sg_attrs	sn_dma_map_sg_attrs
+#define platform_dma_unmap_sg_attrs	sn_dma_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
 #define platform_dma_sync_sg_for_cpu	sn_dma_sync_sg_for_cpu
 #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
index bb85598..7bddc7f 100644
--- a/include/asm-ia64/unaligned.h
+++ b/include/asm-ia64/unaligned.h
@@ -1,6 +1,11 @@
 #ifndef _ASM_IA64_UNALIGNED_H
 #define _ASM_IA64_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
 
 #endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/include/asm-m32r/unaligned.h b/include/asm-m32r/unaligned.h
index fccc180..377eb20 100644
--- a/include/asm-m32r/unaligned.h
+++ b/include/asm-m32r/unaligned.h
@@ -1,19 +1,18 @@
 #ifndef _ASM_M32R_UNALIGNED_H
 #define _ASM_M32R_UNALIGNED_H
 
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.
- */
-
-#include <asm/string.h>
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)				\
-  ({ __typeof__(*(ptr)) __tmp = (val);			\
-     memmove((ptr), &__tmp, sizeof(*(ptr)));		\
-     (void)0; })
+#if defined(__LITTLE_ENDIAN__)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#else
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#endif
 
 #endif /* _ASM_M32R_UNALIGNED_H */
diff --git a/include/asm-m68k/unaligned.h b/include/asm-m68k/unaligned.h
index 804cb3f..77698f2 100644
--- a/include/asm-m68k/unaligned.h
+++ b/include/asm-m68k/unaligned.h
@@ -1,16 +1,13 @@
-#ifndef __M68K_UNALIGNED_H
-#define __M68K_UNALIGNED_H
+#ifndef _ASM_M68K_UNALIGNED_H
+#define _ASM_M68K_UNALIGNED_H
 
 /*
  * The m68k can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_M68K_UNALIGNED_H */
diff --git a/include/asm-m68knommu/unaligned.h b/include/asm-m68knommu/unaligned.h
index 869e9dd..eb1ea4c 100644
--- a/include/asm-m68knommu/unaligned.h
+++ b/include/asm-m68knommu/unaligned.h
@@ -1,23 +1,25 @@
-#ifndef __M68K_UNALIGNED_H
-#define __M68K_UNALIGNED_H
+#ifndef _ASM_M68KNOMMU_UNALIGNED_H
+#define _ASM_M68KNOMMU_UNALIGNED_H
 
 
 #ifdef CONFIG_COLDFIRE
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#include <asm-generic/unaligned.h>
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #else
 /*
  * The m68k can do unaligned accesses itself. 
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #endif
 
-#endif
+#endif /* _ASM_M68KNOMMU_UNALIGNED_H */
diff --git a/include/asm-mips/unaligned.h b/include/asm-mips/unaligned.h
index 3249049..7924049 100644
--- a/include/asm-mips/unaligned.h
+++ b/include/asm-mips/unaligned.h
@@ -5,25 +5,24 @@
  *
  * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
  */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
+#ifndef _ASM_MIPS_UNALIGNED_H
+#define _ASM_MIPS_UNALIGNED_H
 
 #include <linux/compiler.h>
+#if defined(__MIPSEB__)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#elif defined(__MIPSEL__)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#else
+#  error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
+#endif
 
-#define get_unaligned(ptr)					\
-({								\
-	struct __packed {					\
-		typeof(*(ptr)) __v;				\
-	} *__p = (void *) (ptr);				\
-	__p->__v;						\
-})
-
-#define put_unaligned(val, ptr)					\
-do {								\
-	struct __packed {					\
-		typeof(*(ptr)) __v;				\
-	} *__p = (void *) (ptr);				\
-	__p->__v = (val);					\
-} while(0)
-
-#endif /* __ASM_GENERIC_UNALIGNED_H */
+#endif /* _ASM_MIPS_UNALIGNED_H */
diff --git a/include/asm-mn10300/unaligned.h b/include/asm-mn10300/unaligned.h
index cad3afb..0df6713 100644
--- a/include/asm-mn10300/unaligned.h
+++ b/include/asm-mn10300/unaligned.h
@@ -8,129 +8,13 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#ifndef _ASM_UNALIGNED_H
-#define _ASM_UNALIGNED_H
+#ifndef _ASM_MN10300_UNALIGNED_H
+#define _ASM_MN10300_UNALIGNED_H
 
-#include <asm/types.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#if 0
-extern int __bug_unaligned_x(void *ptr);
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
 
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1		1	1		1	2
- *	2		3	2		3	2
- *	4		7	3		7	3
- *	8		20	6		16	6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1		1	1		1	2
- *	2		3	2		3	2
- *	4		7	4		7	4
- *	8		19	8		15	6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2(__p)					\
-	(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_4(__p)					\
-	(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define get_unaligned(ptr)					\
-({								\
-	unsigned int __v1, __v2;				\
-	__typeof__(*(ptr)) __v;					\
-	__u8 *__p = (__u8 *)(ptr);				\
-								\
-	switch (sizeof(*(ptr))) {				\
-	case 1:	__v = *(ptr);			break;		\
-	case 2: __v = __get_unaligned_2(__p);	break;		\
-	case 4: __v = __get_unaligned_4(__p);	break;		\
-	case 8:							\
-		__v2 = __get_unaligned_4((__p+4));		\
-		__v1 = __get_unaligned_4(__p);			\
-		__v = ((unsigned long long)__v2 << 32 | __v1);	\
-		break;						\
-	default: __v = __bug_unaligned_x(__p);	break;		\
-	}							\
-	__v;							\
-})
-
-
-static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2(__v >> 16, __p + 2);
-	__put_unaligned_2(__v, __p);
-}
-
-static inline void __put_unaligned_8(const unsigned long long __v, __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4(__v >> 32, __p + 4);
-	__put_unaligned_4(__v, __p);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define put_unaligned(val, ptr)						\
-	({								\
-		switch (sizeof(*(ptr))) {				\
-		case 1:							\
-			*(ptr) = (val);					\
-			break;						\
-		case 2:							\
-			__put_unaligned_2((val), (__u8 *)(ptr));	\
-			break;						\
-		case 4:							\
-			__put_unaligned_4((val), (__u8 *)(ptr));	\
-			break;						\
-		case 8:							\
-			__put_unaligned_8((val), (__u8 *)(ptr));	\
-			break;						\
-		default:						\
-			__bug_unaligned_x(ptr);				\
-			break;						\
-		}							\
-		(void) 0;						\
-	})
-
-
-#else
-
-#define get_unaligned(ptr) (*(ptr))
-#define put_unaligned(val, ptr) ({ *(ptr) = (val); (void) 0; })
-
-#endif
-
-#endif
+#endif /* _ASM_MN10300_UNALIGNED_H */
diff --git a/include/asm-parisc/unaligned.h b/include/asm-parisc/unaligned.h
index 53c9058..dfc5d33 100644
--- a/include/asm-parisc/unaligned.h
+++ b/include/asm-parisc/unaligned.h
@@ -1,7 +1,11 @@
-#ifndef _ASM_PARISC_UNALIGNED_H_
-#define _ASM_PARISC_UNALIGNED_H_
+#ifndef _ASM_PARISC_UNALIGNED_H
+#define _ASM_PARISC_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #ifdef __KERNEL__
 struct pt_regs;
@@ -9,4 +13,4 @@
 int check_unaligned(struct pt_regs *regs);
 #endif
 
-#endif /* _ASM_PARISC_UNALIGNED_H_ */
+#endif /* _ASM_PARISC_UNALIGNED_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index b5c0312..5089deb 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -619,8 +619,6 @@
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
-extern void __do_softirq(void);
-
 #ifdef CONFIG_IRQSTACKS
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
diff --git a/include/asm-powerpc/unaligned.h b/include/asm-powerpc/unaligned.h
index 6c95dfa..5f1b1e3 100644
--- a/include/asm-powerpc/unaligned.h
+++ b/include/asm-powerpc/unaligned.h
@@ -5,15 +5,12 @@
 
 /*
  * The PowerPC can do unaligned accesses itself in big endian mode.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #endif	/* __KERNEL__ */
 #endif	/* _ASM_POWERPC_UNALIGNED_H */
diff --git a/include/asm-s390/unaligned.h b/include/asm-s390/unaligned.h
index 8ee86db..da9627a 100644
--- a/include/asm-s390/unaligned.h
+++ b/include/asm-s390/unaligned.h
@@ -1,24 +1,13 @@
-/*
- *  include/asm-s390/unaligned.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/unaligned.h"
- */
-
-#ifndef __S390_UNALIGNED_H
-#define __S390_UNALIGNED_H
+#ifndef _ASM_S390_UNALIGNED_H
+#define _ASM_S390_UNALIGNED_H
 
 /*
  * The S390 can do unaligned accesses itself. 
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_S390_UNALIGNED_H */
diff --git a/include/asm-sh/unaligned.h b/include/asm-sh/unaligned.h
index 5250e30..c1641a0 100644
--- a/include/asm-sh/unaligned.h
+++ b/include/asm-sh/unaligned.h
@@ -1,7 +1,19 @@
-#ifndef __ASM_SH_UNALIGNED_H
-#define __ASM_SH_UNALIGNED_H
+#ifndef _ASM_SH_UNALIGNED_H
+#define _ASM_SH_UNALIGNED_H
 
 /* SH can't handle unaligned accesses. */
-#include <asm-generic/unaligned.h>
+#ifdef __LITTLE_ENDIAN__
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#endif
 
-#endif /* __ASM_SH_UNALIGNED_H */
+#endif /* _ASM_SH_UNALIGNED_H */
diff --git a/include/asm-sparc/unaligned.h b/include/asm-sparc/unaligned.h
index b6f8edd..11d2d5f 100644
--- a/include/asm-sparc/unaligned.h
+++ b/include/asm-sparc/unaligned.h
@@ -1,6 +1,10 @@
-#ifndef _ASM_SPARC_UNALIGNED_H_
-#define _ASM_SPARC_UNALIGNED_H_
+#ifndef _ASM_SPARC_UNALIGNED_H
+#define _ASM_SPARC_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #endif /* _ASM_SPARC_UNALIGNED_H */
diff --git a/include/asm-sparc64/unaligned.h b/include/asm-sparc64/unaligned.h
index 1ed3ba5..edcebb0 100644
--- a/include/asm-sparc64/unaligned.h
+++ b/include/asm-sparc64/unaligned.h
@@ -1,6 +1,10 @@
-#ifndef _ASM_SPARC64_UNALIGNED_H_
-#define _ASM_SPARC64_UNALIGNED_H_
+#ifndef _ASM_SPARC64_UNALIGNED_H
+#define _ASM_SPARC64_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
 #endif /* _ASM_SPARC64_UNALIGNED_H */
diff --git a/include/asm-um/unaligned.h b/include/asm-um/unaligned.h
index 1d2497c..a471969 100644
--- a/include/asm-um/unaligned.h
+++ b/include/asm-um/unaligned.h
@@ -1,6 +1,6 @@
-#ifndef __UM_UNALIGNED_H
-#define __UM_UNALIGNED_H
+#ifndef _ASM_UM_UNALIGNED_H
+#define _ASM_UM_UNALIGNED_H
 
 #include "asm/arch/unaligned.h"
 
-#endif
+#endif /* _ASM_UM_UNALIGNED_H */
diff --git a/include/asm-v850/unaligned.h b/include/asm-v850/unaligned.h
index e30b186..53122b2 100644
--- a/include/asm-v850/unaligned.h
+++ b/include/asm-v850/unaligned.h
@@ -1,6 +1,4 @@
 /*
- * include/asm-v850/unaligned.h -- Unaligned memory access
- *
  *  Copyright (C) 2001  NEC Corporation
  *  Copyright (C) 2001  Miles Bader <miles@gnu.org>
  *
@@ -8,123 +6,17 @@
  * Public License.  See the file COPYING in the main directory of this
  * archive for more details.
  *
- * This file is a copy of the arm version, include/asm-arm/unaligned.h
- *
  * Note that some v850 chips support unaligned access, but it seems too
  * annoying to use.
  */
+#ifndef _ASM_V850_UNALIGNED_H
+#define _ASM_V850_UNALIGNED_H
 
-#ifndef __V850_UNALIGNED_H__
-#define __V850_UNALIGNED_H__
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#include <asm/types.h>
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
 
-extern int __bug_unaligned_x(void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1		1	1		1	2
- *	2		3	2		3	2
- *	4		7	3		7	3
- *	8		20	6		16	6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1		1	1		1	2
- *	2		3	2		3	2
- *	4		7	4		7	4
- *	8		19	8		15	6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2(__p)					\
-	(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_4(__p)					\
-	(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define get_unaligned(ptr)					\
-	({							\
-		__typeof__(*(ptr)) __v;				\
-		__u8 *__p = (__u8 *)(ptr);			\
-		switch (sizeof(*(ptr))) {			\
-		case 1:	__v = *(ptr);			break;	\
-		case 2: __v = __get_unaligned_2(__p);	break;	\
-		case 4: __v = __get_unaligned_4(__p);	break;	\
-		case 8: {					\
-				unsigned int __v1, __v2;	\
-				__v2 = __get_unaligned_4((__p+4)); \
-				__v1 = __get_unaligned_4(__p);	\
-				__v = ((unsigned long long)__v2 << 32 | __v1);	\
-			}					\
-			break;					\
-		default: __v = __bug_unaligned_x(__p);	break;	\
-		}						\
-		__v;						\
-	})
-
-
-static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2(__v >> 16, __p + 2);
-	__put_unaligned_2(__v, __p);
-}
-
-static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4(__v >> 32, __p+4);
-	__put_unaligned_4(__v, __p);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define put_unaligned(val,ptr)					\
-	({							\
-		switch (sizeof(*(ptr))) {			\
-		case 1:						\
-			*(ptr) = (val);				\
-			break;					\
-		case 2: __put_unaligned_2((val),(__u8 *)(ptr));	\
-			break;					\
-		case 4:	__put_unaligned_4((val),(__u8 *)(ptr));	\
-			break;					\
-		case 8:	__put_unaligned_8((val),(__u8 *)(ptr)); \
-			break;					\
-		default: __bug_unaligned_x(ptr);		\
-			break;					\
-		}						\
-		(void) 0;					\
-	})
-
-
-#endif /* __V850_UNALIGNED_H__ */
+#endif /* _ASM_V850_UNALIGNED_H */
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h
new file mode 100644
index 0000000..97d4713
--- /dev/null
+++ b/include/asm-x86/olpc.h
@@ -0,0 +1,132 @@
+/* OLPC machine specific definitions */
+
+#ifndef ASM_OLPC_H_
+#define ASM_OLPC_H_
+
+#include <asm/geode.h>
+
+struct olpc_platform_t {
+	int flags;
+	uint32_t boardrev;
+	int ecver;
+};
+
+#define OLPC_F_PRESENT		0x01
+#define OLPC_F_DCON		0x02
+#define OLPC_F_VSA		0x04
+
+#ifdef CONFIG_OLPC
+
+extern struct olpc_platform_t olpc_platform_info;
+
+/*
+ * OLPC board IDs contain the major build number within the mask 0x0ff0,
+ * and the minor build number within 0x000f.  Pre-builds have a minor
+ * number less than 8, and normal builds start at 8.  For example, 0x0B10
+ * is a PreB1, and 0x0C18 is a C1.
+ */
+
+static inline uint32_t olpc_board(uint8_t id)
+{
+	return (id << 4) | 0x8;
+}
+
+static inline uint32_t olpc_board_pre(uint8_t id)
+{
+	return id << 4;
+}
+
+static inline int machine_is_olpc(void)
+{
+	return (olpc_platform_info.flags & OLPC_F_PRESENT) ? 1 : 0;
+}
+
+/*
+ * The DCON is OLPC's Display Controller.  It has a number of unique
+ * features that we might want to take advantage of..
+ */
+static inline int olpc_has_dcon(void)
+{
+	return (olpc_platform_info.flags & OLPC_F_DCON) ? 1 : 0;
+}
+
+/*
+ * The VSA is software from AMD that typical Geode bioses will include.
+ * It is used to emulate the PCI bus, VGA, etc.  OLPC's Open Firmware does
+ * not include the VSA; instead, PCI is emulated by the kernel.
+ *
+ * The VSA is described further in arch/x86/pci/olpc.c.
+ */
+static inline int olpc_has_vsa(void)
+{
+	return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0;
+}
+
+/*
+ * The "Mass Production" version of OLPC's XO is identified as being model
+ * C2.  During the prototype phase, the following models (in chronological
+ * order) were created: A1, B1, B2, B3, B4, C1.  The A1 through B2 models
+ * were based on Geode GX CPUs, and models after that were based upon
+ * Geode LX CPUs.  There were also some hand-assembled models floating
+ * around, referred to as PreB1, PreB2, etc.
+ */
+static inline int olpc_board_at_least(uint32_t rev)
+{
+	return olpc_platform_info.boardrev >= rev;
+}
+
+#else
+
+static inline int machine_is_olpc(void)
+{
+	return 0;
+}
+
+static inline int olpc_has_dcon(void)
+{
+	return 0;
+}
+
+static inline int olpc_has_vsa(void)
+{
+	return 0;
+}
+
+#endif
+
+/* EC related functions */
+
+extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
+		unsigned char *outbuf, size_t outlen);
+
+extern int olpc_ec_mask_set(uint8_t bits);
+extern int olpc_ec_mask_unset(uint8_t bits);
+
+/* EC commands */
+
+#define EC_FIRMWARE_REV		0x08
+
+/* SCI source values */
+
+#define EC_SCI_SRC_EMPTY	0x00
+#define EC_SCI_SRC_GAME		0x01
+#define EC_SCI_SRC_BATTERY	0x02
+#define EC_SCI_SRC_BATSOC	0x04
+#define EC_SCI_SRC_BATERR	0x08
+#define EC_SCI_SRC_EBOOK	0x10
+#define EC_SCI_SRC_WLAN		0x20
+#define EC_SCI_SRC_ACPWR	0x40
+#define EC_SCI_SRC_ALL		0x7F
+
+/* GPIO assignments */
+
+#define OLPC_GPIO_MIC_AC	geode_gpio(1)
+#define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
+#define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
+#define OLPC_GPIO_SMB_CLK	geode_gpio(14)
+#define OLPC_GPIO_SMB_DATA	geode_gpio(15)
+#define OLPC_GPIO_WORKAUX	geode_gpio(24)
+#define OLPC_GPIO_LID		geode_gpio(26)
+#define OLPC_GPIO_ECSCI		geode_gpio(27)
+
+#endif
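As an example of how the helpers above might be used, here's a sketch of a
driver check that gates a quirk on the board revision; the function name and
the chosen revision are illustrative only.  Per the encoding described above,
olpc_board(0xc1) yields 0x0c18, i.e. a C1 (mass-production era) build:

	static int example_needs_c1_or_later(void)
	{
		/* only meaningful on OLPC hardware */
		if (!machine_is_olpc())
			return 0;
		/* boardrev follows the 0x0ff0 major / 0x000f minor encoding */
		return olpc_board_at_least(olpc_board(0xc1));
	}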
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
index 68779b0..bce72d7 100644
--- a/include/asm-x86/time.h
+++ b/include/asm-x86/time.h
@@ -1,7 +1,6 @@
 #ifndef _ASMX86_TIME_H
 #define _ASMX86_TIME_H
 
-extern void (*late_time_init)(void);
 extern void hpet_time_init(void);
 
 #include <asm/mc146818rtc.h>
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index d270ffe..a7bd416 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -3,35 +3,12 @@
 
 /*
  * The x86 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
 
-/**
- * get_unaligned - get value from possibly mis-aligned location
- * @ptr: pointer to value
- *
- * This macro should be used for accessing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. retrieving a u16 value from a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define get_unaligned(ptr) (*(ptr))
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-/**
- * put_unaligned - put value to a possibly mis-aligned location
- * @val: value to place
- * @ptr: pointer to location
- *
- * This macro should be used for placing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. writing a u16 value to a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define put_unaligned(val, ptr) ((void)(*(ptr) = (val)))
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
 #endif /* _ASM_X86_UNALIGNED_H */
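As an example, callers keep using the same get_unaligned()/put_unaligned()
interface; only the implementation now comes from the shared linux/unaligned
helpers selected per architecture.  The buffer layout below is illustrative
only:

	#include <asm/unaligned.h>

	/* read a u16 field that sits at an odd, possibly unaligned offset */
	static u16 example_read_len(const u8 *buf)
	{
		return get_unaligned((const u16 *)(buf + 1));
	}

	/* write it back without assuming alignment */
	static void example_write_len(u8 *buf, u16 len)
	{
		put_unaligned(len, (u16 *)(buf + 1));
	}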
diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h
index 2822089..8f3424f 100644
--- a/include/asm-xtensa/unaligned.h
+++ b/include/asm-xtensa/unaligned.h
@@ -1,6 +1,4 @@
 /*
- * include/asm-xtensa/unaligned.h
- *
  * Xtensa doesn't handle unaligned accesses efficiently.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,20 +7,23 @@
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
  */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
 
-#ifndef _XTENSA_UNALIGNED_H
-#define _XTENSA_UNALIGNED_H
+#ifdef __XTENSA_EL__
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#elif defined(__XTENSA_EB__)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#else
+# error processor byte order undefined!
+#endif
 
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)				\
-  ({ __typeof__(*(ptr)) __tmp = (val);			\
-     memmove((ptr), &__tmp, sizeof(*(ptr)));		\
-     (void)0; })
-
-#endif	/* _XTENSA_UNALIGNED_H */
+#endif	/* _ASM_XTENSA_UNALIGNED_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index bda6f04..78fade0 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -20,6 +20,7 @@
 header-y += aio_abi.h
 header-y += arcfb.h
 header-y += atmapi.h
+header-y += atmarp.h
 header-y += atmbr2684.h
 header-y += atmclip.h
 header-y += atm_eni.h
@@ -48,6 +49,7 @@
 header-y += comstats.h
 header-y += const.h
 header-y += cgroupstats.h
+header-y += cramfs_fs.h
 header-y += cycx_cfm.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
@@ -70,10 +72,12 @@
 header-y += fuse.h
 header-y += genetlink.h
 header-y += gen_stats.h
+header-y += gfs2_ondisk.h
 header-y += gigaset_dev.h
 header-y += hysdn_if.h
 header-y += i2o-dev.h
 header-y += i8k.h
+header-y += if_addrlabel.h
 header-y += if_arcnet.h
 header-y += if_bonding.h
 header-y += if_cablemodem.h
@@ -91,6 +95,7 @@
 header-y += in6.h
 header-y += in_route.h
 header-y += ioctl.h
+header-y += ip6_tunnel.h
 header-y += ipmi_msgdefs.h
 header-y += ipsec.h
 header-y += ipx.h
@@ -117,7 +122,6 @@
 header-y += nfs4_mount.h
 header-y += nfs_mount.h
 header-y += nl80211.h
-header-y += oom.h
 header-y += param.h
 header-y += pci_regs.h
 header-y += pfkeyv2.h
@@ -166,7 +170,6 @@
 unifdef-y += agpgart.h
 unifdef-y += apm_bios.h
 unifdef-y += atalk.h
-unifdef-y += atmarp.h
 unifdef-y += atmdev.h
 unifdef-y += atm.h
 unifdef-y += atm_tcp.h
@@ -182,7 +185,6 @@
 unifdef-y += cn_proc.h
 unifdef-y += coda.h
 unifdef-y += connector.h
-unifdef-y += cramfs_fs.h
 unifdef-y += cuda.h
 unifdef-y += cyclades.h
 unifdef-y += dccp.h
@@ -205,7 +207,6 @@
 unifdef-y += fs.h
 unifdef-y += gameport.h
 unifdef-y += generic_serial.h
-unifdef-y += gfs2_ondisk.h
 unifdef-y += hayesesp.h
 unifdef-y += hdlcdrv.h
 unifdef-y += hdlc.h
@@ -219,7 +220,6 @@
 unifdef-y += icmp.h
 unifdef-y += icmpv6.h
 unifdef-y += if_addr.h
-unifdef-y += if_addrlabel.h
 unifdef-y += if_arp.h
 unifdef-y += if_bridge.h
 unifdef-y += if_ec.h
@@ -243,7 +243,6 @@
 unifdef-y += ipmi.h
 unifdef-y += ipv6.h
 unifdef-y += ipv6_route.h
-unifdef-y += ip6_tunnel.h
 unifdef-y += isdn.h
 unifdef-y += isdnif.h
 unifdef-y += isdn_divertif.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 0d0b7f6..b51ddd2 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -209,27 +209,8 @@
 extern int aio_put_req(struct kiocb *iocb);
 extern void kick_iocb(struct kiocb *iocb);
 extern int aio_complete(struct kiocb *iocb, long res, long res2);
-extern void __put_ioctx(struct kioctx *ctx);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
-extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
-extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb);
-
-/* semi private, but used by the 32bit emulations: */
-struct kioctx *lookup_ioctx(unsigned long ctx_id);
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-		  struct iocb *iocb);
-
-#define get_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	atomic_inc(&(kioctx)->users);					\
-} while (0)
-#define put_ioctx(kioctx) do {						\
-	BUG_ON(atomic_read(&(kioctx)->users) <= 0);			\
-	if (unlikely(atomic_dec_and_test(&(kioctx)->users))) 		\
-		__put_ioctx(kioctx);					\
-} while (0)
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 48a62ba..b66fa2b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -156,9 +156,7 @@
 extern struct backing_dev_info default_backing_dev_info;
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);
 
-int writeback_acquire(struct backing_dev_info *bdi);
 int writeback_in_progress(struct backing_dev_info *bdi);
-void writeback_release(struct backing_dev_info *bdi);
 
 static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 {
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index b7fc55e..b512e48 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -34,7 +34,8 @@
 #endif
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
-	int sh_bang;
+	unsigned int sh_bang:1,
+		     misc_bang:1;
 	struct file * file;
 	int e_uid, e_gid;
 	kernel_cap_t cap_inheritable, cap_permitted;
@@ -48,7 +49,6 @@
 	unsigned interp_flags;
 	unsigned interp_data;
 	unsigned long loader, exec;
-	unsigned long argv_len;
 };
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d259690..61c15ea 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -324,6 +324,8 @@
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 				gfp_t);
+extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
+				 gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 48bde60..024f2b0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -6,8 +6,8 @@
 #define BIT(nr)			(1UL << (nr))
 #define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
-#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)
 #define BITS_PER_BYTE		8
+#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
 /*
@@ -114,8 +114,6 @@
 
 #ifdef __KERNEL__
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-extern unsigned long __find_first_bit(const unsigned long *addr,
-		unsigned long size);
 
 /**
  * find_first_bit - find the first set bit in a memory region
@@ -124,28 +122,8 @@
  *
  * Returns the bit number of the first set bit.
  */
-static __always_inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
-{
-	/* Avoid a function call if the bitmap size is a constant */
-	/* and not bigger than BITS_PER_LONG. */
-
-	/* insert a sentinel so that __ffs returns size if there */
-	/* are no set bits in the bitmap */
-	if (__builtin_constant_p(size) && (size < BITS_PER_LONG))
-		return __ffs((*addr) | (1ul << size));
-
-	/* the result of __ffs(0) is undefined, so it needs to be */
-	/* handled separately */
-	if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
-		return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr);
-
-	/* size is not constant or too big */
-	return __find_first_bit(addr, size);
-}
-
-extern unsigned long __find_first_zero_bit(const unsigned long *addr,
-		unsigned long size);
+extern unsigned long find_first_bit(const unsigned long *addr,
+				    unsigned long size);
 
 /**
  * find_first_zero_bit - find the first cleared bit in a memory region
@@ -154,31 +132,12 @@
  *
  * Returns the bit number of the first cleared bit.
  */
-static __always_inline unsigned long
-find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
-	/* Avoid a function call if the bitmap size is a constant */
-	/* and not bigger than BITS_PER_LONG. */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+					 unsigned long size);
 
-	/* insert a sentinel so that __ffs returns size if there */
-	/* are no set bits in the bitmap */
-	if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
-		return __ffs(~(*addr) | (1ul << size));
-	}
-
-	/* the result of __ffs(0) is undefined, so it needs to be */
-	/* handled separately */
-	if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
-		return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr));
-
-	/* size is not constant or too big */
-	return __find_first_zero_bit(addr, size);
-}
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef CONFIG_GENERIC_FIND_NEXT_BIT
-extern unsigned long __find_next_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset);
 
 /**
  * find_next_bit - find the next set bit in a memory region
@@ -186,36 +145,8 @@
  * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
  */
-static __always_inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size,
-		unsigned long offset)
-{
-	unsigned long value;
-
-	/* Avoid a function call if the bitmap size is a constant */
-	/* and not bigger than BITS_PER_LONG. */
-
-	/* insert a sentinel so that __ffs returns size if there */
-	/* are no set bits in the bitmap */
-	if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
-		value = (*addr) & ((~0ul) << offset);
-		value |= (1ul << size);
-		return __ffs(value);
-	}
-
-	/* the result of __ffs(0) is undefined, so it needs to be */
-	/* handled separately */
-	if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
-		value = (*addr) & ((~0ul) << offset);
-		return (value == 0) ? BITS_PER_LONG : __ffs(value);
-	}
-
-	/* size is not constant or too big */
-	return __find_next_bit(addr, size, offset);
-}
-
-extern unsigned long __find_next_zero_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset);
+extern unsigned long find_next_bit(const unsigned long *addr,
+				   unsigned long size, unsigned long offset);
 
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
@@ -223,33 +154,11 @@
  * @offset: The bitnumber to start searching at
  * @size: The bitmap size in bits
  */
-static __always_inline unsigned long
-find_next_zero_bit(const unsigned long *addr, unsigned long size,
-		unsigned long offset)
-{
-	unsigned long value;
 
-	/* Avoid a function call if the bitmap size is a constant */
-	/* and not bigger than BITS_PER_LONG. */
+extern unsigned long find_next_zero_bit(const unsigned long *addr,
+					unsigned long size,
+					unsigned long offset);
 
-	/* insert a sentinel so that __ffs returns size if there */
-	/* are no set bits in the bitmap */
-	if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
-		value = (~(*addr)) & ((~0ul) << offset);
-		value |= (1ul << size);
-		return __ffs(value);
-	}
-
-	/* the result of __ffs(0) is undefined, so it needs to be */
-	/* handled separately */
-	if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
-		value = (~(*addr)) & ((~0ul) << offset);
-		return (value == 0) ? BITS_PER_LONG : __ffs(value);
-	}
-
-	/* size is not constant or too big */
-	return __find_next_zero_bit(addr, size, offset);
-}
 #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
 #endif /* __KERNEL__ */
 #endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c5065e3..c09696a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -215,8 +215,9 @@
 	/*
 	 * when request is used as a packet command carrier
 	 */
-	unsigned int cmd_len;
-	unsigned char cmd[BLK_MAX_CDB];
+	unsigned short cmd_len;
+	unsigned char __cmd[BLK_MAX_CDB];
+	unsigned char *cmd;
 
 	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
@@ -407,6 +408,31 @@
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
+
+static inline void queue_flag_set_unlocked(unsigned int flag,
+					   struct request_queue *q)
+{
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+					     struct request_queue *q)
+{
+	__clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));
+	__clear_bit(flag, &q->queue_flags);
+}
 
 enum {
 	/*
@@ -451,6 +477,7 @@
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
@@ -496,17 +523,17 @@
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_READFULL, q);
 	else
-		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
-		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_READFULL, q);
 	else
-		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
 
 
@@ -583,6 +610,7 @@
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
+extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
@@ -626,6 +654,7 @@
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 932eb02..82aa36c 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -225,7 +225,6 @@
 				get_block_t get_block);
 void block_sync_page(struct page *);
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
-int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 int file_fsync(struct file *, struct dentry *, int);
 int nobh_write_begin(struct file *, struct address_space *,
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a6a6035..e155aa7 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -88,6 +88,17 @@
 		__css_put(css);
 }
 
+/* bits in struct cgroup flags field */
+enum {
+	/* Control Group is dead */
+	CGRP_REMOVED,
+	/* Control Group has previously had a child cgroup or a task,
+	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
+	CGRP_RELEASABLE,
+	/* Control Group requires release notifications to userspace */
+	CGRP_NOTIFY_ON_RELEASE,
+};
+
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
@@ -139,10 +150,10 @@
 	struct kref ref;
 
 	/*
-	 * List running through all cgroup groups. Protected by
-	 * css_set_lock
+	 * List running through all cgroup groups in the same hash
+	 * slot. Protected by css_set_lock
 	 */
-	struct list_head list;
+	struct hlist_node hlist;
 
 	/*
 	 * List running through all tasks using this cgroup
@@ -163,7 +174,16 @@
 	 * during subsystem registration (at boot time).
 	 */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+};
 
+/*
+ * cgroup_map_cb is an abstract callback API for reporting map-valued
+ * control files
+ */
+
+struct cgroup_map_cb {
+	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
+	void *state;
 };
 
 /* struct cftype:
@@ -190,20 +210,51 @@
 			 struct file *file,
 			 char __user *buf, size_t nbytes, loff_t *ppos);
 	/*
-	 * read_uint() is a shortcut for the common case of returning a
+	 * read_u64() is a shortcut for the common case of returning a
 	 * single integer. Use it in place of read()
 	 */
-	u64 (*read_uint) (struct cgroup *cgrp, struct cftype *cft);
+	u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft);
+	/*
+	 * read_s64() is a signed version of read_u64()
+	 */
+	s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft);
+	/*
+	 * read_map() is used for defining a map of key/value
+	 * pairs. It should call cb->fill(cb, key, value) for each
+	 * entry. The key/value pairs (and their ordering) should not
+	 * change between reboots.
+	 */
+	int (*read_map) (struct cgroup *cont, struct cftype *cft,
+			 struct cgroup_map_cb *cb);
+	/*
+	 * read_seq_string() is used for outputting a simple sequence
+	 * using seqfile.
+	 */
+	int (*read_seq_string) (struct cgroup *cont, struct cftype *cft,
+			 struct seq_file *m);
+
 	ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft,
 			  struct file *file,
 			  const char __user *buf, size_t nbytes, loff_t *ppos);
 
 	/*
-	 * write_uint() is a shortcut for the common case of accepting
+	 * write_u64() is a shortcut for the common case of accepting
 	 * a single integer (as parsed by simple_strtoull) from
 	 * userspace. Use in place of write(); return 0 or error.
 	 */
-	int (*write_uint) (struct cgroup *cgrp, struct cftype *cft, u64 val);
+	int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val);
+	/*
+	 * write_s64() is a signed version of write_u64()
+	 */
+	int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val);
+
+	/*
+	 * trigger() callback can be used to get some kick from the
+	 * userspace, when the actual string written is not important
+	 * at all. The private field can be used to determine the
+	 * kick type for multiplexing.
+	 */
+	int (*trigger)(struct cgroup *cgrp, unsigned int event);
 
 	int (*release) (struct inode *inode, struct file *file);
 };
@@ -254,6 +305,12 @@
 			struct cgroup *cgrp);
 	void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
+	/*
+	 * This routine is called with the task_lock of mm->owner held
+	 */
+	void (*mm_owner_changed)(struct cgroup_subsys *ss,
+					struct cgroup *old,
+					struct cgroup *new);
 	int subsys_id;
 	int active;
 	int disabled;
@@ -339,4 +396,13 @@
 
 #endif /* !CONFIG_CGROUPS */
 
+#ifdef CONFIG_MM_OWNER
+extern void
+cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
+#else /* !CONFIG_MM_OWNER */
+static inline void
+cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
+{
+}
+#endif /* CONFIG_MM_OWNER */
 #endif /* _LINUX_CGROUP_H */
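As an example, a handler matching the renamed read_u64() callback might look
like this; only the prototype is taken from the header above, and the
returned value is illustrative only:

	static u64 example_read_count(struct cgroup *cgrp, struct cftype *cft)
	{
		/* return a single integer; the cgroup core formats it for userspace */
		return 42;
	}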
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 1ddebfc..e287745 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -42,3 +42,9 @@
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_DEVICE
+SUBSYS(devices)
+#endif
+
+/* */
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index 1c47a34..31b7531 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -43,9 +43,6 @@
 int coda_setattr(struct dentry *, struct iattr *);
 
 /* this file:  helpers */
-static __inline__ struct CodaFid *coda_i2f(struct inode *);
-static __inline__ char *coda_i2s(struct inode *);
-static __inline__ void coda_flag_inode(struct inode *, int flag);
 char *coda_f2s(struct CodaFid *f);
 int coda_isroot(struct inode *i);
 int coda_iscontrol(const char *name, size_t length);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index d71f7c0..b03f80a 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -53,6 +53,7 @@
 	unsigned short	vc_hi_font_mask;	/* [#] Attribute set for upper 256 chars of font or 0 if not supported */
 	struct console_font vc_font;		/* Current VC font set */
 	unsigned short	vc_video_erase_char;	/* Background erase character */
+	unsigned short	vc_scrl_erase_char;	/* Erase character for scroll */
 	/* VT terminal data */
 	unsigned int	vc_state;		/* Escape sequence parser state */
 	unsigned int	vc_npar,vc_par[NPAR];	/* Parameters of current escape sequence */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f212fa9..7464ba3 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -108,7 +108,7 @@
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri) {				\
-	static struct notifier_block fn##_nb =			\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
 		{ .notifier_call = fn, .priority = pri };	\
 	register_cpu_notifier(&fn##_nb);			\
 }
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
new file mode 100644
index 0000000..0b0d9c3
--- /dev/null
+++ b/include/linux/device_cgroup.h
@@ -0,0 +1,12 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#ifdef CONFIG_CGROUP_DEVICE
+extern int devcgroup_inode_permission(struct inode *inode, int mask);
+extern int devcgroup_inode_mknod(int mode, dev_t dev);
+#else
+static inline int devcgroup_inode_permission(struct inode *inode, int mask)
+{ return 0; }
+static inline int devcgroup_inode_mknod(int mode, dev_t dev)
+{ return 0; }
+#endif
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
new file mode 100644
index 0000000..1677e2b
--- /dev/null
+++ b/include/linux/dma-attrs.h
@@ -0,0 +1,74 @@
+#ifndef _DMA_ATTR_H
+#define _DMA_ATTR_H
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+
+/**
+ * an enum dma_attr represents an attribute associated with a DMA
+ * mapping. The semantics of each attribute should be defined in
+ * Documentation/DMA-attributes.txt.
+ */
+enum dma_attr {
+	DMA_ATTR_WRITE_BARRIER,
+	DMA_ATTR_MAX,
+};
+
+#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
+
+/**
+ * struct dma_attrs - an opaque container for DMA attributes
+ * @flags - bitmask representing a collection of enum dma_attr
+ */
+struct dma_attrs {
+	unsigned long flags[__DMA_ATTRS_LONGS];
+};
+
+#define DEFINE_DMA_ATTRS(x) 					\
+	struct dma_attrs x = {					\
+		.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },	\
+	}
+
+static inline void init_dma_attrs(struct dma_attrs *attrs)
+{
+	bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
+}
+
+#ifdef CONFIG_HAVE_DMA_ATTRS
+/**
+ * dma_set_attr - set a specific attribute
+ * @attr: attribute to set
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+	if (attrs == NULL)
+		return;
+	BUG_ON(attr >= DMA_ATTR_MAX);
+	__set_bit(attr, attrs->flags);
+}
+
+/**
+ * dma_get_attr - check for a specific attribute
+ * @attr: attribute to set
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+	if (attrs == NULL)
+		return 0;
+	BUG_ON(attr >= DMA_ATTR_MAX);
+	return test_bit(attr, attrs->flags);
+}
+#else /* !CONFIG_HAVE_DMA_ATTRS */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+}
+
+static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+#endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3320307..952e0f8 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -146,4 +146,21 @@
 }
 #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
 
+#ifndef CONFIG_HAVE_DMA_ATTRS
+struct dma_attrs;
+
+#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
+	dma_map_single(dev, cpu_addr, size, dir)
+
+#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
+	dma_unmap_single(dev, dma_addr, size, dir)
+
+#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
+	dma_map_sg(dev, sgl, nents, dir)
+
+#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
+	dma_unmap_sg(dev, sgl, nents, dir)
+
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+
 #endif
diff --git a/include/linux/edac.h b/include/linux/edac.h
index eab451e..7cf92e8 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -3,7 +3,7 @@
  *
  * Author: Dave Jiang <djiang@mvista.com>
  *
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
  * the terms of the GNU General Public License version 2. This program
  * is licensed "as is" without any warranty of any kind, whether express
  * or implied.
@@ -26,4 +26,16 @@
 extern int edac_handler_set(void);
 extern void edac_atomic_assert_error(void);
 
+static inline void opstate_init(void)
+{
+	switch (edac_op_state) {
+	case EDAC_OPSTATE_POLL:
+	case EDAC_OPSTATE_NMI:
+		break;
+	default:
+		edac_op_state = EDAC_OPSTATE_POLL;
+	}
+	return;
+}
+
 #endif
diff --git a/include/linux/elf.h b/include/linux/elf.h
index bad1b16..ff9fbed 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -208,7 +208,7 @@
 } Elf32_Ehdr;
 
 typedef struct elf64_hdr {
-  unsigned char	e_ident[16];		/* ELF "magic number" */
+  unsigned char	e_ident[EI_NIDENT];	/* ELF "magic number" */
   Elf64_Half e_type;
   Elf64_Half e_machine;
   Elf64_Word e_version;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2c92574..a1ba005 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1521,7 +1521,6 @@
 	const struct super_operations *ops, unsigned long,
 	struct vfsmount *mnt);
 extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
-int __put_super(struct super_block *sb);
 int __put_super_and_need_restart(struct super_block *sb);
 void unnamed_dev_init(void);
 
@@ -1965,7 +1964,6 @@
 extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
 extern int vfs_fstat(unsigned int, struct kstat *);
 
-extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 		    unsigned long arg);
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c37653b..b414be3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,9 +40,9 @@
 #define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
 #define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
 #define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
-#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* Retry the allocation.  Might fail */
-#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
-#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
+#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
+#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
+#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* See above */
 #define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 32fd77b..b0135b0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -427,6 +427,8 @@
 	void	(*dma_timeout)(struct ide_drive_s *);
 };
 
+struct ide_task_s;
+
 typedef struct hwif_s {
 	struct hwif_s *next;		/* for linked-list in ide_hwgroup_t */
 	struct hwif_s *mate;		/* other hwif from same PCI chip */
@@ -467,24 +469,18 @@
 	const struct ide_port_ops	*port_ops;
 	const struct ide_dma_ops	*dma_ops;
 
-	void (*ata_input_data)(ide_drive_t *, void *, u32);
-	void (*ata_output_data)(ide_drive_t *, void *, u32);
+	void (*tf_load)(ide_drive_t *, struct ide_task_s *);
+	void (*tf_read)(ide_drive_t *, struct ide_task_s *);
 
-	void (*atapi_input_bytes)(ide_drive_t *, void *, u32);
-	void (*atapi_output_bytes)(ide_drive_t *, void *, u32);
+	void (*input_data)(ide_drive_t *, struct request *, void *, unsigned);
+	void (*output_data)(ide_drive_t *, struct request *, void *, unsigned);
 
 	void (*ide_dma_clear_irq)(ide_drive_t *drive);
 
 	void (*OUTB)(u8 addr, unsigned long port);
 	void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
-	void (*OUTW)(u16 addr, unsigned long port);
-	void (*OUTSW)(unsigned long port, void *addr, u32 count);
-	void (*OUTSL)(unsigned long port, void *addr, u32 count);
 
 	u8  (*INB)(unsigned long port);
-	u16 (*INW)(unsigned long port);
-	void (*INSW)(unsigned long port, void *addr, u32 count);
-	void (*INSL)(unsigned long port, void *addr, u32 count);
 
 	/* dma physical region descriptor table (cpu view) */
 	unsigned int	*dmatable_cpu;
@@ -509,10 +505,7 @@
 
 	unsigned long	dma_base;	/* base addr for dma ports */
 	unsigned long	dma_command;	/* dma command register */
-	unsigned long	dma_vendor1;	/* dma vendor 1 register */
 	unsigned long	dma_status;	/* dma status register */
-	unsigned long	dma_vendor3;	/* dma vendor 3 register */
-	unsigned long	dma_prdtable;	/* actual prd table address */
 
 	unsigned long	config_data;	/* for use by chipset-specific code */
 	unsigned long	select_data;	/* for use by chipset-specific code */
@@ -547,7 +540,7 @@
 typedef int (ide_expiry_t)(ide_drive_t *);
 
 /* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
+typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned);
 
 typedef struct hwgroup_s {
 		/* irq handler, if active */
@@ -829,6 +822,10 @@
 void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
 			 ide_expiry_t *);
 
+void ide_execute_pkt_cmd(ide_drive_t *);
+
+void ide_pad_transfer(ide_drive_t *, int, int);
+
 ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
 
 ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
@@ -965,8 +962,7 @@
 	void			*special;	/* valid_t generally */
 } ide_task_t;
 
-void ide_tf_load(ide_drive_t *, ide_task_t *);
-void ide_tf_read(ide_drive_t *, ide_task_t *);
+void ide_tf_dump(const char *, struct ide_taskfile *);
 
 extern void SELECT_DRIVE(ide_drive_t *);
 extern void SELECT_MASK(ide_drive_t *, int);
@@ -1072,6 +1068,8 @@
 	IDE_HFLAG_NO_DMA		= (1 << 14),
 	/* check if host is PCI IDE device before allowing DMA */
 	IDE_HFLAG_NO_AUTODMA		= (1 << 15),
+	/* host uses MMIO */
+	IDE_HFLAG_MMIO			= (1 << 16),
 	/* host is CS5510/CS5520 */
 	IDE_HFLAG_CS5520		= IDE_HFLAG_VDMA,
 	/* no LBA48 */
@@ -1360,27 +1358,4 @@
 
 	return hwif->INB(hwif->io_ports.error_addr);
 }
-
-/*
- * Too bad. The drive wants to send us data which we are not ready to accept.
- * Just throw it away.
- */
-static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
-{
-	ide_hwif_t *hwif = drive->hwif;
-
-	/* FIXME: use ->atapi_input_bytes */
-	while (bcount--)
-		(void)hwif->INB(hwif->io_ports.data_addr);
-}
-
-static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
-{
-	ide_hwif_t *hwif = drive->hwif;
-
-	/* FIXME: use ->atapi_output_bytes */
-	while (bcount--)
-		hwif->OUTB(0, hwif->io_ports.data_addr);
-}
-
 #endif /* _IDE_H */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 0edda41..9a2d762 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/bitops.h>
+#include <linux/init.h>
 
 #if BITS_PER_LONG == 32
 # define IDR_BITS 5
@@ -115,4 +116,6 @@
 void ida_destroy(struct ida *ida);
 void ida_init(struct ida *ida);
 
+void __init idr_init_cache(void);
+
 #endif /* __IDR_H__ */
diff --git a/include/linux/init.h b/include/linux/init.h
index fb58c04..21d658c 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -147,6 +147,8 @@
 void setup_arch(char **);
 void prepare_namespace(void);
 
+extern void (*late_time_init)(void);
+
 #endif
   
 #ifndef MODULE
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b5fef13..f1fc747 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -289,6 +289,7 @@
 };
 
 asmlinkage void do_softirq(void);
+asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
 extern void softirq_init(void);
 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e4451d1..ea6c18a 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -4,6 +4,17 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/rwsem.h>
+#include <linux/notifier.h>
+
+/*
+ * ipc namespace events
+ */
+#define IPCNS_MEMCHANGED   0x00000001   /* Notify lowmem size changed */
+#define IPCNS_CREATED  0x00000002   /* Notify new ipc namespace created */
+#define IPCNS_REMOVED  0x00000003   /* Notify ipc namespace removed */
+
+#define IPCNS_CALLBACK_PRI 0
+
 
 struct ipc_ids {
 	int in_use;
@@ -30,15 +41,24 @@
 	size_t		shm_ctlall;
 	int		shm_ctlmni;
 	int		shm_tot;
+
+	struct notifier_block ipcns_nb;
 };
 
 extern struct ipc_namespace init_ipc_ns;
+extern atomic_t nr_ipc_ns;
 
 #ifdef CONFIG_SYSVIPC
 #define INIT_IPC_NS(ns)		.ns		= &init_ipc_ns,
-#else
+
+extern int register_ipcns_notifier(struct ipc_namespace *);
+extern int cond_register_ipcns_notifier(struct ipc_namespace *);
+extern int unregister_ipcns_notifier(struct ipc_namespace *);
+extern int ipcns_notify(unsigned long);
+
+#else /* CONFIG_SYSVIPC */
 #define INIT_IPC_NS(ns)
-#endif
+#endif /* CONFIG_SYSVIPC */
 
 #if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS)
 extern void free_ipc_ns(struct kref *kref);
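The three IPCNS_* events above are delivered through the ordinary notifier
machinery, so a consumer ends up with a standard notifier callback.  The
fragment below is only an illustrative sketch (the callback name and the
"recompute" action are invented; actual registration goes through the
register_ipcns_notifier() helpers declared in this header):

	/* hypothetical consumer of the new ipc namespace events */
	static int example_ipcns_callback(struct notifier_block *nb,
					  unsigned long action, void *arg)
	{
		switch (action) {
		case IPCNS_MEMCHANGED:	/* lowmem size changed */
		case IPCNS_CREATED:	/* new ipc namespace created */
		case IPCNS_REMOVED:	/* ipc namespace removed */
			/* e.g. recompute a memory-scaled limit such as msgmni */
			break;
		}
		return NOTIFY_OK;
	}
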
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index c5bd28b..7ebdb4f 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -64,7 +64,7 @@
  * applications and another for userland applications.  The
  * capabilities are basically the same for both interface, although
  * the interfaces are somewhat different.  The stuff in the
- * #ifdef KERNEL below is the in-kernel interface.  The userland
+ * #ifdef __KERNEL__ below is the in-kernel interface.  The userland
  * interface is defined later in the file.  */
 
 
@@ -75,8 +75,7 @@
  * work for sockets.
  */
 #define IPMI_MAX_ADDR_SIZE 32
-struct ipmi_addr
-{
+struct ipmi_addr {
 	 /* Try to take these from the "Channel Medium Type" table
 	    in section 6.5 of the IPMI 1.5 manual. */
 	int   addr_type;
@@ -90,8 +89,7 @@
  * 0), or IPMC_BMC_CHANNEL if communicating directly with the BMC.
  */
 #define IPMI_SYSTEM_INTERFACE_ADDR_TYPE	0x0c
-struct ipmi_system_interface_addr
-{
+struct ipmi_system_interface_addr {
 	int           addr_type;
 	short         channel;
 	unsigned char lun;
@@ -100,10 +98,9 @@
 /* An IPMB Address. */
 #define IPMI_IPMB_ADDR_TYPE		0x01
 /* Used for broadcast get device id as described in section 17.9 of the
-   IPMI 1.5 manual. */ 
+   IPMI 1.5 manual. */
 #define IPMI_IPMB_BROADCAST_ADDR_TYPE	0x41
-struct ipmi_ipmb_addr
-{
+struct ipmi_ipmb_addr {
 	int           addr_type;
 	short         channel;
 	unsigned char slave_addr;
@@ -128,8 +125,7 @@
  * message is a little weird, but this is required.
  */
 #define IPMI_LAN_ADDR_TYPE		0x04
-struct ipmi_lan_addr
-{
+struct ipmi_lan_addr {
 	int           addr_type;
 	short         channel;
 	unsigned char privilege;
@@ -162,16 +158,14 @@
  * byte of data in the response (as the spec shows the messages laid
  * out).
  */
-struct ipmi_msg
-{
+struct ipmi_msg {
 	unsigned char  netfn;
 	unsigned char  cmd;
 	unsigned short data_len;
 	unsigned char  __user *data;
 };
 
-struct kernel_ipmi_msg
-{
+struct kernel_ipmi_msg {
 	unsigned char  netfn;
 	unsigned char  cmd;
 	unsigned short data_len;
@@ -239,12 +233,11 @@
  * used after the message is delivered, so the upper layer may use the
  * link to build a linked list, if it likes.
  */
-struct ipmi_recv_msg
-{
+struct ipmi_recv_msg {
 	struct list_head link;
 
 	/* The type of message as defined in the "Receive Types"
-           defines above. */
+	   defines above. */
 	int              recv_type;
 
 	ipmi_user_t      user;
@@ -271,9 +264,8 @@
 /* Allocate and free the receive message. */
 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
 
-struct ipmi_user_hndl
-{
-        /* Routine type to call when a message needs to be routed to
+struct ipmi_user_hndl {
+	/* Routine type to call when a message needs to be routed to
 	   the upper layer.  This will be called with some locks held,
 	   the only IPMI routines that can be called are ipmi_request
 	   and the alloc/free operations.  The handler_data is the
@@ -368,9 +360,8 @@
  * Poll the IPMI interface for the user.  This causes the IPMI code to
  * do an immediate check for information from the driver and handle
  * anything that is immediately pending.  This will not block in any
- * way.  This is useful if you need to implement polling from the user
- * for things like modifying the watchdog timeout when a panic occurs
- * or disabling the watchdog timer on a reboot.
+ * way.  This is useful if you need to spin waiting for something to
+ * happen in the IPMI driver.
  */
 void ipmi_poll_interface(ipmi_user_t user);
 
@@ -422,12 +413,6 @@
 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
 
 /*
- * Allow run-to-completion mode to be set for the interface of
- * a specific user.
- */
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val);
-
-/*
  * When the user is created, it will not receive IPMI events by
  * default.  The user must set this to TRUE to get incoming events.
  * The first user that sets this to TRUE will receive all events that
@@ -440,8 +425,7 @@
  * every existing interface when a new watcher is registered with
  * ipmi_smi_watcher_register().
  */
-struct ipmi_smi_watcher
-{
+struct ipmi_smi_watcher {
 	struct list_head link;
 
 	/* You must set the owner to the current module, if you are in
@@ -512,8 +496,7 @@
 
 
 /* Messages sent to the interface are this format. */
-struct ipmi_req
-{
+struct ipmi_req {
 	unsigned char __user *addr; /* Address to send the message to. */
 	unsigned int  addr_len;
 
@@ -538,12 +521,11 @@
 
 /* Messages sent to the interface with timing parameters are this
    format. */
-struct ipmi_req_settime
-{
+struct ipmi_req_settime {
 	struct ipmi_req req;
 
 	/* See ipmi_request_settime() above for details on these
-           values. */
+	   values. */
 	int          retries;
 	unsigned int retry_time_ms;
 };
@@ -560,8 +542,7 @@
 					     struct ipmi_req_settime)
 
 /* Messages received from the interface are this format. */
-struct ipmi_recv
-{
+struct ipmi_recv {
 	int     recv_type; /* Is this a command, response or an
 			      asyncronous event. */
 
@@ -607,13 +588,12 @@
 					      struct ipmi_recv)
 
 /* Register to get commands from other entities on this interface. */
-struct ipmi_cmdspec
-{
+struct ipmi_cmdspec {
 	unsigned char netfn;
 	unsigned char cmd;
 };
 
-/* 
+/*
  * Register to receive a specific command.  error values:
  *   - EFAULT - an address supplied was invalid.
  *   - EBUSY - The netfn/cmd supplied was already in use.
@@ -636,8 +616,7 @@
  * else.  The chans field is a bitmask, (1 << channel) for each channel.
  * It may be IPMI_CHAN_ALL for all channels.
  */
-struct ipmi_cmdspec_chans
-{
+struct ipmi_cmdspec_chans {
 	unsigned int netfn;
 	unsigned int cmd;
 	unsigned int chans;
@@ -659,7 +638,7 @@
 #define IPMICTL_UNREGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 29,	\
 					     struct ipmi_cmdspec_chans)
 
-/* 
+/*
  * Set whether this interface receives events.  Note that the first
  * user registered for events will get all pending events for the
  * interface.  error values:
@@ -675,15 +654,18 @@
  * things it takes to determine your address (if not the BMC) and set
  * it for everyone else.  You should probably leave the LUN alone.
  */
-struct ipmi_channel_lun_address_set
-{
+struct ipmi_channel_lun_address_set {
 	unsigned short channel;
 	unsigned char  value;
 };
-#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set)
-#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set)
-#define IPMICTL_SET_MY_CHANNEL_LUN_CMD	   _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set)
-#define IPMICTL_GET_MY_CHANNEL_LUN_CMD	   _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set)
+#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD \
+	_IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set)
+#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD \
+	_IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set)
+#define IPMICTL_SET_MY_CHANNEL_LUN_CMD \
+	_IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set)
+#define IPMICTL_GET_MY_CHANNEL_LUN_CMD \
+	_IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set)
 /* Legacy interfaces, these only set IPMB 0. */
 #define IPMICTL_SET_MY_ADDRESS_CMD	_IOR(IPMI_IOC_MAGIC, 17, unsigned int)
 #define IPMICTL_GET_MY_ADDRESS_CMD	_IOR(IPMI_IOC_MAGIC, 18, unsigned int)
@@ -694,8 +676,7 @@
  * Get/set the default timing values for an interface.  You shouldn't
  * generally mess with these.
  */
-struct ipmi_timing_parms
-{
+struct ipmi_timing_parms {
 	int          retries;
 	unsigned int retry_time_ms;
 };
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 6e8cec5..62b7366 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -60,8 +60,7 @@
  * asynchronous data and messages and request them from the
  * interface.
  */
-struct ipmi_smi_msg
-{
+struct ipmi_smi_msg {
 	struct list_head link;
 
 	long    msgid;
@@ -74,12 +73,11 @@
 	unsigned char rsp[IPMI_MAX_MSG_LENGTH];
 
 	/* Will be called when the system is done with the message
-           (presumably to free it). */
+	   (presumably to free it). */
 	void (*done)(struct ipmi_smi_msg *msg);
 };
 
-struct ipmi_smi_handlers
-{
+struct ipmi_smi_handlers {
 	struct module *owner;
 
 	/* The low-level interface cannot start sending messages to
@@ -231,7 +229,7 @@
    directory for this interface.  Note that the entry will
    automatically be dstroyed when the interface is destroyed. */
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
-			    read_proc_t *read_proc, write_proc_t *write_proc,
+			    read_proc_t *read_proc,
 			    void *data, struct module *owner);
 
 #endif /* __LINUX_IPMI_SMI_H */
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
new file mode 100644
index 0000000..22a7219
--- /dev/null
+++ b/include/linux/kbuild.h
@@ -0,0 +1,15 @@
+#ifndef __LINUX_KBUILD_H
+#define __LINUX_KBUILD_H
+
+#define DEFINE(sym, val) \
+        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(sym, str, mem) \
+	DEFINE(sym, offsetof(struct str, mem))
+
+#define COMMENT(x) \
+	asm volatile("\n->#" x)
+
+#endif
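The new linux/kbuild.h centralises the DEFINE/OFFSET/BLANK/COMMENT helpers
that the per-architecture asm-offsets.c files previously open-coded.  A file
built with them emits "->" markers into its generated assembly, which the
build machinery turns into a header of #defines usable from .S files.  A
minimal, hypothetical usage sketch (the symbol names TASK_PID and PAGE_SZ
are made up):

	/* hypothetical arch/<arch>/kernel/asm-offsets.c fragment */
	#include <linux/kbuild.h>
	#include <linux/sched.h>

	void foo(void)
	{
		COMMENT("example offsets");
		OFFSET(TASK_PID, task_struct, pid);	/* offsetof(struct task_struct, pid) */
		BLANK();
		DEFINE(PAGE_SZ, PAGE_SIZE);
	}
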
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cd6d02c..53839ba 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -20,6 +20,9 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
+#define USHORT_MAX	((u16)(~0U))
+#define SHORT_MAX	((s16)(USHORT_MAX>>1))
+#define SHORT_MIN	(-SHORT_MAX - 1)
 #define INT_MAX		((int)(~0U>>1))
 #define INT_MIN		(-INT_MAX - 1)
 #define UINT_MAX	(~0U)
@@ -188,6 +191,7 @@
 extern int printk_ratelimit_jiffies;
 extern int printk_ratelimit_burst;
 extern int printk_ratelimit(void);
+extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst);
 extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst);
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 				   unsigned int interval_msec);
@@ -255,6 +259,7 @@
 #define TAINT_USER			(1<<6)
 #define TAINT_DIE			(1<<7)
 #define TAINT_OVERRIDDEN_ACPI_TABLE	(1<<8)
+#define TAINT_WARN			(1<<9)
 
 extern void dump_stack(void) __cold;
 
diff --git a/include/linux/key.h b/include/linux/key.h
index a70b8a8..c45c962 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -19,6 +19,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
+#include <linux/sysctl.h>
 #include <asm/atomic.h>
 
 #ifdef __KERNEL__
@@ -67,6 +68,8 @@
 #define KEY_OTH_SETATTR	0x00000020
 #define KEY_OTH_ALL	0x0000003f
 
+#define KEY_PERM_UNDEF	0xffffffff
+
 struct seq_file;
 struct user_struct;
 struct signal_struct;
@@ -208,16 +211,19 @@
 
 extern struct key *request_key_with_auxdata(struct key_type *type,
 					    const char *description,
-					    const char *callout_info,
+					    const void *callout_info,
+					    size_t callout_len,
 					    void *aux);
 
 extern struct key *request_key_async(struct key_type *type,
 				     const char *description,
-				     const char *callout_info);
+				     const void *callout_info,
+				     size_t callout_len);
 
 extern struct key *request_key_async_with_auxdata(struct key_type *type,
 						  const char *description,
-						  const char *callout_info,
+						  const void *callout_info,
+						  size_t callout_len,
 						  void *aux);
 
 extern int wait_for_key_construction(struct key *key, bool intr);
@@ -229,6 +235,7 @@
 				      const char *description,
 				      const void *payload,
 				      size_t plen,
+				      key_perm_t perm,
 				      unsigned long flags);
 
 extern int key_update(key_ref_t key,
@@ -257,14 +264,18 @@
 
 extern struct key *key_lookup(key_serial_t id);
 
-#define key_serial(key) ((key) ? (key)->serial : 0)
+static inline key_serial_t key_serial(struct key *key)
+{
+	return key ? key->serial : 0;
+}
+
+#ifdef CONFIG_SYSCTL
+extern ctl_table key_sysctls[];
+#endif
 
 /*
  * the userspace interface
  */
-extern struct key root_user_keyring, root_session_keyring;
-extern int alloc_uid_keyring(struct user_struct *user,
-			     struct task_struct *ctx);
 extern void switch_uid_keyring(struct user_struct *new_user);
 extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
 extern int copy_thread_group_keys(struct task_struct *tsk);
@@ -293,7 +304,6 @@
 #define make_key_ref(k, p)			({ NULL; })
 #define key_ref_to_ptr(k)		({ NULL; })
 #define is_key_possessed(k)		0
-#define alloc_uid_keyring(u,c)		0
 #define switch_uid_keyring(u)		do { } while(0)
 #define __install_session_keyring(t, k)	({ NULL; })
 #define copy_keys(f,t)			0
@@ -306,10 +316,6 @@
 #define key_fsgid_changed(t)		do { } while(0)
 #define key_init()			do { } while(0)
 
-/* Initial keyrings */
-extern struct key root_user_keyring;
-extern struct key root_session_keyring;
-
 #endif /* CONFIG_KEYS */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_KEY_H */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index 3365945..656ee6b 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -49,5 +49,6 @@
 #define KEYCTL_SET_REQKEY_KEYRING	14	/* set default request-key keyring */
 #define KEYCTL_SET_TIMEOUT		15	/* set key timeout */
 #define KEYCTL_ASSUME_AUTHORITY		16	/* assume request_key() authorisation */
+#define KEYCTL_GET_SECURITY		17	/* get key security label */
 
 #endif /*  _LINUX_KEYCTL_H */
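The new KEYCTL_GET_SECURITY command returns the security label of a key as a
NUL-terminated string (see the key_getsecurity hook added to linux/security.h
later in this diff).  A userspace sketch might look like the following; the
exact argument layout is an assumption modelled on the other buffer-returning
keyctl commands, not something this header guarantees:

	/* hypothetical userspace fragment */
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	static long example_get_key_label(long key_id)
	{
		char label[256];
		long n;

		n = syscall(__NR_keyctl, 17 /* KEYCTL_GET_SECURITY */,
			    key_id, label, sizeof(label));
		if (n > 0)
			printf("key %ld: %s\n", key_id, label);
		return n;
	}
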
diff --git a/include/linux/list.h b/include/linux/list.h
index b4a939b..7627508 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -328,7 +328,7 @@
 	return !list_empty(head) && (head->next == head->prev);
 }
 
-static inline void __list_splice(struct list_head *list,
+static inline void __list_splice(const struct list_head *list,
 				 struct list_head *head)
 {
 	struct list_head *first = list->next;
@@ -347,7 +347,8 @@
  * @list: the new list to add.
  * @head: the place to add it in the first list.
  */
-static inline void list_splice(struct list_head *list, struct list_head *head)
+static inline void list_splice(const struct list_head *list,
+				struct list_head *head)
 {
 	if (!list_empty(list))
 		__list_splice(list, head);
diff --git a/include/linux/mca-legacy.h b/include/linux/mca-legacy.h
index f2bb770..7a3aea8 100644
--- a/include/linux/mca-legacy.h
+++ b/include/linux/mca-legacy.h
@@ -34,7 +34,6 @@
 extern int mca_find_adapter(int id, int start);
 extern int mca_find_unused_adapter(int id, int start);
 
-extern int mca_is_adapter_used(int slot);
 extern int mca_mark_as_used(int slot);
 extern void mca_mark_as_unused(int slot);
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8b1c429..e660877 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -27,9 +27,6 @@
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
-extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
-extern void mm_free_cgroup(struct mm_struct *mm);
-
 #define page_reset_bad_cgroup(page)	((page)->page_cgroup = 0)
 
 extern struct page_cgroup *page_get_page_cgroup(struct page *page);
@@ -48,8 +45,10 @@
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
+extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+
 #define mm_match_cgroup(mm, cgroup)	\
-	((cgroup) == rcu_dereference((mm)->mem_cgroup))
+	((cgroup) == mem_cgroup_from_task((mm)->owner))
 
 extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
@@ -73,15 +72,6 @@
 				struct zone *zone, int priority);
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline void mm_init_cgroup(struct mm_struct *mm,
-					struct task_struct *p)
-{
-}
-
-static inline void mm_free_cgroup(struct mm_struct *mm)
-{
-}
-
 static inline void page_reset_bad_cgroup(struct page *page)
 {
 }
diff --git a/include/linux/memory.h b/include/linux/memory.h
index f80e0e3..2f5f8a5 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -53,6 +53,13 @@
 struct notifier_block;
 struct mem_section;
 
+/*
+ * Priorities for the hotplug memory callback routines (stored in decreasing
+ * order in the callback chain)
+ */
+#define SLAB_CALLBACK_PRI       1
+#define IPC_CALLBACK_PRI        10
+
 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
 static inline int memory_dev_init(void)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8b7f4a5..c31a9cd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1066,6 +1066,19 @@
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
 	unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
+
+#ifdef CONFIG_PROC_FS
+/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
+extern void added_exe_file_vma(struct mm_struct *mm);
+extern void removed_exe_file_vma(struct mm_struct *mm);
+#else
+static inline void added_exe_file_vma(struct mm_struct *mm)
+{}
+
+static inline void removed_exe_file_vma(struct mm_struct *mm)
+{}
+#endif /* CONFIG_PROC_FS */
+
 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
 extern int install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
@@ -1230,8 +1243,6 @@
 					void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
-void drop_pagecache(void);
-void drop_slab(void);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 29adaa7..eb7c16c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -42,7 +42,10 @@
 					 * to show when page is mapped
 					 * & limit reverse map searches.
 					 */
-		unsigned int inuse;	/* SLUB: Nr of objects */
+		struct {		/* SLUB */
+			u16 inuse;
+			u16 objects;
+		};
 	};
 	union {
 	    struct {
@@ -222,8 +225,15 @@
 	/* aio bits */
 	rwlock_t		ioctx_list_lock;	/* aio lock */
 	struct kioctx		*ioctx_list;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-	struct mem_cgroup *mem_cgroup;
+#ifdef CONFIG_MM_OWNER
+	struct task_struct *owner;	/* The thread group leader that */
+					/* owns the mm_struct.		*/
+#endif
+
+#ifdef CONFIG_PROC_FS
+	/* store ref to file /proc/<pid>/exe symlink points to */
+	struct file *exe_file;
+	unsigned long num_exe_file_vmas;
 #endif
 };
 
diff --git a/include/linux/msg.h b/include/linux/msg.h
index 10a3d5a..6f3b8e7 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -49,16 +49,26 @@
 	unsigned short  msgseg; 
 };
 
+/*
+ * Scaling factor to compute msgmni:
+ * the memory dedicated to msg queues (msgmni * msgmnb) should occupy
+ * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c):
+ * up to 8MB       : msgmni = 16 (MSGMNI)
+ * 4 GB            : msgmni = 8K
+ * more than 16 GB : msgmni = 32K (IPCMNI)
+ */
+#define MSG_MEM_SCALE 32
+
 #define MSGMNI    16   /* <= IPCMNI */     /* max # of msg queue identifiers */
 #define MSGMAX  8192   /* <= INT_MAX */   /* max size of message (bytes) */
 #define MSGMNB 16384   /* <= INT_MAX */   /* default max size of a message queue */
 
 /* unused */
-#define MSGPOOL (MSGMNI*MSGMNB/1024)  /* size in kilobytes of message pool */
+#define MSGPOOL (MSGMNI * MSGMNB) /* size in bytes of message pool */
 #define MSGTQL  MSGMNB            /* number of system message headers */
 #define MSGMAP  MSGMNB            /* number of entries in message map */
 #define MSGSSZ  16                /* message segment size */
-#define __MSGSEG ((MSGPOOL*1024)/ MSGSSZ) /* max no. of segments */
+#define __MSGSEG (MSGPOOL / MSGSSZ) /* max no. of segments */
 #define MSGSEG (__MSGSEG <= 0xffff ? __MSGSEG : 0xffff)
 
 #ifdef __KERNEL__
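As a quick sanity check of the MSG_MEM_SCALE table above: with
MSGMNB = 16384 bytes per queue and MSG_MEM_SCALE = 32, dedicating 1/32 of
4 GB of lowmem to message queues gives

	msgmni = (4 GB) / MSG_MEM_SCALE / MSGMNB
	       = 4294967296 / 32 / 16384
	       = 8192		/* the "8K" row in the comment above */

Any clamping between MSGMNI and IPCMNI, and the actual recomputation when
the amount of lowmem changes, is done by the code in ipc/msg.c that the
comment refers to.
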
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 9865720..155719d 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -56,9 +56,11 @@
 	int magic;
 
 	spinlock_t queue_lock;
-	struct list_head queue_head;/* Requests are added here...	*/
+	struct list_head queue_head;	/* Requests waiting result */
 	struct request *active_req;
 	wait_queue_head_t active_wq;
+	struct list_head waiting_queue;	/* Requests to be sent */
+	wait_queue_head_t waiting_wq;
 
 	struct mutex tx_lock;
 	struct gendisk *disk;
@@ -86,11 +88,7 @@
 	char handle[8];
 	__be64 from;
 	__be32 len;
-}
-#ifdef __GNUC__
-	__attribute__ ((packed))
-#endif
-;
+} __attribute__ ((packed));
 
 /*
  * This is the reply packet that nbd-server sends back to the client after
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 20dfed5..0ff6224 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -121,6 +121,10 @@
 extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
 		struct notifier_block *nb);
 
+extern int blocking_notifier_chain_cond_register(
+		struct blocking_notifier_head *nh,
+		struct notifier_block *nb);
+
 extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
 		struct notifier_block *nb);
 extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1ac9697..d746a2a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -4,7 +4,6 @@
 #include <linux/preempt.h>
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
-#include <linux/string.h> /* For memset() */
 #include <linux/cpumask.h>
 
 #include <asm/percpu.h>
diff --git a/include/linux/personality.h b/include/linux/personality.h
index 012cd55..a84e9ff 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -105,10 +105,6 @@
  */
 #define personality(pers)	(pers & PER_MASK)
 
-/*
- * Personality of the currently running process.
- */
-#define get_personality		(current->personality)
 
 /*
  * Change personality of the currently running process.
diff --git a/include/linux/phantom.h b/include/linux/phantom.h
index 96f4048..02268c5 100644
--- a/include/linux/phantom.h
+++ b/include/linux/phantom.h
@@ -27,14 +27,17 @@
 
 #define PH_IOC_MAGIC		'p'
 #define PHN_GET_REG		_IOWR(PH_IOC_MAGIC, 0, struct phm_reg *)
-#define PHN_SET_REG		_IOW (PH_IOC_MAGIC, 1, struct phm_reg *)
+#define PHN_SET_REG		_IOW(PH_IOC_MAGIC, 1, struct phm_reg *)
 #define PHN_GET_REGS		_IOWR(PH_IOC_MAGIC, 2, struct phm_regs *)
-#define PHN_SET_REGS		_IOW (PH_IOC_MAGIC, 3, struct phm_regs *)
+#define PHN_SET_REGS		_IOW(PH_IOC_MAGIC, 3, struct phm_regs *)
 /* this ioctl tells the driver, that the caller is not OpenHaptics and might
  * use improved registers update (no more phantom switchoffs when using
  * libphantom) */
-#define PHN_NOT_OH		_IO  (PH_IOC_MAGIC, 4)
-#define PH_IOC_MAXNR		4
+#define PHN_NOT_OH		_IO(PH_IOC_MAGIC, 4)
+#define PHN_GETREG		_IOWR(PH_IOC_MAGIC, 5, struct phm_reg)
+#define PHN_SETREG		_IOW(PH_IOC_MAGIC, 6, struct phm_reg)
+#define PHN_GETREGS		_IOWR(PH_IOC_MAGIC, 7, struct phm_regs)
+#define PHN_SETREGS		_IOW(PH_IOC_MAGIC, 8, struct phm_regs)
 
 #define PHN_CONTROL		0x6     /* control byte in iaddr space */
 #define PHN_CTL_AMP		0x1     /*   switch after torques change */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 9b6c935..9883bc9 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -9,7 +9,6 @@
 
 struct net;
 struct completion;
-
 /*
  * The proc filesystem constants/structures
  */
@@ -41,7 +40,7 @@
  * /proc file has a parent, but "subdir" is NULL for all
  * non-directory entries).
  *
- * "get_info" is called at "read", while "owner" is used to protect module
+ * "owner" is used to protect module
  * from unloading while proc_dir_entry is in use
  */
 
@@ -49,7 +48,6 @@
 			  int count, int *eof, void *data);
 typedef	int (write_proc_t)(struct file *file, const char __user *buffer,
 			   unsigned long count, void *data);
-typedef int (get_info_t)(char *, char **, off_t, int);
 
 struct proc_dir_entry {
 	unsigned int low_ino;
@@ -70,7 +68,6 @@
 	 * somewhere.
 	 */
 	const struct file_operations *proc_fops;
-	get_info_t *get_info;
 	struct module *owner;
 	struct proc_dir_entry *next, *parent, *subdir;
 	void *data;
@@ -97,10 +94,6 @@
 
 #ifdef CONFIG_PROC_FS
 
-extern struct proc_dir_entry proc_root;
-extern struct proc_dir_entry *proc_root_fs;
-extern struct proc_dir_entry *proc_bus;
-extern struct proc_dir_entry *proc_root_driver;
 extern struct proc_dir_entry *proc_root_kcore;
 
 extern spinlock_t proc_subdir_lock;
@@ -123,9 +116,10 @@
 
 extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
 						struct proc_dir_entry *parent);
-struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
 				struct proc_dir_entry *parent,
-				const struct file_operations *proc_fops);
+				const struct file_operations *proc_fops,
+				void *data);
 extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
 
 extern struct vfsmount *proc_mnt;
@@ -180,6 +174,12 @@
 extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
 			struct proc_dir_entry *parent);
 
+static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+	struct proc_dir_entry *parent, const struct file_operations *proc_fops)
+{
+	return proc_create_data(name, mode, parent, proc_fops, NULL);
+}
+
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
 	mode_t mode, struct proc_dir_entry *base, 
 	read_proc_t *read_proc, void * data)
@@ -192,24 +192,19 @@
 	return res;
 }
  
-static inline struct proc_dir_entry *create_proc_info_entry(const char *name,
-	mode_t mode, struct proc_dir_entry *base, get_info_t *get_info)
-{
-	struct proc_dir_entry *res=create_proc_entry(name,mode,base);
-	if (res) res->get_info=get_info;
-	return res;
-}
-
 extern struct proc_dir_entry *proc_net_fops_create(struct net *net,
 	const char *name, mode_t mode, const struct file_operations *fops);
 extern void proc_net_remove(struct net *net, const char *name);
 extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
 	struct proc_dir_entry *parent);
 
-#else
+/* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are
+ * only needed to implement /proc/<pid>|self/exe so we define them here. */
+extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm);
 
-#define proc_root_driver NULL
-#define proc_bus NULL
+#else
 
 #define proc_net_fops_create(net, name, mode, fops)  ({ (void)(mode), NULL; })
 static inline void proc_net_remove(struct net *net, const char *name) {}
@@ -226,6 +221,12 @@
 {
 	return NULL;
 }
+static inline struct proc_dir_entry *proc_create_data(const char *name,
+	mode_t mode, struct proc_dir_entry *parent,
+	const struct file_operations *proc_fops, void *data)
+{
+	return NULL;
+}
 #define remove_proc_entry(name, parent) do {} while (0)
 
 static inline struct proc_dir_entry *proc_symlink(const char *name,
@@ -236,16 +237,11 @@
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
 	mode_t mode, struct proc_dir_entry *base, 
 	read_proc_t *read_proc, void * data) { return NULL; }
-static inline struct proc_dir_entry *create_proc_info_entry(const char *name,
-	mode_t mode, struct proc_dir_entry *base, get_info_t *get_info)
-	{ return NULL; }
 
 struct tty_driver;
 static inline void proc_tty_register_driver(struct tty_driver *driver) {};
 static inline void proc_tty_unregister_driver(struct tty_driver *driver) {};
 
-extern struct proc_dir_entry proc_root;
-
 static inline int pid_ns_prepare_proc(struct pid_namespace *ns)
 {
 	return 0;
@@ -255,6 +251,19 @@
 {
 }
 
+static inline void set_mm_exe_file(struct mm_struct *mm,
+				   struct file *new_exe_file)
+{}
+
+static inline struct file *get_mm_exe_file(struct mm_struct *mm)
+{
+	return NULL;
+}
+
+static inline void dup_mm_exe_file(struct mm_struct *oldmm,
+	       			   struct mm_struct *newmm)
+{}
+
 #endif /* CONFIG_PROC_FS */
 
 #if !defined(CONFIG_PROC_KCORE)
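proc_create() is now a thin wrapper that passes a NULL data pointer to
proc_create_data(); drivers that need per-entry private data call
proc_create_data() directly and find the pointer on the proc_dir_entry
later.  A minimal, hypothetical sketch of the common seq_file-based pattern
(all example_* names, my_private_data and the "example_stats" path are
invented):

	static const struct file_operations example_proc_fops = {
		.owner	 = THIS_MODULE,
		.open	 = example_proc_open,	/* e.g. single_open() on a show routine */
		.read	 = seq_read,
		.llseek	 = seq_lseek,
		.release = single_release,
	};

	/* in the driver's init path */
	pde = proc_create_data("example_stats", S_IRUGO, NULL,
			       &example_proc_fops, my_private_data);
	if (!pde)
		return -ENOMEM;
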
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 61363ce..6d9e1fc 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -9,6 +9,8 @@
  *
  * Author: Pavel Emelianov <xemul@openvz.org>
  *
+ * See Documentation/controllers/resource_counter.txt for more
+ * info about what this counter is.
  */
 
 #include <linux/cgroup.h>
@@ -25,6 +27,10 @@
 	 */
 	unsigned long long usage;
 	/*
+	 * the maximal value of the usage from the counter creation
+	 */
+	unsigned long long max_usage;
+	/*
 	 * the limit that usage cannot exceed
 	 */
 	unsigned long long limit;
@@ -39,8 +45,9 @@
 	spinlock_t lock;
 };
 
-/*
+/**
  * Helpers to interact with userspace
+ * res_counter_read_u64() - returns the value of the specified member.
  * res_counter_read/_write - put/get the specified fields from the
  * res_counter struct to/from the user
  *
@@ -51,6 +58,8 @@
  * @pos:     and the offset.
  */
 
+u64 res_counter_read_u64(struct res_counter *counter, int member);
+
 ssize_t res_counter_read(struct res_counter *counter, int member,
 		const char __user *buf, size_t nbytes, loff_t *pos,
 		int (*read_strategy)(unsigned long long val, char *s));
@@ -64,6 +73,7 @@
 
 enum {
 	RES_USAGE,
+	RES_MAX_USAGE,
 	RES_LIMIT,
 	RES_FAILCNT,
 };
@@ -124,4 +134,21 @@
 	return ret;
 }
 
+static inline void res_counter_reset_max(struct res_counter *cnt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	cnt->max_usage = cnt->usage;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+}
+
+static inline void res_counter_reset_failcnt(struct res_counter *cnt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	cnt->failcnt = 0;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+}
 #endif
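The new max_usage watermark and the two reset helpers above give controllers
a cheap way to expose both current and peak usage and to let userspace clear
the peak.  A hypothetical controller fragment (names invented) could look
like:

	static void example_report(struct res_counter *res)
	{
		u64 cur  = res_counter_read_u64(res, RES_USAGE);
		u64 peak = res_counter_read_u64(res, RES_MAX_USAGE);

		printk(KERN_DEBUG "usage %llu, peak %llu\n",
		       (unsigned long long)cur, (unsigned long long)peak);
	}

	/* called when userspace writes to a "reset watermark" control file */
	static void example_reset(struct res_counter *res)
	{
		res_counter_reset_max(res);	/* max_usage := current usage */
	}
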
diff --git a/include/linux/resource.h b/include/linux/resource.h
index ae13db7..aaa423a 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -19,6 +19,7 @@
 #define	RUSAGE_SELF	0
 #define	RUSAGE_CHILDREN	(-1)
 #define RUSAGE_BOTH	(-2)		/* sys_wait4() uses this */
+#define	RUSAGE_THREAD	1		/* only the calling thread */
 
 struct	rusage {
 	struct timeval ru_utime;	/* user time used */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 024d72b..1d02bab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2148,6 +2148,19 @@
 #define TASK_SIZE_OF(tsk)	TASK_SIZE
 #endif
 
+#ifdef CONFIG_MM_OWNER
+extern void mm_update_next_owner(struct mm_struct *mm);
+extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
+#else
+static inline void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+
+static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+}
+#endif /* CONFIG_MM_OWNER */
+
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/security.h b/include/linux/security.h
index d0a28fd..adb09d8 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -53,8 +53,9 @@
 extern int cap_bprm_set_security(struct linux_binprm *bprm);
 extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
 extern int cap_bprm_secureexec(struct linux_binprm *bprm);
-extern int cap_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags);
-extern int cap_inode_removexattr(struct dentry *dentry, char *name);
+extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
+			      const void *value, size_t size, int flags);
+extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
 extern int cap_inode_need_killpriv(struct dentry *dentry);
 extern int cap_inode_killpriv(struct dentry *dentry);
 extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
@@ -1008,6 +1009,17 @@
  *	@perm describes the combination of permissions required of this key.
  *	Return 1 if permission granted, 0 if permission denied and -ve it the
  *	normal permissions model should be effected.
+ * @key_getsecurity:
+ *	Get a textual representation of the security context attached to a key
+ *	for the purposes of honouring KEYCTL_GETSECURITY.  This function
+ *	allocates the storage for the NUL-terminated string and the caller
+ *	should free it.
+ *	@key points to the key to be queried.
+ *	@_buffer points to a pointer that should be set to point to the
+ *	 resulting string (if no label or an error occurs).
+ *	Return the length of the string (including terminating NUL) or -ve if
+ *      an error.
+ *	May also return 0 (and a NULL buffer pointer) if there is no label.
  *
  * Security hooks affecting all System V IPC operations.
  *
@@ -1362,13 +1374,13 @@
 	int (*inode_setattr)	(struct dentry *dentry, struct iattr *attr);
 	int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
 	void (*inode_delete) (struct inode *inode);
-	int (*inode_setxattr) (struct dentry *dentry, char *name, void *value,
-			       size_t size, int flags);
-	void (*inode_post_setxattr) (struct dentry *dentry, char *name, void *value,
-				     size_t size, int flags);
-	int (*inode_getxattr) (struct dentry *dentry, char *name);
+	int (*inode_setxattr) (struct dentry *dentry, const char *name,
+			       const void *value, size_t size, int flags);
+	void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
+				     const void *value, size_t size, int flags);
+	int (*inode_getxattr) (struct dentry *dentry, const char *name);
 	int (*inode_listxattr) (struct dentry *dentry);
-	int (*inode_removexattr) (struct dentry *dentry, char *name);
+	int (*inode_removexattr) (struct dentry *dentry, const char *name);
 	int (*inode_need_killpriv) (struct dentry *dentry);
 	int (*inode_killpriv) (struct dentry *dentry);
 	int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
@@ -1537,7 +1549,7 @@
 	int (*key_permission) (key_ref_t key_ref,
 			       struct task_struct *context,
 			       key_perm_t perm);
-
+	int (*key_getsecurity)(struct key *key, char **_buffer);
 #endif	/* CONFIG_KEYS */
 
 #ifdef CONFIG_AUDIT
@@ -1633,13 +1645,13 @@
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
 int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
 void security_inode_delete(struct inode *inode);
-int security_inode_setxattr(struct dentry *dentry, char *name,
-			    void *value, size_t size, int flags);
-void security_inode_post_setxattr(struct dentry *dentry, char *name,
-				  void *value, size_t size, int flags);
-int security_inode_getxattr(struct dentry *dentry, char *name);
+int security_inode_setxattr(struct dentry *dentry, const char *name,
+			    const void *value, size_t size, int flags);
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+				  const void *value, size_t size, int flags);
+int security_inode_getxattr(struct dentry *dentry, const char *name);
 int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct dentry *dentry, char *name);
+int security_inode_removexattr(struct dentry *dentry, const char *name);
 int security_inode_need_killpriv(struct dentry *dentry);
 int security_inode_killpriv(struct dentry *dentry);
 int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
@@ -2041,17 +2053,18 @@
 static inline void security_inode_delete(struct inode *inode)
 { }
 
-static inline int security_inode_setxattr(struct dentry *dentry, char *name,
-					   void *value, size_t size, int flags)
+static inline int security_inode_setxattr(struct dentry *dentry,
+		const char *name, const void *value, size_t size, int flags)
 {
 	return cap_inode_setxattr(dentry, name, value, size, flags);
 }
 
-static inline void security_inode_post_setxattr(struct dentry *dentry, char *name,
-						 void *value, size_t size, int flags)
+static inline void security_inode_post_setxattr(struct dentry *dentry,
+		const char *name, const void *value, size_t size, int flags)
 { }
 
-static inline int security_inode_getxattr(struct dentry *dentry, char *name)
+static inline int security_inode_getxattr(struct dentry *dentry,
+			const char *name)
 {
 	return 0;
 }
@@ -2061,7 +2074,8 @@
 	return 0;
 }
 
-static inline int security_inode_removexattr(struct dentry *dentry, char *name)
+static inline int security_inode_removexattr(struct dentry *dentry,
+			const char *name)
 {
 	return cap_inode_removexattr(dentry, name);
 }
@@ -2729,6 +2743,7 @@
 void security_key_free(struct key *key);
 int security_key_permission(key_ref_t key_ref,
 			    struct task_struct *context, key_perm_t perm);
+int security_key_getsecurity(struct key *key, char **_buffer);
 
 #else
 
@@ -2750,6 +2765,12 @@
 	return 0;
 }
 
+static inline int security_key_getsecurity(struct key *key, char **_buffer)
+{
+	*_buffer = NULL;
+	return 0;
+}
+
 #endif
 #endif /* CONFIG_KEYS */
 
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 79d59c9..71e43a1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -29,6 +29,7 @@
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -48,11 +49,21 @@
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
 	struct list_head full;
 #endif
 };
 
 /*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+	unsigned long x;
+};
+
+/*
  * Slab cache management.
  */
 struct kmem_cache {
@@ -61,7 +72,7 @@
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int order;		/* Current preferred allocation order */
+	struct kmem_cache_order_objects oo;
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -70,7 +81,8 @@
 	struct kmem_cache_node local_node;
 
 	/* Allocation and freeing of slabs */
-	int objects;		/* Number of objects in slab */
+	struct kmem_cache_order_objects max;
+	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
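kmem_cache_order_objects packs the allocation order and the number of objects
a slab of that order holds into a single word, so both values can be read or
updated together and never seen in an inconsistent state.  The real
encode/decode helpers (and the exact bit split) live in mm/slub.c; purely for
illustration, assuming the low 16 bits carry the object count:

	#define EXAMPLE_OO_SHIFT	16

	static inline struct kmem_cache_order_objects
	example_oo_make(int order, unsigned long objects)
	{
		struct kmem_cache_order_objects x = {
			.x = ((unsigned long)order << EXAMPLE_OO_SHIFT) | objects
		};
		return x;
	}

	static inline int example_oo_order(struct kmem_cache_order_objects x)
	{
		return x.x >> EXAMPLE_OO_SHIFT;
	}

	static inline int example_oo_objects(struct kmem_cache_order_objects x)
	{
		return x.x & ((1UL << EXAMPLE_OO_SHIFT) - 1);
	}
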
diff --git a/include/linux/smb.h b/include/linux/smb.h
index f098dff..caa43b2 100644
--- a/include/linux/smb.h
+++ b/include/linux/smb.h
@@ -11,6 +11,7 @@
 
 #include <linux/types.h>
 #include <linux/magic.h>
+#include <linux/time.h>
 
 enum smb_protocol { 
 	SMB_PROTOCOL_NONE, 
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 8df6d13..0522f36 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -240,26 +240,28 @@
 asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
 #endif
 
-asmlinkage long sys_setxattr(char __user *path, char __user *name,
-				void __user *value, size_t size, int flags);
-asmlinkage long sys_lsetxattr(char __user *path, char __user *name,
-				void __user *value, size_t size, int flags);
-asmlinkage long sys_fsetxattr(int fd, char __user *name, void __user *value,
-				size_t size, int flags);
-asmlinkage ssize_t sys_getxattr(char __user *path, char __user *name,
+asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
+			     const void __user *value, size_t size, int flags);
+asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
+			      const void __user *value, size_t size, int flags);
+asmlinkage long sys_fsetxattr(int fd, const char __user *name,
+			      const void __user *value, size_t size, int flags);
+asmlinkage ssize_t sys_getxattr(const char __user *path, const char __user *name,
 				void __user *value, size_t size);
-asmlinkage ssize_t sys_lgetxattr(char __user *path, char __user *name,
+asmlinkage ssize_t sys_lgetxattr(const char __user *path, const char __user *name,
 				void __user *value, size_t size);
-asmlinkage ssize_t sys_fgetxattr(int fd, char __user *name,
+asmlinkage ssize_t sys_fgetxattr(int fd, const char __user *name,
 				void __user *value, size_t size);
-asmlinkage ssize_t sys_listxattr(char __user *path, char __user *list,
+asmlinkage ssize_t sys_listxattr(const char __user *path, char __user *list,
 				size_t size);
-asmlinkage ssize_t sys_llistxattr(char __user *path, char __user *list,
+asmlinkage ssize_t sys_llistxattr(const char __user *path, char __user *list,
 				size_t size);
 asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size);
-asmlinkage long sys_removexattr(char __user *path, char __user *name);
-asmlinkage long sys_lremovexattr(char __user *path, char __user *name);
-asmlinkage long sys_fremovexattr(int fd, char __user *name);
+asmlinkage long sys_removexattr(const char __user *path,
+				const char __user *name);
+asmlinkage long sys_lremovexattr(const char __user *path,
+				 const char __user *name);
+asmlinkage long sys_fremovexattr(int fd, const char __user *name);
 
 asmlinkage unsigned long sys_brk(unsigned long brk);
 asmlinkage long sys_mprotect(unsigned long start, size_t len,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 571f01d..24141b4 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -945,11 +945,14 @@
 /* For the /proc/sys support */
 struct ctl_table;
 struct nsproxy;
+struct ctl_table_root;
+
 extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
 extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
 						struct ctl_table_header *prev);
 extern void sysctl_head_finish(struct ctl_table_header *prev);
-extern int sysctl_perm(struct ctl_table *table, int op);
+extern int sysctl_perm(struct ctl_table_root *root,
+		struct ctl_table *table, int op);
 
 typedef struct ctl_table ctl_table;
 
@@ -981,11 +984,6 @@
 		      void __user *oldval, size_t __user *oldlenp,
 		      void __user *newval, size_t newlen);
 
-extern int do_sysctl_strategy (struct ctl_table *table,
-			       int __user *name, int nlen,
-			       void __user *oldval, size_t __user *oldlenp,
-			       void __user *newval, size_t newlen);
-
 extern ctl_handler sysctl_data;
 extern ctl_handler sysctl_string;
 extern ctl_handler sysctl_intvec;
@@ -1054,6 +1052,8 @@
 	struct list_head header_list;
 	struct list_head *(*lookup)(struct ctl_table_root *root,
 					   struct nsproxy *namespaces);
+	int (*permissions)(struct ctl_table_root *root,
+			struct nsproxy *namespaces, struct ctl_table *table);
 };
 
 /* struct ctl_table_header is used to maintain dynamic lists of
@@ -1085,8 +1085,6 @@
 void unregister_sysctl_table(struct ctl_table_header * table);
 int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
 
-#else /* __KERNEL__ */
-
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h
index e024863..9641130 100644
--- a/include/linux/sysv_fs.h
+++ b/include/linux/sysv_fs.h
@@ -1,11 +1,7 @@
 #ifndef _LINUX_SYSV_FS_H
 #define _LINUX_SYSV_FS_H
 
-#if defined(__GNUC__)
-# define __packed2__	__attribute__((packed, aligned(2)))
-#else
->> I want to scream! <<
-#endif
+#define __packed2__	__attribute__((packed, aligned(2)))
 
 
 #ifndef __KERNEL__
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644
index 0000000..99c1b4d
--- /dev/null
+++ b/include/linux/unaligned/access_ok.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
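All of the unaligned-access flavours added in this series (access_ok,
byteshift, memmove, packed struct) expose the same
get/put_unaligned_{le,be}{16,32,64}() names, so callers are written the same
way no matter which implementation an architecture selects.  A hypothetical
caller decoding and building a little-endian, possibly misaligned wire header
(the field layout is invented):

	static void example_parse_hdr(const u8 *buf, u16 *type, u32 *len,
				      u64 *cookie)
	{
		*type   = get_unaligned_le16(buf);
		*len    = get_unaligned_le32(buf + 2);
		*cookie = get_unaligned_le64(buf + 6);
	}

	static void example_build_hdr(u8 *buf, u16 type, u32 len, u64 cookie)
	{
		put_unaligned_le16(type, buf);
		put_unaligned_le32(len, buf + 2);
		put_unaligned_le64(cookie, buf + 6);
	}
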
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
new file mode 100644
index 0000000..46dd12c
--- /dev/null
+++ b/include/linux/unaligned/be_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+	return p[0] << 8 | p[1];
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+	return (u64)__get_unaligned_be32(p) << 32 |
+	       __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h
new file mode 100644
index 0000000..c2a76c5
--- /dev/null
+++ b/include/linux/unaligned/be_memmove.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
+#define _LINUX_UNALIGNED_BE_MEMMOVE_H
+
+#include <linux/unaligned/memmove.h>
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_memmove16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_memmove32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_memmove64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_memmove16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_memmove32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_memmove64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h
new file mode 100644
index 0000000..1324158
--- /dev/null
+++ b/include/linux/unaligned/be_struct.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
+#define _LINUX_UNALIGNED_BE_STRUCT_H
+
+#include <linux/unaligned/packed_struct.h>
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644
index 0000000..02d97ff
--- /dev/null
+++ b/include/linux/unaligned/generic.h
@@ -0,0 +1,68 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H
+#define _LINUX_UNALIGNED_GENERIC_H
+
+/*
+ * Cause a link-time error if we try an unaligned access other than
+ * 1,2,4 or 8 bytes long
+ */
+extern void __bad_unaligned_access_size(void);
+
+#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({			\
+	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
+	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),	\
+	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),	\
+	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),	\
+	__bad_unaligned_access_size()))));					\
+	}))
+
+#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({			\
+	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
+	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),	\
+	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),	\
+	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),	\
+	__bad_unaligned_access_size()))));					\
+	}))
+
+#define __put_unaligned_le(val, ptr) ({					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		put_unaligned_le16((__force u16)(val), __gu_p);		\
+		break;							\
+	case 4:								\
+		put_unaligned_le32((__force u32)(val), __gu_p);		\
+		break;							\
+	case 8:								\
+		put_unaligned_le64((__force u64)(val), __gu_p);		\
+		break;							\
+	default:							\
+		__bad_unaligned_access_size();				\
+		break;							\
+	}								\
+	(void)0; })
+
+#define __put_unaligned_be(val, ptr) ({					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		put_unaligned_be16((__force u16)(val), __gu_p);		\
+		break;							\
+	case 4:								\
+		put_unaligned_be32((__force u32)(val), __gu_p);		\
+		break;							\
+	case 8:								\
+		put_unaligned_be64((__force u64)(val), __gu_p);		\
+		break;							\
+	default:							\
+		__bad_unaligned_access_size();				\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H */
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
new file mode 100644
index 0000000..59777e9
--- /dev/null
+++ b/include/linux/unaligned/le_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+	return p[0] | p[1] << 8;
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+	return (u64)__get_unaligned_le32(p + 4) << 32 |
+	       __get_unaligned_le32(p);
+}
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h
new file mode 100644
index 0000000..269849b
--- /dev/null
+++ b/include/linux/unaligned/le_memmove.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
+#define _LINUX_UNALIGNED_LE_MEMMOVE_H
+
+#include <linux/unaligned/memmove.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_memmove16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_memmove32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_memmove64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_memmove16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_memmove32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_memmove64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h
new file mode 100644
index 0000000..088c457
--- /dev/null
+++ b/include/linux/unaligned/le_struct.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
+#define _LINUX_UNALIGNED_LE_STRUCT_H
+
+#include <linux/unaligned/packed_struct.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h
new file mode 100644
index 0000000..eeb5a77
--- /dev/null
+++ b/include/linux/unaligned/memmove.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_UNALIGNED_MEMMOVE_H
+#define _LINUX_UNALIGNED_MEMMOVE_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+static inline u16 __get_unaligned_memmove16(const void *p)
+{
+	u16 tmp;
+	memmove(&tmp, p, 2);
+	return tmp;
+}
+
+static inline u32 __get_unaligned_memmove32(const void *p)
+{
+	u32 tmp;
+	memmove(&tmp, p, 4);
+	return tmp;
+}
+
+static inline u64 __get_unaligned_memmove64(const void *p)
+{
+	u64 tmp;
+	memmove(&tmp, p, 8);
+	return tmp;
+}
+
+static inline void __put_unaligned_memmove16(u16 val, void *p)
+{
+	memmove(p, &val, 2);
+}
+
+static inline void __put_unaligned_memmove32(u32 val, void *p)
+{
+	memmove(p, &val, 4);
+}
+
+static inline void __put_unaligned_memmove64(u64 val, void *p)
+{
+	memmove(p, &val, 8);
+}
+
+#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
new file mode 100644
index 0000000..2498bb9
--- /dev/null
+++ b/include/linux/unaligned/packed_struct.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
+#define _LINUX_UNALIGNED_PACKED_STRUCT_H
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x __attribute__((packed)); };
+struct __una_u32 { u32 x __attribute__((packed)); };
+struct __una_u64 { u64 x __attribute__((packed)); };
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
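The le_*.h variants above expose the same get/put_unaligned_le{16,32,64} interface over different backends (byte shifts, memmove, or a packed struct), so an architecture can pick whichever its compiler and hardware handle best. A minimal sketch of a consumer, assuming the usual inclusion via <asm/unaligned.h>; parse_hdr(), build_hdr() and the field layout are hypothetical:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Hypothetical little-endian wire header: u16 type at offset 0,
	 * u32 length at offset 2 -- the length field is never 4-byte aligned. */
	static void parse_hdr(const u8 *buf, u16 *type, u32 *len)
	{
		*type = get_unaligned_le16(buf);
		*len  = get_unaligned_le32(buf + 2);
	}

	static void build_hdr(u8 *buf, u16 type, u32 len)
	{
		put_unaligned_le16(type, buf);
		put_unaligned_le32(len, buf + 2);
	}
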
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index df6b95d..d131e35 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -47,10 +47,10 @@
 };
 
 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
-ssize_t vfs_getxattr(struct dentry *, char *, void *, size_t);
+ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
-int vfs_setxattr(struct dentry *, char *, void *, size_t, int);
-int vfs_removexattr(struct dentry *, char *);
+int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
+int vfs_removexattr(struct dentry *, const char *);
 
 ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
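With the attribute name and value arguments const-qualified, callers can pass string literals and read-only buffers directly. A hypothetical caller, only to illustrate the new prototypes:

	static int tag_dentry(struct dentry *dentry, const char *text)
	{
		/* "user.comment" and text are const -- no casts needed now */
		return vfs_setxattr(dentry, "user.comment", text, strlen(text), 0);
	}
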
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 2229842..9ee0d2e 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -62,7 +62,7 @@
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-			    size_t size, int access);
+			    size_t size, int access, int dmasync);
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 
@@ -72,7 +72,7 @@
 
 static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
 					  unsigned long addr, size_t size,
-					  int access) {
+					  int access, int dmasync) {
 	return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
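The new dmasync argument lets a caller ask for the user memory to be mapped with stricter DMA ordering; how that request is honoured is up to the architecture's attribute-aware mapping routines. A hedged sketch of a caller registering a completion-queue buffer; map_cq_buf() is hypothetical:

	static struct ib_umem *map_cq_buf(struct ib_ucontext *context,
					  unsigned long addr, size_t size)
	{
		/* dmasync = 1: request ordered visibility of device writes */
		return ib_umem_get(context, addr, size, IB_ACCESS_LOCAL_WRITE, 1);
	}
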
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 2dcbecc..911a661 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1542,6 +1542,24 @@
 		dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
+static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+					  void *cpu_addr, size_t size,
+					  enum dma_data_direction direction,
+					  struct dma_attrs *attrs)
+{
+	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
+				    direction, attrs);
+}
+
+static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
+					     u64 addr, size_t size,
+					     enum dma_data_direction direction,
+					     struct dma_attrs *attrs)
+{
+	return dma_unmap_single_attrs(dev->dma_device, addr, size,
+				      direction, attrs);
+}
+
 /**
  * ib_dma_map_page - Map a physical page to DMA address
  * @dev: The device for which the dma_addr is to be created
@@ -1611,6 +1629,21 @@
 		dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+				      struct scatterlist *sg, int nents,
+				      enum dma_data_direction direction,
+				      struct dma_attrs *attrs)
+{
+	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+					 struct scatterlist *sg, int nents,
+					 enum dma_data_direction direction,
+					 struct dma_attrs *attrs)
+{
+	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
  * @dev: The device for which the DMA addresses were created
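These wrappers simply forward to the generic dma_*_attrs() calls on dev->dma_device, so upper-layer protocols can pass DMA attributes without knowing the mapping implementation. A minimal sketch, assuming the target architecture defines DMA_ATTR_WRITE_BARRIER; map_with_barrier() is hypothetical:

	#include <linux/dma-attrs.h>

	static int map_with_barrier(struct ib_device *dev,
				    struct scatterlist *sg, int nents)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
		return ib_dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);
	}
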
diff --git a/init/Kconfig b/init/Kconfig
index da071c4..3e7b257 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -259,17 +259,14 @@
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
 	range 12 21
-	default 17 if S390 || LOCKDEP
-	default 16 if X86_NUMAQ || IA64
-	default 15 if SMP
-	default 14
+	default 17
 	help
 	  Select kernel log buffer size as a power of 2.
-	  Defaults and Examples:
-	  	     17 => 128 KB for S/390
-		     16 => 64 KB for x86 NUMAQ or IA-64
-	             15 => 32 KB for SMP
-	             14 => 16 KB for uniprocessor
+	  Examples:
+	  	     17 => 128 KB
+		     16 => 64 KB
+	             15 => 32 KB
+	             14 => 16 KB
 		     13 =>  8 KB
 		     12 =>  4 KB
 
@@ -284,6 +281,7 @@
 config CGROUP_DEBUG
 	bool "Example debug cgroup subsystem"
 	depends on CGROUPS
+	default n
 	help
 	  This option enables a simple cgroup subsystem that
 	  exports useful debugging information about the cgroups
@@ -300,6 +298,13 @@
           for instance virtual servers and checkpoint/restart
           jobs.
 
+config CGROUP_DEVICE
+	bool "Device controller for cgroups"
+	depends on CGROUPS && EXPERIMENTAL
+	help
+	  Provides a cgroup implementing whitelists for devices which
+	  a process in the cgroup can mknod or open.
+
 config CPUSETS
 	bool "Cpuset support"
 	depends on SMP && CGROUPS
@@ -373,9 +378,13 @@
           infrastructure that works with cgroups
 	depends on CGROUPS
 
+config MM_OWNER
+	bool
+
 config CGROUP_MEM_RES_CTLR
 	bool "Memory Resource Controller for Control Groups"
 	depends on CGROUPS && RESOURCE_COUNTERS
+	select MM_OWNER
 	help
 	  Provides a memory resource controller that manages both page cache and
 	  RSS memory.
@@ -388,6 +397,9 @@
 	  Only enable when you're ok with these trade offs and really
 	  sure you need the memory resource controller.
 
+	  This config option also selects the MM_OWNER config option, which
+	  could in turn add some fork/exit overhead.
+
 config SYSFS_DEPRECATED
 	bool
 
@@ -538,6 +550,17 @@
 
 	  If unsure say Y here.
 
+config SYSCTL_SYSCALL_CHECK
+	bool "Sysctl checks" if EMBEDDED
+	depends on SYSCTL_SYSCALL
+	default y
+	---help---
+	  sys_sysctl uses binary paths that have been found challenging
+	  sys_sysctl uses binary paths that have proven challenging to maintain
+	  and use properly. This enables checks that help you keep things
+	  correct.
+	  If unsure say Y here.
+
 config KALLSYMS
 	 bool "Load all symbols for debugging/ksymoops" if EMBEDDED
 	 default y
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 753dc54..7473b0c 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -133,7 +133,7 @@
 		else
 			dev = MKDEV(MD_MAJOR, minor);
 		create_dev(name, dev);
-		for (i = 0; i < MD_SB_DISKS && devname != 0; i++) {
+		for (i = 0; i < MD_SB_DISKS && devname != NULL; i++) {
 			char *p;
 			char comp_name[64];
 			u32 rdev;
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 3ac5904..46dfd64 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -212,7 +212,7 @@
 	}
 
 	buf = kmalloc(BLOCK_SIZE, GFP_KERNEL);
-	if (buf == 0) {
+	if (!buf) {
 		printk(KERN_ERR "RAMDISK: could not allocate buffer\n");
 		goto done;
 	}
diff --git a/init/initramfs.c b/init/initramfs.c
index d53fee8..8eeeccb 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -57,7 +57,7 @@
 			continue;
 		return (*p)->name;
 	}
-	q = (struct hash *)malloc(sizeof(struct hash));
+	q = kmalloc(sizeof(struct hash), GFP_KERNEL);
 	if (!q)
 		panic("can't allocate link hash entry");
 	q->major = major;
@@ -77,7 +77,7 @@
 		while (*p) {
 			q = *p;
 			*p = q->next;
-			free(q);
+			kfree(q);
 		}
 	}
 }
@@ -445,10 +445,10 @@
 {
 	int written;
 	dry_run = check_only;
-	header_buf = malloc(110);
-	symlink_buf = malloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1);
-	name_buf = malloc(N_ALIGN(PATH_MAX));
-	window = malloc(WSIZE);
+	header_buf = kmalloc(110, GFP_KERNEL);
+	symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
+	name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
+	window = kmalloc(WSIZE, GFP_KERNEL);
 	if (!window || !header_buf || !symlink_buf || !name_buf)
 		panic("can't allocate buffers");
 	state = Start;
@@ -484,10 +484,10 @@
 		buf += inptr;
 		len -= inptr;
 	}
-	free(window);
-	free(name_buf);
-	free(symlink_buf);
-	free(header_buf);
+	kfree(window);
+	kfree(name_buf);
+	kfree(symlink_buf);
+	kfree(header_buf);
 	return message;
 }
 
diff --git a/init/main.c b/init/main.c
index 1687b01..624266b 100644
--- a/init/main.c
+++ b/init/main.c
@@ -58,6 +58,7 @@
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
+#include <linux/idr.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -559,6 +560,7 @@
 	printk(KERN_NOTICE);
 	printk(linux_banner);
 	setup_arch(&command_line);
+	mm_init_owner(&init_mm, &init_task);
 	setup_command_line(command_line);
 	unwind_setup();
 	setup_per_cpu_areas();
@@ -636,6 +638,7 @@
 	enable_debug_pagealloc();
 	cpu_hotplug_init();
 	kmem_cache_init();
+	idr_init_cache();
 	setup_per_cpu_pageset();
 	numa_policy_init();
 	if (late_time_init)
@@ -700,10 +703,8 @@
 		int result;
 
 		if (initcall_debug) {
-			printk("Calling initcall 0x%p", *call);
-			print_fn_descriptor_symbol(": %s()",
+			print_fn_descriptor_symbol("calling  %s()\n",
 					(unsigned long) *call);
-			printk("\n");
 			t0 = ktime_get();
 		}
 
@@ -713,15 +714,10 @@
 			t1 = ktime_get();
 			delta = ktime_sub(t1, t0);
 
-			printk("initcall 0x%p", *call);
-			print_fn_descriptor_symbol(": %s()",
+			print_fn_descriptor_symbol("initcall %s()",
 					(unsigned long) *call);
-			printk(" returned %d.\n", result);
-
-			printk("initcall 0x%p ran for %Ld msecs: ",
-				*call, (unsigned long long)delta.tv64 >> 20);
-			print_fn_descriptor_symbol("%s()\n",
-				(unsigned long) *call);
+			printk(" returned %d after %Ld msecs\n", result,
+				(unsigned long long) delta.tv64 >> 20);
 		}
 
 		if (result && result != -ENODEV && initcall_debug) {
@@ -737,10 +733,9 @@
 			local_irq_enable();
 		}
 		if (msg) {
-			printk(KERN_WARNING "initcall at 0x%p", *call);
-			print_fn_descriptor_symbol(": %s()",
+			print_fn_descriptor_symbol(KERN_WARNING "initcall %s()",
 					(unsigned long) *call);
-			printk(": returned with %s\n", msg);
+			printk(" returned with %s\n", msg);
 		}
 	}
 
diff --git a/ipc/Makefile b/ipc/Makefile
index 5fc5e33e..65c3843 100644
--- a/ipc/Makefile
+++ b/ipc/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o
-obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o
+obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o
 obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o
 obj_mq-$(CONFIG_COMPAT) += compat_mq.o
 obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 7f4235b..d349746 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -15,6 +15,8 @@
 #include <linux/sysctl.h>
 #include <linux/uaccess.h>
 #include <linux/ipc_namespace.h>
+#include <linux/msg.h>
+#include "util.h"
 
 static void *get_ipc(ctl_table *table)
 {
@@ -24,6 +26,27 @@
 	return which;
 }
 
+/*
+ * Routine that is called when a tunable has successfully been changed by
+ * hand and has a callback routine registered on the ipc namespace notifier
+ * chain: we don't want such tunables to be recomputed anymore upon memory
+ * add/remove or ipc namespace creation/removal.
+ * They can come back to a recomputable state by being set to a <0 value.
+ */
+static void tunable_set_callback(int val)
+{
+	if (val >= 0)
+		unregister_ipcns_notifier(current->nsproxy->ipc_ns);
+	else {
+		/*
+		 * Re-enable automatic recomputing only if not already
+		 * enabled.
+		 */
+		recompute_msgmni(current->nsproxy->ipc_ns);
+		cond_register_ipcns_notifier(current->nsproxy->ipc_ns);
+	}
+}
+
 #ifdef CONFIG_PROC_FS
 static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -35,6 +58,24 @@
 	return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
 }
 
+static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+	struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table ipc_table;
+	size_t lenp_bef = *lenp;
+	int rc;
+
+	memcpy(&ipc_table, table, sizeof(ipc_table));
+	ipc_table.data = get_ipc(table);
+
+	rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
+
+	if (write && !rc && lenp_bef == *lenp)
+		tunable_set_callback(*((int *)(ipc_table.data)));
+
+	return rc;
+}
+
 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
 	struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -49,6 +90,7 @@
 #else
 #define proc_ipc_doulongvec_minmax NULL
 #define proc_ipc_dointvec	   NULL
+#define proc_ipc_callback_dointvec NULL
 #endif
 
 #ifdef CONFIG_SYSCTL_SYSCALL
@@ -90,8 +132,30 @@
 	}
 	return 1;
 }
+
+static int sysctl_ipc_registered_data(ctl_table *table, int __user *name,
+		int nlen, void __user *oldval, size_t __user *oldlenp,
+		void __user *newval, size_t newlen)
+{
+	int rc;
+
+	rc = sysctl_ipc_data(table, name, nlen, oldval, oldlenp, newval,
+		newlen);
+
+	if (newval && newlen && rc > 0) {
+		/*
+		 * Tunable has successfully been changed from userland
+		 */
+		int *data = get_ipc(table);
+
+		tunable_set_callback(*data);
+	}
+
+	return rc;
+}
 #else
 #define sysctl_ipc_data NULL
+#define sysctl_ipc_registered_data NULL
 #endif
 
 static struct ctl_table ipc_kern_table[] = {
@@ -137,8 +201,8 @@
 		.data		= &init_ipc_ns.msg_ctlmni,
 		.maxlen		= sizeof (init_ipc_ns.msg_ctlmni),
 		.mode		= 0644,
-		.proc_handler	= proc_ipc_dointvec,
-		.strategy	= sysctl_ipc_data,
+		.proc_handler	= proc_ipc_callback_dointvec,
+		.strategy	= sysctl_ipc_registered_data,
 	},
 	{
 		.ctl_name	= KERN_MSGMNB,
diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c
new file mode 100644
index 0000000..70ff091
--- /dev/null
+++ b/ipc/ipcns_notifier.c
@@ -0,0 +1,82 @@
+/*
+ * linux/ipc/ipcns_notifier.c
+ * Copyright (C) 2007 BULL SA. Nadia Derbey
+ *
+ * Notification mechanism for ipc namespaces:
+ * The callback routine registered in the memory chain invokes the ipcns
+ * notifier chain with the IPCNS_MEMCHANGED event.
+ * Each callback routine registered in the ipcns namespace recomputes msgmni
+ * for the owning namespace.
+ */
+
+#include <linux/msg.h>
+#include <linux/rcupdate.h>
+#include <linux/notifier.h>
+#include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
+
+#include "util.h"
+
+
+
+static BLOCKING_NOTIFIER_HEAD(ipcns_chain);
+
+
+static int ipcns_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	struct ipc_namespace *ns;
+
+	switch (action) {
+	case IPCNS_MEMCHANGED:   /* amount of lowmem has changed */
+	case IPCNS_CREATED:
+	case IPCNS_REMOVED:
+		/*
+		 * It's time to recompute msgmni
+		 */
+		ns = container_of(self, struct ipc_namespace, ipcns_nb);
+		/*
+		 * No need to get a reference on the ns: the 1st job of
+		 * free_ipc_ns() is to unregister the callback routine.
+		 * blocking_notifier_chain_unregister takes the wr lock to do
+		 * it.
+		 * When this callback routine is called the rd lock is held by
+		 * blocking_notifier_call_chain.
+		 * So the ipc ns cannot be freed while we are here.
+		 */
+		recompute_msgmni(ns);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+int register_ipcns_notifier(struct ipc_namespace *ns)
+{
+	memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
+	ns->ipcns_nb.notifier_call = ipcns_callback;
+	ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
+	return blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb);
+}
+
+int cond_register_ipcns_notifier(struct ipc_namespace *ns)
+{
+	memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
+	ns->ipcns_nb.notifier_call = ipcns_callback;
+	ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
+	return blocking_notifier_chain_cond_register(&ipcns_chain,
+							&ns->ipcns_nb);
+}
+
+int unregister_ipcns_notifier(struct ipc_namespace *ns)
+{
+	return blocking_notifier_chain_unregister(&ipcns_chain,
+						&ns->ipcns_nb);
+}
+
+int ipcns_notify(unsigned long val)
+{
+	return blocking_notifier_call_chain(&ipcns_chain, val, NULL);
+}
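Anything that changes the amount of lowmem visible to IPC (or the number of namespaces) only has to fire one event; every registered namespace then recomputes its own msgmni through ipcns_callback(). A trivial sketch; example_mem_changed() is hypothetical:

	static void example_mem_changed(void)
	{
		/* ask every registered ipc namespace to recompute msgmni */
		ipcns_notify(IPCNS_MEMCHANGED);
	}
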
diff --git a/ipc/msg.c b/ipc/msg.c
index 46585a0..32494e8 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -27,6 +27,7 @@
 #include <linux/msg.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
 #include <linux/security.h>
@@ -70,7 +71,6 @@
 #define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
 
 #define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
-#define msg_buildid(id, seq)	ipc_buildid(id, seq)
 
 static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
 static int newque(struct ipc_namespace *, struct ipc_params *);
@@ -78,11 +78,49 @@
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
 #endif
 
+/*
+ * Scale msgmni with the available lowmem size: the memory dedicated to msg
+ * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
+ * Also take into account the number of nsproxies created so far.
+ * This should be done while staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
+ */
+void recompute_msgmni(struct ipc_namespace *ns)
+{
+	struct sysinfo i;
+	unsigned long allowed;
+	int nb_ns;
+
+	si_meminfo(&i);
+	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
+		/ MSGMNB;
+	nb_ns = atomic_read(&nr_ipc_ns);
+	allowed /= nb_ns;
+
+	if (allowed < MSGMNI) {
+		ns->msg_ctlmni = MSGMNI;
+		goto out_callback;
+	}
+
+	if (allowed > IPCMNI / nb_ns) {
+		ns->msg_ctlmni = IPCMNI / nb_ns;
+		goto out_callback;
+	}
+
+	ns->msg_ctlmni = allowed;
+
+out_callback:
+
+	printk(KERN_INFO "msgmni has been set to %d for ipc namespace %p\n",
+		ns->msg_ctlmni, ns);
+}
+
 void msg_init_ns(struct ipc_namespace *ns)
 {
 	ns->msg_ctlmax = MSGMAX;
 	ns->msg_ctlmnb = MSGMNB;
-	ns->msg_ctlmni = MSGMNI;
+
+	recompute_msgmni(ns);
+
 	atomic_set(&ns->msg_bytes, 0);
 	atomic_set(&ns->msg_hdrs, 0);
 	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
@@ -104,21 +142,6 @@
 }
 
 /*
- * This routine is called in the paths where the rw_mutex is held to protect
- * access to the idr tree.
- */
-static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
-						int id)
-{
-	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);
-
-	if (IS_ERR(ipcp))
-		return (struct msg_queue *)ipcp;
-
-	return container_of(ipcp, struct msg_queue, q_perm);
-}
-
-/*
  * msg_lock_(check_) routines are called in the paths where the rw_mutex
  * is not held.
  */
@@ -186,7 +209,6 @@
 		return id;
 	}
 
-	msq->q_perm.id = msg_buildid(id, msq->q_perm.seq);
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -324,19 +346,19 @@
 		out.msg_rtime		= in->msg_rtime;
 		out.msg_ctime		= in->msg_ctime;
 
-		if (in->msg_cbytes > USHRT_MAX)
-			out.msg_cbytes	= USHRT_MAX;
+		if (in->msg_cbytes > USHORT_MAX)
+			out.msg_cbytes	= USHORT_MAX;
 		else
 			out.msg_cbytes	= in->msg_cbytes;
 		out.msg_lcbytes		= in->msg_cbytes;
 
-		if (in->msg_qnum > USHRT_MAX)
-			out.msg_qnum	= USHRT_MAX;
+		if (in->msg_qnum > USHORT_MAX)
+			out.msg_qnum	= USHORT_MAX;
 		else
 			out.msg_qnum	= in->msg_qnum;
 
-		if (in->msg_qbytes > USHRT_MAX)
-			out.msg_qbytes	= USHRT_MAX;
+		if (in->msg_qbytes > USHORT_MAX)
+			out.msg_qbytes	= USHORT_MAX;
 		else
 			out.msg_qbytes	= in->msg_qbytes;
 		out.msg_lqbytes		= in->msg_qbytes;
@@ -351,31 +373,14 @@
 	}
 }
 
-struct msq_setbuf {
-	unsigned long	qbytes;
-	uid_t		uid;
-	gid_t		gid;
-	mode_t		mode;
-};
-
 static inline unsigned long
-copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
+copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
 {
 	switch(version) {
 	case IPC_64:
-	{
-		struct msqid64_ds tbuf;
-
-		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
+		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
-
-		out->qbytes		= tbuf.msg_qbytes;
-		out->uid		= tbuf.msg_perm.uid;
-		out->gid		= tbuf.msg_perm.gid;
-		out->mode		= tbuf.msg_perm.mode;
-
 		return 0;
-	}
 	case IPC_OLD:
 	{
 		struct msqid_ds tbuf_old;
@@ -383,14 +388,14 @@
 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
-		out->uid		= tbuf_old.msg_perm.uid;
-		out->gid		= tbuf_old.msg_perm.gid;
-		out->mode		= tbuf_old.msg_perm.mode;
+		out->msg_perm.uid      	= tbuf_old.msg_perm.uid;
+		out->msg_perm.gid      	= tbuf_old.msg_perm.gid;
+		out->msg_perm.mode     	= tbuf_old.msg_perm.mode;
 
 		if (tbuf_old.msg_qbytes == 0)
-			out->qbytes	= tbuf_old.msg_lqbytes;
+			out->msg_qbytes	= tbuf_old.msg_lqbytes;
 		else
-			out->qbytes	= tbuf_old.msg_qbytes;
+			out->msg_qbytes	= tbuf_old.msg_qbytes;
 
 		return 0;
 	}
@@ -399,10 +404,71 @@
 	}
 }
 
-asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+/*
+ * This function handles some msgctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+		       struct msqid_ds __user *buf, int version)
 {
 	struct kern_ipc_perm *ipcp;
-	struct msq_setbuf uninitialized_var(setbuf);
+	struct msqid64_ds msqid64;
+	struct msg_queue *msq;
+	int err;
+
+	if (cmd == IPC_SET) {
+		if (copy_msqid_from_user(&msqid64, buf, version))
+			return -EFAULT;
+	}
+
+	ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd,
+			       &msqid64.msg_perm, msqid64.msg_qbytes);
+	if (IS_ERR(ipcp))
+		return PTR_ERR(ipcp);
+
+	msq = container_of(ipcp, struct msg_queue, q_perm);
+
+	err = security_msg_queue_msgctl(msq, cmd);
+	if (err)
+		goto out_unlock;
+
+	switch (cmd) {
+	case IPC_RMID:
+		freeque(ns, ipcp);
+		goto out_up;
+	case IPC_SET:
+		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
+		    !capable(CAP_SYS_RESOURCE)) {
+			err = -EPERM;
+			goto out_unlock;
+		}
+
+		msq->q_qbytes = msqid64.msg_qbytes;
+
+		ipc_update_perm(&msqid64.msg_perm, ipcp);
+		msq->q_ctime = get_seconds();
+		/* sleeping receivers might be excluded by
+		 * stricter permissions.
+		 */
+		expunge_all(msq, -EAGAIN);
+		/* sleeping senders might be able to send
+		 * due to a larger queue size.
+		 */
+		ss_wakeup(&msq->q_senders, 0);
+		break;
+	default:
+		err = -EINVAL;
+	}
+out_unlock:
+	msg_unlock(msq);
+out_up:
+	up_write(&msg_ids(ns).rw_mutex);
+	return err;
+}
+
+asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+{
 	struct msg_queue *msq;
 	int err, version;
 	struct ipc_namespace *ns;
@@ -498,82 +564,13 @@
 		return success_return;
 	}
 	case IPC_SET:
-		if (!buf)
-			return -EFAULT;
-		if (copy_msqid_from_user(&setbuf, buf, version))
-			return -EFAULT;
-		break;
 	case IPC_RMID:
-		break;
+		err = msgctl_down(ns, msqid, cmd, buf, version);
+		return err;
 	default:
 		return  -EINVAL;
 	}
 
-	down_write(&msg_ids(ns).rw_mutex);
-	msq = msg_lock_check_down(ns, msqid);
-	if (IS_ERR(msq)) {
-		err = PTR_ERR(msq);
-		goto out_up;
-	}
-
-	ipcp = &msq->q_perm;
-
-	err = audit_ipc_obj(ipcp);
-	if (err)
-		goto out_unlock_up;
-	if (cmd == IPC_SET) {
-		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
-					 setbuf.mode);
-		if (err)
-			goto out_unlock_up;
-	}
-
-	err = -EPERM;
-	if (current->euid != ipcp->cuid &&
-	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
-		/* We _could_ check for CAP_CHOWN above, but we don't */
-		goto out_unlock_up;
-
-	err = security_msg_queue_msgctl(msq, cmd);
-	if (err)
-		goto out_unlock_up;
-
-	switch (cmd) {
-	case IPC_SET:
-	{
-		err = -EPERM;
-		if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
-			goto out_unlock_up;
-
-		msq->q_qbytes = setbuf.qbytes;
-
-		ipcp->uid = setbuf.uid;
-		ipcp->gid = setbuf.gid;
-		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
-			     (S_IRWXUGO & setbuf.mode);
-		msq->q_ctime = get_seconds();
-		/* sleeping receivers might be excluded by
-		 * stricter permissions.
-		 */
-		expunge_all(msq, -EAGAIN);
-		/* sleeping senders might be able to send
-		 * due to a larger queue size.
-		 */
-		ss_wakeup(&msq->q_senders, 0);
-		msg_unlock(msq);
-		break;
-	}
-	case IPC_RMID:
-		freeque(ns, &msq->q_perm);
-		break;
-	}
-	err = 0;
-out_up:
-	up_write(&msg_ids(ns).rw_mutex);
-	return err;
-out_unlock_up:
-	msg_unlock(msq);
-	goto out_up;
 out_unlock:
 	msg_unlock(msq);
 	return err;
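For reference, recompute_msgmni() caps message queues at 1/MSG_MEM_SCALE of lowmem, splits that evenly across ipc namespaces, and clamps the result to [MSGMNI, IPCMNI/nr_ipc_ns]. A worked example, assuming the usual constants (MSG_MEM_SCALE = 32, MSGMNB = 16384, MSGMNI = 16, IPCMNI = 32768 -- verify against your tree) on a machine with 1 GiB of lowmem and one ipc namespace:

	allowed = (1 GiB / 32) / 16384           = 2048 queues
	MSGMNI (16) < 2048 < IPCMNI / 1 (32768)  => msg_ctlmni = 2048
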
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 1b96765..9171d94 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -20,10 +20,20 @@
 	if (ns == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	atomic_inc(&nr_ipc_ns);
+
 	sem_init_ns(ns);
 	msg_init_ns(ns);
 	shm_init_ns(ns);
 
+	/*
+	 * msgmni has already been computed for the new ipc ns.
+	 * Thus, do the ipcns creation notification before registering that
+	 * new ipcns in the chain.
+	 */
+	ipcns_notify(IPCNS_CREATED);
+	register_ipcns_notifier(ns);
+
 	kref_init(&ns->kref);
 	return ns;
 }
@@ -79,8 +89,24 @@
 	struct ipc_namespace *ns;
 
 	ns = container_of(kref, struct ipc_namespace, kref);
+	/*
+	 * Unregistering the hotplug notifier at the beginning guarantees
+	 * that the ipc namespace won't be freed while we are inside the
+	 * callback routine. Since the blocking_notifier_chain_XXX routines
+	 * hold a rw lock on the notifier list, unregister_ipcns_notifier()
+	 * won't take the rw lock before blocking_notifier_call_chain() has
+	 * released the rd lock.
+	 */
+	unregister_ipcns_notifier(ns);
 	sem_exit_ns(ns);
 	msg_exit_ns(ns);
 	shm_exit_ns(ns);
 	kfree(ns);
+	atomic_dec(&nr_ipc_ns);
+
+	/*
+	 * Do the ipcns removal notification after decrementing nr_ipc_ns in
+	 * order to have a correct value when recomputing msgmni.
+	 */
+	ipcns_notify(IPCNS_REMOVED);
 }
diff --git a/ipc/sem.c b/ipc/sem.c
index 0b45a4d..e9418df 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -91,7 +91,6 @@
 
 #define sem_unlock(sma)		ipc_unlock(&(sma)->sem_perm)
 #define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)
-#define sem_buildid(id, seq)	ipc_buildid(id, seq)
 
 static int newary(struct ipc_namespace *, struct ipc_params *);
 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
@@ -142,21 +141,6 @@
 }
 
 /*
- * This routine is called in the paths where the rw_mutex is held to protect
- * access to the idr tree.
- */
-static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns,
-						int id)
-{
-	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id);
-
-	if (IS_ERR(ipcp))
-		return (struct sem_array *)ipcp;
-
-	return container_of(ipcp, struct sem_array, sem_perm);
-}
-
-/*
  * sem_lock_(check_) routines are called in the paths where the rw_mutex
  * is not held.
  */
@@ -181,6 +165,25 @@
 	return container_of(ipcp, struct sem_array, sem_perm);
 }
 
+static inline void sem_lock_and_putref(struct sem_array *sma)
+{
+	ipc_lock_by_ptr(&sma->sem_perm);
+	ipc_rcu_putref(sma);
+}
+
+static inline void sem_getref_and_unlock(struct sem_array *sma)
+{
+	ipc_rcu_getref(sma);
+	ipc_unlock(&(sma)->sem_perm);
+}
+
+static inline void sem_putref(struct sem_array *sma)
+{
+	ipc_lock_by_ptr(&sma->sem_perm);
+	ipc_rcu_putref(sma);
+	ipc_unlock(&(sma)->sem_perm);
+}
+
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 {
 	ipc_rmid(&sem_ids(ns), &s->sem_perm);
@@ -268,7 +271,6 @@
 	}
 	ns->used_sems += nsems;
 
-	sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq);
 	sma->sem_base = (struct sem *) &sma[1];
 	/* sma->sem_pending = NULL; */
 	sma->sem_pending_last = &sma->sem_pending;
@@ -700,19 +702,15 @@
 		int i;
 
 		if(nsems > SEMMSL_FAST) {
-			ipc_rcu_getref(sma);
-			sem_unlock(sma);			
+			sem_getref_and_unlock(sma);
 
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 			if(sem_io == NULL) {
-				ipc_lock_by_ptr(&sma->sem_perm);
-				ipc_rcu_putref(sma);
-				sem_unlock(sma);
+				sem_putref(sma);
 				return -ENOMEM;
 			}
 
-			ipc_lock_by_ptr(&sma->sem_perm);
-			ipc_rcu_putref(sma);
+			sem_lock_and_putref(sma);
 			if (sma->sem_perm.deleted) {
 				sem_unlock(sma);
 				err = -EIDRM;
@@ -733,38 +731,30 @@
 		int i;
 		struct sem_undo *un;
 
-		ipc_rcu_getref(sma);
-		sem_unlock(sma);
+		sem_getref_and_unlock(sma);
 
 		if(nsems > SEMMSL_FAST) {
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
 			if(sem_io == NULL) {
-				ipc_lock_by_ptr(&sma->sem_perm);
-				ipc_rcu_putref(sma);
-				sem_unlock(sma);
+				sem_putref(sma);
 				return -ENOMEM;
 			}
 		}
 
 		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
-			ipc_lock_by_ptr(&sma->sem_perm);
-			ipc_rcu_putref(sma);
-			sem_unlock(sma);
+			sem_putref(sma);
 			err = -EFAULT;
 			goto out_free;
 		}
 
 		for (i = 0; i < nsems; i++) {
 			if (sem_io[i] > SEMVMX) {
-				ipc_lock_by_ptr(&sma->sem_perm);
-				ipc_rcu_putref(sma);
-				sem_unlock(sma);
+				sem_putref(sma);
 				err = -ERANGE;
 				goto out_free;
 			}
 		}
-		ipc_lock_by_ptr(&sma->sem_perm);
-		ipc_rcu_putref(sma);
+		sem_lock_and_putref(sma);
 		if (sma->sem_perm.deleted) {
 			sem_unlock(sma);
 			err = -EIDRM;
@@ -830,28 +820,14 @@
 	return err;
 }
 
-struct sem_setbuf {
-	uid_t	uid;
-	gid_t	gid;
-	mode_t	mode;
-};
-
-static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
+static inline unsigned long
+copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 {
 	switch(version) {
 	case IPC_64:
-	    {
-		struct semid64_ds tbuf;
-
-		if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
+		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
-
-		out->uid	= tbuf.sem_perm.uid;
-		out->gid	= tbuf.sem_perm.gid;
-		out->mode	= tbuf.sem_perm.mode;
-
 		return 0;
-	    }
 	case IPC_OLD:
 	    {
 		struct semid_ds tbuf_old;
@@ -859,9 +835,9 @@
 		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
-		out->uid	= tbuf_old.sem_perm.uid;
-		out->gid	= tbuf_old.sem_perm.gid;
-		out->mode	= tbuf_old.sem_perm.mode;
+		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
+		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
+		out->sem_perm.mode	= tbuf_old.sem_perm.mode;
 
 		return 0;
 	    }
@@ -870,38 +846,29 @@
 	}
 }
 
-static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
-		int cmd, int version, union semun arg)
+/*
+ * This function handles some semctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int semctl_down(struct ipc_namespace *ns, int semid,
+		       int cmd, int version, union semun arg)
 {
 	struct sem_array *sma;
 	int err;
-	struct sem_setbuf uninitialized_var(setbuf);
+	struct semid64_ds semid64;
 	struct kern_ipc_perm *ipcp;
 
 	if(cmd == IPC_SET) {
-		if(copy_semid_from_user (&setbuf, arg.buf, version))
+		if (copy_semid_from_user(&semid64, arg.buf, version))
 			return -EFAULT;
 	}
-	sma = sem_lock_check_down(ns, semid);
-	if (IS_ERR(sma))
-		return PTR_ERR(sma);
 
-	ipcp = &sma->sem_perm;
+	ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
+	if (IS_ERR(ipcp))
+		return PTR_ERR(ipcp);
 
-	err = audit_ipc_obj(ipcp);
-	if (err)
-		goto out_unlock;
-
-	if (cmd == IPC_SET) {
-		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
-		if (err)
-			goto out_unlock;
-	}
-	if (current->euid != ipcp->cuid && 
-	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
-	    	err=-EPERM;
-		goto out_unlock;
-	}
+	sma = container_of(ipcp, struct sem_array, sem_perm);
 
 	err = security_sem_semctl(sma, cmd);
 	if (err)
@@ -910,26 +877,19 @@
 	switch(cmd){
 	case IPC_RMID:
 		freeary(ns, ipcp);
-		err = 0;
-		break;
+		goto out_up;
 	case IPC_SET:
-		ipcp->uid = setbuf.uid;
-		ipcp->gid = setbuf.gid;
-		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
-				| (setbuf.mode & S_IRWXUGO);
+		ipc_update_perm(&semid64.sem_perm, ipcp);
 		sma->sem_ctime = get_seconds();
-		sem_unlock(sma);
-		err = 0;
 		break;
 	default:
-		sem_unlock(sma);
 		err = -EINVAL;
-		break;
 	}
-	return err;
 
 out_unlock:
 	sem_unlock(sma);
+out_up:
+	up_write(&sem_ids(ns).rw_mutex);
 	return err;
 }
 
@@ -963,9 +923,7 @@
 		return err;
 	case IPC_RMID:
 	case IPC_SET:
-		down_write(&sem_ids(ns).rw_mutex);
-		err = semctl_down(ns,semid,semnum,cmd,version,arg);
-		up_write(&sem_ids(ns).rw_mutex);
+		err = semctl_down(ns, semid, cmd, version, arg);
 		return err;
 	default:
 		return -EINVAL;
@@ -1044,14 +1002,11 @@
 		return ERR_PTR(PTR_ERR(sma));
 
 	nsems = sma->sem_nsems;
-	ipc_rcu_getref(sma);
-	sem_unlock(sma);
+	sem_getref_and_unlock(sma);
 
 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
-		ipc_lock_by_ptr(&sma->sem_perm);
-		ipc_rcu_putref(sma);
-		sem_unlock(sma);
+		sem_putref(sma);
 		return ERR_PTR(-ENOMEM);
 	}
 	new->semadj = (short *) &new[1];
@@ -1062,13 +1017,10 @@
 	if (un) {
 		spin_unlock(&ulp->lock);
 		kfree(new);
-		ipc_lock_by_ptr(&sma->sem_perm);
-		ipc_rcu_putref(sma);
-		sem_unlock(sma);
+		sem_putref(sma);
 		goto out;
 	}
-	ipc_lock_by_ptr(&sma->sem_perm);
-	ipc_rcu_putref(sma);
+	sem_lock_and_putref(sma);
 	if (sma->sem_perm.deleted) {
 		sem_unlock(sma);
 		spin_unlock(&ulp->lock);
@@ -1298,6 +1250,7 @@
 	undo_list = tsk->sysvsem.undo_list;
 	if (!undo_list)
 		return;
+	tsk->sysvsem.undo_list = NULL;
 
 	if (!atomic_dec_and_test(&undo_list->refcnt))
 		return;
diff --git a/ipc/shm.c b/ipc/shm.c
index e636910..554429a 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -60,7 +60,6 @@
 
 #define shm_unlock(shp)			\
 	ipc_unlock(&(shp)->shm_perm)
-#define shm_buildid(id, seq)	ipc_buildid(id, seq)
 
 static int newseg(struct ipc_namespace *, struct ipc_params *);
 static void shm_open(struct vm_area_struct *vma);
@@ -127,18 +126,6 @@
 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
-static inline struct shmid_kernel *shm_lock_check_down(
-						struct ipc_namespace *ns,
-						int id)
-{
-	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);
-
-	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
-
-	return container_of(ipcp, struct shmid_kernel, shm_perm);
-}
-
 /*
  * shm_lock_(check_) routines are called in the paths where the rw_mutex
  * is not held.
@@ -169,12 +156,6 @@
 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
 }
 
-static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
-{
-	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-}
-
-
 
 /* This is called by fork, once for every shm attach. */
 static void shm_open(struct vm_area_struct *vma)
@@ -416,7 +397,7 @@
 	if (IS_ERR(file))
 		goto no_file;
 
-	id = shm_addid(ns, shp);
+	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
 	if (id < 0) {
 		error = id;
 		goto no_id;
@@ -428,7 +409,6 @@
 	shp->shm_ctim = get_seconds();
 	shp->shm_segsz = size;
 	shp->shm_nattch = 0;
-	shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
 	shp->shm_file = file;
 	/*
 	 * shmid gets reported as "inode#" in /proc/pid/maps.
@@ -519,28 +499,14 @@
 	}
 }
 
-struct shm_setbuf {
-	uid_t	uid;
-	gid_t	gid;
-	mode_t	mode;
-};	
-
-static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
+static inline unsigned long
+copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 {
 	switch(version) {
 	case IPC_64:
-	    {
-		struct shmid64_ds tbuf;
-
-		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
+		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
-
-		out->uid	= tbuf.shm_perm.uid;
-		out->gid	= tbuf.shm_perm.gid;
-		out->mode	= tbuf.shm_perm.mode;
-
 		return 0;
-	    }
 	case IPC_OLD:
 	    {
 		struct shmid_ds tbuf_old;
@@ -548,9 +514,9 @@
 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
-		out->uid	= tbuf_old.shm_perm.uid;
-		out->gid	= tbuf_old.shm_perm.gid;
-		out->mode	= tbuf_old.shm_perm.mode;
+		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
+		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
+		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
 
 		return 0;
 	    }
@@ -624,9 +590,53 @@
 	}
 }
 
-asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
+/*
+ * This function handles some shmctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
+		       struct shmid_ds __user *buf, int version)
 {
-	struct shm_setbuf setbuf;
+	struct kern_ipc_perm *ipcp;
+	struct shmid64_ds shmid64;
+	struct shmid_kernel *shp;
+	int err;
+
+	if (cmd == IPC_SET) {
+		if (copy_shmid_from_user(&shmid64, buf, version))
+			return -EFAULT;
+	}
+
+	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
+	if (IS_ERR(ipcp))
+		return PTR_ERR(ipcp);
+
+	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
+
+	err = security_shm_shmctl(shp, cmd);
+	if (err)
+		goto out_unlock;
+	switch (cmd) {
+	case IPC_RMID:
+		do_shm_rmid(ns, ipcp);
+		goto out_up;
+	case IPC_SET:
+		ipc_update_perm(&shmid64.shm_perm, ipcp);
+		shp->shm_ctim = get_seconds();
+		break;
+	default:
+		err = -EINVAL;
+	}
+out_unlock:
+	shm_unlock(shp);
+out_up:
+	up_write(&shm_ids(ns).rw_mutex);
+	return err;
+}
+
+asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
+{
 	struct shmid_kernel *shp;
 	int err, version;
 	struct ipc_namespace *ns;
@@ -783,97 +793,13 @@
 		goto out;
 	}
 	case IPC_RMID:
-	{
-		/*
-		 *	We cannot simply remove the file. The SVID states
-		 *	that the block remains until the last person
-		 *	detaches from it, then is deleted. A shmat() on
-		 *	an RMID segment is legal in older Linux and if 
-		 *	we change it apps break...
-		 *
-		 *	Instead we set a destroyed flag, and then blow
-		 *	the name away when the usage hits zero.
-		 */
-		down_write(&shm_ids(ns).rw_mutex);
-		shp = shm_lock_check_down(ns, shmid);
-		if (IS_ERR(shp)) {
-			err = PTR_ERR(shp);
-			goto out_up;
-		}
-
-		err = audit_ipc_obj(&(shp->shm_perm));
-		if (err)
-			goto out_unlock_up;
-
-		if (current->euid != shp->shm_perm.uid &&
-		    current->euid != shp->shm_perm.cuid && 
-		    !capable(CAP_SYS_ADMIN)) {
-			err=-EPERM;
-			goto out_unlock_up;
-		}
-
-		err = security_shm_shmctl(shp, cmd);
-		if (err)
-			goto out_unlock_up;
-
-		do_shm_rmid(ns, &shp->shm_perm);
-		up_write(&shm_ids(ns).rw_mutex);
-		goto out;
-	}
-
 	case IPC_SET:
-	{
-		if (!buf) {
-			err = -EFAULT;
-			goto out;
-		}
-
-		if (copy_shmid_from_user (&setbuf, buf, version)) {
-			err = -EFAULT;
-			goto out;
-		}
-		down_write(&shm_ids(ns).rw_mutex);
-		shp = shm_lock_check_down(ns, shmid);
-		if (IS_ERR(shp)) {
-			err = PTR_ERR(shp);
-			goto out_up;
-		}
-		err = audit_ipc_obj(&(shp->shm_perm));
-		if (err)
-			goto out_unlock_up;
-		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
-		if (err)
-			goto out_unlock_up;
-		err=-EPERM;
-		if (current->euid != shp->shm_perm.uid &&
-		    current->euid != shp->shm_perm.cuid && 
-		    !capable(CAP_SYS_ADMIN)) {
-			goto out_unlock_up;
-		}
-
-		err = security_shm_shmctl(shp, cmd);
-		if (err)
-			goto out_unlock_up;
-		
-		shp->shm_perm.uid = setbuf.uid;
-		shp->shm_perm.gid = setbuf.gid;
-		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
-			| (setbuf.mode & S_IRWXUGO);
-		shp->shm_ctim = get_seconds();
-		break;
-	}
-
+		err = shmctl_down(ns, shmid, cmd, buf, version);
+		return err;
 	default:
-		err = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
-	err = 0;
-out_unlock_up:
-	shm_unlock(shp);
-out_up:
-	up_write(&shm_ids(ns).rw_mutex);
-	goto out;
 out_unlock:
 	shm_unlock(shp);
 out:
diff --git a/ipc/util.c b/ipc/util.c
index fd1b50d..3339177 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -33,6 +33,7 @@
 #include <linux/audit.h>
 #include <linux/nsproxy.h>
 #include <linux/rwsem.h>
+#include <linux/memory.h>
 #include <linux/ipc_namespace.h>
 
 #include <asm/unistd.h>
@@ -52,11 +53,57 @@
 	},
 };
 
+atomic_t nr_ipc_ns = ATOMIC_INIT(1);
+
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+static void ipc_memory_notifier(struct work_struct *work)
+{
+	ipcns_notify(IPCNS_MEMCHANGED);
+}
+
+static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
+
+
+static int ipc_memory_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	switch (action) {
+	case MEM_ONLINE:    /* memory successfully brought online */
+	case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
+		/*
+		 * This is done by invoking the ipcns notifier chain with the
+		 * IPCNS_MEMCHANGED event.
+		 * In order not to keep the lock on the hotplug memory chain
+		 * for too long, queue a work item that will, when woken up,
+		 * activate the ipcns notification chain.
+		 * No need to keep several ipc work items on the queue.
+		 */
+		if (!work_pending(&ipc_memory_wq))
+			schedule_work(&ipc_memory_wq);
+		break;
+	case MEM_GOING_ONLINE:
+	case MEM_GOING_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 /**
  *	ipc_init	-	initialise IPC subsystem
  *
  *	The various system5 IPC resources (semaphores, messages and shared
  *	memory) are initialised
+ *	A callback routine is registered into the memory hotplug notifier
+ *	chain: since msgmni scales with lowmem, this callback routine will be
+ *	called upon successful memory add / remove to recompute msgmni.
  */
  
 static int __init ipc_init(void)
@@ -64,6 +111,8 @@
 	sem_init();
 	msg_init();
 	shm_init();
+	hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
+	register_ipcns_notifier(&init_ipc_ns);
 	return 0;
 }
 __initcall(ipc_init);
@@ -84,8 +133,8 @@
 	ids->seq = 0;
 	{
 		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
-		if(seq_limit > USHRT_MAX)
-			ids->seq_max = USHRT_MAX;
+		if (seq_limit > USHORT_MAX)
+			ids->seq_max = USHORT_MAX;
 		 else
 		 	ids->seq_max = seq_limit;
 	}
@@ -116,13 +165,12 @@
 	iface->ids	= ids;
 	iface->show	= show;
 
-	pde = create_proc_entry(path,
-				S_IRUGO,        /* world readable */
-				NULL            /* parent dir */);
-	if (pde) {
-		pde->data = iface;
-		pde->proc_fops = &sysvipc_proc_fops;
-	} else {
+	pde = proc_create_data(path,
+			       S_IRUGO,        /* world readable */
+			       NULL,           /* parent dir */
+			       &sysvipc_proc_fops,
+			       iface);
+	if (!pde) {
 		kfree(iface);
 	}
 }
@@ -231,6 +279,7 @@
 	if(ids->seq > ids->seq_max)
 		ids->seq = 0;
 
+	new->id = ipc_buildid(id, new->seq);
 	spin_lock_init(&new->lock);
 	new->deleted = 0;
 	rcu_read_lock();
@@ -761,6 +810,70 @@
 		return ipcget_public(ns, ids, ops, params);
 }
 
+/**
+ * ipc_update_perm - update the permissions of an IPC.
+ * @in:  the permission given as input.
+ * @out: the permission of the ipc to set.
+ */
+void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
+{
+	out->uid = in->uid;
+	out->gid = in->gid;
+	out->mode = (out->mode & ~S_IRWXUGO)
+		| (in->mode & S_IRWXUGO);
+}
+
+/**
+ * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ids:  the table of ids where to look for the ipc
+ * @id:   the id of the ipc to retrieve
+ * @cmd:  the cmd to check
+ * @perm: the permission to set
+ * @extra_perm: one extra permission parameter used by msq
+ *
+ * This function does some common audit and permissions check for some IPC_XXX
+ * cmd and is called from semctl_down, shmctl_down and msgctl_down.
+ * It must be called without any lock held and
+ *  - retrieves the ipc with the given id in the given table.
+ *  - performs some audit and permission check, depending on the given cmd
+ *  - returns the ipc with both ipc and rw_mutex locks held in case of success
+ *    or an err-code without any lock held otherwise.
+ */
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+				      struct ipc64_perm *perm, int extra_perm)
+{
+	struct kern_ipc_perm *ipcp;
+	int err;
+
+	down_write(&ids->rw_mutex);
+	ipcp = ipc_lock_check_down(ids, id);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto out_up;
+	}
+
+	err = audit_ipc_obj(ipcp);
+	if (err)
+		goto out_unlock;
+
+	if (cmd == IPC_SET) {
+		err = audit_ipc_set_perm(extra_perm, perm->uid,
+					 perm->gid, perm->mode);
+		if (err)
+			goto out_unlock;
+	}
+	if (current->euid == ipcp->cuid ||
+	    current->euid == ipcp->uid || capable(CAP_SYS_ADMIN))
+		return ipcp;
+
+	err = -EPERM;
+out_unlock:
+	ipc_unlock(ipcp);
+out_up:
+	up_write(&ids->rw_mutex);
+	return ERR_PTR(err);
+}
+
 #ifdef __ARCH_WANT_IPC_PARSE_VERSION
 
 
diff --git a/ipc/util.h b/ipc/util.h
index f37d160..cdb966a 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -12,7 +12,6 @@
 
 #include <linux/err.h>
 
-#define USHRT_MAX 0xffff
 #define SEQ_MULTIPLIER	(IPCMNI)
 
 void sem_init (void);
@@ -112,6 +111,9 @@
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
+void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+				      struct ipc64_perm *perm, int extra_perm);
 
 #if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__)
   /* On IA-64, we always use the "64-bit version" of the IPC structures.  */ 
@@ -124,6 +126,8 @@
 extern struct msg_msg *load_msg(const void __user *src, int len);
 extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
 
+extern void recompute_msgmni(struct ipc_namespace *);
+
 static inline int ipc_buildid(int id, int seq)
 {
 	return SEQ_MULTIPLIER * seq + id;
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c5f081..188c432 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,7 +11,7 @@
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o
 
-obj-$(CONFIG_SYSCTL) += sysctl_check.o
+obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 28fef6b..1343017 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -272,7 +272,7 @@
 		return -EINVAL;
 
 	watch = audit_init_watch(path);
-	if (unlikely(IS_ERR(watch)))
+	if (IS_ERR(watch))
 		return PTR_ERR(watch);
 
 	audit_get_watch(watch);
@@ -848,7 +848,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	new = audit_init_watch(path);
-	if (unlikely(IS_ERR(new))) {
+	if (IS_ERR(new)) {
 		kfree(path);
 		goto out;
 	}
@@ -989,7 +989,7 @@
 			audit_set_auditable(current->audit_context);
 
 		nwatch = audit_dupe_watch(owatch);
-		if (unlikely(IS_ERR(nwatch))) {
+		if (IS_ERR(nwatch)) {
 			mutex_unlock(&audit_filter_mutex);
 			audit_panic("error updating watch, skipping");
 			return;
@@ -1004,7 +1004,7 @@
 			list_del_rcu(&oentry->list);
 
 			nentry = audit_dupe_rule(&oentry->rule, nwatch);
-			if (unlikely(IS_ERR(nentry)))
+			if (IS_ERR(nentry))
 				audit_panic("error updating watch, removing");
 			else {
 				int h = audit_hash_ino((u32)ino);
@@ -1785,7 +1785,7 @@
 			watch = entry->rule.watch;
 			tree = entry->rule.tree;
 			nentry = audit_dupe_rule(&entry->rule, watch);
-			if (unlikely(IS_ERR(nentry))) {
+			if (IS_ERR(nentry)) {
 				/* save the first error encountered for the
 				 * return value */
 				if (!err)
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c3c5554..3c53013 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -8,11 +8,7 @@
 /* Include headers that define the enum constants of interest */
 #include <linux/page-flags.h>
 #include <linux/mmzone.h>
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
+#include <linux/kbuild.h>
 
 void foo(void)
 {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6d8de05..b9d467d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -44,6 +44,7 @@
 #include <linux/kmod.h>
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
+#include <linux/hash.h>
 
 #include <asm/atomic.h>
 
@@ -118,17 +119,7 @@
  * be called.
  */
 static int need_forkexit_callback;
-
-/* bits in struct cgroup flags field */
-enum {
-	/* Control Group is dead */
-	CGRP_REMOVED,
-	/* Control Group has previously had a child cgroup or a task,
-	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
-	CGRP_RELEASABLE,
-	/* Control Group requires release notifications to userspace */
-	CGRP_NOTIFY_ON_RELEASE,
-};
+static int need_mm_owner_callback __read_mostly;
 
 /* convenient tests for these bits */
 inline int cgroup_is_removed(const struct cgroup *cgrp)
@@ -204,6 +195,27 @@
 static DEFINE_RWLOCK(css_set_lock);
 static int css_set_count;
 
+/* hash table for cgroup groups. This improves the performance to
+ * find an existing css_set */
+#define CSS_SET_HASH_BITS	7
+#define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
+static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
+
+static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
+{
+	int i;
+	int index;
+	unsigned long tmp = 0UL;
+
+	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
+		tmp += (unsigned long)css[i];
+	tmp = (tmp >> 16) ^ tmp;
+
+	index = hash_long(tmp, CSS_SET_HASH_BITS);
+
+	return &css_set_table[index];
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
@@ -230,7 +242,7 @@
 static void unlink_css_set(struct css_set *cg)
 {
 	write_lock(&css_set_lock);
-	list_del(&cg->list);
+	hlist_del(&cg->hlist);
 	css_set_count--;
 	while (!list_empty(&cg->cg_links)) {
 		struct cg_cgroup_link *link;
@@ -295,9 +307,7 @@
 /*
  * find_existing_css_set() is a helper for
  * find_css_set(), and checks to see whether an existing
- * css_set is suitable. This currently walks a linked-list for
- * simplicity; a later patch will use a hash table for better
- * performance
+ * css_set is suitable.
  *
  * oldcg: the cgroup group that we're using before the cgroup
  * transition
@@ -314,7 +324,9 @@
 {
 	int i;
 	struct cgroupfs_root *root = cgrp->root;
-	struct list_head *l = &init_css_set.list;
+	struct hlist_head *hhead;
+	struct hlist_node *node;
+	struct css_set *cg;
 
 	/* Built the set of subsystem state objects that we want to
 	 * see in the new css_set */
@@ -331,18 +343,13 @@
 		}
 	}
 
-	/* Look through existing cgroup groups to find one to reuse */
-	do {
-		struct css_set *cg =
-			list_entry(l, struct css_set, list);
-
+	hhead = css_set_hash(template);
+	hlist_for_each_entry(cg, node, hhead, hlist) {
 		if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
 			/* All subsystems matched */
 			return cg;
 		}
-		/* Try the next cgroup group */
-		l = l->next;
-	} while (l != &init_css_set.list);
+	}
 
 	/* No existing cgroup group matched */
 	return NULL;
@@ -404,6 +411,8 @@
 	struct list_head tmp_cg_links;
 	struct cg_cgroup_link *link;
 
+	struct hlist_head *hhead;
+
 	/* First see if we already have a cgroup group that matches
 	 * the desired set */
 	write_lock(&css_set_lock);
@@ -428,6 +437,7 @@
 	kref_init(&res->ref);
 	INIT_LIST_HEAD(&res->cg_links);
 	INIT_LIST_HEAD(&res->tasks);
+	INIT_HLIST_NODE(&res->hlist);
 
 	/* Copy the set of subsystem state objects generated in
 	 * find_existing_css_set() */
@@ -467,9 +477,12 @@
 
 	BUG_ON(!list_empty(&tmp_cg_links));
 
-	/* Link this cgroup group into the list */
-	list_add(&res->list, &init_css_set.list);
 	css_set_count++;
+
+	/* Add this cgroup group to the hash table */
+	hhead = css_set_hash(res->subsys);
+	hlist_add_head(&res->hlist, hhead);
+
 	write_unlock(&css_set_lock);
 
 	return res;
@@ -948,7 +961,7 @@
 	int ret = 0;
 	struct super_block *sb;
 	struct cgroupfs_root *root;
-	struct list_head tmp_cg_links, *l;
+	struct list_head tmp_cg_links;
 	INIT_LIST_HEAD(&tmp_cg_links);
 
 	/* First find the desired set of subsystems */
@@ -990,6 +1003,7 @@
 		/* New superblock */
 		struct cgroup *cgrp = &root->top_cgroup;
 		struct inode *inode;
+		int i;
 
 		BUG_ON(sb->s_root != NULL);
 
@@ -1034,22 +1048,25 @@
 		/* Link the top cgroup in this hierarchy into all
 		 * the css_set objects */
 		write_lock(&css_set_lock);
-		l = &init_css_set.list;
-		do {
+		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
+			struct hlist_head *hhead = &css_set_table[i];
+			struct hlist_node *node;
 			struct css_set *cg;
-			struct cg_cgroup_link *link;
-			cg = list_entry(l, struct css_set, list);
-			BUG_ON(list_empty(&tmp_cg_links));
-			link = list_entry(tmp_cg_links.next,
-					  struct cg_cgroup_link,
-					  cgrp_link_list);
-			list_del(&link->cgrp_link_list);
-			link->cg = cg;
-			list_add(&link->cgrp_link_list,
-				 &root->top_cgroup.css_sets);
-			list_add(&link->cg_link_list, &cg->cg_links);
-			l = l->next;
-		} while (l != &init_css_set.list);
+
+			hlist_for_each_entry(cg, node, hhead, hlist) {
+				struct cg_cgroup_link *link;
+
+				BUG_ON(list_empty(&tmp_cg_links));
+				link = list_entry(tmp_cg_links.next,
+						  struct cg_cgroup_link,
+						  cgrp_link_list);
+				list_del(&link->cgrp_link_list);
+				link->cg = cg;
+				list_add(&link->cgrp_link_list,
+					 &root->top_cgroup.css_sets);
+				list_add(&link->cg_link_list, &cg->cg_links);
+			}
+		}
 		write_unlock(&css_set_lock);
 
 		free_cg_links(&tmp_cg_links);
@@ -1307,18 +1324,16 @@
 	FILE_DIR,
 	FILE_TASKLIST,
 	FILE_NOTIFY_ON_RELEASE,
-	FILE_RELEASABLE,
 	FILE_RELEASE_AGENT,
 };
 
-static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft,
-				 struct file *file,
-				 const char __user *userbuf,
-				 size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
+				struct file *file,
+				const char __user *userbuf,
+				size_t nbytes, loff_t *unused_ppos)
 {
 	char buffer[64];
 	int retval = 0;
-	u64 val;
 	char *end;
 
 	if (!nbytes)
@@ -1329,16 +1344,18 @@
 		return -EFAULT;
 
 	buffer[nbytes] = 0;     /* nul-terminate */
-
-	/* strip newline if necessary */
-	if (nbytes && (buffer[nbytes-1] == '\n'))
-		buffer[nbytes-1] = 0;
-	val = simple_strtoull(buffer, &end, 0);
-	if (*end)
-		return -EINVAL;
-
-	/* Pass to subsystem */
-	retval = cft->write_uint(cgrp, cft, val);
+	strstrip(buffer);
+	if (cft->write_u64) {
+		u64 val = simple_strtoull(buffer, &end, 0);
+		if (*end)
+			return -EINVAL;
+		retval = cft->write_u64(cgrp, cft, val);
+	} else {
+		s64 val = simple_strtoll(buffer, &end, 0);
+		if (*end)
+			return -EINVAL;
+		retval = cft->write_s64(cgrp, cft, val);
+	}
 	if (!retval)
 		retval = nbytes;
 	return retval;
@@ -1419,23 +1436,39 @@
 		return -ENODEV;
 	if (cft->write)
 		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
-	if (cft->write_uint)
-		return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos);
+	if (cft->write_u64 || cft->write_s64)
+		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+	if (cft->trigger) {
+		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+		return ret ? ret : nbytes;
+	}
 	return -EINVAL;
 }
 
-static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft,
-				   struct file *file,
-				   char __user *buf, size_t nbytes,
-				   loff_t *ppos)
+static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
+			       struct file *file,
+			       char __user *buf, size_t nbytes,
+			       loff_t *ppos)
 {
 	char tmp[64];
-	u64 val = cft->read_uint(cgrp, cft);
+	u64 val = cft->read_u64(cgrp, cft);
 	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
 
 	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
+static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
+			       struct file *file,
+			       char __user *buf, size_t nbytes,
+			       loff_t *ppos)
+{
+	char tmp[64];
+	s64 val = cft->read_s64(cgrp, cft);
+	int len = sprintf(tmp, "%lld\n", (long long) val);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+}
+
 static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
 					  struct cftype *cft,
 					  struct file *file,
@@ -1490,11 +1523,56 @@
 
 	if (cft->read)
 		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
-	if (cft->read_uint)
-		return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos);
+	if (cft->read_u64)
+		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
+	if (cft->read_s64)
+		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
 	return -EINVAL;
 }
 
+/*
+ * seqfile ops/methods for returning structured data. Currently just
+ * supports string->u64 maps, but can be extended in future.
+ */
+
+struct cgroup_seqfile_state {
+	struct cftype *cft;
+	struct cgroup *cgroup;
+};
+
+static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
+{
+	struct seq_file *sf = cb->state;
+	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
+}
+
+static int cgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+	struct cgroup_seqfile_state *state = m->private;
+	struct cftype *cft = state->cft;
+	if (cft->read_map) {
+		struct cgroup_map_cb cb = {
+			.fill = cgroup_map_add,
+			.state = m,
+		};
+		return cft->read_map(state->cgroup, cft, &cb);
+	}
+	return cft->read_seq_string(state->cgroup, cft, m);
+}
+
+int cgroup_seqfile_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	kfree(seq->private);
+	return single_release(inode, file);
+}
+
+static struct file_operations cgroup_seqfile_operations = {
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = cgroup_seqfile_release,
+};
+
 static int cgroup_file_open(struct inode *inode, struct file *file)
 {
 	int err;
@@ -1507,7 +1585,18 @@
 	cft = __d_cft(file->f_dentry);
 	if (!cft)
 		return -ENODEV;
-	if (cft->open)
+	if (cft->read_map || cft->read_seq_string) {
+		struct cgroup_seqfile_state *state =
+			kzalloc(sizeof(*state), GFP_USER);
+		if (!state)
+			return -ENOMEM;
+		state->cft = cft;
+		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
+		file->f_op = &cgroup_seqfile_operations;
+		err = single_open(file, cgroup_seqfile_show, state);
+		if (err < 0)
+			kfree(state);
+	} else if (cft->open)
 		err = cft->open(inode, file);
 	else
 		err = 0;
@@ -1715,7 +1804,7 @@
  * The tasklist_lock is not held here, as do_each_thread() and
  * while_each_thread() are protected by RCU.
  */
-void cgroup_enable_task_cg_lists(void)
+static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
 	write_lock(&css_set_lock);
@@ -1913,14 +2002,14 @@
 
 	if (heap->size) {
 		for (i = 0; i < heap->size; i++) {
-			struct task_struct *p = heap->ptrs[i];
+			struct task_struct *q = heap->ptrs[i];
 			if (i == 0) {
-				latest_time = p->start_time;
-				latest_task = p;
+				latest_time = q->start_time;
+				latest_task = q;
 			}
 			/* Process the task per the caller's callback */
-			scan->process_task(p, scan);
-			put_task_struct(p);
+			scan->process_task(q, scan);
+			put_task_struct(q);
 		}
 		/*
 		 * If we had to process any tasks at all, scan again
@@ -2138,11 +2227,6 @@
 	return notify_on_release(cgrp);
 }
 
-static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft)
-{
-	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
-}
-
 /*
  * for the common functions, 'private' gives the type of file
  */
@@ -2158,16 +2242,10 @@
 
 	{
 		.name = "notify_on_release",
-		.read_uint = cgroup_read_notify_on_release,
+		.read_u64 = cgroup_read_notify_on_release,
 		.write = cgroup_common_file_write,
 		.private = FILE_NOTIFY_ON_RELEASE,
 	},
-
-	{
-		.name = "releasable",
-		.read_uint = cgroup_read_releasable,
-		.private = FILE_RELEASABLE,
-	}
 };
 
 static struct cftype cft_release_agent = {
@@ -2401,10 +2479,9 @@
 	return 0;
 }
 
-static void cgroup_init_subsys(struct cgroup_subsys *ss)
+static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 {
 	struct cgroup_subsys_state *css;
-	struct list_head *l;
 
 	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
@@ -2415,34 +2492,19 @@
 	BUG_ON(IS_ERR(css));
 	init_cgroup_css(css, ss, dummytop);
 
-	/* Update all cgroup groups to contain a subsys
+	/* Update the init_css_set to contain a subsys
 	 * pointer to this state - since the subsystem is
-	 * newly registered, all tasks and hence all cgroup
-	 * groups are in the subsystem's top cgroup. */
-	write_lock(&css_set_lock);
-	l = &init_css_set.list;
-	do {
-		struct css_set *cg =
-			list_entry(l, struct css_set, list);
-		cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
-		l = l->next;
-	} while (l != &init_css_set.list);
-	write_unlock(&css_set_lock);
-
- 	/* If this subsystem requested that it be notified with fork
- 	 * events, we should send it one now for every process in the
- 	 * system */
-	if (ss->fork) {
-		struct task_struct *g, *p;
-
-		read_lock(&tasklist_lock);
-		do_each_thread(g, p) {
-			ss->fork(ss, p);
-		} while_each_thread(g, p);
-		read_unlock(&tasklist_lock);
-	}
+	 * newly registered, all tasks are in the subsystem's
+	 * top cgroup, and hence so is the init_css_set. */
+	init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
 
 	need_forkexit_callback |= ss->fork || ss->exit;
+	need_mm_owner_callback |= !!ss->mm_owner_changed;
+
+	/* At system boot, before all subsystems have been
+	 * registered, no tasks have been forked, so we don't
+	 * need to invoke fork callbacks here. */
+	BUG_ON(!list_empty(&init_task.tasks));
 
 	ss->active = 1;
 }
@@ -2458,9 +2520,9 @@
 	int i;
 	kref_init(&init_css_set.ref);
 	kref_get(&init_css_set.ref);
-	INIT_LIST_HEAD(&init_css_set.list);
 	INIT_LIST_HEAD(&init_css_set.cg_links);
 	INIT_LIST_HEAD(&init_css_set.tasks);
+	INIT_HLIST_NODE(&init_css_set.hlist);
 	css_set_count = 1;
 	init_cgroup_root(&rootnode);
 	list_add(&rootnode.root_list, &roots);
@@ -2473,6 +2535,9 @@
 	list_add(&init_css_set_link.cg_link_list,
 		 &init_css_set.cg_links);
 
+	for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
+		INIT_HLIST_HEAD(&css_set_table[i]);
+
 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
 		struct cgroup_subsys *ss = subsys[i];
 
@@ -2502,7 +2567,7 @@
 {
 	int err;
 	int i;
-	struct proc_dir_entry *entry;
+	struct hlist_head *hhead;
 
 	err = bdi_init(&cgroup_backing_dev_info);
 	if (err)
@@ -2514,13 +2579,15 @@
 			cgroup_init_subsys(ss);
 	}
 
+	/* Add init_css_set to the hash table */
+	hhead = css_set_hash(init_css_set.subsys);
+	hlist_add_head(&init_css_set.hlist, hhead);
+
 	err = register_filesystem(&cgroup_fs_type);
 	if (err < 0)
 		goto out;
 
-	entry = create_proc_entry("cgroups", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_cgroupstats_operations;
+	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
 
 out:
 	if (err)
@@ -2683,6 +2750,34 @@
 	}
 }
 
+#ifdef CONFIG_MM_OWNER
+/**
+ * cgroup_mm_owner_callbacks - run callbacks when the mm->owner changes
+ * @old: the task that is giving up ownership of the mm
+ * @new: the task that is taking over as mm->owner
+ *
+ * Called on every change to mm->owner. mm_init_owner() does not
+ * invoke this routine, since it assigns the mm->owner the first time
+ * and does not change it.
+ */
+void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
+{
+	struct cgroup *oldcgrp, *newcgrp;
+
+	if (need_mm_owner_callback) {
+		int i;
+		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+			struct cgroup_subsys *ss = subsys[i];
+			oldcgrp = task_cgroup(old, ss->subsys_id);
+			newcgrp = task_cgroup(new, ss->subsys_id);
+			if (oldcgrp == newcgrp)
+				continue;
+			if (ss->mm_owner_changed)
+				ss->mm_owner_changed(ss, oldcgrp, newcgrp);
+		}
+	}
+}
+#endif /* CONFIG_MM_OWNER */
+
 /**
  * cgroup_post_fork - called on a new task after adding it to the task list
  * @child: the task in question
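As an illustrative sketch only (not part of the patch): a subsystem could expose
structured data through the new read_map callback wired up above. The names
foo_stats_read_map and foo_files below are hypothetical; the seqfile glue prints
each cb->fill() pair as a "key value" line.

#include <linux/cgroup.h>

static int foo_stats_read_map(struct cgroup *cgrp, struct cftype *cft,
			      struct cgroup_map_cb *cb)
{
	/* made-up counters, purely for illustration */
	cb->fill(cb, "hits", 1024);
	cb->fill(cb, "misses", 7);
	return 0;
}

static struct cftype foo_files[] = {
	{
		.name = "stats",
		.read_map = foo_stats_read_map,
	},
};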
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index 37301e8..c3dc3ab 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -1,5 +1,5 @@
 /*
- * kernel/ccontainer_debug.c - Example cgroup subsystem that
+ * kernel/cgroup_debug.c - Example cgroup subsystem that
  * exposes debug info
  *
  * Copyright (C) Google Inc, 2007
@@ -62,25 +62,35 @@
 	return count;
 }
 
+static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+{
+	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+}
+
 static struct cftype files[] =  {
 	{
 		.name = "cgroup_refcount",
-		.read_uint = cgroup_refcount_read,
+		.read_u64 = cgroup_refcount_read,
 	},
 	{
 		.name = "taskcount",
-		.read_uint = taskcount_read,
+		.read_u64 = taskcount_read,
 	},
 
 	{
 		.name = "current_css_set",
-		.read_uint = current_css_set_read,
+		.read_u64 = current_css_set_read,
 	},
 
 	{
 		.name = "current_css_set_refcount",
-		.read_uint = current_css_set_refcount_read,
+		.read_u64 = current_css_set_refcount_read,
 	},
+
+	{
+		.name = "releasable",
+		.read_u64 = releasable_read,
+	}
 };
 
 static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
diff --git a/kernel/configs.c b/kernel/configs.c
index e84d3f9..4c34521 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -79,12 +79,11 @@
 	struct proc_dir_entry *entry;
 
 	/* create the current config file */
-	entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO,
-				  &proc_root);
+	entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+			    &ikconfig_file_ops);
 	if (!entry)
 		return -ENOMEM;
 
-	entry->proc_fops = &ikconfig_file_ops;
 	entry->size = kernel_config_data_size;
 
 	return 0;
@@ -95,7 +94,7 @@
 
 static void __exit ikconfig_cleanup(void)
 {
-	remove_proc_entry("config.gz", &proc_root);
+	remove_proc_entry("config.gz", NULL);
 }
 
 module_init(ikconfig_init);
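A minimal sketch of the create_proc_entry() to proc_create() conversion applied
in this and the following files; example_fops and the "example" entry name are
hypothetical:

#include <linux/proc_fs.h>

/* hypothetical, pre-existing file_operations for the entry */
extern const struct file_operations example_fops;

static int __init example_proc_init(void)
{
	/*
	 * One call replaces create_proc_entry() plus the ->proc_fops
	 * assignment, so the /proc entry never exists without its
	 * file_operations attached.
	 */
	if (!proc_create("example", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}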
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2011ad8..a98f6ab 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -33,17 +33,13 @@
 	 * an ongoing cpu hotplug operation.
 	 */
 	int refcount;
-	wait_queue_head_t writer_queue;
 } cpu_hotplug;
 
-#define writer_exists() (cpu_hotplug.active_writer != NULL)
-
 void __init cpu_hotplug_init(void)
 {
 	cpu_hotplug.active_writer = NULL;
 	mutex_init(&cpu_hotplug.lock);
 	cpu_hotplug.refcount = 0;
-	init_waitqueue_head(&cpu_hotplug.writer_queue);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,11 +61,8 @@
 	if (cpu_hotplug.active_writer == current)
 		return;
 	mutex_lock(&cpu_hotplug.lock);
-	cpu_hotplug.refcount--;
-
-	if (unlikely(writer_exists()) && !cpu_hotplug.refcount)
-		wake_up(&cpu_hotplug.writer_queue);
-
+	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+		wake_up_process(cpu_hotplug.active_writer);
 	mutex_unlock(&cpu_hotplug.lock);
 
 }
@@ -98,8 +91,8 @@
  * Note that during a cpu-hotplug operation, the new readers, if any,
  * will be blocked by the cpu_hotplug.lock
  *
- * Since cpu_maps_update_begin is always called after invoking
- * cpu_maps_update_begin, we can be sure that only one writer is active.
+ * Since cpu_hotplug_begin() is always called after invoking
+ * cpu_maps_update_begin(), we can be sure that only one writer is active.
  *
  * Note that theoretically, there is a possibility of a livelock:
  * - Refcount goes to zero, last reader wakes up the sleeping
@@ -115,19 +108,16 @@
  */
 static void cpu_hotplug_begin(void)
 {
-	DECLARE_WAITQUEUE(wait, current);
-
-	mutex_lock(&cpu_hotplug.lock);
-
 	cpu_hotplug.active_writer = current;
-	add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait);
-	while (cpu_hotplug.refcount) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+
+	for (;;) {
+		mutex_lock(&cpu_hotplug.lock);
+		if (likely(!cpu_hotplug.refcount))
+			break;
+		__set_current_state(TASK_UNINTERRUPTIBLE);
 		mutex_unlock(&cpu_hotplug.lock);
 		schedule();
-		mutex_lock(&cpu_hotplug.lock);
 	}
-	remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait);
 }
 
 static void cpu_hotplug_done(void)
@@ -136,7 +126,7 @@
 	mutex_unlock(&cpu_hotplug.lock);
 }
 /* Need to know about CPUs going up/down? */
-int __cpuinit register_cpu_notifier(struct notifier_block *nb)
+int __ref register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 	cpu_maps_update_begin();
@@ -149,7 +139,7 @@
 
 EXPORT_SYMBOL(register_cpu_notifier);
 
-void unregister_cpu_notifier(struct notifier_block *nb)
+void __ref unregister_cpu_notifier(struct notifier_block *nb)
 {
 	cpu_maps_update_begin();
 	raw_notifier_chain_unregister(&cpu_chain, nb);
@@ -180,7 +170,7 @@
 };
 
 /* Take this CPU down. */
-static int take_cpu_down(void *_param)
+static int __ref take_cpu_down(void *_param)
 {
 	struct take_cpu_down_param *param = _param;
 	int err;
@@ -199,7 +189,7 @@
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int _cpu_down(unsigned int cpu, int tasks_frozen)
+static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
 	struct task_struct *p;
@@ -274,7 +264,7 @@
 	return err;
 }
 
-int cpu_down(unsigned int cpu)
+int __ref cpu_down(unsigned int cpu)
 {
 	int err = 0;
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 024888b..8da627d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -127,6 +127,7 @@
 typedef enum {
 	CS_CPU_EXCLUSIVE,
 	CS_MEM_EXCLUSIVE,
+	CS_MEM_HARDWALL,
 	CS_MEMORY_MIGRATE,
 	CS_SCHED_LOAD_BALANCE,
 	CS_SPREAD_PAGE,
@@ -144,6 +145,11 @@
 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 }
 
+static inline int is_mem_hardwall(const struct cpuset *cs)
+{
+	return test_bit(CS_MEM_HARDWALL, &cs->flags);
+}
+
 static inline int is_sched_load_balance(const struct cpuset *cs)
 {
 	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
@@ -735,7 +741,8 @@
  * Return nonzero if this tasks's cpus_allowed mask should be changed (in other
  * words, if its mask is not equal to its cpuset's mask).
  */
-int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+static int cpuset_test_cpumask(struct task_struct *tsk,
+			       struct cgroup_scanner *scan)
 {
 	return !cpus_equal(tsk->cpus_allowed,
 			(cgroup_cs(scan->cg))->cpus_allowed);
@@ -752,7 +759,8 @@
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cgroup_lock() at this point.
  */
-void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+static void cpuset_change_cpumask(struct task_struct *tsk,
+				  struct cgroup_scanner *scan)
 {
 	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
 }
@@ -1023,19 +1031,6 @@
 	return task_cs(current) == cpuset_being_rebound;
 }
 
-/*
- * Call with cgroup_mutex held.
- */
-
-static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
-{
-	if (simple_strtoul(buf, NULL, 10) != 0)
-		cpuset_memory_pressure_enabled = 1;
-	else
-		cpuset_memory_pressure_enabled = 0;
-	return 0;
-}
-
 static int update_relax_domain_level(struct cpuset *cs, char *buf)
 {
 	int val = simple_strtol(buf, NULL, 10);
@@ -1053,25 +1048,20 @@
 
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
- * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
- *				CS_SCHED_LOAD_BALANCE,
- *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
- *				CS_SPREAD_PAGE, CS_SPREAD_SLAB)
- * cs:	the cpuset to update
- * buf:	the buffer where we read the 0 or 1
+ * bit:		the bit to update (see cpuset_flagbits_t)
+ * cs:		the cpuset to update
+ * turning_on: 	whether the flag is being set or cleared
  *
  * Call with cgroup_mutex held.
  */
 
-static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
+static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+		       int turning_on)
 {
-	int turning_on;
 	struct cpuset trialcs;
 	int err;
 	int cpus_nonempty, balance_flag_changed;
 
-	turning_on = (simple_strtoul(buf, NULL, 10) != 0);
-
 	trialcs = *cs;
 	if (turning_on)
 		set_bit(bit, &trialcs.flags);
@@ -1241,6 +1231,7 @@
 	FILE_MEMLIST,
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
+	FILE_MEM_HARDWALL,
 	FILE_SCHED_LOAD_BALANCE,
 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
 	FILE_MEMORY_PRESSURE_ENABLED,
@@ -1265,7 +1256,8 @@
 		return -E2BIG;
 
 	/* +1 for nul-terminator */
-	if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
+	buffer = kmalloc(nbytes + 1, GFP_KERNEL);
+	if (!buffer)
 		return -ENOMEM;
 
 	if (copy_from_user(buffer, userbuf, nbytes)) {
@@ -1288,35 +1280,9 @@
 	case FILE_MEMLIST:
 		retval = update_nodemask(cs, buffer);
 		break;
-	case FILE_CPU_EXCLUSIVE:
-		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
-		break;
-	case FILE_MEM_EXCLUSIVE:
-		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
-		break;
-	case FILE_SCHED_LOAD_BALANCE:
-		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
-		break;
 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 		retval = update_relax_domain_level(cs, buffer);
 		break;
-	case FILE_MEMORY_MIGRATE:
-		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
-		break;
-	case FILE_MEMORY_PRESSURE_ENABLED:
-		retval = update_memory_pressure_enabled(cs, buffer);
-		break;
-	case FILE_MEMORY_PRESSURE:
-		retval = -EACCES;
-		break;
-	case FILE_SPREAD_PAGE:
-		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
-		cs->mems_generation = cpuset_mems_generation++;
-		break;
-	case FILE_SPREAD_SLAB:
-		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
-		cs->mems_generation = cpuset_mems_generation++;
-		break;
 	default:
 		retval = -EINVAL;
 		goto out2;
@@ -1331,6 +1297,57 @@
 	return retval;
 }
 
+static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+	int retval = 0;
+	struct cpuset *cs = cgroup_cs(cgrp);
+	cpuset_filetype_t type = cft->private;
+
+	cgroup_lock();
+
+	if (cgroup_is_removed(cgrp)) {
+		cgroup_unlock();
+		return -ENODEV;
+	}
+
+	switch (type) {
+	case FILE_CPU_EXCLUSIVE:
+		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
+		break;
+	case FILE_MEM_EXCLUSIVE:
+		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
+		break;
+	case FILE_MEM_HARDWALL:
+		retval = update_flag(CS_MEM_HARDWALL, cs, val);
+		break;
+	case FILE_SCHED_LOAD_BALANCE:
+		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
+		break;
+	case FILE_MEMORY_MIGRATE:
+		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
+		break;
+	case FILE_MEMORY_PRESSURE_ENABLED:
+		cpuset_memory_pressure_enabled = !!val;
+		break;
+	case FILE_MEMORY_PRESSURE:
+		retval = -EACCES;
+		break;
+	case FILE_SPREAD_PAGE:
+		retval = update_flag(CS_SPREAD_PAGE, cs, val);
+		cs->mems_generation = cpuset_mems_generation++;
+		break;
+	case FILE_SPREAD_SLAB:
+		retval = update_flag(CS_SPREAD_SLAB, cs, val);
+		cs->mems_generation = cpuset_mems_generation++;
+		break;
+	default:
+		retval = -EINVAL;
+		break;
+	}
+	cgroup_unlock();
+	return retval;
+}
+
 /*
  * These ascii lists should be read in a single call, by using a user
  * buffer large enough to hold the entire map.  If read in smaller
@@ -1389,33 +1406,9 @@
 	case FILE_MEMLIST:
 		s += cpuset_sprintf_memlist(s, cs);
 		break;
-	case FILE_CPU_EXCLUSIVE:
-		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
-		break;
-	case FILE_MEM_EXCLUSIVE:
-		*s++ = is_mem_exclusive(cs) ? '1' : '0';
-		break;
-	case FILE_SCHED_LOAD_BALANCE:
-		*s++ = is_sched_load_balance(cs) ? '1' : '0';
-		break;
 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 		s += sprintf(s, "%d", cs->relax_domain_level);
 		break;
-	case FILE_MEMORY_MIGRATE:
-		*s++ = is_memory_migrate(cs) ? '1' : '0';
-		break;
-	case FILE_MEMORY_PRESSURE_ENABLED:
-		*s++ = cpuset_memory_pressure_enabled ? '1' : '0';
-		break;
-	case FILE_MEMORY_PRESSURE:
-		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
-		break;
-	case FILE_SPREAD_PAGE:
-		*s++ = is_spread_page(cs) ? '1' : '0';
-		break;
-	case FILE_SPREAD_SLAB:
-		*s++ = is_spread_slab(cs) ? '1' : '0';
-		break;
 	default:
 		retval = -EINVAL;
 		goto out;
@@ -1428,121 +1421,137 @@
 	return retval;
 }
 
-
-
+static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
+{
+	struct cpuset *cs = cgroup_cs(cont);
+	cpuset_filetype_t type = cft->private;
+	switch (type) {
+	case FILE_CPU_EXCLUSIVE:
+		return is_cpu_exclusive(cs);
+	case FILE_MEM_EXCLUSIVE:
+		return is_mem_exclusive(cs);
+	case FILE_MEM_HARDWALL:
+		return is_mem_hardwall(cs);
+	case FILE_SCHED_LOAD_BALANCE:
+		return is_sched_load_balance(cs);
+	case FILE_MEMORY_MIGRATE:
+		return is_memory_migrate(cs);
+	case FILE_MEMORY_PRESSURE_ENABLED:
+		return cpuset_memory_pressure_enabled;
+	case FILE_MEMORY_PRESSURE:
+		return fmeter_getrate(&cs->fmeter);
+	case FILE_SPREAD_PAGE:
+		return is_spread_page(cs);
+	case FILE_SPREAD_SLAB:
+		return is_spread_slab(cs);
+	default:
+		BUG();
+	}
+}
 
 
 /*
  * for the common functions, 'private' gives the type of file
  */
 
-static struct cftype cft_cpus = {
-	.name = "cpus",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_CPULIST,
-};
+static struct cftype files[] = {
+	{
+		.name = "cpus",
+		.read = cpuset_common_file_read,
+		.write = cpuset_common_file_write,
+		.private = FILE_CPULIST,
+	},
 
-static struct cftype cft_mems = {
-	.name = "mems",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_MEMLIST,
-};
+	{
+		.name = "mems",
+		.read = cpuset_common_file_read,
+		.write = cpuset_common_file_write,
+		.private = FILE_MEMLIST,
+	},
 
-static struct cftype cft_cpu_exclusive = {
-	.name = "cpu_exclusive",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_CPU_EXCLUSIVE,
-};
+	{
+		.name = "cpu_exclusive",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_CPU_EXCLUSIVE,
+	},
 
-static struct cftype cft_mem_exclusive = {
-	.name = "mem_exclusive",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_MEM_EXCLUSIVE,
-};
+	{
+		.name = "mem_exclusive",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_MEM_EXCLUSIVE,
+	},
 
-static struct cftype cft_sched_load_balance = {
-	.name = "sched_load_balance",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_SCHED_LOAD_BALANCE,
-};
+	{
+		.name = "mem_hardwall",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_MEM_HARDWALL,
+	},
 
-static struct cftype cft_sched_relax_domain_level = {
-	.name = "sched_relax_domain_level",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
-};
+	{
+		.name = "sched_load_balance",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_SCHED_LOAD_BALANCE,
+	},
 
-static struct cftype cft_memory_migrate = {
-	.name = "memory_migrate",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_MEMORY_MIGRATE,
+	{
+		.name = "sched_relax_domain_level",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+	},
+
+	{
+		.name = "memory_migrate",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_MEMORY_MIGRATE,
+	},
+
+	{
+		.name = "memory_pressure",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_MEMORY_PRESSURE,
+	},
+
+	{
+		.name = "memory_spread_page",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_SPREAD_PAGE,
+	},
+
+	{
+		.name = "memory_spread_slab",
+		.read_u64 = cpuset_read_u64,
+		.write_u64 = cpuset_write_u64,
+		.private = FILE_SPREAD_SLAB,
+	},
 };
 
 static struct cftype cft_memory_pressure_enabled = {
 	.name = "memory_pressure_enabled",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_PRESSURE_ENABLED,
 };
 
-static struct cftype cft_memory_pressure = {
-	.name = "memory_pressure",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_MEMORY_PRESSURE,
-};
-
-static struct cftype cft_spread_page = {
-	.name = "memory_spread_page",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_SPREAD_PAGE,
-};
-
-static struct cftype cft_spread_slab = {
-	.name = "memory_spread_slab",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
-	.private = FILE_SPREAD_SLAB,
-};
-
 static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	int err;
 
-	if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss,
-					&cft_sched_relax_domain_level)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
-		return err;
-	if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
+	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+	if (err)
 		return err;
 	/* memory_pressure_enabled is in root cpuset only */
-	if (err == 0 && !cont->parent)
+	if (!cont->parent)
 		err = cgroup_add_file(cont, ss,
-					 &cft_memory_pressure_enabled);
-	return 0;
+				      &cft_memory_pressure_enabled);
+	return err;
 }
 
 /*
@@ -1642,7 +1651,7 @@
 	cpuset_update_task_memory_state();
 
 	if (is_sched_load_balance(cs))
-		update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");
+		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	number_of_cpusets--;
 	kfree(cs);
@@ -1707,7 +1716,8 @@
  * Called by cgroup_scan_tasks() for each task in a cgroup.
  * Return nonzero to stop the walk through the tasks.
  */
-void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
+static void cpuset_do_move_task(struct task_struct *tsk,
+				struct cgroup_scanner *scan)
 {
 	struct cpuset_hotplug_scanner *chsp;
 
@@ -1969,14 +1979,14 @@
 }
 
 /*
- * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
- * ancestor to the specified cpuset.  Call holding callback_mutex.
- * If no ancestor is mem_exclusive (an unusual configuration), then
- * returns the root cpuset.
+ * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
+ * mem_hardwall ancestor to the specified cpuset.  Call holding
+ * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
+ * (an unusual configuration), then returns the root cpuset.
  */
-static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
+static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 {
-	while (!is_mem_exclusive(cs) && cs->parent)
+	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
 		cs = cs->parent;
 	return cs;
 }
@@ -1990,7 +2000,7 @@
  * __GFP_THISNODE is set, yes, we can always allocate.  If zone
  * z's node is in our tasks mems_allowed, yes.  If it's not a
  * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
+ * hardwalled cpuset ancestor to this tasks cpuset, yes.
  * If the task has been OOM killed and has access to memory reserves
  * as specified by the TIF_MEMDIE flag, yes.
  * Otherwise, no.
@@ -2013,7 +2023,7 @@
  * and do not allow allocations outside the current tasks cpuset
  * unless the task has been OOM killed as is marked TIF_MEMDIE.
  * GFP_KERNEL allocations are not so marked, so can escape to the
- * nearest enclosing mem_exclusive ancestor cpuset.
+ * nearest enclosing hardwalled ancestor cpuset.
  *
  * Scanning up parent cpusets requires callback_mutex.  The
  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
@@ -2036,7 +2046,7 @@
  *	in_interrupt - any node ok (current task context irrelevant)
  *	GFP_ATOMIC   - any node ok
  *	TIF_MEMDIE   - any node ok
- *	GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
+ *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
  *	GFP_USER     - only nodes in current tasks mems allowed ok.
  *
  * Rule:
@@ -2073,7 +2083,7 @@
 	mutex_lock(&callback_mutex);
 
 	task_lock(current);
-	cs = nearest_exclusive_ancestor(task_cs(current));
+	cs = nearest_hardwall_ancestor(task_cs(current));
 	task_unlock(current);
 
 	allowed = node_isset(node, cs->mems_allowed);
diff --git a/kernel/dma.c b/kernel/dma.c
index 6a82bb7..d2c60a8 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -149,12 +149,7 @@
 
 static int __init proc_dma_init(void)
 {
-	struct proc_dir_entry *e;
-
-	e = create_proc_entry("dma", 0, NULL);
-	if (e)
-		e->proc_fops = &proc_dma_operations;
-
+	proc_create("dma", 0, NULL, &proc_dma_operations);
 	return 0;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 2a9d98c..ae0f2c4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -557,6 +557,88 @@
 
 EXPORT_SYMBOL_GPL(exit_fs);
 
+#ifdef CONFIG_MM_OWNER
+/*
+ * Task p is exiting and it owned mm; let's find a new owner for it
+ */
+static inline int
+mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
+{
+	/*
+	 * If there are other users of the mm and the owner (us) is exiting
+	 * we need to find a new owner to take on the responsibility.
+	 */
+	if (!mm)
+		return 0;
+	if (atomic_read(&mm->mm_users) <= 1)
+		return 0;
+	if (mm->owner != p)
+		return 0;
+	return 1;
+}
+
+void mm_update_next_owner(struct mm_struct *mm)
+{
+	struct task_struct *c, *g, *p = current;
+
+retry:
+	if (!mm_need_new_owner(mm, p))
+		return;
+
+	read_lock(&tasklist_lock);
+	/*
+	 * Search in the children
+	 */
+	list_for_each_entry(c, &p->children, sibling) {
+		if (c->mm == mm)
+			goto assign_new_owner;
+	}
+
+	/*
+	 * Search in the siblings
+	 */
+	list_for_each_entry(c, &p->parent->children, sibling) {
+		if (c->mm == mm)
+			goto assign_new_owner;
+	}
+
+	/*
+	 * Search through everything else. We should not get
+	 * here often
+	 */
+	do_each_thread(g, c) {
+		if (c->mm == mm)
+			goto assign_new_owner;
+	} while_each_thread(g, c);
+
+	read_unlock(&tasklist_lock);
+	return;
+
+assign_new_owner:
+	BUG_ON(c == p);
+	get_task_struct(c);
+	/*
+	 * The task_lock protects c->mm from changing.
+	 * We always want mm->owner->mm == mm
+	 */
+	task_lock(c);
+	/*
+	 * Delay read_unlock() till we have the task_lock()
+	 * to ensure that c does not slip away underneath us
+	 */
+	read_unlock(&tasklist_lock);
+	if (c->mm != mm) {
+		task_unlock(c);
+		put_task_struct(c);
+		goto retry;
+	}
+	cgroup_mm_owner_callbacks(mm->owner, c);
+	mm->owner = c;
+	task_unlock(c);
+	put_task_struct(c);
+}
+#endif /* CONFIG_MM_OWNER */
+
 /*
  * Turn us into a lazy TLB process if we
  * aren't already..
@@ -596,6 +678,7 @@
 	/* We don't want this task to be frozen prematurely */
 	clear_freeze_flag(tsk);
 	task_unlock(tsk);
+	mm_update_next_owner(mm);
 	mmput(mm);
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 6067e42..068ffe0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -381,14 +381,13 @@
 	mm->ioctx_list = NULL;
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->cached_hole_size = ~0UL;
-	mm_init_cgroup(mm, p);
+	mm_init_owner(mm, p);
 
 	if (likely(!mm_alloc_pgd(mm))) {
 		mm->def_flags = 0;
 		return mm;
 	}
 
-	mm_free_cgroup(mm);
 	free_mm(mm);
 	return NULL;
 }
@@ -432,13 +431,13 @@
 	if (atomic_dec_and_test(&mm->mm_users)) {
 		exit_aio(mm);
 		exit_mmap(mm);
+		set_mm_exe_file(mm, NULL);
 		if (!list_empty(&mm->mmlist)) {
 			spin_lock(&mmlist_lock);
 			list_del(&mm->mmlist);
 			spin_unlock(&mmlist_lock);
 		}
 		put_swap_token(mm);
-		mm_free_cgroup(mm);
 		mmdrop(mm);
 	}
 }
@@ -545,6 +544,8 @@
 	if (init_new_context(tsk, mm))
 		goto fail_nocontext;
 
+	dup_mm_exe_file(oldmm, mm);
+
 	err = dup_mmap(mm, oldmm);
 	if (err)
 		goto free_pt;
@@ -982,6 +983,13 @@
 #endif
 }
 
+#ifdef CONFIG_MM_OWNER
+void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+	mm->owner = p;
+}
+#endif /* CONFIG_MM_OWNER */
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1664,18 +1672,6 @@
 }
 
 /*
- * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
- * supported yet
- */
-static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
-{
-	if (unshare_flags & CLONE_SYSVSEM)
-		return -EINVAL;
-
-	return 0;
-}
-
-/*
  * unshare allows a process to 'unshare' part of the process
  * context which was originally shared using clone.  copy_*
  * functions used by do_fork() cannot be used here directly
@@ -1690,8 +1686,8 @@
 	struct sighand_struct *new_sigh = NULL;
 	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
 	struct files_struct *fd, *new_fd = NULL;
-	struct sem_undo_list *new_ulist = NULL;
 	struct nsproxy *new_nsproxy = NULL;
+	int do_sysvsem = 0;
 
 	check_unshare_flags(&unshare_flags);
 
@@ -1703,6 +1699,13 @@
 				CLONE_NEWNET))
 		goto bad_unshare_out;
 
+	/*
+	 * CLONE_NEWIPC must also detach from the undolist: after switching
+	 * to a new ipc namespace, the semaphore arrays from the old
+	 * namespace are unreachable.
+	 */
+	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
+		do_sysvsem = 1;
 	if ((err = unshare_thread(unshare_flags)))
 		goto bad_unshare_out;
 	if ((err = unshare_fs(unshare_flags, &new_fs)))
@@ -1713,13 +1716,17 @@
 		goto bad_unshare_cleanup_sigh;
 	if ((err = unshare_fd(unshare_flags, &new_fd)))
 		goto bad_unshare_cleanup_vm;
-	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
-		goto bad_unshare_cleanup_fd;
 	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
 			new_fs)))
-		goto bad_unshare_cleanup_semundo;
+		goto bad_unshare_cleanup_fd;
 
-	if (new_fs ||  new_mm || new_fd || new_ulist || new_nsproxy) {
+	if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
+		if (do_sysvsem) {
+			/*
+			 * For the semundo list, CLONE_SYSVSEM in unshare
+			 * behaves like exit(): detach it via exit_sem().
+			 */
+			exit_sem(current);
+		}
 
 		if (new_nsproxy) {
 			switch_task_namespaces(current, new_nsproxy);
@@ -1755,7 +1762,6 @@
 	if (new_nsproxy)
 		put_nsproxy(new_nsproxy);
 
-bad_unshare_cleanup_semundo:
 bad_unshare_cleanup_fd:
 	if (new_fd)
 		put_files_struct(new_fd);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e379ef0..dea4c91 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -590,7 +590,6 @@
 			list_add_tail(&timer->cb_entry,
 				      &base->cpu_base->cb_pending);
 			timer->state = HRTIMER_STATE_PENDING;
-			raise_softirq(HRTIMER_SOFTIRQ);
 			return 1;
 		default:
 			BUG();
@@ -633,6 +632,11 @@
 	return 1;
 }
 
+static inline void hrtimer_raise_softirq(void)
+{
+	raise_softirq(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -651,6 +655,7 @@
 {
 	return 0;
 }
+static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -850,7 +855,7 @@
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret;
+	int ret, raise;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -884,8 +889,18 @@
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
+	/*
+	 * The timer may be expired and moved to the cb_pending
+	 * list. We can not raise the softirq with base lock held due
+	 * to a possible deadlock with runqueue lock.
+	 */
+	raise = timer->state == HRTIMER_STATE_PENDING;
+
 	unlock_hrtimer_base(timer, &flags);
 
+	if (raise)
+		hrtimer_raise_softirq();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 6d9204f3..38a25b8 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -1,6 +1,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+#include <linux/gfp.h>
 
 /*
  * Device resource management aware IRQ request/free implementation.
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 438a014..46e4ad1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 
 #include "internals.h"
 
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index f091d13..6fc0040 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -472,11 +472,7 @@
 
 static int __init kallsyms_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("kallsyms", 0444, NULL);
-	if (entry)
-		entry->proc_fops = &kallsyms_operations;
+	proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
 	return 0;
 }
 __initcall(kallsyms_init);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 92cf693..ac72eea 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -144,9 +144,9 @@
 
 	spin_lock(&kthread_create_lock);
 	list_add_tail(&create.list, &kthread_create_list);
-	wake_up_process(kthreadd_task);
 	spin_unlock(&kthread_create_lock);
 
+	wake_up_process(kthreadd_task);
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 7c74dab..5e7b45c 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -233,14 +233,7 @@
 
 static int __init init_lstats_procfs(void)
 {
-	struct proc_dir_entry *pe;
-
-	pe = create_proc_entry("latency_stats", 0644, NULL);
-	if (!pe)
-		return -ENOMEM;
-
-	pe->proc_fops = &lstats_fops;
-
+	proc_create("latency_stats", 0644, NULL, &lstats_fops);
 	return 0;
 }
 __initcall(init_lstats_procfs);
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 8a135bd..dc5d296 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -660,20 +660,12 @@
 
 static int __init lockdep_proc_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("lockdep", S_IRUSR, NULL);
-	if (entry)
-		entry->proc_fops = &proc_lockdep_operations;
-
-	entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
-	if (entry)
-		entry->proc_fops = &proc_lockdep_stats_operations;
+	proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
+	proc_create("lockdep_stats", S_IRUSR, NULL,
+		    &proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-	entry = create_proc_entry("lock_stat", S_IRUSR, NULL);
-	if (entry)
-		entry->proc_fops = &proc_lock_stat_operations;
+	proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
 #endif
 
 	return 0;
diff --git a/kernel/marker.c b/kernel/marker.c
index 005b959..139260e 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/marker.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 extern struct marker __start___markers[];
 extern struct marker __stop___markers[];
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 643360d..823be11 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -31,6 +31,21 @@
 	return 0;
 }
 
+static int notifier_chain_cond_register(struct notifier_block **nl,
+		struct notifier_block *n)
+{
+	while ((*nl) != NULL) {
+		if ((*nl) == n)
+			return 0;
+		if (n->priority > (*nl)->priority)
+			break;
+		nl = &((*nl)->next);
+	}
+	n->next = *nl;
+	rcu_assign_pointer(*nl, n);
+	return 0;
+}
+
 static int notifier_chain_unregister(struct notifier_block **nl,
 		struct notifier_block *n)
 {
@@ -205,6 +220,29 @@
 EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
 
 /**
+ *	blocking_notifier_chain_cond_register - Conditionally add notifier to a blocking notifier chain
+ *	@nh: Pointer to head of the blocking notifier chain
+ *	@n: New entry in notifier chain
+ *
+ *	Adds a notifier to a blocking notifier chain, only if not already
+ *	present in the chain.
+ *	Must be called in process context.
+ *
+ *	Currently always returns zero.
+ */
+int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh,
+		struct notifier_block *n)
+{
+	int ret;
+
+	down_write(&nh->rwsem);
+	ret = notifier_chain_cond_register(&nh->head, n);
+	up_write(&nh->rwsem);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register);
+
+/**
  *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
  *	@nh: Pointer to head of the blocking notifier chain
  *	@n: Entry to remove from notifier chain
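A small usage sketch for the new cond_register variant (all names below are
hypothetical): a path that may run more than once can add its notifier without
first checking whether it is already on the chain.

#include <linux/notifier.h>

static int example_event(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

static BLOCKING_NOTIFIER_HEAD(example_chain);

static void example_attach(void)
{
	/* safe to call repeatedly; a duplicate entry is silently skipped */
	blocking_notifier_chain_cond_register(&example_chain, &example_nb);
}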
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index aead4d6..48d7ed6 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -7,6 +7,8 @@
 #include <linux/module.h>
 #include <linux/cgroup.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/nsproxy.h>
 
 struct ns_cgroup {
 	struct cgroup_subsys_state css;
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index f5d332c..adc7851 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -139,6 +139,18 @@
 		goto out;
 	}
 
+	/*
+	 * CLONE_NEWIPC must detach from the undolist: after switching
+	 * to a new ipc namespace, the semaphore arrays from the old
+	 * namespace are unreachable.  In clone parlance, CLONE_SYSVSEM
+	 * means share undolist with parent, so we must forbid using
+	 * it along with CLONE_NEWIPC.
+	 */
+	if ((flags & CLONE_NEWIPC) && (flags & CLONE_SYSVSEM)) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	new_ns = create_new_namespaces(flags, tsk, tsk->fs);
 	if (IS_ERR(new_ns)) {
 		err = PTR_ERR(new_ns);
diff --git a/kernel/panic.c b/kernel/panic.c
index 24af9f8..425567f 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -153,6 +153,8 @@
  *  'M' - System experienced a machine check exception.
  *  'B' - System has hit bad_page.
  *  'U' - Userspace-defined naughtiness.
+ *  'A' - ACPI table overridden.
+ *  'W' - Taint on warning.
  *
  *	The string is overwritten by the next call to print_taint().
  */
@@ -161,7 +163,7 @@
 {
 	static char buf[20];
 	if (tainted) {
-		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c",
+		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c",
 			tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
 			tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
 			tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
@@ -170,7 +172,8 @@
 			tainted & TAINT_BAD_PAGE ? 'B' : ' ',
 			tainted & TAINT_USER ? 'U' : ' ',
 			tainted & TAINT_DIE ? 'D' : ' ',
-			tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ');
+			tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ',
+			tainted & TAINT_WARN ? 'W' : ' ');
 	}
 	else
 		snprintf(buf, sizeof(buf), "Not tainted");
@@ -312,6 +315,7 @@
 	print_modules();
 	dump_stack();
 	print_oops_end_marker();
+	add_taint(TAINT_WARN);
 }
 EXPORT_SYMBOL(warn_on_slowpath);
 #endif
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 6d792b6..5ca37fa 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -92,7 +92,7 @@
 	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
 
 	for (i = 1; i < PIDMAP_ENTRIES; i++) {
-		ns->pidmap[i].page = 0;
+		ns->pidmap[i].page = NULL;
 		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
 	}
 
diff --git a/kernel/printk.c b/kernel/printk.c
index bdd4ea8..d3f9c0f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1287,31 +1287,7 @@
  */
 int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
 {
-	static DEFINE_SPINLOCK(ratelimit_lock);
-	static unsigned toks = 10 * 5 * HZ;
-	static unsigned long last_msg;
-	static int missed;
-	unsigned long flags;
-	unsigned long now = jiffies;
-
-	spin_lock_irqsave(&ratelimit_lock, flags);
-	toks += now - last_msg;
-	last_msg = now;
-	if (toks > (ratelimit_burst * ratelimit_jiffies))
-		toks = ratelimit_burst * ratelimit_jiffies;
-	if (toks >= ratelimit_jiffies) {
-		int lost = missed;
-
-		missed = 0;
-		toks -= ratelimit_jiffies;
-		spin_unlock_irqrestore(&ratelimit_lock, flags);
-		if (lost)
-			printk(KERN_WARNING "printk: %d messages suppressed.\n", lost);
-		return 1;
-	}
-	missed++;
-	spin_unlock_irqrestore(&ratelimit_lock, flags);
-	return 0;
+	return __ratelimit(ratelimit_jiffies, ratelimit_burst);
 }
 EXPORT_SYMBOL(__printk_ratelimit);
 
diff --git a/kernel/profile.c b/kernel/profile.c
index 606d738..ae7ead8 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -587,10 +587,10 @@
 		return 0;
 	if (create_hash_tables())
 		return -1;
-	entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL);
+	entry = proc_create("profile", S_IWUSR | S_IRUGO,
+			    NULL, &proc_profile_operations);
 	if (!entry)
 		return 0;
-	entry->proc_fops = &proc_profile_operations;
 	entry->size = (1+prof_len) * sizeof(atomic_t);
 	hotcpu_notifier(profile_cpu_callback, 0);
 	return 0;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 67e392e..dac4b4e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -612,7 +612,7 @@
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
 
-#ifdef CONFIG_COMPAT
+#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
 #include <linux/compat.h>
 
 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
@@ -667,7 +667,6 @@
 	return ret;
 }
 
-#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 				  compat_long_t addr, compat_long_t data)
 {
@@ -710,6 +709,4 @@
 	unlock_kernel();
 	return ret;
 }
-#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */
-
-#endif	/* CONFIG_COMPAT */
+#endif	/* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 47894f9..33acc424 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -45,6 +45,7 @@
 #include <linux/byteorder/swabb.h>
 #include <linux/stat.h>
 #include <linux/srcu.h>
+#include <linux/slab.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
diff --git a/kernel/relay.c b/kernel/relay.c
index d6204a4..7de644c 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -65,6 +65,35 @@
 	.close = relay_file_mmap_close,
 };
 
+/*
+ * allocate an array of pointers to struct page
+ */
+static struct page **relay_alloc_page_array(unsigned int n_pages)
+{
+	struct page **array;
+	size_t pa_size = n_pages * sizeof(struct page *);
+
+	if (pa_size > PAGE_SIZE) {
+		array = vmalloc(pa_size);
+		if (array)
+			memset(array, 0, pa_size);
+	} else {
+		array = kzalloc(pa_size, GFP_KERNEL);
+	}
+	return array;
+}
+
+/*
+ * free an array of pointers to struct page
+ */
+static void relay_free_page_array(struct page **array)
+{
+	if (is_vmalloc_addr(array))
+		vfree(array);
+	else
+		kfree(array);
+}
+
 /**
  *	relay_mmap_buf: - mmap channel buffer to process address space
  *	@buf: relay channel buffer
@@ -109,7 +138,7 @@
 	*size = PAGE_ALIGN(*size);
 	n_pages = *size >> PAGE_SHIFT;
 
-	buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
+	buf->page_array = relay_alloc_page_array(n_pages);
 	if (!buf->page_array)
 		return NULL;
 
@@ -130,7 +159,7 @@
 depopulate:
 	for (j = 0; j < i; j++)
 		__free_page(buf->page_array[j]);
-	kfree(buf->page_array);
+	relay_free_page_array(buf->page_array);
 	return NULL;
 }
 
@@ -189,7 +218,7 @@
 		vunmap(buf->start);
 		for (i = 0; i < buf->page_count; i++)
 			__free_page(buf->page_array[i]);
-		kfree(buf->page_array);
+		relay_free_page_array(buf->page_array);
 	}
 	chan->buf[buf->cpu] = NULL;
 	kfree(buf->padding);
@@ -1162,7 +1191,7 @@
 	ret = 0;
 	spliced = 0;
 
-	while (len) {
+	while (len && !spliced) {
 		ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
 		if (ret < 0)
 			break;
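The allocation helpers above follow a common large-buffer pattern; here is a
generic, hedged sketch of the same idea (example_alloc/example_free are
hypothetical):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *example_alloc(size_t size)
{
	void *p;

	if (size > PAGE_SIZE) {		/* too big to kmalloc comfortably */
		p = vmalloc(size);
		if (p)
			memset(p, 0, size);
	} else {
		p = kzalloc(size, GFP_KERNEL);
	}
	return p;
}

static void example_free(void *p)
{
	/* is_vmalloc_addr() lets one free routine cover both paths */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}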
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index efbfc0f..d3c61b4 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/parser.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/res_counter.h>
 #include <linux/uaccess.h>
 
@@ -27,6 +28,8 @@
 	}
 
 	counter->usage += val;
+	if (counter->usage > counter->max_usage)
+		counter->max_usage = counter->usage;
 	return 0;
 }
 
@@ -65,6 +68,8 @@
 	switch (member) {
 	case RES_USAGE:
 		return &counter->usage;
+	case RES_MAX_USAGE:
+		return &counter->max_usage;
 	case RES_LIMIT:
 		return &counter->limit;
 	case RES_FAILCNT:
@@ -92,6 +97,11 @@
 			pos, buf, s - buf);
 }
 
+u64 res_counter_read_u64(struct res_counter *counter, int member)
+{
+	return *res_counter_member(counter, member);
+}
+
 ssize_t res_counter_write(struct res_counter *counter, int member,
 		const char __user *userbuf, size_t nbytes, loff_t *pos,
 		int (*write_strategy)(char *st_buf, unsigned long long *val))
diff --git a/kernel/resource.c b/kernel/resource.c
index cee12cc..74af2d7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -131,14 +131,8 @@
 
 static int __init ioresources_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("ioports", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_ioports_operations;
-	entry = create_proc_entry("iomem", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_iomem_operations;
+	proc_create("ioports", 0, NULL, &proc_ioports_operations);
+	proc_create("iomem", 0, NULL, &proc_iomem_operations);
 	return 0;
 }
 __initcall(ioresources_init);
diff --git a/kernel/sched.c b/kernel/sched.c
index 740fb40..e2f7f5a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9057,13 +9057,13 @@
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
 {
 	return sched_group_set_shares(cgroup_tg(cgrp), shareval);
 }
 
-static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
@@ -9073,48 +9073,14 @@
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-				struct file *file,
-				const char __user *userbuf,
-				size_t nbytes, loff_t *unused_ppos)
+				s64 val)
 {
-	char buffer[64];
-	int retval = 0;
-	s64 val;
-	char *end;
-
-	if (!nbytes)
-		return -EINVAL;
-	if (nbytes >= sizeof(buffer))
-		return -E2BIG;
-	if (copy_from_user(buffer, userbuf, nbytes))
-		return -EFAULT;
-
-	buffer[nbytes] = 0;     /* nul-terminate */
-
-	/* strip newline if necessary */
-	if (nbytes && (buffer[nbytes-1] == '\n'))
-		buffer[nbytes-1] = 0;
-	val = simple_strtoll(buffer, &end, 0);
-	if (*end)
-		return -EINVAL;
-
-	/* Pass to subsystem */
-	retval = sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
-	if (!retval)
-		retval = nbytes;
-	return retval;
+	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
 }
 
-static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
-				   struct file *file,
-				   char __user *buf, size_t nbytes,
-				   loff_t *ppos)
+static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
 {
-	char tmp[64];
-	long val = sched_group_rt_runtime(cgroup_tg(cgrp));
-	int len = sprintf(tmp, "%ld\n", val);
-
-	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+	return sched_group_rt_runtime(cgroup_tg(cgrp));
 }
 
 static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
@@ -9133,20 +9099,20 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
 		.name = "shares",
-		.read_uint = cpu_shares_read_uint,
-		.write_uint = cpu_shares_write_uint,
+		.read_u64 = cpu_shares_read_u64,
+		.write_u64 = cpu_shares_write_u64,
 	},
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	{
 		.name = "rt_runtime_us",
-		.read = cpu_rt_runtime_read,
-		.write = cpu_rt_runtime_write,
+		.read_s64 = cpu_rt_runtime_read,
+		.write_s64 = cpu_rt_runtime_write,
 	},
 	{
 		.name = "rt_period_us",
-		.read_uint = cpu_rt_period_read_uint,
-		.write_uint = cpu_rt_period_write_uint,
+		.read_u64 = cpu_rt_period_read_uint,
+		.write_u64 = cpu_rt_period_write_uint,
 	},
 #endif
 };
@@ -9277,8 +9243,8 @@
 static struct cftype files[] = {
 	{
 		.name = "usage",
-		.read_uint = cpuusage_read,
-		.write_uint = cpuusage_write,
+		.read_u64 = cpuusage_read,
+		.write_u64 = cpuusage_write,
 	},
 };
 
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index f3f4af4..8a9498e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -277,12 +277,9 @@
 {
 	struct proc_dir_entry *pe;
 
-	pe = create_proc_entry("sched_debug", 0644, NULL);
+	pe = proc_create("sched_debug", 0644, NULL, &sched_debug_fops);
 	if (!pe)
 		return -ENOMEM;
-
-	pe->proc_fops = &sched_debug_fops;
-
 	return 0;
 }
 
diff --git a/kernel/sys.c b/kernel/sys.c
index f2a4513..e423d0d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1545,6 +1545,19 @@
  *
  */
 
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
+				     cputime_t *utimep, cputime_t *stimep)
+{
+	*utimep = cputime_add(*utimep, t->utime);
+	*stimep = cputime_add(*stimep, t->stime);
+	r->ru_nvcsw += t->nvcsw;
+	r->ru_nivcsw += t->nivcsw;
+	r->ru_minflt += t->min_flt;
+	r->ru_majflt += t->maj_flt;
+	r->ru_inblock += task_io_get_inblock(t);
+	r->ru_oublock += task_io_get_oublock(t);
+}
+
 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
 	struct task_struct *t;
@@ -1554,6 +1567,11 @@
 	memset((char *) r, 0, sizeof *r);
 	utime = stime = cputime_zero;
 
+	if (who == RUSAGE_THREAD) {
+		accumulate_thread_rusage(p, r, &utime, &stime);
+		goto out;
+	}
+
 	rcu_read_lock();
 	if (!lock_task_sighand(p, &flags)) {
 		rcu_read_unlock();
@@ -1586,14 +1604,7 @@
 			r->ru_oublock += p->signal->oublock;
 			t = p;
 			do {
-				utime = cputime_add(utime, t->utime);
-				stime = cputime_add(stime, t->stime);
-				r->ru_nvcsw += t->nvcsw;
-				r->ru_nivcsw += t->nivcsw;
-				r->ru_minflt += t->min_flt;
-				r->ru_majflt += t->maj_flt;
-				r->ru_inblock += task_io_get_inblock(t);
-				r->ru_oublock += task_io_get_oublock(t);
+				accumulate_thread_rusage(t, r, &utime, &stime);
 				t = next_thread(t);
 			} while (t != p);
 			break;
@@ -1605,6 +1616,7 @@
 	unlock_task_sighand(p, &flags);
 	rcu_read_unlock();
 
+out:
 	cputime_to_timeval(utime, &r->ru_utime);
 	cputime_to_timeval(stime, &r->ru_stime);
 }
@@ -1618,7 +1630,8 @@
 
 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
 {
-	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
+	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
+	    who != RUSAGE_THREAD)
 		return -EINVAL;
 	return getrusage(current, who, ru);
 }
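From userspace, the new RUSAGE_THREAD case could be exercised as in this hedged
sketch; RUSAGE_THREAD is assumed to be 1 here, matching the value introduced
alongside this change:

#include <stdio.h>
#include <sys/resource.h>

#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD	1
#endif

int main(void)
{
	struct rusage ru;

	/* reports usage for the calling thread only */
	if (getrusage(RUSAGE_THREAD, &ru) == 0)
		printf("thread utime: %ld.%06ld s\n",
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	return 0;
}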
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index fd33648..d7ffdc5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -38,6 +38,7 @@
 #include <linux/writeback.h>
 #include <linux/hugetlb.h>
 #include <linux/initrd.h>
+#include <linux/key.h>
 #include <linux/times.h>
 #include <linux/limits.h>
 #include <linux/dcache.h>
@@ -144,12 +145,6 @@
 extern int max_lock_depth;
 #endif
 
-#ifdef CONFIG_SYSCTL_SYSCALL
-static int parse_table(int __user *, int, void __user *, size_t __user *,
-		void __user *, size_t, struct ctl_table *);
-#endif
-
-
 #ifdef CONFIG_PROC_SYSCTL
 static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
 		  void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -809,6 +804,14 @@
 		.proc_handler	= &proc_dostring,
 		.strategy	= &sysctl_string,
 	},
+#ifdef CONFIG_KEYS
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "keys",
+		.mode		= 0555,
+		.child		= key_sysctls,
+	},
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
@@ -1430,6 +1433,76 @@
 }
 
 #ifdef CONFIG_SYSCTL_SYSCALL
+/* Perform the actual read/write of a sysctl table entry. */
+static int do_sysctl_strategy(struct ctl_table_root *root,
+			struct ctl_table *table,
+			int __user *name, int nlen,
+			void __user *oldval, size_t __user *oldlenp,
+			void __user *newval, size_t newlen)
+{
+	int op = 0, rc;
+
+	if (oldval)
+		op |= 004;
+	if (newval)
+		op |= 002;
+	if (sysctl_perm(root, table, op))
+		return -EPERM;
+
+	if (table->strategy) {
+		rc = table->strategy(table, name, nlen, oldval, oldlenp,
+				     newval, newlen);
+		if (rc < 0)
+			return rc;
+		if (rc > 0)
+			return 0;
+	}
+
+	/* If there is no strategy routine, or if the strategy returns
+	 * zero, proceed with automatic r/w */
+	if (table->data && table->maxlen) {
+		rc = sysctl_data(table, name, nlen, oldval, oldlenp,
+				 newval, newlen);
+		if (rc < 0)
+			return rc;
+	}
+	return 0;
+}
+
+static int parse_table(int __user *name, int nlen,
+		       void __user *oldval, size_t __user *oldlenp,
+		       void __user *newval, size_t newlen,
+		       struct ctl_table_root *root,
+		       struct ctl_table *table)
+{
+	int n;
+repeat:
+	if (!nlen)
+		return -ENOTDIR;
+	if (get_user(n, name))
+		return -EFAULT;
+	for ( ; table->ctl_name || table->procname; table++) {
+		if (!table->ctl_name)
+			continue;
+		if (n == table->ctl_name) {
+			int error;
+			if (table->child) {
+				if (sysctl_perm(root, table, 001))
+					return -EPERM;
+				name++;
+				nlen--;
+				table = table->child;
+				goto repeat;
+			}
+			error = do_sysctl_strategy(root, table, name, nlen,
+						   oldval, oldlenp,
+						   newval, newlen);
+			return error;
+		}
+	}
+	return -ENOTDIR;
+}
+
 int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
 	       void __user *newval, size_t newlen)
 {
@@ -1447,7 +1520,8 @@
 	for (head = sysctl_head_next(NULL); head;
 			head = sysctl_head_next(head)) {
 		error = parse_table(name, nlen, oldval, oldlenp, 
-					newval, newlen, head->ctl_table);
+					newval, newlen,
+					head->root, head->ctl_table);
 		if (error != -ENOTDIR) {
 			sysctl_head_finish(head);
 			break;
@@ -1493,85 +1567,23 @@
 	return -EACCES;
 }
 
-int sysctl_perm(struct ctl_table *table, int op)
+int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
 {
 	int error;
+	int mode;
+
 	error = security_sysctl(table, op);
 	if (error)
 		return error;
-	return test_perm(table->mode, op);
+
+	if (root->permissions)
+		mode = root->permissions(root, current->nsproxy, table);
+	else
+		mode = table->mode;
+
+	return test_perm(mode, op);
 }
 
-#ifdef CONFIG_SYSCTL_SYSCALL
-static int parse_table(int __user *name, int nlen,
-		       void __user *oldval, size_t __user *oldlenp,
-		       void __user *newval, size_t newlen,
-		       struct ctl_table *table)
-{
-	int n;
-repeat:
-	if (!nlen)
-		return -ENOTDIR;
-	if (get_user(n, name))
-		return -EFAULT;
-	for ( ; table->ctl_name || table->procname; table++) {
-		if (!table->ctl_name)
-			continue;
-		if (n == table->ctl_name) {
-			int error;
-			if (table->child) {
-				if (sysctl_perm(table, 001))
-					return -EPERM;
-				name++;
-				nlen--;
-				table = table->child;
-				goto repeat;
-			}
-			error = do_sysctl_strategy(table, name, nlen,
-						   oldval, oldlenp,
-						   newval, newlen);
-			return error;
-		}
-	}
-	return -ENOTDIR;
-}
-
-/* Perform the actual read/write of a sysctl table entry. */
-int do_sysctl_strategy (struct ctl_table *table,
-			int __user *name, int nlen,
-			void __user *oldval, size_t __user *oldlenp,
-			void __user *newval, size_t newlen)
-{
-	int op = 0, rc;
-
-	if (oldval)
-		op |= 004;
-	if (newval) 
-		op |= 002;
-	if (sysctl_perm(table, op))
-		return -EPERM;
-
-	if (table->strategy) {
-		rc = table->strategy(table, name, nlen, oldval, oldlenp,
-				     newval, newlen);
-		if (rc < 0)
-			return rc;
-		if (rc > 0)
-			return 0;
-	}
-
-	/* If there is no strategy routine, or if the strategy returns
-	 * zero, proceed with automatic r/w */
-	if (table->data && table->maxlen) {
-		rc = sysctl_data(table, name, nlen, oldval, oldlenp,
-				 newval, newlen);
-		if (rc < 0)
-			return rc;
-	}
-	return 0;
-}
-#endif /* CONFIG_SYSCTL_SYSCALL */
-
 static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
 {
 	for (; table->ctl_name || table->procname; table++) {
@@ -1583,9 +1595,13 @@
 
 static __init int sysctl_init(void)
 {
-	int err;
 	sysctl_set_parent(NULL, root_table);
-	err = sysctl_check_table(current->nsproxy, root_table);
+#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
+	{
+		int err;
+		err = sysctl_check_table(current->nsproxy, root_table);
+	}
+#endif
 	return 0;
 }
 
@@ -1712,10 +1728,12 @@
 	header->unregistering = NULL;
 	header->root = root;
 	sysctl_set_parent(NULL, header->ctl_table);
+#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
 	if (sysctl_check_table(namespaces, header->ctl_table)) {
 		kfree(header);
 		return NULL;
 	}
+#endif
 	spin_lock(&sysctl_lock);
 	header_list = lookup_header_list(root, namespaces);
 	list_add_tail(&header->ctl_entry, header_list);
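
For reference, a non-authoritative sketch of the strategy convention that do_sysctl_strategy() above relies on: a negative return is an error, a positive return means the strategy handled the request itself, and zero falls through to the automatic sysctl_data() read/write. The routine below and its behaviour are hypothetical.

#include <linux/errno.h>
#include <linux/sysctl.h>

/* Hypothetical strategy: read-only via the binary sysctl(2) interface,
 * returning 0 otherwise so that sysctl_data() performs the copy-out. */
static int example_strategy(struct ctl_table *table, int __user *name,
			    int nlen, void __user *oldval,
			    size_t __user *oldlenp, void __user *newval,
			    size_t newlen)
{
	if (newval)
		return -EPERM;
	return 0;
}
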
diff --git a/kernel/time.c b/kernel/time.c
index 35d373a..8672904 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -35,6 +35,7 @@
 #include <linux/syscalls.h>
 #include <linux/security.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 67fe8fc..a40e20f 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -278,12 +278,9 @@
 {
 	struct proc_dir_entry *pe;
 
-	pe = create_proc_entry("timer_list", 0644, NULL);
+	pe = proc_create("timer_list", 0644, NULL, &timer_list_fops);
 	if (!pe)
 		return -ENOMEM;
-
-	pe->proc_fops = &timer_list_fops;
-
 	return 0;
 }
 __initcall(init_timer_list_procfs);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 417da8c..c994530 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -415,12 +415,9 @@
 {
 	struct proc_dir_entry *pe;
 
-	pe = create_proc_entry("timer_stats", 0644, NULL);
+	pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
 	if (!pe)
 		return -ENOMEM;
-
-	pe->proc_fops = &tstats_fops;
-
 	return 0;
 }
 __initcall(init_tstats_procfs);
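
The two conversions above matter because proc_create() publishes the entry with its file_operations already set, closing the window in which a create_proc_entry()-made entry was visible with a NULL ->proc_fops. A sketch of the same pattern for a made-up entry (all names hypothetical):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_proc_init(void)
{
	/* entry goes live with its file_operations already in place */
	if (!proc_create("example", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}
__initcall(example_proc_init);
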
diff --git a/kernel/user.c b/kernel/user.c
index debce60..aefbbfa 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -53,10 +53,6 @@
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm     = 0,
-#ifdef CONFIG_KEYS
-	.uid_keyring	= &root_user_keyring,
-	.session_keyring = &root_session_keyring,
-#endif
 #ifdef CONFIG_USER_SCHED
 	.tg		= &init_task_group,
 #endif
@@ -420,12 +416,12 @@
 		new->mq_bytes = 0;
 #endif
 		new->locked_shm = 0;
-
-		if (alloc_uid_keyring(new, current) < 0)
-			goto out_free_user;
+#ifdef CONFIG_KEYS
+		new->uid_keyring = new->session_keyring = NULL;
+#endif
 
 		if (sched_create_user(new) < 0)
-			goto out_put_keys;
+			goto out_free_user;
 
 		if (uids_user_create(new))
 			goto out_destoy_sched;
@@ -459,9 +455,6 @@
 
 out_destoy_sched:
 	sched_destroy_user(new);
-out_put_keys:
-	key_put(new->uid_keyring);
-	key_put(new->session_keyring);
 out_free_user:
 	kmem_cache_free(uid_cachep, new);
 out_unlock:
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 4c90062..a9ab059 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/version.h>
 #include <linux/nsproxy.h>
+#include <linux/slab.h>
 #include <linux/user_namespace.h>
 
 /*
@@ -73,3 +74,4 @@
 	release_uids(ns);
 	kfree(ns);
 }
+EXPORT_SYMBOL(free_user_ns);
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 816d7b2..64d398f 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <linux/version.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 /*
  * Clone a new ns copying an original utsname, setting refcount to 1
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 00ff4d0..7db251a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -158,8 +158,8 @@
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
@@ -772,7 +772,7 @@
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
@@ -808,19 +808,16 @@
 void destroy_workqueue(struct workqueue_struct *wq)
 {
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
-	struct cpu_workqueue_struct *cwq;
 	int cpu;
 
 	get_online_cpus();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
-	put_online_cpus();
 
-	for_each_cpu_mask(cpu, *cpu_map) {
-		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-		cleanup_workqueue_thread(cwq, cpu);
-	}
+	for_each_cpu_mask(cpu, *cpu_map)
+		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+	put_online_cpus();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -838,7 +835,6 @@
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
@@ -861,11 +857,17 @@
 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
 		case CPU_DEAD:
-			cleanup_workqueue_thread(cwq, cpu);
+			cleanup_workqueue_thread(cwq);
 			break;
 		}
 	}
 
+	switch (action) {
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		cpu_clear(cpu, cpu_populated_map);
+	}
+
 	return NOTIFY_OK;
 }
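
A minimal usage sketch matching the updated queue_work() comment above; the workqueue and work item are made up, not taken from this patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_fn(struct work_struct *work)
{
	/* runs in process context, normally on the CPU that queued it,
	 * but may run elsewhere if that CPU is hot-unplugged */
}

static int __init example_init(void)
{
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
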
 
diff --git a/lib/Makefile b/lib/Makefile
index 2d7001b..0ae4eb0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o
+	 proportions.o prio_heap.o ratelimit.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d3f5784..24c59de 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -20,8 +20,8 @@
 /*
  * Find the next set bit in a memory region.
  */
-unsigned long __find_next_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -58,14 +58,14 @@
 found_middle:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_next_bit);
+EXPORT_SYMBOL(find_next_bit);
 
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
  */
-unsigned long __find_next_zero_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -102,15 +102,14 @@
 found_middle:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_next_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
 
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
 /*
  * Find the first set bit in a memory region.
  */
-unsigned long __find_first_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -131,13 +130,12 @@
 found:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_first_bit);
+EXPORT_SYMBOL(find_first_bit);
 
 /*
  * Find the first cleared bit in a memory region.
  */
-unsigned long __find_first_zero_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -158,7 +156,7 @@
 found:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_first_zero_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef __BIG_ENDIAN
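
With this change the generic implementations are exported under their final names. A small sketch of the usual iteration idiom over a bitmap (names made up):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_NBITS	128

static DECLARE_BITMAP(example_map, EXAMPLE_NBITS);

static void example_walk(void)
{
	unsigned long bit;

	for (bit = find_first_bit(example_map, EXAMPLE_NBITS);
	     bit < EXAMPLE_NBITS;
	     bit = find_next_bit(example_map, EXAMPLE_NBITS, bit + 1))
		printk(KERN_DEBUG "bit %lu set\n", bit);
}
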
diff --git a/lib/idr.c b/lib/idr.c
index afbb0b1..8368c81 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -585,12 +585,11 @@
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
 
-static  int init_id_cache(void)
+void __init idr_init_cache(void)
 {
-	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache",
-			sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
-	return 0;
+	idr_layer_cache = kmem_cache_create("idr_layer_cache",
+				sizeof(struct idr_layer), 0, SLAB_PANIC,
+				idr_cache_ctor);
 }
 
 /**
@@ -602,7 +601,6 @@
  */
 void idr_init(struct idr *idp)
 {
-	init_id_cache();
 	memset(idp, 0, sizeof(struct idr));
 	spin_lock_init(&idp->lock);
 }
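
idr_layer_cache is now created once at boot by idr_init_cache() (with SLAB_PANIC) rather than lazily from idr_init(). For context, a sketch of the pre-get/get-new idiom that this cache backs; the idr and helper below are made up.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_idr);

/* Store ptr and return its new id, or a negative errno. */
static int example_store(void *ptr)
{
	int id, err;

	do {
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;
		err = idr_get_new(&example_idr, ptr, &id);
	} while (err == -EAGAIN);

	return err ? err : id;
}
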
diff --git a/lib/inflate.c b/lib/inflate.c
index 845f91d..9762294 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -811,6 +811,9 @@
   ll = malloc(sizeof(*ll) * (286+30));  /* literal/length and distance code lengths */
 #endif
 
+  if (ll == NULL)
+    return 1;
+
   /* make local bit buffer */
   b = bb;
   k = bk;
diff --git a/lib/iomap.c b/lib/iomap.c
index dd6ca48..37a3ea4 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -257,7 +257,7 @@
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
 	resource_size_t start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644
index 0000000..485e304
--- /dev/null
+++ b/lib/ratelimit.c
@@ -0,0 +1,51 @@
+/*
+ * ratelimit.c - Rate-limit how often a given callback may be invoked.
+ *
+ * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * __ratelimit - rate limiting
+ * @ratelimit_jiffies: minimum time in jiffies between two callbacks
+ * @ratelimit_burst: number of callbacks we do before ratelimiting
+ *
+ * This enforces a rate limit: not more than @ratelimit_burst callbacks
+ * in every @ratelimit_jiffies.
+ */
+int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+{
+	static DEFINE_SPINLOCK(ratelimit_lock);
+	static unsigned toks = 10 * 5 * HZ;
+	static unsigned long last_msg;
+	static int missed;
+	unsigned long flags;
+	unsigned long now = jiffies;
+
+	spin_lock_irqsave(&ratelimit_lock, flags);
+	toks += now - last_msg;
+	last_msg = now;
+	if (toks > (ratelimit_burst * ratelimit_jiffies))
+		toks = ratelimit_burst * ratelimit_jiffies;
+	if (toks >= ratelimit_jiffies) {
+		int lost = missed;
+
+		missed = 0;
+		toks -= ratelimit_jiffies;
+		spin_unlock_irqrestore(&ratelimit_lock, flags);
+		if (lost)
+			printk(KERN_WARNING "%s: %d messages suppressed\n",
+				__func__, lost);
+		return 1;
+	}
+	missed++;
+	spin_unlock_irqrestore(&ratelimit_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(__ratelimit);
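
A sketch of a typical caller, using the same 10-messages-per-5*HZ budget that printk rate limiting uses; the function below is made up, and the extern declaration is only there to keep the fragment self-contained.

#include <linux/jiffies.h>
#include <linux/kernel.h>

extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst);

static void example_warn(void)
{
	/* at most 10 warnings in any 5 second window */
	if (__ratelimit(5 * HZ, 10))
		printk(KERN_WARNING "example: unexpected condition\n");
}
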
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 0259228..d568894 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
 
 #define OFFSET(val,align) ((unsigned long)	\
 	                   ( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@
 	return (addr & ~mask) != 0;
 }
 
-static inline unsigned int is_span_boundary(unsigned int index,
-					    unsigned int nslots,
-					    unsigned long offset_slots,
-					    unsigned long max_slots)
-{
-	unsigned long offset = (offset_slots + index) & (max_slots - 1);
-	return offset + nslots > max_slots;
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -331,56 +323,53 @@
 	 * request and allocate a buffer from that IO TLB pool.
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
-	{
-		index = ALIGN(io_tlb_index, stride);
-		if (index >= io_tlb_nslabs)
-			index = 0;
-		wrap = index;
+	index = ALIGN(io_tlb_index, stride);
+	if (index >= io_tlb_nslabs)
+		index = 0;
+	wrap = index;
 
-		do {
-			while (is_span_boundary(index, nslots, offset_slots,
-						max_slots)) {
-				index += stride;
-				if (index >= io_tlb_nslabs)
-					index = 0;
-				if (index == wrap)
-					goto not_found;
-			}
-
-			/*
-			 * If we find a slot that indicates we have 'nslots'
-			 * number of contiguous buffers, we allocate the
-			 * buffers from that slot and mark the entries as '0'
-			 * indicating unavailable.
-			 */
-			if (io_tlb_list[index] >= nslots) {
-				int count = 0;
-
-				for (i = index; i < (int) (index + nslots); i++)
-					io_tlb_list[i] = 0;
-				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-					io_tlb_list[i] = ++count;
-				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-				/*
-				 * Update the indices to avoid searching in
-				 * the next round.
-				 */
-				io_tlb_index = ((index + nslots) < io_tlb_nslabs
-						? (index + nslots) : 0);
-
-				goto found;
-			}
+	do {
+		while (iommu_is_span_boundary(index, nslots, offset_slots,
+					      max_slots)) {
 			index += stride;
 			if (index >= io_tlb_nslabs)
 				index = 0;
-		} while (index != wrap);
+			if (index == wrap)
+				goto not_found;
+		}
 
-  not_found:
-		spin_unlock_irqrestore(&io_tlb_lock, flags);
-		return NULL;
-	}
-  found:
+		/*
+		 * If we find a slot that indicates we have 'nslots' number of
+		 * contiguous buffers, we allocate the buffers from that slot
+		 * and mark the entries as '0' indicating unavailable.
+		 */
+		if (io_tlb_list[index] >= nslots) {
+			int count = 0;
+
+			for (i = index; i < (int) (index + nslots); i++)
+				io_tlb_list[i] = 0;
+			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+				io_tlb_list[i] = ++count;
+			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+			/*
+			 * Update the indices to avoid searching in the next
+			 * round.
+			 */
+			io_tlb_index = ((index + nslots) < io_tlb_nslabs
+					? (index + nslots) : 0);
+
+			goto found;
+		}
+		index += stride;
+		if (index >= io_tlb_nslabs)
+			index = 0;
+	} while (index != wrap);
+
+not_found:
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+	return NULL;
+found:
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 
 	/*
@@ -566,7 +555,8 @@
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+			 int dir, struct dma_attrs *attrs)
 {
 	dma_addr_t dev_addr = virt_to_bus(ptr);
 	void *map;
@@ -599,6 +589,13 @@
 
 	return dev_addr;
 }
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
@@ -609,8 +606,8 @@
  * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-		     int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+			   size_t size, int dir, struct dma_attrs *attrs)
 {
 	char *dma_addr = bus_to_virt(dev_addr);
 
@@ -620,7 +617,14 @@
 	else if (dir == DMA_FROM_DEVICE)
 		dma_mark_clean(dma_addr, size);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
 
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+		     int dir)
+{
+	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -691,6 +695,8 @@
 				  SYNC_FOR_DEVICE);
 }
 
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		     int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	void *addr;
@@ -727,7 +733,8 @@
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
-				swiotlb_unmap_sg(hwdev, sgl, i, dir);
+				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+						       attrs);
 				sgl[0].dma_length = 0;
 				return 0;
 			}
@@ -738,14 +745,22 @@
 	}
 	return nelems;
 }
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+	       int dir)
+{
+	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+		       int nelems, int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -760,6 +775,14 @@
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 	}
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		 int dir)
+{
+	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index df28c17..bbf953e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -199,12 +199,13 @@
 	struct page *page;
 
 	page = alloc_pages_node(nid,
-		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+						__GFP_REPEAT|__GFP_NOWARN,
 		HUGETLB_PAGE_ORDER);
 	if (page) {
 		if (arch_prepare_hugepage(page)) {
 			__free_pages(page, HUGETLB_PAGE_ORDER);
-			return 0;
+			return NULL;
 		}
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
@@ -294,7 +295,8 @@
 	}
 	spin_unlock(&hugetlb_lock);
 
-	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
+	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+					__GFP_REPEAT|__GFP_NOWARN,
 					HUGETLB_PAGE_ORDER);
 
 	spin_lock(&hugetlb_lock);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2e0bfc93..33add96 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -26,15 +26,18 @@
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys;
 static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
+static struct kmem_cache *page_cgroup_cache;
 
 /*
  * Statistics for memory cgroup.
@@ -236,26 +239,12 @@
 				css);
 }
 
-static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
 }
 
-void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
-{
-	struct mem_cgroup *mem;
-
-	mem = mem_cgroup_from_task(p);
-	css_get(&mem->css);
-	mm->mem_cgroup = mem;
-}
-
-void mm_free_cgroup(struct mm_struct *mm)
-{
-	css_put(&mm->mem_cgroup->css);
-}
-
 static inline int page_cgroup_locked(struct page *page)
 {
 	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
@@ -287,10 +276,10 @@
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+			struct page_cgroup *pc)
 {
 	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (from)
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -301,10 +290,10 @@
 	list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+				struct page_cgroup *pc)
 {
 	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
 	if (!to) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
@@ -476,6 +465,7 @@
 	int zid = zone_idx(z);
 	struct mem_cgroup_per_zone *mz;
 
+	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
 	if (active)
 		src = &mz->active_list;
@@ -560,7 +550,7 @@
 	}
 	unlock_page_cgroup(page);
 
-	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
+	pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask);
 	if (pc == NULL)
 		goto err;
 
@@ -574,7 +564,7 @@
 		mm = &init_mm;
 
 	rcu_read_lock();
-	mem = rcu_dereference(mm->mem_cgroup);
+	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 	/*
 	 * For every charge from the cgroup, increment reference count
 	 */
@@ -602,7 +592,6 @@
 			mem_cgroup_out_of_memory(mem, gfp_mask);
 			goto out;
 		}
-		congestion_wait(WRITE, HZ/10);
 	}
 
 	pc->ref_cnt = 1;
@@ -610,7 +599,7 @@
 	pc->page = page;
 	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
-		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+		pc->flags = PAGE_CGROUP_FLAG_CACHE;
 
 	lock_page_cgroup(page);
 	if (page_get_page_cgroup(page)) {
@@ -622,14 +611,14 @@
 		 */
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
-		kfree(pc);
+		kmem_cache_free(page_cgroup_cache, pc);
 		goto retry;
 	}
 	page_assign_page_cgroup(page, pc);
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(page);
@@ -637,7 +626,7 @@
 	return 0;
 out:
 	css_put(&mem->css);
-	kfree(pc);
+	kmem_cache_free(page_cgroup_cache, pc);
 err:
 	return -ENOMEM;
 }
@@ -685,7 +674,7 @@
 	if (--(pc->ref_cnt) == 0) {
 		mz = page_cgroup_zoneinfo(pc);
 		spin_lock_irqsave(&mz->lru_lock, flags);
-		__mem_cgroup_remove_list(pc);
+		__mem_cgroup_remove_list(mz, pc);
 		spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 		page_assign_page_cgroup(page, NULL);
@@ -695,7 +684,7 @@
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
 
-		kfree(pc);
+		kmem_cache_free(page_cgroup_cache, pc);
 		return;
 	}
 
@@ -747,7 +736,7 @@
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_remove_list(pc);
+	__mem_cgroup_remove_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	page_assign_page_cgroup(page, NULL);
@@ -759,7 +748,7 @@
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	__mem_cgroup_add_list(pc);
+	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 	unlock_page_cgroup(newpage);
@@ -853,13 +842,10 @@
 	return 0;
 }
 
-static ssize_t mem_cgroup_read(struct cgroup *cont,
-			struct cftype *cft, struct file *file,
-			char __user *userbuf, size_t nbytes, loff_t *ppos)
+static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
-	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
-				cft->private, userbuf, nbytes, ppos,
-				NULL);
+	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
+				    cft->private);
 }
 
 static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
@@ -871,27 +857,25 @@
 				mem_cgroup_write_strategy);
 }
 
-static ssize_t mem_force_empty_write(struct cgroup *cont,
-				struct cftype *cft, struct file *file,
-				const char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
+static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
-	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-	int ret = mem_cgroup_force_empty(mem);
-	if (!ret)
-		ret = nbytes;
-	return ret;
+	struct mem_cgroup *mem;
+
+	mem = mem_cgroup_from_cont(cont);
+	switch (event) {
+	case RES_MAX_USAGE:
+		res_counter_reset_max(&mem->res);
+		break;
+	case RES_FAILCNT:
+		res_counter_reset_failcnt(&mem->res);
+		break;
+	}
+	return 0;
 }
 
-/*
- * Note: This should be removed if cgroup supports write-only file.
- */
-static ssize_t mem_force_empty_read(struct cgroup *cont,
-				struct cftype *cft,
-				struct file *file, char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
+static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
 {
-	return -EINVAL;
+	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
 }
 
 static const struct mem_cgroup_stat_desc {
@@ -902,9 +886,9 @@
 	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
 };
 
-static int mem_control_stat_show(struct seq_file *m, void *arg)
+static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
+				 struct cgroup_map_cb *cb)
 {
-	struct cgroup *cont = m->private;
 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
 	struct mem_cgroup_stat *stat = &mem_cont->stat;
 	int i;
@@ -914,8 +898,7 @@
 
 		val = mem_cgroup_read_stat(stat, i);
 		val *= mem_cgroup_stat_desc[i].unit;
-		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
-				(long long)val);
+		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
 	}
 	/* showing # of active pages */
 	{
@@ -925,52 +908,43 @@
 						MEM_CGROUP_ZSTAT_INACTIVE);
 		active = mem_cgroup_get_all_zonestat(mem_cont,
 						MEM_CGROUP_ZSTAT_ACTIVE);
-		seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
-		seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
+		cb->fill(cb, "active", (active) * PAGE_SIZE);
+		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
 	}
 	return 0;
 }
 
-static const struct file_operations mem_control_stat_file_operations = {
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static int mem_control_stat_open(struct inode *unused, struct file *file)
-{
-	/* XXX __d_cont */
-	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
-	file->f_op = &mem_control_stat_file_operations;
-	return single_open(file, mem_control_stat_show, cont);
-}
-
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
 		.private = RES_USAGE,
-		.read = mem_cgroup_read,
+		.read_u64 = mem_cgroup_read,
+	},
+	{
+		.name = "max_usage_in_bytes",
+		.private = RES_MAX_USAGE,
+		.trigger = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "limit_in_bytes",
 		.private = RES_LIMIT,
 		.write = mem_cgroup_write,
-		.read = mem_cgroup_read,
+		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "failcnt",
 		.private = RES_FAILCNT,
-		.read = mem_cgroup_read,
+		.trigger = mem_cgroup_reset,
+		.read_u64 = mem_cgroup_read,
 	},
 	{
 		.name = "force_empty",
-		.write = mem_force_empty_write,
-		.read = mem_force_empty_read,
+		.trigger = mem_force_empty_write,
 	},
 	{
 		.name = "stat",
-		.open = mem_control_stat_open,
+		.read_map = mem_control_stat_show,
 	},
 };
 
@@ -1010,6 +984,29 @@
 	kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+	struct mem_cgroup *mem;
+
+	if (sizeof(*mem) < PAGE_SIZE)
+		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	else
+		mem = vmalloc(sizeof(*mem));
+
+	if (mem)
+		memset(mem, 0, sizeof(*mem));
+	return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+	if (sizeof(*mem) < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+}
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -1018,17 +1015,15 @@
 
 	if (unlikely((cont->parent) == NULL)) {
 		mem = &init_mem_cgroup;
-		init_mm.mem_cgroup = mem;
-	} else
-		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
-
-	if (mem == NULL)
-		return ERR_PTR(-ENOMEM);
+		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
+	} else {
+		mem = mem_cgroup_alloc();
+		if (!mem)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	res_counter_init(&mem->res);
 
-	memset(&mem->info, 0, sizeof(mem->info));
-
 	for_each_node_state(node, N_POSSIBLE)
 		if (alloc_mem_cgroup_per_zone_info(mem, node))
 			goto free_out;
@@ -1038,7 +1033,7 @@
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
-		kfree(mem);
+		mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -1058,7 +1053,7 @@
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 
-	kfree(mem_cgroup_from_cont(cont));
+	mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
@@ -1098,10 +1093,6 @@
 	if (!thread_group_leader(p))
 		goto out;
 
-	css_get(&mem->css);
-	rcu_assign_pointer(mm->mem_cgroup, mem);
-	css_put(&old_mem->css);
-
 out:
 	mmput(mm);
 }
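
The usage, reset and stat files above now use the typed cftype callbacks instead of raw read/write methods. A sketch of the same callback shapes for a hypothetical cgroup file (all names invented):

#include <linux/cgroup.h>

static u64 example_read(struct cgroup *cont, struct cftype *cft)
{
	return 0;	/* a real handler would look the value up from cont */
}

static int example_trigger(struct cgroup *cont, unsigned int event)
{
	/* called when user space writes to the file; reset state here */
	return 0;
}

static struct cftype example_files[] = {
	{
		.name		= "counter",
		.read_u64	= example_read,
		.trigger	= example_trigger,
	},
};
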
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c4ba85c..b17dca7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -29,6 +29,8 @@
 
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
diff --git a/mm/mmap.c b/mm/mmap.c
index 677d184..fac6633 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -230,8 +230,11 @@
 	might_sleep();
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
-	if (vma->vm_file)
+	if (vma->vm_file) {
 		fput(vma->vm_file);
+		if (vma->vm_flags & VM_EXECUTABLE)
+			removed_exe_file_vma(vma->vm_mm);
+	}
 	mpol_put(vma_policy(vma));
 	kmem_cache_free(vm_area_cachep, vma);
 	return next;
@@ -623,8 +626,11 @@
 		spin_unlock(&mapping->i_mmap_lock);
 
 	if (remove_next) {
-		if (file)
+		if (file) {
 			fput(file);
+			if (next->vm_flags & VM_EXECUTABLE)
+				removed_exe_file_vma(mm);
+		}
 		mm->map_count--;
 		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
@@ -1154,6 +1160,8 @@
 		error = file->f_op->mmap(file, vma);
 		if (error)
 			goto unmap_and_free_vma;
+		if (vm_flags & VM_EXECUTABLE)
+			added_exe_file_vma(mm);
 	} else if (vm_flags & VM_SHARED) {
 		error = shmem_zero_setup(vma);
 		if (error)
@@ -1185,6 +1193,8 @@
 		mpol_put(vma_policy(vma));
 		kmem_cache_free(vm_area_cachep, vma);
 		fput(file);
+		if (vm_flags & VM_EXECUTABLE)
+			removed_exe_file_vma(mm);
 	} else {
 		vma_link(mm, vma, prev, rb_link, rb_parent);
 		file = vma->vm_file;
@@ -1817,8 +1827,11 @@
 	}
 	vma_set_policy(new, pol);
 
-	if (new->vm_file)
+	if (new->vm_file) {
 		get_file(new->vm_file);
+		if (vma->vm_flags & VM_EXECUTABLE)
+			added_exe_file_vma(mm);
+	}
 
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
@@ -2135,8 +2148,11 @@
 			new_vma->vm_start = addr;
 			new_vma->vm_end = addr + len;
 			new_vma->vm_pgoff = pgoff;
-			if (new_vma->vm_file)
+			if (new_vma->vm_file) {
 				get_file(new_vma->vm_file);
+				if (vma->vm_flags & VM_EXECUTABLE)
+					added_exe_file_vma(mm);
+			}
 			if (new_vma->vm_ops && new_vma->vm_ops->open)
 				new_vma->vm_ops->open(new_vma);
 			vma_link(mm, new_vma, prev, rb_link, rb_parent);
diff --git a/mm/nommu.c b/mm/nommu.c
index 1d32fe8..ef8c62c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -966,8 +966,13 @@
 
 	INIT_LIST_HEAD(&vma->anon_vma_node);
 	atomic_set(&vma->vm_usage, 1);
-	if (file)
+	if (file) {
 		get_file(file);
+		if (vm_flags & VM_EXECUTABLE) {
+			added_exe_file_vma(current->mm);
+			vma->vm_mm = current->mm;
+		}
+	}
 	vma->vm_file	= file;
 	vma->vm_flags	= vm_flags;
 	vma->vm_start	= addr;
@@ -1022,8 +1027,11 @@
 	up_write(&nommu_vma_sem);
 	kfree(vml);
 	if (vma) {
-		if (vma->vm_file)
+		if (vma->vm_file) {
 			fput(vma->vm_file);
+			if (vma->vm_flags & VM_EXECUTABLE)
+				removed_exe_file_vma(vma->vm_mm);
+		}
 		kfree(vma);
 	}
 	return ret;
@@ -1053,7 +1061,7 @@
 /*
  * handle mapping disposal for uClinux
  */
-static void put_vma(struct vm_area_struct *vma)
+static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
 	if (vma) {
 		down_write(&nommu_vma_sem);
@@ -1075,8 +1083,11 @@
 			realalloc -= kobjsize(vma);
 			askedalloc -= sizeof(*vma);
 
-			if (vma->vm_file)
+			if (vma->vm_file) {
 				fput(vma->vm_file);
+				if (vma->vm_flags & VM_EXECUTABLE)
+					removed_exe_file_vma(mm);
+			}
 			kfree(vma);
 		}
 
@@ -1113,7 +1124,7 @@
  found:
 	vml = *parent;
 
-	put_vma(vml->vma);
+	put_vma(mm, vml->vma);
 
 	*parent = vml->next;
 	realalloc -= kobjsize(vml);
@@ -1158,7 +1169,7 @@
 
 		while ((tmp = mm->context.vmlist)) {
 			mm->context.vmlist = tmp->next;
-			put_vma(tmp->vma);
+			put_vma(mm, tmp->vma);
 
 			realalloc -= kobjsize(tmp);
 			askedalloc -= sizeof(*tmp);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1cf4f0..0a502e9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1461,7 +1461,8 @@
 	struct task_struct *p = current;
 	int do_retry;
 	int alloc_flags;
-	int did_some_progress;
+	unsigned long did_some_progress;
+	unsigned long pages_reclaimed = 0;
 
 	might_sleep_if(wait);
 
@@ -1611,14 +1612,26 @@
 	 * Don't let big-order allocations loop unless the caller explicitly
 	 * requests that.  Wait for some write requests to complete then retry.
 	 *
-	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
-	 * <= 3, but that may not be true in other implementations.
+	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+	 * means __GFP_NOFAIL, but that may not be true in other
+	 * implementations.
+	 *
+	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+	 * specified, then we retry until we no longer reclaim any pages
+	 * (above), or we've reclaimed an order of pages at least as
+	 * large as the allocation's order. In both cases, if the
+	 * allocation still fails, we stop retrying.
 	 */
+	pages_reclaimed += did_some_progress;
 	do_retry = 0;
 	if (!(gfp_mask & __GFP_NORETRY)) {
-		if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
-						(gfp_mask & __GFP_REPEAT))
+		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
 			do_retry = 1;
+		} else {
+			if (gfp_mask & __GFP_REPEAT &&
+				pages_reclaimed < (1 << order))
+					do_retry = 1;
+		}
 		if (gfp_mask & __GFP_NOFAIL)
 			do_retry = 1;
 	}
@@ -2524,7 +2537,9 @@
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
+	struct zone *z;
 
+	z = &NODE_DATA(nid)->node_zones[zone];
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		/*
 		 * There can be holes in boot-time mem_map[]s
@@ -2542,7 +2557,6 @@
 		init_page_count(page);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
-
 		/*
 		 * Mark the block movable so that blocks are reserved for
 		 * movable at startup. This will force kernel allocations
@@ -2551,8 +2565,15 @@
 		 * kernel allocations are made. Later some blocks near
 		 * the start are marked MIGRATE_RESERVE by
 		 * setup_zone_migrate_reserve()
+		 *
+		 * The pageblock bitmap is created for the zone's valid pfn
+		 * range, but the memmap can be created for invalid pages
+		 * (for alignment). Check here so that we do not call
+		 * set_pageblock_migratetype() on a pfn outside the zone.
 		 */
-		if ((pfn & (pageblock_nr_pages-1)))
+		if ((z->zone_start_pfn <= pfn)
+		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
 		INIT_LIST_HEAD(&page->lru);
@@ -4464,6 +4485,8 @@
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
+	VM_BUG_ON(pfn < zone->zone_start_pfn);
+	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
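
To make the retry cut-off above concrete with made-up numbers: an order-5 __GFP_REPEAT request needs 1 << 5 = 32 pages. If successive reclaim passes free 20 and then 15 pages, pages_reclaimed goes 20 -> 35; since 35 >= 32, do_retry stays 0 after the second pass and the allocation fails instead of looping indefinitely.
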
diff --git a/mm/slob.c b/mm/slob.c
index e2c3c0e..6038cba 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -533,7 +533,8 @@
 {
 	struct kmem_cache *c;
 
-	c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+	c = slob_alloc(sizeof(struct kmem_cache),
+		flags, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
diff --git a/mm/slub.c b/mm/slub.c
index 38914bc..b145e79 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,25 +149,6 @@
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-#if PAGE_SHIFT <= 12
-
-/*
- * Small page size. Make sure that we do not fragment memory
- */
-#define DEFAULT_MAX_ORDER 1
-#define DEFAULT_MIN_OBJECTS 4
-
-#else
-
-/*
- * Large page machines are customarily able to handle larger
- * page orders.
- */
-#define DEFAULT_MAX_ORDER 2
-#define DEFAULT_MIN_OBJECTS 8
-
-#endif
-
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -204,8 +185,6 @@
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
-#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
-#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -296,7 +275,7 @@
 		return 1;
 
 	base = page_address(page);
-	if (object < base || object >= base + s->objects * s->size ||
+	if (object < base || object >= base + page->objects * s->size ||
 		(object - base) % s->size) {
 		return 0;
 	}
@@ -322,8 +301,8 @@
 }
 
 /* Loop over all objects in a slab */
-#define for_each_object(__p, __s, __addr) \
-	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
+#define for_each_object(__p, __s, __addr, __objects) \
+	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
 /* Scan freelist */
@@ -336,6 +315,26 @@
 	return (p - addr) / s->size;
 }
 
+static inline struct kmem_cache_order_objects oo_make(int order,
+						unsigned long size)
+{
+	struct kmem_cache_order_objects x = {
+		(order << 16) + (PAGE_SIZE << order) / size
+	};
+
+	return x;
+}
+
+static inline int oo_order(struct kmem_cache_order_objects x)
+{
+	return x.x >> 16;
+}
+
+static inline int oo_objects(struct kmem_cache_order_objects x)
+{
+	return x.x & ((1 << 16) - 1);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 /*
  * Debug settings:
@@ -446,8 +445,8 @@
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+		page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -647,6 +646,7 @@
 				p + off, POISON_INUSE, s->size - off);
 }
 
+/* Check the pad bytes at the end of a slab page */
 static int slab_pad_check(struct kmem_cache *s, struct page *page)
 {
 	u8 *start;
@@ -659,20 +659,20 @@
 		return 1;
 
 	start = page_address(page);
-	end = start + (PAGE_SIZE << s->order);
-	length = s->objects * s->size;
-	remainder = end - (start + length);
+	length = (PAGE_SIZE << compound_order(page));
+	end = start + length;
+	remainder = length % s->size;
 	if (!remainder)
 		return 1;
 
-	fault = check_bytes(start + length, POISON_INUSE, remainder);
+	fault = check_bytes(end - remainder, POISON_INUSE, remainder);
 	if (!fault)
 		return 1;
 	while (end > fault && end[-1] == POISON_INUSE)
 		end--;
 
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-	print_section("Padding", start, length);
+	print_section("Padding", end - remainder, remainder);
 
 	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
 	return 0;
@@ -734,15 +734,24 @@
 
 static int check_slab(struct kmem_cache *s, struct page *page)
 {
+	int maxobj;
+
 	VM_BUG_ON(!irqs_disabled());
 
 	if (!PageSlab(page)) {
 		slab_err(s, page, "Not a valid slab page");
 		return 0;
 	}
-	if (page->inuse > s->objects) {
+
+	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+	if (page->objects > maxobj) {
+		slab_err(s, page, "objects %u > max %u",
+			s->name, page->objects, maxobj);
+		return 0;
+	}
+	if (page->inuse > page->objects) {
 		slab_err(s, page, "inuse %u > max %u",
-			s->name, page->inuse, s->objects);
+			s->name, page->inuse, page->objects);
 		return 0;
 	}
 	/* Slab_pad_check fixes things up after itself */
@@ -759,8 +768,9 @@
 	int nr = 0;
 	void *fp = page->freelist;
 	void *object = NULL;
+	unsigned long max_objects;
 
-	while (fp && nr <= s->objects) {
+	while (fp && nr <= page->objects) {
 		if (fp == search)
 			return 1;
 		if (!check_valid_pointer(s, page, fp)) {
@@ -772,7 +782,7 @@
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
 				page->freelist = NULL;
-				page->inuse = s->objects;
+				page->inuse = page->objects;
 				slab_fix(s, "Freelist cleared");
 				return 0;
 			}
@@ -783,10 +793,20 @@
 		nr++;
 	}
 
-	if (page->inuse != s->objects - nr) {
+	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+	if (max_objects > 65535)
+		max_objects = 65535;
+
+	if (page->objects != max_objects) {
+		slab_err(s, page, "Wrong number of objects. Found %d but "
+			"should be %d", page->objects, max_objects);
+		page->objects = max_objects;
+		slab_fix(s, "Number of objects adjusted.");
+	}
+	if (page->inuse != page->objects - nr) {
 		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", page->inuse, s->objects - nr);
-		page->inuse = s->objects - nr;
+			"counted were %d", page->inuse, page->objects - nr);
+		page->inuse = page->objects - nr;
 		slab_fix(s, "Object count adjusted.");
 	}
 	return search == NULL;
@@ -840,7 +860,7 @@
 	return atomic_long_read(&n->nr_slabs);
 }
 
-static inline void inc_slabs_node(struct kmem_cache *s, int node)
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
@@ -850,14 +870,17 @@
 	 * dilemma by deferring the increment of the count during
 	 * bootstrap (see early_kmem_cache_node_alloc).
 	 */
-	if (!NUMA_BUILD || n)
+	if (!NUMA_BUILD || n) {
 		atomic_long_inc(&n->nr_slabs);
+		atomic_long_add(objects, &n->total_objects);
+	}
 }
-static inline void dec_slabs_node(struct kmem_cache *s, int node)
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
 	atomic_long_dec(&n->nr_slabs);
+	atomic_long_sub(objects, &n->total_objects);
 }
 
 /* Object debug checks for alloc/free paths */
@@ -905,7 +928,7 @@
 		 * as used avoids touching the remaining objects.
 		 */
 		slab_fix(s, "Marking all objects used");
-		page->inuse = s->objects;
+		page->inuse = page->objects;
 		page->freelist = NULL;
 	}
 	return 0;
@@ -1055,31 +1078,52 @@
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
+static inline void inc_slabs_node(struct kmem_cache *s, int node,
+							int objects) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node,
+							int objects) {}
 #endif
+
 /*
  * Slab allocation and freeing
  */
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+					struct kmem_cache_order_objects oo)
+{
+	int order = oo_order(oo);
+
+	if (node == -1)
+		return alloc_pages(flags, order);
+	else
+		return alloc_pages_node(node, flags, order);
+}
+
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int pages = 1 << s->order;
+	struct kmem_cache_order_objects oo = s->oo;
 
 	flags |= s->allocflags;
 
-	if (node == -1)
-		page = alloc_pages(flags, s->order);
-	else
-		page = alloc_pages_node(node, flags, s->order);
+	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+									oo);
+	if (unlikely(!page)) {
+		oo = s->min;
+		 * Allocation may have failed due to fragmentation.
+		 * Try a lower-order allocation if possible.
+		 * Try a lower order alloc if possible
+		 */
+		page = alloc_slab_page(flags, node, oo);
+		if (!page)
+			return NULL;
 
-	if (!page)
-		return NULL;
-
+		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+	}
+	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		pages);
+		1 << oo_order(oo));
 
 	return page;
 }
@@ -1106,7 +1150,7 @@
 	if (!page)
 		goto out;
 
-	inc_slabs_node(s, page_to_nid(page));
+	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
 	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1116,10 +1160,10 @@
 	start = page_address(page);
 
 	if (unlikely(s->flags & SLAB_POISON))
-		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+		memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
 
 	last = start;
-	for_each_object(p, s, start) {
+	for_each_object(p, s, start, page->objects) {
 		setup_object(s, page, last);
 		set_freepointer(s, last, p);
 		last = p;
@@ -1135,13 +1179,15 @@
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
+	int order = compound_order(page);
+	int pages = 1 << order;
 
 	if (unlikely(SlabDebug(page))) {
 		void *p;
 
 		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page))
+		for_each_object(p, s, page_address(page),
+						page->objects)
 			check_object(s, page, p, 0);
 		ClearSlabDebug(page);
 	}
@@ -1153,7 +1199,7 @@
 
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
-	__free_pages(page, s->order);
+	__free_pages(page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -1179,7 +1225,7 @@
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-	dec_slabs_node(s, page_to_nid(page));
+	dec_slabs_node(s, page_to_nid(page), page->objects);
 	free_slab(s, page);
 }
 
@@ -1515,7 +1561,7 @@
 		goto debug;
 
 	c->freelist = object[c->offset];
-	c->page->inuse = s->objects;
+	c->page->inuse = c->page->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
 unlock_out:
@@ -1552,27 +1598,6 @@
 		c->page = new;
 		goto load_freelist;
 	}
-
-	/*
-	 * No memory available.
-	 *
-	 * If the slab uses higher order allocs but the object is
-	 * smaller than a page size then we can fallback in emergencies
-	 * to the page allocator via kmalloc_large. The page allocator may
-	 * have failed to obtain a higher order page and we can try to
-	 * allocate a single page if the object fits into a single page.
-	 * That is only possible if certain conditions are met that are being
-	 * checked when a slab is created.
-	 */
-	if (!(gfpflags & __GFP_NORETRY) &&
-				(s->flags & __PAGE_ALLOC_FALLBACK)) {
-		if (gfpflags & __GFP_WAIT)
-			local_irq_enable();
-		object = kmalloc_large(s->objsize, gfpflags);
-		if (gfpflags & __GFP_WAIT)
-			local_irq_disable();
-		return object;
-	}
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1773,8 +1798,8 @@
  * take the list_lock.
  */
 static int slub_min_order;
-static int slub_max_order = DEFAULT_MAX_ORDER;
-static int slub_min_objects = DEFAULT_MIN_OBJECTS;
+static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static int slub_min_objects;
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
@@ -1789,7 +1814,7 @@
  * system components. Generally order 0 allocations should be preferred since
  * order 0 does not cause fragmentation in the page allocator. Larger objects
  * be problematic to put into order 0 slabs because there may be too much
- * unused space left. We go to a higher order if more than 1/8th of the slab
+ * unused space left. We go to a higher order if more than 1/16th of the slab
  * would be wasted.
  *
  * In order to reach satisfactory performance we must ensure that a minimum
@@ -1814,6 +1839,9 @@
 	int rem;
 	int min_order = slub_min_order;
 
+	if ((PAGE_SIZE << min_order) / size > 65535)
+		return get_order(size * 65535) - 1;
+
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
 			order <= max_order; order++) {
@@ -1848,8 +1876,10 @@
 	 * we reduce the minimum objects required in a slab.
 	 */
 	min_objects = slub_min_objects;
+	if (!min_objects)
+		min_objects = 4 * (fls(nr_cpu_ids) + 1);
 	while (min_objects > 1) {
-		fraction = 8;
+		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
 						slub_max_order, fraction);
@@ -2091,7 +2121,7 @@
 	init_tracking(kmalloc_caches, n);
 #endif
 	init_kmem_cache_node(n);
-	inc_slabs_node(kmalloc_caches, node);
+	inc_slabs_node(kmalloc_caches, node, page->objects);
 
 	/*
 	 * lockdep requires consistent irq usage for each lock
@@ -2167,11 +2197,12 @@
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
 	unsigned long size = s->objsize;
 	unsigned long align = s->align;
+	int order;
 
 	/*
 	 * Round up object size to the next word boundary. We can only
@@ -2255,26 +2286,16 @@
 	 */
 	size = ALIGN(size, align);
 	s->size = size;
+	if (forced_order >= 0)
+		order = forced_order;
+	else
+		order = calculate_order(size);
 
-	if ((flags & __KMALLOC_CACHE) &&
-			PAGE_SIZE / size < slub_min_objects) {
-		/*
-		 * Kmalloc cache that would not have enough objects in
-		 * an order 0 page. Kmalloc slabs can fallback to
-		 * page allocator order 0 allocs so take a reasonably large
-		 * order that will allows us a good number of objects.
-		 */
-		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
-		s->flags |= __PAGE_ALLOC_FALLBACK;
-		s->allocflags |= __GFP_NOWARN;
-	} else
-		s->order = calculate_order(size);
-
-	if (s->order < 0)
+	if (order < 0)
 		return 0;
 
 	s->allocflags = 0;
-	if (s->order)
+	if (order)
 		s->allocflags |= __GFP_COMP;
 
 	if (s->flags & SLAB_CACHE_DMA)
@@ -2286,9 +2307,12 @@
 	/*
 	 * Determine the number of objects per slab
 	 */
-	s->objects = (PAGE_SIZE << s->order) / size;
+	s->oo = oo_make(order, size);
+	s->min = oo_make(get_order(size), size);
+	if (oo_objects(s->oo) > oo_objects(s->max))
+		s->max = s->oo;
 
-	return !!s->objects;
+	return !!oo_objects(s->oo);
 
 }
 
@@ -2304,7 +2328,7 @@
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 
-	if (!calculate_sizes(s))
+	if (!calculate_sizes(s, -1))
 		goto error;
 
 	s->refcount = 1;
@@ -2321,7 +2345,7 @@
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, s->order,
+			s->name, (unsigned long)size, s->size, oo_order(s->oo),
 			s->offset, flags);
 	return 0;
 }
@@ -2367,26 +2391,52 @@
 }
 EXPORT_SYMBOL(kmem_cache_name);
 
-/*
- * Attempt to free all slabs on a node. Return the number of slabs we
- * were unable to free.
- */
-static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
-			struct list_head *list)
+static void list_slab_objects(struct kmem_cache *s, struct page *page,
+							const char *text)
 {
-	int slabs_inuse = 0;
+#ifdef CONFIG_SLUB_DEBUG
+	void *addr = page_address(page);
+	void *p;
+	DECLARE_BITMAP(map, page->objects);
+
+	bitmap_zero(map, page->objects);
+	slab_err(s, page, "%s", text);
+	slab_lock(page);
+	for_each_free_object(p, s, page->freelist)
+		set_bit(slab_index(p, s, addr), map);
+
+	for_each_object(p, s, addr, page->objects) {
+
+		if (!test_bit(slab_index(p, s, addr), map)) {
+			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
+							p, p - addr);
+			print_tracking(s, p);
+		}
+	}
+	slab_unlock(page);
+#endif
+}
+
+/*
+ * Attempt to free all partial slabs on a node.
+ */
+static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+{
 	unsigned long flags;
 	struct page *page, *h;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, h, list, lru)
+	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			list_del(&page->lru);
 			discard_slab(s, page);
-		} else
-			slabs_inuse++;
+			n->nr_partial--;
+		} else {
+			list_slab_objects(s, page,
+				"Objects remaining on kmem_cache_close()");
+		}
+	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
-	return slabs_inuse;
 }
 
 /*
@@ -2403,8 +2453,8 @@
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		n->nr_partial -= free_list(s, n, &n->partial);
-		if (slabs_node(s, node))
+		free_partial(s, n);
+		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
@@ -2422,8 +2472,11 @@
 	if (!s->refcount) {
 		list_del(&s->list);
 		up_write(&slub_lock);
-		if (kmem_cache_close(s))
-			WARN_ON(1);
+		if (kmem_cache_close(s)) {
+			printk(KERN_ERR "SLUB %s: %s called for cache that "
+				"still has objects.\n", s->name, __func__);
+			dump_stack();
+		}
 		sysfs_slab_remove(s);
 	} else
 		up_write(&slub_lock);
@@ -2482,7 +2535,7 @@
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags | __KMALLOC_CACHE, NULL))
+								flags, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2730,8 +2783,9 @@
 	struct kmem_cache_node *n;
 	struct page *page;
 	struct page *t;
+	int objects = oo_objects(s->max);
 	struct list_head *slabs_by_inuse =
-		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
+		kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
 	unsigned long flags;
 
 	if (!slabs_by_inuse)
@@ -2744,7 +2798,7 @@
 		if (!n->nr_partial)
 			continue;
 
-		for (i = 0; i < s->objects; i++)
+		for (i = 0; i < objects; i++)
 			INIT_LIST_HEAD(slabs_by_inuse + i);
 
 		spin_lock_irqsave(&n->list_lock, flags);
@@ -2776,7 +2830,7 @@
 		 * Rebuild the partial list with the slabs filled up most
 		 * first and the least used slabs at the end.
 		 */
-		for (i = s->objects - 1; i >= 0; i--)
+		for (i = objects - 1; i >= 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
 		spin_unlock_irqrestore(&n->list_lock, flags);
@@ -2924,7 +2978,7 @@
 	kmalloc_caches[0].refcount = -1;
 	caches++;
 
-	hotplug_memory_notifier(slab_memory_callback, 1);
+	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 #endif
 
 	/* Able to allocate the per node structures */
@@ -2997,9 +3051,6 @@
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if ((s->flags & __PAGE_ALLOC_FALLBACK))
-		return 1;
-
 	if (s->ctor)
 		return 1;
 
@@ -3192,7 +3243,8 @@
 }
 
 #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
 {
 	unsigned long flags;
 	unsigned long x = 0;
@@ -3200,10 +3252,25 @@
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
+		x += get_count(page);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+
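+/* Per-page counters used as count_partial() callbacks. */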
+static int count_inuse(struct page *page)
+{
+	return page->inuse;
+}
+
+static int count_total(struct page *page)
+{
+	return page->objects;
+}
+
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
@@ -3218,7 +3285,7 @@
 		return 0;
 
 	/* Now we know that a valid freelist exists */
-	bitmap_zero(map, s->objects);
+	bitmap_zero(map, page->objects);
 
 	for_each_free_object(p, s, page->freelist) {
 		set_bit(slab_index(p, s, addr), map);
@@ -3226,7 +3293,7 @@
 			return 0;
 	}
 
-	for_each_object(p, s, addr)
+	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
 			if (!check_object(s, page, p, 1))
 				return 0;
@@ -3292,7 +3359,7 @@
 {
 	int node;
 	unsigned long count = 0;
-	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
 				sizeof(unsigned long), GFP_KERNEL);
 
 	if (!map)
@@ -3495,14 +3562,14 @@
 		struct page *page, enum track_item alloc)
 {
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, s->objects);
+	DECLARE_BITMAP(map, page->objects);
 	void *p;
 
-	bitmap_zero(map, s->objects);
+	bitmap_zero(map, page->objects);
 	for_each_free_object(p, s, page->freelist)
 		set_bit(slab_index(p, s, addr), map);
 
-	for_each_object(p, s, addr)
+	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
 			add_location(t, s, get_track(s, p, alloc));
 }
@@ -3592,22 +3659,23 @@
 }
 
 enum slab_stat_type {
-	SL_FULL,
-	SL_PARTIAL,
-	SL_CPU,
-	SL_OBJECTS
+	SL_ALL,			/* All slabs */
+	SL_PARTIAL,		/* Only partially allocated slabs */
+	SL_CPU,			/* Only slabs used for cpu caches */
+	SL_OBJECTS,		/* Determine allocated objects not slabs */
+	SL_TOTAL		/* Determine object capacity not slabs */
 };
 
-#define SO_FULL		(1 << SL_FULL)
+#define SO_ALL		(1 << SL_ALL)
 #define SO_PARTIAL	(1 << SL_PARTIAL)
 #define SO_CPU		(1 << SL_CPU)
 #define SO_OBJECTS	(1 << SL_OBJECTS)
+#define SO_TOTAL	(1 << SL_TOTAL)
 
 static ssize_t show_slab_objects(struct kmem_cache *s,
 			    char *buf, unsigned long flags)
 {
 	unsigned long total = 0;
-	int cpu;
 	int node;
 	int x;
 	unsigned long *nodes;
@@ -3618,56 +3686,60 @@
 		return -ENOMEM;
 	per_cpu = nodes + nr_node_ids;
 
-	for_each_possible_cpu(cpu) {
-		struct page *page;
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+	if (flags & SO_CPU) {
+		int cpu;
 
-		if (!c)
-			continue;
+		for_each_possible_cpu(cpu) {
+			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
-		page = c->page;
-		node = c->node;
-		if (node < 0)
-			continue;
-		if (page) {
-			if (flags & SO_CPU) {
-				if (flags & SO_OBJECTS)
-					x = page->inuse;
+			if (!c || c->node < 0)
+				continue;
+
+			if (c->page) {
+				if (flags & SO_TOTAL)
+					x = c->page->objects;
+				else if (flags & SO_OBJECTS)
+					x = c->page->inuse;
 				else
 					x = 1;
+
 				total += x;
-				nodes[node] += x;
+				nodes[c->node] += x;
 			}
-			per_cpu[node]++;
+			per_cpu[c->node]++;
 		}
 	}
 
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		struct kmem_cache_node *n = get_node(s, node);
+	if (flags & SO_ALL) {
+		for_each_node_state(node, N_NORMAL_MEMORY) {
+			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_PARTIAL) {
-			if (flags & SO_OBJECTS)
-				x = count_partial(n);
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
+			else
+				x = atomic_long_read(&n->nr_slabs);
+			total += x;
+			nodes[node] += x;
+		}
+
+	} else if (flags & SO_PARTIAL) {
+		for_each_node_state(node, N_NORMAL_MEMORY) {
+			struct kmem_cache_node *n = get_node(s, node);
+
+			if (flags & SO_TOTAL)
+				x = count_partial(n, count_total);
+			else if (flags & SO_OBJECTS)
+				x = count_partial(n, count_inuse);
 			else
 				x = n->nr_partial;
 			total += x;
 			nodes[node] += x;
 		}
-
-		if (flags & SO_FULL) {
-			int full_slabs = atomic_long_read(&n->nr_slabs)
-					- per_cpu[node]
-					- n->nr_partial;
-
-			if (flags & SO_OBJECTS)
-				x = full_slabs * s->objects;
-			else
-				x = full_slabs;
-			total += x;
-			nodes[node] += x;
-		}
 	}
-
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
 	for_each_node_state(node, N_NORMAL_MEMORY)
@@ -3682,14 +3754,6 @@
 static int any_slab_objects(struct kmem_cache *s)
 {
 	int node;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-		if (c && c->page)
-			return 1;
-	}
 
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3697,7 +3761,7 @@
 		if (!n)
 			continue;
 
-		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
+		if (atomic_long_read(&n->total_objects))
 			return 1;
 	}
 	return 0;
@@ -3739,15 +3803,27 @@
 
 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->objects);
+	return sprintf(buf, "%d\n", oo_objects(s->oo));
 }
 SLAB_ATTR_RO(objs_per_slab);
 
+static ssize_t order_store(struct kmem_cache *s,
+				const char *buf, size_t length)
+{
+	int order = simple_strtoul(buf, NULL, 10);
+
+	if (order > slub_max_order || order < slub_min_order)
+		return -EINVAL;
+
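+	/* Recompute the slab layout with the requested order forced. */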
+	calculate_sizes(s, order);
+	return length;
+}
+
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->order);
+	return sprintf(buf, "%d\n", oo_order(s->oo));
 }
-SLAB_ATTR_RO(order);
+SLAB_ATTR(order);
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
@@ -3768,7 +3844,7 @@
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+	return show_slab_objects(s, buf, SO_ALL);
 }
 SLAB_ATTR_RO(slabs);
 
@@ -3786,10 +3862,22 @@
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
+static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
+}
+SLAB_ATTR_RO(objects_partial);
+
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
+{
+	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
+}
+SLAB_ATTR_RO(total_objects);
+
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
@@ -3869,7 +3957,7 @@
 	s->flags &= ~SLAB_RED_ZONE;
 	if (buf[0] == '1')
 		s->flags |= SLAB_RED_ZONE;
-	calculate_sizes(s);
+	calculate_sizes(s, -1);
 	return length;
 }
 SLAB_ATTR(red_zone);
@@ -3888,7 +3976,7 @@
 	s->flags &= ~SLAB_POISON;
 	if (buf[0] == '1')
 		s->flags |= SLAB_POISON;
-	calculate_sizes(s);
+	calculate_sizes(s, -1);
 	return length;
 }
 SLAB_ATTR(poison);
@@ -3907,7 +3995,7 @@
 	s->flags &= ~SLAB_STORE_USER;
 	if (buf[0] == '1')
 		s->flags |= SLAB_STORE_USER;
-	calculate_sizes(s);
+	calculate_sizes(s, -1);
 	return length;
 }
 SLAB_ATTR(store_user);
@@ -4038,7 +4126,7 @@
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4047,6 +4135,8 @@
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
 	&objects_attr.attr,
+	&objects_partial_attr.attr,
+	&total_objects_attr.attr,
 	&slabs_attr.attr,
 	&partial_attr.attr,
 	&cpu_slabs_attr.attr,
@@ -4089,6 +4179,7 @@
 	&deactivate_to_head_attr.attr,
 	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
+	&order_fallback_attr.attr,
 #endif
 	NULL
 };
@@ -4375,7 +4466,8 @@
 	unsigned long nr_partials = 0;
 	unsigned long nr_slabs = 0;
 	unsigned long nr_inuse = 0;
-	unsigned long nr_objs;
+	unsigned long nr_objs = 0;
+	unsigned long nr_free = 0;
 	struct kmem_cache *s;
 	int node;
 
@@ -4389,14 +4481,15 @@
 
 		nr_partials += n->nr_partial;
 		nr_slabs += atomic_long_read(&n->nr_slabs);
-		nr_inuse += count_partial(n);
+		nr_objs += atomic_long_read(&n->total_objects);
+		nr_free += count_partial(n, count_free);
 	}
 
-	nr_objs = nr_slabs * s->objects;
-	nr_inuse += (nr_slabs - nr_partials) * s->objects;
+	nr_inuse = nr_objs - nr_free;
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
-		   nr_objs, s->size, s->objects, (1 << s->order));
+		   nr_objs, s->size, oo_objects(s->oo),
+		   (1 << oo_order(s->oo)));
 	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
 	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
 		   0UL);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 67051be..bd1bb59 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1426,11 +1426,7 @@
 
 static int __init procswaps_init(void)
 {
-	struct proc_dir_entry *entry;
-
-	entry = create_proc_entry("swaps", 0, NULL);
-	if (entry)
-		entry->proc_fops = &proc_swaps_operations;
+	proc_create("swaps", 0, NULL, &proc_swaps_operations);
 	return 0;
 }
 __initcall(procswaps_init);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eceac9f..12e8627 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1299,6 +1299,9 @@
  * hope that some of these pages can be written.  But if the allocating task
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
+ *
+ * returns:	0, if no pages reclaimed
+ *		else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc)
@@ -1347,7 +1350,7 @@
 		}
 		total_scanned += sc->nr_scanned;
 		if (nr_reclaimed >= sc->swap_cluster_max) {
-			ret = 1;
+			ret = nr_reclaimed;
 			goto out;
 		}
 
@@ -1370,7 +1373,7 @@
 	}
 	/* top priority shrink_caches still had more to do? don't OOM, then */
 	if (!sc->all_unreclaimable && scan_global_lru(sc))
-		ret = 1;
+		ret = nr_reclaimed;
 out:
 	/*
 	 * Now that we've scanned all the zones at this priority level, note
diff --git a/net/core/dev.c b/net/core/dev.c
index e1df1ab..ed49da5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1524,7 +1524,7 @@
 	if (!segs)
 		return 0;
 
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		return PTR_ERR(segs);
 
 	skb->next = segs;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b5270..24eca23 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1234,7 +1234,7 @@
 		segs = ops->gso_segment(skb, features);
 	rcu_read_unlock();
 
-	if (!segs || unlikely(IS_ERR(segs)))
+	if (!segs || IS_ERR(segs))
 		goto out;
 
 	skb = segs;
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index a4f1439..75497e5 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -9,6 +9,7 @@
  */
 
 #include "irnet_irda.h"		/* Private header */
+#include <linux/seq_file.h>
 
 /*
  * PPP disconnect work: we need to make sure we're in
@@ -1717,34 +1718,23 @@
  */
 
 #ifdef CONFIG_PROC_FS
-/*------------------------------------------------------------------*/
-/*
- * Function irnet_proc_read (buf, start, offset, len, unused)
- *
- *    Give some info to the /proc file system
- */
 static int
-irnet_proc_read(char *	buf,
-		char **	start,
-		off_t	offset,
-		int	len)
+irnet_proc_show(struct seq_file *m, void *v)
 {
   irnet_socket *	self;
   char *		state;
   int			i = 0;
 
-  len = 0;
-
   /* Get the IrNET server information... */
-  len += sprintf(buf+len, "IrNET server - ");
-  len += sprintf(buf+len, "IrDA state: %s, ",
+  seq_printf(m, "IrNET server - ");
+  seq_printf(m, "IrDA state: %s, ",
 		 (irnet_server.running ? "running" : "dead"));
-  len += sprintf(buf+len, "stsap_sel: %02x, ", irnet_server.s.stsap_sel);
-  len += sprintf(buf+len, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel);
+  seq_printf(m, "stsap_sel: %02x, ", irnet_server.s.stsap_sel);
+  seq_printf(m, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel);
 
   /* Do we need to continue ? */
   if(!irnet_server.running)
-    return len;
+    return 0;
 
   /* Protect access to the instance list */
   spin_lock_bh(&irnet_server.spinlock);
@@ -1754,23 +1744,23 @@
   while(self != NULL)
     {
       /* Start printing info about the socket. */
-      len += sprintf(buf+len, "\nIrNET socket %d - ", i++);
+      seq_printf(m, "\nIrNET socket %d - ", i++);
 
       /* First, get the requested configuration */
-      len += sprintf(buf+len, "Requested IrDA name: \"%s\", ", self->rname);
-      len += sprintf(buf+len, "daddr: %08x, ", self->rdaddr);
-      len += sprintf(buf+len, "saddr: %08x\n", self->rsaddr);
+      seq_printf(m, "Requested IrDA name: \"%s\", ", self->rname);
+      seq_printf(m, "daddr: %08x, ", self->rdaddr);
+      seq_printf(m, "saddr: %08x\n", self->rsaddr);
 
       /* Second, get all the PPP info */
-      len += sprintf(buf+len, "	PPP state: %s",
+      seq_printf(m, "	PPP state: %s",
 		 (self->ppp_open ? "registered" : "unregistered"));
       if(self->ppp_open)
 	{
-	  len += sprintf(buf+len, ", unit: ppp%d",
+	  seq_printf(m, ", unit: ppp%d",
 			 ppp_unit_number(&self->chan));
-	  len += sprintf(buf+len, ", channel: %d",
+	  seq_printf(m, ", channel: %d",
 			 ppp_channel_index(&self->chan));
-	  len += sprintf(buf+len, ", mru: %d",
+	  seq_printf(m, ", mru: %d",
 			 self->mru);
 	  /* Maybe add self->flags ? Later... */
 	}
@@ -1789,10 +1779,10 @@
 	      state = "weird";
 	    else
 	      state = "idle";
-      len += sprintf(buf+len, "\n	IrDA state: %s, ", state);
-      len += sprintf(buf+len, "daddr: %08x, ", self->daddr);
-      len += sprintf(buf+len, "stsap_sel: %02x, ", self->stsap_sel);
-      len += sprintf(buf+len, "dtsap_sel: %02x\n", self->dtsap_sel);
+      seq_printf(m, "\n	IrDA state: %s, ", state);
+      seq_printf(m, "daddr: %08x, ", self->daddr);
+      seq_printf(m, "stsap_sel: %02x, ", self->stsap_sel);
+      seq_printf(m, "dtsap_sel: %02x\n", self->dtsap_sel);
 
       /* Next socket, please... */
       self = (irnet_socket *) hashbin_get_next(irnet_server.list);
@@ -1801,8 +1791,21 @@
   /* Spin lock end */
   spin_unlock_bh(&irnet_server.spinlock);
 
-  return len;
+  return 0;
 }
+
+static int irnet_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irnet_proc_show, NULL);
+}
+
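+/* Expose irnet_proc_show() through the single_open() seq_file helpers. */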
+static const struct file_operations irnet_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= irnet_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
 #endif /* PROC_FS */
 
 
@@ -1841,7 +1844,7 @@
 
 #ifdef CONFIG_PROC_FS
   /* Add a /proc file for irnet infos */
-  create_proc_info_entry("irnet", 0, proc_irda, irnet_proc_read);
+  proc_create("irnet", 0, proc_irda, &irnet_proc_fops);
 #endif /* CONFIG_PROC_FS */
 
   /* Setup the IrNET server */
diff --git a/net/irda/irnet/irnet_irda.h b/net/irda/irnet/irnet_irda.h
index 0ba92d0..3e40895 100644
--- a/net/irda/irnet/irnet_irda.h
+++ b/net/irda/irnet/irnet_irda.h
@@ -159,14 +159,6 @@
 				DISCOVERY_MODE,
 				void *);
 #endif
-/* -------------------------- PROC ENTRY -------------------------- */
-#ifdef CONFIG_PROC_FS
-static int
-	irnet_proc_read(char *,
-			char **,
-			off_t,
-			int);
-#endif /* CONFIG_PROC_FS */
 
 /**************************** VARIABLES ****************************/
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bbd2689..582ec3e 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -214,7 +214,7 @@
 
 	segs = skb_gso_segment(skb, 0);
 	kfree_skb(skb);
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		return 1;
 
 	do {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 2519129..09cd9c0 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -150,7 +150,7 @@
 
 	segs = skb_gso_segment(skb, 0);
 	kfree_skb(skb);
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		return PTR_ERR(segs);
 
 	do {
diff --git a/samples/markers/marker-example.c b/samples/markers/marker-example.c
index 05e438f..e90dc5d 100644
--- a/samples/markers/marker-example.c
+++ b/samples/markers/marker-example.c
@@ -33,10 +33,8 @@
 static int example_init(void)
 {
 	printk(KERN_ALERT "example init\n");
-	pentry_example = create_proc_entry("marker-example", 0444, NULL);
-	if (pentry_example)
-		pentry_example->proc_fops = &mark_ops;
-	else
+	pentry_example = proc_create("marker-example", 0444, NULL, &mark_ops);
+	if (!pentry_example)
 		return -EPERM;
 	return 0;
 }
diff --git a/scripts/Lindent b/scripts/Lindent
index 9468ec7..9c4b3e2 100755
--- a/scripts/Lindent
+++ b/scripts/Lindent
@@ -1,2 +1,18 @@
 #!/bin/sh
-indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1 "$@"
+PARAM="-npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1"
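+# pass -il0 only when indent is version 2.2.10 or newer (checked below)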
+RES=`indent --version`
+V1=`echo $RES | cut -d' ' -f3 | cut -d'.' -f1`
+V2=`echo $RES | cut -d' ' -f3 | cut -d'.' -f2`
+V3=`echo $RES | cut -d' ' -f3 | cut -d'.' -f3`
+if [ $V1 -gt 2 ]; then
+  PARAM="$PARAM -il0"
+elif [ $V1 -eq 2 ]; then
+  if [ $V2 -gt 2 ]; then
+    PARAM="$PARAM -il0";
+  elif [ $V2 -eq 2 ]; then
+    if [ $V3 -ge 10 ]; then
+      PARAM="$PARAM -il0"
+    fi
+  fi
+fi
+indent $PARAM "$@"
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 64ec4b8..b6bbbcd 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -9,7 +9,7 @@
 my $P = $0;
 $P =~ s@.*/@@g;
 
-my $V = '0.16';
+my $V = '0.18';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -131,6 +131,17 @@
 our $Type;
 our $Declare;
 
+our $UTF8	= qr {
+	[\x09\x0A\x0D\x20-\x7E]              # ASCII
+	| [\xC2-\xDF][\x80-\xBF]             # non-overlong 2-byte
+	|  \xE0[\xA0-\xBF][\x80-\xBF]        # excluding overlongs
+	| [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2}  # straight 3-byte
+	|  \xED[\x80-\x9F][\x80-\xBF]        # excluding surrogates
+	|  \xF0[\x90-\xBF][\x80-\xBF]{2}     # planes 1-3
+	| [\xF1-\xF3][\x80-\xBF]{3}          # planes 4-15
+	|  \xF4[\x80-\x8F][\x80-\xBF]{2}     # plane 16
+}x;
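+# $UTF8 matches one valid UTF-8 sequence; the encoding check later anchors whole lines against it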
+
 our @typeList = (
 	qr{void},
 	qr{char},
@@ -692,7 +703,7 @@
 	while (length($cur)) {
 		@av_paren_type = ('E') if ($#av_paren_type < 0);
 		print " <" . join('', @av_paren_type) .
-					"> <$type> " if ($dbg_values > 1);
+				"> <$type> <$av_pending>" if ($dbg_values > 1);
 		if ($cur =~ /^(\s+)/o) {
 			print "WS($1)\n" if ($dbg_values > 1);
 			if ($1 =~ /\n/ && $av_preprocessor) {
@@ -705,9 +716,18 @@
 			$type = 'T';
 
 		} elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) {
-			print "DEFINE($1)\n" if ($dbg_values > 1);
+			print "DEFINE($1,$2)\n" if ($dbg_values > 1);
 			$av_preprocessor = 1;
-			$av_pending = 'N';
+			push(@av_paren_type, $type);
+			if ($2 ne '') {
+				$av_pending = 'N';
+			}
+			$type = 'E';
+
+		} elsif ($cur =~ /^(#\s*undef\s*$Ident)/o) {
+			print "UNDEF($1)\n" if ($dbg_values > 1);
+			$av_preprocessor = 1;
+			push(@av_paren_type, $type);
 
 		} elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) {
 			print "PRE_START($1)\n" if ($dbg_values > 1);
@@ -715,7 +735,7 @@
 
 			push(@av_paren_type, $type);
 			push(@av_paren_type, $type);
-			$type = 'N';
+			$type = 'E';
 
 		} elsif ($cur =~ /^(#\s*(?:else|elif))/o) {
 			print "PRE_RESTART($1)\n" if ($dbg_values > 1);
@@ -723,7 +743,7 @@
 
 			push(@av_paren_type, $av_paren_type[$#av_paren_type]);
 
-			$type = 'N';
+			$type = 'E';
 
 		} elsif ($cur =~ /^(#\s*(?:endif))/o) {
 			print "PRE_END($1)\n" if ($dbg_values > 1);
@@ -734,11 +754,16 @@
 			# one does, and continue as if the #endif was not here.
 			pop(@av_paren_type);
 			push(@av_paren_type, $type);
-			$type = 'N';
+			$type = 'E';
 
 		} elsif ($cur =~ /^(\\\n)/o) {
 			print "PRECONT($1)\n" if ($dbg_values > 1);
 
+		} elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
+			print "ATTR($1)\n" if ($dbg_values > 1);
+			$av_pending = $type;
+			$type = 'N';
+
 		} elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
 			print "SIZEOF($1)\n" if ($dbg_values > 1);
 			if (defined $2) {
@@ -930,7 +955,7 @@
 			# edge is a close comment then we must be in a comment
 			# at context start.
 			my $edge;
-			for (my $ln = $linenr; $ln < ($linenr + $realcnt); $ln++) {
+			for (my $ln = $linenr + 1; $ln < ($linenr + $realcnt); $ln++) {
 				next if ($line =~ /^-/);
 				($edge) = ($rawlines[$ln - 1] =~ m@(/\*|\*/)@);
 				last if (defined $edge);
@@ -951,9 +976,9 @@
 			##print "COMMENT:$in_comment edge<$edge> $rawline\n";
 			sanitise_line_reset($in_comment);
 
-		} elsif ($realcnt) {
+		} elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) {
 			# Standardise the strings and chars within the input to
-			# simplify matching.
+			# simplify matching -- only bother with positive lines.
 			$line = sanitise_line($rawline);
 		}
 		push(@lines, $line);
@@ -1066,17 +1091,14 @@
 
 # UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
 		if (($realfile =~ /^$/ || $line =~ /^\+/) &&
-		     !($rawline =~ m/^(
-				[\x09\x0A\x0D\x20-\x7E]              # ASCII
-				| [\xC2-\xDF][\x80-\xBF]             # non-overlong 2-byte
-				|  \xE0[\xA0-\xBF][\x80-\xBF]        # excluding overlongs
-				| [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2}  # straight 3-byte
-				|  \xED[\x80-\x9F][\x80-\xBF]        # excluding surrogates
-				|  \xF0[\x90-\xBF][\x80-\xBF]{2}     # planes 1-3
-				| [\xF1-\xF3][\x80-\xBF]{3}          # planes 4-15
-				|  \xF4[\x80-\x8F][\x80-\xBF]{2}     # plane 16
-				)*$/x )) {
-			ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $herecurr);
+		    $rawline !~ m/^$UTF8*$/) {
+			my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);
+
+			my $blank = copy_spacing($rawline);
+			my $ptr = substr($blank, 0, length($utf8_prefix)) . "^";
+			my $hereptr = "$hereline$ptr\n";
+
+			ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr);
 		}
 
 #ignore lines being removed
@@ -1112,7 +1134,7 @@
 		if ($rawline =~ /^\+\s* \t\s*\S/ ||
 		    $rawline =~ /^\+\s*        \s*/) {
 			my $herevet = "$here\n" . cat_vet($rawline) . "\n";
-			ERROR("use tabs not spaces\n" . $herevet);
+			ERROR("code indent should use tabs where possible\n" . $herevet);
 		}
 
 # check for RCS/CVS revision markers
@@ -1121,35 +1143,40 @@
 		}
 
 # Check for potential 'bare' types
-		if ($realcnt) {
-			my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
-			$s =~ s/\n./ /g;
-			$s =~ s/{.*$//;
+		my ($stat, $cond);
+		if ($realcnt && $line =~ /.\s*\S/) {
+			($stat, $cond) = ctx_statement_block($linenr,
+								$realcnt, 0);
+			$stat =~ s/\n./\n /g;
+			$cond =~ s/\n./\n /g;
+
+			my $s = $stat;
+			$s =~ s/{.*$//s;
 
 			# Ignore goto labels.
-			if ($s =~ /$Ident:\*$/) {
+			if ($s =~ /$Ident:\*$/s) {
 
 			# Ignore functions being called
-			} elsif ($s =~ /^.\s*$Ident\s*\(/) {
+			} elsif ($s =~ /^.\s*$Ident\s*\(/s) {
 
 			# definitions in global scope can only start with types
-			} elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) {
+			} elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/s) {
 				possible($1, $s);
 
 			# declarations always start with types
-			} elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) {
+			} elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/s) {
 				possible($1, $s);
 			}
 
 			# any (foo ... *) is a pointer cast, and foo is a type
-			while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) {
+			while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/sg) {
 				possible($1, $s);
 			}
 
 			# Check for any sort of function declaration.
 			# int foo(something bar, other baz);
 			# void (*store_gdt)(x86_descr_ptr *);
-			if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) {
+			if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) {
 				my ($name_len) = length($1);
 
 				my $ctx = $s;
@@ -1282,18 +1309,19 @@
 			   ($prevline !~ /^ }/) &&
 			   ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) &&
 			   ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) &&
+			   ($prevline !~ /^.$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(/) &&
 			   ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) {
 				WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
 			}
 		}
 
 # check for external initialisers.
-		if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL);/) {
+		if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL|false)\s*;/) {
 			ERROR("do not initialise externals to 0 or NULL\n" .
 				$herecurr);
 		}
 # check for static initialisers.
-		if ($line =~ /\s*static\s.*=\s*(0|NULL);/) {
+		if ($line =~ /\s*static\s.*=\s*(0|NULL|false)\s*;/) {
 			ERROR("do not initialise statics to 0 or NULL\n" .
 				$herecurr);
 		}
@@ -1512,7 +1540,10 @@
 					if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
 						ERROR("space required before that '$op' $at\n" . $hereptr);
 					}
-					if ($ctx =~ /.xW/) {
+					if ($op eq '*' && $cc =~ /\s*const\b/) {
+						# A unary '*' may be const
+
+					} elsif ($ctx =~ /.xW/) {
 						ERROR("space prohibited after that '$op' $at\n" . $hereptr);
 					}
 
@@ -1617,7 +1648,7 @@
 
 # Check for illegal assignment in if conditional.
 		if ($line =~ /\bif\s*\(/) {
-			my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
+			my ($s, $c) = ($stat, $cond);
 
 			if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) {
 				ERROR("do not use assignment in if condition\n" . $herecurr);
@@ -1695,7 +1726,7 @@
 #warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
 		if ($tree && $rawline =~ m{^.\#\s*include\s*\<asm\/(.*)\.h\>}) {
 			my $checkfile = "$root/include/linux/$1.h";
-			if (-f $checkfile && $1 ne 'irq.h') {
+			if (-f $checkfile && $1 ne 'irq') {
 				WARN("Use #include <linux/$1.h> instead of <asm/$1.h>\n" .
 					$herecurr);
 			}
@@ -1910,7 +1941,8 @@
 		}
 
 # check for spinlock_t definitions without a comment.
-		if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/) {
+		if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
+		    $line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
 			my $which = $1;
 			if (!ctx_has_comment($first_line, $linenr)) {
 				CHK("$1 definition without comment\n" . $herecurr);
@@ -1940,7 +1972,26 @@
 		}
 
 # check for new externs in .c files.
-		if ($line =~ /^.\s*extern\s/ && ($realfile =~ /\.c$/)) {
+		if ($realfile =~ /\.c$/ && defined $stat &&
+		    $stat =~ /^.\s*(?:extern\s+)?$Type\s+$Ident(\s*)\(/s)
+		{
+			my $paren_space = $1;
+
+			my $s = $stat;
+			if (defined $cond) {
+				substr($s, 0, length($cond), '');
+			}
+			if ($s =~ /^\s*;/) {
+				WARN("externs should be avoided in .c files\n" .  $herecurr);
+			}
+
+			if ($paren_space =~ /\n/) {
+				WARN("arguments for function declarations should follow identifier\n" . $herecurr);
+			}
+
+		} elsif ($realfile =~ /\.c$/ && defined $stat &&
+		    $stat =~ /^.\s*extern\s+/)
+		{
 			WARN("externs should be avoided in .c files\n" .  $herecurr);
 		}
 
@@ -1964,11 +2015,11 @@
 		}
 
 # check for semaphores used as mutexes
-		if ($line =~ /\b(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
+		if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
 			WARN("mutexes are preferred for single holder semaphores\n" . $herecurr);
 		}
 # check for semaphores used as mutexes
-		if ($line =~ /\binit_MUTEX_LOCKED\s*\(/) {
+		if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
 			WARN("consider using a completion\n" . $herecurr);
 		}
 # recommend strict_strto* over simple_strto*
@@ -1979,11 +2030,24 @@
 # use of NR_CPUS is usually wrong
 # ignore definitions of NR_CPUS and usage to define arrays as likely right
 		if ($line =~ /\bNR_CPUS\b/ &&
-		    $line !~ /^.#\s*define\s+NR_CPUS\s+/ &&
-		    $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/)
+		    $line !~ /^.#\s*if\b.*\bNR_CPUS\b/ &&
+		    $line !~ /^.#\s*define\b.*\bNR_CPUS\b/ &&
+		    $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ &&
+		    $line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ &&
+		    $line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/)
 		{
 			WARN("usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
 		}
+
+# check for %L{u,d,i} in strings
+		my $string;
+		while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
+			$string = substr($rawline, $-[1], $+[1] - $-[1]);
+			if ($string =~ /(?<!%)%L[udi]/) {
+				WARN("\%Ld/%Lu are non-standard C, use %lld/%llu\n" . $herecurr);
+				last;
+			}
+		}
 	}
 
 	# If we have no input at all, then there is nothing to report on
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 684fb9c..5f3415f 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -135,7 +135,7 @@
 			str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
 			;;
 		"slink")
-			local target=`field 11 $(LC_ALL=C ls -l "${location}")`
+			local target=`readlink "${location}"`
 			str="${ftype} ${name} ${target} ${str}"
 			;;
 		*)
diff --git a/scripts/kconfig/lex.zconf.c_shipped b/scripts/kconfig/lex.zconf.c_shipped
index bed0f4e..6a61cee 100644
--- a/scripts/kconfig/lex.zconf.c_shipped
+++ b/scripts/kconfig/lex.zconf.c_shipped
@@ -5,25 +5,6 @@
 
 /* A lexical scanner generated by flex */
 
-#define yy_create_buffer zconf_create_buffer
-#define yy_delete_buffer zconf_delete_buffer
-#define yy_flex_debug zconf_flex_debug
-#define yy_init_buffer zconf_init_buffer
-#define yy_flush_buffer zconf_flush_buffer
-#define yy_load_buffer_state zconf_load_buffer_state
-#define yy_switch_to_buffer zconf_switch_to_buffer
-#define yyin zconfin
-#define yyleng zconfleng
-#define yylex zconflex
-#define yylineno zconflineno
-#define yyout zconfout
-#define yyrestart zconfrestart
-#define yytext zconftext
-#define yywrap zconfwrap
-#define yyalloc zconfalloc
-#define yyrealloc zconfrealloc
-#define yyfree zconffree
-
 #define FLEX_SCANNER
 #define YY_FLEX_MAJOR_VERSION 2
 #define YY_FLEX_MINOR_VERSION 5
@@ -354,7 +335,7 @@
 
 /* Begin user sect3 */
 
-#define zconfwrap(n) 1
+#define zconfwrap() 1
 #define YY_SKIP_YYWRAP
 
 typedef unsigned char YY_CHAR;
@@ -1535,7 +1516,7 @@
 
 		/* Read in more data. */
 		YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
-			(yy_n_chars), num_to_read );
+			(yy_n_chars), (size_t) num_to_read );
 
 		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
 		}
@@ -2007,7 +1988,7 @@
 
 /** Setup the input buffer state to scan a string. The next call to zconflex() will
  * scan from a @e copy of @a str.
- * @param str a NUL-terminated string to scan
+ * @param yystr a NUL-terminated string to scan
  * 
  * @return the newly allocated buffer state object.
  * @note If you want to scan bytes that may contain NUL values, then use
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index 4d09f6d..8e69461 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -21,7 +21,7 @@
 /* symbol.c */
 P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
 
-P(sym_lookup,struct symbol *,(const char *name, int isconst));
+P(sym_lookup,struct symbol *,(const char *name, int flags));
 P(sym_find,struct symbol *,(const char *name));
 P(sym_re_search,struct symbol **,(const char *pattern));
 P(sym_type_name,const char *,(enum symbol_type type));
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 4946bd0..616c601 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -89,7 +89,7 @@
 	box_y = y + 2;
 	box_x = (width - box_width) / 2;
 	draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2,
-		 dlg.border.atr, dlg.dialog.atr);
+		 dlg.dialog.atr, dlg.border.atr);
 
 	print_buttons(dialog, height, width, 0);
 
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 606ceb9..07ff8d1 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -235,18 +235,22 @@
 	sym = parent->sym;
 	if (parent->list) {
 		if (sym && sym_is_choice(sym)) {
-			/* find the first choice value and find out choice type */
-			for (menu = parent->list; menu; menu = menu->next) {
-				if (menu->sym) {
-					current_entry = parent;
-					if (sym->type == S_UNKNOWN)
+			if (sym->type == S_UNKNOWN) {
+				/* find the first choice value to find out choice type */
+				current_entry = parent;
+				for (menu = parent->list; menu; menu = menu->next) {
+					if (menu->sym && menu->sym->type != S_UNKNOWN) {
 						menu_set_type(menu->sym->type);
-					current_entry = menu;
-					if (menu->sym->type == S_UNKNOWN)
-						menu_set_type(sym->type);
-					break;
+						break;
+					}
 				}
 			}
+			/* set the type of the remaining choice values */
+			for (menu = parent->list; menu; menu = menu->next) {
+				current_entry = menu;
+				if (menu->sym && menu->sym->type == S_UNKNOWN)
+					menu_set_type(sym->type);
+			}
 			parentdep = expr_alloc_symbol(sym);
 		} else if (parent->prompt)
 			parentdep = parent->prompt->visible.expr;
@@ -313,50 +317,36 @@
 		}
 	}
 	for (menu = parent->list; menu; menu = menu->next) {
-		if (sym && sym_is_choice(sym) && menu->sym) {
+		if (sym && sym_is_choice(sym) &&
+		    menu->sym && !sym_is_choice_value(menu->sym)) {
+			current_entry = menu;
 			menu->sym->flags |= SYMBOL_CHOICEVAL;
 			if (!menu->prompt)
 				menu_warn(menu, "choice value must have a prompt");
 			for (prop = menu->sym->prop; prop; prop = prop->next) {
-				if (prop->type == P_PROMPT && prop->menu != menu) {
-					prop_warn(prop, "choice values "
-					    "currently only support a "
-					    "single prompt");
-				}
 				if (prop->type == P_DEFAULT)
 					prop_warn(prop, "defaults for choice "
-					    "values not supported");
+						  "values not supported");
+				if (prop->menu == menu)
+					continue;
+				if (prop->type == P_PROMPT &&
+				    prop->menu->parent->sym != sym)
+					prop_warn(prop, "choice value used outside its choice group");
 			}
-			current_entry = menu;
-			if (menu->sym->type == S_UNKNOWN)
-				menu_set_type(sym->type);
 			/* Non-tristate choice values of tristate choices must
 			 * depend on the choice being set to Y. The choice
 			 * values' dependencies were propagated to their
 			 * properties above, so the change here must be re-
-			 * propagated. */
+			 * propagated.
+			 */
 			if (sym->type == S_TRISTATE && menu->sym->type != S_TRISTATE) {
 				basedep = expr_alloc_comp(E_EQUAL, sym, &symbol_yes);
-				basedep = expr_alloc_and(basedep, menu->dep);
-				basedep = expr_eliminate_dups(basedep);
-				menu->dep = basedep;
+				menu->dep = expr_alloc_and(basedep, menu->dep);
 				for (prop = menu->sym->prop; prop; prop = prop->next) {
 					if (prop->menu != menu)
 						continue;
-					dep = expr_alloc_and(expr_copy(basedep),
-							     prop->visible.expr);
-					dep = expr_eliminate_dups(dep);
-					dep = expr_trans_bool(dep);
-					prop->visible.expr = dep;
-					if (prop->type == P_SELECT) {
-						struct symbol *es = prop_get_symbol(prop);
-						dep2 = expr_alloc_symbol(menu->sym);
-						dep = expr_alloc_and(dep2,
-								     expr_copy(dep));
-						dep = expr_alloc_or(es->rev_dep.expr, dep);
-						dep = expr_eliminate_dups(dep);
-						es->rev_dep.expr = dep;
-					}
+					prop->visible.expr = expr_alloc_and(expr_copy(basedep),
+									    prop->visible.expr);
 				}
 			}
 			menu_add_symbol(P_CHOICE, sym, NULL);
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 4a03191..18f3e5c 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -40,7 +40,7 @@
 {
 	struct property *prop = prop_alloc(P_DEFAULT, sym);
 
-	prop->expr = expr_alloc_symbol(sym_lookup(def, 1));
+	prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST));
 }
 
 void sym_init(void)
@@ -350,9 +350,6 @@
 		;
 	}
 
-	if (sym->flags & SYMBOL_AUTO)
-		sym->flags &= ~SYMBOL_WRITE;
-
 	sym->curr = newval;
 	if (sym_is_choice(sym) && newval.tri == yes)
 		sym->curr.val = sym_calc_choice(sym);
@@ -377,6 +374,9 @@
 				sym_set_changed(choice_sym);
 		}
 	}
+
+	if (sym->flags & SYMBOL_AUTO)
+		sym->flags &= ~SYMBOL_WRITE;
 }
 
 void sym_clear_all_valid(void)
@@ -651,7 +651,7 @@
 	return sym->visible > sym->rev_dep.tri;
 }
 
-struct symbol *sym_lookup(const char *name, int isconst)
+struct symbol *sym_lookup(const char *name, int flags)
 {
 	struct symbol *symbol;
 	const char *ptr;
@@ -671,11 +671,10 @@
 		hash &= 0xff;
 
 		for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) {
-			if (!strcmp(symbol->name, name)) {
-				if ((isconst && symbol->flags & SYMBOL_CONST) ||
-				    (!isconst && !(symbol->flags & SYMBOL_CONST)))
-					return symbol;
-			}
+			if (!strcmp(symbol->name, name) &&
+			    (flags ? symbol->flags & flags
+				   : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE))))
+				return symbol;
 		}
 		new_name = strdup(name);
 	} else {
@@ -687,8 +686,7 @@
 	memset(symbol, 0, sizeof(*symbol));
 	symbol->name = new_name;
 	symbol->type = S_UNKNOWN;
-	if (isconst)
-		symbol->flags |= SYMBOL_CONST;
+	symbol->flags |= flags;
 
 	symbol->next = symbol_hash[hash];
 	symbol_hash[hash] = symbol;
@@ -762,8 +760,6 @@
 }
 
 
-struct symbol *sym_check_deps(struct symbol *sym);
-
 static struct symbol *sym_check_expr_deps(struct expr *e)
 {
 	struct symbol *sym;
@@ -795,6 +791,65 @@
 }
 
 /* return NULL when dependencies are OK */
+static struct symbol *sym_check_sym_deps(struct symbol *sym)
+{
+	struct symbol *sym2;
+	struct property *prop;
+
+	sym2 = sym_check_expr_deps(sym->rev_dep.expr);
+	if (sym2)
+		return sym2;
+
+	for (prop = sym->prop; prop; prop = prop->next) {
+		if (prop->type == P_CHOICE || prop->type == P_SELECT)
+			continue;
+		sym2 = sym_check_expr_deps(prop->visible.expr);
+		if (sym2)
+			break;
+		if (prop->type != P_DEFAULT || sym_is_choice(sym))
+			continue;
+		sym2 = sym_check_expr_deps(prop->expr);
+		if (sym2)
+			break;
+	}
+
+	return sym2;
+}
+
+static struct symbol *sym_check_choice_deps(struct symbol *choice)
+{
+	struct symbol *sym, *sym2;
+	struct property *prop;
+	struct expr *e;
+
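+	/* Mark the whole choice group up front so a dependency back into it is caught as recursive. */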
+	prop = sym_get_choice_prop(choice);
+	expr_list_for_each_sym(prop->expr, e, sym)
+		sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+
+	choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+	sym2 = sym_check_sym_deps(choice);
+	choice->flags &= ~SYMBOL_CHECK;
+	if (sym2)
+		goto out;
+
+	expr_list_for_each_sym(prop->expr, e, sym) {
+		sym2 = sym_check_sym_deps(sym);
+		if (sym2) {
+			fprintf(stderr, " -> %s", sym->name);
+			break;
+		}
+	}
+out:
+	expr_list_for_each_sym(prop->expr, e, sym)
+		sym->flags &= ~SYMBOL_CHECK;
+
+	if (sym2 && sym_is_choice_value(sym2) &&
+	    prop_get_symbol(sym_get_choice_prop(sym2)) == choice)
+		sym2 = choice;
+
+	return sym2;
+}
+
 struct symbol *sym_check_deps(struct symbol *sym)
 {
 	struct symbol *sym2;
@@ -802,33 +857,34 @@
 
 	if (sym->flags & SYMBOL_CHECK) {
 		fprintf(stderr, "%s:%d:error: found recursive dependency: %s",
-		        sym->prop->file->name, sym->prop->lineno, sym->name);
+		        sym->prop->file->name, sym->prop->lineno,
+			sym->name ? sym->name : "<choice>");
 		return sym;
 	}
 	if (sym->flags & SYMBOL_CHECKED)
 		return NULL;
 
-	sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
-	sym2 = sym_check_expr_deps(sym->rev_dep.expr);
-	if (sym2)
-		goto out;
-
-	for (prop = sym->prop; prop; prop = prop->next) {
-		if (prop->type == P_CHOICE || prop->type == P_SELECT)
-			continue;
-		sym2 = sym_check_expr_deps(prop->visible.expr);
-		if (sym2)
-			goto out;
-		if (prop->type != P_DEFAULT || sym_is_choice(sym))
-			continue;
-		sym2 = sym_check_expr_deps(prop->expr);
-		if (sym2)
-			goto out;
+	if (sym_is_choice_value(sym)) {
+		/* for choice groups start the check with main choice symbol */
+		prop = sym_get_choice_prop(sym);
+		sym2 = sym_check_deps(prop_get_symbol(prop));
+	} else if (sym_is_choice(sym)) {
+		sym2 = sym_check_choice_deps(sym);
+	} else {
+		sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+		sym2 = sym_check_sym_deps(sym);
+		sym->flags &= ~SYMBOL_CHECK;
 	}
-out:
-	if (sym2)
-		fprintf(stderr, " -> %s%s", sym->name, sym2 == sym? "\n": "");
-	sym->flags &= ~SYMBOL_CHECK;
+
+	if (sym2) {
+		fprintf(stderr, " -> %s", sym->name ? sym->name : "<choice>");
+		if (sym2 == sym) {
+			fprintf(stderr, "\n");
+			zconfnerrs++;
+			sym2 = NULL;
+		}
+	}
+
 	return sym2;
 }
 
@@ -904,7 +960,7 @@
 	}
 
 	prop = prop_alloc(P_ENV, sym);
-	prop->expr = expr_alloc_symbol(sym_lookup(env, 1));
+	prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST));
 
 	sym_env_list = expr_alloc_one(E_LIST, sym_env_list);
 	sym_env_list->right.sym = sym;
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index d22d924..95df833 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -446,16 +446,16 @@
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  3
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   258
+#define YYLAST   259
 
 /* YYNTOKENS -- Number of terminals.  */
 #define YYNTOKENS  35
 /* YYNNTS -- Number of nonterminals.  */
-#define YYNNTS  45
+#define YYNNTS  46
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  108
+#define YYNRULES  110
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  178
+#define YYNSTATES  180
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
@@ -507,13 +507,14 @@
       28,    33,    37,    39,    41,    43,    45,    47,    49,    51,
       53,    55,    57,    59,    61,    63,    67,    70,    74,    77,
       81,    84,    85,    88,    91,    94,    97,   100,   103,   107,
-     112,   117,   122,   128,   132,   133,   137,   138,   141,   144,
-     147,   149,   153,   154,   157,   160,   163,   166,   169,   174,
-     178,   181,   186,   187,   190,   194,   196,   200,   201,   204,
-     207,   210,   214,   217,   219,   223,   224,   227,   230,   233,
-     237,   241,   244,   247,   250,   251,   254,   257,   260,   265,
-     266,   269,   271,   273,   276,   279,   282,   284,   287,   288,
-     291,   293,   297,   301,   305,   308,   312,   316,   318
+     112,   117,   122,   128,   132,   133,   137,   138,   141,   145,
+     148,   150,   154,   155,   158,   161,   164,   167,   170,   175,
+     179,   182,   187,   188,   191,   195,   197,   201,   202,   205,
+     208,   211,   215,   218,   220,   224,   225,   228,   231,   234,
+     238,   242,   245,   248,   251,   252,   255,   258,   261,   266,
+     267,   270,   272,   274,   277,   280,   283,   285,   288,   289,
+     292,   294,   298,   302,   306,   309,   313,   317,   319,   321,
+     322
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
@@ -533,24 +534,25 @@
       30,    -1,    20,    78,    77,    30,    -1,    21,    25,    77,
       30,    -1,    22,    79,    79,    77,    30,    -1,    23,    48,
       30,    -1,    -1,    48,    25,    49,    -1,    -1,    33,    74,
-      -1,     7,    30,    -1,    50,    54,    -1,    75,    -1,    51,
-      56,    52,    -1,    -1,    54,    55,    -1,    54,    72,    -1,
-      54,    70,    -1,    54,    30,    -1,    54,    40,    -1,    18,
-      74,    77,    30,    -1,    19,    73,    30,    -1,    17,    30,
-      -1,    20,    25,    77,    30,    -1,    -1,    56,    39,    -1,
-      14,    78,    76,    -1,    75,    -1,    57,    60,    58,    -1,
-      -1,    60,    39,    -1,    60,    64,    -1,    60,    53,    -1,
-       4,    74,    30,    -1,    61,    71,    -1,    75,    -1,    62,
-      65,    63,    -1,    -1,    65,    39,    -1,    65,    64,    -1,
-      65,    53,    -1,     6,    74,    30,    -1,     9,    74,    30,
-      -1,    67,    71,    -1,    12,    30,    -1,    69,    13,    -1,
-      -1,    71,    72,    -1,    71,    30,    -1,    71,    40,    -1,
-      16,    24,    78,    30,    -1,    -1,    74,    77,    -1,    25,
-      -1,    26,    -1,     5,    30,    -1,     8,    30,    -1,    15,
-      30,    -1,    30,    -1,    76,    30,    -1,    -1,    14,    78,
-      -1,    79,    -1,    79,    33,    79,    -1,    79,    27,    79,
-      -1,    29,    78,    28,    -1,    34,    78,    -1,    78,    31,
-      78,    -1,    78,    32,    78,    -1,    25,    -1,    26,    -1
+      -1,     7,    80,    30,    -1,    50,    54,    -1,    75,    -1,
+      51,    56,    52,    -1,    -1,    54,    55,    -1,    54,    72,
+      -1,    54,    70,    -1,    54,    30,    -1,    54,    40,    -1,
+      18,    74,    77,    30,    -1,    19,    73,    30,    -1,    17,
+      30,    -1,    20,    25,    77,    30,    -1,    -1,    56,    39,
+      -1,    14,    78,    76,    -1,    75,    -1,    57,    60,    58,
+      -1,    -1,    60,    39,    -1,    60,    64,    -1,    60,    53,
+      -1,     4,    74,    30,    -1,    61,    71,    -1,    75,    -1,
+      62,    65,    63,    -1,    -1,    65,    39,    -1,    65,    64,
+      -1,    65,    53,    -1,     6,    74,    30,    -1,     9,    74,
+      30,    -1,    67,    71,    -1,    12,    30,    -1,    69,    13,
+      -1,    -1,    71,    72,    -1,    71,    30,    -1,    71,    40,
+      -1,    16,    24,    78,    30,    -1,    -1,    74,    77,    -1,
+      25,    -1,    26,    -1,     5,    30,    -1,     8,    30,    -1,
+      15,    30,    -1,    30,    -1,    76,    30,    -1,    -1,    14,
+      78,    -1,    79,    -1,    79,    33,    79,    -1,    79,    27,
+      79,    -1,    29,    78,    28,    -1,    34,    78,    -1,    78,
+      31,    78,    -1,    78,    32,    78,    -1,    25,    -1,    26,
+      -1,    -1,    25,    -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
@@ -566,7 +568,8 @@
      339,   344,   351,   356,   364,   367,   369,   370,   371,   374,
      382,   389,   396,   402,   409,   411,   412,   413,   416,   424,
      426,   431,   432,   435,   436,   437,   441,   442,   445,   446,
-     449,   450,   451,   452,   453,   454,   455,   458,   459
+     449,   450,   451,   452,   453,   454,   455,   458,   459,   462,
+     463
 };
 #endif
 
@@ -590,7 +593,8 @@
   "if_entry", "if_end", "if_stmt", "if_block", "menu", "menu_entry",
   "menu_end", "menu_stmt", "menu_block", "source_stmt", "comment",
   "comment_stmt", "help_start", "help", "depends_list", "depends",
-  "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol", 0
+  "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol",
+  "word_opt", 0
 };
 #endif
 
@@ -619,7 +623,8 @@
       60,    61,    62,    63,    64,    65,    65,    65,    65,    66,
       67,    68,    69,    70,    71,    71,    71,    71,    72,    73,
       73,    74,    74,    75,    75,    75,    76,    76,    77,    77,
-      78,    78,    78,    78,    78,    78,    78,    79,    79
+      78,    78,    78,    78,    78,    78,    78,    79,    79,    80,
+      80
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
@@ -629,13 +634,14 @@
        4,     3,     1,     1,     1,     1,     1,     1,     1,     1,
        1,     1,     1,     1,     1,     3,     2,     3,     2,     3,
        2,     0,     2,     2,     2,     2,     2,     2,     3,     4,
-       4,     4,     5,     3,     0,     3,     0,     2,     2,     2,
+       4,     4,     5,     3,     0,     3,     0,     2,     3,     2,
        1,     3,     0,     2,     2,     2,     2,     2,     4,     3,
        2,     4,     0,     2,     3,     1,     3,     0,     2,     2,
        2,     3,     2,     1,     3,     0,     2,     2,     2,     3,
        3,     2,     2,     2,     0,     2,     2,     2,     4,     0,
        2,     1,     1,     2,     2,     2,     1,     2,     0,     2,
-       1,     3,     3,     3,     2,     3,     3,     1,     1
+       1,     3,     3,     3,     2,     3,     3,     1,     1,     0,
+       1
 };
 
 /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -643,69 +649,69 @@
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
-       3,     0,     0,     1,     0,     0,     0,     0,     0,     0,
+       3,     0,     0,     1,     0,     0,     0,     0,     0,   109,
        0,     0,     0,     0,     0,     0,    12,    16,    13,    14,
       18,    15,    17,     0,    19,     0,     4,    31,    22,    31,
       23,    52,    62,     5,    67,    20,    84,    75,     6,    24,
       84,    21,     8,    11,    91,    92,     0,     0,    93,     0,
-      48,    94,     0,     0,     0,   107,   108,     0,     0,     0,
-     100,    95,     0,     0,     0,     0,     0,     0,     0,     0,
-       0,     0,    96,     7,    71,    79,    80,    27,    29,     0,
-     104,     0,     0,    64,     0,     0,     9,    10,     0,     0,
-       0,     0,    89,     0,     0,     0,    44,     0,    37,    36,
-      32,    33,     0,    35,    34,     0,     0,    89,     0,    56,
-      57,    53,    55,    54,    63,    51,    50,    68,    70,    66,
-      69,    65,    86,    87,    85,    76,    78,    74,    77,    73,
-      97,   103,   105,   106,   102,   101,    26,    82,     0,    98,
-       0,    98,    98,    98,     0,     0,     0,    83,    60,    98,
-       0,    98,     0,     0,     0,    38,    90,     0,     0,    98,
-      46,    43,    25,     0,    59,     0,    88,    99,    39,    40,
-      41,     0,     0,    45,    58,    61,    42,    47
+     110,     0,    94,     0,     0,     0,   107,   108,     0,     0,
+       0,   100,    95,     0,     0,     0,     0,     0,     0,     0,
+       0,     0,     0,    96,     7,    71,    79,    48,    80,    27,
+      29,     0,   104,     0,     0,    64,     0,     0,     9,    10,
+       0,     0,     0,     0,    89,     0,     0,     0,    44,     0,
+      37,    36,    32,    33,     0,    35,    34,     0,     0,    89,
+       0,    56,    57,    53,    55,    54,    63,    51,    50,    68,
+      70,    66,    69,    65,    86,    87,    85,    76,    78,    74,
+      77,    73,    97,   103,   105,   106,   102,   101,    26,    82,
+       0,    98,     0,    98,    98,    98,     0,     0,     0,    83,
+      60,    98,     0,    98,     0,     0,     0,    38,    90,     0,
+       0,    98,    46,    43,    25,     0,    59,     0,    88,    99,
+      39,    40,    41,     0,     0,    45,    58,    61,    42,    47
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int16 yydefgoto[] =
 {
-      -1,     1,     2,    25,    26,    99,    27,    28,    29,    30,
-      64,   100,   101,   145,   173,    31,    32,   115,    33,    66,
-     111,    67,    34,   119,    35,    68,    36,    37,   127,    38,
-      70,    39,    40,    41,   102,   103,    69,   104,   140,   141,
-      42,    73,   154,    59,    60
+      -1,     1,     2,    25,    26,   101,    27,    28,    29,    30,
+      65,   102,   103,   147,   175,    31,    32,   117,    33,    67,
+     113,    68,    34,   121,    35,    69,    36,    37,   129,    38,
+      71,    39,    40,    41,   104,   105,    70,   106,   142,   143,
+      42,    74,   156,    60,    61,    51
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -78
+#define YYPACT_NINF -80
 static const yytype_int16 yypact[] =
 {
-     -78,    33,   130,   -78,   -28,    73,    73,     7,    73,    36,
-      41,    73,    26,    52,    -4,    58,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,    90,   -78,    94,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,    74,    85,   -78,    96,
-     -78,   -78,   131,   134,   147,   -78,   -78,    -4,    -4,   193,
-     -10,   -78,   162,   164,    38,   102,    64,   148,     5,   192,
-       5,   165,   -78,   174,   -78,   -78,   -78,   -78,   -78,    65,
-     -78,    -4,    -4,   174,   103,   103,   -78,   -78,   175,   185,
-     197,    73,    73,    -4,   194,   103,   -78,   231,   -78,   -78,
-     -78,   -78,   220,   -78,   -78,   204,    73,    73,   210,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,
-     -78,   -78,   205,   -78,   -78,   -78,   -78,   -78,    -4,   222,
-     208,   222,   195,   222,   103,     2,   209,   -78,   -78,   222,
-     211,   222,   199,    -4,   212,   -78,   -78,   213,   214,   222,
-     207,   -78,   -78,   215,   -78,   216,   -78,   111,   -78,   -78,
-     -78,   217,    73,   -78,   -78,   -78,   -78,   -78
+     -80,     2,   132,   -80,   -13,    -1,    -1,    -2,    -1,     9,
+      33,    -1,    27,    40,    -3,    38,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,    71,   -80,    77,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,    57,    61,   -80,    63,
+     -80,    76,   -80,    87,   101,   133,   -80,   -80,    -3,    -3,
+     195,    -6,   -80,   136,   149,    39,   104,    65,   150,     5,
+     194,     5,   167,   -80,   176,   -80,   -80,   -80,   -80,   -80,
+     -80,    68,   -80,    -3,    -3,   176,    72,    72,   -80,   -80,
+     177,   187,    78,    -1,    -1,    -3,   196,    72,   -80,   222,
+     -80,   -80,   -80,   -80,   221,   -80,   -80,   205,    -1,    -1,
+     211,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   206,   -80,   -80,   -80,   -80,   -80,
+      -3,   223,   209,   223,   197,   223,    72,     7,   210,   -80,
+     -80,   223,   212,   223,   201,    -3,   213,   -80,   -80,   214,
+     215,   223,   208,   -80,   -80,   216,   -80,   217,   -80,   113,
+     -80,   -80,   -80,   218,    -1,   -80,   -80,   -80,   -80,   -80
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int16 yypgoto[] =
 {
-     -78,   -78,   -78,   -78,   121,   -35,   -78,   -78,   -78,   -78,
-     219,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -44,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,    -6,
-     -78,   -78,   -78,   -78,   -78,   183,   218,    21,   143,    -5,
-     146,   196,    69,   -53,   -77
+     -80,   -80,   -80,   -80,   122,   -34,   -80,   -80,   -80,   -80,
+     220,   -80,   -80,   -80,   -80,   -80,   -80,   -80,    59,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   125,
+     -80,   -80,   -80,   -80,   -80,   183,   219,    22,   142,    -5,
+     147,   192,    69,   -54,   -79,   -80
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
@@ -715,62 +721,62 @@
 #define YYTABLE_NINF -82
 static const yytype_int16 yytable[] =
 {
-      46,    47,    43,    49,    79,    80,    52,   134,   135,     6,
-       7,     8,     9,    10,    11,    12,    13,    84,   144,    14,
-      15,    55,    56,    85,   118,    57,   126,   160,   132,   133,
-      58,   110,   161,     3,   123,    24,   123,    48,   -28,    88,
-     142,   -28,   -28,   -28,   -28,   -28,   -28,   -28,   -28,   -28,
-      89,    53,   -28,   -28,    90,   -28,    91,    92,    93,    94,
-      95,    96,   120,    97,   128,    88,    50,   159,    98,   -49,
-     -49,    51,   -49,   -49,   -49,   -49,    89,    54,   -49,   -49,
-      90,   105,   106,   107,   108,   152,   139,   113,    61,    97,
-     124,    62,   124,   131,   109,    63,    81,    82,    44,    45,
-     167,   149,   -30,    88,    72,   -30,   -30,   -30,   -30,   -30,
-     -30,   -30,   -30,   -30,    89,    74,   -30,   -30,    90,   -30,
-      91,    92,    93,    94,    95,    96,    75,    97,    55,    56,
-      -2,     4,    98,     5,     6,     7,     8,     9,    10,    11,
-      12,    13,    81,    82,    14,    15,    16,    17,    18,    19,
-      20,    21,    22,     7,     8,    23,    10,    11,    12,    13,
-      24,    76,    14,    15,    77,   -81,    88,   177,   -81,   -81,
-     -81,   -81,   -81,   -81,   -81,   -81,   -81,    78,    24,   -81,
-     -81,    90,   -81,   -81,   -81,   -81,   -81,   -81,   114,   117,
-      97,   125,    86,    88,    87,   122,   -72,   -72,   -72,   -72,
-     -72,   -72,   -72,   -72,   130,   136,   -72,   -72,    90,   153,
-     156,   157,   158,   116,   121,   137,   129,    97,   163,   143,
-     165,   138,   122,    72,    81,    82,    81,    82,   171,   166,
-      81,    82,   146,   147,   148,   151,   153,    82,   155,   162,
-     172,   164,   168,   169,   170,   174,   175,   176,    65,   112,
-     150,     0,     0,     0,     0,    83,     0,     0,    71
+      46,    47,     3,    49,    81,    82,    53,   136,   137,     6,
+       7,     8,     9,    10,    11,    12,    13,    43,   146,    14,
+      15,    86,    56,    57,    44,    45,    58,    87,    48,   134,
+     135,    59,   162,   112,    50,    24,   125,   163,   125,   -28,
+      90,   144,   -28,   -28,   -28,   -28,   -28,   -28,   -28,   -28,
+     -28,    91,    54,   -28,   -28,    92,   -28,    93,    94,    95,
+      96,    97,    98,    52,    99,    55,    90,   161,    62,   100,
+     -49,   -49,    63,   -49,   -49,   -49,   -49,    91,    64,   -49,
+     -49,    92,   107,   108,   109,   110,   154,    73,   141,   115,
+      99,    75,   126,    76,   126,   111,   133,    56,    57,    83,
+      84,   169,   140,   151,   -30,    90,    77,   -30,   -30,   -30,
+     -30,   -30,   -30,   -30,   -30,   -30,    91,    78,   -30,   -30,
+      92,   -30,    93,    94,    95,    96,    97,    98,   120,    99,
+     128,    79,    -2,     4,   100,     5,     6,     7,     8,     9,
+      10,    11,    12,    13,    83,    84,    14,    15,    16,    17,
+      18,    19,    20,    21,    22,     7,     8,    23,    10,    11,
+      12,    13,    24,    80,    14,    15,    88,   -81,    90,   179,
+     -81,   -81,   -81,   -81,   -81,   -81,   -81,   -81,   -81,    89,
+      24,   -81,   -81,    92,   -81,   -81,   -81,   -81,   -81,   -81,
+     116,   119,    99,   127,   122,    90,   130,   124,   -72,   -72,
+     -72,   -72,   -72,   -72,   -72,   -72,   132,   138,   -72,   -72,
+      92,   155,   158,   159,   160,   118,   123,   139,   131,    99,
+     165,   145,   167,   148,   124,    73,    83,    84,    83,    84,
+     173,   168,    83,    84,   149,   150,   153,   155,    84,   157,
+     164,   174,   166,   170,   171,   172,   176,   177,   178,    66,
+     114,   152,    85,     0,     0,     0,     0,     0,     0,    72
 };
 
 static const yytype_int16 yycheck[] =
 {
-       5,     6,    30,     8,    57,    58,    11,    84,    85,     4,
-       5,     6,     7,     8,     9,    10,    11,    27,    95,    14,
-      15,    25,    26,    33,    68,    29,    70,    25,    81,    82,
-      34,    66,    30,     0,    69,    30,    71,    30,     0,     1,
-      93,     3,     4,     5,     6,     7,     8,     9,    10,    11,
-      12,    25,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    23,    68,    25,    70,     1,    30,   144,    30,     5,
-       6,    30,     8,     9,    10,    11,    12,    25,    14,    15,
-      16,    17,    18,    19,    20,   138,    91,    66,    30,    25,
-      69,     1,    71,    28,    30,     1,    31,    32,    25,    26,
-     153,   106,     0,     1,    30,     3,     4,     5,     6,     7,
-       8,     9,    10,    11,    12,    30,    14,    15,    16,    17,
-      18,    19,    20,    21,    22,    23,    30,    25,    25,    26,
-       0,     1,    30,     3,     4,     5,     6,     7,     8,     9,
-      10,    11,    31,    32,    14,    15,    16,    17,    18,    19,
-      20,    21,    22,     5,     6,    25,     8,     9,    10,    11,
-      30,    30,    14,    15,    30,     0,     1,   172,     3,     4,
-       5,     6,     7,     8,     9,    10,    11,    30,    30,    14,
-      15,    16,    17,    18,    19,    20,    21,    22,    67,    68,
-      25,    70,    30,     1,    30,    30,     4,     5,     6,     7,
-       8,     9,    10,    11,    30,    30,    14,    15,    16,    14,
-     141,   142,   143,    67,    68,    30,    70,    25,   149,    25,
-     151,    24,    30,    30,    31,    32,    31,    32,   159,    30,
-      31,    32,     1,    13,    30,    25,    14,    32,    30,    30,
-      33,    30,    30,    30,    30,    30,    30,    30,    29,    66,
-     107,    -1,    -1,    -1,    -1,    59,    -1,    -1,    40
+       5,     6,     0,     8,    58,    59,    11,    86,    87,     4,
+       5,     6,     7,     8,     9,    10,    11,    30,    97,    14,
+      15,    27,    25,    26,    25,    26,    29,    33,    30,    83,
+      84,    34,    25,    67,    25,    30,    70,    30,    72,     0,
+       1,    95,     3,     4,     5,     6,     7,     8,     9,    10,
+      11,    12,    25,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    23,    30,    25,    25,     1,   146,    30,    30,
+       5,     6,     1,     8,     9,    10,    11,    12,     1,    14,
+      15,    16,    17,    18,    19,    20,   140,    30,    93,    67,
+      25,    30,    70,    30,    72,    30,    28,    25,    26,    31,
+      32,   155,    24,   108,     0,     1,    30,     3,     4,     5,
+       6,     7,     8,     9,    10,    11,    12,    30,    14,    15,
+      16,    17,    18,    19,    20,    21,    22,    23,    69,    25,
+      71,    30,     0,     1,    30,     3,     4,     5,     6,     7,
+       8,     9,    10,    11,    31,    32,    14,    15,    16,    17,
+      18,    19,    20,    21,    22,     5,     6,    25,     8,     9,
+      10,    11,    30,    30,    14,    15,    30,     0,     1,   174,
+       3,     4,     5,     6,     7,     8,     9,    10,    11,    30,
+      30,    14,    15,    16,    17,    18,    19,    20,    21,    22,
+      68,    69,    25,    71,    69,     1,    71,    30,     4,     5,
+       6,     7,     8,     9,    10,    11,    30,    30,    14,    15,
+      16,    14,   143,   144,   145,    68,    69,    30,    71,    25,
+     151,    25,   153,     1,    30,    30,    31,    32,    31,    32,
+     161,    30,    31,    32,    13,    30,    25,    14,    32,    30,
+      30,    33,    30,    30,    30,    30,    30,    30,    30,    29,
+      67,   109,    60,    -1,    -1,    -1,    -1,    -1,    -1,    40
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
@@ -782,19 +788,19 @@
       20,    21,    22,    25,    30,    38,    39,    41,    42,    43,
       44,    50,    51,    53,    57,    59,    61,    62,    64,    66,
       67,    68,    75,    30,    25,    26,    74,    74,    30,    74,
-      30,    30,    74,    25,    25,    25,    26,    29,    34,    78,
-      79,    30,     1,     1,    45,    45,    54,    56,    60,    71,
-      65,    71,    30,    76,    30,    30,    30,    30,    30,    78,
-      78,    31,    32,    76,    27,    33,    30,    30,     1,    12,
-      16,    18,    19,    20,    21,    22,    23,    25,    30,    40,
-      46,    47,    69,    70,    72,    17,    18,    19,    20,    30,
-      40,    55,    70,    72,    39,    52,    75,    39,    53,    58,
-      64,    75,    30,    40,    72,    39,    53,    63,    64,    75,
-      30,    28,    78,    78,    79,    79,    30,    30,    24,    74,
-      73,    74,    78,    25,    79,    48,     1,    13,    30,    74,
-      73,    25,    78,    14,    77,    30,    77,    77,    77,    79,
-      25,    30,    30,    77,    30,    77,    30,    78,    30,    30,
-      30,    77,    33,    49,    30,    30,    30,    74
+      25,    80,    30,    74,    25,    25,    25,    26,    29,    34,
+      78,    79,    30,     1,     1,    45,    45,    54,    56,    60,
+      71,    65,    71,    30,    76,    30,    30,    30,    30,    30,
+      30,    78,    78,    31,    32,    76,    27,    33,    30,    30,
+       1,    12,    16,    18,    19,    20,    21,    22,    23,    25,
+      30,    40,    46,    47,    69,    70,    72,    17,    18,    19,
+      20,    30,    40,    55,    70,    72,    39,    52,    75,    39,
+      53,    58,    64,    75,    30,    40,    72,    39,    53,    63,
+      64,    75,    30,    28,    78,    78,    79,    79,    30,    30,
+      24,    74,    73,    74,    78,    25,    79,    48,     1,    13,
+      30,    74,    73,    25,    78,    14,    77,    30,    77,    77,
+      77,    79,    25,    30,    30,    77,    30,    77,    30,    78,
+      30,    30,    30,    77,    33,    49,    30,    30,    30,    74
 };
 
 #define yyerrok		(yyerrstatus = 0)
@@ -1781,8 +1787,8 @@
   case 48:
 
     {
-	struct symbol *sym = sym_lookup(NULL, 0);
-	sym->flags |= SYMBOL_CHOICE;
+	struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), SYMBOL_CHOICE);
+	sym->flags |= SYMBOL_AUTO;
 	menu_add_entry(sym);
 	menu_add_expr(P_CHOICE, NULL, NULL);
 	printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
@@ -2014,7 +2020,12 @@
 
   case 108:
 
-    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 1); free((yyvsp[(1) - (1)].string)); ;}
+    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); ;}
+    break;
+
+  case 109:
+
+    { (yyval.string) = NULL; ;}
     break;
 
 
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index d9b96ba..9710b82 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -91,7 +91,7 @@
 %type <id> end
 %type <id> option_name
 %type <menu> if_entry menu_entry choice_entry
-%type <string> symbol_option_arg
+%type <string> symbol_option_arg word_opt
 
 %destructor {
 	fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -239,10 +239,10 @@
 
 /* choice entry */
 
-choice: T_CHOICE T_EOL
+choice: T_CHOICE word_opt T_EOL
 {
-	struct symbol *sym = sym_lookup(NULL, 0);
-	sym->flags |= SYMBOL_CHOICE;
+	struct symbol *sym = sym_lookup($2, SYMBOL_CHOICE);
+	sym->flags |= SYMBOL_AUTO;
 	menu_add_entry(sym);
 	menu_add_expr(P_CHOICE, NULL, NULL);
 	printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
@@ -456,9 +456,12 @@
 ;
 
 symbol:	  T_WORD	{ $$ = sym_lookup($1, 0); free($1); }
-	| T_WORD_QUOTE	{ $$ = sym_lookup($1, 1); free($1); }
+	| T_WORD_QUOTE	{ $$ = sym_lookup($1, SYMBOL_CONST); free($1); }
 ;
 
+word_opt: /* empty */			{ $$ = NULL; }
+	| T_WORD
+
 %%
 
 void conf_parse(const char *name)
diff --git a/security/Makefile b/security/Makefile
index 9e8b025..7ef1107 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -18,3 +18,4 @@
 obj-$(CONFIG_SECURITY_SMACK)		+= commoncap.o smack/built-in.o
 obj-$(CONFIG_SECURITY_CAPABILITIES)	+= commoncap.o capability.o
 obj-$(CONFIG_SECURITY_ROOTPLUG)		+= commoncap.o root_plug.o
+obj-$(CONFIG_CGROUP_DEVICE)		+= device_cgroup.o
diff --git a/security/commoncap.c b/security/commoncap.c
index e8c3f5e..5edabc7 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -383,8 +383,8 @@
 		current->egid != current->gid);
 }
 
-int cap_inode_setxattr(struct dentry *dentry, char *name, void *value,
-		       size_t size, int flags)
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+		       const void *value, size_t size, int flags)
 {
 	if (!strcmp(name, XATTR_NAME_CAPS)) {
 		if (!capable(CAP_SETFCAP))
@@ -397,7 +397,7 @@
 	return 0;
 }
 
-int cap_inode_removexattr(struct dentry *dentry, char *name)
+int cap_inode_removexattr(struct dentry *dentry, const char *name)
 {
 	if (!strcmp(name, XATTR_NAME_CAPS)) {
 		if (!capable(CAP_SETFCAP))
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
new file mode 100644
index 0000000..4ea5836
--- /dev/null
+++ b/security/device_cgroup.c
@@ -0,0 +1,575 @@
+/*
+ * device_cgroup.c - device cgroup subsystem
+ *
+ * Copyright 2007 IBM Corp
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/cgroup.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+#define ACC_MKNOD 1
+#define ACC_READ  2
+#define ACC_WRITE 4
+#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)
+
+#define DEV_BLOCK 1
+#define DEV_CHAR  2
+#define DEV_ALL   4  /* this represents all devices */
+
+/*
+ * whitelist locking rules:
+ * cgroup_lock() cannot be taken under dev_cgroup->lock.
+ * dev_cgroup->lock can be taken with or without cgroup_lock().
+ *
+ * modifications always require cgroup_lock
+ * modifications to a list which is visible require the
+ *   dev_cgroup->lock *and* cgroup_lock()
+ * walking the list requires dev_cgroup->lock or cgroup_lock().
+ *
+ * reasoning: dev_whitelist_copy() needs to kmalloc, so needs
+ *   a mutex, which the cgroup_lock() is.  Since modifying
+ *   a visible list requires both locks, either lock can be
+ *   taken for walking the list.
+ */
+
+struct dev_whitelist_item {
+	u32 major, minor;
+	short type;
+	short access;
+	struct list_head list;
+};
+
+struct dev_cgroup {
+	struct cgroup_subsys_state css;
+	struct list_head whitelist;
+	spinlock_t lock;
+};
+
+static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
+{
+	return container_of(cgroup_subsys_state(cgroup, devices_subsys_id),
+			    struct dev_cgroup, css);
+}
+
+struct cgroup_subsys devices_subsys;
+
+static int devcgroup_can_attach(struct cgroup_subsys *ss,
+		struct cgroup *new_cgroup, struct task_struct *task)
+{
+	if (current != task && !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+	return 0;
+}
+
+/*
+ * called under cgroup_lock()
+ */
+static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
+{
+	struct dev_whitelist_item *wh, *tmp, *new;
+
+	list_for_each_entry(wh, orig, list) {
+		new = kmalloc(sizeof(*wh), GFP_KERNEL);
+		if (!new)
+			goto free_and_exit;
+		new->major = wh->major;
+		new->minor = wh->minor;
+		new->type = wh->type;
+		new->access = wh->access;
+		list_add_tail(&new->list, dest);
+	}
+
+	return 0;
+
+free_and_exit:
+	list_for_each_entry_safe(wh, tmp, dest, list) {
+		list_del(&wh->list);
+		kfree(wh);
+	}
+	return -ENOMEM;
+}
+
+/* Stupid prototype - don't bother combining existing entries */
+/*
+ * called under cgroup_lock()
+ * since the list is visible to other tasks, we need the spinlock also
+ */
+static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
+			struct dev_whitelist_item *wh)
+{
+	struct dev_whitelist_item *whcopy;
+
+	whcopy = kmalloc(sizeof(*whcopy), GFP_KERNEL);
+	if (!whcopy)
+		return -ENOMEM;
+
+	memcpy(whcopy, wh, sizeof(*whcopy));
+	spin_lock(&dev_cgroup->lock);
+	list_add_tail(&whcopy->list, &dev_cgroup->whitelist);
+	spin_unlock(&dev_cgroup->lock);
+	return 0;
+}
+
+/*
+ * called under cgroup_lock()
+ * since the list is visible to other tasks, we need the spinlock also
+ */
+static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
+			struct dev_whitelist_item *wh)
+{
+	struct dev_whitelist_item *walk, *tmp;
+
+	spin_lock(&dev_cgroup->lock);
+	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
+		if (walk->type == DEV_ALL)
+			goto remove;
+		if (walk->type != wh->type)
+			continue;
+		if (walk->major != ~0 && walk->major != wh->major)
+			continue;
+		if (walk->minor != ~0 && walk->minor != wh->minor)
+			continue;
+
+remove:
+		walk->access &= ~wh->access;
+		if (!walk->access) {
+			list_del(&walk->list);
+			kfree(walk);
+		}
+	}
+	spin_unlock(&dev_cgroup->lock);
+}
+
+/*
+ * called from kernel/cgroup.c with cgroup_lock() held.
+ */
+static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
+						struct cgroup *cgroup)
+{
+	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
+	struct cgroup *parent_cgroup;
+	int ret;
+
+	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
+	if (!dev_cgroup)
+		return ERR_PTR(-ENOMEM);
+	INIT_LIST_HEAD(&dev_cgroup->whitelist);
+	parent_cgroup = cgroup->parent;
+
+	if (parent_cgroup == NULL) {
+		struct dev_whitelist_item *wh;
+		wh = kmalloc(sizeof(*wh), GFP_KERNEL);
+		if (!wh) {
+			kfree(dev_cgroup);
+			return ERR_PTR(-ENOMEM);
+		}
+		wh->minor = wh->major = ~0;
+		wh->type = DEV_ALL;
+		wh->access = ACC_MKNOD | ACC_READ | ACC_WRITE;
+		list_add(&wh->list, &dev_cgroup->whitelist);
+	} else {
+		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
+		ret = dev_whitelist_copy(&dev_cgroup->whitelist,
+				&parent_dev_cgroup->whitelist);
+		if (ret) {
+			kfree(dev_cgroup);
+			return ERR_PTR(ret);
+		}
+	}
+
+	spin_lock_init(&dev_cgroup->lock);
+	return &dev_cgroup->css;
+}
+
+static void devcgroup_destroy(struct cgroup_subsys *ss,
+			struct cgroup *cgroup)
+{
+	struct dev_cgroup *dev_cgroup;
+	struct dev_whitelist_item *wh, *tmp;
+
+	dev_cgroup = cgroup_to_devcgroup(cgroup);
+	list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
+		list_del(&wh->list);
+		kfree(wh);
+	}
+	kfree(dev_cgroup);
+}
+
+#define DEVCG_ALLOW 1
+#define DEVCG_DENY 2
+#define DEVCG_LIST 3
+
+#define MAJMINLEN 10
+#define ACCLEN 4
+
+static void set_access(char *acc, short access)
+{
+	int idx = 0;
+	memset(acc, 0, ACCLEN);
+	if (access & ACC_READ)
+		acc[idx++] = 'r';
+	if (access & ACC_WRITE)
+		acc[idx++] = 'w';
+	if (access & ACC_MKNOD)
+		acc[idx++] = 'm';
+}
+
+static char type_to_char(short type)
+{
+	if (type == DEV_ALL)
+		return 'a';
+	if (type == DEV_CHAR)
+		return 'c';
+	if (type == DEV_BLOCK)
+		return 'b';
+	return 'X';
+}
+
+static void set_majmin(char *str, unsigned m)
+{
+	memset(str, 0, MAJMINLEN);
+	if (m == ~0)
+		sprintf(str, "*");
+	else
+		snprintf(str, MAJMINLEN, "%d", m);
+}
+
+static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
+				struct seq_file *m)
+{
+	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
+	struct dev_whitelist_item *wh;
+	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
+
+	spin_lock(&devcgroup->lock);
+	list_for_each_entry(wh, &devcgroup->whitelist, list) {
+		set_access(acc, wh->access);
+		set_majmin(maj, wh->major);
+		set_majmin(min, wh->minor);
+		seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
+			   maj, min, acc);
+	}
+	spin_unlock(&devcgroup->lock);
+
+	return 0;
+}
+
+/*
+ * may_access_whitelist:
+ * does the access granted to dev_cgroup c contain the access
+ * requested in whitelist item refwh.
+ * return 1 if yes, 0 if no.
+ * call with c->lock held
+ */
+static int may_access_whitelist(struct dev_cgroup *c,
+				       struct dev_whitelist_item *refwh)
+{
+	struct dev_whitelist_item *whitem;
+
+	list_for_each_entry(whitem, &c->whitelist, list) {
+		if (whitem->type & DEV_ALL)
+			return 1;
+		if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+			continue;
+		if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
+			continue;
+		if (whitem->major != ~0 && whitem->major != refwh->major)
+			continue;
+		if (whitem->minor != ~0 && whitem->minor != refwh->minor)
+			continue;
+		if (refwh->access & (~(whitem->access | ACC_MASK)))
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * parent_has_perm:
+ * when adding a new allow rule to a device whitelist, the rule
+ * must be allowed in the parent device cgroup
+ */
+static int parent_has_perm(struct cgroup *childcg,
+				  struct dev_whitelist_item *wh)
+{
+	struct cgroup *pcg = childcg->parent;
+	struct dev_cgroup *parent;
+	int ret;
+
+	if (!pcg)
+		return 1;
+	parent = cgroup_to_devcgroup(pcg);
+	spin_lock(&parent->lock);
+	ret = may_access_whitelist(parent, wh);
+	spin_unlock(&parent->lock);
+	return ret;
+}
+
+/*
+ * Modify the whitelist using allow/deny rules.
+ * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
+ * so we can give a container CAP_MKNOD to let it create devices but not
+ * modify the whitelist.
+ * It seems likely we'll want to add a CAP_CONTAINER capability to allow
+ * us to also grant CAP_SYS_ADMIN to containers without giving away the
+ * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
+ *
+ * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
+ * new access is only allowed if you're in the top-level cgroup, or your
+ * parent cgroup has the access you're asking for.
+ */
+static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft,
+				struct file *file, const char __user *userbuf,
+				size_t nbytes, loff_t *ppos)
+{
+	struct cgroup *cur_cgroup;
+	struct dev_cgroup *devcgroup, *cur_devcgroup;
+	int filetype = cft->private;
+	char *buffer, *b;
+	int retval = 0, count;
+	struct dev_whitelist_item wh;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	devcgroup = cgroup_to_devcgroup(cgroup);
+	cur_cgroup = task_cgroup(current, devices_subsys.subsys_id);
+	cur_devcgroup = cgroup_to_devcgroup(cur_cgroup);
+
+	buffer = kmalloc(nbytes+1, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	if (copy_from_user(buffer, userbuf, nbytes)) {
+		retval = -EFAULT;
+		goto out1;
+	}
+	buffer[nbytes] = 0;	/* nul-terminate */
+
+	cgroup_lock();
+	if (cgroup_is_removed(cgroup)) {
+		retval = -ENODEV;
+		goto out2;
+	}
+
+	memset(&wh, 0, sizeof(wh));
+	b = buffer;
+
+	switch (*b) {
+	case 'a':
+		wh.type = DEV_ALL;
+		wh.access = ACC_MASK;
+		goto handle;
+	case 'b':
+		wh.type = DEV_BLOCK;
+		break;
+	case 'c':
+		wh.type = DEV_CHAR;
+		break;
+	default:
+		retval = -EINVAL;
+		goto out2;
+	}
+	b++;
+	if (!isspace(*b)) {
+		retval = -EINVAL;
+		goto out2;
+	}
+	b++;
+	if (*b == '*') {
+		wh.major = ~0;
+		b++;
+	} else if (isdigit(*b)) {
+		wh.major = 0;
+		while (isdigit(*b)) {
+			wh.major = wh.major*10+(*b-'0');
+			b++;
+		}
+	} else {
+		retval = -EINVAL;
+		goto out2;
+	}
+	if (*b != ':') {
+		retval = -EINVAL;
+		goto out2;
+	}
+	b++;
+
+	/* read minor */
+	if (*b == '*') {
+		wh.minor = ~0;
+		b++;
+	} else if (isdigit(*b)) {
+		wh.minor = 0;
+		while (isdigit(*b)) {
+			wh.minor = wh.minor*10+(*b-'0');
+			b++;
+		}
+	} else {
+		retval = -EINVAL;
+		goto out2;
+	}
+	if (!isspace(*b)) {
+		retval = -EINVAL;
+		goto out2;
+	}
+	for (b++, count = 0; count < 3; count++, b++) {
+		switch (*b) {
+		case 'r':
+			wh.access |= ACC_READ;
+			break;
+		case 'w':
+			wh.access |= ACC_WRITE;
+			break;
+		case 'm':
+			wh.access |= ACC_MKNOD;
+			break;
+		case '\n':
+		case '\0':
+			count = 3;
+			break;
+		default:
+			retval = -EINVAL;
+			goto out2;
+		}
+	}
+
+handle:
+	retval = 0;
+	switch (filetype) {
+	case DEVCG_ALLOW:
+		if (!parent_has_perm(cgroup, &wh))
+			retval = -EPERM;
+		else
+			retval = dev_whitelist_add(devcgroup, &wh);
+		break;
+	case DEVCG_DENY:
+		dev_whitelist_rm(devcgroup, &wh);
+		break;
+	default:
+		retval = -EINVAL;
+		goto out2;
+	}
+
+	if (retval == 0)
+		retval = nbytes;
+
+out2:
+	cgroup_unlock();
+out1:
+	kfree(buffer);
+	return retval;
+}
+
+static struct cftype dev_cgroup_files[] = {
+	{
+		.name = "allow",
+		.write  = devcgroup_access_write,
+		.private = DEVCG_ALLOW,
+	},
+	{
+		.name = "deny",
+		.write = devcgroup_access_write,
+		.private = DEVCG_DENY,
+	},
+	{
+		.name = "list",
+		.read_seq_string = devcgroup_seq_read,
+		.private = DEVCG_LIST,
+	},
+};
+
+static int devcgroup_populate(struct cgroup_subsys *ss,
+				struct cgroup *cgroup)
+{
+	return cgroup_add_files(cgroup, ss, dev_cgroup_files,
+					ARRAY_SIZE(dev_cgroup_files));
+}
+
+struct cgroup_subsys devices_subsys = {
+	.name = "devices",
+	.can_attach = devcgroup_can_attach,
+	.create = devcgroup_create,
+	.destroy  = devcgroup_destroy,
+	.populate = devcgroup_populate,
+	.subsys_id = devices_subsys_id,
+};
+
+int devcgroup_inode_permission(struct inode *inode, int mask)
+{
+	struct cgroup *cgroup;
+	struct dev_cgroup *dev_cgroup;
+	struct dev_whitelist_item *wh;
+
+	dev_t device = inode->i_rdev;
+	if (!device)
+		return 0;
+	if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+		return 0;
+	cgroup = task_cgroup(current, devices_subsys.subsys_id);
+	dev_cgroup = cgroup_to_devcgroup(cgroup);
+	if (!dev_cgroup)
+		return 0;
+
+	spin_lock(&dev_cgroup->lock);
+	list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+		if (wh->type & DEV_ALL)
+			goto acc_check;
+		if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
+			continue;
+		if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
+			continue;
+		if (wh->major != ~0 && wh->major != imajor(inode))
+			continue;
+		if (wh->minor != ~0 && wh->minor != iminor(inode))
+			continue;
+acc_check:
+		if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
+			continue;
+		if ((mask & MAY_READ) && !(wh->access & ACC_READ))
+			continue;
+		spin_unlock(&dev_cgroup->lock);
+		return 0;
+	}
+	spin_unlock(&dev_cgroup->lock);
+
+	return -EPERM;
+}
+
+int devcgroup_inode_mknod(int mode, dev_t dev)
+{
+	struct cgroup *cgroup;
+	struct dev_cgroup *dev_cgroup;
+	struct dev_whitelist_item *wh;
+
+	cgroup = task_cgroup(current, devices_subsys.subsys_id);
+	dev_cgroup = cgroup_to_devcgroup(cgroup);
+	if (!dev_cgroup)
+		return 0;
+
+	spin_lock(&dev_cgroup->lock);
+	list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+		if (wh->type & DEV_ALL)
+			goto acc_check;
+		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
+			continue;
+		if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
+			continue;
+		if (wh->major != ~0 && wh->major != MAJOR(dev))
+			continue;
+		if (wh->minor != ~0 && wh->minor != MINOR(dev))
+			continue;
+acc_check:
+		if (!(wh->access & ACC_MKNOD))
+			continue;
+		spin_unlock(&dev_cgroup->lock);
+		return 0;
+	}
+	spin_unlock(&dev_cgroup->lock);
+	return -EPERM;
+}
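
Taken together with devcgroup_populate() above, the control files this patch
adds are written in the "<type> <major>:<minor> <access>" form parsed by
devcgroup_access_write().  As an illustrative userspace sketch (not part of
the patch), here is how a rule granting read/write access to the /dev/null
character device could be added to a child group; the cgroup mount point and
group name are assumptions, and the file names are what the cgroup core
typically exposes for the "devices" subsystem (devices.allow, devices.deny,
devices.list):

#include <stdio.h>

int main(void)
{
	/* mount point and child group name are hypothetical */
	FILE *f = fopen("/dev/cgroup/child/devices.allow", "w");

	if (!f) {
		perror("devices.allow");
		return 1;
	}
	/* 'c' = char device, 1:3 = /dev/null, "rw" = read/write (no mknod),
	 * exactly the format devcgroup_access_write() parses above */
	fprintf(f, "c 1:3 rw\n");
	return fclose(f) ? 1 : 0;
}
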
diff --git a/security/dummy.c b/security/dummy.c
index 58d4dd1..48cf302 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -365,8 +365,8 @@
 	return;
 }
 
-static int dummy_inode_setxattr (struct dentry *dentry, char *name, void *value,
-				size_t size, int flags)
+static int dummy_inode_setxattr (struct dentry *dentry, const char *name,
+				 const void *value, size_t size, int flags)
 {
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
@@ -375,12 +375,13 @@
 	return 0;
 }
 
-static void dummy_inode_post_setxattr (struct dentry *dentry, char *name, void *value,
-				       size_t size, int flags)
+static void dummy_inode_post_setxattr (struct dentry *dentry, const char *name,
+				       const void *value, size_t size,
+				       int flags)
 {
 }
 
-static int dummy_inode_getxattr (struct dentry *dentry, char *name)
+static int dummy_inode_getxattr (struct dentry *dentry, const char *name)
 {
 	return 0;
 }
@@ -390,7 +391,7 @@
 	return 0;
 }
 
-static int dummy_inode_removexattr (struct dentry *dentry, char *name)
+static int dummy_inode_removexattr (struct dentry *dentry, const char *name)
 {
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
@@ -993,6 +994,13 @@
 {
 	return 0;
 }
+
+static int dummy_key_getsecurity(struct key *key, char **_buffer)
+{
+	*_buffer = NULL;
+	return 0;
+}
+
 #endif /* CONFIG_KEYS */
 
 #ifdef CONFIG_AUDIT
@@ -1209,6 +1217,7 @@
 	set_to_dummy_if_null(ops, key_alloc);
 	set_to_dummy_if_null(ops, key_free);
 	set_to_dummy_if_null(ops, key_permission);
+	set_to_dummy_if_null(ops, key_getsecurity);
 #endif	/* CONFIG_KEYS */
 #ifdef CONFIG_AUDIT
 	set_to_dummy_if_null(ops, audit_rule_init);
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 5145adf..747a464 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -14,3 +14,4 @@
 
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/compat.c b/security/keys/compat.c
index e10ec99..c766c68 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -79,6 +79,9 @@
 	case KEYCTL_ASSUME_AUTHORITY:
 		return keyctl_assume_authority(arg2);
 
+	case KEYCTL_GET_SECURITY:
+		return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
+
 	default:
 		return -EOPNOTSUPP;
 	}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 7d894ef..8c05587 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -57,10 +57,6 @@
 	int			qnbytes;	/* number of bytes allocated to this user */
 };
 
-#define KEYQUOTA_MAX_KEYS	100
-#define KEYQUOTA_MAX_BYTES	10000
-#define KEYQUOTA_LINK_BYTES	4		/* a link in a keyring is worth 4 bytes */
-
 extern struct rb_root	key_user_tree;
 extern spinlock_t	key_user_lock;
 extern struct key_user	root_key_user;
@@ -68,6 +64,16 @@
 extern struct key_user *key_user_lookup(uid_t uid);
 extern void key_user_put(struct key_user *user);
 
+/*
+ * key quota limits
+ * - root has its own separate limits from everyone else
+ */
+extern unsigned key_quota_root_maxkeys;
+extern unsigned key_quota_root_maxbytes;
+extern unsigned key_quota_maxkeys;
+extern unsigned key_quota_maxbytes;
+
+#define KEYQUOTA_LINK_BYTES	4		/* a link in a keyring is worth 4 bytes */
 
 
 extern struct rb_root key_serial_tree;
@@ -77,8 +83,6 @@
 extern wait_queue_head_t request_key_conswq;
 
 
-extern void keyring_publish_name(struct key *keyring);
-
 extern int __key_link(struct key *keyring, struct key *key);
 
 extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
@@ -102,14 +106,15 @@
 					 key_match_func_t match,
 					 struct task_struct *tsk);
 
-extern struct key *find_keyring_by_name(const char *name, key_serial_t bound);
+extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
 
 extern int install_thread_keyring(struct task_struct *tsk);
 extern int install_process_keyring(struct task_struct *tsk);
 
 extern struct key *request_key_and_link(struct key_type *type,
 					const char *description,
-					const char *callout_info,
+					const void *callout_info,
+					size_t callout_len,
 					void *aux,
 					struct key *dest_keyring,
 					unsigned long flags);
@@ -120,13 +125,15 @@
 struct request_key_auth {
 	struct key		*target_key;
 	struct task_struct	*context;
-	char			*callout_info;
+	void			*callout_info;
+	size_t			callout_len;
 	pid_t			pid;
 };
 
 extern struct key_type key_type_request_key_auth;
 extern struct key *request_key_auth_new(struct key *target,
-					const char *callout_info);
+					const void *callout_info,
+					size_t callout_len);
 
 extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
 
@@ -152,7 +159,8 @@
 extern long keyctl_set_reqkey_keyring(int);
 extern long keyctl_set_timeout(key_serial_t, unsigned);
 extern long keyctl_assume_authority(key_serial_t);
-
+extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
+				size_t buflen);
 
 /*
  * debugging key validation
diff --git a/security/keys/key.c b/security/keys/key.c
index 654d23b..14948cf 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -1,6 +1,6 @@
 /* Basic authentication token and access key management
  *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -27,6 +27,11 @@
 struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
 DEFINE_SPINLOCK(key_user_lock);
 
+unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
+unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
+unsigned int key_quota_maxkeys = 200;		/* general key count quota */
+unsigned int key_quota_maxbytes = 20000;	/* general key space quota */
+
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
 
@@ -139,36 +144,6 @@
 
 /*****************************************************************************/
 /*
- * insert a key with a fixed serial number
- */
-static void __init __key_insert_serial(struct key *key)
-{
-	struct rb_node *parent, **p;
-	struct key *xkey;
-
-	parent = NULL;
-	p = &key_serial_tree.rb_node;
-
-	while (*p) {
-		parent = *p;
-		xkey = rb_entry(parent, struct key, serial_node);
-
-		if (key->serial < xkey->serial)
-			p = &(*p)->rb_left;
-		else if (key->serial > xkey->serial)
-			p = &(*p)->rb_right;
-		else
-			BUG();
-	}
-
-	/* we've found a suitable hole - arrange for this key to occupy it */
-	rb_link_node(&key->serial_node, parent, p);
-	rb_insert_color(&key->serial_node, &key_serial_tree);
-
-} /* end __key_insert_serial() */
-
-/*****************************************************************************/
-/*
  * assign a key the next unique serial number
  * - these are assigned randomly to avoid security issues through covert
  *   channel problems
@@ -266,11 +241,16 @@
 	/* check that the user's quota permits allocation of another key and
 	 * its description */
 	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+		unsigned maxkeys = (uid == 0) ?
+			key_quota_root_maxkeys : key_quota_maxkeys;
+		unsigned maxbytes = (uid == 0) ?
+			key_quota_root_maxbytes : key_quota_maxbytes;
+
 		spin_lock(&user->lock);
 		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
-			if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
-			    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
-			    )
+			if (user->qnkeys + 1 >= maxkeys ||
+			    user->qnbytes + quotalen >= maxbytes ||
+			    user->qnbytes + quotalen < user->qnbytes)
 				goto no_quota;
 		}
 
@@ -375,11 +355,14 @@
 
 	/* contemplate the quota adjustment */
 	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+		unsigned maxbytes = (key->user->uid == 0) ?
+			key_quota_root_maxbytes : key_quota_maxbytes;
+
 		spin_lock(&key->user->lock);
 
 		if (delta > 0 &&
-		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
-		    ) {
+		    (key->user->qnbytes + delta >= maxbytes ||
+		     key->user->qnbytes + delta < key->user->qnbytes)) {
 			ret = -EDQUOT;
 		}
 		else {
@@ -757,11 +740,11 @@
 			       const char *description,
 			       const void *payload,
 			       size_t plen,
+			       key_perm_t perm,
 			       unsigned long flags)
 {
 	struct key_type *ktype;
 	struct key *keyring, *key = NULL;
-	key_perm_t perm;
 	key_ref_t key_ref;
 	int ret;
 
@@ -806,15 +789,17 @@
 			goto found_matching_key;
 	}
 
-	/* decide on the permissions we want */
-	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
-	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
+	/* if the client doesn't provide, decide on the permissions we want */
+	if (perm == KEY_PERM_UNDEF) {
+		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
 
-	if (ktype->read)
-		perm |= KEY_POS_READ | KEY_USR_READ;
+		if (ktype->read)
+			perm |= KEY_POS_READ | KEY_USR_READ;
 
-	if (ktype == &key_type_keyring || ktype->update)
-		perm |= KEY_USR_WRITE;
+		if (ktype == &key_type_keyring || ktype->update)
+			perm |= KEY_USR_WRITE;
+	}
 
 	/* allocate a new key */
 	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
@@ -1018,17 +1003,4 @@
 	rb_insert_color(&root_key_user.node,
 			&key_user_tree);
 
-	/* record root's user standard keyrings */
-	key_check(&root_user_keyring);
-	key_check(&root_session_keyring);
-
-	__key_insert_serial(&root_user_keyring);
-	__key_insert_serial(&root_session_keyring);
-
-	keyring_publish_name(&root_user_keyring);
-	keyring_publish_name(&root_session_keyring);
-
-	/* link the two root keyrings together */
-	key_link(&root_session_keyring, &root_user_keyring);
-
 } /* end key_init() */
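
With the quota constants now runtime-tunable and key_create_or_update()
taking an explicit key_perm_t argument, in-kernel callers that do not want
to choose permissions themselves can pass KEY_PERM_UNDEF and keep the old
default behaviour (as keyctl.c does below).  A minimal in-kernel sketch,
not part of the patch; the "user" key type, description and payload are
hypothetical:

#include <linux/key.h>
#include <linux/err.h>

static int add_example_key(key_ref_t keyring_ref)
{
	key_ref_t kref;

	/* KEY_PERM_UNDEF lets key_create_or_update() pick the default
	 * VIEW/SEARCH/LINK/SETATTR permission mask shown above */
	kref = key_create_or_update(keyring_ref, "user", "example:desc",
				    "payload", 7, KEY_PERM_UNDEF,
				    KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(kref))
		return PTR_ERR(kref);

	key_ref_put(kref);
	return 0;
}
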
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index d9ca15c..acc9c89 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -19,6 +19,8 @@
 #include <linux/capability.h>
 #include <linux/string.h>
 #include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -62,9 +64,10 @@
 	char type[32], *description;
 	void *payload;
 	long ret;
+	bool vm;
 
 	ret = -EINVAL;
-	if (plen > 32767)
+	if (plen > 1024 * 1024 - 1)
 		goto error;
 
 	/* draw all the data into kernel space */
@@ -81,11 +84,18 @@
 	/* pull the payload in if one was supplied */
 	payload = NULL;
 
+	vm = false;
 	if (_payload) {
 		ret = -ENOMEM;
 		payload = kmalloc(plen, GFP_KERNEL);
-		if (!payload)
-			goto error2;
+		if (!payload) {
+			if (plen <= PAGE_SIZE)
+				goto error2;
+			vm = true;
+			payload = vmalloc(plen);
+			if (!payload)
+				goto error2;
+		}
 
 		ret = -EFAULT;
 		if (copy_from_user(payload, _payload, plen) != 0)
@@ -102,7 +112,8 @@
 	/* create or update the requested key and add it to the target
 	 * keyring */
 	key_ref = key_create_or_update(keyring_ref, type, description,
-				       payload, plen, KEY_ALLOC_IN_QUOTA);
+				       payload, plen, KEY_PERM_UNDEF,
+				       KEY_ALLOC_IN_QUOTA);
 	if (!IS_ERR(key_ref)) {
 		ret = key_ref_to_ptr(key_ref)->serial;
 		key_ref_put(key_ref);
@@ -113,7 +124,10 @@
 
 	key_ref_put(keyring_ref);
  error3:
-	kfree(payload);
+	if (!vm)
+		kfree(payload);
+	else
+		vfree(payload);
  error2:
 	kfree(description);
  error:
@@ -140,6 +154,7 @@
 	struct key_type *ktype;
 	struct key *key;
 	key_ref_t dest_ref;
+	size_t callout_len;
 	char type[32], *description, *callout_info;
 	long ret;
 
@@ -157,12 +172,14 @@
 
 	/* pull the callout info into kernel space */
 	callout_info = NULL;
+	callout_len = 0;
 	if (_callout_info) {
 		callout_info = strndup_user(_callout_info, PAGE_SIZE);
 		if (IS_ERR(callout_info)) {
 			ret = PTR_ERR(callout_info);
 			goto error2;
 		}
+		callout_len = strlen(callout_info);
 	}
 
 	/* get the destination keyring if specified */
@@ -183,8 +200,8 @@
 	}
 
 	/* do the search */
-	key = request_key_and_link(ktype, description, callout_info, NULL,
-				   key_ref_to_ptr(dest_ref),
+	key = request_key_and_link(ktype, description, callout_info,
+				   callout_len, NULL, key_ref_to_ptr(dest_ref),
 				   KEY_ALLOC_IN_QUOTA);
 	if (IS_ERR(key)) {
 		ret = PTR_ERR(key);
@@ -714,10 +731,16 @@
 
 		/* transfer the quota burden to the new user */
 		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+			unsigned maxkeys = (uid == 0) ?
+				key_quota_root_maxkeys : key_quota_maxkeys;
+			unsigned maxbytes = (uid == 0) ?
+				key_quota_root_maxbytes : key_quota_maxbytes;
+
 			spin_lock(&newowner->lock);
-			if (newowner->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
-			    newowner->qnbytes + key->quotalen >=
-			    KEYQUOTA_MAX_BYTES)
+			if (newowner->qnkeys + 1 >= maxkeys ||
+			    newowner->qnbytes + key->quotalen >= maxbytes ||
+			    newowner->qnbytes + key->quotalen <
+			    newowner->qnbytes)
 				goto quota_overrun;
 
 			newowner->qnkeys++;
@@ -821,9 +844,10 @@
 	key_ref_t keyring_ref;
 	void *payload;
 	long ret;
+	bool vm = false;
 
 	ret = -EINVAL;
-	if (plen > 32767)
+	if (plen > 1024 * 1024 - 1)
 		goto error;
 
 	/* the appropriate instantiation authorisation key must have been
@@ -843,8 +867,14 @@
 	if (_payload) {
 		ret = -ENOMEM;
 		payload = kmalloc(plen, GFP_KERNEL);
-		if (!payload)
-			goto error;
+		if (!payload) {
+			if (plen <= PAGE_SIZE)
+				goto error;
+			vm = true;
+			payload = vmalloc(plen);
+			if (!payload)
+				goto error;
+		}
 
 		ret = -EFAULT;
 		if (copy_from_user(payload, _payload, plen) != 0)
@@ -877,7 +907,10 @@
 	}
 
 error2:
-	kfree(payload);
+	if (!vm)
+		kfree(payload);
+	else
+		vfree(payload);
 error:
 	return ret;
 
@@ -1055,6 +1088,66 @@
 
 } /* end keyctl_assume_authority() */
 
+/*
+ * get the security label of a key
+ * - the key must grant us view permission
+ * - if there's a buffer, we place up to buflen bytes of data into it
+ * - unless there's an error, we return the amount of information available,
+ *   irrespective of how much we may have copied (including the terminal NUL)
+ * - implements keyctl(KEYCTL_GET_SECURITY)
+ */
+long keyctl_get_security(key_serial_t keyid,
+			 char __user *buffer,
+			 size_t buflen)
+{
+	struct key *key, *instkey;
+	key_ref_t key_ref;
+	char *context;
+	long ret;
+
+	key_ref = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW);
+	if (IS_ERR(key_ref)) {
+		if (PTR_ERR(key_ref) != -EACCES)
+			return PTR_ERR(key_ref);
+
+		/* viewing a key under construction is also permitted if we
+		 * have the authorisation token handy */
+		instkey = key_get_instantiation_authkey(keyid);
+		if (IS_ERR(instkey))
+			return PTR_ERR(key_ref);
+		key_put(instkey);
+
+		key_ref = lookup_user_key(NULL, keyid, 0, 1, 0);
+		if (IS_ERR(key_ref))
+			return PTR_ERR(key_ref);
+	}
+
+	key = key_ref_to_ptr(key_ref);
+	ret = security_key_getsecurity(key, &context);
+	if (ret == 0) {
+		/* if no information was returned, give userspace an empty
+		 * string */
+		ret = 1;
+		if (buffer && buflen > 0 &&
+		    copy_to_user(buffer, "", 1) != 0)
+			ret = -EFAULT;
+	} else if (ret > 0) {
+		/* return as much data as there's room for */
+		if (buffer && buflen > 0) {
+			if (buflen > ret)
+				buflen = ret;
+
+			if (copy_to_user(buffer, context, buflen) != 0)
+				ret = -EFAULT;
+		}
+
+		kfree(context);
+	}
+
+	key_ref_put(key_ref);
+	return ret;
+}
+
 /*****************************************************************************/
 /*
  * the key control system call
@@ -1135,6 +1228,11 @@
 	case KEYCTL_ASSUME_AUTHORITY:
 		return keyctl_assume_authority((key_serial_t) arg2);
 
+	case KEYCTL_GET_SECURITY:
+		return keyctl_get_security((key_serial_t) arg2,
+					   (char *) arg3,
+					   (size_t) arg4);
+
 	default:
 		return -EOPNOTSUPP;
 	}
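
From userspace, the new KEYCTL_GET_SECURITY operation is reached through the
keyctl() system call like the other operations dispatched above.  An
illustrative sketch (not part of the patch), assuming the KEYCTL_GET_SECURITY
constant is exported via <linux/keyctl.h> by the corresponding header change,
which is not shown in this hunk:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(int argc, char *argv[])
{
	char buf[256];
	long key, n;

	if (argc != 2)
		return 1;
	key = atol(argv[1]);	/* key serial number */

	/* returns the size of the security label (including the NUL),
	 * copying at most sizeof(buf) bytes into buf */
	n = syscall(__NR_keyctl, KEYCTL_GET_SECURITY, key, buf, sizeof(buf));
	if (n < 0) {
		perror("keyctl");
		return 1;
	}
	buf[sizeof(buf) - 1] = '\0';	/* in case the label was truncated */
	printf("security label: %s (%ld bytes available)\n", buf, n);
	return 0;
}
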
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 88292e3..a9ab8af 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1,6 +1,6 @@
-/* keyring.c: keyring handling
+/* Keyring handling
  *
- * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -79,7 +79,7 @@
  * publish the name of a keyring so that it can be found by name (if it has
  * one)
  */
-void keyring_publish_name(struct key *keyring)
+static void keyring_publish_name(struct key *keyring)
 {
 	int bucket;
 
@@ -292,7 +292,7 @@
 
 	struct keyring_list *keylist;
 	struct timespec now;
-	unsigned long possessed;
+	unsigned long possessed, kflags;
 	struct key *keyring, *key;
 	key_ref_t key_ref;
 	long err;
@@ -319,6 +319,32 @@
 	err = -EAGAIN;
 	sp = 0;
 
+	/* firstly we should check to see if this top-level keyring is what we
+	 * are looking for */
+	key_ref = ERR_PTR(-EAGAIN);
+	kflags = keyring->flags;
+	if (keyring->type == type && match(keyring, description)) {
+		key = keyring;
+
+		/* check it isn't negative and hasn't expired or been
+		 * revoked */
+		if (kflags & (1 << KEY_FLAG_REVOKED))
+			goto error_2;
+		if (key->expiry && now.tv_sec >= key->expiry)
+			goto error_2;
+		key_ref = ERR_PTR(-ENOKEY);
+		if (kflags & (1 << KEY_FLAG_NEGATIVE))
+			goto error_2;
+		goto found;
+	}
+
+	/* otherwise, the top keyring must not be revoked, expired, or
+	 * negatively instantiated if we are to search it */
+	key_ref = ERR_PTR(-EAGAIN);
+	if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) ||
+	    (keyring->expiry && now.tv_sec >= keyring->expiry))
+		goto error_2;
+
 	/* start processing a new keyring */
 descend:
 	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
@@ -331,13 +357,14 @@
 	/* iterate through the keys in this keyring first */
 	for (kix = 0; kix < keylist->nkeys; kix++) {
 		key = keylist->keys[kix];
+		kflags = key->flags;
 
 		/* ignore keys not of this type */
 		if (key->type != type)
 			continue;
 
 		/* skip revoked keys and expired keys */
-		if (test_bit(KEY_FLAG_REVOKED, &key->flags))
+		if (kflags & (1 << KEY_FLAG_REVOKED))
 			continue;
 
 		if (key->expiry && now.tv_sec >= key->expiry)
@@ -352,8 +379,8 @@
 					context, KEY_SEARCH) < 0)
 			continue;
 
-		/* we set a different error code if we find a negative key */
-		if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+		/* we set a different error code if we pass a negative key */
+		if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
 			err = -ENOKEY;
 			continue;
 		}
@@ -489,10 +516,9 @@
 /*
  * find a keyring with the specified name
  * - all named keyrings are searched
- * - only find keyrings with search permission for the process
- * - only find keyrings with a serial number greater than the one specified
+ * - normally only finds keyrings with search permission for the current process
  */
-struct key *find_keyring_by_name(const char *name, key_serial_t bound)
+struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
 {
 	struct key *keyring;
 	int bucket;
@@ -518,15 +544,11 @@
 			if (strcmp(keyring->description, name) != 0)
 				continue;
 
-			if (key_permission(make_key_ref(keyring, 0),
+			if (!skip_perm_check &&
+			    key_permission(make_key_ref(keyring, 0),
 					   KEY_SEARCH) < 0)
 				continue;
 
-			/* found a potential candidate, but we still need to
-			 * check the serial number */
-			if (keyring->serial <= bound)
-				continue;
-
 			/* we've got a match */
 			atomic_inc(&keyring->usage);
 			read_unlock(&keyring_name_lock);
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 6941260..f619170 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -70,19 +70,15 @@
 	struct proc_dir_entry *p;
 
 #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-	p = create_proc_entry("keys", 0, NULL);
+	p = proc_create("keys", 0, NULL, &proc_keys_fops);
 	if (!p)
 		panic("Cannot create /proc/keys\n");
-
-	p->proc_fops = &proc_keys_fops;
 #endif
 
-	p = create_proc_entry("key-users", 0, NULL);
+	p = proc_create("key-users", 0, NULL, &proc_key_users_fops);
 	if (!p)
 		panic("Cannot create /proc/key-users\n");
 
-	p->proc_fops = &proc_key_users_fops;
-
 	return 0;
 
 } /* end key_proc_init() */
@@ -246,6 +242,10 @@
 {
 	struct rb_node *_p = v;
 	struct key_user *user = rb_entry(_p, struct key_user, node);
+	unsigned maxkeys = (user->uid == 0) ?
+		key_quota_root_maxkeys : key_quota_maxkeys;
+	unsigned maxbytes = (user->uid == 0) ?
+		key_quota_root_maxbytes : key_quota_maxbytes;
 
 	seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
 		   user->uid,
@@ -253,10 +253,9 @@
 		   atomic_read(&user->nkeys),
 		   atomic_read(&user->nikeys),
 		   user->qnkeys,
-		   KEYQUOTA_MAX_KEYS,
+		   maxkeys,
 		   user->qnbytes,
-		   KEYQUOTA_MAX_BYTES
-		   );
+		   maxbytes);
 
 	return 0;
 
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index c886a2b..5be6d01 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -1,6 +1,6 @@
-/* process_keys.c: management of a process's keyrings
+/* Management of a process's keyrings
  *
- * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -23,6 +23,9 @@
 /* session keyring create vs join semaphore */
 static DEFINE_MUTEX(key_session_mutex);
 
+/* user keyring creation semaphore */
+static DEFINE_MUTEX(key_user_keyring_mutex);
+
 /* the root user's tracking struct */
 struct key_user root_key_user = {
 	.usage		= ATOMIC_INIT(3),
@@ -33,78 +36,84 @@
 	.uid		= 0,
 };
 
-/* the root user's UID keyring */
-struct key root_user_keyring = {
-	.usage		= ATOMIC_INIT(1),
-	.serial		= 2,
-	.type		= &key_type_keyring,
-	.user		= &root_key_user,
-	.sem		= __RWSEM_INITIALIZER(root_user_keyring.sem),
-	.perm		= (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
-	.flags		= 1 << KEY_FLAG_INSTANTIATED,
-	.description	= "_uid.0",
-#ifdef KEY_DEBUGGING
-	.magic		= KEY_DEBUG_MAGIC,
-#endif
-};
-
-/* the root user's default session keyring */
-struct key root_session_keyring = {
-	.usage		= ATOMIC_INIT(1),
-	.serial		= 1,
-	.type		= &key_type_keyring,
-	.user		= &root_key_user,
-	.sem		= __RWSEM_INITIALIZER(root_session_keyring.sem),
-	.perm		= (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
-	.flags		= 1 << KEY_FLAG_INSTANTIATED,
-	.description	= "_uid_ses.0",
-#ifdef KEY_DEBUGGING
-	.magic		= KEY_DEBUG_MAGIC,
-#endif
-};
-
 /*****************************************************************************/
 /*
- * allocate the keyrings to be associated with a UID
+ * install user and user session keyrings for a particular UID
  */
-int alloc_uid_keyring(struct user_struct *user,
-		      struct task_struct *ctx)
+static int install_user_keyrings(struct task_struct *tsk)
 {
+	struct user_struct *user = tsk->user;
 	struct key *uid_keyring, *session_keyring;
 	char buf[20];
 	int ret;
 
-	/* concoct a default session keyring */
-	sprintf(buf, "_uid_ses.%u", user->uid);
+	kenter("%p{%u}", user, user->uid);
 
-	session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
-					KEY_ALLOC_IN_QUOTA, NULL);
-	if (IS_ERR(session_keyring)) {
-		ret = PTR_ERR(session_keyring);
-		goto error;
+	if (user->uid_keyring) {
+		kleave(" = 0 [exist]");
+		return 0;
 	}
 
-	/* and a UID specific keyring, pointed to by the default session
-	 * keyring */
-	sprintf(buf, "_uid.%u", user->uid);
-
-	uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
-				    KEY_ALLOC_IN_QUOTA, session_keyring);
-	if (IS_ERR(uid_keyring)) {
-		key_put(session_keyring);
-		ret = PTR_ERR(uid_keyring);
-		goto error;
-	}
-
-	/* install the keyrings */
-	user->uid_keyring = uid_keyring;
-	user->session_keyring = session_keyring;
+	mutex_lock(&key_user_keyring_mutex);
 	ret = 0;
 
-error:
-	return ret;
+	if (!user->uid_keyring) {
+		/* get the UID-specific keyring
+		 * - there may be one in existence already as it may have been
+		 *   pinned by a session, but the user_struct pointing to it
+		 *   may have been destroyed by setuid */
+		sprintf(buf, "_uid.%u", user->uid);
 
-} /* end alloc_uid_keyring() */
+		uid_keyring = find_keyring_by_name(buf, true);
+		if (IS_ERR(uid_keyring)) {
+			uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1,
+						    tsk, KEY_ALLOC_IN_QUOTA,
+						    NULL);
+			if (IS_ERR(uid_keyring)) {
+				ret = PTR_ERR(uid_keyring);
+				goto error;
+			}
+		}
+
+		/* get a default session keyring (which might also exist
+		 * already) */
+		sprintf(buf, "_uid_ses.%u", user->uid);
+
+		session_keyring = find_keyring_by_name(buf, true);
+		if (IS_ERR(session_keyring)) {
+			session_keyring =
+				keyring_alloc(buf, user->uid, (gid_t) -1,
+					      tsk, KEY_ALLOC_IN_QUOTA, NULL);
+			if (IS_ERR(session_keyring)) {
+				ret = PTR_ERR(session_keyring);
+				goto error_release;
+			}
+
+			/* we install a link from the user session keyring to
+			 * the user keyring */
+			ret = key_link(session_keyring, uid_keyring);
+			if (ret < 0)
+				goto error_release_both;
+		}
+
+		/* install the keyrings */
+		user->uid_keyring = uid_keyring;
+		user->session_keyring = session_keyring;
+	}
+
+	mutex_unlock(&key_user_keyring_mutex);
+	kleave(" = 0");
+	return 0;
+
+error_release_both:
+	key_put(session_keyring);
+error_release:
+	key_put(uid_keyring);
+error:
+	mutex_unlock(&key_user_keyring_mutex);
+	kleave(" = %d", ret);
+	return ret;
+}
 
 /*****************************************************************************/
 /*
@@ -481,7 +490,7 @@
 		}
 	}
 	/* or search the user-session keyring */
-	else {
+	else if (context->user->session_keyring) {
 		key_ref = keyring_search_aux(
 			make_key_ref(context->user->session_keyring, 1),
 			context, type, description, match);
@@ -614,6 +623,9 @@
 		if (!context->signal->session_keyring) {
 			/* always install a session keyring upon access if one
 			 * doesn't exist yet */
+			ret = install_user_keyrings(context);
+			if (ret < 0)
+				goto error;
 			ret = install_session_keyring(
 				context, context->user->session_keyring);
 			if (ret < 0)
@@ -628,12 +640,24 @@
 		break;
 
 	case KEY_SPEC_USER_KEYRING:
+		if (!context->user->uid_keyring) {
+			ret = install_user_keyrings(context);
+			if (ret < 0)
+				goto error;
+		}
+
 		key = context->user->uid_keyring;
 		atomic_inc(&key->usage);
 		key_ref = make_key_ref(key, 1);
 		break;
 
 	case KEY_SPEC_USER_SESSION_KEYRING:
+		if (!context->user->session_keyring) {
+			ret = install_user_keyrings(context);
+			if (ret < 0)
+				goto error;
+		}
+
 		key = context->user->session_keyring;
 		atomic_inc(&key->usage);
 		key_ref = make_key_ref(key, 1);
@@ -744,7 +768,7 @@
 	mutex_lock(&key_session_mutex);
 
 	/* look for an existing keyring of this name */
-	keyring = find_keyring_by_name(name, 0);
+	keyring = find_keyring_by_name(name, false);
 	if (PTR_ERR(keyring) == -ENOKEY) {
 		/* not found - try and create a new one */
 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk,
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 5ecc505..ba32ca6 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -16,6 +16,7 @@
 #include <linux/kmod.h>
 #include <linux/err.h>
 #include <linux/keyctl.h>
+#include <linux/slab.h>
 #include "internal.h"
 
 /*
@@ -161,21 +162,22 @@
  * call out to userspace for key construction
  * - we ignore program failure and go on key status instead
  */
-static int construct_key(struct key *key, const char *callout_info, void *aux)
+static int construct_key(struct key *key, const void *callout_info,
+			 size_t callout_len, void *aux)
 {
 	struct key_construction *cons;
 	request_key_actor_t actor;
 	struct key *authkey;
 	int ret;
 
-	kenter("%d,%s,%p", key->serial, callout_info, aux);
+	kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
 
 	cons = kmalloc(sizeof(*cons), GFP_KERNEL);
 	if (!cons)
 		return -ENOMEM;
 
 	/* allocate an authorisation key */
-	authkey = request_key_auth_new(key, callout_info);
+	authkey = request_key_auth_new(key, callout_info, callout_len);
 	if (IS_ERR(authkey)) {
 		kfree(cons);
 		ret = PTR_ERR(authkey);
@@ -331,6 +333,7 @@
 static struct key *construct_key_and_link(struct key_type *type,
 					  const char *description,
 					  const char *callout_info,
+					  size_t callout_len,
 					  void *aux,
 					  struct key *dest_keyring,
 					  unsigned long flags)
@@ -348,7 +351,7 @@
 	key_user_put(user);
 
 	if (ret == 0) {
-		ret = construct_key(key, callout_info, aux);
+		ret = construct_key(key, callout_info, callout_len, aux);
 		if (ret < 0)
 			goto construction_failed;
 	}
@@ -370,7 +373,8 @@
  */
 struct key *request_key_and_link(struct key_type *type,
 				 const char *description,
-				 const char *callout_info,
+				 const void *callout_info,
+				 size_t callout_len,
 				 void *aux,
 				 struct key *dest_keyring,
 				 unsigned long flags)
@@ -378,8 +382,8 @@
 	struct key *key;
 	key_ref_t key_ref;
 
-	kenter("%s,%s,%s,%p,%p,%lx",
-	       type->name, description, callout_info, aux,
+	kenter("%s,%s,%p,%zu,%p,%p,%lx",
+	       type->name, description, callout_info, callout_len, aux,
 	       dest_keyring, flags);
 
 	/* search all the process keyrings for a key */
@@ -398,7 +402,8 @@
 			goto error;
 
 		key = construct_key_and_link(type, description, callout_info,
-					     aux, dest_keyring, flags);
+					     callout_len, aux, dest_keyring,
+					     flags);
 	}
 
 error:
@@ -434,10 +439,13 @@
 			const char *callout_info)
 {
 	struct key *key;
+	size_t callout_len = 0;
 	int ret;
 
-	key = request_key_and_link(type, description, callout_info, NULL,
-				   NULL, KEY_ALLOC_IN_QUOTA);
+	if (callout_info)
+		callout_len = strlen(callout_info);
+	key = request_key_and_link(type, description, callout_info, callout_len,
+				   NULL, NULL, KEY_ALLOC_IN_QUOTA);
 	if (!IS_ERR(key)) {
 		ret = wait_for_key_construction(key, false);
 		if (ret < 0) {
@@ -458,14 +466,15 @@
  */
 struct key *request_key_with_auxdata(struct key_type *type,
 				     const char *description,
-				     const char *callout_info,
+				     const void *callout_info,
+				     size_t callout_len,
 				     void *aux)
 {
 	struct key *key;
 	int ret;
 
-	key = request_key_and_link(type, description, callout_info, aux,
-				   NULL, KEY_ALLOC_IN_QUOTA);
+	key = request_key_and_link(type, description, callout_info, callout_len,
+				   aux, NULL, KEY_ALLOC_IN_QUOTA);
 	if (!IS_ERR(key)) {
 		ret = wait_for_key_construction(key, false);
 		if (ret < 0) {
@@ -485,10 +494,12 @@
  */
 struct key *request_key_async(struct key_type *type,
 			      const char *description,
-			      const char *callout_info)
+			      const void *callout_info,
+			      size_t callout_len)
 {
-	return request_key_and_link(type, description, callout_info, NULL,
-				    NULL, KEY_ALLOC_IN_QUOTA);
+	return request_key_and_link(type, description, callout_info,
+				    callout_len, NULL, NULL,
+				    KEY_ALLOC_IN_QUOTA);
 }
 EXPORT_SYMBOL(request_key_async);
 
@@ -500,10 +511,11 @@
  */
 struct key *request_key_async_with_auxdata(struct key_type *type,
 					   const char *description,
-					   const char *callout_info,
+					   const void *callout_info,
+					   size_t callout_len,
 					   void *aux)
 {
-	return request_key_and_link(type, description, callout_info, aux,
-				    NULL, KEY_ALLOC_IN_QUOTA);
+	return request_key_and_link(type, description, callout_info,
+				    callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
 }
 EXPORT_SYMBOL(request_key_async_with_auxdata);
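
The request_key*() entry points above now take the callout data as a
const void * plus an explicit callout_len, so callout information no
longer has to be a NUL-terminated string; request_key() itself keeps
its const char * interface and derives the length with strlen() as
shown above. A minimal caller sketch, assuming a hypothetical "foo"
key type and binary callout payload (both are illustrative, not part
of this patch):

	/* hypothetical key type and binary callout payload */
	struct foo_callout { u32 cookie; };
	extern struct key_type key_type_foo;

	static struct key *foo_get_key(void)
	{
		struct foo_callout blob = { .cookie = 42 };

		return request_key_with_auxdata(&key_type_foo, "foo:object",
						&blob, sizeof(blob), NULL);
	}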
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e42b525..bd237b0 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -61,7 +62,7 @@
 
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
-	seq_printf(m, " pid:%d ci:%zu", rka->pid, strlen(rka->callout_info));
+	seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
 
 } /* end request_key_auth_describe() */
 
@@ -77,7 +78,7 @@
 	size_t datalen;
 	long ret;
 
-	datalen = strlen(rka->callout_info);
+	datalen = rka->callout_len;
 	ret = datalen;
 
 	/* we can return the data as is */
@@ -137,7 +138,8 @@
  * create an authorisation token for /sbin/request-key or whoever to gain
  * access to the caller's security data
  */
-struct key *request_key_auth_new(struct key *target, const char *callout_info)
+struct key *request_key_auth_new(struct key *target, const void *callout_info,
+				 size_t callout_len)
 {
 	struct request_key_auth *rka, *irka;
 	struct key *authkey = NULL;
@@ -152,7 +154,7 @@
 		kleave(" = -ENOMEM");
 		return ERR_PTR(-ENOMEM);
 	}
-	rka->callout_info = kmalloc(strlen(callout_info) + 1, GFP_KERNEL);
+	rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
 	if (!rka->callout_info) {
 		kleave(" = -ENOMEM");
 		kfree(rka);
@@ -186,7 +188,8 @@
 	}
 
 	rka->target_key = key_get(target);
-	strcpy(rka->callout_info, callout_info);
+	memcpy(rka->callout_info, callout_info, callout_len);
+	rka->callout_len = callout_len;
 
 	/* allocate the auth key */
 	sprintf(desc, "%x", target->serial);
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
new file mode 100644
index 0000000..b611d49
--- /dev/null
+++ b/security/keys/sysctl.c
@@ -0,0 +1,50 @@
+/* Key management controls
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/key.h>
+#include <linux/sysctl.h>
+#include "internal.h"
+
+ctl_table key_sysctls[] = {
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "maxkeys",
+		.data = &key_quota_maxkeys,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "maxbytes",
+		.data = &key_quota_maxbytes,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "root_maxkeys",
+		.data = &key_quota_root_maxkeys,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "root_maxbytes",
+		.data = &key_quota_root_maxbytes,
+		.maxlen = sizeof(unsigned),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{ .ctl_name = 0 }
+};
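
key_sysctls[] is only the leaf table; it still has to be linked into
the sysctl tree, and that hook-up is not part of this hunk. A rough
sketch of how the table could be parented under a "keys" directory
(the directory name, placement and initcall here are assumptions, not
taken from this patch):

	static ctl_table key_dir_table[] = {
		{
			.ctl_name	= CTL_UNNUMBERED,
			.procname	= "keys",
			.mode		= 0555,
			.child		= key_sysctls,
		},
		{ .ctl_name = 0 }
	};

	static int __init key_sysctl_init(void)
	{
		register_sysctl_table(key_dir_table);
		return 0;
	}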
diff --git a/security/security.c b/security/security.c
index d5cb589..8e64a29 100644
--- a/security/security.c
+++ b/security/security.c
@@ -491,23 +491,23 @@
 	security_ops->inode_delete(inode);
 }
 
-int security_inode_setxattr(struct dentry *dentry, char *name,
-			     void *value, size_t size, int flags)
+int security_inode_setxattr(struct dentry *dentry, const char *name,
+			    const void *value, size_t size, int flags)
 {
 	if (unlikely(IS_PRIVATE(dentry->d_inode)))
 		return 0;
 	return security_ops->inode_setxattr(dentry, name, value, size, flags);
 }
 
-void security_inode_post_setxattr(struct dentry *dentry, char *name,
-				   void *value, size_t size, int flags)
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+				  const void *value, size_t size, int flags)
 {
 	if (unlikely(IS_PRIVATE(dentry->d_inode)))
 		return;
 	security_ops->inode_post_setxattr(dentry, name, value, size, flags);
 }
 
-int security_inode_getxattr(struct dentry *dentry, char *name)
+int security_inode_getxattr(struct dentry *dentry, const char *name)
 {
 	if (unlikely(IS_PRIVATE(dentry->d_inode)))
 		return 0;
@@ -521,7 +521,7 @@
 	return security_ops->inode_listxattr(dentry);
 }
 
-int security_inode_removexattr(struct dentry *dentry, char *name)
+int security_inode_removexattr(struct dentry *dentry, const char *name)
 {
 	if (unlikely(IS_PRIVATE(dentry->d_inode)))
 		return 0;
@@ -1156,6 +1156,11 @@
 	return security_ops->key_permission(key_ref, context, perm);
 }
 
+int security_key_getsecurity(struct key *key, char **_buffer)
+{
+	return security_ops->key_getsecurity(key, _buffer);
+}
+
 #endif	/* CONFIG_KEYS */
 
 #ifdef CONFIG_AUDIT
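
security_key_getsecurity() returns the length of the key's security
label on success and hands back an allocated buffer through *_buffer
which the caller must kfree(). A sketch of the calling convention,
assuming a caller that only needs the label text (the keyctl service
this hook feeds is added elsewhere in the series):

	char *context;
	long ret;

	ret = security_key_getsecurity(key, &context);
	if (ret < 0)
		return ret;
	/* ret is the label length; context is the kmalloc'd label and may
	 * be NULL if the active LSM attaches no label to keys */
	....
	kfree(context);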
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 04acb5a..4e4de98 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2619,7 +2619,7 @@
 	return dentry_has_perm(current, mnt, dentry, FILE__GETATTR);
 }
 
-static int selinux_inode_setotherxattr(struct dentry *dentry, char *name)
+static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
 {
 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
 		     sizeof XATTR_SECURITY_PREFIX - 1)) {
@@ -2638,7 +2638,8 @@
 	return dentry_has_perm(current, NULL, dentry, FILE__SETATTR);
 }
 
-static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags)
+static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+				  const void *value, size_t size, int flags)
 {
 	struct task_security_struct *tsec = current->security;
 	struct inode *inode = dentry->d_inode;
@@ -2687,8 +2688,9 @@
 			    &ad);
 }
 
-static void selinux_inode_post_setxattr(struct dentry *dentry, char *name,
-					void *value, size_t size, int flags)
+static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+					const void *value, size_t size,
+					int flags)
 {
 	struct inode *inode = dentry->d_inode;
 	struct inode_security_struct *isec = inode->i_security;
@@ -2711,7 +2713,7 @@
 	return;
 }
 
-static int selinux_inode_getxattr(struct dentry *dentry, char *name)
+static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
 {
 	return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
 }
@@ -2721,7 +2723,7 @@
 	return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
 }
 
-static int selinux_inode_removexattr(struct dentry *dentry, char *name)
+static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
 {
 	if (strcmp(name, XATTR_NAME_SELINUX))
 		return selinux_inode_setotherxattr(dentry, name);
@@ -5298,6 +5300,20 @@
 			    SECCLASS_KEY, perm, NULL);
 }
 
+static int selinux_key_getsecurity(struct key *key, char **_buffer)
+{
+	struct key_security_struct *ksec = key->security;
+	char *context = NULL;
+	unsigned len;
+	int rc;
+
+	rc = security_sid_to_context(ksec->sid, &context, &len);
+	if (!rc)
+		rc = len;
+	*_buffer = context;
+	return rc;
+}
+
 #endif
 
 static struct security_operations selinux_ops = {
@@ -5486,6 +5502,7 @@
 	.key_alloc =			selinux_key_alloc,
 	.key_free =			selinux_key_free,
 	.key_permission =		selinux_key_permission,
+	.key_getsecurity =		selinux_key_getsecurity,
 #endif
 
 #ifdef CONFIG_AUDIT
@@ -5534,14 +5551,6 @@
 	else
 		printk(KERN_DEBUG "SELinux:  Starting in permissive mode\n");
 
-#ifdef CONFIG_KEYS
-	/* Add security information to initial keyrings */
-	selinux_key_alloc(&root_user_keyring, current,
-			  KEY_ALLOC_NOT_IN_QUOTA);
-	selinux_key_alloc(&root_session_keyring, current,
-			  KEY_ALLOC_NOT_IN_QUOTA);
-#endif
-
 	return 0;
 }
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 6445b64..cdb14ad 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -93,7 +93,7 @@
 int security_sid_to_context(u32 sid, char **scontext,
 	u32 *scontext_len);
 
-int security_context_to_sid(char *scontext, u32 scontext_len,
+int security_context_to_sid(const char *scontext, u32 scontext_len,
 	u32 *out_sid);
 
 int security_context_to_sid_default(char *scontext, u32 scontext_len,
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 2daaddb..25cac5a 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -708,7 +708,7 @@
 
 }
 
-static int security_context_to_sid_core(char *scontext, u32 scontext_len,
+static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
 					u32 *sid, u32 def_sid, gfp_t gfp_flags)
 {
 	char *scontext2;
@@ -835,7 +835,7 @@
  * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
  * memory is available, or 0 on success.
  */
-int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
 {
 	return security_context_to_sid_core(scontext, scontext_len,
 					    sid, SECSID_NULL, GFP_KERNEL);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 77ec16a..5d2ec56 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -574,8 +574,8 @@
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_setxattr(struct dentry *dentry, char *name,
-				void *value, size_t size, int flags)
+static int smack_inode_setxattr(struct dentry *dentry, const char *name,
+				const void *value, size_t size, int flags)
 {
 	int rc = 0;
 
@@ -604,8 +604,8 @@
  * Set the pointer in the inode blob to the entry found
  * in the master label list.
  */
-static void smack_inode_post_setxattr(struct dentry *dentry, char *name,
-				      void *value, size_t size, int flags)
+static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
+				      const void *value, size_t size, int flags)
 {
 	struct inode_smack *isp;
 	char *nsp;
@@ -641,7 +641,7 @@
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_getxattr(struct dentry *dentry, char *name)
+static int smack_inode_getxattr(struct dentry *dentry, const char *name)
 {
 	return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ);
 }
@@ -655,7 +655,7 @@
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_removexattr(struct dentry *dentry, char *name)
+static int smack_inode_removexattr(struct dentry *dentry, const char *name)
 {
 	int rc = 0;
 
diff --git a/sound/core/info.c b/sound/core/info.c
index 9977ec2..cb5ead3 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -544,7 +544,7 @@
 {
 	struct proc_dir_entry *p;
 
-	p = snd_create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, &proc_root);
+	p = snd_create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, NULL);
 	if (p == NULL)
 		return -ENOMEM;
 	snd_proc_root = p;
@@ -594,7 +594,7 @@
 #ifdef CONFIG_SND_OSSEMUL
 		snd_info_free_entry(snd_oss_root);
 #endif
-		snd_remove_proc_entry(&proc_root, snd_proc_root);
+		snd_remove_proc_entry(NULL, snd_proc_root);
 	}
 	return 0;
 }
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 920e578..23b7bc0 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -629,9 +629,8 @@
 static int __init snd_mem_init(void)
 {
 #ifdef CONFIG_PROC_FS
-	snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
-	if (snd_mem_proc)
-		snd_mem_proc->proc_fops = &snd_mem_proc_fops;
+	snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
+				   &snd_mem_proc_fops);
 #endif
 	return 0;
 }
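
The two sound/core conversions follow the current procfs conventions:
a NULL parent now denotes the top of /proc (proc_root is no longer
passed around), and proc_create() installs the file operations before
the entry becomes visible, closing the window in which the old
create_proc_entry() + manual ->proc_fops assignment left an entry
openable with no fops. The general pattern, sketched with a
hypothetical "foo" entry and open routine:

	static const struct file_operations foo_proc_fops = {
		.owner		= THIS_MODULE,
		.open		= foo_proc_open,	/* hypothetical */
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	entry = proc_create("foo", 0444, NULL, &foo_proc_fops);
	if (!entry)
		return -ENOMEM;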
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index d49417b..9ca1133 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -663,7 +663,7 @@
 		return err;
 	pd = platform_device_register_simple(SND_AICA_DRIVER, -1,
 					     aica_memory_space, 2);
-	if (unlikely(IS_ERR(pd))) {
+	if (IS_ERR(pd)) {
 		platform_driver_unregister(&snd_aica_driver);
 		return PTR_ERR(pd);
 	}
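
Dropping the unlikely() around IS_ERR() is purely a cleanup: IS_ERR()
is built on IS_ERR_VALUE(), which already carries the unlikely()
annotation, so the extra wrapper changed nothing in the generated
code. Roughly, per include/linux/err.h (a sketch, not part of this
diff):

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline long IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}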