Merge branch 'sg' of git://git.kernel.dk/linux-2.6-block

* 'sg' of git://git.kernel.dk/linux-2.6-block:
  fix sg_phys to use dma_addr_t
  ub: add sg_init_table for sense and read capacity commands
  x86: pci-gart fix
  blackfin: fix sg fallout
  xtensa: dma-mapping.h is using linux/scatterlist.h functions, so include it
  SG: audit of drivers that use blk_rq_map_sg()
  arch/um/drivers/ubd_kern.c: fix a building error
  SG: Change sg_set_page() to take length and offset argument
  AVR32: Fix sg_page breakage
  mmc: sg fallout
  m68k: sg fallout
  More SG build fixes
  sg: add missing sg_init_table calls to zfcp
  SG build fix
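
Most of the log entries above are fallout from the scatterlist rework: sg
tables must now be initialized with sg_init_table() before use, and entries
filled through sg_set_page(), which (per the log) now takes the length and
offset explicitly.  A minimal sketch of the new calling convention; the
pages, lengths and offsets here are placeholders, not taken from any of the
fixed drivers:

	struct scatterlist sg[2];

	sg_init_table(sg, ARRAY_SIZE(sg));	/* clear entries, mark list end */

	/* Drivers used to poke sg->page, sg->offset and sg->length by hand;
	 * the accessor now sets page, length and offset together. */
	sg_set_page(&sg[0], page0, len0, off0);
	sg_set_page(&sg[1], page1, len1, off1);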
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a13d69b..8ae5fac 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1444,7 +1444,8 @@
 			Param: "schedule" - profile schedule points.
 			Param: <number> - step/bucket size as a power of 2 for
 				statistical time based profiling.
-			Param: "sleep" - profile D-state sleeping (millisecs)
+			Param: "sleep" - profile D-state sleeping (millisecs).
+				Requires CONFIG_SCHEDSTATS
 			Param: "kvm" - profile VM exits.
 
 	processor.max_cstate=	[HW,ACPI]
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 5bdc37f..f266839 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -34,25 +34,24 @@
 #include <zlib.h>
 #include <assert.h>
 #include <sched.h>
-/*L:110 We can ignore the 30 include files we need for this program, but I do
- * want to draw attention to the use of kernel-style types.
- *
- * As Linus said, "C is a Spartan language, and so should your naming be."  I
- * like these abbreviations and the header we need uses them, so we define them
- * here.
- */
-typedef unsigned long long u64;
-typedef uint32_t u32;
-typedef uint16_t u16;
-typedef uint8_t u8;
 #include "linux/lguest_launcher.h"
-#include "linux/pci_ids.h"
 #include "linux/virtio_config.h"
 #include "linux/virtio_net.h"
 #include "linux/virtio_blk.h"
 #include "linux/virtio_console.h"
 #include "linux/virtio_ring.h"
 #include "asm-x86/bootparam.h"
+/*L:110 We can ignore the 38 include files we need for this program, but I do
+ * want to draw attention to the use of kernel-style types.
+ *
+ * As Linus said, "C is a Spartan language, and so should your naming be."  I
+ * like these abbreviations, so we define them here.  Note that u64 is always
+ * unsigned long long, which works on all Linux systems: this means that we can
+ * use %llu in printf for any u64. */
+typedef unsigned long long u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
 /*:*/
 
 #define PAGE_PRESENT 0x7 	/* Present, RW, Execute */
@@ -361,8 +360,8 @@
 }
 
 /*L:140 Loading the kernel is easy when it's a "vmlinux", but most kernels
- * come wrapped up in the self-decompressing "bzImage" format.  With some funky
- * coding, we can load those, too. */
+ * come wrapped up in the self-decompressing "bzImage" format.  With a little
+ * work, we can load those, too. */
 static unsigned long load_kernel(int fd)
 {
 	Elf32_Ehdr hdr;
@@ -465,6 +464,7 @@
 	 * to know where it is. */
 	return to_guest_phys(pgdir);
 }
+/*:*/
 
 /* Simple routine to roll all the commandline arguments together with spaces
  * between them. */
@@ -481,9 +481,9 @@
 	dst[len] = '\0';
 }
 
-/* This is where we actually tell the kernel to initialize the Guest.  We saw
- * the arguments it expects when we looked at initialize() in lguest_user.c:
- * the base of guest "physical" memory, the top physical page to allow, the
+/*L:185 This is where we actually tell the kernel to initialize the Guest.  We
+ * saw the arguments it expects when we looked at initialize() in lguest_user.c:
+ * the base of Guest "physical" memory, the top physical page to allow, the
  * top level pagetable and the entry point for the Guest. */
 static int tell_kernel(unsigned long pgdir, unsigned long start)
 {
@@ -513,13 +513,14 @@
 /*L:200
  * The Waker.
  *
- * With a console and network devices, we can have lots of input which we need
- * to process.  We could try to tell the kernel what file descriptors to watch,
- * but handing a file descriptor mask through to the kernel is fairly icky.
+ * With console, block and network devices, we can have lots of input which we
+ * need to process.  We could try to tell the kernel what file descriptors to
+ * watch, but handing a file descriptor mask through to the kernel is fairly
+ * icky.
  *
  * Instead, we fork off a process which watches the file descriptors and writes
- * the LHREQ_BREAK command to the /dev/lguest filedescriptor to tell the Host
- * loop to stop running the Guest.  This causes it to return from the
+ * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host
+ * to stop running the Guest.  This causes the Launcher to return from the
  * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset
  * the LHREQ_BREAK and wake us up again.
  *
@@ -545,7 +546,9 @@
 			if (read(pipefd, &fd, sizeof(fd)) == 0)
 				exit(0);
 			/* Otherwise it's telling us to change what file
-			 * descriptors we're to listen to. */
+			 * descriptors we're to listen to.  Positive means
+			 * listen to a new one, negative means stop
+			 * listening. */
 			if (fd >= 0)
 				FD_SET(fd, &devices.infds);
 			else
@@ -560,7 +563,7 @@
 {
 	int pipefd[2], child;
 
-	/* We create a pipe to talk to the waker, and also so it knows when the
+	/* We create a pipe to talk to the Waker, and also so it knows when the
 	 * Launcher dies (and closes pipe). */
 	pipe(pipefd);
 	child = fork();
@@ -568,7 +571,8 @@
 		err(1, "forking");
 
 	if (child == 0) {
-		/* Close the "writing" end of our copy of the pipe */
+		/* We are the Waker: close the "writing" end of our copy of the
+		 * pipe and start waiting for input. */
 		close(pipefd[1]);
 		wake_parent(pipefd[0], lguest_fd);
 	}
@@ -579,12 +583,12 @@
 	return pipefd[1];
 }
 
-/*L:210
+/*
  * Device Handling.
  *
- * When the Guest sends DMA to us, it sends us an array of addresses and sizes.
+ * When the Guest gives us a buffer, it sends an array of addresses and sizes.
  * We need to make sure it's not trying to reach into the Launcher itself, so
- * we have a convenient routine which check it and exits with an error message
+ * we have a convenient routine which checks it and exits with an error message
  * if something funny is going on:
  */
 static void *_check_pointer(unsigned long addr, unsigned int size,
@@ -601,7 +605,9 @@
 /* A macro which transparently hands the line number to the real function. */
 #define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)
 
-/* This function returns the next descriptor in the chain, or vq->vring.num. */
+/* Each buffer in the virtqueues is actually a chain of descriptors.  This
+ * function returns the next descriptor in the chain, or vq->vring.num if we're
+ * at the end. */
 static unsigned next_desc(struct virtqueue *vq, unsigned int i)
 {
 	unsigned int next;
@@ -680,13 +686,14 @@
 	return head;
 }
 
-/* Once we've used one of their buffers, we tell them about it.  We'll then
+/* After we've used one of their buffers, we tell them about it.  We'll then
  * want to send them an interrupt, using trigger_irq(). */
 static void add_used(struct virtqueue *vq, unsigned int head, int len)
 {
 	struct vring_used_elem *used;
 
-	/* Get a pointer to the next entry in the used ring. */
+	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
+	 * next entry in that used ring. */
 	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
 	used->id = head;
 	used->len = len;
@@ -700,6 +707,7 @@
 {
 	unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };
 
+	/* If they don't want an interrupt, don't send one. */
 	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
 		return;
 
@@ -716,8 +724,11 @@
 	trigger_irq(fd, vq);
 }
 
-/* Here is the input terminal setting we save, and the routine to restore them
- * on exit so the user can see what they type next. */
+/*
+ * The Console
+ *
+ * Here are the input terminal settings we save, and the routine to restore them
+ * on exit so the user gets their terminal back. */
 static struct termios orig_term;
 static void restore_term(void)
 {
@@ -818,7 +829,10 @@
 	}
 }
 
-/* Handling output for network is also simple: we get all the output buffers
+/*
+ * The Network
+ *
+ * Handling output for network is also simple: we get all the output buffers
  * and write them (ignoring the first element) to this device's file descriptor
  * (stdout). */
 static void handle_net_output(int fd, struct virtqueue *vq)
@@ -831,8 +845,9 @@
 	while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) {
 		if (in)
 			errx(1, "Input buffers in output queue?");
-		/* Check header, but otherwise ignore it (we said we supported
-		 * no features). */
+		/* Check header, but otherwise ignore it (we told the Guest we
+		 * supported no features, so it shouldn't have anything
+		 * interesting). */
 		(void)convert(&iov[0], struct virtio_net_hdr);
 		len = writev(vq->dev->fd, iov+1, out-1);
 		add_used_and_trigger(fd, vq, head, len);
@@ -883,7 +898,8 @@
 	return true;
 }
 
-/* This callback ensures we try again, in case we stopped console or net
+/*L:215 This is the callback attached to the network and console input
+ * virtqueues: it ensures we try again, in case we stopped console or net
 * delivery because the Guest didn't have any buffers. */
 static void enable_fd(int fd, struct virtqueue *vq)
 {
@@ -919,7 +935,7 @@
 	      strnlen(from_guest_phys(addr), guest_limit - addr));
 }
 
-/* This is called when the waker wakes us up: check for incoming file
+/* This is called when the Waker wakes us up: check for incoming file
  * descriptors. */
 static void handle_input(int fd)
 {
@@ -986,8 +1002,7 @@
 }
 
 /* Each device descriptor is followed by some configuration information.
- * The first byte is a "status" byte for the Guest to report what's happening.
- * After that are fields: u8 type, u8 len, [... len bytes...].
+ * Each configuration field looks like: u8 type, u8 len, [... len bytes...].
  *
  * This routine adds a new field to an existing device's descriptor.  It only
  * works for the last device, but that's OK because that's how we use it. */
@@ -1044,14 +1059,17 @@
 	/* Link virtqueue back to device. */
 	vq->dev = dev;
 
-	/* Set up handler. */
+	/* Set the routine to call when the Guest does something to this
+	 * virtqueue. */
 	vq->handle_output = handle_output;
+
+	/* Set the "Don't Notify Me" flag if we don't have a handler */
 	if (!handle_output)
 		vq->vring.used->flags = VRING_USED_F_NO_NOTIFY;
 }
 
 /* This routine does all the creation and setup of a new device, including
- * caling new_dev_desc() to allocate the descriptor and device memory. */
+ * calling new_dev_desc() to allocate the descriptor and device memory. */
 static struct device *new_device(const char *name, u16 type, int fd,
 				 bool (*handle_input)(int, struct device *))
 {
@@ -1060,7 +1078,7 @@
 	/* Append to device list.  Prepending to a single-linked list is
 	 * easier, but the user expects the devices to be arranged on the bus
 	 * in command-line order.  The first network device on the command line
-	 * is eth0, the first block device /dev/lgba, etc. */
+	 * is eth0, the first block device /dev/vda, etc. */
 	*devices.lastdev = dev;
 	dev->next = NULL;
 	devices.lastdev = &dev->next;
@@ -1104,7 +1122,7 @@
 	/* The console needs two virtqueues: the input then the output.  When
 * they put something in the input queue, we make sure we're listening to
 	 * stdin.  When they put something in the output queue, we write it to
-	 * stdout.  */
+	 * stdout. */
 	add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd);
 	add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output);
 
@@ -1252,21 +1270,17 @@
 		verbose("attached to bridge: %s\n", br_name);
 }
 
-
-/*
- * Block device.
+/* Our block (disk) device should be really simple: the Guest asks for a block
+ * number and we read or write that position in the file.  Unfortunately, that
+ * was amazingly slow: the Guest waits until the read is finished before
+ * running anything else, even if it could have been doing useful work.
  *
- * Serving a block device is really easy: the Guest asks for a block number and
- * we read or write that position in the file.
- *
- * Unfortunately, this is amazingly slow: the Guest waits until the read is
- * finished before running anything else, even if it could be doing useful
- * work.  We could use async I/O, except it's reputed to suck so hard that
- * characters actually go missing from your code when you try to use it.
+ * We could use async I/O, except it's reputed to suck so hard that characters
+ * actually go missing from your code when you try to use it.
  *
 * So we farm the I/O out to a thread, and communicate with it via a pipe. */
 
-/* This hangs off device->priv, with the data. */
+/* This hangs off device->priv. */
 struct vblk_info
 {
 	/* The size of the file. */
@@ -1282,8 +1296,14 @@
 	 * Launcher triggers interrupt to Guest. */
 	int done_fd;
 };
+/*:*/
 
-/* This is the core of the I/O thread.  It returns true if it did something. */
+/*L:210
+ * The Disk
+ *
+ * Remember that the block device is handled by a separate I/O thread.  We head
+ * straight into the core of that thread here:
+ */
 static bool service_io(struct device *dev)
 {
 	struct vblk_info *vblk = dev->priv;
@@ -1294,10 +1314,14 @@
 	struct iovec iov[dev->vq->vring.num];
 	off64_t off;
 
+	/* See if there's a request waiting.  If not, nothing to do. */
 	head = get_vq_desc(dev->vq, iov, &out_num, &in_num);
 	if (head == dev->vq->vring.num)
 		return false;
 
+	/* Every block request should contain at least one output buffer
+	 * (detailing the location on disk and the type of request) and one
+	 * input buffer (to hold the result). */
 	if (out_num == 0 || in_num == 0)
 		errx(1, "Bad virtblk cmd %u out=%u in=%u",
 		     head, out_num, in_num);
@@ -1306,10 +1330,15 @@
 	in = convert(&iov[out_num+in_num-1], struct virtio_blk_inhdr);
 	off = out->sector * 512;
 
-	/* This is how we implement barriers.  Pretty poor, no? */
+	/* The block device implements "barriers", where the Guest indicates
+	 * that it wants all previous writes to occur before this write.  We
+	 * don't have a way of asking our kernel to do a barrier, so we just
+	 * synchronize all the data in the file.  Pretty poor, no? */
 	if (out->type & VIRTIO_BLK_T_BARRIER)
 		fdatasync(vblk->fd);
 
+	/* In general the virtio block driver is allowed to try SCSI commands.
+	 * It'd be nice if we supported eject, for example, but we don't. */
 	if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
 		fprintf(stderr, "Scsi commands unsupported\n");
 		in->status = VIRTIO_BLK_S_UNSUPP;
@@ -1375,7 +1404,7 @@
 
 	/* When this read fails, it means Launcher died, so we follow. */
 	while (read(vblk->workpipe[0], &c, 1) == 1) {
-		/* We acknowledge each request immediately, to reduce latency,
+		/* We acknowledge each request immediately to reduce latency,
 		 * rather than waiting until we've done them all.  I haven't
 		 * measured to see if it makes any difference. */
 		while (service_io(dev))
@@ -1384,12 +1413,14 @@
 	return 0;
 }
 
-/* When the thread says some I/O is done, we interrupt the Guest. */
+/* Now we've seen the I/O thread, we return to the Launcher to see what happens
+ * when the thread tells us it's completed some I/O. */
 static bool handle_io_finish(int fd, struct device *dev)
 {
 	char c;
 
-	/* If child died, presumably it printed message. */
+	/* If the I/O thread died, presumably it printed the error, so we
+	 * simply exit. */
 	if (read(dev->fd, &c, 1) != 1)
 		exit(1);
 
@@ -1398,7 +1429,7 @@
 	return true;
 }
 
-/* When the Guest submits some I/O, we wake the I/O thread. */
+/* When the Guest submits some I/O, we just need to wake the I/O thread. */
 static void handle_virtblk_output(int fd, struct virtqueue *vq)
 {
 	struct vblk_info *vblk = vq->dev->priv;
@@ -1410,7 +1441,7 @@
 		exit(1);
 }
 
-/* This creates a virtual block device. */
+/*L:198 This actually sets up a virtual block device. */
 static void setup_block_file(const char *filename)
 {
 	int p[2];
@@ -1426,7 +1457,7 @@
 	/* The device responds to return from I/O thread. */
 	dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish);
 
-	/* The device has a virtqueue. */
+	/* The device has one virtqueue, where the Guest places requests. */
 	add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output);
 
 	/* Allocate the room for our own bookkeeping */
@@ -1448,7 +1479,8 @@
 	/* The I/O thread writes to this end of the pipe when done. */
 	vblk->done_fd = p[1];
 
-	/* This is how we tell the I/O thread about more work. */
+	/* This is the second pipe, which is how we tell the I/O thread about
+	 * more work. */
 	pipe(vblk->workpipe);
 
 	/* Create stack for thread and run it */
@@ -1487,24 +1519,25 @@
 			char reason[1024] = { 0 };
 			read(lguest_fd, reason, sizeof(reason)-1);
 			errx(1, "%s", reason);
-		/* EAGAIN means the waker wanted us to look at some input.
+		/* EAGAIN means the Waker wanted us to look at some input.
 		 * Anything else means a bug or incompatible change. */
 		} else if (errno != EAGAIN)
 			err(1, "Running guest failed");
 
-		/* Service input, then unset the BREAK which releases
-		 * the Waker. */
+		/* Service input, then unset the BREAK to release the Waker. */
 		handle_input(lguest_fd);
 		if (write(lguest_fd, args, sizeof(args)) < 0)
 			err(1, "Resetting break");
 	}
 }
 /*
- * This is the end of the Launcher.
+ * This is the end of the Launcher.  The good news: we are over halfway
+ * through!  The bad news: the most fiendish part of the code still lies ahead
+ * of us.
  *
- * But wait!  We've seen I/O from the Launcher, and we've seen I/O from the
- * Drivers.  If we were to see the Host kernel I/O code, our understanding
- * would be complete... :*/
+ * Are you ready?  Take a deep breath and join me in the core of the Host, in
+ * "make Host".
+ :*/
 
 static struct option opts[] = {
 	{ "verbose", 0, NULL, 'v' },
@@ -1527,7 +1560,7 @@
 	/* Memory, top-level pagetable, code startpoint and size of the
 	 * (optional) initrd. */
 	unsigned long mem = 0, pgdir, start, initrd_size = 0;
-	/* A temporary and the /dev/lguest file descriptor. */
+	/* Two temporaries and the /dev/lguest file descriptor. */
 	int i, c, lguest_fd;
 	/* The boot information for the Guest. */
 	struct boot_params *boot;
@@ -1622,6 +1655,7 @@
 	/* The boot header contains a command line pointer: we put the command
 	 * line after the boot header. */
 	boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1);
+	/* We use a simple helper to copy the arguments separated by spaces. */
 	concat((char *)(boot + 1), argv+optind+2);
 
 	/* Boot protocol version: 2.07 supports the fields for lguest. */
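
As an aside, the chain walk that the next_desc() comment above describes is
only a few lines.  A sketch, assuming the vring layout from
linux/virtio_ring.h and omitting the bounds check the real Launcher does:

	static unsigned next_desc(struct virtqueue *vq, unsigned int i)
	{
		/* If this descriptor doesn't chain, we've hit the end. */
		if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT))
			return vq->vring.num;

		/* Otherwise the descriptor names its successor. */
		return vq->vring.desc[i].next;
	}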
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 153d84d..f5a5e6d 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -80,8 +80,6 @@
 	- Behaviour of cards under Multicast
 ncsa-telnet
 	- notes on how NCSA telnet (DOS) breaks with MTU discovery enabled.
-net-modules.txt
-	- info and "insmod" parameters for all network driver modules.
 netdevices.txt
 	- info on network device driver functions exported to the kernel.
 olympic.txt
diff --git a/Documentation/networking/net-modules.txt b/Documentation/networking/net-modules.txt
deleted file mode 100644
index 98c4392..0000000
--- a/Documentation/networking/net-modules.txt
+++ /dev/null
@@ -1,315 +0,0 @@
-Wed 2-Aug-95  <matti.aarnio@utu.fi>
-
-		Linux network driver modules
-
-	Do not mistake this for "README.modules" at the top-level
-	directory!  That document tells about modules in general, while
-	this one tells only about network device driver modules.
-
-	This is a potpourri of INSMOD-time(*) configuration options
-	(if such exists) and their default values of various modules
-	in the Linux network drivers collection.
-
-	Some modules have also hidden (= non-documented) tunable values.
-	The choice of not documenting them is based on general belief, that
-	the less the user needs to know, the better.  (There are things that
-	driver developers can use, others should not confuse themselves.)
-
-	In many cases it is highly preferred that insmod:ing is done
-	ONLY with defining an explicit address for the card, AND BY
-	NOT USING AUTO-PROBING!
-
-	Now most cards have some explicitly defined base address that they
-	are compiled with (to avoid auto-probing, among other things).
-	If that compiled value does not match your actual configuration,
-	do use the "io=0xXXX" -parameter for the insmod, and give there
-	a value matching your environment.
-
-	If you are adventurous, you can ask the driver to autoprobe
-	by using the "io=0" parameter, however it is a potentially dangerous
-	thing to do in a live system.  (If you don't know where the
-	card is located, you can try autoprobing, and after possible
-	crash recovery, insmod with proper IO-address..)
-
-	--------------------------
-	(*)	"INSMOD-time" means when you load module with
-		/sbin/insmod  you can feed it optional parameters.
-		See "man insmod".
-	--------------------------
-
-
-	8390 based Network Modules		(Paul Gortmaker, Nov 12, 1995)
-	--------------------------
-
-(Includes: smc-ultra, ne, wd, 3c503, hp, hp-plus, e2100 and ac3200)
-
-The 8390 series of network drivers now support multiple card systems without 
-reloading the same module multiple times (memory efficient!) This is done by 
-specifying multiple comma separated values, such as:
-
-	insmod 3c503.o io=0x280,0x300,0x330,0x350  xcvr=0,1,0,1
-
-The above would have the one module controlling four 3c503 cards, with card 2
-and 4 using external transceivers. The "insmod" manual describes the usage
-of comma separated value lists.
-
-It is *STRONGLY RECOMMENDED* that you supply "io=" instead of autoprobing.
-If an "io=" argument is not supplied, then the ISA drivers will complain
-about autoprobing being not recommended, and begrudgingly autoprobe for
-a *SINGLE CARD ONLY* -- if you want to use multiple cards you *have* to 
-supply an "io=0xNNN,0xQQQ,..." argument.
-
-The ne module is an exception to the above. A NE2000 is essentially an
-8390 chip, some bus glue and some RAM. Because of this, the ne probe is
-more invasive than the rest, and so at boot we make sure the ne probe is 
-done last of all the 8390 cards (so that it won't trip over other 8390 based
-cards) With modules we can't ensure that all other non-ne 8390 cards have
-already been found. Because of this, the ne module REQUIRES an "io=0xNNN" 
-argument passed in via insmod. It will refuse to autoprobe.
-
-It is also worth noting that auto-IRQ probably isn't as reliable during 
-the flurry of interrupt activity on a running machine. Cards such as the 
-ne2000 that can't get the IRQ setting from an EEPROM or configuration
-register are probably best supplied with an "irq=M" argument as well.
-
-
-----------------------------------------------------------------------
-Card/Module List - Configurable Parameters and Default Values
-----------------------------------------------------------------------
-
-3c501.c:
-	io  = 0x280	IO base address
-	irq = 5		IRQ
-	(Probes ports:	0x280, 0x300)
-
-3c503.c:
-	io = 0		(It will complain if you don't supply an "io=0xNNN")
-	irq = 0		(IRQ software selected by driver using autoIRQ)
-	xcvr = 0	(Use xcvr=1 to select external transceiver.)
-	(Probes ports: 0x300, 0x310, 0x330, 0x350, 0x250, 0x280, 0x2A0, 0x2E0)
-
-3c505.c:
-	io = 0
-	irq = 0
-	dma = 6         (not autoprobed)
-	(Probes ports: 0x300, 0x280, 0x310)
-
-3c507.c:
-	io = 0x300
-	irq = 0
-	(Probes ports: 0x300, 0x320, 0x340, 0x280)
-
-3c509.c:
-	io = 0
-	irq = 0
-	( Module load-time probing Works reliably only on EISA, ISA ID-PROBE
-	  IS NOT RELIABLE!  Compile this driver statically into kernel for
-	  now, if you need it auto-probing on an ISA-bus machine. )
-
-8390.c:
-	(No public options, several other modules need this one)
-
-a2065.c:
-	Since this is a Zorro board, it supports full autoprobing, even for
-	multiple boards. (m68k/Amiga)
-
-ac3200.c:
-	io = 0		(Checks 0x1000 to 0x8fff in 0x1000 intervals)
-	irq = 0		(Read from config register)
-	(EISA probing..)
-
-apricot.c:
-	io = 0x300  (Can't be altered!)
-	irq = 10
-
-arcnet.c:
-	io = 0
-	irqnum = 0
-	shmem = 0
-	num = 0
-	DO SET THESE MANUALLY AT INSMOD!
-	(When probing, looks at the following possible addresses:
-	 Suggested ones:
-		0x300, 0x2E0, 0x2F0, 0x2D0
-	 Other ones:
-		0x200, 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x270,
-		0x280, 0x290, 0x2A0, 0x2B0, 0x2C0,
-		       0x310, 0x320, 0x330, 0x340, 0x350, 0x360, 0x370,
-		0x380, 0x390, 0x3A0,			  0x3E0, 0x3F0  )
-
-ariadne.c:
-	Since this is a Zorro board, it supports full autoprobing, even for
-	multiple boards. (m68k/Amiga)
-
-at1700.c:
-	io = 0x260
-	irq = 0
-	(Probes ports: 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300)
-
-atarilance.c:
-	Supports full autoprobing. (m68k/Atari)
-
-atp.c: *Not modularized*
-	(Probes ports: 0x378, 0x278, 0x3BC;
-	 fixed IRQs: 5 and 7			)
-
-cops.c:
-	io = 0x240
-	irq = 5
-	nodeid = 0	(AutoSelect = 0, NodeID 1-254 is hand selected.)
-	(Probes ports: 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260,
-		       0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360)	
-
-de4x5.c:
-	io = 0x000b
-	irq = 10
-	is_not_dec = 0  -- For non-DEC card using DEC 21040/21041/21140 chip, set this to 1
-	(EISA, and PCI probing)
-
-de600.c:
-	de600_debug = 0
-	(On port 0x378, irq 7 -- lpt1;  compile time configurable)
-
-de620.c:
-	bnc = 0, utp = 0  <-- Force media by setting either.
-	io = 0x378	(also compile-time configurable)
-	irq = 7
-
-depca.c:
-	io = 0x200
-	irq = 7
-	(Probes ports:	ISA:  0x300, 0x200;
-			EISA: 0x0c00		)
-
-dummy.c:
-	No options
-
-e2100.c:
-	io = 0		(It will complain if you don't supply an "io=0xNNN")
-	irq = 0		(IRQ software selected by driver)
-	mem = 0		(Override default shared memory start of 0xd0000)
-	xcvr = 0	(Use xcvr=1 to select external transceiver.)
-	(Probes ports: 0x300, 0x280, 0x380, 0x220)
-
-eepro.c:
-	io = 0x200
-	irq = 0
-	(Probes ports: 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360)
-
-eexpress.c:
-	io = 0x300
-	irq = 0		(IRQ value read from EEPROM)
-	(Probes ports: 0x300, 0x270, 0x320, 0x340)
-
-eql.c:
-	(No parameters)
-
-ewrk3.c:
-	io = 0x300
-	irq = 5
-	(With module no autoprobing!
-	 On EISA-bus does EISA probing.
-	 Static linkage probes ports on ISA bus:
-		0x100, 0x120, 0x140, 0x160, 0x180, 0x1A0, 0x1C0,
-		0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0,
-		0x300,        0x340, 0x360, 0x380, 0x3A0, 0x3C0)
-
-hp-plus.c:
-	io = 0		(It will complain if you don't supply an "io=0xNNN")
-	irq = 0		(IRQ read from configuration register)
-	(Probes ports: 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340)
-
-hp.c:
-	io = 0		(It will complain if you don't supply an "io=0xNNN")
-	irq = 0		(IRQ software selected by driver using autoIRQ)
-	(Probes ports: 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240)
-
-hp100.c:
-	hp100_port = 0 (IO-base address)
-	(Does EISA-probing, if on EISA-slot;
-	 On ISA-bus probes all ports from 0x100 thru to 0x3E0
-	 in increments of 0x020)
-
-hydra.c:
-	Since this is a Zorro board, it supports full autoprobing, even for
-	multiple boards. (m68k/Amiga)
-
-ibmtr.c:
-	io = 0xa20, 0xa24 (autoprobed by default)
-	irq = 0 (driver cannot select irq - read from hardware)
-	mem = 0 (shared memory base set at 0xd0000 and not yet 
-	         able to override thru mem= parameter.)
-
-lance.c: *Not modularized*
-	(PCI, and ISA probing; "CONFIG_PCI" needed for PCI support)
-	(Probes ISA ports: 0x300, 0x320, 0x340, 0x360)
-
-loopback.c: *Static kernel component*
-
-ne.c:
-	io = 0		(Explicitly *requires* an "io=0xNNN" value)
-	irq = 0		(Tries to determine configured IRQ via autoIRQ)
-	(Probes ports: 0x300, 0x280, 0x320, 0x340, 0x360)
-
-net_init.c: *Static kernel component*
-
-ni52.c: *Not modularized*
-	(Probes ports:	0x300, 0x280, 0x360, 0x320, 0x340
-		mems:	0xD0000, 0xD2000, 0xC8000, 0xCA000,
-			0xD4000, 0xD6000, 0xD8000 )
-
-ni65.c: *Not modularized*  **16MB MEMORY BARRIER BUG**
-	(Probes ports:	0x300, 0x320, 0x340, 0x360)
-
-pi2.c:	*Not modularized* (well, NON-STANDARD modularization!)
-	Only one card supported at this time.
-	(Probes ports: 0x380, 0x300, 0x320, 0x340, 0x360, 0x3A0)
-
-plip.c:
-	io = 0
-	irq = 0		(by default, uses IRQ 5 for port at 0x3bc, IRQ 7
-			for port at 0x378, and IRQ 2 for port at 0x278)
-	(Probes ports: 0x278, 0x378, 0x3bc)
-
-ppp.c:
-	No options (ppp-2.2+ has some, this is based on non-dynamic
-	version from ppp-2.1.2d)
-
-seeq8005.c: *Not modularized*
-	(Probes ports: 0x300, 0x320, 0x340, 0x360)
-
-skeleton.c: *Skeleton*
-
-slhc.c:
-	No configuration parameters
-
-slip.c:
-	slip_maxdev = 256 (default value from SL_NRUNIT on slip.h)
-
-
-smc-ultra.c:
-	io = 0		(It will complain if you don't supply an "io=0xNNN")
-	irq = 0		(IRQ val. read from EEPROM)
-	(Probes ports:	0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380)
-
-tulip.c: *Partial modularization*
-	(init-time memory allocation makes problems..)
-
-tunnel.c:
-	No insmod parameters
-
-wavelan.c:
-	io = 0x390	(Settable, but change not recommended)
-	irq = 0		(Not honoured, if changed..)
-
-wd.c:
-	io = 0		(It will complain if you don't supply an "io=0xNNN")
-	irq = 0		(IRQ val. read from EEPROM, ancient cards use autoIRQ)
-	mem = 0		(Force shared-memory on address 0xC8000, or whatever..)
-	mem_end = 0	(Force non-std. mem. size via supplying mem_end val.)
-			(eg. for 32k WD8003EBT, use mem=0xd0000 mem_end=0xd8000)
-	(Probes ports:	0x300, 0x280, 0x380, 0x240)
-
-znet.c: *Not modularized*
-	(Only one device on  Zenith Z-Note (notebook?) systems,
-	 configuration information from (EE)PROM)
diff --git a/MAINTAINERS b/MAINTAINERS
index 76b8571..5862b78 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2259,6 +2259,13 @@
 W:	http://legousb.sourceforge.net/
 S:	Maintained
 
+LGUEST
+P:	Rusty Russell
+M:	rusty@rustcorp.com.au
+L:	lguest@ozlabs.org
+W:	http://lguest.ozlabs.org/
+S:	Maintained
+
 LINUX FOR IBM pSERIES (RS/6000)
 P:	Paul Mackerras
 M:	paulus@au.ibm.com
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index d2235db..a55b090 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -56,6 +56,7 @@
 #include <linux/lguest.h>
 #include <linux/lguest_launcher.h>
 #include <linux/virtio_console.h>
+#include <linux/pm.h>
 #include <asm/paravirt.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -98,7 +99,7 @@
  * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
  * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
  * are reasonably expensive, batching them up makes sense.  For example, a
- * large mmap might update dozens of page table entries: that code calls
+ * large munmap might update dozens of page table entries: that code calls
  * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
  * lguest_leave_lazy_mode().
  *
@@ -163,8 +164,8 @@
 /*:*/
 
 /*G:033
- * Here are our first native-instruction replacements: four functions for
- * interrupt control.
+ * After that diversion we return to our first native-instruction
+ * replacements: four functions for interrupt control.
  *
  * The simplest way of implementing these would be to have "turn interrupts
  * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
@@ -183,7 +184,7 @@
 	return lguest_data.irq_enabled;
 }
 
-/* "restore_flags" just sets the flags back to the value given. */
+/* restore_flags() just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
 	lguest_data.irq_enabled = flags;
@@ -356,7 +357,7 @@
  * it.  The Host needs to know when the Guest wants to change them, so we have
  * a whole series of functions like read_cr0() and write_cr0().
  *
- * We start with CR0.  CR0 allows you to turn on and off all kinds of basic
+ * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
  * features, but Linux only really cares about one: the horrifically-named Task
  * Switched (TS) bit at bit 3 (ie. 8)
  *
@@ -371,8 +372,7 @@
 static unsigned long current_cr0, current_cr3;
 static void lguest_write_cr0(unsigned long val)
 {
-	/* 8 == TS bit. */
-	lazy_hcall(LHCALL_TS, val & 8, 0, 0);
+	lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
 	current_cr0 = val;
 }
 
@@ -387,10 +387,10 @@
 static void lguest_clts(void)
 {
 	lazy_hcall(LHCALL_TS, 0, 0, 0);
-	current_cr0 &= ~8U;
+	current_cr0 &= ~X86_CR0_TS;
 }
 
-/* CR2 is the virtual address of the last page fault, which the Guest only ever
+/* cr2 is the virtual address of the last page fault, which the Guest only ever
  * reads.  The Host kindly writes this into our "struct lguest_data", so we
  * just read it out of there. */
 static unsigned long lguest_read_cr2(void)
@@ -398,7 +398,7 @@
 	return lguest_data.cr2;
 }
 
-/* CR3 is the current toplevel pagetable page: the principle is the same as
+/* cr3 is the current toplevel pagetable page: the principle is the same as
  * cr0.  Keep a local copy, and tell the Host when it changes. */
 static void lguest_write_cr3(unsigned long cr3)
 {
@@ -411,7 +411,7 @@
 	return current_cr3;
 }
 
-/* CR4 is used to enable and disable PGE, but we don't care. */
+/* cr4 is used to enable and disable PGE, but we don't care. */
 static unsigned long lguest_read_cr4(void)
 {
 	return 0;
@@ -432,7 +432,7 @@
  * maps virtual addresses to physical addresses using "page tables".  We could
  * use one huge index of 1 million entries: each address is 4 bytes, so that's
  * 1024 pages just to hold the page tables.   But since most virtual addresses
- * are unused, we use a two level index which saves space.  The CR3 register
+ * are unused, we use a two level index which saves space.  The cr3 register
  * contains the physical address of the top level "page directory" page, which
  * contains physical addresses of up to 1024 second-level pages.  Each of these
  * second level pages contains up to 1024 physical addresses of actual pages,
@@ -440,7 +440,7 @@
  *
  * Here's a diagram, where arrows indicate physical addresses:
  *
- * CR3 ---> +---------+
+ * cr3 ---> +---------+
  *	    |  	   --------->+---------+
  *	    |	      |	     | PADDR1  |
  *	  Top-level   |	     | PADDR2  |
@@ -498,8 +498,7 @@
  *
  * ... except in early boot when the kernel sets up the initial pagetables,
  * which makes booting astonishingly slow.  So we don't even tell the Host
- * anything changed until we've done the first page table switch.
- */
+ * anything changed until we've done the first page table switch. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
@@ -720,10 +719,10 @@
 	/* Set up the timer interrupt (0) to go to our simple timer routine */
 	set_irq_handler(0, lguest_time_irq);
 
-	/* Our clock structure look like arch/i386/kernel/tsc.c if we can use
-	 * the TSC, otherwise it's a dumb nanosecond-resolution clock.  Either
-	 * way, the "rating" is initialized so high that it's always chosen
-	 * over any other clocksource. */
+	/* Our clock structure looks like arch/x86/kernel/tsc_32.c if we can
+	 * use the TSC, otherwise it's a dumb nanosecond-resolution clock.
+	 * Either way, the "rating" is set so high that it's always chosen over
+	 * any other clocksource. */
 	if (lguest_data.tsc_khz)
 		lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
 							 lguest_clock.shift);
@@ -749,7 +748,7 @@
  * to work.  They're pretty simple.
  */
 
-/* The Guest needs to tell the host what stack it expects traps to use.  For
+/* The Guest needs to tell the Host what stack it expects traps to use.  For
  * native hardware, this is part of the Task State Segment mentioned above in
  * lguest_load_tr_desc(), but to help hypervisors there's this special call.
  *
@@ -850,13 +849,16 @@
 	return "LGUEST";
 }
 
-/* Before virtqueues are set up, we use LHCALL_NOTIFY on normal memory to
- * produce console output. */
+/* We will eventually use the virtio console device to produce console output,
+ * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
+ * console output. */
 static __init int early_put_chars(u32 vtermno, const char *buf, int count)
 {
 	char scratch[17];
 	unsigned int len = count;
 
+	/* We use a nul-terminated string, so we have to make a copy.  Icky,
+	 * huh? */
 	if (len > sizeof(scratch) - 1)
 		len = sizeof(scratch) - 1;
 	scratch[len] = '\0';
@@ -883,7 +885,7 @@
  * Our current solution is to allow the paravirt back end to optionally patch
  * over the indirect calls to replace them with something more efficient.  We
  * patch the four most commonly called functions: disable interrupts, enable
- * interrupts, restore interrupts and save interrupts.  We usually have 10
+ * interrupts, restore interrupts and save interrupts.  We usually have 6 or 10
  * bytes to patch into: the Guest versions of these operations are small enough
  * that we can fit comfortably.
  *
@@ -1015,7 +1017,7 @@
 	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
 
 	/* The Host uses the top of the Guest's virtual address space for the
-	 * Host<->Guest Switcher, and it tells us how much it needs in
+	 * Host<->Guest Switcher, and it tells us how big that is in
 	 * lguest_data.reserve_mem, set up on the LGUEST_INIT hypercall. */
 	reserve_top_address(lguest_data.reserve_mem);
 
@@ -1065,6 +1067,6 @@
 /*
  * This marks the end of stage II of our journey, The Guest.
  *
- * It is now time for us to explore the nooks and crannies of the three Guest
- * devices and complete our understanding of the Guest in "make Drivers".
+ * It is now time for us to explore the layer of virtual drivers and complete
+ * our understanding of the Guest in "make Drivers".
  */
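
The lazy-mode comment near the top of this file deserves a sketch: when the
paravirt layer says we are in a lazy mode, hypercalls are queued instead of
issued immediately.  Roughly (a simplification; async_hcall() here stands in
for the real ring-buffer batching code):

	static void lazy_hcall(unsigned long call, unsigned long arg1,
			       unsigned long arg2, unsigned long arg3)
	{
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
			hcall(call, arg1, arg2, arg3);	/* issue now */
		else
			async_hcall(call, arg1, arg2, arg3); /* batch it */
	}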
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index ebc6ac7..95b6fbcd 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -6,7 +6,7 @@
 #include <asm/processor-flags.h>
 
 /*G:020 This is where we begin: head.S notes that the boot header's platform
- * type field is "1" (lguest), so calls us here.  The boot header is in %esi.
+ * type field is "1" (lguest), so calls us here.
  *
  * WARNING: be very careful here!  We're running at addresses equal to physical
 * addresses (around 0), not above PAGE_OFFSET as most code expects
@@ -17,13 +17,15 @@
  * boot. */
 .section .init.text, "ax", @progbits
 ENTRY(lguest_entry)
-	/* Make initial hypercall now, so we can set up the pagetables. */
+	/* We make the "initialization" hypercall now to tell the Host about
+	 * us, and also find out where it put our page tables. */
 	movl $LHCALL_LGUEST_INIT, %eax
 	movl $lguest_data - __PAGE_OFFSET, %edx
 	int $LGUEST_TRAP_ENTRY
 
 	/* The Host put the toplevel pagetable in lguest_data.pgdir.  The movsl
-	 * instruction uses %esi implicitly. */
+	 * instruction uses %esi implicitly as the source for the copy we're
+	 * about to do. */
 	movl lguest_data - __PAGE_OFFSET + LGUEST_DATA_pgdir, %esi
 
 	/* Copy first 32 entries of page directory to __PAGE_OFFSET entries.
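
The initialization hypercall above shows the general lguest convention: call
number in %eax, arguments in %edx (then %ebx, %ecx), trap into the Host, and
the result comes back in %eax.  The C-side wrapper looks roughly like this
(a sketch matching the assembly, not a quote of asm-x86/lguest_hcall.h):

	static inline unsigned long
	hcall(unsigned long call, unsigned long arg1,
	      unsigned long arg2, unsigned long arg3)
	{
		/* "int" triggers the trap the Host listens for; %eax
		 * carries the call number in and the result out. */
		asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
			     : "=a"(call)
			     : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
			     : "memory");
		return call;
	}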
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 3839efd..1538355 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -194,6 +194,23 @@
 	return result;
 }
 
+/* Read a possibly BCD register, always return binary */
+static u32 cmos_bcd_read(int offset, int rtc_control)
+{
+	u32 val = CMOS_READ(offset);
+	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		BCD_TO_BIN(val);
+	return val;
+}
+
+/* Write binary value into possibly BCD register */
+static void cmos_bcd_write(u32 val, int offset, int rtc_control)
+{
+	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+		BIN_TO_BCD(val);
+	CMOS_WRITE(val, offset);
+}
+
 static ssize_t
 acpi_system_write_alarm(struct file *file,
 			const char __user * buffer, size_t count, loff_t * ppos)
@@ -258,35 +275,18 @@
 	spin_lock_irq(&rtc_lock);
 
 	rtc_control = CMOS_READ(RTC_CONTROL);
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BIN_TO_BCD(yr);
-		BIN_TO_BCD(mo);
-		BIN_TO_BCD(day);
-		BIN_TO_BCD(hr);
-		BIN_TO_BCD(min);
-		BIN_TO_BCD(sec);
-	}
 
 	if (adjust) {
-		yr += CMOS_READ(RTC_YEAR);
-		mo += CMOS_READ(RTC_MONTH);
-		day += CMOS_READ(RTC_DAY_OF_MONTH);
-		hr += CMOS_READ(RTC_HOURS);
-		min += CMOS_READ(RTC_MINUTES);
-		sec += CMOS_READ(RTC_SECONDS);
+		yr += cmos_bcd_read(RTC_YEAR, rtc_control);
+		mo += cmos_bcd_read(RTC_MONTH, rtc_control);
+		day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
+		hr += cmos_bcd_read(RTC_HOURS, rtc_control);
+		min += cmos_bcd_read(RTC_MINUTES, rtc_control);
+		sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
 	}
 
 	spin_unlock_irq(&rtc_lock);
 
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BCD_TO_BIN(yr);
-		BCD_TO_BIN(mo);
-		BCD_TO_BIN(day);
-		BCD_TO_BIN(hr);
-		BCD_TO_BIN(min);
-		BCD_TO_BIN(sec);
-	}
-
 	if (sec > 59) {
 		min++;
 		sec -= 60;
@@ -307,14 +307,6 @@
 		yr++;
 		mo -= 12;
 	}
-	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-		BIN_TO_BCD(yr);
-		BIN_TO_BCD(mo);
-		BIN_TO_BCD(day);
-		BIN_TO_BCD(hr);
-		BIN_TO_BCD(min);
-		BIN_TO_BCD(sec);
-	}
 
 	spin_lock_irq(&rtc_lock);
 	/*
@@ -326,9 +318,9 @@
 	CMOS_READ(RTC_INTR_FLAGS);
 
 	/* write the fields the rtc knows about */
-	CMOS_WRITE(hr, RTC_HOURS_ALARM);
-	CMOS_WRITE(min, RTC_MINUTES_ALARM);
-	CMOS_WRITE(sec, RTC_SECONDS_ALARM);
+	cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
+	cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
+	cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
 
 	/*
 	 * If the system supports an enhanced alarm it will have non-zero
@@ -336,11 +328,11 @@
 	 * to the RTC area of memory.
 	 */
 	if (acpi_gbl_FADT.day_alarm)
-		CMOS_WRITE(day, acpi_gbl_FADT.day_alarm);
+		cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
 	if (acpi_gbl_FADT.month_alarm)
-		CMOS_WRITE(mo, acpi_gbl_FADT.month_alarm);
+		cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
 	if (acpi_gbl_FADT.century)
-		CMOS_WRITE(yr / 100, acpi_gbl_FADT.century);
+		cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
 	/* enable the rtc alarm interrupt */
 	rtc_control |= RTC_AIE;
 	CMOS_WRITE(rtc_control, RTC_CONTROL);
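
The two helpers introduced above fold six copies of the BCD dance into one
place.  BCD stores one decimal digit per nibble, so 59 seconds is stored as
0x59; the conversions behind BCD_TO_BIN()/BIN_TO_BCD() amount to:

	/* 0x59 -> 59: high nibble is tens, low nibble is ones. */
	unsigned bcd_to_bin(unsigned val)
	{
		return (val >> 4) * 10 + (val & 0x0f);
	}

	unsigned bin_to_bcd(unsigned val)
	{
		return ((val / 10) << 4) | (val % 10);
	}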
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 95229e7..49cf4cf 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -41,6 +41,7 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
@@ -241,6 +242,7 @@
 static void ahci_pmp_detach(struct ata_port *ap);
 static void ahci_error_handler(struct ata_port *ap);
 static void ahci_vt8251_error_handler(struct ata_port *ap);
+static void ahci_p5wdh_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static int ahci_port_resume(struct ata_port *ap);
 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
@@ -339,6 +341,40 @@
 	.port_stop		= ahci_port_stop,
 };
 
+static const struct ata_port_operations ahci_p5wdh_ops = {
+	.check_status		= ahci_check_status,
+	.check_altstatus	= ahci_check_status,
+	.dev_select		= ata_noop_dev_select,
+
+	.tf_read		= ahci_tf_read,
+
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
+	.qc_prep		= ahci_qc_prep,
+	.qc_issue		= ahci_qc_issue,
+
+	.irq_clear		= ahci_irq_clear,
+
+	.scr_read		= ahci_scr_read,
+	.scr_write		= ahci_scr_write,
+
+	.freeze			= ahci_freeze,
+	.thaw			= ahci_thaw,
+
+	.error_handler		= ahci_p5wdh_error_handler,
+	.post_internal_cmd	= ahci_post_internal_cmd,
+
+	.pmp_attach		= ahci_pmp_attach,
+	.pmp_detach		= ahci_pmp_detach,
+
+#ifdef CONFIG_PM
+	.port_suspend		= ahci_port_suspend,
+	.port_resume		= ahci_port_resume,
+#endif
+
+	.port_start		= ahci_port_start,
+	.port_stop		= ahci_port_stop,
+};
+
 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
 
 static const struct ata_port_info ahci_port_info[] = {
@@ -1213,6 +1249,53 @@
 	return rc ?: -EAGAIN;
 }
 
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+	struct ata_taskfile tf;
+	int rc;
+
+	ahci_stop_engine(ap);
+
+	/* clear D2H reception area to properly wait for D2H FIS */
+	ata_tf_init(link->device, &tf);
+	tf.command = 0x80;
+	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+				 deadline);
+
+	ahci_start_engine(ap);
+
+	if (rc || ata_link_offline(link))
+		return rc;
+
+	/* spec mandates ">= 2ms" before checking status */
+	msleep(150);
+
+	/* The pseudo configuration device on SIMG4726 attached to
+	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
+	 * hardreset if no device is attached to the first downstream
+	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
+	 * work around this, wait for !BSY only briefly.  If BSY isn't
+	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
+	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
+	 *
+	 * Wait for two seconds.  Devices attached to downstream port
+	 * which can't process the following IDENTIFY after this will
+	 * have to be reset again.  For most cases, this should
+	 * suffice while making probing snappish enough.
+	 */
+	rc = ata_wait_ready(ap, jiffies + 2 * HZ);
+	if (rc)
+		ahci_kick_engine(ap, 0);
+
+	return 0;
+}
+
 static void ahci_postreset(struct ata_link *link, unsigned int *class)
 {
 	struct ata_port *ap = link->ap;
@@ -1670,6 +1753,19 @@
 		  ahci_postreset);
 }
 
+static void ahci_p5wdh_error_handler(struct ata_port *ap)
+{
+	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+		/* restart engine */
+		ahci_stop_engine(ap);
+		ahci_start_engine(ap);
+	}
+
+	/* perform recovery */
+	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
+		  ahci_postreset);
+}
+
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
@@ -1955,6 +2051,51 @@
 		);
 }
 
+/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
+ * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
+ * support PMP and the 4726 either directly exports the device
+ * attached to the first downstream port or acts as a hardware storage
+ * controller and emulates a single ATA device (can be RAID 0/1 or some
+ * other configuration).
+ *
+ * When there's no device attached to the first downstream port of the
+ * 4726, "Config Disk" appears, which is a pseudo ATA device to
+ * configure the 4726.  However, ATA emulation of the device is very
+ * lame.  It doesn't send signature D2H Reg FIS after the initial
+ * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
+ *
+ * The following function works around the problem by always using
+ * hardreset on the port and not depending on receiving signature FIS
+ * afterward.  If signature FIS isn't received soon, ATA class is
+ * assumed without follow-up softreset.
+ */
+static void ahci_p5wdh_workaround(struct ata_host *host)
+{
+	static struct dmi_system_id sysids[] = {
+		{
+			.ident = "P5W DH Deluxe",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR,
+					  "ASUSTEK COMPUTER INC"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+			},
+		},
+		{ }
+	};
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+
+	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
+	    dmi_check_system(sysids)) {
+		struct ata_port *ap = host->ports[1];
+
+		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
+			   "Deluxe on-board SIMG4726 workaround\n");
+
+		ap->ops = &ahci_p5wdh_ops;
+		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
+	}
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
@@ -2024,6 +2165,9 @@
 			ap->ops = &ata_dummy_port_ops;
 	}
 
+	/* apply workaround for ASUS P5W DH Deluxe mainboard */
+	ahci_p5wdh_workaround(host);
+
 	/* initialize adapter */
 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
 	if (rc)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b5f7c59..081e3df 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -68,7 +68,8 @@
 static unsigned int ata_dev_init_params(struct ata_device *dev,
 					u16 heads, u16 sectors);
 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
-static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
+static unsigned int ata_dev_set_feature(struct ata_device *dev,
+					u8 enable, u8 feature);
 static void ata_dev_xfermask(struct ata_device *dev);
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
@@ -1799,13 +1800,7 @@
 		 * SET_FEATURES spin-up subcommand before it will accept
 		 * anything other than the original IDENTIFY command.
 		 */
-		ata_tf_init(dev, &tf);
-		tf.command = ATA_CMD_SET_FEATURES;
-		tf.feature = SETFEATURES_SPINUP;
-		tf.protocol = ATA_PROT_NODATA;
-		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-		err_mask = ata_exec_internal(dev, &tf, NULL,
-					     DMA_NONE, NULL, 0, 0);
+		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
 		if (err_mask && id[2] != 0x738c) {
 			rc = -EIO;
 			reason = "SPINUP failed";
@@ -2075,7 +2070,8 @@
 			unsigned int err_mask;
 
 			/* issue SET feature command to turn this on */
-			err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
+			err_mask = ata_dev_set_feature(dev,
+					SETFEATURES_SATA_ENABLE, SATA_AN);
 			if (err_mask)
 				ata_dev_printk(dev, KERN_ERR,
 					"failed to enable ATAPI AN "
@@ -2886,6 +2882,13 @@
 			dev->pio_mode <= XFER_PIO_2)
 		err_mask &= ~AC_ERR_DEV;
 
+	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
+	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
+	if (dev->xfer_shift == ATA_SHIFT_MWDMA && 
+	    dev->dma_mode == XFER_MW_DMA_0 &&
+	    (dev->id[63] >> 8) & 1)
+		err_mask &= ~AC_ERR_DEV;
+
 	if (err_mask) {
 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
 			       "(err_mask=0x%x)\n", err_mask);
@@ -3947,9 +3950,6 @@
 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
-	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
-	{ "IOMEGA  ZIP 250       ATAPI       Floppy",
-				NULL,		ATA_HORKAGE_NODMA },
 	/* Odd clown on sil3726/4726 PMPs */
 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
 						ATA_HORKAGE_SKIP_PM },
@@ -4007,7 +4007,7 @@
 	{ }
 };
 
-int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
+static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
 {
 	const char *p;
 	int len;
@@ -4181,15 +4181,14 @@
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
 }
-
 /**
- *	ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
+ *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
  *	@dev: Device to which command will be sent
  *	@enable: Whether to enable or disable the feature
+ *	@feature: The feature to set, passed in the sector count field
  *
  *	Issue SET FEATURES - SATA FEATURES command to device @dev
- *	on port @ap with sector count set to indicate Asynchronous
- *	Notification feature
+ *	on port @ap with the sector count set to @feature.
  *
  *	LOCKING:
  *	PCI/etc. bus probe sem.
@@ -4197,7 +4196,8 @@
  *	RETURNS:
  *	0 on success, AC_ERR_* mask otherwise.
  */
-static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
+static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
+					u8 feature)
 {
 	struct ata_taskfile tf;
 	unsigned int err_mask;
@@ -4210,7 +4210,7 @@
 	tf.feature = enable;
 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
-	tf.nsect = SATA_AN;
+	tf.nsect = feature;
 
 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 
@@ -6921,7 +6921,7 @@
  *	LOCKING:
  *	Kernel thread context (may sleep).
  */
-void ata_port_detach(struct ata_port *ap)
+static void ata_port_detach(struct ata_port *ap)
 {
 	unsigned long flags;
 	struct ata_link *link;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 93e2b54..8cb35bb 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2071,7 +2071,7 @@
 	int try = 0;
 	struct ata_device *dev;
 	unsigned long deadline;
-	unsigned int action;
+	unsigned int tmp_action;
 	ata_reset_fn_t reset;
 	unsigned long flags;
 	int rc;
@@ -2086,14 +2086,14 @@
 	/* Determine which reset to use and record in ehc->i.action.
 	 * prereset() may examine and modify it.
 	 */
-	action = ehc->i.action;
-	ehc->i.action &= ~ATA_EH_RESET_MASK;
 	if (softreset && (!hardreset || (!(link->flags & ATA_LFLAG_NO_SRST) &&
 					 !sata_set_spd_needed(link) &&
-					 !(action & ATA_EH_HARDRESET))))
-		ehc->i.action |= ATA_EH_SOFTRESET;
+					 !(ehc->i.action & ATA_EH_HARDRESET))))
+		tmp_action = ATA_EH_SOFTRESET;
 	else
-		ehc->i.action |= ATA_EH_HARDRESET;
+		tmp_action = ATA_EH_HARDRESET;
+
+	ehc->i.action = (ehc->i.action & ~ATA_EH_RESET_MASK) | tmp_action;
 
 	if (prereset) {
 		rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index be30923..842fe08 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -332,12 +332,13 @@
 {
 }
 
-static void pata_icside_postreset(struct ata_port *ap, unsigned int *classes)
+static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
 {
+	struct ata_port *ap = link->ap;
 	struct pata_icside_state *state = ap->host->private_data;
 
 	if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
-		return ata_std_postreset(ap, classes);
+		return ata_std_postreset(link, classes);
 
 	state->port[ap->port_no].disabled = 1;
 
@@ -395,29 +396,30 @@
 
 static void __devinit
 pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
-			 const struct portinfo *info)
+			 struct pata_icside_info *info,
+			 const struct portinfo *port)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
-	void __iomem *cmd = base + info->dataoffset;
+	void __iomem *cmd = base + port->dataoffset;
 
 	ioaddr->cmd_addr	= cmd;
-	ioaddr->data_addr	= cmd + (ATA_REG_DATA    << info->stepping);
-	ioaddr->error_addr	= cmd + (ATA_REG_ERR     << info->stepping);
-	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << info->stepping);
-	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT   << info->stepping);
-	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL    << info->stepping);
-	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM    << info->stepping);
-	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH    << info->stepping);
-	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE  << info->stepping);
-	ioaddr->status_addr	= cmd + (ATA_REG_STATUS  << info->stepping);
-	ioaddr->command_addr	= cmd + (ATA_REG_CMD     << info->stepping);
+	ioaddr->data_addr	= cmd + (ATA_REG_DATA    << port->stepping);
+	ioaddr->error_addr	= cmd + (ATA_REG_ERR     << port->stepping);
+	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << port->stepping);
+	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT   << port->stepping);
+	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL    << port->stepping);
+	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM    << port->stepping);
+	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH    << port->stepping);
+	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE  << port->stepping);
+	ioaddr->status_addr	= cmd + (ATA_REG_STATUS  << port->stepping);
+	ioaddr->command_addr	= cmd + (ATA_REG_CMD     << port->stepping);
 
-	ioaddr->ctl_addr	= base + info->ctrloffset;
+	ioaddr->ctl_addr	= base + port->ctrloffset;
 	ioaddr->altstatus_addr	= ioaddr->ctl_addr;
 
 	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
-		      info->raw_base + info->dataoffset,
-		      info->raw_base + info->ctrloffset);
+		      info->raw_base + port->dataoffset,
+		      info->raw_base + port->ctrloffset);
 
 	if (info->raw_ioc_base)
 		ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
@@ -441,7 +443,7 @@
 	info->nr_ports = 1;
 	info->port[0] = &pata_icside_portinfo_v5;
 
-	info->raw_base = ecard_resource_start(ec, ECARD_RES_MEMC);
+	info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);
 
 	return 0;
 }
@@ -522,7 +524,7 @@
 		ap->flags |= ATA_FLAG_SLAVE_POSS;
 		ap->ops = &pata_icside_port_ops;
 
-		pata_icside_setup_ioaddr(ap, info->base, info->port[i]);
+		pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
 	}
 
 	return ata_host_activate(host, ec->irq, ata_interrupt, 0,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 2e0279f..f1b422f 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -365,9 +365,9 @@
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), SWNCQ },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), SWNCQ },
-	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), SWNCQ },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
+	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
 
 	{ } /* terminate list */
 };
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 35d19ae..cb4c670 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -128,9 +128,12 @@
 		__free_pages(switcher_page[i], 0);
 }
 
-/*L:305
+/*H:032
  * Dealing With Guest Memory.
  *
+ * Before we go too much further into the Host, we need to grok the routines
+ * we use to deal with Guest memory.
+ *
  * When the Guest gives us (what it thinks is) a physical address, we can use
  * the normal copy_from_user() & copy_to_user() on the corresponding place in
  * the memory region allocated by the Launcher.
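
Concretely, a Guest-physical address is just an offset into the memory region the Launcher set up, so a read helper need be little more than copy_from_user() at that offset. A minimal sketch, assuming the per-guest struct records the base of that region in a mem_base field:

	/* sketch: fetch "len" bytes from Guest-physical address "gpa" */
	static int demo_lgread(struct lguest *lg, void *dst,
			       unsigned long gpa, unsigned int len)
	{
		if (copy_from_user(dst, lg->mem_base + gpa, len) != 0)
			return -EFAULT;	/* bad address: a kill_guest() candidate */
		return 0;
	}
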
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 9d5184c..b478aff 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -90,6 +90,7 @@
 		lg->pending_notify = args->arg1;
 		break;
 	default:
+		/* It should be an architecture-specific hypercall. */
 		if (lguest_arch_do_hcall(lg, args))
 			kill_guest(lg, "Bad hypercall %li\n", args->arg0);
 	}
@@ -157,7 +158,6 @@
  * Guest makes a hypercall, we end up here to set things up: */
 static void initialize(struct lguest *lg)
 {
-
 	/* You can't do anything until you're initialized.  The Guest knows the
 	 * rules, so we're unforgiving here. */
 	if (lg->hcall->arg0 != LHCALL_LGUEST_INIT) {
@@ -174,7 +174,8 @@
 	    || get_user(lg->noirq_end, &lg->lguest_data->noirq_end))
 		kill_guest(lg, "bad guest page %p", lg->lguest_data);
 
-	/* We write the current time into the Guest's data page once now. */
+	/* We write the current time into the Guest's data page once so it can
+	 * set its clock. */
 	write_timestamp(lg);
 
 	/* page_tables.c will also do some setup. */
@@ -182,8 +183,8 @@
 
 	/* This is the one case where the above accesses might have been the
 	 * first write to a Guest page.  This may have caused a copy-on-write
-	 * fault, but the Guest might be referring to the old (read-only)
-	 * page. */
+	 * fault, but the old (read-only) page might still be in the Guest
+	 * pagetable. */
 	guest_pagetable_clear_all(lg);
 }
 
@@ -220,7 +221,7 @@
 		 * Normally it doesn't matter: the Guest will run again and
 		 * update the trap number before we come back here.
 		 *
-		 * However, if we are signalled or the Guest sends DMA to the
+		 * However, if we are signalled or the Guest sends I/O to the
 		 * Launcher, the run_guest() loop will exit without running the
 		 * Guest.  When it comes back it would try to re-run the
 		 * hypercall. */
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 8296698..2b66f79 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -92,8 +92,8 @@
 
 	/* Remember that we never let the Guest actually disable interrupts, so
 	 * the "Interrupt Flag" bit is always set.  We copy that bit from the
-	 * Guest's "irq_enabled" field into the eflags word: the Guest copies
-	 * it back in "lguest_iret". */
+	 * Guest's "irq_enabled" field into the eflags word: we saw the Guest
+	 * copy it back in "lguest_iret". */
 	eflags = lg->regs->eflags;
 	if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
 	    && !(irq_enable & X86_EFLAGS_IF))
@@ -124,7 +124,7 @@
 			kill_guest(lg, "Disabling interrupts");
 }
 
-/*H:200
+/*H:205
  * Virtual Interrupts.
  *
  * maybe_do_interrupt() gets called before every entry to the Guest, to see if
@@ -256,19 +256,21 @@
 	 * bogus one in): if we fail here, the Guest will be killed. */
 	if (!idt_present(lg->arch.idt[num].a, lg->arch.idt[num].b))
 		return 0;
-	set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b, has_err(num));
+	set_guest_interrupt(lg, lg->arch.idt[num].a, lg->arch.idt[num].b,
+			    has_err(num));
 	return 1;
 }
 
 /*H:250 Here's the hard part: returning to the Host every time a trap happens
  * and then calling deliver_trap() and re-entering the Guest is slow.
- * Particularly because Guest userspace system calls are traps (trap 128).
+ * Particularly because Guest userspace system calls are traps (usually trap
+ * 128).
  *
  * So we'd like to set up the IDT to tell the CPU to deliver traps directly
  * into the Guest.  This is possible, but the complexities cause the size of
  * this file to double!  However, 150 lines of code is worth writing for taking
  * system calls down from 1750ns to 270ns.  Plus, if lguest didn't do it, all
- * the other hypervisors would tease it.
+ * the other hypervisors would beat it up at lunchtime.
  *
  * This routine indicates if a particular trap number could be delivered
  * directly. */
@@ -331,7 +333,7 @@
  * change stacks on each context switch. */
 void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
 {
-	/* You are not allowd have a stack segment with privilege level 0: bad
+	/* You are not allowed to have a stack segment with privilege level 0: bad
 	 * Guest! */
 	if ((seg & 0x3) != GUEST_PL)
 		kill_guest(lg, "bad stack segment %i", seg);
@@ -350,7 +352,7 @@
  * part of the Host: page table handling. */
 
 /*H:235 This is the routine which actually checks the Guest's IDT entry and
- * transfers it into our entry in "struct lguest": */
+ * transfers it into the entry in "struct lguest": */
 static void set_trap(struct lguest *lg, struct desc_struct *trap,
 		     unsigned int num, u32 lo, u32 hi)
 {
@@ -456,6 +458,18 @@
 	}
 }
 
+/*H:200
+ * The Guest Clock.
+ *
+ * There are two sources of virtual interrupts.  We saw one in lguest_user.c:
+ * the Launcher sending interrupts for virtual devices.  The other is the Guest
+ * timer interrupt.
+ *
+ * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to
+ * the next timer interrupt (in nanoseconds).  We use the high-resolution timer
+ * infrastructure to set a callback at that time.
+ *
+ * 0 means "turn off the clock". */
 void guest_set_clockevent(struct lguest *lg, unsigned long delta)
 {
 	ktime_t expires;
@@ -466,20 +480,27 @@
 		return;
 	}
 
+	/* We use wallclock time here, so the Guest might not be running for
+	 * all the time between now and the timer interrupt it asked for.  This
+	 * is almost always the right thing to do. */
 	expires = ktime_add_ns(ktime_get_real(), delta);
 	hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
 }
 
+/* This is the function called when the Guest's timer expires. */
 static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
 {
 	struct lguest *lg = container_of(timer, struct lguest, hrt);
 
+	/* Remember the first interrupt is the timer interrupt. */
 	set_bit(0, lg->irqs_pending);
+	/* If the Guest is actually stopped, we need to wake it up. */
 	if (lg->halted)
 		wake_up_process(lg->tsk);
 	return HRTIMER_NORESTART;
 }
 
+/* This sets up the timer for this Guest. */
 void init_clockdev(struct lguest *lg)
 {
 	hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
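
The code above is the stock one-shot hrtimer recipe: initialize the timer and its callback once, arm it with an absolute expiry, and return HRTIMER_NORESTART so it doesn't re-fire. The same recipe, self-contained (demo_* names are hypothetical):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer demo_timer;

	static enum hrtimer_restart demo_expired(struct hrtimer *timer)
	{
		/* one-shot: note the event for whoever cares, but don't re-arm */
		return HRTIMER_NORESTART;
	}

	static void demo_timer_init(void)
	{
		hrtimer_init(&demo_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
		demo_timer.function = demo_expired;
	}

	static void demo_timer_arm(unsigned long delta_ns)
	{
		ktime_t expires = ktime_add_ns(ktime_get_real(), delta_ns);

		hrtimer_start(&demo_timer, expires, HRTIMER_MODE_ABS);
	}
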
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index d9144be..8692489 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -74,9 +74,6 @@
 	u32 pgdidx;
 	struct pgdir pgdirs[4];
 
-	/* Cached wakeup: we hold a reference to this task. */
-	struct task_struct *wake;
-
 	unsigned long noirq_start, noirq_end;
 	unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */
 
@@ -103,7 +100,7 @@
 void __lgread(struct lguest *, void *, unsigned long, unsigned);
 void __lgwrite(struct lguest *, unsigned long, const void *, unsigned);
 
-/*L:306 Using memory-copy operations like that is usually inconvient, so we
+/*H:035 Using memory-copy operations like that is usually inconvenient, so we
  * have the following helper macros which read and write a specific type (often
  * an unsigned long).
  *
@@ -191,7 +188,7 @@
  * Let's step aside for the moment, to study one important routine that's used
  * widely in the Host code.
  *
- * There are many cases where the Guest does something invalid, like pass crap
+ * There are many cases where the Guest can do something invalid, like pass crap
  * to a hypercall.  Since only the Guest kernel can make hypercalls, it's quite
  * acceptable to simply terminate the Guest and give the Launcher a nicely
  * formatted reason.  It's also simpler for the Guest itself, which doesn't
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 71c6483..8904f72 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -53,7 +53,8 @@
  * Device configurations
  *
  * The configuration information for a device consists of a series of fields.
- * The device will look for these fields during setup.
+ * We don't really care what they are: the Launcher set them up, and the driver
+ * will look at them during setup.
  *
  * For us these fields come immediately after that device's descriptor in the
  * lguest_devices page.
@@ -122,8 +123,8 @@
  * The other piece of infrastructure virtio needs is a "virtqueue": a way of
  * the Guest device registering buffers for the other side to read from or
  * write into (ie. send and receive buffers).  Each device can have multiple
- * virtqueues: for example the console has one queue for sending and one for
- * receiving.
+ * virtqueues: for example the console driver uses one queue for sending and
+ * another for receiving.
  *
  * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue
  * already exists in virtio_ring.c.  We just need to connect it up.
@@ -158,7 +159,7 @@
  *
  * This is kind of an ugly duckling.  It'd be nicer to have a standard
  * representation of a virtqueue in the configuration space, but it seems that
- * everyone wants to do it differently.  The KVM guys want the Guest to
+ * everyone wants to do it differently.  The KVM coders want the Guest to
  * allocate its own pages and tell the Host where they are, but for lguest it's
  * simpler for the Host to simply tell us where the pages are.
  *
@@ -284,6 +285,8 @@
 {
 	struct lguest_device *ldev;
 
+	/* Start with zeroed memory; Linux's device layer seems to count on
+	 * it. */
 	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
 	if (!ldev) {
 		printk(KERN_EMERG "Cannot allocate lguest dev %u\n",
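
The comment above is the whole point of reaching for kzalloc() rather than kmalloc(): structures handed to the driver core should start with every field known-zero. The idiom in miniature (demo names hypothetical):

	static struct demo_dev *demo_alloc(void)
	{
		struct demo_dev *d;

		/* kzalloc = kmalloc + zeroing; sizeof(*d) tracks d's type */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		return d;	/* NULL on allocation failure */
	}
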
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index ee405b38..9d716fa 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -8,20 +8,22 @@
 #include <linux/fs.h>
 #include "lg.h"
 
-/*L:315 To force the Guest to stop running and return to the Launcher, the
- * Waker sets writes LHREQ_BREAK and the value "1" to /dev/lguest.  The
- * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */
+/*L:055 When something happens, the Waker process needs a way to stop the
+ * kernel running the Guest and return to the Launcher.  So the Waker writes
+ * LHREQ_BREAK and the value "1" to /dev/lguest to do this.  Once the Launcher
+ * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release
+ * the Waker. */
 static int break_guest_out(struct lguest *lg, const unsigned long __user *input)
 {
 	unsigned long on;
 
-	/* Fetch whether they're turning break on or off.. */
+	/* Fetch whether they're turning break on or off. */
 	if (get_user(on, input) != 0)
 		return -EFAULT;
 
 	if (on) {
 		lg->break_out = 1;
-		/* Pop it out (may be running on different CPU) */
+		/* Pop it out of the Guest (may be running on a different CPU) */
 		wake_up_process(lg->tsk);
 		/* Wait for them to reset it */
 		return wait_event_interruptible(lg->break_wq, !lg->break_out);
@@ -58,7 +60,7 @@
 	if (!lg)
 		return -EINVAL;
 
-	/* If you're not the task which owns the guest, go away. */
+	/* If you're not the task which owns the Guest, go away. */
 	if (current != lg->tsk)
 		return -EPERM;
 
@@ -92,8 +94,8 @@
  * base: The start of the Guest-physical memory inside the Launcher memory.
  *
  * pfnlimit: The highest (Guest-physical) page number the Guest should be
- * allowed to access.  The Launcher has to live in Guest memory, so it sets
- * this to ensure the Guest can't reach it.
+ * allowed to access.  The Guest memory lives inside the Launcher, so it sets
+ * this to ensure the Guest can only reach its own memory.
  *
  * pgdir: The (Guest-physical) address of the top of the initial Guest
  * pagetables (which are set up by the Launcher).
@@ -189,7 +191,7 @@
 }
 
 /*L:010 The first operation the Launcher does must be a write.  All writes
- * start with a 32 bit number: for the first write this must be
+ * start with an unsigned long number: for the first write this must be
  * LHREQ_INITIALIZE to set up the Guest.  After that the Launcher can use
  * writes of other values to send interrupts. */
 static ssize_t write(struct file *file, const char __user *in,
@@ -275,8 +277,7 @@
  * The Launcher is the Host userspace program which sets up, runs and services
  * the Guest.  In fact, many comments in the Drivers which refer to "the Host"
  * doing things are inaccurate: the Launcher does all the device handling for
- * the Guest.  The Guest can't tell what's done by the the Launcher and what by
- * the Host.
+ * the Guest, but the Guest can't know that.
  *
  * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
  * shall see more of that later.
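
From the Launcher's side, the LHREQ_BREAK protocol described above is a plain write() of two unsigned longs to /dev/lguest. A hedged userspace sketch (LHREQ_BREAK comes from linux/lguest_launcher.h; error handling trimmed):

	#include <unistd.h>
	#include "linux/lguest_launcher.h"

	/* on = 1: kick the Guest out to the Launcher; on = 0: release it again */
	static int demo_break(int lguest_fd, unsigned long on)
	{
		unsigned long args[2] = { LHREQ_BREAK, on };

		return write(lguest_fd, args, sizeof(args)) < 0 ? -1 : 0;
	}
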
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 2a45f06..fffabb3 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -26,7 +26,8 @@
  *
  * We use two-level page tables for the Guest.  If you're not entirely
  * comfortable with virtual addresses, physical addresses and page tables then
- * I recommend you review lguest.c's "Page Table Handling" (with diagrams!).
+ * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
+ * diagrams!).
  *
  * The Guest keeps page tables, but we maintain the actual ones here: these are
  * called "shadow" page tables.  Which is a very Guest-centric name: these are
@@ -36,11 +37,11 @@
  *
  * Anyway, this is the most complicated part of the Host code.  There are seven
  * parts to this:
- *  (i) Setting up a page table entry for the Guest when it faults,
- *  (ii) Setting up the page table entry for the Guest stack,
- *  (iii) Setting up a page table entry when the Guest tells us it has changed,
+ *  (i) Looking up a page table entry when the Guest faults,
+ *  (ii) Making sure the Guest stack is mapped,
+ *  (iii) Setting up a page table entry when the Guest tells us one has changed,
  *  (iv) Switching page tables,
- *  (v) Flushing (thowing away) page tables,
+ *  (v) Flushing (throwing away) page tables,
  *  (vi) Mapping the Switcher when the Guest is about to run,
  *  (vii) Setting up the page tables initially.
  :*/
@@ -57,16 +58,15 @@
 static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
 #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
 
-/*H:320 With our shadow and Guest types established, we need to deal with
- * them: the page table code is curly enough to need helper functions to keep
- * it clear and clean.
+/*H:320 The page table code is curly enough to need helper functions to keep it
+ * clear and clean.
  *
  * There are two functions which return pointers to the shadow (aka "real")
  * page tables.
  *
  * spgd_addr() takes the virtual address and returns a pointer to the top-level
- * page directory entry for that address.  Since we keep track of several page
- * tables, the "i" argument tells us which one we're interested in (it's
+ * page directory entry (PGD) for that address.  Since we keep track of several
+ * page tables, the "i" argument tells us which one we're interested in (it's
  * usually the current one). */
 static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
 {
@@ -81,9 +81,9 @@
 	return &lg->pgdirs[i].pgdir[index];
 }
 
-/* This routine then takes the PGD entry given above, which contains the
- * address of the PTE page.  It then returns a pointer to the PTE entry for the
- * given address. */
+/* This routine then takes the page directory entry returned above, which
+ * contains the address of the page table entry (PTE) page.  It then returns a
+ * pointer to the PTE entry for the given address. */
 static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
 {
 	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
@@ -191,7 +191,7 @@
 }
 
 /*H:330
- * (i) Setting up a page table entry for the Guest when it faults
+ * (i) Looking up a page table entry when the Guest faults.
  *
  * We saw this call in run_guest(): when we see a page fault in the Guest, we
  * come here.  That's because we only set up the shadow page tables lazily as
@@ -199,7 +199,7 @@
  * and return to the Guest without it knowing.
  *
  * If we fixed up the fault (ie. we mapped the address), this routine returns
- * true. */
+ * true.  Otherwise, it was a real fault and we need to tell the Guest. */
 int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
 {
 	pgd_t gpgd;
@@ -246,16 +246,16 @@
 	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
 		return 0;
 
-	/* User access to a kernel page? (bit 3 == user access) */
+	/* User access to a kernel-only page? (bit 3 == user access) */
 	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
 		return 0;
 
 	/* Check that the Guest PTE flags are OK, and the page number is below
 	 * the pfn_limit (ie. not mapping the Launcher binary). */
 	check_gpte(lg, gpte);
+
 	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
 	gpte = pte_mkyoung(gpte);
-
 	if (errcode & 2)
 		gpte = pte_mkdirty(gpte);
 
@@ -272,23 +272,28 @@
 	else
 		/* If this is a read, don't set the "writable" bit in the page
 		 * table entry, even if the Guest says it's writable.  That way
-		 * we come back here when a write does actually ocur, so we can
-		 * update the Guest's _PAGE_DIRTY flag. */
+		 * we will come back here when a write does actually occur, so
+		 * we can update the Guest's _PAGE_DIRTY flag. */
 		*spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0);
 
 	/* Finally, we write the Guest PTE entry back: we've set the
 	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
 	lgwrite(lg, gpte_ptr, pte_t, gpte);
 
-	/* We succeeded in mapping the page! */
+	/* The fault is fixed, the page table is populated, the mapping
+	 * manipulated, the result returned and the code complete.  A small
+	 * delay and a trace of alliteration are the only indications the Guest
+	 * has that a page fault occurred at all. */
 	return 1;
 }
 
-/*H:360 (ii) Setting up the page table entry for the Guest stack.
+/*H:360
+ * (ii) Making sure the Guest stack is mapped.
  *
- * Remember pin_stack_pages() which makes sure the stack is mapped?  It could
- * simply call demand_page(), but as we've seen that logic is quite long, and
- * usually the stack pages are already mapped anyway, so it's not required.
+ * Remember that direct traps into the Guest need a mapped Guest kernel stack.
+ * pin_stack_pages() calls us here: we could simply call demand_page(), but as
+ * we've seen that logic is quite long, and usually the stack pages are already
+ * mapped, so it's overkill.
  *
  * This is a quick version which answers the question: is this virtual address
  * mapped by the shadow page tables, and is it writable? */
@@ -297,7 +302,7 @@
 	pgd_t *spgd;
 	unsigned long flags;
 
-	/* Look at the top level entry: is it present? */
+	/* Look at the current top level entry: is it present? */
 	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
 		return 0;
@@ -333,15 +338,14 @@
 			release_pte(ptepage[i]);
 		/* Now we can free the page of PTEs */
 		free_page((long)ptepage);
-		/* And zero out the PGD entry we we never release it twice. */
+		/* And zero out the PGD entry so we never release it twice. */
 		*spgd = __pgd(0);
 	}
 }
 
-/*H:440 (v) Flushing (thowing away) page tables,
- *
- * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
- * It simply releases every PTE page from 0 up to the kernel address. */
+/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
+ * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
+ * It simply releases every PTE page from 0 up to the Guest's kernel address. */
 static void flush_user_mappings(struct lguest *lg, int idx)
 {
 	unsigned int i;
@@ -350,8 +354,10 @@
 		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
 }
 
-/* The Guest also has a hypercall to do this manually: it's used when a large
- * number of mappings have been changed. */
+/*H:440 (v) Flushing (throwing away) page tables,
+ *
+ * The Guest has a hypercall to throw away the page tables: it's used when a
+ * large number of mappings have been changed. */
 void guest_pagetable_flush_user(struct lguest *lg)
 {
 	/* Drop the userspace part of the current page table. */
@@ -423,8 +429,9 @@
 
 /*H:430 (iv) Switching page tables
  *
- * This is what happens when the Guest changes page tables (ie. changes the
- * top-level pgdir).  This happens on almost every context switch. */
+ * Now we've seen all the page table setting and manipulation, let's see
+ * what happens when the Guest changes page tables (ie. changes the top-level
+ * pgdir).  This occurs on almost every context switch. */
 void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
 {
 	int newpgdir, repin = 0;
@@ -443,7 +450,8 @@
 }
 
 /*H:470 Finally, a routine which throws away everything: all PGD entries in all
- * the shadow page tables.  This is used when we destroy the Guest. */
+ * the shadow page tables, including the Guest's kernel mappings.  This is used
+ * when we destroy the Guest. */
 static void release_all_pagetables(struct lguest *lg)
 {
 	unsigned int i, j;
@@ -458,13 +466,22 @@
 
 /* We also throw away everything when a Guest tells us it's changed a kernel
  * mapping.  Since kernel mappings are in every page table, it's easiest to
- * throw them all away.  This is amazingly slow, but thankfully rare. */
+ * throw them all away.  This traps the Guest in amber for a while as
+ * everything faults back in, but it's rare. */
 void guest_pagetable_clear_all(struct lguest *lg)
 {
 	release_all_pagetables(lg);
 	/* We need the Guest kernel stack mapped again. */
 	pin_stack_pages(lg);
 }
+/*:*/
+/*M:009 Since we throw away all mappings when a kernel mapping changes, our
+ * performance sucks for guests using highmem.  In fact, a guest with
+ * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
+ * usually slower than a Guest with less memory.
+ *
+ * This, of course, cannot be fixed.  It would take some kind of... well, I
+ * don't know, but the term "puissant code-fu" comes to mind. :*/
 
 /*H:420 This is the routine which actually sets the page table entry for then
  * "idx"'th shadow page table.
@@ -483,7 +500,7 @@
 static void do_set_pte(struct lguest *lg, int idx,
 		       unsigned long vaddr, pte_t gpte)
 {
-	/* Look up the matching shadow page directot entry. */
+	/* Look up the matching shadow page directory entry. */
 	pgd_t *spgd = spgd_addr(lg, idx, vaddr);
 
 	/* If the top level isn't present, there's no entry to update. */
@@ -500,7 +517,8 @@
 			*spte = gpte_to_spte(lg, gpte,
 					     pte_flags(gpte) & _PAGE_DIRTY);
 		} else
-			/* Otherwise we can demand_page() it in later. */
+			/* Otherwise kill it and we can demand_page() it in
+			 * later. */
 			*spte = __pte(0);
 	}
 }
@@ -535,7 +553,7 @@
 }
 
 /*H:400
- * (iii) Setting up a page table entry when the Guest tells us it has changed.
+ * (iii) Setting up a page table entry when the Guest tells us one has changed.
  *
  * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
  * with the other side of page tables while we're here: what happens when the
@@ -612,9 +630,10 @@
 
 /*H:480 (vi) Mapping the Switcher when the Guest is about to run.
  *
- * The Switcher and the two pages for this CPU need to be available to the
+ * The Switcher and the two pages for this CPU need to be visible in the
  * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
- * for each CPU already set up, we just need to hook them in. */
+ * for each CPU already set up, we just need to hook them in now that we know
+ * which Guest is about to run on this CPU. */
 void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
 {
 	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
@@ -677,6 +696,18 @@
 			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
 }
 
+/* We've made it through the page table code.  Perhaps our tired brains are
+ * still processing the details, or perhaps we're simply glad it's over.
+ *
+ * If nothing else, note that all this complexity in juggling shadow page
+ * tables in sync with the Guest's page tables is for one reason: for most
+ * Guests this page table dance determines how bad performance will be.  This
+ * is why Xen uses exotic direct Guest pagetable manipulation, and why both
+ * Intel and AMD have implemented shadow page table support directly into
+ * hardware.
+ *
+ * There is just one file remaining in the Host. */
+
 /*H:510 At boot or module load time, init_pagetables() allocates and populates
  * the Switcher PTE page for each CPU. */
 __init int init_pagetables(struct page **switcher_page, unsigned int pages)
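
The spgd_addr()/spte_addr() pair described above boils down to the classic two-level x86 split: with 4kB pages, the top 10 bits of a virtual address index the page directory and the next 10 bits index the PTE page. The index arithmetic alone, with hypothetical names:

	#define DEMO_PGDIR_SHIFT	22	/* two-level i386 paging, 4kB pages */
	#define DEMO_PTRS_PER_PTE	1024

	static inline unsigned int demo_pgd_index(unsigned long vaddr)
	{
		return vaddr >> DEMO_PGDIR_SHIFT;		/* top 10 bits */
	}

	static inline unsigned int demo_pte_index(unsigned long vaddr)
	{
		return (vaddr >> 12) & (DEMO_PTRS_PER_PTE - 1);	/* middle 10 bits */
	}
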
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index c2434ec..9e189cb 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -12,8 +12,6 @@
 #include "lg.h"
 
 /*H:600
- * We've almost completed the Host; there's just one file to go!
- *
  * Segments & The Global Descriptor Table
  *
  * (That title sounds like a bad Nerdcore group.  Not to suggest that there are
@@ -55,7 +53,7 @@
 		|| num == GDT_ENTRY_DOUBLEFAULT_TSS);
 }
 
-/*H:610 Once the GDT has been changed, we fix the new entries up a little.  We
+/*H:630 Once the Guest gives us new GDT entries, we fix them up a little.  We
  * don't care if they're invalid: the worst that can happen is a General
  * Protection Fault in the Switcher when it restores a Guest segment register
  * which tries to use that entry.  Then we kill the Guest for causing such a
@@ -84,25 +82,33 @@
 	}
 }
 
-/* This routine is called at boot or modprobe time for each CPU to set up the
- * "constant" GDT entries for Guests running on that CPU. */
+/*H:610 Like the IDT, we never simply use the GDT the Guest gives us.  We keep
+ * a GDT for each CPU, and copy across the Guest's entries each time we want to
+ * run the Guest on that CPU.
+ *
+ * This routine is called at boot or modprobe time for each CPU to set up the
+ * constant GDT entries: the ones which are the same no matter what Guest we're
+ * running. */
 void setup_default_gdt_entries(struct lguest_ro_state *state)
 {
 	struct desc_struct *gdt = state->guest_gdt;
 	unsigned long tss = (unsigned long)&state->guest_tss;
 
-	/* The hypervisor segments are full 0-4G segments, privilege level 0 */
+	/* The Switcher segments are full 0-4G segments, privilege level 0 */
 	gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
 	gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
 
-	/* The TSS segment refers to the TSS entry for this CPU, so we cannot
-	 * copy it from the Guest.  Forgive the magic flags */
+	/* The TSS segment refers to the TSS entry for this particular CPU.
+	 * Forgive the magic flags: the 0x8900 means the entry is Present, it's a
+	 * privilege level 0 Available 386 TSS system segment, and the 0x67
+	 * means Saturn is eclipsed by Mercury in the twelfth house. */
 	gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
 	gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
 		| ((tss >> 16) & 0x000000FF);
 }
 
-/* This routine is called before the Guest is run for the first time. */
+/* This routine sets up the initial Guest GDT for booting.  All entries start
+ * as 0 (unusable). */
 void setup_guest_gdt(struct lguest *lg)
 {
 	/* Start with full 0-4G segments... */
@@ -114,13 +120,8 @@
 	lg->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
 }
 
-/* Like the IDT, we never simply use the GDT the Guest gives us.  We set up the
- * GDTs for each CPU, then we copy across the entries each time we want to run
- * a different Guest on that CPU. */
-
-/* A partial GDT load, for the three "thead-local storage" entries.  Otherwise
- * it's just like load_guest_gdt().  So much, in fact, it would probably be
- * neater to have a single hypercall to cover both. */
+/*H:650 An optimization of copy_gdt(), for just the three "thread-local storage"
+ * entries. */
 void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
 {
 	unsigned int i;
@@ -129,7 +130,9 @@
 		gdt[i] = lg->arch.gdt[i];
 }
 
-/* This is the full version */
+/*H:640 When the Guest is run on a different CPU, or the GDT entries have
+ * changed, copy_gdt() is called to copy the Guest's GDT entries across to this
+ * CPU's GDT. */
 void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
 {
 	unsigned int i;
@@ -141,7 +144,8 @@
 			gdt[i] = lg->arch.gdt[i];
 }
 
-/* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */
+/*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT).
+ * We copy it from the Guest and tweak the entries. */
 void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
 {
 	/* We assume the Guest has the same number of GDT entries as the
@@ -157,16 +161,22 @@
 	lg->changed |= CHANGED_GDT;
 }
 
+/* This is the fast-track version for just changing the three TLS entries.
+ * Remember that this happens on every context switch, so it's worth
+ * optimizing.  But wouldn't it be neater to have a single hypercall to cover
+ * both cases? */
 void guest_load_tls(struct lguest *lg, unsigned long gtls)
 {
 	struct desc_struct *tls = &lg->arch.gdt[GDT_ENTRY_TLS_MIN];
 
 	__lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
 	fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+	/* Note that just the TLS entries have changed. */
 	lg->changed |= CHANGED_GDT_TLS;
 }
+/*:*/
 
-/*
+/*H:660
  * With this, we have finished the Host.
  *
  * Five of the seven parts of our task are complete.  You have made it through
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 9eed12d..482aec2 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -63,7 +63,7 @@
 static DEFINE_PER_CPU(struct lguest *, last_guest);
 
 /*S:010
- * We are getting close to the Switcher.
+ * We approach the Switcher.
  *
  * Remember that each CPU has two pages which are visible to the Guest when it
  * runs on that CPU.  This has to contain the state for that Guest: we copy the
@@ -134,7 +134,7 @@
 	 *
 	 * The lcall also pushes the old code segment (KERNEL_CS) onto the
 	 * stack, then the address of this call.  This stack layout happens to
-	 * exactly match the stack of an interrupt... */
+	 * exactly match the stack layout created by an interrupt... */
 	asm volatile("pushf; lcall *lguest_entry"
 		     /* This is how we tell GCC that %eax ("a") and %ebx ("b")
 		      * are changed by this routine.  The "=" means output. */
@@ -151,40 +151,46 @@
 }
 /*:*/
 
+/*M:002 There are hooks in the scheduler which we can register to tell when we
+ * get kicked off the CPU (preempt_notifier_register()).  This would allow us
+ * to lazily disable SYSENTER which would regain some performance, and should
+ * also simplify copy_in_guest_info().  Note that we'd still need to restore
+ * things when we exit to Launcher userspace, but that's fairly easy.
+ *
+ * The hooks were designed for KVM, but we can also put them to good use. :*/
+
 /*H:040 This is the i386-specific code to setup and run the Guest.  Interrupts
  * are disabled: we own the CPU. */
 void lguest_arch_run_guest(struct lguest *lg)
 {
-	/* Remember the awfully-named TS bit?  If the Guest has asked
-	 * to set it we set it now, so we can trap and pass that trap
-	 * to the Guest if it uses the FPU. */
+	/* Remember the awfully-named TS bit?  If the Guest has asked to set it
+	 * we set it now, so we can trap and pass that trap to the Guest if it
+	 * uses the FPU. */
 	if (lg->ts)
 		lguest_set_ts();
 
-	/* SYSENTER is an optimized way of doing system calls.  We
-	 * can't allow it because it always jumps to privilege level 0.
-	 * A normal Guest won't try it because we don't advertise it in
-	 * CPUID, but a malicious Guest (or malicious Guest userspace
-	 * program) could, so we tell the CPU to disable it before
-	 * running the Guest. */
+	/* SYSENTER is an optimized way of doing system calls.  We can't allow
+	 * it because it always jumps to privilege level 0.  A normal Guest
+	 * won't try it because we don't advertise it in CPUID, but a malicious
+	 * Guest (or malicious Guest userspace program) could, so we tell the
+	 * CPU to disable it before running the Guest. */
 	if (boot_cpu_has(X86_FEATURE_SEP))
 		wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
 
-	/* Now we actually run the Guest.  It will pop back out when
-	 * something interesting happens, and we can examine its
-	 * registers to see what it was doing. */
+	/* Now we actually run the Guest.  It will return when something
+	 * interesting happens, and we can examine its registers to see what it
+	 * was doing. */
 	run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
 
-	/* The "regs" pointer contains two extra entries which are not
-	 * really registers: a trap number which says what interrupt or
-	 * trap made the switcher code come back, and an error code
-	 * which some traps set.  */
+	/* Note that the "regs" pointer contains two extra entries which are
+	 * not really registers: a trap number which says what interrupt or
+	 * trap made the switcher code come back, and an error code which some
+	 * traps set.  */
 
-	/* If the Guest page faulted, then the cr2 register will tell
-	 * us the bad virtual address.  We have to grab this now,
-	 * because once we re-enable interrupts an interrupt could
-	 * fault and thus overwrite cr2, or we could even move off to a
-	 * different CPU. */
+	/* If the Guest page faulted, then the cr2 register will tell us the
+	 * bad virtual address.  We have to grab this now, because once we
+	 * re-enable interrupts an interrupt could fault and thus overwrite
+	 * cr2, or we could even move off to a different CPU. */
 	if (lg->regs->trapnum == 14)
 		lg->arch.last_pagefault = read_cr2();
 	/* Similarly, if we took a trap because the Guest used the FPU,
@@ -197,14 +203,15 @@
 		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
 }
 
-/*H:130 Our Guest is usually so well behaved; it never tries to do things it
- * isn't allowed to.  Unfortunately, Linux's paravirtual infrastructure isn't
- * quite complete, because it doesn't contain replacements for the Intel I/O
- * instructions.  As a result, the Guest sometimes fumbles across one during
- * the boot process as it probes for various things which are usually attached
- * to a PC.
+/*H:130 Now we've examined the hypercall code; our Guest can make requests.
+ * Our Guest is usually so well behaved; it never tries to do things it isn't
+ * allowed to, and uses hypercalls instead.  Unfortunately, Linux's paravirtual
+ * infrastructure isn't quite complete, because it doesn't contain replacements
+ * for the Intel I/O instructions.  As a result, the Guest sometimes fumbles
+ * across one during the boot process as it probes for various things which are
+ * usually attached to a PC.
  *
- * When the Guest uses one of these instructions, we get trap #13 (General
+ * When the Guest uses one of these instructions, we get a trap (General
  * Protection Fault) and come here.  We see if it's one of those troublesome
  * instructions and skip over it.  We return true if we did. */
 static int emulate_insn(struct lguest *lg)
@@ -275,43 +282,43 @@
 void lguest_arch_handle_trap(struct lguest *lg)
 {
 	switch (lg->regs->trapnum) {
-	case 13: /* We've intercepted a GPF. */
-		 /* Check if this was one of those annoying IN or OUT
-		  * instructions which we need to emulate.  If so, we
-		  * just go back into the Guest after we've done it. */
+	case 13: /* We've intercepted a General Protection Fault. */
+		/* Check if this was one of those annoying IN or OUT
+		 * instructions which we need to emulate.  If so, we just go
+		 * back into the Guest after we've done it. */
 		if (lg->regs->errcode == 0) {
 			if (emulate_insn(lg))
 				return;
 		}
 		break;
-	case 14: /* We've intercepted a page fault. */
-		 /* The Guest accessed a virtual address that wasn't
-		  * mapped.  This happens a lot: we don't actually set
-		  * up most of the page tables for the Guest at all when
-		  * we start: as it runs it asks for more and more, and
-		  * we set them up as required. In this case, we don't
-		  * even tell the Guest that the fault happened.
-		  *
-		  * The errcode tells whether this was a read or a
-		  * write, and whether kernel or userspace code. */
+	case 14: /* We've intercepted a Page Fault. */
+		/* The Guest accessed a virtual address that wasn't mapped.
+		 * This happens a lot: we don't actually set up most of the
+		 * page tables for the Guest at all when we start: as it runs
+		 * it asks for more and more, and we set them up as
+		 * required. In this case, we don't even tell the Guest that
+		 * the fault happened.
+		 *
+		 * The errcode tells whether this was a read or a write, and
+		 * whether kernel or userspace code. */
 		if (demand_page(lg, lg->arch.last_pagefault, lg->regs->errcode))
 			return;
 
-		 /* OK, it's really not there (or not OK): the Guest
-		  * needs to know.  We write out the cr2 value so it
-		  * knows where the fault occurred.
-		  *
-		  * Note that if the Guest were really messed up, this
-		  * could happen before it's done the INITIALIZE
-		  * hypercall, so lg->lguest_data will be NULL */
+		/* OK, it's really not there (or not OK): the Guest needs to
+		 * know.  We write out the cr2 value so it knows where the
+		 * fault occurred.
+		 *
+		 * Note that if the Guest were really messed up, this could
+		 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
+		 * lg->lguest_data could be NULL */
 		if (lg->lguest_data &&
 		    put_user(lg->arch.last_pagefault, &lg->lguest_data->cr2))
 			kill_guest(lg, "Writing cr2");
 		break;
 	case 7: /* We've intercepted a Device Not Available fault. */
-		/* If the Guest doesn't want to know, we already
-		 * restored the Floating Point Unit, so we just
-		 * continue without telling it. */
+		/* If the Guest doesn't want to know, we already restored the
+		 * Floating Point Unit, so we just continue without telling
+		 * it. */
 		if (!lg->ts)
 			return;
 		break;
@@ -536,9 +543,6 @@
 
 	return 0;
 }
-/* Now we've examined the hypercall code; our Guest can make requests.  There
- * is one other way we can do things for the Guest, as we see in
- * emulate_insn(). :*/
 
 /*L:030 lguest_arch_setup_regs()
  *
@@ -562,7 +566,7 @@
 	 * is supposed to always be "1".  Bit 9 (0x200) controls whether
 	 * interrupts are enabled.  We always leave interrupts enabled while
 	 * running the Guest. */
-	regs->eflags = 0x202;
+	regs->eflags = X86_EFLAGS_IF | 0x2;
 
 	/* The "Extended Instruction Pointer" register says where the Guest is
 	 * running. */
@@ -570,8 +574,8 @@
 
 	/* %esi points to our boot information, at physical address 0, so don't
 	 * touch it. */
+
 	/* There are a couple of GDT entries the Guest expects when first
 	 * booting. */
-
 	setup_guest_gdt(lg);
 }
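
The eflags rewrite above is behavior-preserving: X86_EFLAGS_IF is 0x200, and bit 1 of EFLAGS is the always-one reserved bit, so X86_EFLAGS_IF | 0x2 is exactly the old literal 0x202, just spelled readably. A compile-time check of the arithmetic:

	#define DEMO_X86_EFLAGS_IF	0x200	/* Interrupt Flag, per the x86 headers */

	/* array size goes negative (and compilation fails) if the claim is wrong */
	typedef char demo_eflags_check[(DEMO_X86_EFLAGS_IF | 0x2) == 0x202 ? 1 : -1];
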
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
index 1010b90..0af8baa 100644
--- a/drivers/lguest/x86/switcher_32.S
+++ b/drivers/lguest/x86/switcher_32.S
@@ -6,6 +6,37 @@
  * are feeling invigorated and refreshed then the next, more challenging stage
  * can be found in "make Guest". :*/
 
+/*M:012 Lguest is meant to be simple: my rule of thumb is that 1% more LOC must
+ * gain at least 1% more performance.  Since neither LOC nor performance can be
+ * measured beforehand, it generally means implementing a feature then deciding
+ * if it's worth it.  And once it's implemented, who can say no?
+ *
+ * This is why I haven't implemented this idea myself.  I want to, but I
+ * haven't.  You could, though.
+ *
+ * The main place where lguest performance sucks is Guest page faulting.  When
+ * a Guest userspace process hits an unmapped page we switch back to the Host,
+ * walk the page tables, find it's not mapped, switch back to the Guest page
+ * fault handler, which calls a hypercall to set the page table entry, then
+ * finally returns to userspace.  That's two round-trips.
+ *
+ * If we had a small walker in the Switcher, we could quickly check the Guest
+ * page table and if the page isn't mapped, immediately reflect the fault back
+ * into the Guest.  This means the Switcher would have to know the top of the
+ * Guest page table and the page fault handler address.
+ *
+ * For simplicity, the Guest should only handle the case where the privilege
+ * level of the fault is 3 and probably only not present or write faults.  It
+ * should also detect recursive faults, and hand the original fault to the
+ * Host (which is actually really easy).
+ *
+ * Two questions remain.  Would the performance gain outweigh the complexity?
+ * And who would write the verse documenting it? :*/
+
+/*M:011 Lguest64 handles NMI.  This gave me NMI envy (until I looked at their
+ * code).  It's worth doing though, since it would let us use oprofile in the
+ * Host when a Guest is running. :*/
+
 /*S:100
  * Welcome to the Switcher itself!
  *
@@ -88,7 +119,7 @@
 
 	// All saved and there's now five steps before us:
 	// Stack, GDT, IDT, TSS
-	// And last of all the page tables are flipped.
+	// Then last of all the page tables are flipped.
 
 	// Yet beware that our stack pointer must be
 	// Always valid lest an NMI hits
@@ -103,25 +134,25 @@
 	lgdt	LGUEST_PAGES_guest_gdt_desc(%eax)
 
 	// The Guest's IDT we did partially
-	// Move to the "struct lguest_pages" as well.
+	// Copy to "struct lguest_pages" as well.
 	lidt	LGUEST_PAGES_guest_idt_desc(%eax)
 
 	// The TSS entry which controls traps
 	// Must be loaded up with "ltr" now:
+	// The GDT entry that TSS uses
+	// Changes type when we load it: damn Intel!
 	// For after we switch over our page tables
-	// It (as the rest) will be writable no more.
-	// (The GDT entry TSS needs
-	// Changes type when we load it: damn Intel!)
+	// That entry will be read-only: we'd crash.
 	movl	$(GDT_ENTRY_TSS*8), %edx
 	ltr	%dx
 
 	// Look back now, before we take this last step!
 	// The Host's TSS entry was also marked used;
-	// Let's clear it again, ere we return.
+	// Let's clear it again for our return.
 	// The GDT descriptor of the Host
 	// Points to the table after two "size" bytes
 	movl	(LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
-	// Clear the type field of "used" (byte 5, bit 2)
+	// Clear "used" from type field (byte 5, bit 2)
 	andb	$0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
 
 	// Once our page table's switched, the Guest is live!
@@ -131,7 +162,7 @@
 
 	// The page table change did one tricky thing:
 	// The Guest's register page has been mapped
-	// Writable onto our %esp (stack) --
+	// Writable under our %esp (stack) --
 	// We can simply pop off all Guest regs.
 	popl	%eax
 	popl	%ebx
@@ -152,16 +183,15 @@
 	addl	$8, %esp
 
 	// The last five stack slots hold return address
-	// And everything needed to change privilege
-	// Into the Guest privilege level of 1,
+	// And everything needed to switch privilege
+	// From Switcher's level 0 to Guest's 1,
 	// And the stack where the Guest had last left it.
 	// Interrupts are turned back on: we are Guest.
 	iret
 
-// There are two paths where we switch to the Host
+// We treat two paths to switch back to the Host
+// Yet both must save Guest state and restore Host
 // So we put the routine in a macro.
-// We are on our way home, back to the Host
-// Interrupted out of the Guest, we come here.
 #define SWITCH_TO_HOST							\
 	/* We save the Guest state: all registers first			\
 	 * Laid out just as "struct lguest_regs" defines */		\
@@ -194,7 +224,7 @@
 	movl	%esp, %eax;						\
 	andl	$(~(1 << PAGE_SHIFT - 1)), %eax;			\
 	/* Save our trap number: the switch will obscure it		\
-	 * (The Guest regs are not mapped here in the Host)		\
+	 * (In the Host the Guest regs are not mapped here)		\
 	 * %ebx holds it safe for deliver_to_host */			\
 	movl	LGUEST_PAGES_regs_trapnum(%eax), %ebx;			\
 	/* The Host GDT, IDT and stack!					\
@@ -210,9 +240,9 @@
 	/* Switch to Host's GDT, IDT. */				\
 	lgdt	LGUEST_PAGES_host_gdt_desc(%eax);			\
 	lidt	LGUEST_PAGES_host_idt_desc(%eax);			\
-	/* Restore the Host's stack where it's saved regs lie */	\
+	/* Restore the Host's stack where its saved regs lie */		\
 	movl	LGUEST_PAGES_host_sp(%eax), %esp;			\
-	/* Last the TSS: our Host is complete */			\
+	/* Last the TSS: our Host is returned */			\
 	movl	$(GDT_ENTRY_TSS*8), %edx;				\
 	ltr	%dx;							\
 	/* Restore now the regs saved right at the first. */		\
@@ -222,14 +252,15 @@
 	popl	%ds;							\
 	popl	%es
 
-// Here's where we come when the Guest has just trapped:
-// (Which trap we'll see has been pushed on the stack).
+// The first path is trod when the Guest has trapped:
+// (Which trap it was has been pushed on the stack).
 // We need only switch back, and the Host will decode
 // Why we came home, and what needs to be done.
 return_to_host:
 	SWITCH_TO_HOST
 	iret
 
+// We are led to the second path like so:
 // An interrupt, with some cause external
 // Has ajerked us rudely from the Guest's code
 // Again we must return home to the Host
@@ -238,7 +269,7 @@
 	// But now we must go home via that place
 	// Where that interrupt was supposed to go
 	// Had we not been ensconced, running the Guest.
-	// Here we see the cleverness of our stack:
+	// Here we see the trickiness of run_guest_once():
 	// The Host stack is formed like an interrupt
 	// With EIP, CS and EFLAGS layered.
 	// Interrupt handlers end with "iret"
@@ -263,7 +294,7 @@
 	xorw	%ax, %ax
 	orl	%eax, %edx
 	// Now the address of the handler's in %edx
-	// We call it now: its "iret" takes us home.
+	// We call it now: its "iret" drops us home.
 	jmp	*%edx
 
 // Every interrupt can come to us here
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6909bec..6937ef0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -188,6 +188,7 @@
 /*-------------------------- Forward declarations ---------------------------*/
 
 static void bond_send_gratuitous_arp(struct bonding *bond);
+static void bond_deinit(struct net_device *bond_dev);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -3681,7 +3682,7 @@
 	}
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
-		INIT_DELAYED_WORK(&bond->ad_work, bond_alb_monitor);
+		INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
 		/* register to receive LACPDUs */
 		bond_register_lacpdu(bond);
@@ -4449,7 +4450,7 @@
 /* De-initialize device specific data.
  * Caller must hold rtnl_lock.
  */
-void bond_deinit(struct net_device *bond_dev)
+static void bond_deinit(struct net_device *bond_dev)
 {
 	struct bonding *bond = bond_dev->priv;
 
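
The bonding hunks are the standard recipe for privatizing a function: bond_deinit() becomes static, a forward declaration is added near the top of the file because a caller precedes the definition, and the extern prototype leaves the header (next hunk). The pattern in miniature:

	static void demo_deinit(void);	/* forward declaration, file scope only */

	static void demo_destroy(void)
	{
		demo_deinit();		/* call site precedes the definition */
	}

	static void demo_deinit(void)
	{
		/* the definition may now live anywhere later in the file */
	}
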
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index d1ed14b..61c1b45 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -302,7 +302,6 @@
 int bond_create(char *name, struct bond_params *params, struct bonding **newbond);
 void bond_destroy(struct bonding *bond);
 int  bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_deinit(struct net_device *bond_dev);
 int bond_create_sysfs(void);
 void bond_destroy_sysfs(void);
 void bond_destroy_sysfs_entry(struct bonding *bond);
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 57541d2..6fd95a2 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -34,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <asm/gpio.h>
@@ -53,12 +54,6 @@
 MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 
 #define CPMAC_VERSION "0.5.0"
-/* stolen from net/ieee80211.h */
-#ifndef MAC_FMT
-#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \
-		   ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5]
-#endif
 /* frame size + 802.1q tag */
 #define CPMAC_SKB_SIZE		(ETH_FRAME_LEN + 4)
 #define CPMAC_QUEUES	8
@@ -211,6 +206,7 @@
 	struct net_device *dev;
 	struct work_struct reset_work;
 	struct platform_device *pdev;
+	struct napi_struct napi;
 };
 
 static irqreturn_t cpmac_irq(int, void *);
@@ -362,47 +358,48 @@
 	}
 }
 
-static struct sk_buff *cpmac_rx_one(struct net_device *dev,
-				    struct cpmac_priv *priv,
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 				    struct cpmac_desc *desc)
 {
 	struct sk_buff *skb, *result = NULL;
 
 	if (unlikely(netif_msg_hw(priv)))
-		cpmac_dump_desc(dev, desc);
+		cpmac_dump_desc(priv->dev, desc);
 	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
 	if (unlikely(!desc->datalen)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
-			       dev->name);
+			       priv->dev->name);
 		return NULL;
 	}
 
-	skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+	skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
 	if (likely(skb)) {
 		skb_reserve(skb, 2);
 		skb_put(desc->skb, desc->datalen);
-		desc->skb->protocol = eth_type_trans(desc->skb, dev);
+		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
 		desc->skb->ip_summed = CHECKSUM_NONE;
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += desc->datalen;
+		priv->dev->stats.rx_packets++;
+		priv->dev->stats.rx_bytes += desc->datalen;
 		result = desc->skb;
-		dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
 		desc->skb = skb;
-		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
 						    CPMAC_SKB_SIZE,
 						    DMA_FROM_DEVICE);
 		desc->hw_data = (u32)desc->data_mapping;
 		if (unlikely(netif_msg_pktdata(priv))) {
-			printk(KERN_DEBUG "%s: received packet:\n", dev->name);
-			cpmac_dump_skb(dev, result);
+			printk(KERN_DEBUG "%s: received packet:\n",
+			       priv->dev->name);
+			cpmac_dump_skb(priv->dev, result);
 		}
 	} else {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING
-			       "%s: low on skbs, dropping packet\n", dev->name);
-		dev->stats.rx_dropped++;
+			       "%s: low on skbs, dropping packet\n",
+			       priv->dev->name);
+		priv->dev->stats.rx_dropped++;
 	}
 
 	desc->buflen = CPMAC_SKB_SIZE;
@@ -411,25 +408,25 @@
 	return result;
 }
 
-static int cpmac_poll(struct net_device *dev, int *budget)
+static int cpmac_poll(struct napi_struct *napi, int budget)
 {
 	struct sk_buff *skb;
 	struct cpmac_desc *desc;
-	int received = 0, quota = min(dev->quota, *budget);
-	struct cpmac_priv *priv = netdev_priv(dev);
+	int received = 0;
+	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
 
 	spin_lock(&priv->rx_lock);
 	if (unlikely(!priv->rx_head)) {
 		if (netif_msg_rx_err(priv) && net_ratelimit())
 			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
-			       dev->name);
-		netif_rx_complete(dev);
+			       priv->dev->name);
+		netif_rx_complete(priv->dev, napi);
 		return 0;
 	}
 
 	desc = priv->rx_head;
-	while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
-		skb = cpmac_rx_one(dev, priv, desc);
+	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+		skb = cpmac_rx_one(priv, desc);
 		if (likely(skb)) {
 			netif_receive_skb(skb);
 			received++;
@@ -439,13 +436,11 @@
 
 	priv->rx_head = desc;
 	spin_unlock(&priv->rx_lock);
-	*budget -= received;
-	dev->quota -= received;
 	if (unlikely(netif_msg_rx_status(priv)))
-		printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name,
-		       received);
+		printk(KERN_DEBUG "%s: poll processed %d packets\n",
+		       priv->dev->name, received);
 	if (desc->dataflags & CPMAC_OWN) {
-		netif_rx_complete(dev);
+		netif_rx_complete(priv->dev, napi);
 		cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
 		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
 		return 0;
@@ -655,6 +650,7 @@
 	spin_unlock(&priv->rx_lock);
 	cpmac_clear_tx(priv->dev);
 	cpmac_hw_start(priv->dev);
+	napi_enable(&priv->napi);
 	netif_start_queue(priv->dev);
 }
 
@@ -681,8 +677,10 @@
 
 	if (status & MAC_INT_RX) {
 		queue = (status >> 8) & 7;
-		netif_rx_schedule(dev);
-		cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+		if (netif_rx_schedule_prep(dev, &priv->napi)) {
+			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+			__netif_rx_schedule(dev, &priv->napi);
+		}
 	}
 
 	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
@@ -692,6 +690,7 @@
 			printk(KERN_ERR "%s: hw error, resetting...\n",
 			       dev->name);
 		netif_stop_queue(dev);
+		napi_disable(&priv->napi);
 		cpmac_hw_stop(dev);
 		schedule_work(&priv->reset_work);
 		if (unlikely(netif_msg_hw(priv)))
@@ -849,6 +848,15 @@
 	spin_unlock(&priv->lock);
 }
 
+static int cpmac_link_update(struct net_device *dev,
+			     struct fixed_phy_status *status)
+{
+	status->link = 1;
+	status->speed = 100;
+	status->duplex = 1;
+	return 0;
+}
+
 static int cpmac_open(struct net_device *dev)
 {
 	int i, size, res;
@@ -857,15 +865,6 @@
 	struct cpmac_desc *desc;
 	struct sk_buff *skb;
 
-	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
-				0, PHY_INTERFACE_MODE_MII);
-	if (IS_ERR(priv->phy)) {
-		if (netif_msg_drv(priv))
-			printk(KERN_ERR "%s: Could not attach to PHY\n",
-			       dev->name);
-		return PTR_ERR(priv->phy);
-	}
-
 	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
 	if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
 		if (netif_msg_drv(priv))
@@ -927,6 +926,7 @@
 	INIT_WORK(&priv->reset_work, cpmac_hw_error);
 	cpmac_hw_start(dev);
 
+	napi_enable(&priv->napi);
 	priv->phy->state = PHY_CHANGELINK;
 	phy_start(priv->phy);
 
@@ -951,8 +951,6 @@
 	release_mem_region(mem->start, mem->end - mem->start);
 
 fail_reserve:
-	phy_disconnect(priv->phy);
-
 	return res;
 }
 
@@ -965,9 +963,8 @@
 	netif_stop_queue(dev);
 
 	cancel_work_sync(&priv->reset_work);
+	napi_disable(&priv->napi);
 	phy_stop(priv->phy);
-	phy_disconnect(priv->phy);
-	priv->phy = NULL;
 
 	cpmac_hw_stop(dev);
 
@@ -1001,11 +998,13 @@
 
 static int __devinit cpmac_probe(struct platform_device *pdev)
 {
-	int rc, phy_id;
+	int rc, phy_id, i;
 	struct resource *mem;
 	struct cpmac_priv *priv;
 	struct net_device *dev;
 	struct plat_cpmac_data *pdata;
+	struct fixed_info *fixed_phy;
+	DECLARE_MAC_BUF(mac);
 
 	pdata = pdev->dev.platform_data;
 
@@ -1053,21 +1052,51 @@
 	dev->set_multicast_list = cpmac_set_multicast_list;
 	dev->tx_timeout         = cpmac_tx_timeout;
 	dev->ethtool_ops        = &cpmac_ethtool_ops;
-	dev->poll = cpmac_poll;
-	dev->weight = 64;
 	dev->features |= NETIF_F_MULTI_QUEUE;
 
+	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->rx_lock);
 	priv->dev = dev;
 	priv->ring_size = 64;
 	priv->msg_enable = netif_msg_init(debug_level, 0xff);
 	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
+
 	if (phy_id == 31) {
-		snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
-			 cpmac_mii.id, phy_id);
-	} else
-		snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
+		snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, cpmac_mii.id,
+			 phy_id);
+	} else {
+		/* Let's try to get a free fixed phy... */
+		for (i = 0; i < MAX_PHY_AMNT; i++) {
+			fixed_phy = fixed_mdio_get_phydev(i);
+			if (!fixed_phy)
+				continue;
+			if (!fixed_phy->phydev->attached_dev) {
+				strncpy(priv->phy_name,
+					fixed_phy->phydev->dev.bus_id,
+					BUS_ID_SIZE);
+				fixed_mdio_set_link_update(fixed_phy->phydev,
+							   &cpmac_link_update);
+				goto phy_found;
+			}
+		}
+		if (netif_msg_drv(priv))
+			printk(KERN_ERR "%s: Could not find fixed PHY\n",
+			       dev->name);
+		rc = -ENODEV;
+		goto fail;
+	}
+
+phy_found:
+	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
+				PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(priv->phy)) {
+		if (netif_msg_drv(priv))
+			printk(KERN_ERR "%s: Could not attach to PHY\n",
+			       dev->name);
+		return PTR_ERR(priv->phy);
+	}
 
 	if ((rc = register_netdev(dev))) {
 		printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
@@ -1077,9 +1106,9 @@
 
 	if (netif_msg_probe(priv)) {
 		printk(KERN_INFO
-		       "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: "
-		       MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq,
-		       priv->phy_name, MAC_ARG(dev->dev_addr));
+		       "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
+		       "mac: %s)\n", dev->name, (void *)mem->start, dev->irq,
+		       priv->phy_name, print_mac(mac, dev->dev_addr));
 	}
 	return 0;
 
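The cpmac changes above are a conversion to the then-new napi_struct interface: netif_napi_add() replaces the old dev->poll/dev->weight fields, napi_enable()/napi_disable() bracket the open/stop and reset paths, and the interrupt handler only masks RX once netif_rx_schedule_prep() has won the race to schedule polling. A minimal sketch of the same pattern, using hypothetical my_* names and helpers rather than the actual cpmac code:

struct my_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int received = my_rx_ring(priv, budget);	/* hypothetical RX work */

	if (received < budget) {
		/* ring drained: stop polling, unmask the RX interrupt */
		netif_rx_complete(priv->dev, napi);
		my_unmask_rx_irq(priv);			/* hypothetical */
	}
	return received;
}

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *priv = netdev_priv(dev);

	/* mask the interrupt only if we won the right to schedule polling */
	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		my_mask_rx_irq(priv);			/* hypothetical */
		__netif_rx_schedule(dev, &priv->napi);
	}
	return IRQ_HANDLED;
}

/* at probe time: netif_napi_add(dev, &priv->napi, my_poll, 64); */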
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index b557bb4..4b4b74e 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0078"
+#define DRV_VERSION	"EHEA_0079"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 2809c99..0a7e789 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2329,7 +2329,7 @@
 {
 	int i;
 
-	for (i = 0; i < port->num_def_qps; i++)
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
 		napi_disable(&port->port_res[i].napi);
 }
 
@@ -2337,7 +2337,7 @@
 {
 	int i;
 
-	for (i = 0; i < port->num_def_qps; i++)
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
 		napi_enable(&port->port_res[i].napi);
 }
 
@@ -2373,8 +2373,6 @@
 	ehea_drop_multicast_list(dev);
 	ehea_free_interrupts(dev);
 
-	port_napi_disable(port);
-
 	port->state = EHEA_PORT_DOWN;
 
 	ret = ehea_clean_all_portres(port);
@@ -2396,6 +2394,7 @@
 	flush_scheduled_work();
 	down(&port->port_lock);
 	netif_stop_queue(dev);
+	port_napi_disable(port);
 	ret = ehea_down(dev);
 	up(&port->port_lock);
 	return ret;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 70ddf1a..92ce2e3 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5597,6 +5597,22 @@
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
 		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
 	},
+	{	/* MCP77 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP77 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP77 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
+	{	/* MCP77 Ethernet Controller */
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+	},
 	{0,},
 };
 
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 6888723..dbd23bb 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -55,6 +55,26 @@
 		   DrvVer);
 MODULE_LICENSE("GPL");
 
+//variable record -- index by leading revision/length
+//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
+static unsigned short DefaultPhyParam[] = {
+	// 11/12/03 IP1000A v1-3 rev=0x40
+	/*--------------------------------------------------------------------------
+	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
+				 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
+				 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7,  9, 0x0700,
+	  --------------------------------------------------------------------------*/
+	// 12/17/03 IP1000A v1-4 rev=0x40
+	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
+	31, 0x0000, 30, 0x005e, 9, 0x0700,
+	// 01/09/04 IP1000A v1-5 rev=0x41
+	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e,
+	31, 0x0000, 30, 0x005e, 9, 0x0700,
+	0x0000
+};
+
 static const char *ipg_brand_name[] = {
 	"IC PLUS IP1000 1000/100/10 based NIC",
 	"Sundance Technology ST2021 based NIC",
@@ -990,7 +1010,7 @@
 }
 
 /* Provides statistical information about the IPG NIC. */
-struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
+static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
 {
 	struct ipg_nic_private *sp = netdev_priv(dev);
 	void __iomem *ioaddr = sp->ioaddr;
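The comment above defines the record format: a header word whose high byte is the PHY revision and whose low byte is N*4 for N address/data pairs, with a 0x0000 terminator (so 0x4000 | (07 * 4) means revision 0x40 carrying seven pairs). A sketch of one possible table walker, assuming a hypothetical write_phy(reg, val) MDIO helper rather than quoting ipg.c's actual consumer:

static void apply_phy_params(const unsigned short *p, unsigned int revision)
{
	while (*p != 0x0000) {
		unsigned int rev = *p >> 8;		/* revision byte */
		unsigned int pairs = (*p & 0xff) / 4;	/* N address/data pairs */
		unsigned int i;

		p++;
		if (rev != revision) {
			p += pairs * 2;			/* skip this record */
			continue;
		}
		for (i = 0; i < pairs; i++, p += 2)
			write_phy(p[0], p[1]);		/* hypothetical */
	}
}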
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index e418b90..d5d092c 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -833,24 +833,4 @@
 	struct delayed_work task;
 };
 
-//variable record -- index by leading revision/length
-//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
-unsigned short DefaultPhyParam[] = {
-	// 11/12/03 IP1000A v1-3 rev=0x40
-	/*--------------------------------------------------------------------------
-	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
-				 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
-				 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7,  9, 0x0700,
-	  --------------------------------------------------------------------------*/
-	// 12/17/03 IP1000A v1-4 rev=0x40
-	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
-	    0x0000,
-	30, 0x005e, 9, 0x0700,
-	// 01/09/04 IP1000A v1-5 rev=0x41
-	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
-	    0x0000,
-	30, 0x005e, 9, 0x0700,
-	0x0000
-};
-
 #endif				/* __LINUX_IPG_H */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 9531171..87cde06 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -864,6 +864,7 @@
 
 	np = netdev_priv(dev);
 	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+	np->dev = dev;
 
 	np->pci_dev = pdev;
 	pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index cd991a0..1ebe325 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -512,11 +512,19 @@
 	}
 	tmp = le32_to_cpu(u.init_c->max_transfer_size);
 	if (tmp < dev->hard_mtu) {
-		dev_err(&intf->dev,
-			"dev can't take %u byte packets (max %u)\n",
-			dev->hard_mtu, tmp);
-		retval = -EINVAL;
-		goto fail_and_release;
+		if (tmp <= net->hard_header_len) {
+			dev_err(&intf->dev,
+				"dev can't take %u byte packets (max %u)\n",
+				dev->hard_mtu, tmp);
+			retval = -EINVAL;
+			goto fail_and_release;
+		}
+		dev_warn(&intf->dev,
+			 "dev can't take %u byte packets (max %u), "
+			 "adjusting MTU to %u\n",
+			 dev->hard_mtu, tmp,
+			 tmp - net->hard_header_len);
+		dev->hard_mtu = tmp;
+		net->mtu = dev->hard_mtu - net->hard_header_len;
 	}
 
 	/* REVISIT:  peripheral "alignment" request is ignored ... */
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 1046cbe..eb31b73 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -403,9 +403,9 @@
 {
 	struct mb_cache_entry *ce;
 
-	atomic_inc(&cache->c_entry_count);
 	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
 	if (ce) {
+		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
 		INIT_LIST_HEAD(&ce->e_block_list);
 		ce->e_cache = cache;
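The mbcache fix above applies a general accounting rule: bump the counter only after the allocation it accounts for has succeeded, so a failed kmem_cache_alloc() can no longer leave c_entry_count permanently inflated. Schematically, with hypothetical my_* types:

static struct my_entry *my_entry_alloc(struct my_cache *cache)
{
	struct my_entry *e = kmem_cache_alloc(cache->slab, GFP_KERNEL);

	if (!e)
		return NULL;		/* nothing allocated, nothing counted */
	atomic_inc(&cache->entry_count);	/* count only live entries */
	return e;
}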
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 680c429..4e57fcf 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -171,7 +171,8 @@
 	struct dentry *dentry = filp->f_dentry;
 	struct ctl_table_header *head;
 	struct ctl_table *table;
-	ssize_t error, res;
+	ssize_t error;
+	size_t res;
 
 	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
 	/* Has the sysctl entry disappeared on us? */
@@ -209,7 +210,8 @@
 	struct dentry *dentry = filp->f_dentry;
 	struct ctl_table_header *head;
 	struct ctl_table *table;
-	ssize_t error, res;
+	ssize_t error;
+	size_t res;
 
 	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
 	/* Has the sysctl entry disappeared on us? */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index f948491..9c5092b 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -18,12 +18,17 @@
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
 
+#define LGUEST_TRAP_ENTRY 0x1F
+
+#ifndef __ASSEMBLY__
+#include <asm/hw_irq.h>
+
 /*G:031 First, how does our Guest contact the Host to ask for privileged
  * operations?  There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
  * Our hypercall mechanism uses the highest unused trap code (traps 32 and
- * above are used by real hardware interrupts).  Seventeen hypercalls are
+ * above are used by real hardware interrupts).  Fifteen hypercalls are
  * available: the hypercall number is put in the %eax register, and the
  * arguments (when required) are placed in %edx, %ebx and %ecx.  If a return
  * value makes sense, it's returned in %eax.
@@ -31,20 +36,15 @@
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure.  This reflects Winston Churchill's
  * definition of a gentleman: "someone who is only rude intentionally". */
-#define LGUEST_TRAP_ENTRY 0x1F
-
-#ifndef __ASSEMBLY__
-#include <asm/hw_irq.h>
-
 static inline unsigned long
 hcall(unsigned long call,
       unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	/* "int" is the Intel instruction to trigger a trap. */
 	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
-		       /* The call is in %eax (aka "a"), and can be replaced */
+		       /* The call in %eax (aka "a") might be overwritten */
 		     : "=a"(call)
-		       /* The other arguments are in %eax, %edx, %ebx & %ecx */
+		       /* The arguments are in %eax, %edx, %ebx & %ecx */
 		     : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
 		       /* "memory" means this might write somewhere in memory.
 			* This isn't true for all calls, but it's safe to tell
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c811c8b..c68b67b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -101,6 +101,12 @@
 #undef __must_check
 #define __must_check
 #endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
 
 /*
  * Allow us to avoid 'defined but not used' warnings on functions and data,
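As a hedged illustration of what the new option controls (old_interface() is made up, not a real symbol): with CONFIG_ENABLE_WARN_DEPRECATED unset, both markers expand to nothing, so a declaration like the following stops emitting "'old_interface' is deprecated" warnings at its call sites.

/* warns at call sites only when CONFIG_ENABLE_WARN_DEPRECATED=y */
extern int old_interface(int arg) __deprecated;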
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 268c5a4..33d6aaf 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -42,15 +42,15 @@
 	init_waitqueue_head(&x->wait);
 }
 
-extern void FASTCALL(wait_for_completion(struct completion *));
-extern int FASTCALL(wait_for_completion_interruptible(struct completion *x));
-extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x,
-						   unsigned long timeout));
-extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout(
-			struct completion *x, unsigned long timeout));
+extern void wait_for_completion(struct completion *);
+extern int wait_for_completion_interruptible(struct completion *x);
+extern unsigned long wait_for_completion_timeout(struct completion *x,
+						   unsigned long timeout);
+extern unsigned long wait_for_completion_interruptible_timeout(
+			struct completion *x, unsigned long timeout);
 
-extern void FASTCALL(complete(struct completion *));
-extern void FASTCALL(complete_all(struct completion *));
+extern void complete(struct completion *);
+extern void complete_all(struct completion *);
 
 #define INIT_COMPLETION(x)	((x).done = 0)
 
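Only the fastcall calling-convention annotation is gone here; the semantics are unchanged. For reference, a minimal producer/consumer sketch of the completion API, with hypothetical my_* names:

static DECLARE_COMPLETION(my_done);

static int my_thread(void *unused)
{
	my_work();			/* hypothetical: the actual job */
	complete(&my_done);		/* wake exactly one waiter */
	return 0;
}

static void my_wait_for_it(void)
{
	/* sleeps uninterruptibly until my_thread() calls complete() */
	wait_for_completion(&my_done);
}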
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 8beb291..175e63f 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -12,8 +12,8 @@
 #define LG_CLOCK_MAX_DELTA	ULONG_MAX
 
 /*G:032 The second method of communicating with the Host is via "struct
- * lguest_data".  The Guest's very first hypercall is to tell the Host where
- * this is, and then the Guest and Host both publish information in it. :*/
+ * lguest_data".  Once the Guest's initialization hypercall tells the Host where
+ * this is, the Guest and Host both publish information in it. :*/
 struct lguest_data
 {
 	/* 512 == enabled (same as eflags in normal hardware).  The Guest
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index 61e1e3e..697104d 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -1,17 +1,7 @@
-#ifndef _ASM_LGUEST_USER
-#define _ASM_LGUEST_USER
+#ifndef _LINUX_LGUEST_LAUNCHER
+#define _LINUX_LGUEST_LAUNCHER
 /* Everything the "lguest" userspace program needs to know. */
 #include <linux/types.h>
-/* They can register up to 32 arrays of lguest_dma. */
-#define LGUEST_MAX_DMA		32
-/* At most we can dma 16 lguest_dma in one op. */
-#define LGUEST_MAX_DMA_SECTIONS	16
-
-/* How many devices?  Assume each one wants up to two dma arrays per device. */
-#define LGUEST_MAX_DEVICES (LGUEST_MAX_DMA/2)
-
-/* Where the Host expects the Guest to SEND_DMA console output to. */
-#define LGUEST_CONSOLE_DMA_KEY 0
 
 /*D:010
  * Drivers
@@ -20,7 +10,11 @@
  * real devices (think of the damage it could do!) we provide virtual devices.
  * We could emulate a PCI bus with various devices on it, but that is a fairly
  * complex burden for the Host and suboptimal for the Guest, so we have our own
- * "lguest" bus and simple drivers.
+ * simple lguest bus and we use "virtio" drivers.  These drivers need a set of
+ * routines from us which will actually do the virtual I/O, but they handle all
+ * the net/block/console stuff themselves.  This means that if we want to add
+ * a new device, we simply need to write a new virtio driver and create support
+ * for it in the Launcher: this code won't need to change.
  *
  * Devices are described by a simplified ID, a status byte, and some "config"
  * bytes which describe this device's configuration.  This is placed by the
@@ -51,9 +45,9 @@
 /* Write command first word is a request. */
 enum lguest_req
 {
-	LHREQ_INITIALIZE, /* + pfnlimit, pgdir, start, pageoffset */
+	LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */
 	LHREQ_GETDMA, /* No longer used */
 	LHREQ_IRQ, /* + irq */
 	LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
 };
-#endif /* _ASM_LGUEST_USER */
+#endif /* _LINUX_LGUEST_LAUNCHER */
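Matching the updated comment, the four words following LHREQ_INITIALIZE are base, pfnlimit, pgdir and start. A sketch of how a launcher might issue the request, assuming fd is an open /dev/lguest descriptor and the four values are already computed (illustrative, not a quote of lguest.c; needs <unistd.h> and <err.h>):

unsigned long args[] = { LHREQ_INITIALIZE, base, pfnlimit, pgdir, start };

if (write(fd, args, sizeof(args)) < 0)
	err(1, "LHREQ_INITIALIZE failed");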
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4e10a07..e44aac8c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1236,6 +1236,10 @@
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE       0x0560
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE       0x056C
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE       0x0759
+#define PCI_DEVICE_ID_NVIDIA_NVENET_32              0x0760
+#define PCI_DEVICE_ID_NVIDIA_NVENET_33              0x0761
+#define PCI_DEVICE_ID_NVIDIA_NVENET_34              0x0762
+#define PCI_DEVICE_ID_NVIDIA_NVENET_35              0x0763
 
 #define PCI_VENDOR_ID_IMS		0x10e0
 #define PCI_DEVICE_ID_IMS_TT128		0x9128
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 13df99f..24e08d1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -828,12 +828,17 @@
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
+#ifdef CONFIG_SMP
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest,
-			unsigned long max_nr_move, unsigned long max_load_move,
+			struct rq *busiest, unsigned long max_load_move,
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *all_pinned, int *this_best_prio);
 
+	int (*move_one_task) (struct rq *this_rq, int this_cpu,
+			      struct rq *busiest, struct sched_domain *sd,
+			      enum cpu_idle_type idle);
+#endif
+
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p);
 	void (*task_new) (struct rq *rq, struct task_struct *p);
@@ -1196,7 +1201,7 @@
 	return 0;
 }
 
-static inline int rt_task(struct task_struct *p)
+static inline int rt_task(const struct task_struct *p)
 {
 	return rt_prio(p->prio);
 }
@@ -1211,22 +1216,22 @@
 	tsk->signal->__pgrp = pgrp;
 }
 
-static inline struct pid *task_pid(struct task_struct *task)
+static inline struct pid *task_pid(const struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
 }
 
-static inline struct pid *task_tgid(struct task_struct *task)
+static inline struct pid *task_tgid(const struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
-static inline struct pid *task_pgrp(struct task_struct *task)
+static inline struct pid *task_pgrp(const struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_PGID].pid;
 }
 
-static inline struct pid *task_session(struct task_struct *task)
+static inline struct pid *task_session(const struct task_struct *task)
 {
 	return task->group_leader->pids[PIDTYPE_SID].pid;
 }
@@ -1255,7 +1260,7 @@
  * see also pid_nr() etc in include/linux/pid.h
  */
 
-static inline pid_t task_pid_nr(struct task_struct *tsk)
+static inline pid_t task_pid_nr(const struct task_struct *tsk)
 {
 	return tsk->pid;
 }
@@ -1268,7 +1273,7 @@
 }
 
 
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
+static inline pid_t task_tgid_nr(const struct task_struct *tsk)
 {
 	return tsk->tgid;
 }
@@ -1281,7 +1286,7 @@
 }
 
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr(const struct task_struct *tsk)
 {
 	return tsk->signal->__pgrp;
 }
@@ -1294,7 +1299,7 @@
 }
 
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr(const struct task_struct *tsk)
 {
 	return tsk->signal->__session;
 }
@@ -1321,7 +1326,7 @@
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
  */
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
 {
 	return p->pids[PIDTYPE_PID].pid != NULL;
 }
@@ -1332,7 +1337,7 @@
  *
  * Check if a task structure is the first user space task the kernel created.
  */
-static inline int is_global_init(struct task_struct *tsk)
+static inline int is_global_init(const struct task_struct *tsk)
 {
 	return tsk->pid == 1;
 }
@@ -1469,7 +1474,7 @@
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(struct task_struct *p)
+static inline int rt_mutex_getprio(const struct task_struct *p)
 {
 	return p->normal_prio;
 }
@@ -1721,7 +1726,7 @@
  * all we care about is that we have a task with the appropriate
  * pid, we don't actually care if we have the right task.
  */
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline int has_group_leader_pid(const struct task_struct *p)
 {
 	return p->pid == p->tgid;
 }
@@ -1738,7 +1743,7 @@
 			  struct task_struct, thread_group);
 }
 
-static inline int thread_group_empty(struct task_struct *p)
+static inline int thread_group_empty(const struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
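The const-qualification added above is what lets read-only helpers take const task pointers without casts; for example, this hypothetical helper compiles only because task_pid_nr() and pid_alive() now accept a const struct task_struct *:

static void my_report_task(const struct task_struct *p)
{
	printk(KERN_DEBUG "%s: pid %d, alive=%d\n",
	       p->comm, task_pid_nr(p), pid_alive(p));
}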
diff --git a/init/Kconfig b/init/Kconfig
index b7dffa8..8b88d0b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -322,7 +322,6 @@
 config FAIR_GROUP_SCHED
 	bool "Fair group CPU scheduler"
 	default y
-	depends on EXPERIMENTAL
 	help
 	  This feature lets the CPU scheduler recognize task groups and control CPU
 	  bandwidth allocation to such task groups.
diff --git a/kernel/profile.c b/kernel/profile.c
index 631b75c..5e95330 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -60,6 +60,7 @@
 	int par;
 
 	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+#ifdef CONFIG_SCHEDSTATS
 		prof_on = SLEEP_PROFILING;
 		if (str[strlen(sleepstr)] == ',')
 			str += strlen(sleepstr) + 1;
@@ -68,6 +69,10 @@
 		printk(KERN_INFO
 			"kernel sleep profiling enabled (shift: %ld)\n",
 			prof_shift);
+#else
+		printk(KERN_WARNING
+			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
+#endif /* CONFIG_SCHEDSTATS */
 	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
 		prof_on = SCHED_PROFILING;
 		if (str[strlen(schedstr)] == ',')
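For example, booting a CONFIG_SCHEDSTATS kernel with profile=sleep,2 enables sleep profiling with a shift of 2, while the same option on a kernel built without CONFIG_SCHEDSTATS now prints the warning above instead of quietly enabling a mode that cannot collect data.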
diff --git a/kernel/sched.c b/kernel/sched.c
index 2810e56..b4fbbc4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -66,6 +66,7 @@
 #include <linux/pagemap.h>
 
 #include <asm/tlb.h>
+#include <asm/irq_regs.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -837,11 +838,18 @@
 	struct task_struct *(*next)(void *);
 };
 
-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		      unsigned long max_nr_move, unsigned long max_load_move,
-		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *all_pinned, unsigned long *load_moved,
-		      int *this_best_prio, struct rq_iterator *iterator);
+#ifdef CONFIG_SMP
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      unsigned long max_load_move, struct sched_domain *sd,
+	      enum cpu_idle_type idle, int *all_pinned,
+	      int *this_best_prio, struct rq_iterator *iterator);
+
+static int
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle,
+		   struct rq_iterator *iterator);
+#endif
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -2223,17 +2231,17 @@
 	return 1;
 }
 
-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		      unsigned long max_nr_move, unsigned long max_load_move,
-		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *all_pinned, unsigned long *load_moved,
-		      int *this_best_prio, struct rq_iterator *iterator)
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      unsigned long max_load_move, struct sched_domain *sd,
+	      enum cpu_idle_type idle, int *all_pinned,
+	      int *this_best_prio, struct rq_iterator *iterator)
 {
 	int pulled = 0, pinned = 0, skip_for_load;
 	struct task_struct *p;
 	long rem_load_move = max_load_move;
 
-	if (max_nr_move == 0 || max_load_move == 0)
+	if (max_load_move == 0)
 		goto out;
 
 	pinned = 1;
@@ -2266,7 +2274,7 @@
 	 * We only want to steal up to the prescribed amount of
 	 * weighted load.
 	 */
-	if (pulled < max_nr_move && rem_load_move > 0) {
+	if (rem_load_move > 0) {
 		if (p->prio < *this_best_prio)
 			*this_best_prio = p->prio;
 		p = iterator->next(iterator->arg);
@@ -2274,7 +2282,7 @@
 	}
 out:
 	/*
-	 * Right now, this is the only place pull_task() is called,
+	 * Right now, this is one of only two places pull_task() is called,
 	 * so we can safely collect pull_task() stats here rather than
 	 * inside pull_task().
 	 */
@@ -2282,8 +2290,8 @@
 
 	if (all_pinned)
 		*all_pinned = pinned;
-	*load_moved = max_load_move - rem_load_move;
-	return pulled;
+
+	return max_load_move - rem_load_move;
 }
 
 /*
@@ -2305,7 +2313,7 @@
 	do {
 		total_load_moved +=
 			class->load_balance(this_rq, this_cpu, busiest,
-				ULONG_MAX, max_load_move - total_load_moved,
+				max_load_move - total_load_moved,
 				sd, idle, all_pinned, &this_best_prio);
 		class = class->next;
 	} while (class && max_load_move > total_load_moved);
@@ -2313,6 +2321,32 @@
 	return total_load_moved > 0;
 }
 
+static int
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle,
+		   struct rq_iterator *iterator)
+{
+	struct task_struct *p = iterator->start(iterator->arg);
+	int pinned = 0;
+
+	while (p) {
+		if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
+			pull_task(busiest, p, this_rq, this_cpu);
+			/*
+			 * Right now, this is one of only two places
+			 * pull_task() is called, so we can safely collect pull_task()
+			 * stats here rather than inside pull_task().
+			 */
+			schedstat_inc(sd, lb_gained[idle]);
+
+			return 1;
+		}
+		p = iterator->next(iterator->arg);
+	}
+
+	return 0;
+}
+
 /*
  * move_one_task tries to move exactly one task from busiest to this_rq, as
  * part of active balancing operations within "domain".
@@ -2324,12 +2358,9 @@
 			 struct sched_domain *sd, enum cpu_idle_type idle)
 {
 	const struct sched_class *class;
-	int this_best_prio = MAX_PRIO;
 
 	for (class = sched_class_highest; class; class = class->next)
-		if (class->load_balance(this_rq, this_cpu, busiest,
-					1, ULONG_MAX, sd, idle, NULL,
-					&this_best_prio))
+		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
 			return 1;
 
 	return 0;
@@ -3266,18 +3297,6 @@
 {
 }
 
-/* Avoid "used but not defined" warning on UP */
-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		      unsigned long max_nr_move, unsigned long max_load_move,
-		      struct sched_domain *sd, enum cpu_idle_type idle,
-		      int *all_pinned, unsigned long *load_moved,
-		      int *this_best_prio, struct rq_iterator *iterator)
-{
-	*load_moved = 0;
-
-	return 0;
-}
-
 #endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -3507,12 +3526,19 @@
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
-		prev->comm, preempt_count(), task_pid_nr(prev));
+	struct pt_regs *regs = get_irq_regs();
+
+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+		prev->comm, prev->pid, preempt_count());
+
 	debug_show_held_locks(prev);
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-	dump_stack();
+
+	if (regs)
+		show_regs(regs);
+	else
+		dump_stack();
 }
 
 /*
@@ -3820,7 +3846,7 @@
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
-void fastcall complete(struct completion *x)
+void complete(struct completion *x)
 {
 	unsigned long flags;
 
@@ -3832,7 +3858,7 @@
 }
 EXPORT_SYMBOL(complete);
 
-void fastcall complete_all(struct completion *x)
+void complete_all(struct completion *x)
 {
 	unsigned long flags;
 
@@ -3884,13 +3910,13 @@
 	return timeout;
 }
 
-void fastcall __sched wait_for_completion(struct completion *x)
+void __sched wait_for_completion(struct completion *x)
 {
 	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_for_completion);
 
-unsigned long fastcall __sched
+unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
 	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
@@ -3906,7 +3932,7 @@
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible);
 
-unsigned long fastcall __sched
+unsigned long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
 					  unsigned long timeout)
 {
@@ -5461,11 +5487,12 @@
 	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
 	char buf[32];
 
+	WARN_ON(sd_ctl_dir[0].child);
+	sd_ctl_dir[0].child = entry;
+
 	if (entry == NULL)
 		return;
 
-	sd_ctl_dir[0].child = entry;
-
 	for_each_online_cpu(i) {
 		snprintf(buf, 32, "cpu%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
@@ -5473,14 +5500,19 @@
 		entry->child = sd_alloc_ctl_cpu_table(i);
 		entry++;
 	}
+
+	WARN_ON(sd_sysctl_header);
 	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
 
+/* may be called multiple times per register */
 static void unregister_sched_domain_sysctl(void)
 {
-	unregister_sysctl_table(sd_sysctl_header);
+	if (sd_sysctl_header)
+		unregister_sysctl_table(sd_sysctl_header);
 	sd_sysctl_header = NULL;
-	sd_free_ctl_entry(&sd_ctl_dir[0].child);
+	if (sd_ctl_dir[0].child)
+		sd_free_ctl_entry(&sd_ctl_dir[0].child);
 }
 #else
 static void register_sched_domain_sysctl(void)
@@ -5611,6 +5643,82 @@
 EXPORT_SYMBOL(nr_cpu_ids);
 
 #ifdef CONFIG_SCHED_DEBUG
+
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
+{
+	struct sched_group *group = sd->groups;
+	cpumask_t groupmask;
+	char str[NR_CPUS];
+
+	cpumask_scnprintf(str, NR_CPUS, sd->span);
+	cpus_clear(groupmask);
+
+	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
+
+	if (!(sd->flags & SD_LOAD_BALANCE)) {
+		printk("does not load-balance\n");
+		if (sd->parent)
+			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+					" has parent");
+		return -1;
+	}
+
+	printk(KERN_CONT "span %s\n", str);
+
+	if (!cpu_isset(cpu, sd->span)) {
+		printk(KERN_ERR "ERROR: domain->span does not contain "
+				"CPU%d\n", cpu);
+	}
+	if (!cpu_isset(cpu, group->cpumask)) {
+		printk(KERN_ERR "ERROR: domain->groups does not contain"
+				" CPU%d\n", cpu);
+	}
+
+	printk(KERN_DEBUG "%*s groups:", level + 1, "");
+	do {
+		if (!group) {
+			printk("\n");
+			printk(KERN_ERR "ERROR: group is NULL\n");
+			break;
+		}
+
+		if (!group->__cpu_power) {
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: domain->cpu_power not "
+					"set\n");
+			break;
+		}
+
+		if (!cpus_weight(group->cpumask)) {
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: empty group\n");
+			break;
+		}
+
+		if (cpus_intersects(groupmask, group->cpumask)) {
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: repeated CPUs\n");
+			break;
+		}
+
+		cpus_or(groupmask, groupmask, group->cpumask);
+
+		cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+		printk(KERN_CONT " %s", str);
+
+		group = group->next;
+	} while (group != sd->groups);
+	printk(KERN_CONT "\n");
+
+	if (!cpus_equal(sd->span, groupmask))
+		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+
+	if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
+		printk(KERN_ERR "ERROR: parent span is not a superset "
+			"of domain->span\n");
+	return 0;
+}
+
 static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
 	int level = 0;
@@ -5622,90 +5730,14 @@
 
 	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
 
-	do {
-		int i;
-		char str[NR_CPUS];
-		struct sched_group *group = sd->groups;
-		cpumask_t groupmask;
-
-		cpumask_scnprintf(str, NR_CPUS, sd->span);
-		cpus_clear(groupmask);
-
-		printk(KERN_DEBUG);
-		for (i = 0; i < level + 1; i++)
-			printk(" ");
-		printk("domain %d: ", level);
-
-		if (!(sd->flags & SD_LOAD_BALANCE)) {
-			printk("does not load-balance\n");
-			if (sd->parent)
-				printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-						" has parent");
+	for (;;) {
+		if (sched_domain_debug_one(sd, cpu, level))
 			break;
-		}
-
-		printk("span %s\n", str);
-
-		if (!cpu_isset(cpu, sd->span))
-			printk(KERN_ERR "ERROR: domain->span does not contain "
-					"CPU%d\n", cpu);
-		if (!cpu_isset(cpu, group->cpumask))
-			printk(KERN_ERR "ERROR: domain->groups does not contain"
-					" CPU%d\n", cpu);
-
-		printk(KERN_DEBUG);
-		for (i = 0; i < level + 2; i++)
-			printk(" ");
-		printk("groups:");
-		do {
-			if (!group) {
-				printk("\n");
-				printk(KERN_ERR "ERROR: group is NULL\n");
-				break;
-			}
-
-			if (!group->__cpu_power) {
-				printk(KERN_CONT "\n");
-				printk(KERN_ERR "ERROR: domain->cpu_power not "
-						"set\n");
-				break;
-			}
-
-			if (!cpus_weight(group->cpumask)) {
-				printk(KERN_CONT "\n");
-				printk(KERN_ERR "ERROR: empty group\n");
-				break;
-			}
-
-			if (cpus_intersects(groupmask, group->cpumask)) {
-				printk(KERN_CONT "\n");
-				printk(KERN_ERR "ERROR: repeated CPUs\n");
-				break;
-			}
-
-			cpus_or(groupmask, groupmask, group->cpumask);
-
-			cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-			printk(KERN_CONT " %s", str);
-
-			group = group->next;
-		} while (group != sd->groups);
-		printk(KERN_CONT "\n");
-
-		if (!cpus_equal(sd->span, groupmask))
-			printk(KERN_ERR "ERROR: groups don't span "
-					"domain->span\n");
-
 		level++;
 		sd = sd->parent;
 		if (!sd)
-			continue;
-
-		if (!cpus_subset(groupmask, sd->span))
-			printk(KERN_ERR "ERROR: parent span is not a superset "
-				"of domain->span\n");
-
-	} while (sd);
+			break;
+	}
 }
 #else
 # define sched_domain_debug(sd, cpu) do { } while (0)
@@ -6424,13 +6456,17 @@
  */
 static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
+	int err;
+
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
 		doms_cur = &fallback_doms;
 	cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+	err = build_sched_domains(doms_cur);
 	register_sched_domain_sysctl();
-	return build_sched_domains(doms_cur);
+
+	return err;
 }
 
 static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
@@ -6479,6 +6515,9 @@
 {
 	int i, j;
 
+	/* always unregister in case we don't destroy any domains */
+	unregister_sched_domain_sysctl();
+
 	if (doms_new == NULL) {
 		ndoms_new = 1;
 		doms_new = &fallback_doms;
@@ -6514,6 +6553,8 @@
 		kfree(doms_cur);
 	doms_cur = doms_new;
 	ndoms_cur = ndoms_new;
+
+	register_sched_domain_sysctl();
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -7101,25 +7142,25 @@
 #ifdef CONFIG_FAIR_CGROUP_SCHED
 
 /* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cont)
+static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 {
-	return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id),
-					 struct task_group, css);
+	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
+			    struct task_group, css);
 }
 
 static struct cgroup_subsys_state *
-cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct task_group *tg;
 
-	if (!cont->parent) {
+	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		init_task_group.css.cgroup = cont;
+		init_task_group.css.cgroup = cgrp;
 		return &init_task_group.css;
 	}
 
 	/* we support only 1-level deep hierarchical scheduler atm */
-	if (cont->parent->parent)
+	if (cgrp->parent->parent)
 		return ERR_PTR(-EINVAL);
 
 	tg = sched_create_group();
@@ -7127,21 +7168,21 @@
 		return ERR_PTR(-ENOMEM);
 
 	/* Bind the cgroup to task_group object we just created */
-	tg->css.cgroup = cont;
+	tg->css.cgroup = cgrp;
 
 	return &tg->css;
 }
 
 static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-					struct cgroup *cont)
+			       struct cgroup *cgrp)
 {
-	struct task_group *tg = cgroup_tg(cont);
+	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
 static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-			     struct cgroup *cont, struct task_struct *tsk)
+			     struct cgroup *cgrp, struct task_struct *tsk)
 {
 	/* We don't support RT-tasks being in separate groups */
 	if (tsk->sched_class != &fair_sched_class)
@@ -7151,38 +7192,21 @@
 }
 
 static void
-cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 			struct cgroup *old_cont, struct task_struct *tsk)
 {
 	sched_move_task(tsk);
 }
 
-static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype,
-				struct file *file, const char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
+static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+				u64 shareval)
 {
-	unsigned long shareval;
-	struct task_group *tg = cgroup_tg(cont);
-	char buffer[2*sizeof(unsigned long) + 1];
-	int rc;
-
-	if (nbytes > 2*sizeof(unsigned long))	/* safety check */
-		return -E2BIG;
-
-	if (copy_from_user(buffer, userbuf, nbytes))
-		return -EFAULT;
-
-	buffer[nbytes] = 0;	/* nul-terminate */
-	shareval = simple_strtoul(buffer, NULL, 10);
-
-	rc = sched_group_set_shares(tg, shareval);
-
-	return (rc < 0 ? rc : nbytes);
+	return sched_group_set_shares(cgroup_tg(cgrp), shareval);
 }
 
-static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft)
+static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 {
-	struct task_group *tg = cgroup_tg(cont);
+	struct task_group *tg = cgroup_tg(cgrp);
 
 	return (u64) tg->shares;
 }
@@ -7190,7 +7214,7 @@
 static struct cftype cpu_shares = {
 	.name = "shares",
 	.read_uint = cpu_shares_read_uint,
-	.write = cpu_shares_write,
+	.write_uint = cpu_shares_write_uint,
 };
 
 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
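The conversion from a hand-rolled write handler to write_uint delegates buffer copying and parsing to the cgroup core, so a write such as echo 2048 > <cgroup mount>/<group>/cpu.shares (path illustrative) reaches cpu_shares_write_uint() as an already-parsed u64 and sched_group_set_shares() never sees raw user buffers.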
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 166ed6d..9971831 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -876,6 +876,7 @@
 	}
 }
 
+#ifdef CONFIG_SMP
 /**************************************************
  * Fair scheduling class load-balancing methods:
  */
@@ -936,12 +937,11 @@
 
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_nr_move, unsigned long max_load_move,
+		  unsigned long max_load_move,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned, int *this_best_prio)
 {
 	struct cfs_rq *busy_cfs_rq;
-	unsigned long load_moved, total_nr_moved = 0, nr_moved;
 	long rem_load_move = max_load_move;
 	struct rq_iterator cfs_rq_iterator;
 
@@ -969,25 +969,48 @@
 #else
 # define maxload rem_load_move
 #endif
-		/* pass busy_cfs_rq argument into
+		/*
+		 * pass busy_cfs_rq argument into
 		 * load_balance_[start|next]_fair iterators
 		 */
 		cfs_rq_iterator.arg = busy_cfs_rq;
-		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
-				max_nr_move, maxload, sd, idle, all_pinned,
-				&load_moved, this_best_prio, &cfs_rq_iterator);
+		rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+					       maxload, sd, idle, all_pinned,
+					       this_best_prio,
+					       &cfs_rq_iterator);
 
-		total_nr_moved += nr_moved;
-		max_nr_move -= nr_moved;
-		rem_load_move -= load_moved;
-
-		if (max_nr_move <= 0 || rem_load_move <= 0)
+		if (rem_load_move <= 0)
 			break;
 	}
 
 	return max_load_move - rem_load_move;
 }
 
+static int
+move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	struct cfs_rq *busy_cfs_rq;
+	struct rq_iterator cfs_rq_iterator;
+
+	cfs_rq_iterator.start = load_balance_start_fair;
+	cfs_rq_iterator.next = load_balance_next_fair;
+
+	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+		/*
+		 * pass busy_cfs_rq argument into
+		 * load_balance_[start|next]_fair iterators
+		 */
+		cfs_rq_iterator.arg = busy_cfs_rq;
+		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+				       &cfs_rq_iterator))
+			return 1;
+	}
+
+	return 0;
+}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class:
  */
@@ -1063,7 +1086,10 @@
 	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_fair,
+	.move_one_task		= move_one_task_fair,
+#endif
 
 	.set_curr_task          = set_curr_task_fair,
 	.task_tick		= task_tick_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 6e2ead4..bf9c25c 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -37,15 +37,24 @@
 {
 }
 
+#ifdef CONFIG_SMP
 static unsigned long
 load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-			unsigned long max_nr_move, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio)
+		  unsigned long max_load_move,
+		  struct sched_domain *sd, enum cpu_idle_type idle,
+		  int *all_pinned, int *this_best_prio)
 {
 	return 0;
 }
 
+static int
+move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	return 0;
+}
+#endif
+
 static void task_tick_idle(struct rq *rq, struct task_struct *curr)
 {
 }
@@ -69,7 +78,10 @@
 	.pick_next_task		= pick_next_task_idle,
 	.put_prev_task		= put_prev_task_idle,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_idle,
+	.move_one_task		= move_one_task_idle,
+#endif
 
 	.set_curr_task          = set_curr_task_idle,
 	.task_tick		= task_tick_idle,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d0097a0..8abd752 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -98,6 +98,7 @@
 	p->se.exec_start = 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Load-balancing iterator. Note: while the runqueue stays locked
  * during the whole iteration, the current task might be
@@ -172,13 +173,11 @@
 
 static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-			unsigned long max_nr_move, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio)
+		unsigned long max_load_move,
+		struct sched_domain *sd, enum cpu_idle_type idle,
+		int *all_pinned, int *this_best_prio)
 {
-	int nr_moved;
 	struct rq_iterator rt_rq_iterator;
-	unsigned long load_moved;
 
 	rt_rq_iterator.start = load_balance_start_rt;
 	rt_rq_iterator.next = load_balance_next_rt;
@@ -187,13 +186,25 @@
 	 */
 	rt_rq_iterator.arg = busiest;
 
-	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
-			max_load_move, sd, idle, all_pinned, &load_moved,
-			this_best_prio, &rt_rq_iterator);
-
-	return load_moved;
+	return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
+			     idle, all_pinned, this_best_prio, &rt_rq_iterator);
 }
 
+static int
+move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		 struct sched_domain *sd, enum cpu_idle_type idle)
+{
+	struct rq_iterator rt_rq_iterator;
+
+	rt_rq_iterator.start = load_balance_start_rt;
+	rt_rq_iterator.next = load_balance_next_rt;
+	rt_rq_iterator.arg = busiest;
+
+	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
+				  &rt_rq_iterator);
+}
+#endif
+
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
@@ -236,7 +247,10 @@
 	.pick_next_task		= pick_next_task_rt,
 	.put_prev_task		= put_prev_task_rt,
 
+#ifdef CONFIG_SMP
 	.load_balance		= load_balance_rt,
+	.move_one_task		= move_one_task_rt,
+#endif
 
 	.set_curr_task          = set_curr_task_rt,
 	.task_tick		= task_tick_rt,
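All three classes now follow the same shape under CONFIG_SMP: fill an rq_iterator with the class's start/next callbacks and delegate to balance_tasks() or iter_move_one_task() in sched.c. A condensed sketch of that shape for a hypothetical class (the foo names are placeholders):

#ifdef CONFIG_SMP
static unsigned long
load_balance_foo(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 unsigned long max_load_move, struct sched_domain *sd,
		 enum cpu_idle_type idle, int *all_pinned, int *this_best_prio)
{
	struct rq_iterator it = {
		.start = load_balance_start_foo,	/* hypothetical */
		.next  = load_balance_next_foo,		/* hypothetical */
		.arg   = busiest,
	};

	return balance_tasks(this_rq, this_cpu, busiest, max_load_move,
			     sd, idle, all_pinned, this_best_prio, &it);
}

static int
move_one_task_foo(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct rq_iterator it = {
		.start = load_balance_start_foo,
		.next  = load_balance_next_foo,
		.arg   = busiest,
	};

	return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, &it);
}
#endif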
diff --git a/kernel/user.c b/kernel/user.c
index e91331c..0f3aa02 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -129,7 +129,7 @@
 }
 
 /* return cpu shares held by the user */
-ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
 {
 	struct user_struct *up = container_of(kset, struct user_struct, kset);
 
@@ -137,7 +137,8 @@
 }
 
 /* modify cpu shares held by the user */
-ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
+static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
+				size_t size)
 {
 	struct user_struct *up = container_of(kset, struct user_struct, kset);
 	unsigned long shares;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1faa508..1e5f207 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,6 +9,14 @@
 	  operations.  This is useful for identifying long delays
 	  in kernel startup.
 
+config ENABLE_WARN_DEPRECATED
+	bool "Enable __deprecated logic"
+	default y
+	help
+	  Enable the __deprecated logic in the kernel build.
+	  Disable this to suppress the "warning: 'foo' is deprecated
+	  (declared at kernel/power/somefile.c:1234)" messages.
+
 config ENABLE_MUST_CHECK
 	bool "Enable __must_check logic"
 	default y