Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
  [PATCH] x86-64: no paravirt for X86_VOYAGER or X86_VISWS
  [PATCH] i386: Fix io_apic.c warning
  [PATCH] i386: export smp_num_siblings for oprofile
  [PATCH] x86: Work around gcc 4.2 over aggressive optimizer
  [PATCH] x86: Fix boot hang due to nmi watchdog init code
  [PATCH] x86: Fix verify_quirk_intel_irqbalance()
  [PATCH] i386: Update defconfig
  [PATCH] x86-64: Update defconfig
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 5642ac4..8db9041 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -117,6 +117,14 @@
 } drm_clip_rect_t;
 
 /**
+ * Drawable information.
+ */
+typedef struct drm_drawable_info {
+	unsigned int num_rects;
+	drm_clip_rect_t *rects;
+} drm_drawable_info_t;
+
+/**
  * Texture region,
  */
 typedef struct drm_tex_region {
@@ -348,7 +356,8 @@
 		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
 		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
 		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
-		_DRM_FB_BUFFER = 0x08	/**< Buffer is in frame buffer */
+		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
+		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
 	} flags;
 	unsigned long agp_start; /**<
 				  * Start address of where the AGP buffers are
@@ -444,6 +453,20 @@
 } drm_draw_t;
 
 /**
+ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
+ */
+typedef enum {
+	DRM_DRAWABLE_CLIPRECTS,
+} drm_drawable_info_type_t;
+
+typedef struct drm_update_draw {
+	drm_drawable_t handle;
+	unsigned int type;
+	unsigned int num;
+	unsigned long long data;
+} drm_update_draw_t;
+
+/**
  * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
  */
 typedef struct drm_auth {
@@ -465,10 +488,14 @@
 typedef enum {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
+	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
+	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
 } drm_vblank_seq_type_t;
 
-#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
+#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
+				_DRM_VBLANK_NEXTONMISS)
 
 struct drm_wait_vblank_request {
 	drm_vblank_seq_type_t type;
@@ -623,6 +650,8 @@
 
 #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, drm_wait_vblank_t)
 
+#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, drm_update_draw_t)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x79.
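For reference, a minimal userspace sketch of driving the new DRM_IOCTL_UPDATE_DRAW ioctl declared above. The include path, file descriptor and drawable handle are assumptions, not part of this patch:

/* Hypothetical usage sketch: attach a single cliprect to an existing
 * drawable through the new DRM_IOCTL_UPDATE_DRAW ioctl.  Assumes an
 * already-open DRM fd and a drawable handle from DRM_IOCTL_ADD_DRAW. */
#include <sys/ioctl.h>
#include "drm.h"

static int set_one_cliprect(int fd, drm_drawable_t handle)
{
	drm_clip_rect_t rect = { .x1 = 0, .y1 = 0, .x2 = 640, .y2 = 480 };
	drm_update_draw_t update = {
		.handle = handle,
		.type   = DRM_DRAWABLE_CLIPRECTS,
		.num    = 1,
		/* 'data' is a 64-bit field carrying a user pointer */
		.data   = (unsigned long long)(unsigned long)&rect,
	};

	return ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
}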
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 7690a59..0bbb04f 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -97,6 +97,7 @@
 #define DRIVER_IRQ_VBL     0x100
 #define DRIVER_DMA_QUEUE   0x200
 #define DRIVER_FB_DMA      0x400
+#define DRIVER_IRQ_VBL2    0x800
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -430,7 +431,8 @@
 	enum {
 		_DRM_DMA_USE_AGP = 0x01,
 		_DRM_DMA_USE_SG = 0x02,
-		_DRM_DMA_USE_FB = 0x04
+		_DRM_DMA_USE_FB = 0x04,
+		_DRM_DMA_USE_PCI_RO = 0x08
 	} flags;
 
 } drm_device_dma_t;
@@ -562,6 +564,7 @@
 	void (*kernel_context_switch_unlock) (struct drm_device * dev,
 					      drm_lock_t *lock);
 	int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+	int (*vblank_wait2) (struct drm_device * dev, unsigned int *sequence);
 	int (*dri_library_name) (struct drm_device *dev, char *buf);
 
 	/**
@@ -708,9 +711,13 @@
 
 	wait_queue_head_t vbl_queue;	/**< VBLANK wait queue */
 	atomic_t vbl_received;
+	atomic_t vbl_received2;		/**< number of secondary VBLANK interrupts */
 	spinlock_t vbl_lock;
 	drm_vbl_sig_t vbl_sigs;		/**< signal list to send on VBLANK */
+	drm_vbl_sig_t vbl_sigs2;	/**< signals to send on secondary VBLANK */
 	unsigned int vbl_pending;
+	spinlock_t tasklet_lock;	/**< For drm_locked_tasklet */
+	void (*locked_tasklet_func)(struct drm_device *dev);
 
 	/*@} */
 	cycles_t ctx_start;
@@ -738,6 +745,15 @@
 	drm_local_map_t *agp_buffer_map;
 	unsigned int agp_buffer_token;
 	drm_head_t primary;		/**< primary screen head */
+
+	/** \name Drawable information */
+	/*@{ */
+	spinlock_t drw_lock;
+	unsigned int drw_bitfield_length;
+	u32 *drw_bitfield;
+	unsigned int drw_info_length;
+	drm_drawable_info_t **drw_info;
+	/*@} */
 } drm_device_t;
 
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
@@ -885,6 +901,10 @@
 		       unsigned int cmd, unsigned long arg);
 extern int drm_rmdraw(struct inode *inode, struct file *filp,
 		      unsigned int cmd, unsigned long arg);
+extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg);
+extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
+						  drm_drawable_t id);
 
 				/* Authentication IOCTL support (drm_auth.h) */
 extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -949,6 +969,7 @@
 			   unsigned int cmd, unsigned long arg);
 extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
 extern void drm_vbl_send_signals(drm_device_t * dev);
+extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*));
 
 				/* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t * dev);
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 6eafff1..9f65f56 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -887,6 +887,9 @@
 	request->count = entry->buf_count;
 	request->size = size;
 
+	if (request->flags & _DRM_PCI_BUFFER_RO)
+		dma->flags = _DRM_DMA_USE_PCI_RO;
+
 	atomic_dec(&dev->buf_alloc);
 	return 0;
 
@@ -1471,9 +1474,10 @@
  * \param arg pointer to a drm_buf_map structure.
  * \return zero on success or a negative number on failure.
  *
- * Maps the AGP or SG buffer region with do_mmap(), and copies information
- * about each buffer into user space. The PCI buffers are already mapped on the
- * addbufs_pci() call.
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * an offset of 0, which drm_mmap() interprets as a request for PCI buffers and
+ * dispatches to drm_mmap_dma().
  */
 int drm_mapbufs(struct inode *inode, struct file *filp,
 		unsigned int cmd, unsigned long arg)
diff --git a/drivers/char/drm/drm_core.h b/drivers/char/drm/drm_core.h
index f4f9db6c..3167390 100644
--- a/drivers/char/drm/drm_core.h
+++ b/drivers/char/drm/drm_core.h
@@ -24,11 +24,11 @@
 
 #define CORE_NAME		"drm"
 #define CORE_DESC		"DRM shared core routines"
-#define CORE_DATE		"20051102"
+#define CORE_DATE		"20060810"
 
 #define DRM_IF_MAJOR	1
-#define DRM_IF_MINOR	2
+#define DRM_IF_MINOR	3
 
 #define CORE_MAJOR	1
-#define CORE_MINOR	0
-#define CORE_PATCHLEVEL 1
+#define CORE_MINOR	1
+#define CORE_PATCHLEVEL 0
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index 7857453..de37d5f 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -4,6 +4,7 @@
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
  * \author Gareth Hughes <gareth@valinux.com>
+ * \author Michel Dänzer <michel@tungstengraphics.com>
  */
 
 /*
@@ -11,6 +12,7 @@
  *
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,22 +37,294 @@
 
 #include "drmP.h"
 
-/** No-op. */
-int drm_adddraw(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
+/**
+ * Allocate drawable ID and memory to store information about it.
+ */
+int drm_adddraw(DRM_IOCTL_ARGS)
 {
+	DRM_DEVICE;
+	unsigned long irqflags;
+	int i, j;
+	u32 *bitfield = dev->drw_bitfield;
+	unsigned int bitfield_length = dev->drw_bitfield_length;
+	drm_drawable_info_t **info = dev->drw_info;
+	unsigned int info_length = dev->drw_info_length;
 	drm_draw_t draw;
 
-	draw.handle = 0;	/* NOOP */
+	for (i = 0, j = 0; i < bitfield_length; i++) {
+		if (bitfield[i] == ~0)
+			continue;
+
+		for (; j < 8 * sizeof(*bitfield); j++)
+			if (!(bitfield[i] & (1 << j)))
+				goto done;
+	}
+done:
+
+	if (i == bitfield_length) {
+		bitfield_length++;
+
+		bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
+				     DRM_MEM_BUFS);
+
+		if (!bitfield) {
+			DRM_ERROR("Failed to allocate new drawable bitfield\n");
+			return DRM_ERR(ENOMEM);
+		}
+
+		if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
+			info_length += 8 * sizeof(*bitfield);
+
+			info = drm_alloc(info_length * sizeof(*info),
+					 DRM_MEM_BUFS);
+
+			if (!info) {
+				DRM_ERROR("Failed to allocate new drawable info"
+					  " array\n");
+
+				drm_free(bitfield,
+					 bitfield_length * sizeof(*bitfield),
+					 DRM_MEM_BUFS);
+				return DRM_ERR(ENOMEM);
+			}
+		}
+
+		bitfield[i] = 0;
+	}
+
+	draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
 	DRM_DEBUG("%d\n", draw.handle);
-	if (copy_to_user((drm_draw_t __user *) arg, &draw, sizeof(draw)))
-		return -EFAULT;
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+	bitfield[i] |= 1 << j;
+	info[draw.handle - 1] = NULL;
+
+	if (bitfield != dev->drw_bitfield) {
+		memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
+		       sizeof(*bitfield));
+		drm_free(dev->drw_bitfield, sizeof(*bitfield) *
+			 dev->drw_bitfield_length, DRM_MEM_BUFS);
+		dev->drw_bitfield = bitfield;
+		dev->drw_bitfield_length = bitfield_length;
+	}
+
+	if (info != dev->drw_info) {
+		memcpy(info, dev->drw_info, dev->drw_info_length *
+		       sizeof(*info));
+		drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
+			 DRM_MEM_BUFS);
+		dev->drw_info = info;
+		dev->drw_info_length = info_length;
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
+
 	return 0;
 }
 
-/** No-op. */
-int drm_rmdraw(struct inode *inode, struct file *filp,
-	       unsigned int cmd, unsigned long arg)
+/**
+ * Free drawable ID and memory to store information about it.
+ */
+int drm_rmdraw(DRM_IOCTL_ARGS)
 {
-	return 0;		/* NOOP */
+	DRM_DEVICE;
+	drm_draw_t draw;
+ 	int id, idx;
+ 	unsigned int shift;
+	unsigned long irqflags;
+	u32 *bitfield = dev->drw_bitfield;
+	unsigned int bitfield_length = dev->drw_bitfield_length;
+	drm_drawable_info_t **info = dev->drw_info;
+	unsigned int info_length = dev->drw_info_length;
+
+	DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
+				 sizeof(draw));
+
+	id = draw.handle - 1;
+	idx = id / (8 * sizeof(*bitfield));
+	shift = id % (8 * sizeof(*bitfield));
+
+	if (idx < 0 || idx >= bitfield_length ||
+	    !(bitfield[idx] & (1 << shift))) {
+		DRM_DEBUG("No such drawable %d\n", draw.handle);
+		return 0;
+	}
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+	bitfield[idx] &= ~(1 << shift);
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	if (info[id]) {
+		drm_free(info[id]->rects, info[id]->num_rects *
+			 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+		drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
+	}
+
+	/* Can we shrink the arrays? */
+	if (idx == bitfield_length - 1) {
+		while (idx >= 0 && !bitfield[idx])
+			--idx;
+
+		bitfield_length = idx + 1;
+
+		if (idx != id / (8 * sizeof(*bitfield)))
+			bitfield = drm_alloc(bitfield_length *
+					     sizeof(*bitfield), DRM_MEM_BUFS);
+
+		if (!bitfield && bitfield_length) {
+			bitfield = dev->drw_bitfield;
+			bitfield_length = dev->drw_bitfield_length;
+		}
+	}
+
+	if (bitfield != dev->drw_bitfield) {
+		info_length = 8 * sizeof(*bitfield) * bitfield_length;
+
+		info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);
+
+		if (!info && info_length) {
+			info = dev->drw_info;
+			info_length = dev->drw_info_length;
+		}
+
+		spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+		memcpy(bitfield, dev->drw_bitfield, bitfield_length *
+		       sizeof(*bitfield));
+		drm_free(dev->drw_bitfield, sizeof(*bitfield) *
+			 dev->drw_bitfield_length, DRM_MEM_BUFS);
+		dev->drw_bitfield = bitfield;
+		dev->drw_bitfield_length = bitfield_length;
+
+		if (info != dev->drw_info) {
+			memcpy(info, dev->drw_info, info_length *
+			       sizeof(*info));
+			drm_free(dev->drw_info, sizeof(*info) *
+				 dev->drw_info_length, DRM_MEM_BUFS);
+			dev->drw_info = info;
+			dev->drw_info_length = info_length;
+		}
+
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+	}
+
+	DRM_DEBUG("%d\n", draw.handle);
+	return 0;
 }
+
+int drm_update_drawable_info(DRM_IOCTL_ARGS) {
+	DRM_DEVICE;
+	drm_update_draw_t update;
+	unsigned int id, idx, shift;
+	u32 *bitfield = dev->drw_bitfield;
+	unsigned long irqflags, bitfield_length = dev->drw_bitfield_length;
+	drm_drawable_info_t *info;
+	drm_clip_rect_t *rects;
+	int err;
+
+	DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
+				 sizeof(update));
+
+	id = update.handle - 1;
+	idx = id / (8 * sizeof(*bitfield));
+	shift = id % (8 * sizeof(*bitfield));
+
+	if (idx < 0 || idx >= bitfield_length ||
+	    !(bitfield[idx] & (1 << shift))) {
+		DRM_ERROR("No such drawable %d\n", update.handle);
+		return DRM_ERR(EINVAL);
+	}
+
+	info = dev->drw_info[id];
+
+	if (!info) {
+		info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);
+
+		if (!info) {
+			DRM_ERROR("Failed to allocate drawable info memory\n");
+			return DRM_ERR(ENOMEM);
+		}
+	}
+
+	switch (update.type) {
+	case DRM_DRAWABLE_CLIPRECTS:
+		if (update.num != info->num_rects) {
+			rects = drm_alloc(update.num * sizeof(drm_clip_rect_t),
+					 DRM_MEM_BUFS);
+		} else
+			rects = info->rects;
+
+		if (update.num && !rects) {
+			DRM_ERROR("Failed to allocate cliprect memory\n");
+			err = DRM_ERR(ENOMEM);
+			goto error;
+		}
+
+		if (update.num && DRM_COPY_FROM_USER(rects,
+						     (drm_clip_rect_t __user *)
+						     (unsigned long)update.data,
+						     update.num *
+						     sizeof(*rects))) {
+			DRM_ERROR("Failed to copy cliprects from userspace\n");
+			err = DRM_ERR(EFAULT);
+			goto error;
+		}
+
+		spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+		if (rects != info->rects) {
+			drm_free(info->rects, info->num_rects *
+				 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+		}
+
+		info->rects = rects;
+		info->num_rects = update.num;
+		dev->drw_info[id] = info;
+
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+		DRM_DEBUG("Updated %d cliprects for drawable %d\n",
+			  info->num_rects, id);
+		break;
+	default:
+		DRM_ERROR("Invalid update type %d\n", update.type);
+		return DRM_ERR(EINVAL);
+	}
+
+	return 0;
+
+error:
+	if (!dev->drw_info[id])
+		drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+	else if (rects != dev->drw_info[id]->rects)
+		drm_free(rects, update.num *
+			 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+
+	return err;
+}
+
+/**
+ * Caller must hold the drawable spinlock!
+ */
+drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
+	u32 *bitfield = dev->drw_bitfield;
+	unsigned int idx, shift;
+
+	id--;
+	idx = id / (8 * sizeof(*bitfield));
+	shift = id % (8 * sizeof(*bitfield));
+
+	if (idx < 0 || idx >= dev->drw_bitfield_length ||
+	    !(bitfield[idx] & (1 << shift))) {
+		DRM_DEBUG("No such drawable %d\n", id);
+		return NULL;
+	}
+
+	return dev->drw_info[id];
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
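To make the bookkeeping above easier to follow, here is a small standalone illustration of the handle <-> bitfield mapping shared by drm_adddraw(), drm_rmdraw() and drm_get_drawable_info(), assuming 32-bit bitfield words as in the u32 *drw_bitfield array:

/* Illustration only: drawable handles are 1-based; handle N occupies
 * bit (N-1) % 32 of word (N-1) / 32 in the drw_bitfield array. */
#include <stdio.h>

int main(void)
{
	const unsigned int bits_per_word = 8 * sizeof(unsigned int);
	unsigned int word = 1, bit = 5;			/* first free bit found */
	unsigned int handle = word * bits_per_word + bit + 1;
	unsigned int id = handle - 1;			/* back to 0-based index */

	printf("handle %u -> word %u, bit %u\n",
	       handle, id / bits_per_word, id % bits_per_word);
	return 0;
}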
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index b366c5b..a70af0d 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -116,6 +116,8 @@
 	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
+
+	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 };
 
 #define DRIVER_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -151,6 +153,18 @@
 	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
+	/* Free drawable information memory */
+	/* Drawable handles are 1-based, one info slot per possible handle */
+	for (i = 0; i < dev->drw_info_length; i++) {
+		drm_drawable_info_t *info = drm_get_drawable_info(dev, i + 1);
+
+		if (info) {
+			drm_free(info->rects, info->num_rects *
+				 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+		}
+	}
+
 	mutex_lock(&dev->struct_mutex);
 	del_timer(&dev->timer);
 
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 4553a3a..9d00c51 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -121,6 +121,7 @@
 		spin_lock_init(&dev->vbl_lock);
 
 		INIT_LIST_HEAD(&dev->vbl_sigs.head);
+		INIT_LIST_HEAD(&dev->vbl_sigs2.head);
 
 		dev->vbl_pending = 0;
 	}
@@ -175,6 +176,8 @@
 
 	free_irq(dev->irq, dev);
 
+	dev->locked_tasklet_func = NULL;
+
 	return 0;
 }
 
@@ -247,10 +250,7 @@
 	drm_wait_vblank_t vblwait;
 	struct timeval now;
 	int ret = 0;
-	unsigned int flags;
-
-	if (!drm_core_check_feature(dev, DRIVER_IRQ_VBL))
-		return -EINVAL;
+	unsigned int flags, seq;
 
 	if (!dev->irq)
 		return -EINVAL;
@@ -258,9 +258,26 @@
 	if (copy_from_user(&vblwait, argp, sizeof(vblwait)))
 		return -EFAULT;
 
-	switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) {
+	if (vblwait.request.type &
+	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+			  vblwait.request.type,
+			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+		return -EINVAL;
+	}
+
+	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+
+	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
+				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+		return -EINVAL;
+
+	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
+			  : &dev->vbl_received);
+
+	switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
-		vblwait.request.sequence += atomic_read(&dev->vbl_received);
+		vblwait.request.sequence += seq;
 		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
 	case _DRM_VBLANK_ABSOLUTE:
 		break;
@@ -268,26 +285,30 @@
 		return -EINVAL;
 	}
 
-	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait.request.sequence) <= (1<<23)) {
+		vblwait.request.sequence = seq + 1;
+	}
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 		unsigned long irqflags;
+		drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
 		drm_vbl_sig_t *vbl_sig;
 
-		vblwait.reply.sequence = atomic_read(&dev->vbl_received);
-
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 
 		/* Check if this task has already scheduled the same signal
 		 * for the same vblank sequence number; nothing to be done in
 		 * that case
 		 */
-		list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) {
+		list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
 			if (vbl_sig->sequence == vblwait.request.sequence
 			    && vbl_sig->info.si_signo == vblwait.request.signal
 			    && vbl_sig->task == current) {
 				spin_unlock_irqrestore(&dev->vbl_lock,
 						       irqflags);
+				vblwait.reply.sequence = seq;
 				goto done;
 			}
 		}
@@ -315,11 +336,16 @@
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 
-		list_add_tail((struct list_head *)vbl_sig, &dev->vbl_sigs.head);
+		list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
 
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+		vblwait.reply.sequence = seq;
 	} else {
-		if (dev->driver->vblank_wait)
+		if (flags & _DRM_VBLANK_SECONDARY) {
+			if (dev->driver->vblank_wait2)
+				ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence);
+		} else if (dev->driver->vblank_wait)
 			ret =
 			    dev->driver->vblank_wait(dev,
 						     &vblwait.request.sequence);
@@ -347,25 +373,32 @@
  */
 void drm_vbl_send_signals(drm_device_t * dev)
 {
-	struct list_head *list, *tmp;
-	drm_vbl_sig_t *vbl_sig;
-	unsigned int vbl_seq = atomic_read(&dev->vbl_received);
 	unsigned long flags;
+	int i;
 
 	spin_lock_irqsave(&dev->vbl_lock, flags);
 
-	list_for_each_safe(list, tmp, &dev->vbl_sigs.head) {
-		vbl_sig = list_entry(list, drm_vbl_sig_t, head);
-		if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-			vbl_sig->info.si_code = vbl_seq;
-			send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info,
-				      vbl_sig->task);
+	for (i = 0; i < 2; i++) {
+		struct list_head *list, *tmp;
+		drm_vbl_sig_t *vbl_sig;
+		drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
+						   &dev->vbl_received);
 
-			list_del(list);
+		list_for_each_safe(list, tmp, &vbl_sigs->head) {
+			vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+				vbl_sig->info.si_code = vbl_seq;
+				send_sig_info(vbl_sig->info.si_signo,
+					      &vbl_sig->info, vbl_sig->task);
 
-			drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
+				list_del(list);
 
-			dev->vbl_pending--;
+				drm_free(vbl_sig, sizeof(*vbl_sig),
+					 DRM_MEM_DRIVER);
+
+				dev->vbl_pending--;
+			}
 		}
 	}
 
@@ -373,3 +406,77 @@
 }
 
 EXPORT_SYMBOL(drm_vbl_send_signals);
+
+/**
+ * Tasklet wrapper function.
+ *
+ * \param data DRM device in disguise.
+ *
+ * Attempts to grab the HW lock and calls the driver callback on success. On
+ * failure, leaves the lock marked as contended so the callback can be called
+ * from drm_unlock().
+ */
+static void drm_locked_tasklet_func(unsigned long data)
+{
+	drm_device_t *dev = (drm_device_t*)data;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (!dev->locked_tasklet_func ||
+	    !drm_lock_take(&dev->lock.hw_lock->lock,
+			   DRM_KERNEL_CONTEXT)) {
+		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+		return;
+	}
+
+	dev->lock.lock_time = jiffies;
+	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+	dev->locked_tasklet_func(dev);
+
+	drm_lock_free(dev, &dev->lock.hw_lock->lock,
+		      DRM_KERNEL_CONTEXT);
+
+	dev->locked_tasklet_func = NULL;
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+}
+
+/**
+ * Schedule a tasklet to call back a driver hook with the HW lock held.
+ *
+ * \param dev DRM device.
+ * \param func Driver callback.
+ *
+ * This is intended for triggering actions that require the HW lock from an
+ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
+ * completes. Note that the callback may be called from interrupt or process
+ * context, so it must not make any assumptions about this. Also, the HW lock
+ * will be held with either the kernel context or a client context.
+ */
+void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*))
+{
+	unsigned long irqflags;
+	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
+	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
+		return;
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (dev->locked_tasklet_func) {
+		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+		return;
+	}
+
+	dev->locked_tasklet_func = func;
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	drm_tasklet.data = (unsigned long)dev;
+
+	tasklet_hi_schedule(&drm_tasklet);
+}
+EXPORT_SYMBOL(drm_locked_tasklet);
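As a usage sketch (hypothetical, not part of this patch), waiting for a vblank on the secondary display controller from userspace would combine the new flags like this; it assumes "drm.h" is on the include path and fd is an open DRM device:

/* Hypothetical userspace sketch: wait one vblank on the secondary pipe,
 * falling back to the next vblank if the target has already passed. */
#include <sys/ioctl.h>
#include "drm.h"

static int wait_secondary_vblank(int fd)
{
	drm_wait_vblank_t vbl;

	vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_SECONDARY |
			   _DRM_VBLANK_NEXTONMISS;
	vbl.request.sequence = 1;	/* one vblank from now */

	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}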
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index f9e4530..116ed0f 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -155,6 +155,7 @@
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->head->dev;
 	drm_lock_t lock;
+	unsigned long irqflags;
 
 	if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
 		return -EFAULT;
@@ -165,6 +166,16 @@
 		return -EINVAL;
 	}
 
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (dev->locked_tasklet_func) {
+		dev->locked_tasklet_func(dev);
+
+		dev->locked_tasklet_func = NULL;
+	}
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
 	/* kernel_context_switch isn't used by any of the x86 drm
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 7b1d4e8..5fd6dc0 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -60,6 +60,8 @@
 	int retcode;
 
 	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->drw_lock);
+	spin_lock_init(&dev->tasklet_lock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index ae26919..b9cfc07 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -473,6 +473,22 @@
 	}
 	unlock_kernel();
 
+	if (!capable(CAP_SYS_ADMIN) &&
+	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+		/* Ye gads this is ugly.  With more thought
+		   we could move this up higher and use
+		   `protection_map' instead.  */
+		vma->vm_page_prot =
+		    __pgprot(pte_val
+			     (pte_wrprotect
+			      (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+	}
+
 	vma->vm_ops = &drm_vm_dma_ops;
 
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index fb7913f..9354ce3 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -162,6 +162,7 @@
 
 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
 
+	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
 	dev_priv->current_page = 0;
@@ -782,6 +783,7 @@
 	[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
 	[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
 	[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
+	[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 6af83e6..96a4688 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -104,6 +104,15 @@
 	unsigned int depth_tiled;
 	unsigned int rotated_tiled;
 	unsigned int rotated2_tiled;
+
+	int pipeA_x;
+	int pipeA_y;
+	int pipeA_w;
+	int pipeA_h;
+	int pipeB_x;
+	int pipeB_y;
+	int pipeB_w;
+	int pipeB_h;
 } drm_i915_sarea_t;
 
 /* Flags for perf_boxes
@@ -132,6 +141,7 @@
 #define DRM_I915_DESTROY_HEAP	0x0c
 #define DRM_I915_SET_VBLANK_PIPE	0x0d
 #define DRM_I915_GET_VBLANK_PIPE	0x0e
+#define DRM_I915_VBLANK_SWAP	0x0f
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -148,6 +158,7 @@
 #define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
+#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -243,4 +254,12 @@
 	int pipe;
 } drm_i915_vblank_pipe_t;
 
+/* Schedule buffer swap at given vertical blank:
+ */
+typedef struct drm_i915_vblank_swap {
+	drm_drawable_t drawable;
+	drm_vblank_seq_type_t seqtype;
+	unsigned int sequence;
+} drm_i915_vblank_swap_t;
+
 #endif				/* _I915_DRM_H_ */
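A corresponding userspace sketch for the new i915 ioctl (hypothetical helper; assumes drm.h and i915_drm.h are available) that queues a back-to-front blit two vblanks ahead on the primary pipe:

/* Hypothetical usage sketch for DRM_IOCTL_I915_VBLANK_SWAP:
 * schedule a swap of the given drawable two vblanks from now on pipe A. */
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int queue_swap(int fd, drm_drawable_t drawable)
{
	drm_i915_vblank_swap_t swap;

	swap.drawable = drawable;
	swap.seqtype = _DRM_VBLANK_RELATIVE;	/* add _DRM_VBLANK_SECONDARY for pipe B */
	swap.sequence = 2;

	/* On success the kernel writes back the absolute target sequence. */
	return ioctl(fd, DRM_IOCTL_I915_VBLANK_SWAP, &swap);
}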
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 8e2e609..85bcc27 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -44,12 +44,14 @@
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
+	    DRIVER_IRQ_VBL2,
 	.load = i915_driver_load,
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
 	.device_is_agp = i915_driver_device_is_agp,
 	.vblank_wait = i915_driver_vblank_wait,
+	.vblank_wait2 = i915_driver_vblank_wait2,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index fdc2bf1..93cdcfe 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -46,9 +46,11 @@
  * 1.3: Add vblank support
  * 1.4: Fix cmdbuffer path, add heap destroy
  * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ *      - Support vertical blank on secondary display pipe
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		5
+#define DRIVER_MINOR		6
 #define DRIVER_PATCHLEVEL	0
 
 typedef struct _drm_i915_ring_buffer {
@@ -71,6 +73,13 @@
 	DRMFILE filp;		/* 0: free, -1: heap, other: real files */
 };
 
+typedef struct _drm_i915_vbl_swap {
+	struct list_head head;
+	drm_drawable_t drw_id;
+	unsigned int pipe;
+	unsigned int sequence;
+} drm_i915_vbl_swap_t;
+
 typedef struct drm_i915_private {
 	drm_local_map_t *sarea;
 	drm_local_map_t *mmio_map;
@@ -83,6 +92,7 @@
 	dma_addr_t dma_status_page;
 	unsigned long counter;
 
+	unsigned int cpp;
 	int back_offset;
 	int front_offset;
 	int current_page;
@@ -98,6 +108,10 @@
 	struct mem_block *agp_heap;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
+
+	spinlock_t swaps_lock;
+	drm_i915_vbl_swap_t vbl_swaps;
+	unsigned int swaps_pending;
 } drm_i915_private_t;
 
 extern drm_ioctl_desc_t i915_ioctls[];
@@ -117,12 +131,14 @@
 extern int i915_irq_wait(DRM_IOCTL_ARGS);
 
 extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(drm_device_t * dev);
 extern void i915_driver_irq_postinstall(drm_device_t * dev);
 extern void i915_driver_irq_uninstall(drm_device_t * dev);
 extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
 extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
+extern int i915_vblank_swap(DRM_IOCTL_ARGS);
 
 /* i915_mem.c */
 extern int i915_mem_alloc(DRM_IOCTL_ARGS);
@@ -256,6 +272,10 @@
 
 #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
 
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+
 #define MI_BATCH_BUFFER 	((0x30<<23)|1)
 #define MI_BATCH_BUFFER_START 	(0x31<<23)
 #define MI_BATCH_BUFFER_END 	(0xA<<23)
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 0d4a162..e5463b1 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -37,6 +37,99 @@
 
 #define MAX_NOPID ((u32)~0)
 
+/**
+ * Emit blits for scheduled buffer swaps.
+ *
+ * This function will be called with the HW lock held.
+ */
+static void i915_vblank_tasklet(drm_device_t *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+	struct list_head *list, *tmp;
+
+	DRM_DEBUG("\n");
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
+		drm_i915_vbl_swap_t *vbl_swap =
+			list_entry(list, drm_i915_vbl_swap_t, head);
+		atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 :
+			&dev->vbl_received;
+
+		if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) {
+			drm_drawable_info_t *drw;
+
+			spin_unlock(&dev_priv->swaps_lock);
+
+			spin_lock(&dev->drw_lock);
+
+			drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
+
+			if (drw) {
+				int i, num_rects = drw->num_rects;
+				drm_clip_rect_t *rect = drw->rects;
+				drm_i915_sarea_t *sarea_priv =
+				    dev_priv->sarea_priv;
+				u32 cpp = dev_priv->cpp;
+				u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
+							XY_SRC_COPY_BLT_WRITE_ALPHA |
+							XY_SRC_COPY_BLT_WRITE_RGB)
+						     : XY_SRC_COPY_BLT_CMD;
+				u32 pitchropcpp = (sarea_priv->pitch * cpp) |
+						  (0xcc << 16) | (cpp << 23) |
+						  (1 << 24);
+				RING_LOCALS;
+
+				i915_kernel_lost_context(dev);
+
+				BEGIN_LP_RING(6);
+
+				OUT_RING(GFX_OP_DRAWRECT_INFO);
+				OUT_RING(0);
+				OUT_RING(0);
+				OUT_RING(sarea_priv->width |
+					 sarea_priv->height << 16);
+				OUT_RING(sarea_priv->width |
+					 sarea_priv->height << 16);
+				OUT_RING(0);
+
+				ADVANCE_LP_RING();
+
+				sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
+
+				for (i = 0; i < num_rects; i++, rect++) {
+					BEGIN_LP_RING(8);
+
+					OUT_RING(cmd);
+					OUT_RING(pitchropcpp);
+					OUT_RING((rect->y1 << 16) | rect->x1);
+					OUT_RING((rect->y2 << 16) | rect->x2);
+					OUT_RING(sarea_priv->front_offset);
+					OUT_RING((rect->y1 << 16) | rect->x1);
+					OUT_RING(pitchropcpp & 0xffff);
+					OUT_RING(sarea_priv->back_offset);
+
+					ADVANCE_LP_RING();
+				}
+			}
+
+			spin_unlock(&dev->drw_lock);
+
+			spin_lock(&dev_priv->swaps_lock);
+
+			list_del(list);
+
+			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+			dev_priv->swaps_pending--;
+		}
+	}
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	drm_device_t *dev = (drm_device_t *) arg;
@@ -60,9 +153,26 @@
 		DRM_WAKEUP(&dev_priv->irq_queue);
 
 	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
-		atomic_inc(&dev->vbl_received);
+		int vblank_pipe = dev_priv->vblank_pipe;
+
+		if ((vblank_pipe &
+		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+			if (temp & VSYNC_PIPEA_FLAG)
+				atomic_inc(&dev->vbl_received);
+			if (temp & VSYNC_PIPEB_FLAG)
+				atomic_inc(&dev->vbl_received2);
+		} else if (((temp & VSYNC_PIPEA_FLAG) &&
+			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+			   ((temp & VSYNC_PIPEB_FLAG) &&
+			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+			atomic_inc(&dev->vbl_received);
+
 		DRM_WAKEUP(&dev->vbl_queue);
 		drm_vbl_send_signals(dev);
+
+		if (dev_priv->swaps_pending > 0)
+			drm_locked_tasklet(dev, i915_vblank_tasklet);
 	}
 
 	return IRQ_HANDLED;
@@ -120,7 +230,8 @@
 	return ret;
 }
 
-int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
+				      atomic_t *counter)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned int cur_vblank;
@@ -132,7 +243,7 @@
 	}
 
 	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
+		    (((cur_vblank = atomic_read(counter))
 			- *sequence) <= (1<<23)));
 	
 	*sequence = cur_vblank;
@@ -141,6 +252,16 @@
 }
 
 
+int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+{
+	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
+}
+
+int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
+{
+	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+}
+
 /* Needs the lock as it touches the ring.
  */
 int i915_irq_emit(DRM_IOCTL_ARGS)
@@ -189,7 +310,7 @@
 	return i915_wait_irq(dev, irqwait.irq_seq);
 }
 
-static int i915_enable_interrupt (drm_device_t *dev)
+static void i915_enable_interrupt (drm_device_t *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u16 flag;
@@ -199,13 +320,8 @@
 		flag |= VSYNC_PIPEA_FLAG;
 	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
 		flag |= VSYNC_PIPEB_FLAG;
-	if (dev_priv->vblank_pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
-		DRM_ERROR("%s called with invalid pipe 0x%x\n",
-			  __FUNCTION__, dev_priv->vblank_pipe);
-		return DRM_ERR(EINVAL);
-	}
+
 	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
-	return 0;
 }
 
 /* Set the vblank monitor pipe
@@ -224,8 +340,17 @@
 	DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
 				 sizeof(pipe));
 
+	if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
+		DRM_ERROR("%s called with invalid pipe 0x%x\n", 
+			  __FUNCTION__, pipe.pipe);
+		return DRM_ERR(EINVAL);
+	}
+
 	dev_priv->vblank_pipe = pipe.pipe;
-	return i915_enable_interrupt (dev);
+
+	i915_enable_interrupt (dev);
+
+	return 0;
 }
 
 int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
@@ -251,6 +376,118 @@
 	return 0;
 }
 
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+int i915_vblank_swap(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_swap_t swap;
+	drm_i915_vbl_swap_t *vbl_swap;
+	unsigned int pipe, seqtype, curseq;
+	unsigned long irqflags;
+	struct list_head *list;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (dev_priv->sarea_priv->rotation) {
+		DRM_DEBUG("Rotation not supported\n");
+		return DRM_ERR(EINVAL);
+	}
+
+	DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
+				 sizeof(swap));
+
+	if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
+			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
+		DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
+		return DRM_ERR(EINVAL);
+	}
+
+	pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+
+	seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
+
+	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
+		DRM_ERROR("Invalid pipe %d\n", pipe);
+		return DRM_ERR(EINVAL);
+	}
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+	if (!drm_get_drawable_info(dev, swap.drawable)) {
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+		DRM_ERROR("Invalid drawable ID %d\n", swap.drawable);
+		return DRM_ERR(EINVAL);
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+
+	if (seqtype == _DRM_VBLANK_RELATIVE)
+		swap.sequence += curseq;
+
+	if ((curseq - swap.sequence) <= (1<<23)) {
+		if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
+			swap.sequence = curseq + 1;
+		} else {
+			DRM_DEBUG("Missed target sequence\n");
+			return DRM_ERR(EINVAL);
+		}
+	}
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_for_each(list, &dev_priv->vbl_swaps.head) {
+		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+
+		if (vbl_swap->drw_id == swap.drawable &&
+		    vbl_swap->pipe == pipe &&
+		    vbl_swap->sequence == swap.sequence) {
+			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+			DRM_DEBUG("Already scheduled\n");
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+
+	if (dev_priv->swaps_pending >= 100) {
+		DRM_DEBUG("Too many swaps queued\n");
+		return DRM_ERR(EBUSY);
+	}
+
+	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+	if (!vbl_swap) {
+		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		return DRM_ERR(ENOMEM);
+	}
+
+	DRM_DEBUG("\n");
+
+	vbl_swap->drw_id = swap.drawable;
+	vbl_swap->pipe = pipe;
+	vbl_swap->sequence = swap.sequence;
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending++;
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+
+	DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
+			       sizeof(swap));
+
+	return 0;
+}
+
 /* drm_dma.h hooks
 */
 void i915_driver_irq_preinstall(drm_device_t * dev)
@@ -266,6 +503,12 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+	dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
+	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending = 0;
+
+	if (!dev_priv->vblank_pipe)
+		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
 	i915_enable_interrupt(dev);
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6b18675..db49886 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -85,22 +85,19 @@
 	return list_empty(&wq->list);
 }
 
+/*
+ * Set the workqueue on which a work item is to be run
+ * - Must *only* be called if the pending flag is set
+ */
 static inline void set_wq_data(struct work_struct *work, void *wq)
 {
-	unsigned long new, old, res;
+	unsigned long new;
 
-	/* assume the pending flag is already set and that the task has already
-	 * been queued on this workqueue */
+	BUG_ON(!work_pending(work));
+
 	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-	res = work->management;
-	if (res != new) {
-		do {
-			old = res;
-			new = (unsigned long) wq;
-			new |= (old & WORK_STRUCT_FLAG_MASK);
-			res = cmpxchg(&work->management, old, new);
-		} while (res != old);
-	}
+	new |= work->management & WORK_STRUCT_FLAG_MASK;
+	work->management = new;
 }
 
 static inline void *get_wq_data(struct work_struct *work)