Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [VIDEO] sunxvr500fb: Fix pseudo_palette array size
  [VIDEO] sunxvr2500fb: Fix pseudo_palette array size
  [VIDEO] ffb: The pseudo_palette is only 16 elements long
  [VIDEO]: Fix section mismatch warning in promcon.
  [ATA]: Back out bogus (SPARC64 && !PCI) Kconfig depends.
  [SPARC64]: Fill in gaps in non-PCI dma_*() NOP implementation.
  [SPARC64]: Fix {mc,smt}_capable().
  [SPARC64]: Make core and sibling groups equal on UltraSPARC-IV.
  [SPARC64]: Proper multi-core scheduling support.
  [SPARC64]: Provide mmu statistics via sysfs.
  [SPARC64]: Fix service channel hypervisor function names.
  [SPARC64]: Export basic cpu properties via sysfs.
  [SPARC64]: Move topology init code into new file, sysfs.c
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index b49ce16..d42d981 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1,7 +1,6 @@
            Booting the Linux/ppc kernel without Open Firmware
            --------------------------------------------------
 
-
 (c) 2005 Benjamin Herrenschmidt <benh at kernel.crashing.org>,
     IBM Corp.
 (c) 2005 Becky Bruce <becky.bruce at freescale.com>,
@@ -9,6 +8,62 @@
 (c) 2006 MontaVista Software, Inc.
     Flash chip node definition
 
+Table of Contents
+=================
+
+  I - Introduction
+    1) Entry point for arch/powerpc
+    2) Board support
+
+  II - The DT block format
+    1) Header
+    2) Device tree generalities
+    3) Device tree "structure" block
+    4) Device tree "strings" block
+
+  III - Required content of the device tree
+    1) Note about cells and address representation
+    2) Note about "compatible" properties
+    3) Note about "name" properties
+    4) Note about node and property names and character set
+    5) Required nodes and properties
+      a) The root node
+      b) The /cpus node
+      c) The /cpus/* nodes
+      d) The /memory node(s)
+      e) The /chosen node
+      f) The /soc<SOCname> node
+
+  IV - "dtc", the device tree compiler
+
+  V - Recommendations for a bootloader
+
+  VI - System-on-a-chip devices and nodes
+    1) Defining child nodes of an SOC
+    2) Representing devices without a current OF specification
+      a) MDIO IO device
+      b) Gianfar-compatible ethernet nodes
+      c) PHY nodes
+      d) Interrupt controllers
+      e) I2C
+      f) Freescale SOC USB controllers
+      g) Freescale SOC SEC Security Engines
+      h) Board Control and Status (BCSR)
+      i) Freescale QUICC Engine module (QE)
+      j) Flash chip nodes
+
+  VII - Specifying interrupt information for devices
+    1) interrupts property
+    2) interrupt-parent property
+    3) OpenPIC Interrupt Controllers
+    4) ISA Interrupt Controllers
+
+  Appendix A - Sample SOC node for MPC8540
+
+
+Revision Information
+====================
+
    May 18, 2005: Rev 0.1 - Initial draft, no chapter III yet.
 
    May 19, 2005: Rev 0.2 - Add chapter III and bits & pieces here or
@@ -1687,7 +1742,7 @@
 		};
 	};
 
-    g) Flash chip nodes
+    j) Flash chip nodes
 
     Flash chips (Memory Technology Devices) are often used for solid state
     file systems on embedded devices.
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 5a4215c..f1c4dfc 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -13,6 +13,7 @@
 
 	.text
 	/* a procedure descriptor used when booting this as a COFF file */
+	.globl	_zimage_start_opd
 _zimage_start_opd:
 	.long	_zimage_start, 0, 0, 0
 
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index d501c23..d454f61 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -433,7 +433,7 @@
 	 * Note also that we don't do ISA, this will also be fixed with a
 	 * more massive rework.
 	 */
-	pci_setup_phb_io(phb, 0);
+	pci_setup_phb_io(phb, pci_io_base == 0);
 
 	/* Init pci_dn data structures */
 	pci_devs_phb_init_dynamic(phb);
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index f9ac3fe..ac445998 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -67,6 +67,7 @@
 	0x00003FC000000000ull,
 };
 
+static unsigned int pmi_frequency_limit = 0;
 /*
  * hardware specific functions
  */
@@ -164,7 +165,6 @@
 
 static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
 {
-	struct cpufreq_policy policy;
 	u8 cpu;
 	u8 cbe_pmode_new;
 
@@ -173,15 +173,27 @@
 	cpu = cbe_node_to_cpu(pmi_msg.data1);
 	cbe_pmode_new = pmi_msg.data2;
 
-	cpufreq_get_policy(&policy, cpu);
+	pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency;
 
-	policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
-	policy.min = min(policy.min, policy.max);
-
-	pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
-	cpufreq_set_policy(&policy);
+	pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit);
 }
 
+static int pmi_notifier(struct notifier_block *nb,
+				       unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	if (event != CPUFREQ_INCOMPATIBLE)
+		return 0;
+
+	cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit);
+	return 0;
+}
+
+static struct notifier_block pmi_notifier_block = {
+	.notifier_call = pmi_notifier,
+};
+
 static struct pmi_handler cbe_pmi_handler = {
 	.type			= PMI_TYPE_FREQ_CHANGE,
 	.handle_pmi_message	= cbe_cpufreq_handle_pmi,
@@ -238,12 +250,21 @@
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
 
+	if (pmi_dev) {
+		/* the frequency may be limited later; initialize the limit to max_freq */
+		pmi_frequency_limit = max_freq;
+		cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+	}
+
 	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
 	return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
+	if (pmi_dev)
+		cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
 	cpufreq_frequency_table_put_attr(policy->cpu);
 	return 0;
 }
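
Rather than calling cpufreq_get_policy() and cpufreq_set_policy() from the
PMI message handler, the driver above now only records the PMI-imposed
ceiling and lets a CPUFREQ_POLICY_NOTIFIER clamp every subsequent policy
change through cpufreq_verify_within_limits().  A minimal sketch of that
notifier pattern against the 2.6-era cpufreq API follows; the module and
variable names are illustrative, not part of the patch:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Externally imposed ceiling in kHz; an illustrative stand-in for
 * pmi_frequency_limit above. */
static unsigned int demo_freq_limit = UINT_MAX;

/* Clamp each policy update so policy->max never exceeds the limit. */
static int demo_policy_notifier(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	/* Narrows [policy->min, policy->max] into [0, demo_freq_limit]. */
	cpufreq_verify_within_limits(policy, 0, demo_freq_limit);
	return 0;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_policy_notifier,
};

static int __init demo_init(void)
{
	return cpufreq_register_notifier(&demo_nb, CPUFREQ_POLICY_NOTIFIER);
}

static void __exit demo_exit(void)
{
	cpufreq_unregister_notifier(&demo_nb, CPUFREQ_POLICY_NOTIFIER);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
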
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 8654749..7c51cb5 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -39,7 +39,7 @@
 	if (spu_init_csa(&ctx->csa))
 		goto out_free;
 	spin_lock_init(&ctx->mmio_lock);
-	spin_lock_init(&ctx->mapping_lock);
+	mutex_init(&ctx->mapping_lock);
 	kref_init(&ctx->kref);
 	mutex_init(&ctx->state_mutex);
 	mutex_init(&ctx->run_mutex);
@@ -103,6 +103,7 @@
 
 void spu_unmap_mappings(struct spu_context *ctx)
 {
+	mutex_lock(&ctx->mapping_lock);
 	if (ctx->local_store)
 		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
 	if (ctx->mfc)
@@ -117,6 +118,7 @@
 		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
 	if (ctx->psmap)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
+	mutex_unlock(&ctx->mapping_lock);
 }
 
 /**
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 45614c73..b1e7e2f 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -45,11 +45,11 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->local_store = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -59,10 +59,10 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->local_store = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -217,6 +217,7 @@
 
 static const struct file_operations spufs_mem_fops = {
 	.open	 		= spufs_mem_open,
+	.release 		= spufs_mem_release,
 	.read   		= spufs_mem_read,
 	.write   		= spufs_mem_write,
 	.llseek  		= generic_file_llseek,
@@ -309,11 +310,11 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->cntl = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return simple_attr_open(inode, file, spufs_cntl_get,
 					spufs_cntl_set, "0x%08lx");
 }
@@ -326,10 +327,10 @@
 
 	simple_attr_close(inode, file);
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->cntl = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -812,11 +813,11 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->signal1 = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -826,10 +827,10 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->signal1 = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -936,11 +937,11 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->signal2 = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -950,10 +951,10 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->signal2 = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1154,10 +1155,10 @@
 
 	file->private_data = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!i->i_openers++)
 		ctx->mss = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1167,10 +1168,10 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->mss = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1211,11 +1212,11 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = i->i_ctx;
 	if (!i->i_openers++)
 		ctx->psmap = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1225,10 +1226,10 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->psmap = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1281,11 +1282,11 @@
 	if (atomic_read(&inode->i_count) != 1)
 		return -EBUSY;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->mfc = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1295,10 +1296,10 @@
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->mfc = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
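
Together with the context.c hunks above and the spufs.h change below, this
converts ctx->mapping_lock from a spinlock into a mutex and takes it across
spu_unmap_mappings(), so the i_openers bookkeeping can no longer race with
unmapping; a sleeping lock is used, presumably because code in the covered
sections may sleep.  A minimal sketch of the first-opener-publishes /
last-closer-clears idiom that the file.c hunks all repeat (the struct and
names are hypothetical, not spufs's real spu_context):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

/* Hypothetical per-object state for illustration. */
struct demo_ctx {
	struct mutex map_lock;		/* guards openers and mapping */
	int openers;			/* open file references */
	struct address_space *mapping;	/* valid only while openers > 0 */
};

static int demo_open(struct inode *inode, struct file *file,
		     struct demo_ctx *ctx)
{
	mutex_lock(&ctx->map_lock);
	if (!ctx->openers++)		/* first opener publishes it */
		ctx->mapping = inode->i_mapping;
	mutex_unlock(&ctx->map_lock);
	return 0;
}

static int demo_release(struct inode *inode, struct file *file,
			struct demo_ctx *ctx)
{
	mutex_lock(&ctx->map_lock);
	if (!--ctx->openers)		/* last closer hides it again */
		ctx->mapping = NULL;
	mutex_unlock(&ctx->map_lock);
	return 0;
}

/*
 * Teardown takes the same mutex, so it sees either a published mapping
 * or NULL, never a torn update; holelen 0 means "to end of file".
 */
static void demo_unmap(struct demo_ctx *ctx)
{
	mutex_lock(&ctx->map_lock);
	if (ctx->mapping)
		unmap_mapping_range(ctx->mapping, 0, 0, 1);
	mutex_unlock(&ctx->map_lock);
}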
 
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 7150730..9807206 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -177,7 +177,7 @@
 static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
 			  int mode, struct spu_context *ctx)
 {
-	struct dentry *dentry;
+	struct dentry *dentry, *tmp;
 	int ret;
 
 	while (files->name && files->name[0]) {
@@ -193,7 +193,20 @@
 	}
 	return 0;
 out:
-	spufs_prune_dir(dir);
+	/*
+	 * Remove all children from dir. dir->inode is not set, so we
+	 * cannot simply use spufs_prune_dir() here (it would panic).
+	 * dput() does the right thing instead:
+	 * - decrements the parent's reference counter
+	 * - removes the child from the parent's child list
+	 * - frees the child's inode if possible
+	 * - frees the child
+	 */
+	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+		dput(dentry);
+	}
+
+	shrink_dcache_parent(dir);
 	return ret;
 }
 
@@ -274,6 +287,7 @@
 	goto out;
 
 out_free_ctx:
+	spu_forget(ctx);
 	put_spu_context(ctx);
 out_iput:
 	iput(inode);
@@ -349,37 +363,6 @@
 	return ret;
 }
 
-static int spufs_rmgang(struct inode *root, struct dentry *dir)
-{
-	/* FIXME: this fails if the dir is not empty,
-	          which causes a leak of gangs. */
-	return simple_rmdir(root, dir);
-}
-
-static int spufs_gang_close(struct inode *inode, struct file *file)
-{
-	struct inode *parent;
-	struct dentry *dir;
-	int ret;
-
-	dir = file->f_path.dentry;
-	parent = dir->d_parent->d_inode;
-
-	ret = spufs_rmgang(parent, dir);
-	WARN_ON(ret);
-
-	return dcache_dir_close(inode, file);
-}
-
-const struct file_operations spufs_gang_fops = {
-	.open		= dcache_dir_open,
-	.release	= spufs_gang_close,
-	.llseek		= dcache_dir_lseek,
-	.read		= generic_read_dir,
-	.readdir	= dcache_readdir,
-	.fsync		= simple_sync_file,
-};
-
 static int
 spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
 {
@@ -407,7 +390,6 @@
 	inode->i_fop = &simple_dir_operations;
 
 	d_instantiate(dentry, inode);
-	dget(dentry);
 	dir->i_nlink++;
 	dentry->d_inode->i_nlink++;
 	return ret;
@@ -437,7 +419,7 @@
 		goto out;
 	}
 
-	filp->f_op = &spufs_gang_fops;
+	filp->f_op = &simple_dir_operations;
 	fd_install(ret, filp);
 out:
 	return ret;
@@ -458,8 +440,10 @@
 	 * in error path of *_open().
 	 */
 	ret = spufs_gang_open(dget(dentry), mntget(mnt));
-	if (ret < 0)
-		WARN_ON(spufs_rmgang(inode, dentry));
+	if (ret < 0) {
+		int err = simple_rmdir(inode, dentry);
+		WARN_ON(err);
+	}
 
 out:
 	mutex_unlock(&inode->i_mutex);
@@ -600,6 +584,10 @@
 	struct inode *inode;
 	int ret;
 
+	ret = -ENODEV;
+	if (!spu_management_ops)
+		goto out;
+
 	ret = -ENOMEM;
 	inode = spufs_new_inode(sb, S_IFDIR | 0775);
 	if (!inode)
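
The new error path in spufs_fill_dir() above walks dir->d_subdirs with
list_for_each_entry_safe() because each dput() may unlink and free the
dentry being visited; the _safe variant caches the next node before the
loop body runs.  A self-contained sketch of that delete-while-iterating
pattern, on a hypothetical list rather than the dcache:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head link;
	int val;
};

/* Drain and free a list.  The _safe iterator keeps a lookahead
 * pointer (n), so freeing pos does not derail the walk; the plain
 * list_for_each_entry() would read pos->link.next after kfree(pos). */
static void demo_drain(struct list_head *head)
{
	struct demo_node *pos, *n;

	list_for_each_entry_safe(pos, n, head, link) {
		list_del(&pos->link);	/* unlink, as dput() does */
		kfree(pos);		/* pos is dead; n is still valid */
	}
}
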
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index b6ecb30..3b831e0 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -93,43 +93,6 @@
 	}
 }
 
-void spu_sched_tick(struct work_struct *work)
-{
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	struct spu *spu;
-	int preempted = 0;
-
-	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
-	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-		return;
-
-	mutex_lock(&ctx->state_mutex);
-	spu = ctx->spu;
-	if (spu) {
-		int best = sched_find_first_bit(spu_prio->bitmap);
-		if (best <= ctx->prio) {
-			spu_deactivate(ctx);
-			preempted = 1;
-		}
-	}
-	mutex_unlock(&ctx->state_mutex);
-
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
-	} else
-		spu_start_tick(ctx);
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu:	spu to add to the active list
@@ -273,34 +236,6 @@
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu:       spu available
- *
- * This function is called whenever a spu becomes idle.  It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
-	int best;
-
-	spu_free(spu);
-
-	spin_lock(&spu_prio->runq_lock);
-	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < MAX_PRIO) {
-		struct list_head *rq = &spu_prio->runq[best];
-		struct spu_context *ctx;
-
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
-		wake_up(&ctx->stop_wq);
-	}
-	spin_unlock(&spu_prio->runq_lock);
-}
-
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
@@ -429,6 +364,51 @@
 }
 
 /**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context from the runqueue, provided it
+ * beats @prio, and return it.  Returns %NULL if no such context exists.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+	struct spu_context *ctx = NULL;
+	int best;
+
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < prio) {
+		struct list_head *rq = &spu_prio->runq[best];
+
+		BUG_ON(list_empty(rq));
+
+		ctx = list_entry(rq->next, struct spu_context, rq);
+		__spu_del_from_rq(ctx);
+	}
+	spin_unlock(&spu_prio->runq_lock);
+
+	return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+	struct spu *spu = ctx->spu;
+	struct spu_context *new = NULL;
+
+	if (spu) {
+		new = grab_runnable_context(max_prio);
+		if (new || force) {
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			if (new)
+				wake_up(&new->stop_wq);
+		}
+
+	}
+
+	return new != NULL;
+}
+
+/**
  * spu_deactivate - unbind a context from its physical spu
  * @ctx:	spu context to unbind
  *
@@ -437,12 +417,7 @@
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu = ctx->spu;
-
-	if (spu) {
-		spu_unbind_context(spu, ctx);
-		spu_reschedule(spu);
-	}
+	__spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
@@ -455,21 +430,43 @@
  */
 void spu_yield(struct spu_context *ctx)
 {
-	struct spu *spu;
-
-	if (mutex_trylock(&ctx->state_mutex)) {
-		if ((spu = ctx->spu) != NULL) {
-			int best = sched_find_first_bit(spu_prio->bitmap);
-			if (best < MAX_PRIO) {
-				pr_debug("%s: yielding SPU %d NODE %d\n",
-					 __FUNCTION__, spu->number, spu->node);
-				spu_deactivate(ctx);
-			}
-		}
+	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
+		mutex_lock(&ctx->state_mutex);
+		__spu_deactivate(ctx, 0, MAX_PRIO);
 		mutex_unlock(&ctx->state_mutex);
 	}
 }
 
+void spu_sched_tick(struct work_struct *work)
+{
+	struct spu_context *ctx =
+		container_of(work, struct spu_context, sched_work.work);
+	int preempted;
+
+	/*
+	 * If this context is being stopped avoid rescheduling from the
+	 * scheduler tick because we would block on the state_mutex.
+	 * The caller will yield the spu later on anyway.
+	 */
+	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+		return;
+
+	mutex_lock(&ctx->state_mutex);
+	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+	mutex_unlock(&ctx->state_mutex);
+
+	if (preempted) {
+		/*
+		 * We need to break out of the wait loop in spu_run manually
+		 * to ensure this context gets put on the runqueue again
+		 * ASAP.
+		 */
+		wake_up(&ctx->stop_wq);
+	} else {
+		spu_start_tick(ctx);
+	}
+}
+
 int __init spu_sched_init(void)
 {
 	int i;
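
The rework above routes every preemption decision through
grab_runnable_context() and __spu_deactivate(): spu_deactivate() forces the
unbind (force = 1, cutoff MAX_PRIO), spu_yield() gives up the spu only if
some context is waiting, and the time-slice tick passes ctx->prio + 1 so a
waiter of equal or better priority takes over (round-robin among equals).
A small sketch of the priority-bitmap lookup behind those cutoffs, written
against generic bitops rather than the scheduler's sched_find_first_bit();
the names are illustrative:

#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/types.h>

#define DEMO_MAX_PRIO	140	/* mirrors the MAX_PRIO-sized runqueue */

/* One FIFO of waiting contexts per priority; a set bit marks a
 * non-empty list, so "find first bit" means "find best waiter". */
struct demo_runq {
	DECLARE_BITMAP(bitmap, DEMO_MAX_PRIO);
	struct list_head queue[DEMO_MAX_PRIO];
};

/* Best waiting priority, or DEMO_MAX_PRIO when nothing is queued. */
static int demo_best_waiter(struct demo_runq *rq)
{
	return find_first_bit(rq->bitmap, DEMO_MAX_PRIO);
}

/* grab_runnable_context()'s test in isolation: with cutoff p + 1 a
 * context running at prio p yields to an equal-or-better waiter;
 * with cutoff DEMO_MAX_PRIO it yields to any waiter at all. */
static bool demo_preempts(struct demo_runq *rq, int cutoff)
{
	return demo_best_waiter(rq) < cutoff;
}
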
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0a947fd..47617e8 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -55,7 +55,7 @@
 	struct address_space *signal2;	   /* 'signal2' area mappings. */
 	struct address_space *mss;	   /* 'mss' area mappings. */
 	struct address_space *psmap;	   /* 'psmap' area mappings. */
-	spinlock_t mapping_lock;
+	struct mutex mapping_lock;
 	u64 object_id;		   /* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
diff --git a/arch/powerpc/platforms/celleb/Makefile b/arch/powerpc/platforms/celleb/Makefile
index f4f8252..5240046 100644
--- a/arch/powerpc/platforms/celleb/Makefile
+++ b/arch/powerpc/platforms/celleb/Makefile
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PPC_UDBG_BEAT)	+= udbg_beat.o
-obj-$(CONFIG_HAS_TXX9_SERIAL)	+= scc_sio.o
+obj-$(CONFIG_SERIAL_TXX9)	+= scc_sio.o
 obj-$(CONFIG_SPU_BASE)		+= spu_priv1.o
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 95fa6a7..f33b21b 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -31,8 +31,6 @@
 #define IOBMAP_PAGE_SIZE	(1 << IOBMAP_PAGE_SHIFT)
 #define IOBMAP_PAGE_MASK	(IOBMAP_PAGE_SIZE - 1)
 
-#define IOBMAP_PAGE_FACTOR	(PAGE_SHIFT - IOBMAP_PAGE_SHIFT)
-
 #define IOB_BASE		0xe0000000
 #define IOB_SIZE		0x3000
 /* Configuration registers */
@@ -97,9 +95,6 @@
 
 	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-	npages <<= IOBMAP_PAGE_FACTOR;
-	index <<= IOBMAP_PAGE_FACTOR;
-
 	ip = ((u32 *)tbl->it_base) + index;
 
 	while (npages--) {
@@ -125,9 +120,6 @@
 
 	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-	npages <<= IOBMAP_PAGE_FACTOR;
-	index <<= IOBMAP_PAGE_FACTOR;
-
 	ip = ((u32 *)tbl->it_base) + index;
 
 	while (npages--) {
diff --git a/kernel/signal.c b/kernel/signal.c
index acdfc05..fe590e0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -105,7 +105,11 @@
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
 		return 1;
 	}
-	clear_tsk_thread_flag(t, TIF_SIGPENDING);
+	/*
+	 * We must never clear the flag in another thread, or in current
+	 * when it's possible the current syscall is returning -ERESTART*.
+	 * So we don't clear it here; only callers that know it is safe do so.
+	 */
 	return 0;
 }
 
@@ -121,7 +125,8 @@
 
 void recalc_sigpending(void)
 {
-	recalc_sigpending_tsk(current);
+	if (!recalc_sigpending_tsk(current))
+		clear_thread_flag(TIF_SIGPENDING);
 }
 
 /* Given the mask, find the first available signal that should be serviced. */
@@ -385,7 +391,8 @@
 			}
 		}
 	}
-	recalc_sigpending_tsk(tsk);
+	if (likely(tsk == current))
+		recalc_sigpending();
 	if (signr && unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal.  Our
@@ -1580,8 +1587,9 @@
 	/*
 	 * Queued signals ignored us while we were stopped for tracing.
 	 * So check for any that we should take before resuming user mode.
+	 * This sets TIF_SIGPENDING, but never clears it.
 	 */
-	recalc_sigpending();
+	recalc_sigpending_tsk(current);
 }
 
 void ptrace_notify(int exit_code)
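
The signal.c hunks make TIF_SIGPENDING handling deliberately asymmetric:
recalc_sigpending_tsk() may only set the flag, since a spurious set merely
forces an extra signal recheck, while clearing is reserved for
recalc_sigpending() running on current, because clearing another task's
flag, or current's while a syscall may be returning -ERESTART*, could lose
a signal.  A user-space analogy of that set-from-anywhere /
clear-only-yourself discipline (hypothetical names, C11 atomics):

#include <stdatomic.h>
#include <stdbool.h>

/* A "work pending" flag standing in for one task's TIF_SIGPENDING. */
static _Atomic bool pending;

/* Any thread may SET the flag for the target; at worst the target
 * rechecks once for nothing.  (Role of recalc_sigpending_tsk().) */
static bool recalc_for_target(bool target_has_work)
{
	if (target_has_work) {
		atomic_store(&pending, true);
		return true;
	}
	/* Deliberately no clear here: someone else may have set the
	 * flag since our check, and clearing would lose that event. */
	return false;
}

/* Only the owner clears, only for itself, and only when its own
 * recheck came up empty.  (Role of recalc_sigpending().) */
static void recalc_self(bool i_have_work)
{
	if (!recalc_for_target(i_have_work))
		atomic_store(&pending, false);
}
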
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
old mode 100644
new mode 100755