Merge branch 'fixes-for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'fixes-for-linus' of git://git.monstr.eu/linux-2.6-microblaze:
  microblaze: Clear sticky FSR register after saving it to func parameter
  microblaze: UMS is used only for MMU kernel
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6fa7292..9107b38 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -671,6 +671,7 @@
 	earlyprintk=	[X86,SH,BLACKFIN]
 			earlyprintk=vga
 			earlyprintk=serial[,ttySn[,baudrate]]
+			earlyprintk=ttySn[,baudrate]
 			earlyprintk=dbgp[debugController#]
 
 			Append ",keep" to not disable it when the real console
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 594ee0e..9a8876f 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -45,25 +45,25 @@
 	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
 
 	/* offsets into the pt_regs */
-	DEFINE(PT_D0, offsetof(struct pt_regs, d0));
-	DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
-	DEFINE(PT_D1, offsetof(struct pt_regs, d1));
-	DEFINE(PT_D2, offsetof(struct pt_regs, d2));
-	DEFINE(PT_D3, offsetof(struct pt_regs, d3));
-	DEFINE(PT_D4, offsetof(struct pt_regs, d4));
-	DEFINE(PT_D5, offsetof(struct pt_regs, d5));
-	DEFINE(PT_A0, offsetof(struct pt_regs, a0));
-	DEFINE(PT_A1, offsetof(struct pt_regs, a1));
-	DEFINE(PT_A2, offsetof(struct pt_regs, a2));
-	DEFINE(PT_PC, offsetof(struct pt_regs, pc));
-	DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
 
 #ifdef CONFIG_COLDFIRE
 	/* bitfields are a bit difficult */
-	DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
 #else
 	/* bitfields are a bit difficult */
-	DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+	DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
 #endif
 
 	/* signal defines */
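
For context, the PT_ -> PT_OFF_ rename (presumably to keep these offsets
from colliding with the PT_* register indices in asm/ptrace.h) changes
only the symbol names the assembler sees; the values still come from the
standard asm-offsets machinery. A minimal compilable sketch of that
mechanism (the struct is a stand-in, but DEFINE() matches
include/linux/kbuild.h):

	#include <stddef.h>

	struct pt_regs_sketch {			/* stand-in for the m68k pt_regs */
		long d0;
		long orig_d0;
	};

	/* Emits "->PT_OFF_D0 <offset>" into the generated assembly, which
	 * Kbuild post-processes into a #define usable from entry.S. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	void emit_offsets(void)
	{
		DEFINE(PT_OFF_D0, offsetof(struct pt_regs_sketch, d0));
	}
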
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index f56faa5..56043ad 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -46,7 +46,7 @@
 ENTRY(buserr)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	buserr_c
 	addql	#4,%sp
@@ -55,7 +55,7 @@
 ENTRY(trap)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	trap_c
 	addql	#4,%sp
@@ -67,7 +67,7 @@
 ENTRY(dbginterrupt)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 	movel	%sp,%sp@- 		/* stack frame pointer argument */
 	jsr	dbginterrupt_c
 	addql	#4,%sp
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c
index b1703c6..f3236d0 100644
--- a/arch/m68knommu/mm/init.c
+++ b/arch/m68knommu/mm/init.c
@@ -162,7 +162,7 @@
 		totalram_pages++;
 		pages++;
 	}
-	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages);
+	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
 }
 #endif
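
The multiplier fixes the units: `pages` counts page frames, not
kilobytes. With 4 KiB pages, freeing 300 initrd pages now prints
300 * (4096 / 1024) = 1200k rather than the misleading 300k.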
 
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 0f41ba8..9423979 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -17,7 +17,6 @@
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
 #include <asm/mcfdma.h>
-#include <asm/mcfuart.h>
 
 /***************************************************************************/
 
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index b1aef72..9d80d2c 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -39,17 +39,17 @@
 .globl inthandler7
 
 badsys:
-	movel	#-ENOSYS,%sp@(PT_D0)
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)
 	jra	ret_from_exception
 
 do_trace:
-	movel	#-ENOSYS,%sp@(PT_D0)	/* needed for strace*/
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)	/* needed for strace*/
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	movel	%sp@(PT_ORIG_D0),%d1
+	movel	%sp@(PT_OFF_ORIG_D0),%d1
 	movel	#-ENOSYS,%d0
 	cmpl	#NR_syscalls,%d1
 	jcc	1f
@@ -57,7 +57,7 @@
 	lea	sys_call_table, %a0
 	jbsr	%a0@(%d1)
 
-1:	movel	%d0,%sp@(PT_D0)		/* save the return value */
+1:	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value */
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -75,7 +75,7 @@
 	jbsr    set_esp0
 	addql   #4,%sp
 
-	movel	%sp@(PT_ORIG_D0),%d0
+	movel	%sp@(PT_OFF_ORIG_D0),%d0
 
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1
@@ -88,10 +88,10 @@
 	lea	sys_call_table,%a0
 	movel	%a0@(%d0), %a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value*/
+	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value*/
 
 ret_from_exception:
-	btst	#5,%sp@(PT_SR)		/* check if returning to kernel*/
+	btst	#5,%sp@(PT_OFF_SR)		/* check if returning to kernel*/
 	jeq	Luser_return		/* if so, skip resched, signals*/
 
 Lkernel_return:
@@ -133,7 +133,7 @@
  */
 inthandler1:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -144,7 +144,7 @@
 
 inthandler2:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -155,7 +155,7 @@
 
 inthandler3:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -166,7 +166,7 @@
 
 inthandler4:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -177,7 +177,7 @@
 
 inthandler5:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -188,7 +188,7 @@
 
 inthandler6:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -199,7 +199,7 @@
 
 inthandler7:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -210,7 +210,7 @@
 
 inthandler:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and	#0x3ff, %d0
 
 	movel	%sp,%sp@-
@@ -224,7 +224,7 @@
 2:
 	RESTORE_ALL
 1:
-	moveb	%sp@(PT_SR), %d0
+	moveb	%sp@(PT_OFF_SR), %d0
 	and	#7, %d0
 	jhi	2b
 
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 55dfefe..6d3460a 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -35,17 +35,17 @@
 .globl inthandler
 
 badsys:
-	movel	#-ENOSYS,%sp@(PT_D0)
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)
 	jra	ret_from_exception
 
 do_trace:
-	movel	#-ENOSYS,%sp@(PT_D0)	/* needed for strace*/
+	movel	#-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	movel	%sp@(PT_ORIG_D0),%d1
+	movel	%sp@(PT_OFF_ORIG_D0),%d1
 	movel	#-ENOSYS,%d0
 	cmpl	#NR_syscalls,%d1
 	jcc	1f
@@ -53,7 +53,7 @@
 	lea	sys_call_table, %a0
 	jbsr	%a0@(%d1)
 
-1:	movel	%d0,%sp@(PT_D0)		/* save the return value */
+1:	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -79,10 +79,10 @@
 	lea	sys_call_table,%a0
 	movel	%a0@(%d0), %a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value*/
+	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value*/
 
 ret_from_exception:
-	btst	#5,%sp@(PT_SR)		/* check if returning to kernel*/
+	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel*/
 	jeq	Luser_return		/* if so, skip resched, signals*/
 
 Lkernel_return:
@@ -124,7 +124,7 @@
  */
 inthandler:
 	SAVE_ALL
-	movew	%sp@(PT_VECTOR), %d0
+	movew	%sp@(PT_OFF_VECTOR), %d0
 	and.l	#0x3ff, %d0
 	lsr.l   #0x02,  %d0
 
@@ -139,7 +139,7 @@
 2:
 	RESTORE_ALL
 1:
-	moveb	%sp@(PT_SR), %d0
+	moveb	%sp@(PT_OFF_SR), %d0
 	and	#7, %d0
 	jhi	2b
 	/* check if we need to do software interrupts */
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 3b471c0..dd7d591 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -81,11 +81,11 @@
 
 	movel	%d3,%a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value */
+	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
 	jra	ret_from_exception
 1:
-	movel	#-ENOSYS,%d2		/* strace needs -ENOSYS in PT_D0 */
-	movel	%d2,PT_D0(%sp)		/* on syscall entry */
+	movel	#-ENOSYS,%d2		/* strace needs -ENOSYS in PT_OFF_D0 */
+	movel	%d2,PT_OFF_D0(%sp)	/* on syscall entry */
 	subql	#4,%sp
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -93,7 +93,7 @@
 	addql	#4,%sp
 	movel	%d3,%a0
 	jbsr	%a0@
-	movel	%d0,%sp@(PT_D0)		/* save the return value */
+	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value */
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	jbsr	syscall_trace
@@ -104,7 +104,7 @@
 
 ret_from_exception:
 	move	#0x2700,%sr		/* disable intrs */
-	btst	#5,%sp@(PT_SR)		/* check if returning to kernel */
+	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel */
 	jeq	Luser_return		/* if so, skip resched, signals */
 
 #ifdef CONFIG_PREEMPT
@@ -142,8 +142,8 @@
 Lreturn:
 	move	#0x2700,%sr		/* disable intrs */
 	movel	sw_usp,%a0		/* get usp */
-	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
-	movel	%sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */
+	movel	%sp@(PT_OFF_PC),%a0@-	/* copy exception program counter */
+	movel	%sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
 	moveml	%sp@,%d1-%d5/%a0-%a2
 	lea	%sp@(32),%sp		/* space for 8 regs */
 	movel	%sp@+,%d0
@@ -181,9 +181,9 @@
 ENTRY(inthandler)
 	SAVE_ALL
 	moveq	#-1,%d0
-	movel	%d0,%sp@(PT_ORIG_D0)
+	movel	%d0,%sp@(PT_OFF_ORIG_D0)
 
-	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
+	movew	%sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
 	andl	#0x03fc,%d0		/* mask out vector only */
 
 	movel	%sp,%sp@-		/* push regs arg */
@@ -203,7 +203,7 @@
 ENTRY(fasthandler)
 	SAVE_LOCAL
 
-	movew	%sp@(PT_FORMATVEC),%d0
+	movew	%sp@(PT_OFF_FORMATVEC),%d0
 	andl	#0x03fc,%d0		/* mask out vector only */
 
 	movel	%sp,%sp@-		/* push regs arg */
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h
index 4f63ed8..1620076 100644
--- a/arch/sparc/include/asm/hardirq_32.h
+++ b/arch/sparc/include/asm/hardirq_32.h
@@ -7,17 +7,7 @@
 #ifndef __SPARC_HARDIRQ_H
 #define __SPARC_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
 #define HARDIRQ_BITS    8
+#include <asm-generic/hardirq.h>
 
 #endif /* __SPARC_HARDIRQ_H */
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h
index ea43057..cbf4801 100644
--- a/arch/sparc/include/asm/irq_32.h
+++ b/arch/sparc/include/asm/irq_32.h
@@ -6,10 +6,10 @@
 #ifndef _SPARC_IRQ_H
 #define _SPARC_IRQ_H
 
-#include <linux/interrupt.h>
-
 #define NR_IRQS    16
 
+#include <linux/interrupt.h>
+
 #define irq_canonicalize(irq)	(irq)
 
 extern void __init init_IRQ(void);
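
Both sparc header hunks are include-ordering cleanups: irq_32.h now
defines NR_IRQS before pulling in linux/interrupt.h (which can depend on
it), and hardirq_32.h drops its private irq_cpustat_t in favour of the
asm-generic version, keeping only the HARDIRQ_BITS override ahead of the
generic include.
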
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0ff92fa..f3cb790 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -41,8 +41,8 @@
 #define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
 #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
 #define VMALLOC_START		_AC(0x0000000100000000,UL)
-#define VMALLOC_END		_AC(0x0000000200000000,UL)
-#define VMEMMAP_BASE		_AC(0x0000000200000000,UL)
+#define VMALLOC_END		_AC(0x0000010000000000,UL)
+#define VMEMMAP_BASE		_AC(0x0000010000000000,UL)
 
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 3ea6e8c..1d36147 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -280,8 +280,8 @@
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 	/* Do not use the TSB for vmemmap.  */
-	mov		(VMEMMAP_BASE >> 24), %g5
-	sllx		%g5, 24, %g5
+	mov		(VMEMMAP_BASE >> 40), %g5
+	sllx		%g5, 40, %g5
 	cmp		%g4,%g5
 	bgeu,pn		%xcc, kvmap_vmemmap
 	 nop
@@ -293,8 +293,8 @@
 	sethi		%hi(MODULES_VADDR), %g5
 	cmp		%g4, %g5
 	blu,pn		%xcc, kvmap_dtlb_longpath
-	 mov		(VMALLOC_END >> 24), %g5
-	sllx		%g5, 24, %g5
+	 mov		(VMALLOC_END >> 40), %g5
+	sllx		%g5, 40, %g5
 	cmp		%g4, %g5
 	bgeu,pn		%xcc, kvmap_dtlb_longpath
 	 nop
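
The 24 -> 40 shift change tracks the new constants above: VMALLOC_END
and VMEMMAP_BASE moved from the old 8 GB mark to the 1 TB boundary,
which a 24-bit shift can no longer reconstruct. A C sketch of what the
mov/sllx pair leaves in %g5 (constants copied from pgtable_64.h):

	/* Sketch of ktlb.S rebuilding the boundary in %g5. */
	static unsigned long vmalloc_end_sketch(void)
	{
		unsigned long g5 = 0x0000010000000000UL >> 40;	/* 0x1: fits mov's 13-bit immediate */
		return g5 << 40;				/* == VMALLOC_END == VMEMMAP_BASE */
	}
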
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2d6a1b1..04db927 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -56,7 +56,8 @@
 	struct perf_event	*events[MAX_HWEVENTS];
 	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
 	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
-	int enabled;
+	u64			pcr;
+	int			enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -68,8 +69,30 @@
 #define PIC_LOWER	0x02
 };
 
+static unsigned long perf_event_encode(const struct perf_event_map *pmap)
+{
+	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
+}
+
+static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
+{
+	*msk = val & 0xff;
+	*enc = val >> 16;
+}
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+#define CACHE_OP_UNSUPPORTED	0xfffe
+#define CACHE_OP_NONSENSE	0xffff
+
+typedef struct perf_event_map cache_map_t
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX];
+
 struct sparc_pmu {
 	const struct perf_event_map	*(*event_map)(int);
+	const cache_map_t		*cache_map;
 	int				max_events;
 	int				upper_shift;
 	int				lower_shift;
@@ -80,21 +103,109 @@
 	int				lower_nop;
 };
 
-static const struct perf_event_map ultra3i_perfmon_event_map[] = {
+static const struct perf_event_map ultra3_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 };
 
-static const struct perf_event_map *ultra3i_event_map(int event_id)
+static const struct perf_event_map *ultra3_event_map(int event_id)
 {
-	return &ultra3i_perfmon_event_map[event_id];
+	return &ultra3_perfmon_event_map[event_id];
 }
 
-static const struct sparc_pmu ultra3i_pmu = {
-	.event_map	= ultra3i_event_map,
-	.max_events	= ARRAY_SIZE(ultra3i_perfmon_event_map),
+static const cache_map_t ultra3_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
+		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
+static const struct sparc_pmu ultra3_pmu = {
+	.event_map	= ultra3_event_map,
+	.cache_map	= &ultra3_cache_map,
+	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
 	.upper_shift	= 11,
 	.lower_shift	= 4,
 	.event_mask	= 0x3f,
@@ -102,6 +213,121 @@
 	.lower_nop	= 0x14,
 };
 
+/* Niagara1 is very limited.  The upper PIC is hard-locked to count
+ * only instructions, so it is free running which creates all kinds of
+ * problems.  Some hardware designs make one wonder if the creator
+ * even looked at how this stuff gets used by software.
+ */
+static const struct perf_event_map niagara1_perfmon_event_map[] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
+	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
+	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
+};
+
+static const struct perf_event_map *niagara1_event_map(int event_id)
+{
+	return &niagara1_perfmon_event_map[event_id];
+}
+
+static const cache_map_t niagara1_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
+		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
+static const struct sparc_pmu niagara1_pmu = {
+	.event_map	= niagara1_event_map,
+	.cache_map	= &niagara1_cache_map,
+	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
+	.upper_shift	= 0,
+	.lower_shift	= 4,
+	.event_mask	= 0x7,
+	.upper_nop	= 0x0,
+	.lower_nop	= 0x0,
+};
+
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -116,8 +342,96 @@
 	return &niagara2_perfmon_event_map[event_id];
 }
 
+static const cache_map_t niagara2_cache_map = {
+[C(L1D)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(LL)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
+		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
+	},
+	[C(OP_PREFETCH)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+[C(BPU)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+		[ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+	},
+},
+};
+
 static const struct sparc_pmu niagara2_pmu = {
 	.event_map	= niagara2_event_map,
+	.cache_map	= &niagara2_cache_map,
 	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
 	.upper_shift	= 19,
 	.lower_shift	= 6,
@@ -151,23 +465,30 @@
 			      sparc_pmu->lower_nop, idx);
 }
 
-static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
-					    int idx)
+static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
 	u64 val, mask = mask_for_index(idx);
 
-	val = pcr_ops->read();
-	pcr_ops->write((val & ~mask) | hwc->config);
+	val = cpuc->pcr;
+	val &= ~mask;
+	val |= hwc->config;
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
-static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
-					     int idx)
+static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
 	u64 mask = mask_for_index(idx);
 	u64 nop = nop_for_index(idx);
-	u64 val = pcr_ops->read();
+	u64 val;
 
-	pcr_ops->write((val & ~mask) | nop);
+	val = cpuc->pcr;
+	val &= ~mask;
+	val |= nop;
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_enable(void)
@@ -182,7 +503,7 @@
 	cpuc->enabled = 1;
 	barrier();
 
-	val = pcr_ops->read();
+	val = cpuc->pcr;
 
 	for (i = 0; i < MAX_HWEVENTS; i++) {
 		struct perf_event *cp = cpuc->events[i];
@@ -194,7 +515,9 @@
 		val |= hwc->config_base;
 	}
 
-	pcr_ops->write(val);
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_disable(void)
@@ -207,10 +530,12 @@
 
 	cpuc->enabled = 0;
 
-	val = pcr_ops->read();
+	val = cpuc->pcr;
 	val &= ~(PCR_UTRACE | PCR_STRACE |
 		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-	pcr_ops->write(val);
+	cpuc->pcr = val;
+
+	pcr_ops->write(cpuc->pcr);
 }
 
 static u32 read_pmc(int idx)
@@ -242,7 +567,7 @@
 }
 
 static int sparc_perf_event_set_period(struct perf_event *event,
-					 struct hw_perf_event *hwc, int idx)
+				       struct hw_perf_event *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
@@ -282,19 +607,19 @@
 	if (test_and_set_bit(idx, cpuc->used_mask))
 		return -EAGAIN;
 
-	sparc_pmu_disable_event(hwc, idx);
+	sparc_pmu_disable_event(cpuc, hwc, idx);
 
 	cpuc->events[idx] = event;
 	set_bit(idx, cpuc->active_mask);
 
 	sparc_perf_event_set_period(event, hwc, idx);
-	sparc_pmu_enable_event(hwc, idx);
+	sparc_pmu_enable_event(cpuc, hwc, idx);
 	perf_event_update_userpage(event);
 	return 0;
 }
 
 static u64 sparc_perf_event_update(struct perf_event *event,
-				     struct hw_perf_event *hwc, int idx)
+				   struct hw_perf_event *hwc, int idx)
 {
 	int shift = 64 - 32;
 	u64 prev_raw_count, new_raw_count;
@@ -324,7 +649,7 @@
 	int idx = hwc->idx;
 
 	clear_bit(idx, cpuc->active_mask);
-	sparc_pmu_disable_event(hwc, idx);
+	sparc_pmu_disable_event(cpuc, hwc, idx);
 
 	barrier();
 
@@ -338,18 +663,29 @@
 static void sparc_pmu_read(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+
 	sparc_perf_event_update(event, hwc, hwc->idx);
 }
 
 static void sparc_pmu_unthrottle(struct perf_event *event)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	sparc_pmu_enable_event(hwc, hwc->idx);
+
+	sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
+static void perf_stop_nmi_watchdog(void *unused)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	stop_nmi_watchdog(NULL);
+	cpuc->pcr = pcr_ops->read();
+}
+
 void perf_event_grab_pmc(void)
 {
 	if (atomic_inc_not_zero(&active_events))
@@ -358,7 +694,7 @@
 	mutex_lock(&pmc_grab_mutex);
 	if (atomic_read(&active_events) == 0) {
 		if (atomic_read(&nmi_active) > 0) {
-			on_each_cpu(stop_nmi_watchdog, NULL, 1);
+			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
 			BUG_ON(atomic_read(&nmi_active) != 0);
 		}
 		atomic_inc(&active_events);
@@ -375,30 +711,160 @@
 	}
 }
 
+static const struct perf_event_map *sparc_map_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+	const struct perf_event_map *pmap;
+
+	if (!sparc_pmu->cache_map)
+		return ERR_PTR(-ENOENT);
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return ERR_PTR(-EINVAL);
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
+
+	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
+		return ERR_PTR(-ENOENT);
+
+	if (pmap->encoding == CACHE_OP_NONSENSE)
+		return ERR_PTR(-EINVAL);
+
+	return pmap;
+}
+
 static void hw_perf_event_destroy(struct perf_event *event)
 {
 	perf_event_release_pmc();
 }
 
+/* Make sure all events can be scheduled into the hardware at
+ * the same time.  This is simplified by the fact that we only
+ * need to support 2 simultaneous HW events.
+ */
+static int sparc_check_constraints(unsigned long *events, int n_ev)
+{
+	if (n_ev <= perf_max_events) {
+		u8 msk1, msk2;
+		u16 dummy;
+
+		if (n_ev == 1)
+			return 0;
+		BUG_ON(n_ev != 2);
+		perf_event_decode(events[0], &dummy, &msk1);
+		perf_event_decode(events[1], &dummy, &msk2);
+
+		/* If both events can go on any counter, OK.  */
+		if (msk1 == (PIC_UPPER | PIC_LOWER) &&
+		    msk2 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+
+		/* If one event is limited to a specific counter,
+		 * and the other can go on both, OK.
+		 */
+		if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
+		    msk2 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+		if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
+		    msk1 == (PIC_UPPER | PIC_LOWER))
+			return 0;
+
+		/* If the events are fixed to different counters, OK.  */
+		if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
+		    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
+			return 0;
+
+		/* Otherwise, there is a conflict.  */
+	}
+
+	return -1;
+}
+
+static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
+{
+	int eu = 0, ek = 0, eh = 0;
+	struct perf_event *event;
+	int i, n, first;
+
+	n = n_prev + n_new;
+	if (n <= 1)
+		return 0;
+
+	first = 1;
+	for (i = 0; i < n; i++) {
+		event = evts[i];
+		if (first) {
+			eu = event->attr.exclude_user;
+			ek = event->attr.exclude_kernel;
+			eh = event->attr.exclude_hv;
+			first = 0;
+		} else if (event->attr.exclude_user != eu ||
+			   event->attr.exclude_kernel != ek ||
+			   event->attr.exclude_hv != eh) {
+			return -EAGAIN;
+		}
+	}
+
+	return 0;
+}
+
+static int collect_events(struct perf_event *group, int max_count,
+			  struct perf_event *evts[], unsigned long *events)
+{
+	struct perf_event *event;
+	int n = 0;
+
+	if (!is_software_event(group)) {
+		if (n >= max_count)
+			return -1;
+		evts[n] = group;
+		events[n++] = group->hw.event_base;
+	}
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
+		if (!is_software_event(event) &&
+		    event->state != PERF_EVENT_STATE_OFF) {
+			if (n >= max_count)
+				return -1;
+			evts[n] = event;
+			events[n++] = event->hw.event_base;
+		}
+	}
+	return n;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
+	struct perf_event *evts[MAX_HWEVENTS];
 	struct hw_perf_event *hwc = &event->hw;
+	unsigned long events[MAX_HWEVENTS];
 	const struct perf_event_map *pmap;
 	u64 enc;
+	int n;
 
 	if (atomic_read(&nmi_active) < 0)
 		return -ENODEV;
 
-	if (attr->type != PERF_TYPE_HARDWARE)
+	if (attr->type == PERF_TYPE_HARDWARE) {
+		if (attr->config >= sparc_pmu->max_events)
+			return -EINVAL;
+		pmap = sparc_pmu->event_map(attr->config);
+	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		pmap = sparc_map_cache_event(attr->config);
+		if (IS_ERR(pmap))
+			return PTR_ERR(pmap);
+	} else
 		return -EOPNOTSUPP;
 
-	if (attr->config >= sparc_pmu->max_events)
-		return -EINVAL;
-
-	perf_event_grab_pmc();
-	event->destroy = hw_perf_event_destroy;
-
 	/* We save the enable bits in the config_base.  So to
 	 * turn off sampling just write 'config', and to enable
 	 * things write 'config | config_base'.
@@ -411,15 +877,39 @@
 	if (!attr->exclude_hv)
 		hwc->config_base |= sparc_pmu->hv_bit;
 
+	hwc->event_base = perf_event_encode(pmap);
+
+	enc = pmap->encoding;
+
+	n = 0;
+	if (event->group_leader != event) {
+		n = collect_events(event->group_leader,
+				   perf_max_events - 1,
+				   evts, events);
+		if (n < 0)
+			return -EINVAL;
+	}
+	events[n] = hwc->event_base;
+	evts[n] = event;
+
+	if (check_excludes(evts, n, 1))
+		return -EINVAL;
+
+	if (sparc_check_constraints(events, n + 1))
+		return -EINVAL;
+
+	/* Try to do all error checking before this point, as unwinding
+	 * state after grabbing the PMC is difficult.
+	 */
+	perf_event_grab_pmc();
+	event->destroy = hw_perf_event_destroy;
+
 	if (!hwc->sample_period) {
 		hwc->sample_period = MAX_PERIOD;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	pmap = sparc_pmu->event_map(attr->config);
-
-	enc = pmap->encoding;
 	if (pmap->pic_mask & PIC_UPPER) {
 		hwc->idx = PIC_UPPER_INDEX;
 		enc <<= sparc_pmu->upper_shift;
@@ -472,7 +962,7 @@
 }
 
 static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
-					      unsigned long cmd, void *__args)
+					    unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
 	struct perf_sample_data data;
@@ -513,7 +1003,7 @@
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			sparc_pmu_disable_event(hwc, idx);
+			sparc_pmu_disable_event(cpuc, hwc, idx);
 	}
 
 	return NOTIFY_STOP;
@@ -525,8 +1015,15 @@
 
 static bool __init supported_pmu(void)
 {
-	if (!strcmp(sparc_pmu_type, "ultra3i")) {
-		sparc_pmu = &ultra3i_pmu;
+	if (!strcmp(sparc_pmu_type, "ultra3") ||
+	    !strcmp(sparc_pmu_type, "ultra3+") ||
+	    !strcmp(sparc_pmu_type, "ultra3i") ||
+	    !strcmp(sparc_pmu_type, "ultra4+")) {
+		sparc_pmu = &ultra3_pmu;
+		return true;
+	}
+	if (!strcmp(sparc_pmu_type, "niagara")) {
+		sparc_pmu = &niagara1_pmu;
 		return true;
 	}
 	if (!strcmp(sparc_pmu_type, "niagara2")) {
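
The new sparc_map_cache_event() decodes attr->config exactly as the perf
ABI packs it: cache type in byte 0, operation in byte 1, result in
byte 2, with the per-PMU cache_map supplying the hardware encoding plus
PIC mask (packed by perf_event_encode() as encoding << 16 | mask). An
illustrative userspace snippet composing such a config from the standard
UAPI enums (the helper itself is not part of the patch):

	#include <linux/perf_event.h>

	static __u64 l1d_read_miss_config(void)
	{
		return PERF_COUNT_HW_CACHE_L1D |
		       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	}

Setting perf_event_attr.type to PERF_TYPE_HW_CACHE and .config to this
value requests the L1D read-miss row of the tables above.
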
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index f97cb8b..f9024bc 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -11,6 +11,7 @@
 #include <linux/oprofile.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/param.h>	/* for HZ */
  
 #ifdef CONFIG_SPARC64
 #include <linux/notifier.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8da9374..c876bace 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -86,10 +86,6 @@
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config FAST_CMPXCHG_LOCAL
-	bool
-	default y
-
 config MMU
 	def_bool y
 
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 527519b..f2824fb 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -400,7 +400,7 @@
 
 config X86_CMPXCHG64
 	def_bool y
-	depends on X86_PAE || X86_64
+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
 
 # this should be set for all -march=.. options where the compiler
 # generates cmov.
@@ -412,6 +412,7 @@
 	int
 	default "64" if X86_64
 	default "6" if X86_32 && X86_P6_NOP
+	default "5" if X86_32 && X86_CMPXCHG64
 	default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
 	default "3"
 
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 74619c4..1733f9f 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -21,8 +21,8 @@
 #define __AUDIT_ARCH_LE	   0x40000000
 
 #ifndef CONFIG_AUDITSYSCALL
-#define sysexit_audit int_ret_from_sys_call
-#define sysretl_audit int_ret_from_sys_call
+#define sysexit_audit ia32_ret_from_sys_call
+#define sysretl_audit ia32_ret_from_sys_call
 #endif
 
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
@@ -39,12 +39,12 @@
 	.endm 
 
 	/* clobbers %eax */	
-	.macro  CLEAR_RREGS _r9=rax
+	.macro  CLEAR_RREGS offset=0, _r9=rax
 	xorl 	%eax,%eax
-	movq	%rax,R11(%rsp)
-	movq	%rax,R10(%rsp)
-	movq	%\_r9,R9(%rsp)
-	movq	%rax,R8(%rsp)
+	movq	%rax,\offset+R11(%rsp)
+	movq	%rax,\offset+R10(%rsp)
+	movq	%\_r9,\offset+R9(%rsp)
+	movq	%rax,\offset+R8(%rsp)
 	.endm
 
 	/*
@@ -172,6 +172,10 @@
 	movl	RIP-R11(%rsp),%edx		/* User %eip */
 	CFI_REGISTER rip,rdx
 	RESTORE_ARGS 1,24,1,1,1,1
+	xorq	%r8,%r8
+	xorq	%r9,%r9
+	xorq	%r10,%r10
+	xorq	%r11,%r11
 	popfq
 	CFI_ADJUST_CFA_OFFSET -8
 	/*CFI_RESTORE rflags*/
@@ -202,7 +206,7 @@
 
 	.macro auditsys_exit exit,ebpsave=RBP
 	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
-	jnz int_ret_from_sys_call
+	jnz ia32_ret_from_sys_call
 	TRACE_IRQS_ON
 	sti
 	movl %eax,%esi		/* second arg, syscall return value */
@@ -218,8 +222,9 @@
 	cli
 	TRACE_IRQS_OFF
 	testl %edi,TI_flags(%r10)
-	jnz int_with_check
-	jmp \exit
+	jz \exit
+	CLEAR_RREGS -ARGOFFSET
+	jmp int_with_check
 	.endm
 
 sysenter_auditsys:
@@ -329,6 +334,9 @@
 	CFI_REGISTER rip,rcx
 	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
 	/*CFI_REGISTER rflags,r11*/
+	xorq	%r10,%r10
+	xorq	%r9,%r9
+	xorq	%r8,%r8
 	TRACE_IRQS_ON
 	movl RSP-ARGOFFSET(%rsp),%esp
 	CFI_RESTORE rsp
@@ -353,7 +361,7 @@
 #endif
 	xchgl %r9d,%ebp
 	SAVE_REST
-	CLEAR_RREGS r9
+	CLEAR_RREGS 0, r9
 	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
@@ -425,6 +433,8 @@
 	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
 	movq %rax,RAX-ARGOFFSET(%rsp)
+ia32_ret_from_sys_call:
+	CLEAR_RREGS -ARGOFFSET
 	jmp int_ret_from_sys_call 
 
 ia32_tracesys:			 
@@ -442,8 +452,8 @@
 
 ia32_badsys:
 	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-	jmp int_ret_from_sys_call
+	movq $-ENOSYS,%rax
+	jmp ia32_sysret
 
 quiet_ni_syscall:
 	movq $-ENOSYS,%rax
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 41fd965..b9c830c 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -206,8 +206,11 @@
 
 	while (*buf != '\0') {
 		if (!strncmp(buf, "serial", 6)) {
-			early_serial_init(buf + 6);
+			buf += 6;
+			early_serial_init(buf);
 			early_console_register(&early_serial_console, keep);
+			if (!strncmp(buf, ",ttyS", 5))
+				buf += 5;
 		}
 		if (!strncmp(buf, "ttyS", 4)) {
 			early_serial_init(buf + 4);
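
This teaches the parser the combined form, advancing buf past the text
early_serial_init() has already consumed so the remaining options are
not parsed a second time; both of these now work (example values):

	earlyprintk=serial,ttyS0,115200
	earlyprintk=ttyS0,115200
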
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 1736c5a..9c3bd4a 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -15,8 +15,10 @@
  * the export, but dont use it from C code, it is used
  * by assembly code and is not using C calling convention!
  */
+#ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
 EXPORT_SYMBOL(cmpxchg8b_emu);
+#endif
 
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 3e549b8..85f5db9 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -15,8 +15,10 @@
         obj-y += atomic64_32.o
         lib-y += checksum_32.o
         lib-y += strstr_32.o
-        lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o
-
+        lib-y += semaphore_32.o string_32.o
+ifneq ($(CONFIG_X86_CMPXCHG64),y)
+        lib-y += cmpxchg8b_emu.o
+endif
         lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
 else
         obj-y += io_64.o iomap_copy_64.o
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 420a96e..051d1eb 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -939,7 +939,7 @@
 
 	if (ibft_addr) {
 		printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-		       (u64)virt_to_phys((void *)ibft_addr));
+		       (u64)isa_virt_to_bus(ibft_addr));
 
 		rc = ibft_check_device();
 		if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d53fbbf..dfb15c0 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -65,10 +65,10 @@
 		 * so skip that area */
 		if (pos == VGA_MEM)
 			pos += VGA_SIZE;
-		virt = phys_to_virt(pos);
+		virt = isa_bus_to_virt(pos);
 		if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) {
 			unsigned long *addr =
-			    (unsigned long *)phys_to_virt(pos + 4);
+			    (unsigned long *)isa_bus_to_virt(pos + 4);
 			len = *addr;
 			/* if the length of the table extends past 1M,
 			 * the table cannot be valid. */
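
Both iBFT hunks move from raw physical-address conversion to the ISA bus
helpers: the table is located by scanning legacy sub-1 MiB memory, for
which isa_bus_to_virt()/isa_virt_to_bus() are the appropriate mapping
even on configurations where ISA bus and physical addresses differ.
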
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6d70204..3e1c36e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -726,8 +726,6 @@
 	/* "Retrigger" the interrupt to get things going */
 	retrigger_next_event(NULL);
 	local_irq_restore(flags);
-	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
-	       smp_processor_id());
 	return 1;
 }
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb..e491fb0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1030,14 +1030,10 @@
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active) {
-		list_for_each_entry(event, &ctx->group_list, group_entry) {
-			if (event != event->group_leader)
-				event_sched_out(event, cpuctx, ctx);
-			else
-				group_sched_out(event, cpuctx, ctx);
-		}
-	}
+	if (ctx->nr_active)
+		list_for_each_entry(event, &ctx->group_list, group_entry)
+			group_sched_out(event, cpuctx, ctx);
+
 	perf_enable();
  out:
 	spin_unlock(&ctx->lock);
@@ -1258,12 +1254,8 @@
 		if (event->cpu != -1 && event->cpu != cpu)
 			continue;
 
-		if (event != event->group_leader)
-			event_sched_in(event, cpuctx, ctx, cpu);
-		else {
-			if (group_can_go_on(event, cpuctx, 1))
-				group_sched_in(event, cpuctx, ctx, cpu);
-		}
+		if (group_can_go_on(event, cpuctx, 1))
+			group_sched_in(event, cpuctx, ctx, cpu);
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1283,9 @@
 		if (event->cpu != -1 && event->cpu != cpu)
 			continue;
 
-		if (event != event->group_leader) {
-			if (event_sched_in(event, cpuctx, ctx, cpu))
+		if (group_can_go_on(event, cpuctx, can_add_hw))
+			if (group_sched_in(event, cpuctx, ctx, cpu))
 				can_add_hw = 0;
-		} else {
-			if (group_can_go_on(event, cpuctx, can_add_hw)) {
-				if (group_sched_in(event, cpuctx, ctx, cpu))
-					can_add_hw = 0;
-			}
-		}
 	}
 	perf_enable();
  out:
@@ -4781,9 +4767,7 @@
 	 * We dont have to disable NMIs - we are only looking at
 	 * the list, not manipulating it:
 	 */
-	list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
-		if (event != event->group_leader)
-			continue;
+	list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
 
 		if (!event->attr.inherit) {
 			inherited_all = 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592fe..3724756 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	func = ftrace_trace_function;
+#else
+	func = __ftrace_trace_function;
+#endif
 
 	if (ftrace_pid_trace) {
 		set_ftrace_pid_function(func);
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645..a91da69 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@
 		return 1;
 	}
 
-	if (!register_tracer(&kmem_tracer)) {
+	if (register_tracer(&kmem_tracer) != 0) {
 		pr_warning("Warning: could not register the kmem tracer\n");
 		return 1;
 	}
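
register_tracer() returns 0 on success, so the old
`!register_tracer(...)` test fired the warning on the success path and
stayed silent on failure; the fix warns only on a non-zero (error)
return.
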
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 1c2ed30..a791009 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -31,6 +31,9 @@
 -w::
 --width=::
         Select the width of the SVG file (default: 1000)
+-p::
+--power-only::
+        Only output the CPU power section of the diagram
 
 
 SEE ALSO
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b5f1953..5881943 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -728,7 +728,7 @@
 common-cmds.h: util/generate-cmdlist.sh command-list.txt
 
 common-cmds.h: $(wildcard Documentation/perf-*.txt)
-	$(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@
+	$(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
 
 $(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
 	$(QUIET_GEN)$(RM) $@ $@+ && \
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4405681..702d8fe 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -46,6 +46,8 @@
 
 static u64		first_time, last_time;
 
+static int		power_only;
+
 
 static struct perf_header	*header;
 
@@ -547,7 +549,7 @@
 	u64 cpu;
 	struct power_event *pwr;
 
-	for (cpu = 0; cpu < numcpus; cpu++) {
+	for (cpu = 0; cpu <= numcpus; cpu++) {
 		pwr = malloc(sizeof(struct power_event));
 		if (!pwr)
 			return;
@@ -871,7 +873,7 @@
 		/* no exit marker, task kept running to the end */
 		if (p->end_time == 0)
 			p->end_time = last_time;
-		if (p->total_time >= threshold)
+		if (p->total_time >= threshold && !power_only)
 			p->display = 1;
 
 		c = p->all;
@@ -882,7 +884,7 @@
 			if (c->start_time == 1)
 				c->start_time = first_time;
 
-			if (c->total_time >= threshold) {
+			if (c->total_time >= threshold && !power_only) {
 				c->display = 1;
 				count++;
 			}
@@ -1134,6 +1136,8 @@
 		    "output file name"),
 	OPT_INTEGER('w', "width", &svg_page_width,
 		    "page width"),
+	OPT_BOOLEAN('p', "power-only", &power_only,
+		    "output power data only"),
 	OPT_END()
 };
 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1ca8889..37512e9 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -782,6 +782,7 @@
 	"exit_idle",
 	"mwait_idle",
 	"mwait_idle_with_hints",
+	"poll_idle",
 	"ppc64_runlatch_off",
 	"pseries_dedicated_idle_sleep",
 	NULL
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index a778fd0..856655d 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -28,7 +28,7 @@
 
 int svg_page_width = 1000;
 
-#define MIN_TEXT_SIZE 0.001
+#define MIN_TEXT_SIZE 0.01
 
 static u64 total_height;
 static FILE *svgfile;
@@ -217,6 +217,18 @@
 		}
 		fclose(file);
 	}
+
+	/* CPU type */
+	file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r");
+	if (file) {
+		while (fgets(buf, 255, file)) {
+			unsigned int freq;
+			freq = strtoull(buf, NULL, 10);
+			if (freq > max_freq)
+				max_freq = freq;
+		}
+		fclose(file);
+	}
 	return cpu_m;
 }