Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1252a26..584fe0b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -87,6 +87,21 @@
  *	DMA Cache Coherency
  *	===================
  *
+ *	dma_inv_range(start, end)
+ *
+ *		Invalidate (discard) the specified virtual address range.
+ *		May not write back any entries.  If 'start' or 'end'
+ *		are not cache line aligned, those lines must be written
+ *		back.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	dma_clean_range(start, end)
+ *
+ *		Clean (write back) the specified virtual address range.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -107,6 +122,8 @@
 	void (*dma_map_area)(const void *, size_t, int);
 	void (*dma_unmap_area)(const void *, size_t, int);
 
+	void (*dma_inv_range)(const void *, const void *);
+	void (*dma_clean_range)(const void *, const void *);
 	void (*dma_flush_range)(const void *, const void *);
 };
 
@@ -133,6 +150,8 @@
  */
 #define dmac_map_area			cpu_cache.dma_map_area
 #define dmac_unmap_area			cpu_cache.dma_unmap_area
+#define dmac_inv_range			cpu_cache.dma_inv_range
+#define dmac_clean_range		cpu_cache.dma_clean_range
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -153,6 +172,8 @@
  */
 extern void dmac_map_area(const void *, size_t, int);
 extern void dmac_unmap_area(const void *, size_t, int);
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
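
The new dmac_inv_range()/dmac_clean_range() entry points follow the same convention as dmac_flush_range() above: they take virtual start and end addresses rather than a base/size pair. A minimal, hedged sketch of how low-level code might use them on a hand-managed DMA buffer (the helper names, 'buf' and 'len' are hypothetical; on non-coherent systems the dma_map_*/dma_unmap_* API is normally preferred):

	/* Illustrative only: hand-maintained cache state for one DMA buffer. */
	static void example_before_dma_from_device(void *buf, size_t len)
	{
		/* Discard cached lines so they cannot mask data the device writes. */
		dmac_inv_range(buf, (char *)buf + len);
	}

	static void example_before_dma_to_device(void *buf, size_t len)
	{
		/* Write dirty lines back so the device sees what the CPU wrote. */
		dmac_clean_range(buf, (char *)buf + len);
	}
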
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cd4458f..cb47d28 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,6 +8,7 @@
 #define CPUID_CACHETYPE	1
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
+#define CPUID_MPIDR	5
 
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"
@@ -70,6 +71,11 @@
 	return read_cpuid(CPUID_TCM);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+	return read_cpuid(CPUID_MPIDR);
+}
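
read_cpuid_mpidr() exposes the Multiprocessor Affinity Register; on typical ARMv7 MP parts the low byte (Aff0) identifies the CPU within its cluster. A brief, hedged sketch of the usual masking (the 0xff mask follows the ARMv7 ARM and is not defined by this patch):

	/* Illustrative: which CPU within the cluster are we running on? */
	static inline unsigned int example_cpu_in_cluster(void)
	{
		return read_cpuid_mpidr() & 0xff;	/* Aff0 field */
	}
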
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA.  For
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda1..5c6b9a3 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -8,7 +8,7 @@
 
 #include <asm/param.h>	/* HZ */
 
-extern void __delay(int loops);
+extern void __delay(unsigned long loops);
 
 /*
  * This function intentionally does not exist; if you see references to
@@ -40,5 +40,8 @@
 			__const_udelay((n) * ((2199023U*HZ)>>11))) :	\
 	  __udelay(n))
 
+extern void set_delay_fn(void (*fn)(unsigned long));
+extern void read_current_timer_delay_loop(unsigned long loops);
+
 #endif /* defined(_ARM_DELAY_H) */
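
set_delay_fn() lets a platform replace the calibrated software loop behind __delay() with its own implementation, and read_current_timer_delay_loop() is, as its name suggests, a ready-made loop that polls the current timer. A hedged sketch of how a machine's timer setup might wire it up (the init function is hypothetical, and the timer must already be running):

	/* Hypothetical machine timer init: back __delay() with the hardware timer. */
	static void __init example_timer_init(void)
	{
		/* ... configure and start the free-running timer first ... */
		set_delay_fn(read_current_timer_delay_loop);
	}
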
 
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837..6f48921 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -172,6 +172,46 @@
 {
 }
 
+
+/*
+ * dma_coherent_pre_ops - barrier functions for coherent memory before DMA.
+ * A barrier is required to ensure memory operations are complete before the
+ * initiation of a DMA transfer.
+ * If the coherent memory is Strongly Ordered:
+ * - pre-ARMv7 and 8x50 guarantee ordering wrt other memory accesses
+ * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier.
+ * If coherent memory is Normal then we need a barrier to prevent
+ * reordering.
+ */
+static inline void dma_coherent_pre_ops(void)
+{
+#if COHERENT_IS_NORMAL == 1
+	dmb();
+#else
+	if (arch_is_coherent())
+		dmb();
+	else
+		barrier();
+#endif
+}
+/*
+ * dma_coherent_post_ops - barrier functions for coherent memory after DMA.
+ * If the coherent memory is Strongly Ordered we don't need a barrier since
+ * there are no speculative fetches to Strongly Ordered memory.
+ * If coherent memory is Normal then we need a barrier to prevent reordering.
+ */
+static inline void dma_coherent_post_ops(void)
+{
+#if COHERENT_IS_NORMAL == 1
+	dmb();
+#else
+	if (arch_is_coherent())
+		dmb();
+	else
+		barrier();
+#endif
+}
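
dma_coherent_pre_ops()/dma_coherent_post_ops() give drivers one call that expands to dmb() or a plain compiler barrier() depending on whether coherent memory is mapped as Normal or Strongly Ordered (COHERENT_IS_NORMAL is defined in pgtable.h later in this patch). A hedged sketch of the intended call pattern around a descriptor in dma_alloc_coherent() memory; the descriptor layout and doorbell register are illustrative, not part of this patch:

	/* Hypothetical descriptor living in dma_alloc_coherent() memory. */
	struct example_desc {
		u32 addr;
		u32 len;
	};

	static void example_kick_dma(struct example_desc *desc, void __iomem *doorbell)
	{
		desc->addr = 0x80000000;	/* illustrative DMA address */
		desc->len = 4096;
		dma_coherent_pre_ops();		/* descriptor visible before the kick */
		writel(1, doorbell);		/* illustrative "go" register */
	}
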
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -386,6 +426,58 @@
 }
 
 /**
+ * dma_cache_pre_ops - clean or invalidate the cache before a DMA transfer
+ *                     is initiated, and perform a barrier operation.
+ * @virtual_addr: A kernel logical or kernel virtual address
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ */
+static inline void dma_cache_pre_ops(void *virtual_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(virtual_addr, size, dir);
+}
+
+/**
+ * dma_cache_post_ops - clean or invalidate the cache after a DMA transfer
+ *                      has completed, and perform a barrier operation.
+ * @virtual_addr: A kernel logical or kernel virtual address
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ */
+static inline void dma_cache_post_ops(void *virtual_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	if (arch_has_speculative_dfetch() && !arch_is_coherent()
+	 && dir != DMA_TO_DEVICE)
+		/*
+		 * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE
+		 * identically: invalidate
+		 */
+		___dma_single_cpu_to_dev(virtual_addr,
+					 size, DMA_FROM_DEVICE);
+}
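
dma_cache_pre_ops()/dma_cache_post_ops() wrap ___dma_single_cpu_to_dev() so a driver that keeps reusing one streaming buffer can do the cache maintenance itself rather than unmapping and remapping it each time; the post hook only invalidates on cores that speculatively fill the data cache. A hedged sketch for a receive buffer (the buffer and the transfer-wait step are illustrative):

	static void example_recycle_rx_buffer(void *buf, size_t len)
	{
		/* Maintenance before handing the buffer back to the device. */
		dma_cache_pre_ops(buf, len, DMA_FROM_DEVICE);

		/* ... start the transfer and wait for it to complete ... */

		/* Drop any lines speculatively fetched while the DMA ran. */
		dma_cache_post_ops(buf, len, DMA_FROM_DEVICE);
	}
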
+
+/**
  * dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @page: page that buffer resides in
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index af18cea..0854849 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -2,6 +2,7 @@
  *  arch/arm/include/asm/domain.h
  *
  *  Copyright (C) 1999 Russell King.
+ *  Copyright (c) 2009, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -27,8 +28,13 @@
  *
  * 36-bit addressing and supersections are only available on
  * CPUs based on ARMv6+ or the Intel XSC3 core.
+ *
+ * We cannot use domain 0 for the kernel on QSD8x50 since the kernel domain
+ * is set to manager mode when set_fs(KERNEL_DS) is called. Setting domain 0
+ * to manager mode will disable the workaround for a CPU bug that can cause an
+ * invalid fault status and/or TLB corruption (CONFIG_VERIFY_PERMISSION_FAULT).
  */
-#ifndef CONFIG_IO_36
+#if !defined(CONFIG_IO_36) && !defined(CONFIG_VERIFY_PERMISSION_FAULT)
 #define DOMAIN_KERNEL	0
 #define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
@@ -56,6 +62,17 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_USE_DOMAINS
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+void emulate_domain_manager_set(u32 domain);
+int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar);
+int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar);
+void emulate_domain_manager_switch_mm(
+	unsigned long pgd_phys,
+	struct mm_struct *mm,
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *));
+
+#define set_domain(x) emulate_domain_manager_set(x)
+#else
 #define set_domain(x)					\
 	do {						\
 	__asm__ __volatile__(				\
@@ -63,6 +80,7 @@
 	  : : "r" (x));					\
 	isb();						\
 	} while (0)
+#endif
 
 #define modify_domain(dom,type)					\
 	do {							\
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 16bd480..1fc2f49 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -59,6 +59,7 @@
 /* Registers shifts and masks */
 #define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
 #define L2X0_CACHE_ID_PART_L210		(1 << 6)
+#define L2X0_CACHE_ID_PART_L220		(2 << 6)
 #define L2X0_CACHE_ID_PART_L310		(3 << 6)
 
 #define L2X0_AUX_CTRL_MASK			0xc0000fff
@@ -71,9 +72,13 @@
 #define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT	28
 #define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT	29
 #define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT		30
+#define L2X0_AUX_CTRL_EVNT_MON_BUS_EN_SHIFT	20
 
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
+extern void l2x0_suspend(void);
+extern void l2x0_resume(int collapsed);
+extern void l2x0_cache_sync(void);
 #endif
 
 #endif
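
l2x0_suspend()/l2x0_resume() are exported so platform low-power code can park the PL2x0 across a power collapse; l2x0_resume() takes a flag saying whether the controller actually lost state. Their exact behaviour lives in the cache-l2x0.c changes (not shown here), so the ordering below is only a hedged sketch with a hypothetical helper:

	/* Hypothetical power-collapse path. */
	static void example_power_collapse(void)
	{
		int collapsed;

		l2x0_suspend();				/* quiesce the L2 before collapsing */
		collapsed = example_enter_low_power();	/* hypothetical; reports lost state */
		l2x0_resume(collapsed);			/* re-enable, reinitialising if needed */
	}
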
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 0691f9d..27adea3 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -41,6 +41,8 @@
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
 void gic_enable_ppi(unsigned int);
+bool gic_is_spi_pending(unsigned int irq);
+void gic_clear_spi_pending(unsigned int irq);
 #endif
 
 #endif
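
gic_is_spi_pending()/gic_clear_spi_pending() let platform suspend/wakeup code inspect and clear shared peripheral interrupts directly in the GIC distributor. A short, hedged usage sketch (the wrapper and the interrupt number are placeholders):

	/* Illustrative: drop a stale pending wakeup SPI before suspend. */
	static void example_clear_stale_wakeup(unsigned int irq)
	{
		if (gic_is_spi_pending(irq))
			gic_clear_spi_pending(irq);
	}
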
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d66605d..761c29e 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -248,6 +248,8 @@
 
 #define ioremap(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_strongly_ordered(cookie,size)	__arch_ioremap((cookie), (size), \
+						MT_DEVICE_STRONGLY_ORDERED)
 #define ioremap_cached(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap				__arch_iounmap
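
ioremap_strongly_ordered() maps a region with the new MT_DEVICE_STRONGLY_ORDERED type (added to mach/map.h below), for peripherals that cannot tolerate the buffering and reordering allowed for ordinary Device memory. A hedged sketch of mapping and poking a hypothetical register block (the base address and offset are illustrative):

	static int example_init_block(unsigned long phys)	/* 'phys' is illustrative */
	{
		void __iomem *regs = ioremap_strongly_ordered(phys, SZ_4K);

		if (!regs)
			return -ENOMEM;
		writel(0x1, regs + 0x10);	/* strictly ordered, unbuffered access */
		iounmap(regs);
		return 0;
	}
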
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
index 4ca69fe..36938ea 100644
--- a/arch/arm/include/asm/mach/flash.h
+++ b/arch/arm/include/asm/mach/flash.h
@@ -17,6 +17,7 @@
  * map_name:	the map probe function name
  * name:	flash device name (eg, as used with mtdparts=)
  * width:	width of mapped device
+ * interleave:  interleave mode feature support
  * init:	method called at driver/device initialisation
  * exit:	method called at driver/device removal
  * set_vpp:	method called to enable or disable VPP
@@ -28,6 +29,7 @@
 	const char	*map_name;
 	const char	*name;
 	unsigned int	width;
+	unsigned int    interleave;
 	int		(*init)(void);
 	void		(*exit)(void);
 	void		(*set_vpp)(int on);
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index d2fedb5..02fa753 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -29,6 +29,10 @@
 #define MT_MEMORY_NONCACHED	11
 #define MT_MEMORY_DTCM		12
 #define MT_MEMORY_ITCM		13
+#define MT_DEVICE_STRONGLY_ORDERED 14
+#define MT_MEMORY_R		15
+#define MT_MEMORY_RW		16
+#define MT_MEMORY_RX		17
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
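
The new mapping types plug into the existing static-mapping machinery: MT_DEVICE_STRONGLY_ORDERED backs ioremap_strongly_ordered() above, while MT_MEMORY_R/RW/RX allow kernel memory to be mapped with reduced permissions. A hedged sketch of a board file declaring a strongly ordered static mapping through iotable_init() (the virtual and physical addresses are purely illustrative):

	/* Hypothetical board I/O table entry using the new mapping type. */
	static struct map_desc example_io_desc[] __initdata = {
		{
			.virtual = 0xfb000000UL,		/* illustrative VA */
			.pfn	 = __phys_to_pfn(0xa8000000),	/* illustrative PA */
			.length	 = SZ_1M,
			.type	 = MT_DEVICE_STRONGLY_ORDERED,
		},
	};

	/* ...and in the machine's .map_io callback: */
	/* iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc)); */
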
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index bca864a..d439160 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -7,6 +7,12 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio_func.h>
+#include <mach/gpio.h>
+
+#define SDC_DAT1_DISABLE 0
+#define SDC_DAT1_ENABLE  1
+#define SDC_DAT1_ENWAKE  2
+#define SDC_DAT1_DISWAKE 3
 
 struct embedded_sdio_data {
         struct sdio_cis cis;
@@ -15,6 +21,91 @@
         int num_funcs;
 };
 
+/* This structure keeps information per regulator */
+struct msm_mmc_reg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage level to be set */
+	unsigned int level;
+	/* Load values for low power and high power mode */
+	unsigned int lpm_uA;
+	unsigned int hpm_uA;
+	/*
+	 * is set voltage supported for this regulator?
+	 * false => set voltage is not supported
+	 * true  => set voltage is supported
+	 */
+	bool set_voltage_sup;
+	/* is this regulator enabled? */
+	bool is_enabled;
+	/* does this regulator need to be always on? */
+	bool always_on;
+	/* is low power mode setting required for this regulator? */
+	bool lpm_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for a SDCC slot.
+ */
+struct msm_mmc_slot_reg_data {
+	struct msm_mmc_reg_data *vdd_data; /* keeps VDD/VCC regulator info */
+	struct msm_mmc_reg_data *vccq_data; /* keeps VCCQ regulator info */
+	struct msm_mmc_reg_data *vddp_data; /* keeps VDD Pad regulator info */
+};
+
+struct msm_mmc_gpio {
+	u32 no;
+	const char *name;
+	bool is_always_on;
+	bool is_enabled;
+};
+
+struct msm_mmc_gpio_data {
+	struct msm_mmc_gpio *gpio;
+	u8 size;
+};
+
+struct msm_mmc_pad_pull {
+	enum msm_tlmm_pull_tgt no;
+	u32 val;
+};
+
+struct msm_mmc_pad_pull_data {
+	struct msm_mmc_pad_pull *on;
+	struct msm_mmc_pad_pull *off;
+	u8 size;
+};
+
+struct msm_mmc_pad_drv {
+	enum msm_tlmm_hdrive_tgt no;
+	u32 val;
+};
+
+struct msm_mmc_pad_drv_data {
+	struct msm_mmc_pad_drv *on;
+	struct msm_mmc_pad_drv *off;
+	u8 size;
+};
+
+struct msm_mmc_pad_data {
+	struct msm_mmc_pad_pull_data *pull;
+	struct msm_mmc_pad_drv_data *drv;
+};
+
+struct msm_mmc_pin_data {
+	/*
+	 * = 1 if controller pins are using gpios
+	 * = 0 if controller has dedicated MSM pads
+	 */
+	u8 is_gpio;
+	u8 cfg_sts;
+	struct msm_mmc_gpio_data *gpio_data;
+	struct msm_mmc_pad_data *pad_data;
+};
+
 struct mmc_platform_data {
 	unsigned int ocr_mask;			/* available voltages */
 	int built_in;				/* built-in device flag */
@@ -23,6 +114,35 @@
 	unsigned int (*status)(struct device *);
 	struct embedded_sdio_data *embedded_sdio;
 	int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+	/*
+	 * XPC controls the maximum current in the
+	 * default speed mode of SDXC card.
+	 */
+	unsigned int xpc_cap;
+	/* Supported UHS-I Modes */
+	unsigned int uhs_caps;
+	void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
+	unsigned int status_irq;
+	unsigned int status_gpio;
+	unsigned int sdiowakeup_irq;
+	unsigned long irq_flags;
+	unsigned long mmc_bus_width;
+	int (*wpswitch)(struct device *);
+	int dummy52_required;
+	unsigned int msmsdcc_fmin;
+	unsigned int msmsdcc_fmid;
+	unsigned int msmsdcc_fmax;
+	bool nonremovable;
+	bool pclk_src_dfab;
+	int (*cfg_mpm_sdiowakeup)(struct device *, unsigned);
+	bool sdcc_v4_sup;
+	unsigned int wpswitch_gpio;
+	unsigned char wpswitch_polarity;
+	struct msm_mmc_slot_reg_data *vreg_data;
+	int is_sdio_al_client;
+	unsigned int *sup_clk_table;
+	unsigned char sup_clk_cnt;
+	struct msm_mmc_pin_data *pin_data;
 };
 
 #endif
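
The regulator, GPIO and pad descriptors above are meant to be filled in by board files and consumed by the msmsdcc driver. A hedged sketch of a board-level slot description using only fields introduced here; the regulator name, voltage, load values and clock limits are illustrative and not taken from any real board:

	/* Illustrative board data for one SDCC slot. */
	static struct msm_mmc_reg_data example_vdd = {
		.name			= "example_vdd",	/* hypothetical supply */
		.level			= 2950000,		/* uV, illustrative */
		.hpm_uA			= 600000,
		.lpm_uA			= 9000,
		.set_voltage_sup	= true,
		.lpm_sup		= true,
	};

	static struct msm_mmc_slot_reg_data example_slot_vreg = {
		.vdd_data = &example_vdd,
	};

	static struct mmc_platform_data example_sdcc_data = {
		.ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
		.vreg_data	= &example_slot_vreg,
		.nonremovable	= true,
		.msmsdcc_fmin	= 400000,
		.msmsdcc_fmax	= 48000000,
	};
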
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index af44a8f..9ecd2dc 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -299,6 +299,13 @@
 #define arch_is_coherent()		0
 #endif
 
+/*
+ * Set if the architecture speculatively fetches data into cache.
+ */
+#ifndef arch_has_speculative_dfetch
+#define arch_has_speculative_dfetch()	0
+#endif
+
 #endif
 
 #include <asm-generic/memory_model.h>
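
Like arch_is_coherent() just above, arch_has_speculative_dfetch() defaults to 0 and is meant to be overridden by the machine's <mach/memory.h> on cores that speculatively load data into the caches; dma_cache_post_ops() keys off it. A minimal sketch of the override (placement in <mach/memory.h> is an assumption based on the arch_is_coherent() pattern):

	/* In a machine's <mach/memory.h>, for a core with speculative data prefetch: */
	#define arch_has_speculative_dfetch()	1
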
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf..fd3f17e 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -41,6 +41,8 @@
 	__res |= __ex_flag;
 	if (unlikely(__res != 0))
 		fail_fn(count);
+	else
+		smp_rmb();
 }
 
 static inline int
@@ -61,6 +63,9 @@
 	__res |= __ex_flag;
 	if (unlikely(__res != 0))
 		__res = fail_fn(count);
+	else
+		smp_rmb();
+
 	return __res;
 }
 
@@ -74,6 +79,7 @@
 {
 	int __ex_flag, __res, __orig;
 
+	smp_wmb();
 	__asm__ (
 
 		"ldrex	%0, [%3]	\n\t"
@@ -119,6 +125,8 @@
 		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
 		: "r" (&count->counter)
 		: "cc", "memory" );
+	if (__orig)
+		smp_rmb();
 
 	return __orig;
 }
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ac75d08..ac11816 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -201,6 +201,11 @@
 extern int pfn_valid(unsigned long);
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+extern int _early_pfn_valid(unsigned long);
+#define early_pfn_valid(pfn) (_early_pfn_valid(pfn))
+#endif
+
 #include <asm/memory.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index c4aa4e8..0e1fd19 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -24,6 +24,9 @@
 	ARM_PERF_PMU_ID_V6MP,
 	ARM_PERF_PMU_ID_CA8,
 	ARM_PERF_PMU_ID_CA9,
+	ARM_PERF_PMU_ID_SCORPION,
+	ARM_PERF_PMU_ID_SCORPIONMP,
+	ARM_PERF_PMU_ID_KRAIT,
 	ARM_NUM_PMU_IDS,
 };
 
diff --git a/arch/arm/include/asm/perftypes.h b/arch/arm/include/asm/perftypes.h
new file mode 100644
index 0000000..8d21dcd
--- /dev/null
+++ b/arch/arm/include/asm/perftypes.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+**  perftypes.h
+**  DESCRIPTION
+**  ksapi.ko function hooks header file
+*/
+
+#ifndef __PERFTYPES_H__
+#define __PERFTYPES_H__
+
+typedef void (*VPVF)(void);
+typedef void (*VPULF)(unsigned long);
+typedef void (*VPULULF)(unsigned long, unsigned long);
+
+extern VPVF pp_interrupt_out_ptr;
+extern VPVF pp_interrupt_in_ptr;
+extern VPULF pp_process_remove_ptr;
+extern void perf_mon_interrupt_in(void);
+extern void perf_mon_interrupt_out(void);
+
+#endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704..cbb2f45 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -22,7 +22,9 @@
 
 #include <asm/memory.h>
 #include <mach/vmalloc.h>
+#include <mach/memory.h>
 #include <asm/pgtable-hwdef.h>
+#include <asm/tlbflush.h>
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -232,16 +234,30 @@
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
 
+#define pgprot_device(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_NONSHARED)
+
+#define pgprot_writethroughcache(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITETHROUGH)
+
+#define pgprot_writebackcache(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK)
+
+#define pgprot_writebackwacache(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC)
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
+#define COHERENT_IS_NORMAL 1
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#define COHERENT_IS_NORMAL 0
 #endif
 
 #endif /* __ASSEMBLY__ */
@@ -469,8 +485,15 @@
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
+#ifndef HAS_ARCH_IO_REMAP_PFN_RANGE
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
-		remap_pfn_range(vma, from, pfn, size, prot)
+	remap_pfn_range(vma, from, pfn, size, prot)
+#else
+extern int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot);
+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
+	arch_io_remap_pfn_range(vma, from, pfn, size, prot)
+#endif
+
 
 #define pgtable_cache_init() do { } while (0)
 
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 7544ce6..48d148d 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -16,6 +16,7 @@
 
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU	= 0,
+	ARM_PMU_DEVICE_L2	= 1,
 	ARM_NUM_PMU_DEVICES,
 };
 
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index b2d9df5..12bc94d 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -29,6 +29,8 @@
 #define STACK_TOP_MAX	TASK_SIZE
 #endif
 
+extern unsigned int boot_reason;
+
 struct debug_info {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
diff --git a/arch/arm/include/asm/remote_spinlock.h b/arch/arm/include/asm/remote_spinlock.h
new file mode 100644
index 0000000..702b669
--- /dev/null
+++ b/arch/arm/include/asm/remote_spinlock.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_REMOTE_SPINLOCK_H
+#define __ASM_REMOTE_SPINLOCK_H
+
+#include <mach/remote_spinlock.h>
+
+#endif /* __ASM_REMOTE_SPINLOCK_H */
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index ee2ad8a..950665d 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -37,6 +37,15 @@
 
 /* it is allowed to have multiple ATAG_MEM nodes */
 #define ATAG_MEM	0x54410002
+/* it is allowed to have multiple ATAG_MEM_RESERVED nodes */
+/* these indicate places where hotpluggable memory is present */
+/* which are not active during boot */
+#define ATAG_MEM_RESERVED	0x5441000A
+/* it is allowed to have multiple ATAG_MEM_LOW_POWER nodes */
+/* these indicate memory which can be put in a low power state */
+#define ATAG_MEM_LOW_POWER	0x5441000B
+/* these indicate memory which can be reclaimed from OSBL memory after bootup */
+#define ATAG_MEM_OSBL		0x5441000C
 
 struct tag_mem32 {
 	__u32	size;
@@ -221,6 +230,18 @@
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
 
+/*
+ * Early command line parameters.
+ */
+struct early_params {
+	const char *arg;
+	void (*fn)(char **p);
+};
+
+#define __early_param(name,fn)					\
+static struct early_params __early_##fn __used			\
+__attribute__((__section__(".early_param.init"))) = { name, fn }
+
 #endif  /*  __KERNEL__  */
 
 #endif
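
__early_param() places a handler in the .early_param.init section so it runs while the kernel command line is scanned during early setup, before the normal __setup() machinery; the handler receives a pointer into the command line that it may advance. A hedged sketch of registering one (the parameter name and handler are hypothetical):

	/* Illustrative early parameter: "example_mem=<size>". */
	static unsigned long example_mem_size __initdata;

	static void __init early_example_mem(char **p)
	{
		example_mem_size = memparse(*p, p);	/* advances *p past the size */
	}
	__early_param("example_mem=", early_example_mem);
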
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 832888d..d3808b5 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -69,6 +69,8 @@
 #define __exception_irq_entry	__exception
 #endif
 
+void cpu_idle_wait(void);
+
 struct thread_info;
 struct task_struct;
 
@@ -141,7 +143,7 @@
 #define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
 				    : : "r" (0) : "memory")
 #define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
-				    : : "r" (0) : "memory")
+				: : "r" (0) : "memory")
 #elif defined(CONFIG_CPU_FA526)
 #define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
 				    : : "r" (0) : "memory")
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index d2005de..ab2a488 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -433,7 +433,7 @@
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
 	if (tlb_flag(TLB_V7_UIS_PAGE))
-#ifdef CONFIG_ARM_ERRATA_720789
+#if defined(CONFIG_ARM_ERRATA_720789) || defined(CONFIG_ARCH_MSM8X60)
 		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
 #else
 		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
@@ -480,7 +480,11 @@
 	if (tlb_flag(TLB_V6_I_PAGE))
 		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
 	if (tlb_flag(TLB_V7_UIS_PAGE))
+#ifdef CONFIG_ARCH_MSM8X60
+		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (kaddr) : "cc");
+#else
 		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
+#endif
 
 	if (tlb_flag(TLB_BTB)) {
 		/* flush the branch target cache */
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index f4ab34f..cb3ea08 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -21,7 +21,7 @@
 #define FPSID_FORMAT_MASK	(0x3  << FPSID_FORMAT_BIT)
 #define FPSID_NODOUBLE		(1<<20)
 #define FPSID_ARCH_BIT		(16)
-#define FPSID_ARCH_MASK		(0xF  << FPSID_ARCH_BIT)
+#define FPSID_ARCH_MASK		(0x7F  << FPSID_ARCH_BIT)
 #define FPSID_PART_BIT		(8)
 #define FPSID_PART_MASK		(0xFF << FPSID_PART_BIT)
 #define FPSID_VARIANT_BIT	(4)
@@ -82,3 +82,8 @@
 #define VFPOPDESC_UNUSED_BIT	(24)
 #define VFPOPDESC_UNUSED_MASK	(0xFF << VFPOPDESC_UNUSED_BIT)
 #define VFPOPDESC_OPDESC_MASK	(~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK))
+
+#ifndef __ASSEMBLY__
+int vfp_flush_context(void);
+void vfp_reinit(void);
+#endif
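
vfp_flush_context() and vfp_reinit() are declared for power management code: the first saves the live VFP state before the CPU loses it and the second brings the VFP back up afterwards. Their exact semantics (including the meaning of the return value) are defined in the vfpmodule.c changes that are not shown here, so the ordering below is only a hedged sketch with a hypothetical helper:

	/* Hypothetical CPU power-collapse sequence that preserves VFP state. */
	static void example_cpu_sleep(void)
	{
		vfp_flush_context();		/* save/flush VFP state before collapse */
		example_enter_power_collapse();	/* hypothetical platform helper */
		vfp_reinit();			/* restore VFP after power returns */
	}
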