Merge "power: pm8921-charger: Allow disabling of input current limit" into msm-3.0
diff --git a/arch/arm/boot/dts/msmcopper.dts b/arch/arm/boot/dts/msmcopper.dts
index 327cce5..69de152 100644
--- a/arch/arm/boot/dts/msmcopper.dts
+++ b/arch/arm/boot/dts/msmcopper.dts
@@ -55,4 +55,14 @@
qcom,sdcc-bus-width = <4>;
qcom,sdcc-disable_cmd23;
};
+
+ qcom,sps@F9980000 {
+ compatible = "qcom,msm_sps";
+ reg = <0xF9984000 0x15000>,
+ <0xF9999000 0xB000>;
+ interrupts = <94>;
+
+ qcom,bam-dma-res-pipes = <6>;
+ };
+
};
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 2b7b749..7fcf9dc 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -35,7 +35,7 @@
#include <asm/hardware/gic.h>
#include <asm/system.h>
-static DEFINE_SPINLOCK(irq_controller_lock);
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;
@@ -85,23 +85,22 @@
{
u32 mask = 1 << (d->irq % 32);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
- spin_unlock(&irq_controller_lock);
-
+ raw_spin_unlock(&irq_controller_lock);
}
static void gic_unmask_irq(struct irq_data *d)
{
u32 mask = 1 << (d->irq % 32);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
}
static void gic_disable_irq(struct irq_data *d)
@@ -149,13 +148,13 @@
if (!msm_show_resume_irq_mask)
return;
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
for (i = 0; i * 32 < gic->max_irq; i++) {
enabled = readl_relaxed(base + GIC_DIST_ENABLE_CLEAR + i * 4);
pending[i] = readl_relaxed(base + GIC_DIST_PENDING_SET + i * 4);
pending[i] &= enabled;
}
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
for (i = find_first_bit(pending, gic->max_irq);
i < gic->max_irq;
@@ -205,9 +204,9 @@
static void gic_eoi_irq(struct irq_data *d)
{
if (gic_arch_extn.irq_eoi) {
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
gic_arch_extn.irq_eoi(d);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
}
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
@@ -231,7 +230,7 @@
if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
@@ -256,7 +255,7 @@
if (enabled)
writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
return 0;
}
@@ -276,22 +275,21 @@
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int shift = (d->irq % 4) * 8;
- unsigned int cpu = cpumask_first(mask_val);
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
u32 val, mask, bit;
- if (cpu >= 8)
+ if (cpu >= 8 || cpu >= nr_cpu_ids)
return -EINVAL;
mask = 0xff << shift;
bit = 1 << (cpu + shift);
- spin_lock(&irq_controller_lock);
- d->node = cpu;
+ raw_spin_lock(&irq_controller_lock);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
- return 0;
+ return IRQ_SET_MASK_OK;
}
#endif
@@ -336,9 +334,9 @@
chained_irq_enter(chip, desc);
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
gic_irq = (status & 0x3ff);
if (gic_irq == 1023)
@@ -531,7 +529,7 @@
u32 mask, val;
WARN_ON(!irqs_disabled());
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
mask = 1 << (gic_irq(d) % 32);
val = readl(gic_dist_base(d) +
GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
@@ -539,7 +537,7 @@
WARN_ON(val & mask);
val = readl(gic_dist_base(d) +
GIC_DIST_PENDING_SET + (gic_irq(d) / 32) * 4);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
return (bool) (val & mask);
}
@@ -552,7 +550,7 @@
u32 mask, val;
WARN_ON(!irqs_disabled());
- spin_lock(&irq_controller_lock);
+ raw_spin_lock(&irq_controller_lock);
mask = 1 << (gic_irq(d) % 32);
val = readl(gic_dist_base(d) +
GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
@@ -560,5 +558,5 @@
WARN_ON(val & mask);
writel(mask, gic_dist_base(d) +
GIC_DIST_PENDING_CLEAR + (gic_irq(d) / 32) * 4);
- spin_unlock(&irq_controller_lock);
+ raw_spin_unlock(&irq_controller_lock);
}
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index f617aff..9c8c978 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -121,6 +121,9 @@
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_MSM_SSBI=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_BAMDMA=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index ee31efc..56e0ecd 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -44,6 +44,8 @@
CONFIG_MACH_MSM8930_CDP=y
CONFIG_MACH_MSM8930_MTP=y
CONFIG_MACH_MSM8930_FLUID=y
+CONFIG_MACH_MSM8627_CDP=y
+CONFIG_MACH_MSM8627_MTP=y
CONFIG_MACH_APQ8064_SIM=y
CONFIG_MACH_APQ8064_RUMI3=y
# CONFIG_MSM_STACKED_MEMORY is not set
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 4200554..8a54b7d 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -33,18 +33,18 @@
#define DMA_MODE_CASCADE 0xc0
#define DMA_AUTOINIT 0x10
-extern spinlock_t dma_spin_lock;
+extern raw_spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
+ raw_spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
+ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* Clear the 'DMA Pointer Flip Flop'.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b4ffe9d..1496565 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,7 +6,7 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
unsigned int id;
- spinlock_t id_lock;
+ raw_spinlock_t id_lock;
#endif
unsigned int kvm_seq;
} mm_context_t;
@@ -16,7 +16,7 @@
/* init_mm.context.id_lock should be initialized. */
#define INIT_MM_CONTEXT(name) \
- .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
#else
#define ASID(mm) (0)
#endif
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 2c4a185..7b829d9 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -23,7 +23,7 @@
#include <asm/mach/dma.h>
-DEFINE_SPINLOCK(dma_spin_lock);
+DEFINE_RAW_SPINLOCK(dma_spin_lock);
EXPORT_SYMBOL(dma_spin_lock);
static dma_t *dma_chan[MAX_DMA_CHANNELS];
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 4ef97a0..5bd484f 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -144,54 +144,63 @@
#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
{
- unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
bool ret = false;
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_any(cpu_online_mask);
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
ret = true;
}
- pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
-
- d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+ c = irq_data_get_irq_chip(d);
+ if (c->irq_set_affinity)
+ c->irq_set_affinity(d, affinity, true);
+ else
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
return ret;
}
/*
- * The CPU has been marked offline. Migrate IRQs off this CPU. If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
* available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
*/
void migrate_irqs(void)
{
- unsigned int i, cpu = smp_processor_id();
+ unsigned int i;
struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) {
- struct irq_data *d = &desc->irq_data;
bool affinity_broken = false;
+ if (!desc)
+ continue;
+
raw_spin_lock(&desc->lock);
- do {
- if (desc->action == NULL)
- break;
-
- if (d->node != cpu)
- break;
-
- affinity_broken = migrate_one_irq(d);
- } while (0);
+ affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
if (affinity_broken && printk_ratelimit())
- pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+ pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+ smp_processor_id());
}
local_irq_restore(flags);
diff --git a/arch/arm/kernel/perf_event_msm_l2.c b/arch/arm/kernel/perf_event_msm_l2.c
index 3cb251b..ee6b605 100644
--- a/arch/arm/kernel/perf_event_msm_l2.c
+++ b/arch/arm/kernel/perf_event_msm_l2.c
@@ -10,7 +10,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#ifdef CONFIG_ARCH_MSM8x60
+#ifdef CONFIG_ARCH_MSM8X60
#include <linux/irq.h>
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 9e217ad..0045a7f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -535,7 +535,7 @@
}
#endif
-static DEFINE_SPINLOCK(stop_lock);
+static DEFINE_RAW_SPINLOCK(stop_lock);
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
@@ -544,10 +544,10 @@
{
if (system_state == SYSTEM_BOOTING ||
system_state == SYSTEM_RUNNING) {
- spin_lock(&stop_lock);
+ raw_spin_lock(&stop_lock);
printk(KERN_CRIT "CPU%u: stopping\n", cpu);
dump_stack();
- spin_unlock(&stop_lock);
+ raw_spin_unlock(&stop_lock);
}
set_cpu_online(cpu, false);
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5..7669848 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -173,6 +173,57 @@
return res;
}
+static int check_condition(struct pt_regs *regs, unsigned int insn)
+{
+ unsigned int base_cond, neg, cond = 0;
+ unsigned int cpsr_z, cpsr_c, cpsr_n, cpsr_v;
+
+ cpsr_n = (regs->ARM_cpsr & PSR_N_BIT) ? 1 : 0;
+ cpsr_z = (regs->ARM_cpsr & PSR_Z_BIT) ? 1 : 0;
+ cpsr_c = (regs->ARM_cpsr & PSR_C_BIT) ? 1 : 0;
+ cpsr_v = (regs->ARM_cpsr & PSR_V_BIT) ? 1 : 0;
+
+ /* Upper 3 bits indicate condition, lower bit indicates negation */
+ base_cond = insn >> 29;
+ neg = insn & BIT(28) ? 1 : 0;
+
+ switch (base_cond) {
+ case 0x0: /* equal */
+ cond = cpsr_z;
+ break;
+
+ case 0x1: /* carry set */
+ cond = cpsr_c;
+ break;
+
+ case 0x2: /* minus / negative */
+ cond = cpsr_n;
+ break;
+
+ case 0x3: /* overflow */
+ cond = cpsr_v;
+ break;
+
+ case 0x4: /* unsigned higher */
+ cond = (cpsr_c == 1) && (cpsr_z == 0);
+ break;
+
+ case 0x5: /* signed greater / equal */
+ cond = (cpsr_n == cpsr_v);
+ break;
+
+ case 0x6: /* signed greater */
+ cond = (cpsr_z == 0) && (cpsr_n == cpsr_v);
+ break;
+
+ case 0x7: /* always */
+ cond = 1;
+ break;
+ };
+
+ return cond && !neg;
+}
+
/*
* swp_handler logs the id of calling process, dissects the instruction, sanity
* checks the memory location, calls emulate_swpX for the actual operation and
@@ -191,6 +242,12 @@
previous_pid = current->pid;
}
+ /* Ignore the instruction if it fails its condition code check */
+ if (!check_condition(regs, instr)) {
+ regs->ARM_pc += 4;
+ return 0;
+ }
+
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index aaca029..c063c56 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -255,7 +255,7 @@
return ret;
}
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
/*
* This function is protected against re-entrancy.
@@ -267,7 +267,7 @@
oops_enter();
- spin_lock_irq(&die_lock);
+ raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
ret = __die(str, err, thread, regs);
@@ -277,7 +277,7 @@
bust_spinlocks(0);
add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
+ raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
@@ -302,24 +302,24 @@
}
static LIST_HEAD(undef_hook);
-static DEFINE_SPINLOCK(undef_lock);
+static DEFINE_RAW_SPINLOCK(undef_lock);
void register_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
- spin_lock_irqsave(&undef_lock, flags);
+ raw_spin_lock_irqsave(&undef_lock, flags);
list_add(&hook->node, &undef_hook);
- spin_unlock_irqrestore(&undef_lock, flags);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
}
void unregister_undef_hook(struct undef_hook *hook)
{
unsigned long flags;
- spin_lock_irqsave(&undef_lock, flags);
+ raw_spin_lock_irqsave(&undef_lock, flags);
list_del(&hook->node);
- spin_unlock_irqrestore(&undef_lock, flags);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
}
static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
@@ -328,12 +328,12 @@
unsigned long flags;
int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
- spin_lock_irqsave(&undef_lock, flags);
+ raw_spin_lock_irqsave(&undef_lock, flags);
list_for_each_entry(hook, &undef_hook, node)
if ((instr & hook->instr_mask) == hook->instr_val &&
(regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
fn = hook->fn;
- spin_unlock_irqrestore(&undef_lock, flags);
+ raw_spin_unlock_irqrestore(&undef_lock, flags);
return fn ? fn(regs, instr) : 1;
}
diff --git a/arch/arm/mach-footbridge/include/mach/hardware.h b/arch/arm/mach-footbridge/include/mach/hardware.h
index b6fdf23..14db5a0 100644
--- a/arch/arm/mach-footbridge/include/mach/hardware.h
+++ b/arch/arm/mach-footbridge/include/mach/hardware.h
@@ -93,7 +93,7 @@
#define CPLD_FLASH_WR_ENABLE 1
#ifndef __ASSEMBLY__
-extern spinlock_t nw_gpio_lock;
+extern raw_spinlock_t nw_gpio_lock;
extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
extern unsigned int nw_gpio_read(void);
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 06e514f..5b73190 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -68,7 +68,7 @@
/*
* This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
*/
-DEFINE_SPINLOCK(nw_gpio_lock);
+DEFINE_RAW_SPINLOCK(nw_gpio_lock);
EXPORT_SYMBOL(nw_gpio_lock);
static unsigned int current_gpio_op;
@@ -327,9 +327,9 @@
/*
* Set Group1/Group2 outputs
*/
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
/*
@@ -390,9 +390,9 @@
{
unsigned long flags;
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
static unsigned char rwa_unlock[] __initdata =
@@ -616,9 +616,9 @@
cpld_init();
rwa010_init();
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
return 0;
}
diff --git a/arch/arm/mach-footbridge/netwinder-leds.c b/arch/arm/mach-footbridge/netwinder-leds.c
index 00269fe..e57102e 100644
--- a/arch/arm/mach-footbridge/netwinder-leds.c
+++ b/arch/arm/mach-footbridge/netwinder-leds.c
@@ -31,13 +31,13 @@
static char led_state;
static char hw_led_state;
-static DEFINE_SPINLOCK(leds_lock);
+static DEFINE_RAW_SPINLOCK(leds_lock);
static void netwinder_leds_event(led_event_t evt)
{
unsigned long flags;
- spin_lock_irqsave(&leds_lock, flags);
+ raw_spin_lock_irqsave(&leds_lock, flags);
switch (evt) {
case led_start:
@@ -117,12 +117,12 @@
break;
}
- spin_unlock_irqrestore(&leds_lock, flags);
+ raw_spin_unlock_irqrestore(&leds_lock, flags);
if (led_state & LED_STATE_ENABLED) {
- spin_lock_irqsave(&nw_gpio_lock, flags);
+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
- spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
}
}
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 77315b9..0c20cf6 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -205,7 +205,7 @@
#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
-static DEFINE_SPINLOCK(cm_lock);
+static DEFINE_RAW_SPINLOCK(cm_lock);
/**
* cm_control - update the CM_CTRL register.
@@ -217,10 +217,10 @@
unsigned long flags;
u32 val;
- spin_lock_irqsave(&cm_lock, flags);
+ raw_spin_lock_irqsave(&cm_lock, flags);
val = readl(CM_CTRL) & ~mask;
writel(val | set, CM_CTRL);
- spin_unlock_irqrestore(&cm_lock, flags);
+ raw_spin_unlock_irqrestore(&cm_lock, flags);
}
EXPORT_SYMBOL(cm_control);
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 6467d99..a9b90a0 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -163,7 +163,7 @@
* 7:2 register number
*
*/
-static DEFINE_SPINLOCK(v3_lock);
+static DEFINE_RAW_SPINLOCK(v3_lock);
#define PCI_BUS_NONMEM_START 0x00000000
#define PCI_BUS_NONMEM_SIZE SZ_256M
@@ -284,7 +284,7 @@
unsigned long flags;
u32 v;
- spin_lock_irqsave(&v3_lock, flags);
+ raw_spin_lock_irqsave(&v3_lock, flags);
addr = v3_open_config_window(bus, devfn, where);
switch (size) {
@@ -302,7 +302,7 @@
}
v3_close_config_window();
- spin_unlock_irqrestore(&v3_lock, flags);
+ raw_spin_unlock_irqrestore(&v3_lock, flags);
*val = v;
return PCIBIOS_SUCCESSFUL;
@@ -314,7 +314,7 @@
unsigned long addr;
unsigned long flags;
- spin_lock_irqsave(&v3_lock, flags);
+ raw_spin_lock_irqsave(&v3_lock, flags);
addr = v3_open_config_window(bus, devfn, where);
switch (size) {
@@ -335,7 +335,7 @@
}
v3_close_config_window();
- spin_unlock_irqrestore(&v3_lock, flags);
+ raw_spin_unlock_irqrestore(&v3_lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -510,7 +510,7 @@
hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
- spin_lock_irqsave(&v3_lock, flags);
+ raw_spin_lock_irqsave(&v3_lock, flags);
/*
* Unlock V3 registers, but only if they were previously locked.
@@ -583,7 +583,7 @@
printk(KERN_ERR "PCI: unable to grab PCI error "
"interrupt: %d\n", ret);
- spin_unlock_irqrestore(&v3_lock, flags);
+ raw_spin_unlock_irqrestore(&v3_lock, flags);
}
void __init pci_v3_postinit(void)
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index e9a5893..ab19445 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -54,7 +54,7 @@
* these transactions are atomic or we will end up
* with corrupt data on the bus or in a driver.
*/
-static DEFINE_SPINLOCK(ixp4xx_pci_lock);
+static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);
/*
* Read from PCI config space
@@ -62,10 +62,10 @@
static void crp_read(u32 ad_cbe, u32 *data)
{
unsigned long flags;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_CRP_AD_CBE = ad_cbe;
*data = *PCI_CRP_RDATA;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
}
/*
@@ -74,10 +74,10 @@
static void crp_write(u32 ad_cbe, u32 data)
{
unsigned long flags;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
*PCI_CRP_WDATA = data;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
}
static inline int check_master_abort(void)
@@ -101,7 +101,7 @@
int retval = 0;
int i;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_NP_AD = addr;
@@ -118,7 +118,7 @@
if(check_master_abort())
retval = 1;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
return retval;
}
@@ -127,7 +127,7 @@
unsigned long flags;
int retval = 0;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_NP_AD = addr;
@@ -140,7 +140,7 @@
if(check_master_abort())
retval = 1;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
return retval;
}
@@ -149,7 +149,7 @@
unsigned long flags;
int retval = 0;
- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
*PCI_NP_AD = addr;
@@ -162,7 +162,7 @@
if(check_master_abort())
retval = 1;
- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
return retval;
}
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 5663ef7..9b5c2ae 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -190,6 +190,9 @@
select MSM_SCM if SMP
select MSM_GPIOMUX
select MSM_REMOTE_SPINLOCK_SFPB
+ select MSM_PIL
+ select MSM_QDSP6_APR
+ select MSM_AUDIO_QDSP6 if SND_SOC
config ARCH_MSMCOPPER
bool "MSM Copper"
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index e3ca6d1..0041f0d 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -360,6 +360,7 @@
int rc;
struct tx_pkt_info *pkt;
dma_addr_t dma_address;
+ unsigned long flags;
pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
if (pkt == NULL) {
@@ -380,17 +381,17 @@
pkt->dma_address = dma_address;
pkt->is_cmd = 1;
INIT_WORK(&pkt->work, bam_mux_write_done);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_add_tail(&pkt->list_node, &bam_tx_pool);
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
rc = sps_transfer_one(bam_tx_pipe, dma_address, len,
pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
if (rc) {
DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_del(&pkt->list_node);
DBG_INC_TX_SPS_FAILURE_CNT();
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
kfree(pkt);
}
@@ -409,10 +410,10 @@
if (in_global_reset)
return;
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
node = bam_tx_pool.next;
list_del(node);
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
info = container_of(work, struct tx_pkt_info, work);
if (info->is_cmd) {
kfree(info->skb);
@@ -524,18 +525,20 @@
pkt->dma_address = dma_address;
pkt->is_cmd = 0;
INIT_WORK(&pkt->work, bam_mux_write_done);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_add_tail(&pkt->list_node, &bam_tx_pool);
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
if (rc) {
DBG("%s sps_transfer_one failed rc=%d\n", __func__, rc);
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
list_del(&pkt->list_node);
DBG_INC_TX_SPS_FAILURE_CNT();
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
kfree(pkt);
+ if (new_skb)
+ dev_kfree_skb_any(new_skb);
} else {
spin_lock_irqsave(&bam_ch[id].lock, flags);
bam_ch[id].num_tx_pkts++;
@@ -844,15 +847,7 @@
" not disabled\n", __func__);
break;
}
- rx_register_event.options = 0;
- ret = sps_register_event(bam_rx_pipe,
- &rx_register_event);
- if (ret) {
- pr_err("%s: sps_register_event ret = %d\n",
- __func__, ret);
- break;
- }
- cur_rx_conn.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
+ cur_rx_conn.options = SPS_O_AUTO_ENABLE |
SPS_O_ACK_TRANSFERS | SPS_O_POLL;
ret = sps_set_config(bam_rx_pipe, &cur_rx_conn);
if (ret) {
@@ -1089,6 +1084,7 @@
struct list_head *node;
struct tx_pkt_info *info;
int temp_remote_status;
+ unsigned long flags;
if (code != SUBSYS_AFTER_SHUTDOWN)
return NOTIFY_DONE;
@@ -1107,7 +1103,7 @@
}
}
/*cleanup UL*/
- spin_lock(&bam_tx_pool_spinlock);
+ spin_lock_irqsave(&bam_tx_pool_spinlock, flags);
while (!list_empty(&bam_tx_pool)) {
node = bam_tx_pool.next;
list_del(node);
@@ -1126,7 +1122,7 @@
}
kfree(info);
}
- spin_unlock(&bam_tx_pool_spinlock);
+ spin_unlock_irqrestore(&bam_tx_pool_spinlock, flags);
smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
return NOTIFY_DONE;
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 44b78d1..724b3c5 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -89,7 +89,7 @@
static struct pm8xxx_pwrkey_platform_data pm8xxx_pwrkey_pdata = {
.pull_up = 1,
- .kpd_trigger_delay_us = 970,
+ .kpd_trigger_delay_us = 15625,
.wakeup = 1,
};
diff --git a/arch/arm/mach-msm/board-apq8064-regulator.c b/arch/arm/mach-msm/board-apq8064-regulator.c
index 8448600..90a9df1 100644
--- a/arch/arm/mach-msm/board-apq8064-regulator.c
+++ b/arch/arm/mach-msm/board-apq8064-regulator.c
@@ -93,6 +93,8 @@
};
VREG_CONSUMERS(L25) = {
REGULATOR_SUPPLY("8921_l25", NULL),
+ REGULATOR_SUPPLY("VDDD_CDC_D", "tabla-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "tabla-slim"),
};
VREG_CONSUMERS(L26) = {
REGULATOR_SUPPLY("8921_l26", NULL),
@@ -118,6 +120,10 @@
VREG_CONSUMERS(S4) = {
REGULATOR_SUPPLY("8921_s4", NULL),
REGULATOR_SUPPLY("sdc_vccq", "msm_sdcc.1"),
+ REGULATOR_SUPPLY("VDDIO_CDC", "tabla-slim"),
+ REGULATOR_SUPPLY("CDC_VDD_CP", "tabla-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_TX", "tabla-slim"),
+ REGULATOR_SUPPLY("CDC_VDDA_RX", "tabla-slim"),
};
VREG_CONSUMERS(S5) = {
REGULATOR_SUPPLY("8921_s5", NULL),
diff --git a/arch/arm/mach-msm/board-apq8064.c b/arch/arm/mach-msm/board-apq8064.c
index 71719cd..680060d 100644
--- a/arch/arm/mach-msm/board-apq8064.c
+++ b/arch/arm/mach-msm/board-apq8064.c
@@ -16,6 +16,8 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/slimbus/slimbus.h>
+#include <linux/mfd/wcd9310/core.h>
+#include <linux/mfd/wcd9310/pdata.h>
#include <linux/msm_ssbi.h>
#include <linux/spi/spi.h>
#include <linux/dma-mapping.h>
@@ -226,6 +228,46 @@
MAX_SDCC_CONTROLLER
};
+#define TABLA_INTERRUPT_BASE (NR_MSM_IRQS + NR_GPIO_IRQS + NR_PM8921_IRQS)
+
+/* Micbias setting is based on 8660 CDP/MTP/FLUID requirement
+ * 4 micbiases are used to power various analog and digital
+ * microphones operating at 1800 mV. Technically, all micbiases
+ * can source from single cfilter since all microphones operate
+ * at the same voltage level. The arrangement below is to make
+ * sure all cfilters are exercised. LDO_H regulator output level
+ * does not need to be as high as 2.85V. It is chosen for
+ * microphone sensitivity purpose.
+ */
+static struct tabla_pdata apq8064_tabla_platform_data = {
+ .slimbus_slave_device = {
+ .name = "tabla-slave",
+ .e_addr = {0, 0, 0x10, 0, 0x17, 2},
+ },
+ .irq = MSM_GPIO_TO_INT(62),
+ .irq_base = TABLA_INTERRUPT_BASE,
+ .num_irqs = NR_TABLA_IRQS,
+ .reset_gpio = PM8921_GPIO_PM_TO_SYS(34),
+ .micbias = {
+ .ldoh_v = TABLA_LDOH_2P85_V,
+ .cfilt1_mv = 1800,
+ .cfilt2_mv = 1800,
+ .cfilt3_mv = 1800,
+ .bias1_cfilt_sel = TABLA_CFILT1_SEL,
+ .bias2_cfilt_sel = TABLA_CFILT2_SEL,
+ .bias3_cfilt_sel = TABLA_CFILT3_SEL,
+ .bias4_cfilt_sel = TABLA_CFILT3_SEL,
+ }
+};
+
+static struct slim_device apq8064_slim_tabla = {
+ .name = "tabla-slim",
+ .e_addr = {0, 1, 0x10, 0, 0x17, 2},
+ .dev = {
+ .platform_data = &apq8064_tabla_platform_data,
+ },
+};
+
/* All SDCC controllers require VDD/VCC voltage */
static struct msm_mmc_reg_data mmc_vdd_reg_data[MAX_SDCC_CONTROLLER] = {
/* SDCC1 : eMMC card connected */
@@ -656,6 +698,27 @@
#ifdef CONFIG_HW_RANDOM_MSM
&apq8064_device_rng,
#endif
+ &msm_pcm,
+ &msm_pcm_routing,
+ &msm_cpudai0,
+ &msm_cpudai1,
+ &msm_cpudai_hdmi_rx,
+ &msm_cpudai_bt_rx,
+ &msm_cpudai_bt_tx,
+ &msm_cpudai_fm_rx,
+ &msm_cpudai_fm_tx,
+ &msm_cpu_fe,
+ &msm_stub_codec,
+ &msm_voice,
+ &msm_voip,
+ &msm_lpa_pcm,
+ &msm_cpudai_afe_01_rx,
+ &msm_cpudai_afe_01_tx,
+ &msm_cpudai_afe_02_rx,
+ &msm_cpudai_afe_02_tx,
+ &msm_pcm_afe,
+ &msm_cpudai_auxpcm_rx,
+ &msm_cpudai_auxpcm_tx,
};
static struct platform_device *sim_devices[] __initdata = {
@@ -666,6 +729,10 @@
static struct platform_device *rumi3_devices[] __initdata = {
&apq8064_device_uart_gsbi1,
&msm_device_sps_apq8064,
+ &msm_cpudai_bt_rx,
+ &msm_cpudai_bt_tx,
+ &msm_cpudai_fm_rx,
+ &msm_cpudai_fm_tx,
};
static struct msm_spi_platform_data apq8064_qup_spi_gsbi5_pdata = {
@@ -810,7 +877,11 @@
};
static struct slim_boardinfo apq8064_slim_devices[] = {
- /* Add slimbus slaves as needed */
+ {
+ .bus_num = 1,
+ .slim_slave = &apq8064_slim_tabla,
+ },
+ /* add more slimbus slaves as needed */
};
static struct msm_i2c_platform_data apq8064_i2c_qup_gsbi4_pdata = {
@@ -818,6 +889,52 @@
.src_clk_rate = 24000000,
};
+
+static struct gpiomux_setting audio_auxpcm[] = {
+ /* Suspended state */
+ {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+ },
+ /* Active state */
+ {
+ .func = GPIOMUX_FUNC_1,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_NONE,
+ },
+};
+static struct msm_gpiomux_config apq8064_audio_auxpcm_configs[] __initdata = {
+ {
+ .gpio = 43,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &audio_auxpcm[0],
+ [GPIOMUX_ACTIVE] = &audio_auxpcm[1],
+ },
+ },
+ {
+ .gpio = 44,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &audio_auxpcm[0],
+ [GPIOMUX_ACTIVE] = &audio_auxpcm[1],
+ },
+ },
+ {
+ .gpio = 45,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &audio_auxpcm[0],
+ [GPIOMUX_ACTIVE] = &audio_auxpcm[1],
+ },
+ },
+ {
+ .gpio = 46,
+ .settings = {
+ [GPIOMUX_SUSPENDED] = &audio_auxpcm[0],
+ [GPIOMUX_ACTIVE] = &audio_auxpcm[1],
+ },
+ },
+};
+
static void __init apq8064_i2c_init(void)
{
apq8064_device_qup_i2c_gsbi4.dev.platform_data =
@@ -838,6 +955,8 @@
msm_gpiomux_install(apq8064_gsbi_configs,
ARRAY_SIZE(apq8064_gsbi_configs));
+ msm_gpiomux_install(apq8064_audio_auxpcm_configs,
+ ARRAY_SIZE(apq8064_audio_auxpcm_configs));
return 0;
}
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index 1cc9d7f..13d63d3 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -83,6 +83,9 @@
CLK_DUMMY("usb_phy_clk", NULL, NULL, OFF),
CLK_DUMMY("usb_hs_clk", NULL, NULL, OFF),
CLK_DUMMY("usb_hs_pclk", NULL, NULL, OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, NULL, 0),
+ CLK_DUMMY("dma_bam_pclk", DMA_BAM_P_CLK, NULL, 0),
+ CLK_DUMMY("mem_clk", NULL, NULL, 0),
};
struct clock_init_data msm_dummy_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index 05b4896..6b3cad3 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -1072,7 +1072,7 @@
#define MSM_PMEM_MDP_SIZE 0x1900000
#define MSM7x25A_MSM_PMEM_MDP_SIZE 0x1000000
-#define MSM_PMEM_ADSP_SIZE 0x2000000
+#define MSM_PMEM_ADSP_SIZE 0x1000000
#define MSM7x25A_MSM_PMEM_ADSP_SIZE 0xB91000
diff --git a/arch/arm/mach-msm/board-msm8960-camera.c b/arch/arm/mach-msm/board-msm8960-camera.c
index 5384888..a17008b 100644
--- a/arch/arm/mach-msm/board-msm8960-camera.c
+++ b/arch/arm/mach-msm/board-msm8960-camera.c
@@ -96,6 +96,15 @@
};
+static struct msm_gpiomux_config msm8960_cdp_flash_configs[] = {
+ {
+ .gpio = 3,
+ .settings = {
+ [GPIOMUX_ACTIVE] = &cam_settings[1],
+ [GPIOMUX_SUSPENDED] = &cam_settings[0],
+ },
+ },
+};
static struct msm_gpiomux_config msm8960_cam_common_configs[] = {
{
@@ -108,7 +117,7 @@
{
.gpio = 3,
.settings = {
- [GPIOMUX_ACTIVE] = &cam_settings[1],
+ [GPIOMUX_ACTIVE] = &cam_settings[2],
[GPIOMUX_SUSPENDED] = &cam_settings[0],
},
},
@@ -202,12 +211,8 @@
#ifdef CONFIG_MSM_CAMERA_FLASH
static struct msm_camera_sensor_flash_src msm_flash_src = {
.flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT,
- ._fsrc.ext_driver_src.led_en = GPIO_CAM_GP_LED_EN1,
- ._fsrc.ext_driver_src.led_flash_en = GPIO_CAM_GP_LED_EN2,
-#if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
- defined(CONFIG_GPIO_SX150X_MODULE))
- ._fsrc.ext_driver_src.expander_info = cam_expander_info,
-#endif
+ ._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO,
+ ._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO,
};
#endif
@@ -456,6 +461,20 @@
msm_gpiomux_install(msm8960_cam_common_configs,
ARRAY_SIZE(msm8960_cam_common_configs));
+ if (machine_is_msm8960_cdp()) {
+ msm_gpiomux_install(msm8960_cdp_flash_configs,
+ ARRAY_SIZE(msm8960_cdp_flash_configs));
+ msm_flash_src._fsrc.ext_driver_src.led_en =
+ GPIO_CAM_GP_LED_EN1;
+ msm_flash_src._fsrc.ext_driver_src.led_flash_en =
+ GPIO_CAM_GP_LED_EN2;
+ #if defined(CONFIG_I2C) && (defined(CONFIG_GPIO_SX150X) || \
+ defined(CONFIG_GPIO_SX150X_MODULE))
+ msm_flash_src._fsrc.ext_driver_src.expander_info =
+ cam_expander_info;
+ #endif
+ }
+
if (machine_is_msm8960_liquid()) {
struct msm_camera_sensor_info *s_info;
s_info = msm8960_camera_sensor_imx074.dev.platform_data;
diff --git a/arch/arm/mach-msm/board-msm8960-pmic.c b/arch/arm/mach-msm/board-msm8960-pmic.c
index 99682c3..298956b 100644
--- a/arch/arm/mach-msm/board-msm8960-pmic.c
+++ b/arch/arm/mach-msm/board-msm8960-pmic.c
@@ -203,7 +203,7 @@
static struct pm8xxx_pwrkey_platform_data pm8xxx_pwrkey_pdata = {
.pull_up = 1,
- .kpd_trigger_delay_us = 970,
+ .kpd_trigger_delay_us = 15625,
.wakeup = 1,
};
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index 7ffb9b2..251c1d9 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -576,7 +576,7 @@
#define MSM_PMEM_KERNEL_EBI1_SIZE 0xB0C000
#define MSM_ION_EBI_SIZE (MSM_PMEM_SIZE + 0x600000)
#define MSM_ION_ADSP_SIZE MSM_PMEM_ADSP_SIZE
-#define MSM_ION_HEAP_NUM 4
+#define MSM_ION_HEAP_NUM 5
#else
#define MSM_PMEM_KERNEL_EBI1_SIZE 0x110C000
#define MSM_ION_HEAP_NUM 2
@@ -755,6 +755,11 @@
.size = MSM_ION_ADSP_SIZE,
.memory_type = ION_EBI_TYPE,
},
+ {
+ .id = ION_HEAP_IOMMU_ID,
+ .type = ION_HEAP_TYPE_IOMMU,
+ .name = ION_IOMMU_HEAP_NAME,
+ },
#endif
}
};
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index d013d11..36d6aab 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5785,7 +5785,7 @@
static struct pm8xxx_pwrkey_platform_data pm8058_pwrkey_pdata = {
.pull_up = 1,
- .kpd_trigger_delay_us = 970,
+ .kpd_trigger_delay_us = 15625,
.wakeup = 1,
};
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 154c7d1..e814b4e 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -837,6 +837,7 @@
.xo_id = MSM_XO_PXO,
.name = "q6",
.pas_id = PAS_Q6,
+ .bus_port = MSM_BUS_MASTER_LPASS_PROC,
};
struct platform_device msm_8960_q6_lpass = {
@@ -875,6 +876,7 @@
.name = "modem_fw",
.depends = "q6",
.pas_id = PAS_MODEM_FW,
+ .bus_port = MSM_BUS_MASTER_MSS_FW_PROC,
};
struct platform_device msm_8960_q6_mss_fw = {
@@ -912,6 +914,7 @@
.name = "modem",
.depends = "modem_fw",
.pas_id = PAS_MODEM_SW,
+ .bus_port = MSM_BUS_MASTER_MSS_SW_PROC,
};
struct platform_device msm_8960_q6_mss_sw = {
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 75291ba..1745f26 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -556,7 +556,6 @@
CAMIO_JPEG_PCLK,
CAMIO_VPE_CLK,
CAMIO_VPE_PCLK,
- CAMIO_VPE_AXI_CLK,
CAMIO_CSI0_PHY_CLK,
CAMIO_CSI1_PHY_CLK,
diff --git a/arch/arm/mach-msm/msm_rq_stats.c b/arch/arm/mach-msm/msm_rq_stats.c
index 9daaaba..da7fb51 100644
--- a/arch/arm/mach-msm/msm_rq_stats.c
+++ b/arch/arm/mach-msm/msm_rq_stats.c
@@ -198,6 +198,8 @@
static int __init msm_rq_stats_init(void)
{
+ int ret;
+
rq_wq = create_singlethread_workqueue("rq_stats");
BUG_ON(!rq_wq);
INIT_WORK(&rq_info.def_timer_work, def_work_fn);
@@ -206,7 +208,9 @@
rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
rq_info.rq_poll_last_jiffy = 0;
rq_info.def_timer_last_jiffy = 0;
+ ret = init_rq_attribs();
+
rq_info.init = 1;
- return init_rq_attribs();
+ return ret;
}
late_initcall(msm_rq_stats_init);
diff --git a/arch/arm/mach-msm/peripheral-reset-8960.c b/arch/arm/mach-msm/peripheral-reset-8960.c
index 54b103a..fa22e4e 100644
--- a/arch/arm/mach-msm/peripheral-reset-8960.c
+++ b/arch/arm/mach-msm/peripheral-reset-8960.c
@@ -100,49 +100,55 @@
bool xo;
/* Enable A2XB bridge */
- reg = readl(RIVA_PMU_A2XB_CFG);
+ reg = readl_relaxed(RIVA_PMU_A2XB_CFG);
reg |= RIVA_PMU_A2XB_CFG_EN;
- writel(reg, RIVA_PMU_A2XB_CFG);
+ writel_relaxed(reg, RIVA_PMU_A2XB_CFG);
/* Determine which XO to use */
- reg = readl(RIVA_PMU_CFG);
+ reg = readl_relaxed(RIVA_PMU_CFG);
xo = (reg & RIVA_PMU_CFG_IRIS_XO_MODE) == RIVA_PMU_CFG_IRIS_XO_MODE_48;
/* Program PLL 13 to 960 MHz */
- reg = readl(RIVA_PLL_MODE);
+ reg = readl_relaxed(RIVA_PLL_MODE);
reg &= ~(PLL_MODE_BYPASSNL | PLL_MODE_OUTCTRL | PLL_MODE_RESET_N);
- writel(reg, RIVA_PLL_MODE);
+ writel_relaxed(reg, RIVA_PLL_MODE);
if (xo)
- writel(0x40000C00 | 40, RIVA_PLL_L_VAL);
+ writel_relaxed(0x40000C00 | 40, RIVA_PLL_L_VAL);
else
- writel(0x40000C00 | 50, RIVA_PLL_L_VAL);
- writel(0, RIVA_PLL_M_VAL);
- writel(1, RIVA_PLL_N_VAL);
+ writel_relaxed(0x40000C00 | 50, RIVA_PLL_L_VAL);
+ writel_relaxed(0, RIVA_PLL_M_VAL);
+ writel_relaxed(1, RIVA_PLL_N_VAL);
writel_relaxed(0x01495227, RIVA_PLL_CONFIG);
- reg = readl(RIVA_PLL_MODE);
+ reg = readl_relaxed(RIVA_PLL_MODE);
reg &= ~(PLL_MODE_REF_XO_SEL);
reg |= xo ? PLL_MODE_REF_XO_SEL_RF : PLL_MODE_REF_XO_SEL_CXO;
- writel(reg, RIVA_PLL_MODE);
+ writel_relaxed(reg, RIVA_PLL_MODE);
/* Enable PLL 13 */
reg |= PLL_MODE_BYPASSNL;
- writel(reg, RIVA_PLL_MODE);
+ writel_relaxed(reg, RIVA_PLL_MODE);
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ mb();
usleep_range(10, 20);
reg |= PLL_MODE_RESET_N;
- writel(reg, RIVA_PLL_MODE);
+ writel_relaxed(reg, RIVA_PLL_MODE);
reg |= PLL_MODE_OUTCTRL;
- writel(reg, RIVA_PLL_MODE);
+ writel_relaxed(reg, RIVA_PLL_MODE);
/* Wait for PLL to settle */
+ mb();
usleep_range(50, 100);
/* Configure cCPU for 240 MHz */
- reg = readl(RIVA_PMU_CLK_ROOT3);
- if (readl(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
+ reg = readl_relaxed(RIVA_PMU_CLK_ROOT3);
+ if (readl_relaxed(RIVA_PMU_ROOT_CLK_SEL) & RIVA_PMU_ROOT_CLK_SEL_3) {
reg &= ~(RIVA_PMU_CLK_ROOT3_SRC0_SEL |
RIVA_PMU_CLK_ROOT3_SRC0_DIV);
reg |= RIVA_PMU_CLK_ROOT3_SRC0_SEL_RIVA |
@@ -153,34 +159,34 @@
reg |= RIVA_PMU_CLK_ROOT3_SRC1_SEL_RIVA |
RIVA_PMU_CLK_ROOT3_SRC1_DIV_2;
}
- writel(reg, RIVA_PMU_CLK_ROOT3);
+ writel_relaxed(reg, RIVA_PMU_CLK_ROOT3);
reg |= RIVA_PMU_CLK_ROOT3_ENA;
- writel(reg, RIVA_PMU_CLK_ROOT3);
- reg = readl(RIVA_PMU_ROOT_CLK_SEL);
+ writel_relaxed(reg, RIVA_PMU_CLK_ROOT3);
+ reg = readl_relaxed(RIVA_PMU_ROOT_CLK_SEL);
reg ^= RIVA_PMU_ROOT_CLK_SEL_3;
- writel(reg, RIVA_PMU_ROOT_CLK_SEL);
+ writel_relaxed(reg, RIVA_PMU_ROOT_CLK_SEL);
/* Use the high vector table */
- reg = readl(RIVA_PMU_CCPU_CTL);
+ reg = readl_relaxed(RIVA_PMU_CCPU_CTL);
reg |= RIVA_PMU_CCPU_CTL_HIGH_IVT | RIVA_PMU_CCPU_CTL_REMAP_EN;
- writel(reg, RIVA_PMU_CCPU_CTL);
+ writel_relaxed(reg, RIVA_PMU_CCPU_CTL);
/* Set base memory address */
writel_relaxed(riva_start >> 16, RIVA_PMU_CCPU_BOOT_REMAP_ADDR);
/* Clear warmboot bit indicating this is a cold boot */
- reg = readl(RIVA_PMU_CFG);
+ reg = readl_relaxed(RIVA_PMU_CFG);
reg &= ~(RIVA_PMU_CFG_WARM_BOOT);
- writel(reg, RIVA_PMU_CFG);
+ writel_relaxed(reg, RIVA_PMU_CFG);
/* Enable the cCPU clock */
- reg = readl(RIVA_PMU_OVRD_VAL);
+ reg = readl_relaxed(RIVA_PMU_OVRD_VAL);
reg |= RIVA_PMU_OVRD_VAL_CCPU_CLK;
- writel(reg, RIVA_PMU_OVRD_VAL);
+ writel_relaxed(reg, RIVA_PMU_OVRD_VAL);
/* Take cCPU out of reset */
reg |= RIVA_PMU_OVRD_VAL_CCPU_RESET;
- writel(reg, RIVA_PMU_OVRD_VAL);
+ writel_relaxed(reg, RIVA_PMU_OVRD_VAL);
return 0;
}
@@ -189,9 +195,9 @@
{
u32 reg;
/* Put riva into reset */
- reg = readl(RIVA_PMU_OVRD_VAL);
+ reg = readl_relaxed(RIVA_PMU_OVRD_VAL);
reg &= ~(RIVA_PMU_OVRD_VAL_CCPU_RESET | RIVA_PMU_OVRD_VAL_CCPU_CLK);
- writel(reg, RIVA_PMU_OVRD_VAL);
+ writel_relaxed(reg, RIVA_PMU_OVRD_VAL);
return 0;
}
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 6e85fbc..24c479c 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/err.h>
+#include <mach/msm_bus.h>
#include <mach/msm_iomap.h>
#include <mach/msm_xo.h>
@@ -194,6 +195,11 @@
if (drv->modem_base)
pil_q6v4_init_modem(drv->modem_base, pdata->jtag_clk_reg);
+ /* Unhalt bus port */
+ err = msm_bus_axi_portunhalt(pdata->bus_port);
+ if (err)
+ dev_err(pil->dev, "Failed to unhalt bus port\n");
+
/*
* Assert AXIS_ACLK_EN override to allow for correct updating of the
* QDSP6_CORE_STATE status bit. This is mandatory only for the SW Q6
@@ -269,6 +275,10 @@
{
u32 reg;
struct q6v4_data *drv = dev_get_drvdata(pil->dev);
+ const struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
+
+ /* Make sure bus port is halted */
+ msm_bus_axi_porthalt(pdata->bus_port);
/* Turn off Q6 core clock */
writel_relaxed(Q6SS_SRC_SWITCH_CLK_OVR,
@@ -319,6 +329,11 @@
err = pil_q6v4_power_up(pil->dev);
if (err)
return err;
+
+ /* Unhalt bus port */
+ err = msm_bus_axi_portunhalt(pdata->bus_port);
+ if (err)
+ dev_err(pil->dev, "Failed to unhalt bus port\n");
return pas_auth_and_reset(pdata->pas_id);
}
@@ -328,6 +343,9 @@
struct q6v4_data *drv = dev_get_drvdata(pil->dev);
struct pil_q6v4_pdata *pdata = pil->dev->platform_data;
+ /* Make sure bus port is halted */
+ msm_bus_axi_porthalt(pdata->bus_port);
+
ret = pas_shutdown(pdata->pas_id);
if (ret)
return ret;
diff --git a/arch/arm/mach-msm/pil-q6v4.h b/arch/arm/mach-msm/pil-q6v4.h
index b4715f0..54bdf88 100644
--- a/arch/arm/mach-msm/pil-q6v4.h
+++ b/arch/arm/mach-msm/pil-q6v4.h
@@ -22,5 +22,6 @@
const char *name;
const char *depends;
const unsigned pas_id;
+ int bus_port;
};
#endif
diff --git a/arch/arm/mach-msm/qdsp6v2/lpa_if_hdmi.c b/arch/arm/mach-msm/qdsp6v2/lpa_if_hdmi.c
index 64344ef..c19fd85 100644
--- a/arch/arm/mach-msm/qdsp6v2/lpa_if_hdmi.c
+++ b/arch/arm/mach-msm/qdsp6v2/lpa_if_hdmi.c
@@ -404,6 +404,9 @@
}
core_req_bus_bandwith(AUDIO_IF_BUS_ID, 0, 0);
+ if (hdmi_msm_audio_get_sample_rate() != HDMI_SAMPLE_RATE_48KHZ)
+ hdmi_msm_audio_sample_rate_reset(HDMI_SAMPLE_RATE_48KHZ);
+
return 0;
}
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index e794801..691dad2 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -660,9 +660,11 @@
if (smsm_info.state) {
writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));
- /* clear apps SMSM to restart SMSM init handshake */
- if (restart_pid == SMSM_MODEM)
- writel_relaxed(0, SMSM_STATE_ADDR(SMSM_APPS));
+ /* restart SMSM init handshake */
+ if (restart_pid == SMSM_MODEM) {
+ smsm_change_state(SMSM_APPS_STATE,
+ SMSM_INIT | SMSM_SMD_LOOPBACK, 0);
+ }
/* notify SMSM processors */
smsm_irq_handler(0, 0);
@@ -2541,8 +2543,10 @@
void *data);
static struct restart_notifier_block restart_notifiers[] = {
- {SMSM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
- {SMSM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+ {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+ {SMD_WCNSS, "riva", .nb.notifier_call = restart_notifier_cb},
+ {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
};
static int restart_notifier_cb(struct notifier_block *this,
diff --git a/arch/arm/mach-msm/vreg.c b/arch/arm/mach-msm/vreg.c
index 8f782a9..cffa5c7 100644
--- a/arch/arm/mach-msm/vreg.c
+++ b/arch/arm/mach-msm/vreg.c
@@ -18,7 +18,12 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
#include <linux/debugfs.h>
+#include <linux/regulator/consumer.h>
#include <linux/string.h>
#include <mach/vreg.h>
@@ -33,173 +38,187 @@
#endif
struct vreg {
- const char *name;
- unsigned id;
- int status;
- unsigned refcnt;
+ struct list_head list;
+ struct mutex lock;
+ const char *name;
+ u64 refcnt;
+ unsigned mv;
+ struct regulator *reg;
};
-#define VREG(_name, _id, _status, _refcnt) \
- { .name = _name, .id = _id, .status = _status, .refcnt = _refcnt }
+static LIST_HEAD(vreg_list);
+static DEFINE_MUTEX(vreg_lock);
-static struct vreg vregs[] = {
- VREG("msma", 0, 0, 0),
- VREG("msmp", 1, 0, 0),
- VREG("msme1", 2, 0, 0),
- VREG("msmc1", 3, 0, 0),
- VREG("msmc2", 4, 0, 0),
- VREG("gp3", 5, 0, 0),
- VREG("msme2", 6, 0, 0),
- VREG("gp4", 7, 0, 0),
- VREG("gp1", 8, 0, 0),
- VREG("tcxo", 9, 0, 0),
- VREG("pa", 10, 0, 0),
- VREG("rftx", 11, 0, 0),
- VREG("rfrx1", 12, 0, 0),
- VREG("rfrx2", 13, 0, 0),
- VREG("synt", 14, 0, 0),
- VREG("wlan", 15, 0, 0),
- VREG("usb", 16, 0, 0),
- VREG("boost", 17, 0, 0),
- VREG("mmc", 18, 0, 0),
- VREG("ruim", 19, 0, 0),
- VREG("msmc0", 20, 0, 0),
- VREG("gp2", 21, 0, 0),
- VREG("gp5", 22, 0, 0),
- VREG("gp6", 23, 0, 0),
- VREG("rf", 24, 0, 0),
- VREG("rf_vco", 26, 0, 0),
- VREG("mpll", 27, 0, 0),
- VREG("s2", 28, 0, 0),
- VREG("s3", 29, 0, 0),
- VREG("rfubm", 30, 0, 0),
- VREG("ncp", 31, 0, 0),
- VREG("gp7", 32, 0, 0),
- VREG("gp8", 33, 0, 0),
- VREG("gp9", 34, 0, 0),
- VREG("gp10", 35, 0, 0),
- VREG("gp11", 36, 0, 0),
- VREG("gp12", 37, 0, 0),
- VREG("gp13", 38, 0, 0),
- VREG("gp14", 39, 0, 0),
- VREG("gp15", 40, 0, 0),
- VREG("gp16", 41, 0, 0),
- VREG("gp17", 42, 0, 0),
- VREG("s4", 43, 0, 0),
- VREG("usb2", 44, 0, 0),
- VREG("wlan2", 45, 0, 0),
- VREG("xo_out", 46, 0, 0),
- VREG("lvsw0", 47, 0, 0),
- VREG("lvsw1", 48, 0, 0),
- VREG("mddi", 49, 0, 0),
- VREG("pllx", 50, 0, 0),
- VREG("wlan3", 51, 0, 0),
- VREG("emmc", 52, 0, 0),
- VREG("wlan_tcx0", 53, 0, 0),
- VREG("usim2", 54, 0, 0),
- VREG("usim", 55, 0, 0),
- VREG("bt", 56, 0, 0),
- VREG("wlan4", 57, 0, 0),
-};
+#ifdef CONFIG_DEBUG_FS
+static void vreg_add_debugfs(struct vreg *vreg);
+#else
+static inline void vreg_add_debugfs(struct vreg *vreg) { }
+#endif
+
+static struct vreg *vreg_create(const char *id)
+{
+ int rc;
+ struct vreg *vreg;
+
+ vreg = kzalloc(sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&vreg->list);
+ mutex_init(&vreg->lock);
+
+ vreg->reg = regulator_get(NULL, id);
+ if (IS_ERR(vreg->reg)) {
+ rc = PTR_ERR(vreg->reg);
+ goto free_vreg;
+ }
+
+ vreg->name = kstrdup(id, GFP_KERNEL);
+ if (!vreg->name) {
+ rc = -ENOMEM;
+ goto put_reg;
+ }
+
+ list_add_tail(&vreg->list, &vreg_list);
+ vreg_add_debugfs(vreg);
+
+ return vreg;
+
+put_reg:
+ regulator_put(vreg->reg);
+free_vreg:
+ kfree(vreg);
+error:
+ return ERR_PTR(rc);
+}
+
+static void vreg_destroy(struct vreg *vreg)
+{
+ if (!vreg)
+ return;
+
+ if (vreg->refcnt)
+ regulator_disable(vreg->reg);
+
+ kfree(vreg->name);
+ regulator_put(vreg->reg);
+ kfree(vreg);
+}
struct vreg *vreg_get(struct device *dev, const char *id)
{
- int n;
- for (n = 0; n < ARRAY_SIZE(vregs); n++) {
- if (!strcmp(vregs[n].name, id))
- return vregs + n;
+ struct vreg *vreg = NULL;
+
+ if (!id)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&vreg_lock);
+ list_for_each_entry(vreg, &vreg_list, list) {
+ if (!strncmp(vreg->name, id, 10))
+ goto ret;
}
- return ERR_PTR(-ENOENT);
+
+ vreg = vreg_create(id);
+
+ret:
+ mutex_unlock(&vreg_lock);
+ return vreg;
}
EXPORT_SYMBOL(vreg_get);
void vreg_put(struct vreg *vreg)
{
+ kfree(vreg->name);
+ regulator_put(vreg->reg);
}
int vreg_enable(struct vreg *vreg)
{
- unsigned id = vreg->id;
- int enable = VREG_SWITCH_ENABLE;
+ int rc = 0;
+ if (!vreg)
+ return -ENODEV;
- if (vreg->refcnt == 0)
- vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+ mutex_lock(&vreg->lock);
+ if (vreg->refcnt == 0) {
+ rc = regulator_enable(vreg->reg);
+ if (!rc)
+ vreg->refcnt++;
+ } else {
+ rc = 0;
+ if (vreg->refcnt < UINT_MAX)
+ vreg->refcnt++;
+ }
+ mutex_unlock(&vreg->lock);
- if ((vreg->refcnt < UINT_MAX) && (!vreg->status))
- vreg->refcnt++;
-
- return vreg->status;
+ return rc;
}
EXPORT_SYMBOL(vreg_enable);
int vreg_disable(struct vreg *vreg)
{
- unsigned id = vreg->id;
- int disable = VREG_SWITCH_DISABLE;
+ int rc = 0;
+ if (!vreg)
+ return -ENODEV;
- if (!vreg->refcnt)
- return 0;
-
- if (vreg->refcnt == 1)
- vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &disable);
-
- if (!vreg->status)
+ mutex_lock(&vreg->lock);
+ if (vreg->refcnt == 0) {
+ pr_warn("%s: unbalanced disables for vreg %s\n",
+ __func__, vreg->name);
+ rc = -EINVAL;
+ } else if (vreg->refcnt == 1) {
+ rc = regulator_disable(vreg->reg);
+ if (!rc)
+ vreg->refcnt--;
+ } else {
+ rc = 0;
vreg->refcnt--;
+ }
+ mutex_unlock(&vreg->lock);
- return vreg->status;
+ return rc;
}
EXPORT_SYMBOL(vreg_disable);
int vreg_set_level(struct vreg *vreg, unsigned mv)
{
- unsigned id = vreg->id;
+ unsigned uv;
+ int rc;
- vreg->status = msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
- return vreg->status;
+ if (!vreg)
+ return -EINVAL;
+
+ if (mv > (UINT_MAX / 1000))
+ return -ERANGE;
+
+ uv = mv * 1000;
+
+ mutex_lock(&vreg->lock);
+ rc = regulator_set_voltage(vreg->reg, uv, uv);
+ if (!rc)
+ vreg->mv = mv;
+ mutex_unlock(&vreg->lock);
+
+ return rc;
}
EXPORT_SYMBOL(vreg_set_level);
#if defined(CONFIG_DEBUG_FS)
-static int vreg_debug_set(void *data, u64 val)
-{
- struct vreg *vreg = data;
- switch (val) {
- case 0:
- vreg_disable(vreg);
- break;
- case 1:
- vreg_enable(vreg);
- break;
- default:
- vreg_set_level(vreg, val);
- break;
- }
- return 0;
-}
-
-static int vreg_debug_get(void *data, u64 *val)
+static int vreg_debug_enabled_set(void *data, u64 val)
{
struct vreg *vreg = data;
- if (!vreg->status)
- *val = 0;
+ if (val == 0)
+ return vreg_disable(vreg);
+ else if (val == 1)
+ return vreg_enable(vreg);
else
- *val = 1;
-
- return 0;
+ return -EINVAL;
}
-static int vreg_debug_count_set(void *data, u64 val)
-{
- struct vreg *vreg = data;
- if (val > UINT_MAX)
- val = UINT_MAX;
- vreg->refcnt = val;
- return 0;
-}
-
-static int vreg_debug_count_get(void *data, u64 *val)
+static int vreg_debug_enabled_get(void *data, u64 *val)
{
struct vreg *vreg = data;
@@ -208,33 +227,97 @@
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(vreg_fops, vreg_debug_get, vreg_debug_set, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(vreg_count_fops, vreg_debug_count_get,
- vreg_debug_count_set, "%llu\n");
-
-static int __init vreg_debug_init(void)
+static int vreg_debug_voltage_set(void *data, u64 val)
{
- struct dentry *dent;
- int n;
- char name[32];
- const char *refcnt_name = "_refcnt";
+ struct vreg *vreg = data;
+ return vreg_set_level(vreg, val);
+}
- dent = debugfs_create_dir("vreg", 0);
- if (IS_ERR(dent))
- return 0;
+static int vreg_debug_voltage_get(void *data, u64 *val)
+{
+ struct vreg *vreg = data;
+ *val = vreg->mv;
+ return 0;
+}
- for (n = 0; n < ARRAY_SIZE(vregs); n++) {
- (void) debugfs_create_file(vregs[n].name, 0644,
- dent, vregs + n, &vreg_fops);
+DEFINE_SIMPLE_ATTRIBUTE(vreg_debug_enabled, vreg_debug_enabled_get,
+ vreg_debug_enabled_set, "%llu");
+DEFINE_SIMPLE_ATTRIBUTE(vreg_debug_voltage, vreg_debug_voltage_get,
+ vreg_debug_voltage_set, "%llu");
- strlcpy(name, vregs[n].name, sizeof(name));
- strlcat(name, refcnt_name, sizeof(name));
- (void) debugfs_create_file(name, 0644,
- dent, vregs + n, &vreg_count_fops);
+static struct dentry *root;
+
+static void vreg_add_debugfs(struct vreg *vreg)
+{
+ struct dentry *dir;
+
+ if (!root)
+ return;
+
+ dir = debugfs_create_dir(vreg->name, root);
+
+ if (IS_ERR_OR_NULL(dir))
+ goto err;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("enabled", 0644, dir, vreg,
+ &vreg_debug_enabled)))
+ goto destroy;
+
+ if (IS_ERR_OR_NULL(debugfs_create_file("voltage", 0644, dir, vreg,
+ &vreg_debug_voltage)))
+ goto destroy;
+
+ return;
+
+destroy:
+ debugfs_remove_recursive(dir);
+err:
+ pr_warn("%s: could not create debugfs for vreg %s\n",
+ __func__, vreg->name);
+}
+
+static int __devinit vreg_debug_init(void)
+{
+ root = debugfs_create_dir("vreg", NULL);
+
+ if (IS_ERR_OR_NULL(root)) {
+ pr_debug("%s: error initializing debugfs: %ld - "
+ "disabling debugfs\n",
+ __func__, root ? PTR_ERR(root) : 0);
+ root = NULL;
}
return 0;
}
-
-device_initcall(vreg_debug_init);
+static void __devexit vreg_debug_exit(void)
+{
+ if (root)
+ debugfs_remove_recursive(root);
+ root = NULL;
+}
+#else
+static inline int __init vreg_debug_init(void) { return 0; }
+static inline void __exit vreg_debug_exit(void) { return 0; }
#endif
+
+static int __init vreg_init(void)
+{
+ return vreg_debug_init();
+}
+module_init(vreg_init);
+
+static void __exit vreg_exit(void)
+{
+ struct vreg *vreg, *next;
+ vreg_debug_exit();
+
+ mutex_lock(&vreg_lock);
+ list_for_each_entry_safe(vreg, next, &vreg_list, list)
+ vreg_destroy(vreg);
+ mutex_unlock(&vreg_lock);
+}
+module_exit(vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("vreg.c regulator shim");
+MODULE_VERSION("1.0");
diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c
index c9e32de..ccd4918 100644
--- a/arch/arm/mach-shark/leds.c
+++ b/arch/arm/mach-shark/leds.c
@@ -36,7 +36,7 @@
static short hw_led_state;
static short saved_state;
-static DEFINE_SPINLOCK(leds_lock);
+static DEFINE_RAW_SPINLOCK(leds_lock);
short sequoia_read(int addr) {
outw(addr,0x24);
@@ -52,7 +52,7 @@
{
unsigned long flags;
- spin_lock_irqsave(&leds_lock, flags);
+ raw_spin_lock_irqsave(&leds_lock, flags);
hw_led_state = sequoia_read(0x09);
@@ -144,7 +144,7 @@
if (led_state & LED_STATE_ENABLED)
sequoia_write(hw_led_state,0x09);
- spin_unlock_irqrestore(&leds_lock, flags);
+ raw_spin_unlock_irqrestore(&leds_lock, flags);
}
static int __init leds_init(void)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 7aacb84..bc5ffce 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -29,7 +29,8 @@
static void __iomem *l2x0_base;
static uint32_t aux_ctrl_save;
static uint32_t data_latency_ctrl;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
+
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
static u32 l2x0_cache_id;
@@ -126,7 +127,11 @@
void l2x0_cache_sync(void)
{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
#ifdef CONFIG_PL310_ERRATA_727915
@@ -167,9 +172,9 @@
#endif
/* clean all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
@@ -184,13 +189,13 @@
#endif
/* clean all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
debug_writel(0x00);
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
@@ -198,13 +203,13 @@
unsigned long flags;
/* invalidate all ways */
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
/* Invalidating when L2 is enabled is a nono */
BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -212,7 +217,7 @@
void __iomem *base = l2x0_base;
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
@@ -237,13 +242,13 @@
}
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
@@ -277,7 +282,7 @@
return;
}
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
@@ -288,13 +293,13 @@
}
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
@@ -318,7 +323,7 @@
return;
}
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
@@ -331,13 +336,13 @@
debug_writel(0x00);
if (blk_end < end) {
- spin_unlock_irqrestore(&l2x0_lock, flags);
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
@@ -355,11 +360,11 @@
{
unsigned long flags;
- spin_lock_irqsave(&l2x0_lock, flags);
+ raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
writel_relaxed(0, l2x0_base + L2X0_CTRL);
dsb();
- spin_unlock_irqrestore(&l2x0_lock, flags);
+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba..93aac06 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -16,7 +16,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.id = 0;
- spin_lock_init(&mm->context.id_lock);
+ raw_spin_lock_init(&mm->context.id_lock);
}
static void flush_context(void)
@@ -58,7 +58,7 @@
* the broadcast. This function is also called via IPI so the
* mm->context.id_lock has to be IRQ-safe.
*/
- spin_lock_irqsave(&mm->context.id_lock, flags);
+ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
/*
* Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@
mm->context.id = asid;
cpumask_clear(mm_cpumask(mm));
}
- spin_unlock_irqrestore(&mm->context.id_lock, flags);
+ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
/*
* Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@
{
unsigned int asid;
- spin_lock(&cpu_asid_lock);
+ raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
/*
* Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@
*/
if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- spin_unlock(&cpu_asid_lock);
+ raw_spin_unlock(&cpu_asid_lock);
return;
}
#endif
@@ -153,5 +153,5 @@
}
set_mm_context(mm, asid);
- spin_unlock(&cpu_asid_lock);
+ raw_spin_unlock(&cpu_asid_lock);
}
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b806151..7d0a8c2 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
* ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
- spin_lock(&minicache_lock);
+ raw_spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(0xffff8000);
mc_copy_user_page((void *)0xffff8000, kto);
- spin_unlock(&minicache_lock);
+ raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1);
}
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index bdba6c6..b2a8f9c 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
#define from_address (0xffff8000)
#define to_address (0xffffc000)
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
/*
* Copy the user page. No aliasing to deal with so we can just
@@ -89,7 +89,7 @@
* Now copy the page using the same cache colour as the
* pages ultimate destination.
*/
- spin_lock(&v6_lock);
+ raw_spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -102,7 +102,7 @@
copy_page((void *)kto, (void *)kfrom);
- spin_unlock(&v6_lock);
+ raw_spin_unlock(&v6_lock);
}
/*
@@ -122,13 +122,13 @@
* Now clear the page using the same cache colour as
* the pages ultimate destination.
*/
- spin_lock(&v6_lock);
+ raw_spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
flush_tlb_kernel_page(to);
clear_page((void *)to);
- spin_unlock(&v6_lock);
+ raw_spin_unlock(&v6_lock);
}
struct cpu_user_fns v6_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 649bbcd..610c24c 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
L_PTE_MT_MINICACHE)
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
/*
* XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
- spin_lock(&minicache_lock);
+ raw_spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
- spin_unlock(&minicache_lock);
+ raw_spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1);
}
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index 40b9575..41bbd3a 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -37,6 +37,7 @@
wait_queue_head_t queue; /* Holding pen for processes pending lock */
struct file *file; /* File structure for exported lock */
int state; /* Current state of the lock */
+ struct kref refcount;
};
struct genlock_handle {
@@ -47,6 +48,14 @@
taken */
};
+static void genlock_destroy(struct kref *kref)
+{
+ struct genlock *lock = container_of(kref, struct genlock,
+ refcount);
+
+ kfree(lock);
+}
+
/*
* Release the genlock object. Called when all the references to
* the genlock file descriptor are released
@@ -54,7 +63,6 @@
static int genlock_release(struct inode *inodep, struct file *file)
{
- kfree(file->private_data);
return 0;
}
@@ -96,6 +104,7 @@
/* Attach the new lock to the handle */
handle->lock = lock;
+ kref_init(&lock->refcount);
return lock;
}
@@ -131,6 +140,7 @@
struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd)
{
struct file *file;
+ struct genlock *lock;
if (handle->lock != NULL)
return ERR_PTR(-EINVAL);
@@ -139,9 +149,17 @@
if (file == NULL)
return ERR_PTR(-EBADF);
- handle->lock = file->private_data;
+ lock = file->private_data;
- return handle->lock;
+ fput(file);
+
+ if (lock == NULL)
+ return ERR_PTR(-EINVAL);
+
+ handle->lock = lock;
+ kref_get(&lock->refcount);
+
+ return lock;
}
EXPORT_SYMBOL(genlock_attach_lock);
@@ -418,7 +436,7 @@
}
spin_unlock_irqrestore(&handle->lock->lock, flags);
- fput(handle->lock->file);
+ kref_put(&handle->lock->refcount, genlock_destroy);
handle->lock = NULL;
handle->active = 0;
}
@@ -575,7 +593,8 @@
{
struct genlock_handle *handle = file->private_data;
- genlock_put_handle(handle);
+ genlock_release_lock(handle);
+ kfree(handle);
return 0;
}
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index c0a47d8..3911950 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,3 +1,3 @@
-obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 48dc9c0..e6a1b86 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -30,6 +30,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG
@@ -102,8 +103,27 @@
unsigned int kmap_cnt;
unsigned int dmap_cnt;
unsigned int usermap_cnt;
+ unsigned int iommu_map_cnt;
};
+static int ion_validate_buffer_flags(struct ion_buffer *buffer,
+ unsigned long flags)
+{
+ if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
+ buffer->iommu_map_cnt) {
+ if (buffer->flags != flags) {
+ pr_err("%s: buffer was already mapped with flags %lx,"
+ " cannot map with flags %lx\n", __func__,
+ buffer->flags, flags);
+ return 1;
+ }
+
+ } else {
+ buffer->flags = flags;
+ }
+ return 0;
+}
+
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
@@ -130,6 +150,61 @@
rb_insert_color(&buffer->node, &dev->buffers);
}
+void ion_iommu_add(struct ion_buffer *buffer,
+ struct ion_iommu_map *iommu)
+{
+ struct rb_node **p = &buffer->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (iommu->key < entry->key) {
+ p = &(*p)->rb_left;
+ } else if (iommu->key > entry->key) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: buffer %p already has mapping for domain %d"
+ " and partition %d\n", __func__,
+ buffer,
+ iommu_map_domain(iommu),
+ iommu_map_partition(iommu));
+ BUG();
+ }
+ }
+
+ rb_link_node(&iommu->node, parent, p);
+ rb_insert_color(&iommu->node, &buffer->iommu_maps);
+
+}
+
+static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
+ unsigned int domain_no,
+ unsigned int partition_no)
+{
+ struct rb_node **p = &buffer->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+ uint64_t key = domain_no;
+ key = key << 32 | partition_no;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (key < entry->key)
+ p = &(*p)->rb_left;
+ else if (key > entry->key)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
@@ -433,17 +508,9 @@
return ERR_PTR(-ENODEV);
}
- if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
- if (buffer->flags != flags) {
- pr_err("%s: buffer was already mapped with flags %lx,"
- " cannot map with flags %lx\n", __func__,
- buffer->flags, flags);
+ if (ion_validate_buffer_flags(buffer, flags)) {
vaddr = ERR_PTR(-EEXIST);
goto out;
- }
-
- } else {
- buffer->flags = flags;
}
if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
@@ -462,6 +529,179 @@
return vaddr;
}
+int __ion_iommu_map(struct ion_buffer *buffer,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long flags,
+ unsigned long *iova)
+{
+ struct ion_iommu_map *data;
+ int ret;
+
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+ if (!data)
+ return -ENOMEM;
+
+ data->buffer = buffer;
+ iommu_map_domain(data) = domain_num;
+ iommu_map_partition(data) = partition_num;
+
+ ret = buffer->heap->ops->map_iommu(buffer, data,
+ domain_num,
+ partition_num,
+ align,
+ iova_length,
+ flags);
+
+ if (ret)
+ goto out;
+
+ kref_init(&data->ref);
+ *iova = data->iova_addr;
+
+ ion_iommu_add(buffer, data);
+
+ return 0;
+
+out:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ buffer->size);
+ kfree(data);
+ return ret;
+}
+
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ struct ion_iommu_map *iommu_map;
+ int ret = 0;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+
+ if (!handle->buffer->heap->ops->map_iommu) {
+ pr_err("%s: map_iommu is not implemented by this heap.\n",
+ __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ion_validate_buffer_flags(buffer, flags)) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /*
+ * If clients don't want a custom iova length, just use whatever
+ * the buffer size is
+ */
+ if (!iova_length)
+ iova_length = buffer->size;
+
+ if (buffer->size > iova_length) {
+ pr_debug("%s: iova length %lx is not at least buffer size"
+ " %x\n", __func__, iova_length, buffer->size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (buffer->size & ~PAGE_MASK) {
+ pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
+ buffer->size, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (iova_length & ~PAGE_MASK) {
+ pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
+ iova_length, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
+ if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
+ !iommu_map) {
+ ret = __ion_iommu_map(buffer, domain_num, partition_num, align,
+ iova_length, flags, iova);
+ if (ret < 0)
+ _ion_unmap(&buffer->iommu_map_cnt,
+ &handle->iommu_map_cnt);
+ } else {
+ if (iommu_map->mapped_size != iova_length) {
+ pr_err("%s: handle %p is already mapped with length"
+ " %x, trying to map with length %lx\n",
+ __func__, handle, iommu_map->mapped_size,
+ iova_length);
+ _ion_unmap(&buffer->iommu_map_cnt,
+ &handle->iommu_map_cnt);
+ ret = -EINVAL;
+ } else {
+ kref_get(&iommu_map->ref);
+ *iova = iommu_map->iova_addr;
+ }
+ }
+ *buffer_size = buffer->size;
+out:
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
+static void ion_iommu_release(struct kref *kref)
+{
+ struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+ ref);
+ struct ion_buffer *buffer = map->buffer;
+
+ rb_erase(&map->node, &buffer->iommu_maps);
+ buffer->heap->ops->unmap_iommu(map);
+ kfree(map);
+}
+
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+
+ iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
+
+ if (!iommu_map) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, buffer);
+ goto out;
+ }
+
+ _ion_unmap(&buffer->iommu_map_cnt, &handle->iommu_map_cnt);
+ kref_put(&iommu_map->ref, ion_iommu_release);
+
+out:
+ mutex_unlock(&buffer->lock);
+
+ mutex_unlock(&client->lock);
+
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
+
struct scatterlist *ion_map_dma(struct ion_client *client,
struct ion_handle *handle,
unsigned long flags)
@@ -487,17 +727,9 @@
return ERR_PTR(-ENODEV);
}
- if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
- if (buffer->flags != flags) {
- pr_err("%s: buffer was already mapped with flags %lx,"
- " cannot map with flags %lx\n", __func__,
- buffer->flags, flags);
- sglist = ERR_PTR(-EEXIST);
- goto out;
- }
-
- } else {
- buffer->flags = flags;
+ if (ion_validate_buffer_flags(buffer, flags)) {
+ sglist = ERR_PTR(-EEXIST);
+ goto out;
}
if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
@@ -888,6 +1120,28 @@
}
EXPORT_SYMBOL(ion_handle_get_flags);
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *size)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to %s.\n",
+ __func__, __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ *size = buffer->size;
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
+
static int ion_share_release(struct inode *inode, struct file* file)
{
struct ion_buffer *buffer = file->private_data;
@@ -1001,19 +1255,13 @@
}
mutex_lock(&buffer->lock);
- if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt) {
- if (buffer->flags != flags) {
- pr_err("%s: buffer was already mapped with flags %lx,"
- " cannot map with flags %lx\n", __func__,
- buffer->flags, flags);
- ret = -EEXIST;
- mutex_unlock(&buffer->lock);
- goto err1;
- }
- } else {
- buffer->flags = flags;
+ if (ion_validate_buffer_flags(buffer, flags)) {
+ ret = -EEXIST;
+ mutex_unlock(&buffer->lock);
+ goto err1;
}
+
/* now map it to userspace */
ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma,
flags);
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 700bb79..b03fa23 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -2,6 +2,7 @@
* drivers/gpu/ion/ion_carveout_heap.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -23,8 +24,10 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/iommu.h>
#include "ion_priv.h"
+#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
struct ion_carveout_heap {
@@ -232,6 +235,108 @@
return carveout_heap->total_size;
}
+int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ unsigned long temp_phys, temp_iova;
+ struct iommu_domain *domain;
+ int i, ret = 0;
+ unsigned long extra;
+
+ data->mapped_size = iova_length;
+
+ if (!msm_use_iommu()) {
+ data->iova_addr = buffer->priv_phys;
+ return 0;
+ }
+
+ extra = iova_length - buffer->size;
+
+ data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align);
+
+ if (!data->iova_addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ temp_iova = data->iova_addr;
+ temp_phys = buffer->priv_phys;
+ for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
+ temp_phys += SZ_4K) {
+ ret = iommu_map(domain, temp_iova, temp_phys,
+ get_order(SZ_4K),
+ ION_IS_CACHED(flags) ? 1 : 0);
+
+ if (ret) {
+ pr_err("%s: could not map %lx to %lx in domain %p\n",
+ __func__, temp_iova, temp_phys, domain);
+ goto out2;
+ }
+ }
+
+ if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
+ goto out2;
+
+ return 0;
+
+
+out2:
+ for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+out:
+
+ return ret;
+}
+
+void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+ int i;
+ unsigned long temp_iova;
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ if (!msm_use_iommu())
+ return;
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ temp_iova = data->iova_addr;
+ for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
@@ -245,6 +350,8 @@
.cache_op = ion_carveout_cache_ops,
.get_allocated = ion_carveout_get_allocated,
.get_total = ion_carveout_get_total,
+ .map_iommu = ion_carveout_heap_map_iommu,
+ .unmap_iommu = ion_carveout_heap_unmap_iommu,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 8ce3c19..900f445 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -32,6 +32,9 @@
case ION_HEAP_TYPE_CARVEOUT:
heap = ion_carveout_heap_create(heap_data);
break;
+ case ION_HEAP_TYPE_IOMMU:
+ heap = ion_iommu_heap_create(heap_data);
+ break;
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
new file mode 100644
index 0000000..d37a811
--- /dev/null
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <linux/pfn.h>
+#include "ion_priv.h"
+
+#include <asm/mach/map.h>
+#include <asm/page.h>
+#include <mach/iommu_domains.h>
+
+struct ion_iommu_heap {
+ struct ion_heap heap;
+};
+
+struct ion_iommu_priv_data {
+ struct page **pages;
+ int nrpages;
+ unsigned long size;
+};
+
+static int ion_iommu_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ int ret, i;
+ struct ion_iommu_priv_data *data = NULL;
+
+ if (msm_use_iommu()) {
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->size = PFN_ALIGN(size);
+ data->nrpages = data->size >> PAGE_SHIFT;
+ data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
+ GFP_KERNEL);
+ if (!data->pages) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for (i = 0; i < data->nrpages; i++) {
+ data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!data->pages[i])
+ goto err2;
+ }
+
+
+ buffer->priv_virt = data;
+ return 0;
+
+ } else {
+ return -ENOMEM;
+ }
+
+
+err2:
+ for (i = 0; i < data->nrpages; i++) {
+ if (data->pages[i])
+ __free_page(data->pages[i]);
+ }
+ kfree(data->pages);
+err1:
+ kfree(data);
+ return ret;
+}
+
+static void ion_iommu_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_iommu_priv_data *data = buffer->priv_virt;
+ int i;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < data->nrpages; i++)
+ __free_page(data->pages[i]);
+
+ kfree(data->pages);
+ kfree(data);
+}
+
+void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long flags)
+{
+ struct ion_iommu_priv_data *data = buffer->priv_virt;
+ pgprot_t page_prot = PAGE_KERNEL;
+
+ if (!data)
+ return NULL;
+
+ if (!ION_IS_CACHED(flags))
+ page_prot = pgprot_noncached(page_prot);
+
+ buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
+
+ return buffer->vaddr;
+}
+
+void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ if (!buffer->vaddr)
+ return;
+
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+}
+
+int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma, unsigned long flags)
+{
+ struct ion_iommu_priv_data *data = buffer->priv_virt;
+ int i;
+
+ if (!data)
+ return -EINVAL;
+
+ if (!ION_IS_CACHED(flags))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ for (i = 0; i < data->nrpages; i++)
+ if (vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ data->pages[i]))
+ /*
+ * This will fail the mmap which will
+ * clean up the vma space properly.
+ */
+ return -EINVAL;
+
+ return 0;
+}
+
+int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ unsigned long temp_iova;
+ struct iommu_domain *domain;
+ struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
+ int i, j, ret = 0;
+ unsigned long extra;
+
+ BUG_ON(!msm_use_iommu());
+
+ data->mapped_size = iova_length;
+ extra = iova_length - buffer->size;
+
+ data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align);
+
+ if (!data->iova_addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ temp_iova = data->iova_addr;
+ for (i = buffer->size, j = 0; i > 0; j++, i -= SZ_4K,
+ temp_iova += SZ_4K) {
+ ret = iommu_map(domain, temp_iova,
+ page_to_phys(buffer_data->pages[j]),
+ get_order(SZ_4K),
+ ION_IS_CACHED(flags) ? 1 : 0);
+
+ if (ret) {
+ pr_err("%s: could not map %lx to %x in domain %p\n",
+ __func__, temp_iova,
+ page_to_phys(buffer_data->pages[j]),
+ domain);
+ goto out2;
+ }
+ }
+
+
+ if (extra &&
+ msm_iommu_map_extra
+ (domain, temp_iova, extra, flags) < 0)
+ goto out2;
+
+ return 0;
+
+
+out2:
+ for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ buffer->size);
+
+out:
+
+ return ret;
+}
+
+void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+ int i;
+ unsigned long temp_iova;
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ BUG_ON(!msm_use_iommu());
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ temp_iova = data->iova_addr;
+ for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
+
+static struct ion_heap_ops iommu_heap_ops = {
+ .allocate = ion_iommu_heap_allocate,
+ .free = ion_iommu_heap_free,
+ .map_user = ion_iommu_heap_map_user,
+ .map_kernel = ion_iommu_heap_map_kernel,
+ .unmap_kernel = ion_iommu_heap_unmap_kernel,
+ .map_iommu = ion_iommu_heap_map_iommu,
+ .unmap_iommu = ion_iommu_heap_unmap_iommu,
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_iommu_heap *iommu_heap;
+
+ iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
+ if (!iommu_heap)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_heap->heap.ops = &iommu_heap_ops;
+ iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
+
+ return &iommu_heap->heap;
+}
+
+void ion_iommu_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_iommu_heap *iommu_heap =
+ container_of(heap, struct ion_iommu_heap, heap);
+
+ kfree(iommu_heap);
+ iommu_heap = NULL;
+}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index ac51854..77b73e2 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -22,6 +22,7 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/ion.h>
+#include <linux/iommu.h>
struct ion_mapping;
@@ -35,6 +36,34 @@
void *vaddr;
};
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node to exist in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ * domain_info[1] = domain number
+ * domain_info[0] = partition number
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ * (may not be the same as the buffer size)
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+ unsigned long iova_addr;
+ struct rb_node node;
+ union {
+ int domain_info[2];
+ uint64_t key;
+ };
+ struct ion_buffer *buffer;
+ struct kref ref;
+ int mapped_size;
+};
+
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
/**
@@ -72,6 +101,8 @@
int dmap_cnt;
struct scatterlist *sglist;
int umap_cnt;
+ unsigned int iommu_map_cnt;
+ struct rb_root iommu_maps;
int marked;
};
@@ -109,6 +140,15 @@
unsigned int length, unsigned int cmd);
unsigned long (*get_allocated)(struct ion_heap *heap);
unsigned long (*get_total)(struct ion_heap *heap);
+ int (*map_iommu)(struct ion_buffer *buffer,
+ struct ion_iommu_map *map_data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags);
+ void (*unmap_iommu)(struct ion_iommu_map *data);
+
};
/**
@@ -136,6 +176,11 @@
const char *name;
};
+
+
+#define iommu_map_domain(__m) ((__m)->domain_info[1])
+#define iommu_map_partition(__m) ((__m)->domain_info[0])
+
/**
* ion_device_create - allocates and returns an ion device
* @custom_ioctl: arch specific ioctl function if applicable
@@ -177,6 +222,10 @@
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
/**
* kernel api to allocate/free from carveout -- used when carveout is
* used to back an architecture specific custom heap
@@ -185,6 +234,9 @@
unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
+
+
+struct ion_heap *msm_get_contiguous_heap(void);
/**
* The carveout heap returns physical addresses, since 0 may be a valid
* physical address, this is used to indicate allocation failed
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index b26d48c..5957658 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -2,6 +2,7 @@
* drivers/gpu/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -20,6 +21,8 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/iommu.h>
+#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
@@ -98,6 +101,37 @@
{
}
+void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+ int i;
+ unsigned long temp_iova;
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ if (!msm_use_iommu())
+ return;
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ temp_iova = data->iova_addr;
+ for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma, unsigned long flags)
{
@@ -160,6 +194,77 @@
return atomic_read(&system_heap_allocated);
}
+int ion_system_heap_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ int ret, i;
+ unsigned long temp_iova;
+ struct iommu_domain *domain;
+ void *temp_phys;
+ unsigned long extra;
+
+ if (!ION_IS_CACHED(flags))
+ return -EINVAL;
+
+ if (!msm_use_iommu())
+ return -EINVAL;
+
+ data->mapped_size = iova_length;
+ extra = iova_length - buffer->size;
+
+ data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align);
+
+ if (!data->iova_addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ temp_iova = data->iova_addr;
+ temp_phys = buffer->vaddr;
+ for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
+ temp_phys += SZ_4K) {
+ ret = iommu_map(domain, temp_iova,
+ page_to_phys(vmalloc_to_page(temp_phys)),
+ get_order(SZ_4K), ION_IS_CACHED(flags) ? 1 : 0);
+
+ if (ret) {
+ pr_err("%s: could not map %lx to %x in domain %p\n",
+ __func__, temp_iova,
+ page_to_phys(vmalloc_to_page(temp_phys)),
+ domain);
+ goto out2;
+ }
+ }
+
+ if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
+ goto out2;
+
+ return 0;
+
+out2:
+ for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+out:
+ return ret;
+}
+
static struct ion_heap_ops vmalloc_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
@@ -170,6 +275,8 @@
.map_user = ion_system_heap_map_user,
.cache_op = ion_system_heap_cache_ops,
.get_allocated = ion_system_heap_get_allocated,
+ .map_iommu = ion_system_heap_map_iommu,
+ .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
@@ -285,6 +392,74 @@
return atomic_read(&system_contig_heap_allocated);
}
+int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ int ret, i;
+ struct iommu_domain *domain;
+ unsigned long temp_phys, temp_iova;
+ unsigned long extra;
+
+ if (!ION_IS_CACHED(flags))
+ return -EINVAL;
+
+ if (!msm_use_iommu()) {
+ data->iova_addr = virt_to_phys(buffer->vaddr);
+ return 0;
+ }
+
+ data->mapped_size = iova_length;
+ extra = iova_length - buffer->size;
+
+ data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align);
+
+ if (!data->iova_addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+ temp_iova = data->iova_addr;
+ temp_phys = virt_to_phys(buffer->vaddr);
+ for (i = buffer->size; i > 0; i -= SZ_4K, temp_iova += SZ_4K,
+ temp_phys += SZ_4K) {
+ ret = iommu_map(domain, temp_iova,
+ temp_phys,
+ get_order(SZ_4K), ION_IS_CACHED(flags) ? 1 : 0);
+
+ if (ret) {
+ pr_err("%s: could not map %lx to %lx in domain %p\n",
+ __func__, temp_iova, temp_phys, domain);
+ goto out2;
+ }
+ }
+
+ if (extra && (msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0))
+ goto out2;
+
+ return 0;
+out2:
+ for ( ; i < buffer->size; i += SZ_4K, temp_iova -= SZ_4K)
+ iommu_unmap(domain, temp_iova, get_order(SZ_4K));
+
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+out:
+ return ret;
+}
+
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
@@ -296,6 +471,8 @@
.map_user = ion_system_contig_heap_map_user,
.cache_op = ion_system_contig_heap_cache_ops,
.get_allocated = ion_system_contig_heap_get_allocated,
+ .map_iommu = ion_system_contig_heap_map_iommu,
+ .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index 54dd056..0c96eaf 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -89,6 +89,7 @@
heaps[i] = 0;
continue;
}
+
ion_device_add_heap(idev, heaps[i]);
}
platform_set_drvdata(pdev, idev);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 8c9eff4..75c01b4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1906,7 +1906,7 @@
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
kgsl_ioctl_cff_user_event, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
- kgsl_ioctl_timestamp_event, 0),
+ kgsl_ioctl_timestamp_event, 1),
};
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 2225b7a..f0da3e8 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -100,7 +100,9 @@
return -EINVAL;
}
- if (pdata->kpd_trigger_delay_us > 62500) {
+ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+ if (pdata->kpd_trigger_delay_us > USEC_PER_SEC * 2 ||
+ pdata->kpd_trigger_delay_us < USEC_PER_SEC / 64) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@@ -124,8 +126,8 @@
pwr->phys = "pmic8xxx_pwrkey/input0";
pwr->dev.parent = &pdev->dev;
- delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
- delay = 1 + ilog2(delay);
+ delay = (pdata->kpd_trigger_delay_us << 6) / USEC_PER_SEC;
+ delay = ilog2(delay);
err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl);
if (err < 0) {
diff --git a/drivers/media/video/msm/flash.c b/drivers/media/video/msm/flash.c
index f1a24cc..6985f3c 100644
--- a/drivers/media/video/msm/flash.c
+++ b/drivers/media/video/msm/flash.c
@@ -293,6 +293,7 @@
if (sc628a_client) {
gpio_set_value_cansleep(external->led_en, 1);
gpio_set_value_cansleep(external->led_flash_en, 1);
+ usleep_range(2000, 3000);
}
rc = sc628a_i2c_write_b_flash(0x02, 0x06);
break;
@@ -301,6 +302,7 @@
if (sc628a_client) {
gpio_set_value_cansleep(external->led_en, 1);
gpio_set_value_cansleep(external->led_flash_en, 1);
+ usleep_range(2000, 3000);
}
rc = sc628a_i2c_write_b_flash(0x02, 0x49);
break;
diff --git a/drivers/media/video/msm/msm_io_8x60.c b/drivers/media/video/msm/msm_io_8x60.c
index 4ad8ff5..2262aa4 100644
--- a/drivers/media/video/msm/msm_io_8x60.c
+++ b/drivers/media/video/msm/msm_io_8x60.c
@@ -89,7 +89,6 @@
static struct clk *camio_jpeg_pclk;
static struct clk *camio_vpe_clk;
static struct clk *camio_vpe_pclk;
-static struct clk *camio_vpe_axi_clk;
static struct regulator *fs_vfe;
static struct regulator *fs_ijpeg;
static struct regulator *fs_vpe;
@@ -352,10 +351,6 @@
camio_vpe_pclk =
clk = clk_get(NULL, "vpe_pclk");
break;
- case CAMIO_VPE_AXI_CLK:
- camio_vpe_axi_clk =
- clk = clk_get(NULL, "vpe_axi_clk");
- break;
default:
break;
@@ -430,10 +425,6 @@
clk = camio_vpe_pclk;
break;
- case CAMIO_VPE_AXI_CLK:
- clk = camio_vpe_axi_clk;
- break;
-
default:
break;
}
@@ -532,10 +523,6 @@
rc = msm_camio_clk_disable(CAMIO_VPE_CLK);
if (rc < 0)
return rc;
- rc = msm_camio_clk_disable(CAMIO_VPE_AXI_CLK);
- if (rc < 0)
- return rc;
-
rc = msm_camio_clk_disable(CAMIO_VPE_PCLK);
return rc;
}
@@ -557,9 +544,6 @@
rc = msm_camio_clk_enable(CAMIO_VPE_CLK);
if (rc < 0)
return rc;
- rc = msm_camio_clk_enable(CAMIO_VPE_AXI_CLK);
- if (rc < 0)
- return rc;
rc = msm_camio_clk_enable(CAMIO_VPE_PCLK);
return rc;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 71d0fa6..918fc9e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -831,7 +831,7 @@
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
- if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ if (ddr == MMC_1_2V_DDR_MODE) {
err = mmc_set_signal_voltage(host,
MMC_SIGNAL_VOLTAGE_120, 0);
if (err)
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 75a6b5e..26441cd 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -13,7 +13,7 @@
config SPS
bool "SPS support"
depends on (HAS_IOMEM && (ARCH_MSM8960 || ARCH_MSM8X60 \
- || ARCH_APQ8064 || ARCH_MSM9615))
+ || ARCH_APQ8064 || ARCH_MSM9615 || ARCH_MSMCOPPER))
select GENERIC_ALLOCATOR
default n
help
@@ -28,10 +28,17 @@
depends on SPS
default n
help
- The BAM-DMA is used for Memory-to-Memory transfers.
- The main use cases is RPC between processors.
- The BAM-DMA hardware has 2 registers sets:
- 1. A BAM HW like all the peripherals.
- 2. A DMA channel configuration (i.e. channel priority).
+ The BAM-DMA is used for Memory-to-Memory transfers.
+ The main use cases is RPC between processors.
+ The BAM-DMA hardware has 2 registers sets:
+ 1. A BAM HW like all the peripherals.
+ 2. A DMA channel configuration (i.e. channel priority).
+
+config SPS_SUPPORT_NDP_BAM
+ bool "SPS support NDP BAM"
+ depends on SPS
+ default n
+ help
+ No-Data-Path BAM is used to improve BAM performance.
endmenu
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index 2af4fa4..4279603 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -28,6 +28,286 @@
#define BAM_MIN_VERSION 2
#define BAM_MAX_VERSION 0x1f
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+
+/* Maximum number of execution environment */
+#define BAM_MAX_EES 8
+
+/**
+ * BAM Hardware registers.
+ *
+ */
+#define CTRL (0x0)
+#define REVISION (0x4)
+#define SW_REVISION (0x80)
+#define NUM_PIPES (0x3c)
+#define TIMER (0x40)
+#define TIMER_CTRL (0x44)
+#define DESC_CNT_TRSHLD (0x8)
+#define IRQ_SRCS (0xc)
+#define IRQ_SRCS_MSK (0x10)
+#define IRQ_SRCS_UNMASKED (0x30)
+#define IRQ_STTS (0x14)
+#define IRQ_CLR (0x18)
+#define IRQ_EN (0x1c)
+#define AHB_MASTER_ERR_CTRLS (0x24)
+#define AHB_MASTER_ERR_ADDR (0x28)
+#define AHB_MASTER_ERR_DATA (0x2c)
+#define TRUST_REG (0x70)
+#define TEST_BUS_SEL (0x74)
+#define TEST_BUS_REG (0x78)
+#define CNFG_BITS (0x7c)
+#define IRQ_SRCS_EE(n) (0x800 + 128 * (n))
+#define IRQ_SRCS_MSK_EE(n) (0x804 + 128 * (n))
+#define IRQ_SRCS_UNMASKED_EE(n) (0x808 + 128 * (n))
+
+#define P_CTRL(n) (0x1000 + 4096 * (n))
+#define P_RST(n) (0x1004 + 4096 * (n))
+#define P_HALT(n) (0x1008 + 4096 * (n))
+#define P_IRQ_STTS(n) (0x1010 + 4096 * (n))
+#define P_IRQ_CLR(n) (0x1014 + 4096 * (n))
+#define P_IRQ_EN(n) (0x1018 + 4096 * (n))
+#define P_TIMER(n) (0x101c + 4096 * (n))
+#define P_TIMER_CTRL(n) (0x1020 + 4096 * (n))
+#define P_PRDCR_SDBND(n) (0x1024 + 4096 * (n))
+#define P_CNSMR_SDBND(n) (0x1028 + 4096 * (n))
+#define P_TRUST_REG(n) (0x1030 + 4096 * (n))
+#define P_EVNT_DEST_ADDR(n) (0x182c + 4096 * (n))
+#define P_EVNT_REG(n) (0x1818 + 4096 * (n))
+#define P_SW_OFSTS(n) (0x1800 + 4096 * (n))
+#define P_DATA_FIFO_ADDR(n) (0x1824 + 4096 * (n))
+#define P_DESC_FIFO_ADDR(n) (0x181c + 4096 * (n))
+#define P_EVNT_GEN_TRSHLD(n) (0x1828 + 4096 * (n))
+#define P_FIFO_SIZES(n) (0x1820 + 4096 * (n))
+#define P_RETR_CNTXT(n) (0x1834 + 4096 * (n))
+#define P_SI_CNTXT(n) (0x1838 + 4096 * (n))
+#define P_DF_CNTXT(n) (0x1830 + 4096 * (n))
+#define P_AU_PSM_CNTXT_1(n) (0x1804 + 4096 * (n))
+#define P_PSM_CNTXT_2(n) (0x1808 + 4096 * (n))
+#define P_PSM_CNTXT_3(n) (0x180c + 4096 * (n))
+#define P_PSM_CNTXT_4(n) (0x1810 + 4096 * (n))
+#define P_PSM_CNTXT_5(n) (0x1814 + 4096 * (n))
+
+/**
+ * BAM Hardware registers bitmask.
+ * format: <register>_<field>
+ *
+ */
+/* CTRL */
+#define IBC_DISABLE 0x10000
+#define BAM_CACHED_DESC_STORE 0x8000
+#define BAM_DESC_CACHE_SEL 0x6000
+#define BAM_EN_ACCUM 0x10
+#define BAM_EN 0x2
+#define BAM_SW_RST 0x1
+
+/* REVISION */
+#define BAM_INACTIV_TMR_BASE 0xff000000
+#define BAM_CMD_DESC_EN 0x800000
+#define BAM_DESC_CACHE_DEPTH 0x600000
+#define BAM_NUM_INACTIV_TMRS 0x100000
+#define BAM_INACTIV_TMRS_EXST 0x80000
+#define BAM_HIGH_FREQUENCY_BAM 0x40000
+#define BAM_HAS_NO_BYPASS 0x20000
+#define BAM_SECURED 0x10000
+#define BAM_USE_VMIDMT 0x8000
+#define BAM_AXI_ACTIVE 0x4000
+#define BAM_CE_BUFFER_SIZE 0x2000
+#define BAM_NUM_EES 0xf00
+#define BAM_REVISION 0xff
+
+/* SW_REVISION */
+#define BAM_MAJOR 0xf0000000
+#define BAM_MINOR 0xfff0000
+#define BAM_STEP 0xffff
+
+/* NUM_PIPES */
+#define BAM_NON_PIPE_GRP 0xff000000
+#define BAM_PERIPH_NON_PIPE_GRP 0xff0000
+#define BAM_NUM_PIPES 0xff
+
+/* TIMER */
+#define BAM_TIMER 0xffff
+
+/* TIMER_CTRL */
+#define TIMER_RST 0x80000000
+#define TIMER_RUN 0x40000000
+#define TIMER_MODE 0x20000000
+#define TIMER_TRSHLD 0xffff
+
+/* DESC_CNT_TRSHLD */
+#define BAM_DESC_CNT_TRSHLD 0xffff
+
+/* IRQ_SRCS */
+#define BAM_IRQ 0x80000000
+#define P_IRQ 0x7fffffff
+
+/* IRQ_STTS */
+#define IRQ_STTS_BAM_TIMER_IRQ 0x10
+#define IRQ_STTS_BAM_EMPTY_IRQ 0x8
+#define IRQ_STTS_BAM_ERROR_IRQ 0x4
+#define IRQ_STTS_BAM_HRESP_ERR_IRQ 0x2
+
+/* IRQ_CLR */
+#define IRQ_CLR_BAM_TIMER_IRQ 0x10
+#define IRQ_CLR_BAM_EMPTY_CLR 0x8
+#define IRQ_CLR_BAM_ERROR_CLR 0x4
+#define IRQ_CLR_BAM_HRESP_ERR_CLR 0x2
+
+/* IRQ_EN */
+#define IRQ_EN_BAM_TIMER_IRQ 0x10
+#define IRQ_EN_BAM_EMPTY_EN 0x8
+#define IRQ_EN_BAM_ERROR_EN 0x4
+#define IRQ_EN_BAM_HRESP_ERR_EN 0x2
+
+/* AHB_MASTER_ERR_CTRLS */
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HVMID 0x7c0000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_DIRECT_MODE 0x20000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HCID 0x1f000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HPROT 0xf00
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HBURST 0xe0
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HSIZE 0x18
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HWRITE 0x4
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HTRANS 0x3
+
+/* TRUST_REG */
+#define BAM_VMID 0x1f00
+#define BAM_RST_BLOCK 0x80
+#define BAM_EE 0x7
+
+/* TEST_BUS_SEL */
+#define BAM_DATA_ERASE 0x40000
+#define BAM_DATA_FLUSH 0x20000
+#define BAM_CLK_ALWAYS_ON 0x10000
+#define BAM_TESTBUS_SEL 0x7f
+
+/* CNFG_BITS */
+#define CNFG_BITS_BAM_CD_ENABLE 0x8000000
+#define CNFG_BITS_BAM_AU_ACCUMED 0x4000000
+#define CNFG_BITS_BAM_PSM_P_HD_DATA 0x2000000
+#define CNFG_BITS_BAM_REG_P_EN 0x1000000
+#define CNFG_BITS_BAM_WB_DSC_AVL_P_RST 0x800000
+#define CNFG_BITS_BAM_WB_RETR_SVPNT 0x400000
+#define CNFG_BITS_BAM_WB_CSW_ACK_IDL 0x200000
+#define CNFG_BITS_BAM_WB_BLK_CSW 0x100000
+#define CNFG_BITS_BAM_WB_P_RES 0x80000
+#define CNFG_BITS_BAM_SI_P_RES 0x40000
+#define CNFG_BITS_BAM_AU_P_RES 0x20000
+#define CNFG_BITS_BAM_PSM_P_RES 0x10000
+#define CNFG_BITS_BAM_PSM_CSW_REQ 0x8000
+#define CNFG_BITS_BAM_SB_CLK_REQ 0x4000
+#define CNFG_BITS_BAM_IBC_DISABLE 0x2000
+#define CNFG_BITS_BAM_NO_EXT_P_RST 0x1000
+#define CNFG_BITS_BAM_FULL_PIPE 0x800
+#define CNFG_BITS_BAM_PIPE_CNFG 0x4
+
+/* P_ctrln */
+#define P_LOCK_GROUP 0x1f0000
+#define P_WRITE_NWD 0x800
+#define P_PREFETCH_LIMIT 0x600
+#define P_AUTO_EOB_SEL 0x180
+#define P_AUTO_EOB 0x40
+#define P_SYS_MODE 0x20
+#define P_SYS_STRM 0x10
+#define P_DIRECTION 0x8
+#define P_EN 0x2
+
+/* P_RSTn */
+#define P_RST_P_SW_RST 0x1
+
+/* P_HALTn */
+#define P_HALT_P_PROD_HALTED 0x2
+#define P_HALT_P_HALT 0x1
+
+/* P_TRUST_REGn */
+#define BAM_P_VMID 0x1f00
+#define BAM_P_EE 0x7
+
+/* P_IRQ_STTSn */
+#define P_IRQ_STTS_P_TRNSFR_END_IRQ 0x20
+#define P_IRQ_STTS_P_ERR_IRQ 0x10
+#define P_IRQ_STTS_P_OUT_OF_DESC_IRQ 0x8
+#define P_IRQ_STTS_P_WAKE_IRQ 0x4
+#define P_IRQ_STTS_P_TIMER_IRQ 0x2
+#define P_IRQ_STTS_P_PRCSD_DESC_IRQ 0x1
+
+/* P_IRQ_CLRn */
+#define P_IRQ_CLR_P_TRNSFR_END_CLR 0x20
+#define P_IRQ_CLR_P_ERR_CLR 0x10
+#define P_IRQ_CLR_P_OUT_OF_DESC_CLR 0x8
+#define P_IRQ_CLR_P_WAKE_CLR 0x4
+#define P_IRQ_CLR_P_TIMER_CLR 0x2
+#define P_IRQ_CLR_P_PRCSD_DESC_CLR 0x1
+
+/* P_IRQ_ENn */
+#define P_IRQ_EN_P_TRNSFR_END_EN 0x20
+#define P_IRQ_EN_P_ERR_EN 0x10
+#define P_IRQ_EN_P_OUT_OF_DESC_EN 0x8
+#define P_IRQ_EN_P_WAKE_EN 0x4
+#define P_IRQ_EN_P_TIMER_EN 0x2
+#define P_IRQ_EN_P_PRCSD_DESC_EN 0x1
+
+/* P_TIMERn */
+#define P_TIMER_P_TIMER 0xffff
+
+/* P_TIMER_ctrln */
+#define P_TIMER_RST 0x80000000
+#define P_TIMER_RUN 0x40000000
+#define P_TIMER_MODE 0x20000000
+#define P_TIMER_TRSHLD 0xffff
+
+/* P_PRDCR_SDBNDn */
+#define P_PRDCR_SDBNDn_BAM_P_SB_UPDATED 0x1000000
+#define P_PRDCR_SDBNDn_BAM_P_TOGGLE 0x100000
+#define P_PRDCR_SDBNDn_BAM_P_CTRL 0xf0000
+#define P_PRDCR_SDBNDn_BAM_P_BYTES_FREE 0xffff
+
+/* P_CNSMR_SDBNDn */
+#define P_CNSMR_SDBNDn_BAM_P_SB_UPDATED 0x1000000
+#define P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK 0x800000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE 0x400000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R 0x200000
+#define P_CNSMR_SDBNDn_BAM_P_TOGGLE 0x100000
+#define P_CNSMR_SDBNDn_BAM_P_CTRL 0xf0000
+#define P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL 0xffff
+
+/* P_EVNT_regn */
+#define P_BYTES_CONSUMED 0xffff0000
+#define P_DESC_FIFO_PEER_OFST 0xffff
+
+/* P_SW_ofstsn */
+#define SW_OFST_IN_DESC 0xffff0000
+#define SW_DESC_OFST 0xffff
+
+/* P_EVNT_GEN_TRSHLDn */
+#define P_EVNT_GEN_TRSHLD_P_TRSHLD 0xffff
+
+/* P_FIFO_sizesn */
+#define P_DATA_FIFO_SIZE 0xffff0000
+#define P_DESC_FIFO_SIZE 0xffff
+
+#define P_RETR_CNTXT_RETR_DESC_OFST 0xffff0000
+#define P_RETR_CNTXT_RETR_OFST_IN_DESC 0xffff
+#define P_SI_CNTXT_SI_DESC_OFST 0xffff
+#define P_DF_CNTXT_WB_ACCUMULATED 0xffff0000
+#define P_DF_CNTXT_DF_DESC_OFST 0xffff
+#define P_AU_PSM_CNTXT_1_AU_PSM_ACCUMED 0xffff0000
+#define P_AU_PSM_CNTXT_1_AU_ACKED 0xffff
+#define P_PSM_CNTXT_2_PSM_DESC_VALID 0x80000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ 0x40000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ_DONE 0x20000000
+#define P_PSM_CNTXT_2_PSM_GENERAL_BITS 0x1e000000
+#define P_PSM_CNTXT_2_PSM_CONS_STATE 0x1c00000
+#define P_PSM_CNTXT_2_PSM_PROD_SYS_STATE 0x380000
+#define P_PSM_CNTXT_2_PSM_PROD_B2B_STATE 0x70000
+#define P_PSM_CNTXT_2_PSM_DESC_SIZE 0xffff
+#define P_PSM_CNTXT_4_PSM_DESC_OFST 0xffff0000
+#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE 0xffff
+#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT 0xffff0000
+#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC 0xffff
+
+#else
+
/* Maximum number of execution environment */
#define BAM_MAX_EES 4
@@ -263,6 +543,7 @@
#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE 0xffff
#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT 0xffff0000
#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC 0xffff
+#endif
#define BAM_ERROR (-1)
@@ -658,9 +939,10 @@
void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee)
{
bam_write_reg(base, P_IRQ_EN(pipe), 0);
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
bam_write_reg(base, P_IRQ_DEST_ADDR(pipe), irq_gen_addr);
-
bam_write_reg_field(base, IRQ_SIC_SEL, (1 << pipe), 1);
+#endif
bam_write_reg_field(base, IRQ_SRCS_MSK, (1 << pipe), 1);
}
@@ -680,9 +962,9 @@
* interrupt. Since the remote processor enable both SIC and interrupt,
* the interrupt enable mask must be set to zero for polling mode.
*/
-
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
bam_write_reg(base, P_IRQ_DEST_ADDR(pipe), irq_gen_addr);
-
+#endif
if (!irq_en)
src_mask = 0;
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index e43166e..b002657 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h> /* platform_get_resource_byname() */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <mach/msm_sps.h> /* msm_sps_platform_data */
#include "sps_bam.h"
@@ -1428,13 +1429,74 @@
return 0;
}
+/**
+ * Read data from device tree
+ */
+static int get_device_tree_data(struct platform_device *pdev)
+{
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+ struct resource *resource;
+
+ if (of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,bam-dma-res-pipes",
+ &sps->bamdma_restricted_pipes))
+ return -EINVAL;
+ else
+ SPS_DBG("sps:bamdma_restricted_pipes=0x%x.",
+ sps->bamdma_restricted_pipes);
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (resource) {
+ sps->bamdma_bam_phys_base = resource->start;
+ sps->bamdma_bam_size = resource_size(resource);
+ SPS_DBG("sps:bamdma_bam.base=0x%x,size=0x%x.",
+ sps->bamdma_bam_phys_base,
+ sps->bamdma_bam_size);
+ } else {
+ SPS_ERR("sps:BAM DMA BAM mem unavailable.");
+ return -ENODEV;
+ }
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (resource) {
+ sps->bamdma_dma_phys_base = resource->start;
+ sps->bamdma_dma_size = resource_size(resource);
+ SPS_DBG("sps:bamdma_dma.base=0x%x,size=0x%x.",
+ sps->bamdma_dma_phys_base,
+ sps->bamdma_dma_size);
+ } else {
+ SPS_ERR("sps:BAM DMA mem unavailable.");
+ return -ENODEV;
+ }
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (resource) {
+ sps->bamdma_irq = resource->start;
+ SPS_DBG("sps:bamdma_irq=%d.", sps->bamdma_irq);
+ } else {
+ SPS_ERR("sps:BAM DMA IRQ unavailable.");
+ return -ENODEV;
+ }
+#endif
+
+ return 0;
+}
+
static int __devinit msm_sps_probe(struct platform_device *pdev)
{
int ret;
SPS_DBG("sps:msm_sps_probe.");
- ret = get_platform_data(pdev);
+ if (pdev->dev.of_node) {
+ SPS_DBG("sps:get data from device tree.");
+ ret = get_device_tree_data(pdev);
+
+ } else {
+ SPS_DBG("sps:get platform data.");
+ ret = get_platform_data(pdev);
+ }
+
if (ret)
return -ENODEV;
@@ -1542,11 +1604,18 @@
return 0;
}
+static struct of_device_id msm_sps_match[] = {
+ { .compatible = "qcom,msm_sps",
+ },
+ {}
+};
+
static struct platform_driver msm_sps_driver = {
.probe = msm_sps_probe,
.driver = {
.name = SPS_DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = msm_sps_match,
},
.remove = __exit_p(msm_sps_remove),
};
diff --git a/drivers/platform/msm/sps/sps_dma.c b/drivers/platform/msm/sps/sps_dma.c
index 9f42403..b650098 100644
--- a/drivers/platform/msm/sps/sps_dma.c
+++ b/drivers/platform/msm/sps/sps_dma.c
@@ -26,14 +26,24 @@
*/
#define DMA_ENBL (0x00000000)
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+#define DMA_REVISION (0x00000004)
+#define DMA_CONFIG (0x00000008)
+#define DMA_CHNL_CONFIG(n) (0x00001000 + 4096 * (n))
+#else
#define DMA_CHNL_CONFIG(n) (0x00000004 + 4 * (n))
#define DMA_CONFIG (0x00000040)
+#endif
/**
* masks
*/
/* DMA_CHNL_confign */
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+#define DMA_CHNL_PRODUCER_PIPE_ENABLED 0x40000
+#define DMA_CHNL_CONSUMER_PIPE_ENABLED 0x20000
+#endif
#define DMA_CHNL_HALT_DONE 0x10000
#define DMA_CHNL_HALT 0x1000
#define DMA_CHNL_ENABLE 0x100
diff --git a/drivers/platform/msm/sps/sps_mem.c b/drivers/platform/msm/sps/sps_mem.c
index 3aee4ba..31c1314 100644
--- a/drivers/platform/msm/sps/sps_mem.c
+++ b/drivers/platform/msm/sps/sps_mem.c
@@ -104,10 +104,13 @@
*/
int sps_mem_init(u32 pipemem_phys_base, u32 pipemem_size)
{
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
int res;
+#endif
/* 2^8=128. The desc-fifo and data-fifo minimal allocation. */
int min_alloc_order = 8;
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
iomem_phys = pipemem_phys_base;
iomem_size = pipemem_size;
@@ -125,11 +128,14 @@
iomem_offset = 0;
SPS_DBG("sps:sps_mem_init.iomem_phys=0x%x,iomem_virt=0x%x.",
iomem_phys, (u32) iomem_virt);
+#endif
pool = gen_pool_create(min_alloc_order, nid);
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
res = gen_pool_add(pool, (u32) iomem_virt, iomem_size, nid);
if (res)
return res;
+#endif
return 0;
}
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 17fe820..e084278 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -2773,6 +2773,7 @@
unsigned long flags;
int i, j;
int retval = -ENOMEM;
+ bool put = false;
trace("%p", driver);
@@ -2877,18 +2878,21 @@
if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
hw_device_reset(udc);
} else {
+ put = true;
goto done;
}
}
- if (!udc->softconnect)
+ if (!udc->softconnect) {
+ put = true;
goto done;
+ }
retval = hw_device_state(udc->ep0out.qh.dma);
done:
spin_unlock_irqrestore(udc->lock, flags);
- if (retval)
+ if (retval || put)
pm_runtime_put_sync(&udc->gadget.dev);
return retval;
}
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 8a1d56e..1f24cea 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -2314,6 +2314,12 @@
goto error;
}
+ /*
+ * A small delay is needed here to avoid device crash observed
+ * during reauthentication in MSM8960
+ */
+ msleep(20);
+
/* 0x0168 HDCP_RCVPORT_DATA12
[23:8] BSTATUS
[7:0] BCAPS */
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 372e617..dd81243 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1340,6 +1340,10 @@
/* link to the latest pdev */
mfd->pdev = msm_fb_dev;
+ mfd->mdp_rev = mdp_rev;
+
+ mfd->ov0_blt_state = 0;
+ mfd->use_ov0_blt = 0 ;
/* add panel data */
if (platform_device_add_data
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 383a16d..aae1711 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1555,8 +1555,8 @@
pr_debug("fillratex100 %lu, mdp_pixels_produced %lu\n",
fillratex100, mdp_pixels_produced);
if (mdp_pixels_produced <= mfd->panel_info.xres) {
- pr_err("%s(): LCDC underflow detected during downscale\n",
- __func__);
+ pr_err("%s():display underflow detected with downscale"
+ " params\n", __func__);
return -ERANGE;
}
@@ -1968,6 +1968,60 @@
}
}
+static void mdp4_overlay_update_blt_mode(struct msm_fb_data_type *mfd)
+{
+ if (mfd->use_ov0_blt) {
+ if (mfd->panel_info.type == LCDC_PANEL)
+ mdp4_lcdc_overlay_blt_start(mfd);
+ else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
+ mdp4_dsi_video_blt_start(mfd);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_overlay_blt_start(mfd);
+ } else {
+ if (mfd->panel_info.type == LCDC_PANEL)
+ mdp4_lcdc_overlay_blt_stop(mfd);
+ else if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
+ mdp4_dsi_video_blt_stop(mfd);
+ else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+ mdp4_dsi_overlay_blt_stop(mfd);
+ }
+}
+
+static u32 mdp4_overlay_blt_enable(struct mdp_overlay *req,
+ struct msm_fb_data_type *mfd, uint32 perf_level)
+{
+ u32 clk_rate = mfd->panel_info.clk_rate;
+ u32 pull_mode = 0, use_blt = 0;
+
+ if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
+ clk_rate = (&mfd->panel_info.mipi)->dsi_pclk_rate;
+
+ if ((mfd->panel_info.type == LCDC_PANEL) ||
+ (mfd->panel_info.type == MIPI_VIDEO_PANEL))
+ pull_mode = 1;
+
+ if (pull_mode && (req->src_rect.h > req->dst_rect.h ||
+ req->src_rect.w > req->dst_rect.w)) {
+ if (mdp4_overlay_validate_downscale(req, mfd, perf_level,
+ clk_rate))
+ use_blt = 1;
+ }
+
+ if (mfd->mdp_rev == MDP_REV_41) {
+ /*
+ * writeback (blt) mode to provide work around for
+ * dsi cmd mode interface hardware bug.
+ */
+ if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+ if (req->dst_rect.x != 0)
+ use_blt = 1;
+ }
+ if (mfd->panel_info.xres > 1280)
+ use_blt = 1;
+ }
+ return use_blt;
+}
+
int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
@@ -1992,22 +2046,6 @@
perf_level = mdp4_overlay_get_perf_level(req);
- if ((mfd->panel_info.type == LCDC_PANEL) &&
- (req->src_rect.h >
- req->dst_rect.h || req->src_rect.w > req->dst_rect.w)) {
- if (mdp4_overlay_validate_downscale(req, mfd,
- perf_level, mfd->panel_info.clk_rate))
- mdp4_lcdc_overlay_blt_start(mfd);
- }
-
- if ((mfd->panel_info.type == MIPI_VIDEO_PANEL) &&
- (req->src_rect.h >
- req->dst_rect.h || req->src_rect.w > req->dst_rect.w)) {
- if (mdp4_overlay_validate_downscale(req, mfd,
- perf_level, (&mfd->panel_info.mipi)->dsi_pclk_rate))
- mdp4_dsi_video_blt_start(mfd);
- }
-
mixer = mfd->panel_info.pdest; /* DISPLAY_1 or DISPLAY_2 */
ret = mdp4_overlay_req2pipe(req, mixer, &pipe, mfd);
@@ -2017,15 +2055,10 @@
return ret;
}
- /*
- * writeback (blt) mode to provide work around for
- * dsi cmd mode interface hardware bug.
- */
- if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
- if (mixer == MDP4_MIXER0 && req->dst_rect.x != 0) {
- mdp4_dsi_blt_dmap_busy_wait(mfd);
- mdp4_dsi_overlay_blt_start(mfd);
- }
+ if (mixer == MDP4_MIXER0) {
+ u32 use_blt = mdp4_overlay_blt_enable(req, mfd, perf_level);
+ mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
+ mfd->use_ov0_blt |= (use_blt << (pipe->pipe_ndx-1));
}
/* return id back to user */
@@ -2136,11 +2169,11 @@
mdp4_mixer_stage_down(pipe);
if (pipe->mixer_num == MDP4_MIXER0) {
+
#ifdef CONFIG_FB_MSM_MIPI_DSI
if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
if (mfd->panel_power_on)
- if (mdp4_dsi_overlay_blt_stop(mfd) == 0)
- mdp4_dsi_cmd_overlay_restore();
+ mdp4_dsi_cmd_overlay_restore();
} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
mdp4_overlay_reg_flush(pipe, 1);
if (mfd->panel_power_on) {
@@ -2149,7 +2182,6 @@
mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
pipe->flags = flags;
}
- mdp4_dsi_video_blt_stop(mfd);
}
#else
if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
@@ -2168,8 +2200,12 @@
mdp4_overlay_lcdc_vsync_push(mfd, pipe);
pipe->flags = flags;
}
- mdp4_lcdc_overlay_blt_stop(mfd);
}
+
+ mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
+ mdp4_overlay_update_blt_mode(mfd);
+ mfd->ov0_blt_state = mfd->use_ov0_blt;
+
}
#ifdef CONFIG_FB_MSM_DTV
else { /* mixer1, DTV, ATV */
@@ -2402,6 +2438,11 @@
}
}
+ if (mfd->use_ov0_blt != mfd->ov0_blt_state) {
+ mdp4_overlay_update_blt_mode(mfd);
+ mfd->ov0_blt_state = mfd->use_ov0_blt;
+ }
+
if (pipe->pipe_num >= OVERLAY_PIPE_VG1)
mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
else {
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index e7604de..a177976 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -1202,7 +1202,7 @@
if (mfd->panel_info.mipi.no_max_pkt_size) {
/* Only support rlen = 4*n */
rlen += 3;
- rlen &= 0x03;
+ rlen &= ~0x03;
}
len = rlen;
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index bc25062..de2734d 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -166,7 +166,6 @@
struct completion msmfb_update_notify;
struct completion msmfb_no_update_notify;
u32 ov_start, ov_end;
-
struct mutex writeback_mutex;
struct mutex unregister_mutex;
struct list_head writeback_busy_queue;
@@ -174,6 +173,8 @@
struct list_head writeback_register_queue;
wait_queue_head_t wait_q;
struct ion_client *client;
+ u32 mdp_rev;
+ u32 use_ov0_blt, ov0_blt_state;
};
struct dentry *msm_fb_get_debugfs_root(void);
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
index a165fd7..d399bf8 100644
--- a/include/linux/cpu_pm.h
+++ b/include/linux/cpu_pm.h
@@ -67,7 +67,13 @@
CPU_CLUSTER_PM_EXIT,
};
+#ifdef CONFIG_CPU_PM
int cpu_pm_register_notifier(struct notifier_block *nb);
+#else
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{ return 0; }
+#endif
+
int cpu_pm_unregister_notifier(struct notifier_block *nb);
/*
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index bf56b6f..1c4085e 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1305,6 +1305,7 @@
WLAN_KEY_LEN_CCMP = 16,
WLAN_KEY_LEN_TKIP = 32,
WLAN_KEY_LEN_AES_CMAC = 16,
+ WLAN_KEY_LEN_WAPI_SMS4 = 32,
};
/**
@@ -1434,6 +1435,7 @@
#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
+#define WLAN_CIPHER_SUITE_SMS4 0x00147201
/* AKM suite selectors */
#define WLAN_AKM_SUITE_8021X 0x000FAC01
diff --git a/include/linux/input/pmic8xxx-pwrkey.h b/include/linux/input/pmic8xxx-pwrkey.h
index 6d2974e..a32eafd 100644
--- a/include/linux/input/pmic8xxx-pwrkey.h
+++ b/include/linux/input/pmic8xxx-pwrkey.h
@@ -24,6 +24,13 @@
*/
struct pm8xxx_pwrkey_platform_data {
bool pull_up;
+ /* Time delay for pwr-key state change interrupt triggering in micro-
+ * second. The actual delay can only be one of these eight levels:
+ * 2 sec, 1 sec, 1/2 sec, 1/4 sec, 1/8 sec, 1/16 sec, 1/32 sec, and
+ * 1/64 sec. The valid range of kpd_trigger_delay_us is 1/64 second to
+ * 2 seconds. A value within the valid range will be rounded down to the
+ * closest level. Any value outside the valid range will be rejected.
+ */
u32 kpd_trigger_delay_us;
u32 wakeup;
};
diff --git a/include/linux/ion.h b/include/linux/ion.h
index a7f3de5..b396369 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -35,6 +35,7 @@
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_IOMMU,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_NUM_HEAPS,
@@ -59,6 +60,7 @@
ION_HEAP_SMI_ID,
ION_HEAP_ADSP_ID,
ION_HEAP_AUDIO_ID,
+ ION_HEAP_IOMMU_ID,
};
#define ION_KMALLOC_HEAP_NAME "kmalloc"
@@ -66,6 +68,7 @@
#define ION_EBI1_HEAP_NAME "EBI1"
#define ION_ADSP_HEAP_NAME "adsp"
#define ION_SMI_HEAP_NAME "smi"
+#define ION_IOMMU_HEAP_NAME "iommu"
#define CACHED 1
#define UNCACHED 0
@@ -306,6 +309,65 @@
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
unsigned long *flags);
+
+/**
+ * ion_map_iommu - map the given handle into an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to map
+ * @domain_num - domain number to map to
+ * @partition_num - partition number to allocate iova from
+ * @align - alignment for the iova
+ * @iova_length - length of iova to map. If the iova length is
+ * greater than the handle length, the remaining
+ * address space will be mapped to a dummy buffer.
+ * @iova - pointer to store the iova address
+ * @buffer_size - pointer to store the size of the buffer
+ * @flags - flags for options to map
+ *
+ * Maps the handle into the iova space specified via domain number. Iova
+ * will be allocated from the partition specified via partition_num.
+ * Returns 0 on success, negative value on error.
+ */
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags);
+
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * gives the allocated size of a handle. returns 0 on success, negative
+ * value on error
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+ unsigned long *size);
+
+/**
+ * ion_unmap_iommu - unmap the handle from an iommu
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to unmap
+ * @domain_num - domain to unmap from
+ * @partition_num - partition to unmap from
+ *
+ * Decrement the reference count on the iommu mapping. If the count is
+ * 0, the mapping will be removed from the iommu.
+ */
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num);
+
+
#else
static inline struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask, const char *name)
@@ -378,6 +440,24 @@
{
return -ENODEV;
}
+
+static inline int ion_map_iommu(struct ion_client *client,
+ struct ion_handle *handle, int domain_num,
+ int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long flags)
+{
+ return -ENODEV;
+}
+
+static inline void ion_unmap_iommu(struct ion_client *client,
+ struct ion_handle *handle, int domain_num,
+ int partition_num)
+{
+ return;
+}
+
+
#endif /* CONFIG_ION */
#endif /* __KERNEL__ */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 7faf7fe..5a15cec 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -240,6 +240,9 @@
rwlock_t adv_entries_lock;
struct timer_list adv_timer;
+ struct timer_list disc_timer;
+ struct timer_list disc_le_timer;
+
struct hci_dev_stats stat;
struct sk_buff_head driver_init;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e6b53ea..5489c43 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1485,8 +1485,9 @@
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags, long free_pages)
{
- /* free_pages my go negative - that's OK */
+ /* free_pages may go negative - that's OK */
long min = mark;
+ long lowmem_reserve = z->lowmem_reserve[classzone_idx];
int o;
free_pages -= (1 << order) + 1;
@@ -1495,7 +1496,7 @@
if (alloc_flags & ALLOC_HARDER)
min -= min / 4;
- if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+ if (free_pages <= min + lowmem_reserve)
return false;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d63dd1a..9ab0199 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -5494,6 +5494,7 @@
/* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
if (l2cap_pi(sk)->ampchan == chan) {
l2cap_pi(sk)->ampchan = NULL;
+ l2cap_pi(sk)->ampcon = NULL;
l2cap_amp_move_init(sk);
}
bh_unlock_sock(sk);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4a976a9..b413a9c 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -41,8 +41,6 @@
};
struct disco_interleave {
- struct timer_list timer;
- struct timer_list le_timer;
u16 index;
enum scan_mode mode;
int int_phase;
@@ -1565,12 +1563,19 @@
static void discovery_terminated(struct pending_cmd *cmd, void *data)
{
+ struct hci_dev *hdev;
struct mgmt_mode ev = {0};
- struct disco_interleave *ilp = cmd->param;
BT_DBG("");
- del_timer_sync(&ilp->le_timer);
- del_timer_sync(&ilp->timer);
+ hdev = hci_dev_get(cmd->index);
+ if (!hdev)
+ goto not_found;
+
+ del_timer_sync(&hdev->disc_le_timer);
+ del_timer_sync(&hdev->disc_timer);
+ hci_dev_put(hdev);
+
+not_found:
mgmt_event(MGMT_EV_DISCOVERING, cmd->index, &ev, sizeof(ev), NULL);
list_del(&cmd->list);
@@ -1851,10 +1856,12 @@
cmd_complete(cmd->sk, cmd->index, MGMT_OP_STOP_DISCOVERY,
NULL, 0);
if (cmd->opcode == MGMT_OP_STOP_DISCOVERY) {
- struct disco_interleave *ilp = cmd->param;
-
- del_timer_sync(&ilp->le_timer);
- del_timer_sync(&ilp->timer);
+ struct hci_dev *hdev = hci_dev_get(cmd->index);
+ if (hdev) {
+ del_timer_sync(&hdev->disc_le_timer);
+ del_timer_sync(&hdev->disc_timer);
+ hci_dev_put(hdev);
+ }
}
}
@@ -1903,8 +1910,8 @@
err = hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
sizeof(le_cp), &le_cp);
- if (err >= 0) {
- mod_timer(&ilp->le_timer, jiffies +
+ if (err >= 0 && hdev) {
+ mod_timer(&hdev->disc_le_timer, jiffies +
msecs_to_jiffies(ilp->int_phase * 1000));
ilp->mode = SCAN_LE;
} else
@@ -1927,11 +1934,11 @@
BT_DBG("hci%d", ilp->index);
- del_timer_sync(&ilp->le_timer);
hdev = hci_dev_get(ilp->index);
if (hdev) {
hci_dev_lock_bh(hdev);
+ del_timer_sync(&hdev->disc_le_timer);
cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, ilp->index);
@@ -2056,10 +2063,11 @@
cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, index);
if (cmd) {
ilp = cmd->param;
- setup_timer(&ilp->le_timer, disco_le_to,
+ setup_timer(&hdev->disc_le_timer, disco_le_to,
(unsigned long) ilp);
- setup_timer(&ilp->timer, disco_to, (unsigned long) ilp);
- mod_timer(&ilp->timer,
+ setup_timer(&hdev->disc_timer, disco_to,
+ (unsigned long) ilp);
+ mod_timer(&hdev->disc_timer,
jiffies + msecs_to_jiffies(20000));
}
}
@@ -2110,8 +2118,8 @@
if (ilp) {
ilp->mode = SCAN_IDLE;
- del_timer_sync(&ilp->le_timer);
- del_timer_sync(&ilp->timer);
+ del_timer_sync(&hdev->disc_le_timer);
+ del_timer_sync(&hdev->disc_timer);
}
if (err < 0 && cmd)
@@ -2859,7 +2867,7 @@
hci_send_cmd(hdev, HCI_OP_INQUIRY,
sizeof(cp), &cp);
ilp->mode = SCAN_BR;
- del_timer_sync(&ilp->le_timer);
+ del_timer_sync(&hdev->disc_le_timer);
}
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1ac9443..584a7cd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -92,7 +92,7 @@
[NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
- [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
@@ -183,7 +183,7 @@
[NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
[NL80211_KEY_IDX] = { .type = NLA_U8 },
[NL80211_KEY_CIPHER] = { .type = NLA_U32 },
- [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
[NL80211_KEY_TYPE] = { .type = NLA_U32 },
@@ -3878,7 +3878,8 @@
cipher == WLAN_CIPHER_SUITE_WEP104 ||
cipher == WLAN_CIPHER_SUITE_TKIP ||
cipher == WLAN_CIPHER_SUITE_CCMP ||
- cipher == WLAN_CIPHER_SUITE_AES_CMAC;
+ cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ cipher == WLAN_CIPHER_SUITE_SMS4;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4d7b83f..c00a511 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -199,6 +199,10 @@
if (params->key_len != WLAN_KEY_LEN_AES_CMAC)
return -EINVAL;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ if (params->key_len != WLAN_KEY_LEN_WAPI_SMS4)
+ return -EINVAL;
+ break;
default:
/*
* We don't know anything about this algorithm,
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index ced46de..9e0549b 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -113,8 +113,8 @@
To add support for MSM QDSP6 Soc Audio.
config SND_SOC_MSM8960
- tristate "SoC Machine driver for MSM8960 boards"
- depends on ARCH_MSM8960
+ tristate "SoC Machine driver for MSM8960 and APQ8064 boards"
+ depends on ARCH_MSM8960 || ARCH_APQ8064
select SND_SOC_VOICE
select SND_SOC_QDSP6
select SND_SOC_MSM_STUB
@@ -122,7 +122,7 @@
select SND_SOC_MSM_HOSTLESS_PCM
default n
help
- To add support for SoC audio on MSM8960 boards
+ To add support for SoC audio on MSM8960 and APQ8064 boards
config SND_SOC_MSM8660_APQ
tristate "Soc Machine driver for APQ8060 WM8903 codec"
diff --git a/sound/soc/soc-dsp.c b/sound/soc/soc-dsp.c
index 6b0c215..706954b 100644
--- a/sound/soc/soc-dsp.c
+++ b/sound/soc/soc-dsp.c
@@ -627,6 +627,10 @@
struct snd_soc_dsp_params *dsp_params;
int ret = 0;
+ if ((cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE) ||
+ (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH))
+ return ret;
+
list_for_each_entry(dsp_params, &fe->dsp[stream].be_clients, list_be) {
struct snd_pcm_substream *be_substream =