Merge "ARM: gic: Add support for logging interrupts in RTB"
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 478e766..b9f0f8b 100755
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -503,6 +503,7 @@
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_PFT=y
CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_MSM_RDBG=m
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index c5c16c2..e843739 100755
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -561,6 +561,7 @@
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_PFT=y
CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_MSM_RDBG=m
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5d74cc3..9fa7765 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,6 +6,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
+#include <linux/security.h>
#include "blk.h"
@@ -509,6 +510,10 @@
if (bio_integrity(bio) != blk_integrity_rq(rq))
return false;
+ /* Don't merge bios of files with different encryption */
+ if (!security_allow_merge_bio(rq->bio, bio))
+ return false;
+
return true;
}
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 588c243..5f80fcf 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -155,6 +155,7 @@
struct adreno_context *drawctxt)
{
struct kgsl_cmdbatch *cmdbatch = NULL;
+ int pending;
mutex_lock(&drawctxt->mutex);
if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
@@ -164,7 +165,32 @@
* Don't dequeue a cmdbatch that is still waiting for other
* events
*/
- if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
+
+ spin_lock(&cmdbatch->lock);
+ pending = list_empty(&cmdbatch->synclist) ? 0 : 1;
+
+ /*
+ * If syncpoints are pending and the canary timer hasn't been
+ * started yet, start it
+ */
+ if (pending) {
+ /*
+ * If syncpoints are pending start the canary timer if
+ * it hasn't already been started
+ */
+ if (!timer_pending(&cmdbatch->timer))
+ mod_timer(&cmdbatch->timer, jiffies + (5 * HZ));
+ spin_unlock(&cmdbatch->lock);
+ } else {
+ /*
+ * Otherwise, delete the timer to make sure it is good
+ * and dead before queuing the buffer
+ */
+ spin_unlock(&cmdbatch->lock);
+ del_timer_sync(&cmdbatch->timer);
+ }
+
+ if (pending) {
cmdbatch = ERR_PTR(-EAGAIN);
goto done;
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5d78879..462e7a5 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,52 +105,108 @@
}
EXPORT_SYMBOL(kgsl_trace_regwrite);
-int kgsl_memfree_hist_init(void)
-{
- void *base;
+/*
+ * The memfree list contains the last N blocks of memory that have been freed.
+ * On a GPU fault we walk the list to see if the faulting address had been
+ * recently freed and print out a message to that effect
+ */
- base = kzalloc(KGSL_MEMFREE_HIST_SIZE, GFP_KERNEL);
- kgsl_driver.memfree_hist.base_hist_rb = base;
- if (base == NULL)
- return -ENOMEM;
- kgsl_driver.memfree_hist.size = KGSL_MEMFREE_HIST_SIZE;
- kgsl_driver.memfree_hist.wptr = base;
+#define MEMFREE_ENTRIES 512
+
+static DEFINE_SPINLOCK(memfree_lock);
+
+struct memfree_entry {
+ unsigned long gpuaddr;
+ unsigned long size;
+ pid_t pid;
+ unsigned int flags;
+};
+
+static struct {
+ struct memfree_entry *list;
+ int head;
+ int tail;
+} memfree;
+
+static int kgsl_memfree_init(void)
+{
+ memfree.list = kzalloc(MEMFREE_ENTRIES * sizeof(struct memfree_entry),
+ GFP_KERNEL);
+
+ return (memfree.list) ? 0 : -ENOMEM;
+}
+
+static void kgsl_memfree_exit(void)
+{
+ kfree(memfree.list);
+ memset(&memfree, 0, sizeof(memfree));
+}
+
+int kgsl_memfree_find_entry(pid_t pid, unsigned long *gpuaddr,
+ unsigned long *size, unsigned int *flags)
+{
+ int ptr;
+
+ if (memfree.list == NULL)
+ return 0;
+
+ spin_lock(&memfree_lock);
+
+ ptr = memfree.head - 1;
+ if (ptr < 0)
+ ptr = MEMFREE_ENTRIES - 1;
+
+ /* Walk backwards through the list looking for the last match */
+ while (ptr != memfree.tail) {
+ struct memfree_entry *entry = &memfree.list[ptr];
+
+ if ((entry->pid == pid) &&
+ (*gpuaddr >= entry->gpuaddr &&
+ *gpuaddr < (entry->gpuaddr + entry->size))) {
+ *gpuaddr = entry->gpuaddr;
+ *flags = entry->flags;
+ *size = entry->size;
+
+ spin_unlock(&memfree_lock);
+ return 1;
+ }
+
+ ptr = ptr - 1;
+
+ if (ptr < 0)
+ ptr = MEMFREE_ENTRIES - 1;
+ }
+
+ spin_unlock(&memfree_lock);
return 0;
}
-void kgsl_memfree_hist_exit(void)
+static void kgsl_memfree_add(pid_t pid, unsigned int gpuaddr,
+ unsigned int size, int flags)
+
{
- kfree(kgsl_driver.memfree_hist.base_hist_rb);
- kgsl_driver.memfree_hist.base_hist_rb = NULL;
-}
+ struct memfree_entry *entry;
-void kgsl_memfree_hist_set_event(unsigned int pid, unsigned int gpuaddr,
- unsigned int size, int flags)
-{
- struct kgsl_memfree_hist_elem *p;
-
- void *base = kgsl_driver.memfree_hist.base_hist_rb;
- int rbsize = kgsl_driver.memfree_hist.size;
-
- if (base == NULL)
+ if (memfree.list == NULL)
return;
- mutex_lock(&kgsl_driver.memfree_hist_mutex);
- p = kgsl_driver.memfree_hist.wptr;
- p->pid = pid;
- p->gpuaddr = gpuaddr;
- p->size = size;
- p->flags = flags;
+ spin_lock(&memfree_lock);
- kgsl_driver.memfree_hist.wptr++;
- if ((void *)kgsl_driver.memfree_hist.wptr >= base+rbsize) {
- kgsl_driver.memfree_hist.wptr =
- (struct kgsl_memfree_hist_elem *)base;
- }
- mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+ entry = &memfree.list[memfree.head];
+
+ entry->pid = pid;
+ entry->gpuaddr = gpuaddr;
+ entry->size = size;
+ entry->flags = flags;
+
+ memfree.head = (memfree.head + 1) % MEMFREE_ENTRIES;
+
+ if (memfree.head == memfree.tail)
+ memfree.tail = (memfree.tail + 1) % MEMFREE_ENTRIES;
+
+ spin_unlock(&memfree_lock);
}
-
/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
* @device - Pointer to the device structure
* @ptbase - the pagetable base of the object
@@ -557,6 +613,13 @@
write_lock(&device->context_lock);
if (context->id != KGSL_CONTEXT_INVALID) {
+
+ /* Clear the timestamps in the memstore during destroy */
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), 0);
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), 0);
+
idr_remove(&device->context_idr, context->id);
context->id = KGSL_CONTEXT_INVALID;
}
@@ -1484,6 +1547,49 @@
struct kref refcount;
};
+static void _kgsl_cmdbatch_timer(unsigned long data)
+{
+ struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
+ struct kgsl_cmdbatch_sync_event *event;
+
+ if (cmdbatch == NULL || cmdbatch->context == NULL)
+ return;
+
+ spin_lock(&cmdbatch->lock);
+ if (list_empty(&cmdbatch->synclist))
+ goto done;
+
+ pr_err("kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
+ pr_err(" Active sync points:\n");
+
+ /* Print all the pending sync objects */
+ list_for_each_entry(event, &cmdbatch->synclist, node) {
+
+ switch (event->type) {
+ case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
+ unsigned int retired;
+
+ retired = kgsl_readtimestamp(event->device,
+ event->context, KGSL_TIMESTAMP_RETIRED);
+
+ pr_err(" [timestamp] context %d timestamp %d (retired %d)\n",
+ event->context->id, event->timestamp,
+ retired);
+ break;
+ }
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ pr_err(" fence: [%p] %s\n", event->handle,
+ (event->handle && event->handle->fence)
+ ? event->handle->fence->name : "NULL");
+ break;
+ }
+ }
+
+done:
+ spin_unlock(&cmdbatch->lock);
+}
+
/**
* kgsl_cmdbatch_sync_event_destroy() - Destroy a sync event object
* @kref: Pointer to the kref structure for this object
@@ -1558,6 +1664,10 @@
sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0;
spin_unlock(&event->cmdbatch->lock);
+ /* If the list is empty delete the canary timer */
+ if (sched)
+ del_timer_sync(&event->cmdbatch->timer);
+
/*
* if this is the last event in the list then tell
* the GPU device that the cmdbatch can be submitted
@@ -1600,10 +1710,12 @@
struct kgsl_cmdbatch_sync_event *event, *tmp;
LIST_HEAD(cancel_synclist);
- /*
- * Empty the synclist before canceling events
- */
+ /* Zap the canary timer */
+ del_timer_sync(&cmdbatch->timer);
+
spin_lock(&cmdbatch->lock);
+
+ /* Empty the synclist before canceling events */
list_splice_init(&cmdbatch->synclist, &cancel_synclist);
spin_unlock(&cmdbatch->lock);
@@ -1782,6 +1894,7 @@
event->cmdbatch = cmdbatch;
event->context = context;
event->timestamp = sync->timestamp;
+ event->device = device;
/*
* Two krefs are required to support events. The first kref is for
@@ -1917,6 +2030,10 @@
cmdbatch->context = context;
cmdbatch->flags = flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
+ /* Add a timer to help debug sync deadlocks */
+ setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
+ (unsigned long) cmdbatch);
+
return cmdbatch;
}
@@ -2306,7 +2423,30 @@
return result;
}
-static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
+static long _sharedmem_free_entry(struct kgsl_mem_entry *entry)
+{
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
+ trace_kgsl_mem_free(entry);
+
+ kgsl_memfree_add(entry->priv->pid, entry->memdesc.gpuaddr,
+ entry->memdesc.size, entry->memdesc.flags);
+
+ /*
+ * First kgsl_mem_entry_put is for the reference that we took in
+ * this function when calling kgsl_sharedmem_find, second one is
+ * to free the memory since this is a free ioctl
+ */
+ kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put(entry);
+
+ return 0;
+}
+
+long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_sharedmem_free *param = data;
@@ -2319,29 +2459,11 @@
param->gpuaddr);
return -EINVAL;
}
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
- trace_kgsl_mem_free(entry);
-
- kgsl_memfree_hist_set_event(entry->priv->pid,
- entry->memdesc.gpuaddr,
- entry->memdesc.size,
- entry->memdesc.flags);
-
- /*
- * First kgsl_mem_entry_put is for the reference that we took in
- * this function when calling kgsl_sharedmem_find, second one is
- * to free the memory since this is a free ioctl
- */
- kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
- return 0;
+ return _sharedmem_free_entry(entry);
}
-static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
+long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_gpumem_free_id *param = data;
@@ -2355,26 +2477,7 @@
return -EINVAL;
}
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
-
- trace_kgsl_mem_free(entry);
-
- kgsl_memfree_hist_set_event(entry->priv->pid,
- entry->memdesc.gpuaddr,
- entry->memdesc.size,
- entry->memdesc.flags);
-
- /*
- * First kgsl_mem_entry_put is for the reference that we took in
- * this function when calling kgsl_sharedmem_find_id, second one is
- * to free the memory since this is a free ioctl
- */
- kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
- return 0;
+ return _sharedmem_free_entry(entry);
}
static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
@@ -3902,8 +4005,6 @@
.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
- .memfree_hist_mutex =
- __MUTEX_INITIALIZER(kgsl_driver.memfree_hist_mutex),
/*
* Full cache flushes are faster than line by line on at least
* 8064 and 8974 once the region to be flushed is > 16mb.
@@ -4247,7 +4348,7 @@
kgsl_driver.class = NULL;
}
- kgsl_memfree_hist_exit();
+ kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
}
@@ -4319,8 +4420,7 @@
goto err;
}
- if (kgsl_memfree_hist_init())
- KGSL_CORE_ERR("failed to init memfree_hist");
+ kgsl_memfree_init();
return 0;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 6da4a86..0bd71cb 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,25 +75,8 @@
#define KGSL_STATS_ADD(_size, _stat, _max) \
do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
-
-#define KGSL_MEMFREE_HIST_SIZE ((int)(PAGE_SIZE * 2))
-
#define KGSL_MAX_NUMIBS 100000
-struct kgsl_memfree_hist_elem {
- unsigned int pid;
- unsigned int gpuaddr;
- unsigned int size;
- unsigned int flags;
-};
-
-struct kgsl_memfree_hist {
- void *base_hist_rb;
- unsigned int size;
- struct kgsl_memfree_hist_elem *wptr;
-};
-
-
struct kgsl_device;
struct kgsl_context;
@@ -122,9 +105,6 @@
void *ptpool;
- struct mutex memfree_hist_mutex;
- struct kgsl_memfree_hist memfree_hist;
-
struct {
unsigned int vmalloc;
unsigned int vmalloc_max;
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 5645628..ccb2312 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -123,52 +123,6 @@
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
-static int memfree_hist_print(struct seq_file *s, void *unused)
-{
- void *base = kgsl_driver.memfree_hist.base_hist_rb;
-
- struct kgsl_memfree_hist_elem *wptr = kgsl_driver.memfree_hist.wptr;
- struct kgsl_memfree_hist_elem *p;
- char str[16];
-
- seq_printf(s, "%8s %8s %8s %11s\n",
- "pid", "gpuaddr", "size", "flags");
-
- mutex_lock(&kgsl_driver.memfree_hist_mutex);
- p = wptr;
- for (;;) {
- kgsl_get_memory_usage(str, sizeof(str), p->flags);
- /*
- * if the ring buffer is not filled up yet
- * all its empty elems have size==0
- * just skip them ...
- */
- if (p->size)
- seq_printf(s, "%8d %08x %8d %11s\n",
- p->pid, p->gpuaddr, p->size, str);
- p++;
- if ((void *)p >= base + kgsl_driver.memfree_hist.size)
- p = (struct kgsl_memfree_hist_elem *) base;
-
- if (p == kgsl_driver.memfree_hist.wptr)
- break;
- }
- mutex_unlock(&kgsl_driver.memfree_hist_mutex);
- return 0;
-}
-
-static int memfree_hist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, memfree_hist_print, inode->i_private);
-}
-
-static const struct file_operations memfree_hist_fops = {
- .open = memfree_hist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
void kgsl_device_debugfs_init(struct kgsl_device *device)
{
if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
@@ -188,8 +142,6 @@
&mem_log_fops);
debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
&pwr_log_fops);
- debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
- &memfree_hist_fops);
/* Create postmortem dump control files */
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 1e6fbc9..6f17d56c 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -173,10 +173,9 @@
* @ibcount: Number of IBs in the command list
* @ibdesc: Pointer to the list of IBs
* @expires: Point in time when the cmdbatch is considered to be hung
- * @invalid: non-zero if the dispatcher determines the command and the owning
- * context should be invalidated
* @refcount: kref structure to maintain the reference count
* @synclist: List of context/timestamp tuples to wait for before issuing
+ * @timer: a timer used to track possible sync timeouts for this cmdbatch
*
* This struture defines an atomic batch of command buffers issued from
* userspace.
@@ -193,9 +192,9 @@
uint32_t ibcount;
struct kgsl_ibdesc *ibdesc;
unsigned long expires;
- int invalid;
struct kref refcount;
struct list_head synclist;
+ struct timer_list timer;
};
/**
@@ -548,6 +547,9 @@
*context);
int kgsl_context_detach(struct kgsl_context *context);
+int kgsl_memfree_find_entry(pid_t pid, unsigned long *gpuaddr,
+ unsigned long *size, unsigned int *flags);
+
/**
* kgsl_context_put() - Release context reference count
* @context: Pointer to the KGSL context to be released
@@ -728,27 +730,6 @@
}
/**
- * kgsl_cmdbatch_sync_pending() - return true if the cmdbatch is waiting
- * @cmdbatch: Pointer to the command batch object to check
- *
- * Return non-zero if the specified command batch is still waiting for sync
- * point dependencies to be satisfied
- */
-static inline int kgsl_cmdbatch_sync_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- int ret;
-
- if (cmdbatch == NULL)
- return 0;
-
- spin_lock(&cmdbatch->lock);
- ret = list_empty(&cmdbatch->synclist) ? 0 : 1;
- spin_unlock(&cmdbatch->lock);
-
- return ret;
-}
-
-/**
* kgsl_sysfs_store() - parse a string from a sysfs store function
* @buf: Incoming string to parse
* @ptr: Pointer to an unsigned int to store the value
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 488e5a8..c4fa8af 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -281,39 +281,20 @@
static void _check_if_freed(struct kgsl_iommu_device *iommu_dev,
unsigned long addr, unsigned int pid)
{
- void *base = kgsl_driver.memfree_hist.base_hist_rb;
- struct kgsl_memfree_hist_elem *wptr;
- struct kgsl_memfree_hist_elem *p;
+ unsigned long gpuaddr = addr;
+ unsigned long size = 0;
+ unsigned int flags = 0;
+
char name[32];
memset(name, 0, sizeof(name));
- mutex_lock(&kgsl_driver.memfree_hist_mutex);
- wptr = kgsl_driver.memfree_hist.wptr;
- p = wptr;
- for (;;) {
- if (p->size && p->pid == pid)
- if (addr >= p->gpuaddr &&
- addr < (p->gpuaddr + p->size)) {
-
- kgsl_get_memory_usage(name, sizeof(name) - 1,
- p->flags);
- KGSL_LOG_DUMP(iommu_dev->kgsldev,
- "---- premature free ----\n");
- KGSL_LOG_DUMP(iommu_dev->kgsldev,
- "[%8.8X-%8.8X] (%s) was already freed by pid %d\n",
- p->gpuaddr,
- p->gpuaddr + p->size,
- name,
- p->pid);
- }
- p++;
- if ((void *)p >= base + kgsl_driver.memfree_hist.size)
- p = (struct kgsl_memfree_hist_elem *) base;
-
- if (p == kgsl_driver.memfree_hist.wptr)
- break;
+ if (kgsl_memfree_find_entry(pid, &gpuaddr, &size, &flags)) {
+ kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- premature free ----\n");
+ KGSL_LOG_DUMP(iommu_dev->kgsldev,
+ "[%8.8lX-%8.8lX] (%s) was already freed by pid %d\n",
+ gpuaddr, gpuaddr + size, name, pid);
}
- mutex_unlock(&kgsl_driver.memfree_hist_mutex);
}
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index dc3ad21..0e694a7 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -118,6 +118,7 @@
struct sync_pt *pt;
struct sync_fence *fence = NULL;
int ret = -EINVAL;
+ char fence_name[sizeof(fence->name)] = {};
if (len != sizeof(priv))
return -EINVAL;
@@ -140,8 +141,13 @@
ret = -ENOMEM;
goto fail_pt;
}
+ snprintf(fence_name, sizeof(fence_name),
+ "%s-pid-%d-ctx-%d-ts-%d",
+ device->name, current->group_leader->pid,
+ context_id, timestamp);
- fence = sync_fence_create("kgsl-fence", pt);
+
+ fence = sync_fence_create(fence_name, pt);
if (fence == NULL) {
/* only destroy pt when not added to fence */
kgsl_sync_pt_destroy(pt);
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index 87a4ab9..799fc21 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -23,6 +23,10 @@
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
+#include <linux/device-mapper.h>
+#include <linux/printk.h>
+#include <linux/pft.h>
+
#include <crypto/scatterwalk.h>
#include <asm/page.h>
#include <asm/unaligned.h>
@@ -31,9 +35,6 @@
#include <crypto/algapi.h>
#include <mach/qcrypto.h>
-#include <linux/device-mapper.h>
-
-
#define DM_MSG_PREFIX "req-crypt"
#define MAX_SG_LIST 1024
@@ -52,13 +53,17 @@
int err;
};
-struct dm_dev *dev;
+#define FDE_KEY_ID 0
+#define PFE_KEY_ID 1
+
+static struct dm_dev *dev;
static struct kmem_cache *_req_crypt_io_pool;
-sector_t start_sector_orig;
-struct workqueue_struct *req_crypt_queue;
-mempool_t *req_io_pool;
-mempool_t *req_page_pool;
-struct crypto_ablkcipher *tfm;
+static sector_t start_sector_orig;
+static struct workqueue_struct *req_crypt_queue;
+static mempool_t *req_io_pool;
+static mempool_t *req_page_pool;
+static bool is_fde_enabled;
+static struct crypto_ablkcipher *tfm;
struct req_dm_crypt_io {
struct work_struct work;
@@ -66,12 +71,83 @@
int error;
atomic_t pending;
struct timespec start_time;
+ bool should_encrypt;
+ bool should_decrypt;
+ u32 key_id;
};
static void req_crypt_cipher_complete
(struct crypto_async_request *req, int err);
+static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
+{
+ int ret;
+ bool should_encrypt = false;
+ struct bio *bio = NULL;
+ struct inode *inode = NULL;
+ u32 key_id = 0;
+ bool is_encrypted = false;
+ bool is_inplace = false;
+
+ if (!req || !req->cloned_request || !req->cloned_request->bio)
+ return false;
+
+ bio = req->cloned_request->bio;
+
+ if (!bio->bi_io_vec || !bio->bi_io_vec->bv_page ||
+ !bio->bi_io_vec->bv_page->mapping)
+ return false;
+
+ inode = bio->bi_io_vec->bv_page->mapping->host;
+
+ ret = pft_get_key_index(inode, &key_id, &is_encrypted, &is_inplace);
+ /* req->key_id = key_id; @todo support more than 1 pfe key */
+ if ((ret == 0) && (is_encrypted || is_inplace)) {
+ should_encrypt = true;
+ req->key_id = PFE_KEY_ID;
+ } else if (is_fde_enabled) {
+ should_encrypt = true;
+ req->key_id = FDE_KEY_ID;
+ }
+
+ return should_encrypt;
+}
+
+static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
+{
+ int ret;
+ bool should_deccrypt = false;
+ struct bio *bio = NULL;
+ struct inode *inode = NULL;
+ u32 key_id = 0;
+ bool is_encrypted = false;
+ bool is_inplace = false;
+
+ if (!req || !req->cloned_request || !req->cloned_request->bio)
+ return false;
+
+ bio = req->cloned_request->bio;
+
+ if (!bio->bi_io_vec || !bio->bi_io_vec->bv_page ||
+ !bio->bi_io_vec->bv_page->mapping)
+ return false;
+
+ inode = bio->bi_io_vec->bv_page->mapping->host;
+
+ ret = pft_get_key_index(inode, &key_id, &is_encrypted, &is_inplace);
+ /* req->key_id = key_id; @todo support more than 1 pfe key */
+ if ((ret == 0) && (is_encrypted && !is_inplace)) {
+ should_deccrypt = true;
+ req->key_id = PFE_KEY_ID;
+ } else if (is_fde_enabled) {
+ should_deccrypt = true;
+ req->key_id = FDE_KEY_ID;
+ }
+
+ return should_deccrypt;
+}
+
static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
{
atomic_inc(&io->pending);
@@ -196,6 +272,13 @@
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req_crypt_cipher_complete, &result);
init_completion(&result.completion);
+ err = qcrypto_cipher_set_device(req, io->key_id);
+ if (err != 0) {
+ DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
+ __func__, err);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
qcrypto_cipher_set_flag(req,
QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
crypto_ablkcipher_clear_flags(tfm, ~0);
@@ -270,6 +353,26 @@
}
/*
+ * This callback is called by the worker queue to perform non-decrypt reads
+ * and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ int error = 0;
+
+ if (!io || !io->cloned_request) {
+ DMERR("%s io is invalid\n", __func__);
+ BUG(); /* should not happen */
+ }
+
+ clone = io->cloned_request;
+
+ dm_end_request(clone, error);
+ mempool_free(io, req_io_pool);
+}
+
+/*
* The callback that will be called by the worker queue to perform Encryption
* for writes and submit the request using the elevelator.
*/
@@ -291,6 +394,7 @@
struct page *page = NULL;
u8 IV[AES_XTS_IV_LEN];
int remaining_size = 0;
+ int err = 0;
if (io) {
if (io->cloned_request) {
@@ -322,6 +426,13 @@
req_crypt_cipher_complete, &result);
init_completion(&result.completion);
+ err = qcrypto_cipher_set_device(req, io->key_id);
+ if (err != 0) {
+ DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
+ __func__, err);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
qcrypto_cipher_set_flag(req,
QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
crypto_ablkcipher_clear_flags(tfm, ~0);
@@ -460,19 +571,44 @@
req_crypt_dec_pending_encrypt(io);
}
+/*
+ * This callback is called by the worker queue to perform non-encrypted writes
+ * and submit the request using the elevelator.
+ */
+static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+
+ if (!io || !io->cloned_request) {
+ DMERR("%s io is invalid\n", __func__);
+ BUG(); /* should not happen */
+ }
+
+ clone = io->cloned_request;
+ io->error = 0;
+ dm_dispatch_request(clone);
+}
+
/* Queue callback function that will get triggered */
static void req_cryptd_crypt(struct work_struct *work)
{
struct req_dm_crypt_io *io =
container_of(work, struct req_dm_crypt_io, work);
- if (rq_data_dir(io->cloned_request) == WRITE)
- req_cryptd_crypt_write_convert(io);
- else if (rq_data_dir(io->cloned_request) == READ)
- req_cryptd_crypt_read_convert(io);
- else
- DMERR("%s received non-read/write request for Clone %u\n",
+ if (rq_data_dir(io->cloned_request) == WRITE) {
+ if (io->should_encrypt)
+ req_cryptd_crypt_write_convert(io);
+ else
+ req_cryptd_crypt_write_plain(io);
+ } else if (rq_data_dir(io->cloned_request) == READ) {
+ if (io->should_decrypt)
+ req_cryptd_crypt_read_convert(io);
+ else
+ req_cryptd_crypt_read_plain(io);
+ } else {
+ DMERR("%s received non-read/write request for Clone %u\n",
__func__, (unsigned int)io->cloned_request);
+ }
}
static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
@@ -537,7 +673,7 @@
bvec = NULL;
if (rq_data_dir(clone) == WRITE) {
rq_for_each_segment(bvec, clone, iter1) {
- if (bvec->bv_offset == 0) {
+ if (req_io->should_encrypt && bvec->bv_offset == 0) {
mempool_free(bvec->bv_page, req_page_pool);
bvec->bv_page = NULL;
} else
@@ -565,7 +701,6 @@
* For a read request no pre-processing is required the request
* is returned to dm once mapping is done
*/
-
static int req_crypt_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
@@ -594,6 +729,11 @@
map_context->ptr = req_io;
atomic_set(&req_io->pending, 0);
+ if (rq_data_dir(clone) == WRITE)
+ req_io->should_encrypt = req_crypt_should_encrypt(req_io);
+ if (rq_data_dir(clone) == READ)
+ req_io->should_decrypt = req_crypt_should_deccrypt(req_io);
+
/* Get the queue of the underlying original device */
clone->q = bdev_get_queue(dev->bdev);
clone->rq_disk = dev->bdev->bd_disk;
@@ -641,6 +781,8 @@
static void req_crypt_dtr(struct dm_target *ti)
{
+ DMDEBUG("dm-req-crypt Destructor.\n");
+
if (req_crypt_queue) {
destroy_workqueue(req_crypt_queue);
req_crypt_queue = NULL;
@@ -670,6 +812,8 @@
char dummy;
int err = DM_REQ_CRYPT_ERROR;
+ DMDEBUG("dm-req-crypt Constructor.\n");
+
if (argc < 5) {
DMERR(" %s Not enough args\n", __func__);
err = DM_REQ_CRYPT_ERROR;
@@ -696,13 +840,24 @@
goto ctr_exit;
}
} else {
- DMERR(" %s Arg[4]invalid\n", __func__);
+ DMERR(" %s Arg[4] invalid\n", __func__);
err = DM_REQ_CRYPT_ERROR;
goto ctr_exit;
}
start_sector_orig = tmpll;
+ if (argv[5]) {
+ if (!strcmp(argv[5], "fde_enabled"))
+ is_fde_enabled = true;
+ else
+ is_fde_enabled = false;
+ } else {
+ DMERR(" %s Arg[5] invalid, set FDE enabled.\n", __func__);
+ is_fde_enabled = true; /* backward compatible */
+ }
+ DMDEBUG("%s is_fde_enabled=%d\n", __func__, is_fde_enabled);
+
req_crypt_queue = alloc_workqueue("req_cryptd",
WQ_NON_REENTRANT |
WQ_HIGHPRI |
@@ -725,6 +880,7 @@
}
req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+ BUG_ON(!req_io_pool);
if (!req_io_pool) {
DMERR("%s req_io_pool not allocated\n", __func__);
err = DM_REQ_CRYPT_ERROR;
@@ -791,6 +947,8 @@
kmem_cache_destroy(_req_crypt_io_pool);
}
+ DMINFO("dm-req-crypt successfully initialized.\n");
+
return r;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 69c5190..e8702e4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -355,6 +355,7 @@
struct msm_vfe_src_info src_info[VFE_SRC_MAX];
uint16_t stream_handle_cnt;
unsigned long event_mask;
+ uint32_t burst_len;
};
struct msm_vfe_stats_hardware_info {
@@ -397,6 +398,7 @@
uint16_t stream_handle_cnt;
atomic_t stats_update;
uint32_t stats_mask;
+ uint32_t stats_burst_len;
};
struct msm_vfe_tasklet_queue_cmd {
@@ -480,8 +482,7 @@
struct list_head tasklet_q;
struct tasklet_struct vfe_tasklet;
struct msm_vfe_tasklet_queue_cmd
- tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
-
+ tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
uint32_t soc_hw_version;
uint32_t vfe_hw_version;
struct msm_vfe_hardware_info *hw_info;
@@ -498,6 +499,7 @@
void __iomem *p_avtimer_lsw;
uint8_t ignore_error;
struct msm_isp_statistics *stats;
+ uint32_t vfe_ub_size;
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index e817680..353b55f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -36,10 +36,10 @@
#define VFE40_8x26_VERSION 0x20000013
#define VFE40_8x26V2_VERSION 0x20010014
-#define VFE40_BURST_LEN 1
-#define VFE40_STATS_BURST_LEN 1
-#define VFE40_UB_SIZE 1536
-#define VFE40_EQUAL_SLICE_UB 228
+
+/* STATS_SIZE (BE + BG + BF + RS + CS + IHIST + BHIST) = 392 */
+#define VFE40_STATS_SIZE 392
+
#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
@@ -971,6 +971,11 @@
uint8_t plane_idx)
{
uint32_t val;
+
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t burst_len = axi_data->burst_len;
+
uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
if (!stream_info->frame_based) {
@@ -992,7 +997,7 @@
plane_idx].output_stride) << 16 |
(stream_info->plane_cfg[
plane_idx].output_height - 1) << 4 |
- VFE40_BURST_LEN;
+ burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
} else {
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
@@ -1002,7 +1007,7 @@
plane_idx].output_width) << 16 |
(stream_info->plane_cfg[
plane_idx].output_height - 1) << 4 |
- VFE40_BURST_LEN;
+ burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
}
@@ -1117,6 +1122,7 @@
uint8_t num_used_wms = 0;
uint32_t prop_size = 0;
uint32_t wm_ub_size;
+ uint32_t axi_wm_ub;
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i] > 0) {
@@ -1124,7 +1130,9 @@
total_image_size += axi_data->wm_image_size[i];
}
}
- prop_size = MSM_ISP40_TOTAL_WM_UB -
+ axi_wm_ub = vfe_dev->vfe_ub_size - VFE40_STATS_SIZE;
+
+ prop_size = axi_wm_ub -
axi_data->hw_info->min_wm_ub * num_used_wms;
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i]) {
@@ -1149,10 +1157,14 @@
int i;
uint32_t ub_offset = 0;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t axi_equal_slice_ub =
+ (vfe_dev->vfe_ub_size - VFE40_STATS_SIZE)/
+ (axi_data->hw_info->num_wm - 1);
+
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
- msm_camera_io_w(ub_offset << 16 | (VFE40_EQUAL_SLICE_UB - 1),
+ msm_camera_io_w(ub_offset << 16 | (axi_equal_slice_ub - 1),
vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
- ub_offset += VFE40_EQUAL_SLICE_UB;
+ ub_offset += axi_equal_slice_ub;
}
}
@@ -1334,7 +1346,11 @@
static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
{
int i;
- uint32_t ub_offset = VFE40_UB_SIZE;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t ub_offset = vfe_dev->vfe_ub_size;
+ uint32_t stats_burst_len = stats_data->stats_burst_len;
+
+
uint32_t ub_size[VFE40_NUM_STATS_TYPE] = {
64, /*MSM_ISP_STATS_BE*/
128, /*MSM_ISP_STATS_BG*/
@@ -1348,7 +1364,7 @@
for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
ub_offset -= ub_size[i];
- msm_camera_io_w(VFE40_STATS_BURST_LEN << 30 |
+ msm_camera_io_w(stats_burst_len << 30 |
ub_offset << 16 | (ub_size[i] - 1),
vfe_dev->vfe_base + VFE40_STATS_BASE(i) + 0xC);
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 4c3a3d5..d11ea68 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -524,7 +524,9 @@
stream_info->format_factor / ISP_Q2;
} else {
int rdi = SRC_TO_INTF(stream_info->stream_src);
- stream_info->bandwidth = axi_data->src_info[rdi].pixel_clock;
+ if (rdi < VFE_SRC_MAX)
+ stream_info->bandwidth =
+ axi_data->src_info[rdi].pixel_clock;
}
}
@@ -534,6 +536,7 @@
uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
rc = msm_isp_axi_create_stream(
&vfe_dev->axi_data, stream_cfg_cmd);
@@ -581,6 +584,8 @@
msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
stream_info->vt_enable = stream_cfg_cmd->vt_enable;
+ axi_data->burst_len = stream_cfg_cmd->burst_len;
+
if (stream_info->vt_enable) {
vfe_dev->vt_enable = stream_info->vt_enable;
#ifdef CONFIG_MSM_AVTIMER
@@ -853,8 +858,11 @@
struct msm_isp_event_data buf_event;
struct timeval *time_stamp;
uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
- uint32_t frame_id = vfe_dev->axi_data.
- src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+ uint32_t src_intf = SRC_TO_INTF(stream_info->stream_src);
+ uint32_t frame_id = 0;
+ if (src_intf < VFE_SRC_MAX) {
+ frame_id = vfe_dev->axi_data.src_info[src_intf].frame_id;
+ }
if (buf && ts) {
if (vfe_dev->vt_enable) {
@@ -1196,7 +1204,7 @@
enum msm_isp_camif_update_state camif_update)
{
int i, rc = 0;
- uint8_t src_state, wait_for_complete = 0;
+ uint8_t src_state = 0, wait_for_complete = 0;
uint32_t wm_reload_mask = 0x0;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -1212,8 +1220,9 @@
}
stream_info = &axi_data->stream_info[
HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- src_state = axi_data->src_info[
- SRC_TO_INTF(stream_info->stream_src)].active;
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ src_state = axi_data->src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
msm_isp_calculate_bandwidth(axi_data, stream_info);
msm_isp_reset_framedrop(vfe_dev, stream_info);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 6bd7585..d4c86a5 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -490,6 +490,9 @@
{
int rc = 0;
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ stats_data->stats_burst_len = stream_cfg_cmd->stats_burst_len;
+
if (vfe_dev->stats_data.num_active_stream == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index b1521df..a81c7bb 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -701,6 +701,10 @@
}
break;
}
+ case SET_WM_UB_SIZE: {
+ vfe_dev->vfe_ub_size = *cfg_data;
+ break;
+ }
}
return 0;
}
@@ -1143,6 +1147,11 @@
vfe_dev->hw_info->vfe_ops.irq_ops.
read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+ if ((irq_status0 == 0) && (irq_status1 == 0)) {
+ pr_err_ratelimited("%s: irq_status0 & 1 are both 0\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
msm_isp_process_overflow_irq(vfe_dev,
&irq_status0, &irq_status1);
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -1158,7 +1167,7 @@
if ((irq_status0 == 0) && (irq_status1 == 0) &&
(!((error_mask0 != 0) || (error_mask1 != 0)) &&
vfe_dev->error_info.error_count == 1)) {
- ISP_DBG("%s: irq_status0 & 1 are both 0!\n", __func__);
+ ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
return IRQ_HANDLED;
}
@@ -1211,7 +1220,7 @@
spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
if (atomic_read(&vfe_dev->error_info.overflow_state) !=
NO_OVERFLOW) {
- pr_err("There is Overflow, kicking up recovery !!!!");
+			pr_err_ratelimited("There is Overflow, kicking up recovery !!!!\n");
msm_isp_process_overflow_recovery(vfe_dev,
irq_status0, irq_status1);
continue;
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
index 407b81f..2f943a4 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
@@ -347,8 +347,12 @@
JPEG_DBG("%s:%d]", __func__, __LINE__);
}
#endif
+ if (pgmn_dev->jpeg_bus_client) {
+ msm_bus_scale_client_update_request(
+ pgmn_dev->jpeg_bus_client, 0);
+ msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
+ }
- msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);
JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
old mode 100644
new mode 100755
index 071f698..9f0dac4
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3467,13 +3467,13 @@
capability->height.min);
rc = -ENOTSUPP;
}
-
- if (!rc) {
- rc = call_hfi_op(hdev, capability_check,
- inst->fmts[OUTPUT_PORT]->fourcc,
- inst->prop.width[CAPTURE_PORT],
- &capability->width.max,
- &capability->height.max);
+ if (!rc && (inst->prop.width[CAPTURE_PORT] >
+ capability->width.max)) {
+ dprintk(VIDC_ERR,
+ "Unsupported width = %u supported max width = %u",
+ inst->prop.width[CAPTURE_PORT],
+ capability->width.max);
+ rc = -ENOTSUPP;
}
if (!rc && (inst->prop.height[CAPTURE_PORT]
* inst->prop.width[CAPTURE_PORT] >
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
old mode 100644
new mode 100755
index 475683c..0ac6fc4
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -20,7 +20,6 @@
int msm_fw_debug = 0x18;
int msm_fw_debug_mode = 0x1;
int msm_fw_low_power_mode = 0x1;
-int msm_vp8_low_tier = 0x1;
int msm_vidc_hw_rsp_timeout = 1000;
struct debug_buffer {
@@ -184,11 +183,6 @@
dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
goto failed_create_dir;
}
- if (!debugfs_create_u32("vp8_low_tier", S_IRUGO | S_IWUSR,
- parent, &msm_vp8_low_tier)) {
- dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
- goto failed_create_dir;
- }
if (!debugfs_create_u32("debug_output", S_IRUGO | S_IWUSR,
parent, &msm_vidc_debug_out)) {
dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
old mode 100644
new mode 100755
index bbba29a..486d740
--- a/drivers/media/platform/msm/vidc/q6_hfi.c
+++ b/drivers/media/platform/msm/vidc/q6_hfi.c
@@ -1318,24 +1318,6 @@
return rc;
}
-int q6_hfi_capability_check(u32 fourcc, u32 width,
- u32 *max_width, u32 *max_height)
-{
- int rc = 0;
- if (!max_width || !max_height) {
- dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
- return -EINVAL;
- }
-
- if (width > *max_width) {
- dprintk(VIDC_ERR,
- "Unsupported width = %u supported max width = %u\n",
- width, *max_width);
- rc = -ENOTSUPP;
- }
- return rc;
-}
-
static void q6_hfi_unload_fw(void *hfi_device_data)
{
struct q6_hfi_device *device = hfi_device_data;
@@ -1390,7 +1372,6 @@
hdev->unset_ocmem = q6_hfi_unset_ocmem;
hdev->iommu_get_domain_partition = q6_hfi_iommu_get_domain_partition;
hdev->load_fw = q6_hfi_load_fw;
- hdev->capability_check = q6_hfi_capability_check;
hdev->unload_fw = q6_hfi_unload_fw;
hdev->get_stride_scanline = q6_hfi_get_stride_scanline;
}
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
old mode 100644
new mode 100755
index 30ee45d..448fe3b
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3747,29 +3747,6 @@
return rc;
}
-int venus_hfi_capability_check(u32 fourcc, u32 width,
- u32 *max_width, u32 *max_height)
-{
- int rc = 0;
- if (!max_width || !max_height) {
- dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
- return -EINVAL;
- }
-
- if (msm_vp8_low_tier && fourcc == V4L2_PIX_FMT_VP8) {
- *max_width = DEFAULT_WIDTH;
- *max_height = DEFAULT_HEIGHT;
- }
-
- if (width > *max_width) {
- dprintk(VIDC_ERR,
- "Unsupported width = %u supported max width = %u\n",
- width, *max_width);
- rc = -ENOTSUPP;
- }
- return rc;
-}
-
static void *venus_hfi_add_device(u32 device_id,
struct msm_vidc_platform_resources *res,
hfi_cmd_response_callback callback)
@@ -3932,7 +3909,6 @@
hdev->get_info = venus_hfi_get_info;
hdev->get_stride_scanline = venus_hfi_get_stride_scanline;
hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
- hdev->capability_check = venus_hfi_capability_check;
hdev->power_enable = venus_hfi_power_enable;
}
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
old mode 100644
new mode 100755
index 62507a1..38c5bdb
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1168,8 +1168,6 @@
int (*get_info) (void *dev, enum dev_info info);
int (*get_stride_scanline)(int color_fmt, int width,
int height, int *stride, int *scanlines);
- int (*capability_check)(u32 fourcc, u32 width,
- u32 *max_width, u32 *max_height);
int (*session_clean)(void *sess);
int (*get_core_capabilities)(void);
int (*power_enable)(void *dev);
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index d5c753f..3b8fba8 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -1814,7 +1814,8 @@
chip->dc_present = dc_present;
if (qpnp_chg_is_otg_en_set(chip))
qpnp_chg_force_run_on_batt(chip, !dc_present ? 1 : 0);
- if (!dc_present && !qpnp_chg_is_usb_chg_plugged_in(chip)) {
+ if (!dc_present && (!qpnp_chg_is_usb_chg_plugged_in(chip) ||
+ qpnp_chg_is_otg_en_set(chip))) {
chip->delta_vddmax_mv = 0;
qpnp_chg_set_appropriate_vddmax(chip);
chip->chg_done = false;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index e3284d5..81640a0 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -33,7 +33,9 @@
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_INT_EN 0x0004
-
+#define PMIC_ARB_PROTOCOL_IRQ_STATUS (0x700 + 0x820)
+#define PMIC_ARB_GENI_CTRL 0x0024
+#define PMIC_ARB_GENI_STATUS 0x0028
/* PMIC Arbiter channel registers */
#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N)))
#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N)))
@@ -125,6 +127,7 @@
u8 max_apid;
u16 periph_id_map[PMIC_ARB_MAX_PERIPHS];
u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
+ u32 prev_prtcl_irq_stat;
};
static struct spmi_pmic_arb_dev *the_pmic_arb;
@@ -143,6 +146,37 @@
writel_relaxed(val, dev->base + offset);
}
+static void pmic_arb_save_stat_before_txn(struct spmi_pmic_arb_dev *dev)
+{
+ dev->prev_prtcl_irq_stat =
+ readl_relaxed(dev->cnfg + PMIC_ARB_PROTOCOL_IRQ_STATUS);
+}
+
+static int pmic_arb_diagnosis(struct spmi_pmic_arb_dev *dev, u32 status)
+{
+ if (status & PMIC_ARB_STATUS_DENIED) {
+ dev_err(dev->dev,
+ "wait_for_done: transaction denied by SPMI master (0x%x)\n",
+ status);
+ return -EPERM;
+ }
+
+ if (status & PMIC_ARB_STATUS_FAILURE) {
+ dev_err(dev->dev,
+ "wait_for_done: transaction failed (0x%x)\n", status);
+ return -EIO;
+ }
+
+ if (status & PMIC_ARB_STATUS_DROPPED) {
+ dev_err(dev->dev,
+ "wait_for_done: transaction dropped pmic-arb busy (0x%x)\n",
+ status);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
static int pmic_arb_wait_for_done(struct spmi_pmic_arb_dev *dev)
{
u32 status = 0;
@@ -152,34 +186,13 @@
while (timeout--) {
status = pmic_arb_read(dev, offset);
- if (status & PMIC_ARB_STATUS_DONE) {
- if (status & PMIC_ARB_STATUS_DENIED) {
- dev_err(dev->dev,
- "%s: transaction denied (0x%x)\n",
- __func__, status);
- return -EPERM;
- }
+ if (status & PMIC_ARB_STATUS_DONE)
+ return pmic_arb_diagnosis(dev, status);
- if (status & PMIC_ARB_STATUS_FAILURE) {
- dev_err(dev->dev,
- "%s: transaction failed (0x%x)\n",
- __func__, status);
- return -EIO;
- }
-
- if (status & PMIC_ARB_STATUS_DROPPED) {
- dev_err(dev->dev,
- "%s: transaction dropped (0x%x)\n",
- __func__, status);
- return -EIO;
- }
-
- return 0;
- }
udelay(1);
}
- dev_err(dev->dev, "%s: timeout, status 0x%x\n", __func__, status);
+	dev_err(dev->dev, "wait_for_done: timeout, status 0x%x\n", status);
return -ETIMEDOUT;
}
@@ -209,6 +222,29 @@
pmic_arb_write(dev, reg, data);
}
+static void pmic_arb_dbg_err_dump(struct spmi_pmic_arb_dev *pmic_arb, int ret,
+ const char *msg, u8 opc, u8 sid, u16 addr, u8 bc, u8 *buf)
+{
+ u32 irq_stat = readl_relaxed(pmic_arb->cnfg
+ + PMIC_ARB_PROTOCOL_IRQ_STATUS);
+ u32 geni_stat = readl_relaxed(pmic_arb->cnfg + PMIC_ARB_GENI_STATUS);
+ u32 geni_ctrl = readl_relaxed(pmic_arb->cnfg + PMIC_ARB_GENI_CTRL);
+
+ bc += 1; /* actual byte count */
+
+ if (buf)
+ dev_err(pmic_arb->dev,
+ "error:%d on data %s opcode:0x%x sid:%d addr:0x%x bc:%d buf:%*phC\n",
+ ret, msg, opc, sid, addr, bc, bc, buf);
+ else
+ dev_err(pmic_arb->dev,
+ "error:%d on non-data-cmd opcode:0x%x sid:%d\n",
+ ret, opc, sid);
+ dev_err(pmic_arb->dev,
+ "PROTOCOL_IRQ_STATUS before:0x%x after:0x%x GENI_STATUS:0x%x GENI_CTRL:0x%x\n",
+		pmic_arb->prev_prtcl_irq_stat, irq_stat, geni_stat, geni_ctrl);
+}
+
/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
@@ -228,10 +264,13 @@
cmd = (opc << 27) | ((sid & 0xf) << 20);
spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_save_stat_before_txn(pmic_arb);
pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
rc = pmic_arb_wait_for_done(pmic_arb);
spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ if (rc)
+ pmic_arb_dbg_err_dump(pmic_arb, rc, "cmd", opc, sid, 0, 0, 0);
return rc;
}
@@ -249,7 +288,8 @@
, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
return -EINVAL;
}
- pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
+ dev_dbg(pmic_arb->dev, "client-rd op:0x%x sid:%d addr:0x%x bc:%d\n",
+ opc, sid, addr, bc + 1);
/* Check the opcode */
if (opc >= 0x60 && opc <= 0x7F)
@@ -264,6 +304,7 @@
cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_save_stat_before_txn(pmic_arb);
pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
rc = pmic_arb_wait_for_done(pmic_arb);
if (rc)
@@ -279,6 +320,9 @@
done:
spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ if (rc)
+ pmic_arb_dbg_err_dump(pmic_arb, rc, "read", opc, sid, addr, bc,
+ buf);
return rc;
}
@@ -296,7 +340,8 @@
, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
return -EINVAL;
}
- pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
+ dev_dbg(pmic_arb->dev, "client-wr op:0x%x sid:%d addr:0x%x bc:%d\n",
+ opc, sid, addr, bc + 1);
/* Check the opcode */
if (opc >= 0x40 && opc <= 0x5F)
@@ -314,6 +359,7 @@
/* Write data to FIFOs */
spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_save_stat_before_txn(pmic_arb);
pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
, min_t(u8, bc, 3));
if (bc > 3)
@@ -325,6 +371,10 @@
rc = pmic_arb_wait_for_done(pmic_arb);
spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ if (rc)
+ pmic_arb_dbg_err_dump(pmic_arb, rc, "write", opc, sid, addr, bc,
+ buf);
+
return rc;
}
@@ -501,7 +551,9 @@
int i;
if (!is_apid_valid(pmic_arb, apid)) {
- dev_err(pmic_arb->dev, "unknown peripheral id 0x%x\n", ppid);
+ dev_err(pmic_arb->dev,
+ "periph_interrupt(apid:0x%x sid:0x%x pid:0x%x) unknown peripheral\n",
+ apid, sid, pid);
/* return IRQ_NONE; */
}
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 81b683d..e5eb9b2 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -257,8 +259,8 @@
switch (event) {
case CPUFREQ_INCOMPATIBLE:
- pr_debug("%s: mitigating cpu %d to freq max: %u min: %u\n",
- KBUILD_MODNAME, policy->cpu, max_freq_req, min_freq_req);
+ pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
+ policy->cpu, max_freq_req, min_freq_req);
cpufreq_verify_within_limits(policy, min_freq_req,
max_freq_req);
@@ -283,7 +285,7 @@
table = cpufreq_frequency_get_table(0);
if (!table) {
- pr_debug("%s: error reading cpufreq table\n", __func__);
+ pr_debug("error reading cpufreq table\n");
return -EINVAL;
}
freq_table_get = 1;
@@ -293,9 +295,13 @@
static void update_cpu_freq(int cpu)
{
+ int ret = 0;
+
if (cpu_online(cpu)) {
- if (cpufreq_update_policy(cpu))
- pr_err("Unable to update policy for cpu:%d\n", cpu);
+ ret = cpufreq_update_policy(cpu);
+ if (ret)
+ pr_err("Unable to update policy for cpu:%d. err:%d\n",
+ cpu, ret);
}
}
@@ -307,13 +313,14 @@
if (!freq_table_get) {
ret = check_freq_table();
if (ret) {
- pr_err("%s:Fail to get freq table\n", KBUILD_MODNAME);
+ pr_err("Fail to get freq table. err:%d\n", ret);
return ret;
}
}
/* If min is larger than allowed max */
min = min(min, table[limit_idx_high].frequency);
+ pr_debug("Requesting min freq:%u for all CPU's\n", min);
if (freq_mitigation_task) {
min_freq_limit = min;
complete(&freq_mitigation_complete);
@@ -362,7 +369,7 @@
int ret = 0;
if (r->reg == NULL) {
- pr_info("Do not have regulator handle:%s, can't apply vdd\n",
+		pr_err("%s doesn't have regulator handle. can't apply vdd\n",
r->name);
return -EFAULT;
}
@@ -375,11 +382,15 @@
r->levels[r->num_levels - 1]);
if (!ret)
r->curr_level = -1;
+ pr_debug("Requested min level for %s. curr level: %d\n",
+ r->name, r->curr_level);
} else if (level >= 0 && level < (r->num_levels)) {
ret = regulator_set_voltage(r->reg, r->levels[level],
r->levels[r->num_levels - 1]);
if (!ret)
r->curr_level = level;
+ pr_debug("Requesting level %d for %s. curr level: %d\n",
+ r->levels[level], r->name, r->levels[r->curr_level]);
} else {
pr_err("level input:%d is not within range\n", level);
return -EINVAL;
@@ -395,12 +406,13 @@
int fail_cnt = 0;
int ret = 0;
+ pr_debug("Requesting PMIC Mode: %d\n", mode);
for (i = 0; i < psm_rails_cnt; i++) {
if (psm_rails[i].mode != mode) {
ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
if (ret) {
- pr_err("Cannot set mode:%d for %s",
- mode, psm_rails[i].name);
+				pr_err("Cannot set mode:%d for %s. err:%d\n",
+ mode, psm_rails[i].name, ret);
fail_cnt++;
} else
psm_rails[i].mode = mode;
@@ -473,6 +485,8 @@
en->enabled = 1;
else if (!val && (dis_cnt == rails_cnt))
en->enabled = 0;
+ pr_debug("%s vdd restriction. curr: %d\n",
+ (val) ? "Enable" : "Disable", en->enabled);
done_vdd_rstr_en:
mutex_unlock(&vdd_rstr_mutex);
@@ -547,12 +561,14 @@
ret = vdd_restriction_apply_voltage(reg, val);
if (ret) {
pr_err( \
- "Set vdd restriction for regulator %s failed\n",
- reg->name);
+ "Set vdd restriction for regulator %s failed. err:%d\n",
+ reg->name, ret);
goto done_store_level;
}
}
reg->curr_level = val;
+ pr_debug("Request level %d for %s\n",
+ reg->curr_level, reg->name);
}
done_store_level:
@@ -668,16 +684,15 @@
}
if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
- pr_err(" Invalid number %d for mode\n", val);
+ pr_err("Invalid number %d for mode\n", val);
goto done_psm_store;
}
if (val != reg->mode) {
ret = rpm_regulator_set_mode(reg->reg, val);
if (ret) {
- pr_err( \
- "Fail to set PMIC SW Mode:%d for %s\n",
- val, reg->name);
+ pr_err("Fail to set Mode:%d for %s. err:%d\n",
+ val, reg->name, ret);
goto done_psm_store;
}
reg->mode = val;
@@ -701,7 +716,7 @@
}
}
if (!hw_id_found) {
- pr_err("%s: Invalid sensor hw id :%d\n", __func__, sensor_id);
+ pr_err("Invalid sensor hw id:%d\n", sensor_id);
return -EINVAL;
}
@@ -716,8 +731,7 @@
tsens_id_map = kzalloc(sizeof(int) * max_tsens_num,
GFP_KERNEL);
if (!tsens_id_map) {
- pr_err("%s: Cannot allocate memory for tsens_id_map\n",
- __func__);
+ pr_err("Cannot allocate memory for tsens_id_map\n");
return -ENOMEM;
}
@@ -729,9 +743,8 @@
tsens_id_map[i] = i;
ret = 0;
} else {
- pr_err( \
- "%s: Failed to get hw id for sw id %d\n",
- __func__, i);
+ pr_err("Failed to get hw id for id:%d.err:%d\n",
+ i, ret);
goto fail;
}
}
@@ -760,7 +773,9 @@
ret = vdd_restriction_apply_voltage(&rails[i],
en ? 0 : -1);
if (ret) {
- pr_err("Cannot set voltage for %s", rails[i].name);
+ pr_err("Failed to %s for %s. err:%d",
+ (en) ? "enable" : "disable",
+ rails[i].name, ret);
fail_cnt++;
} else {
if (en)
@@ -792,7 +807,7 @@
table = cpufreq_frequency_get_table(0);
if (table == NULL) {
- pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
+ pr_err("error reading cpufreq table\n");
ret = -EINVAL;
goto fail;
}
@@ -814,15 +829,15 @@
ret = sensor_set_trip(sensor_id, threshold);
if (ret != 0) {
- pr_err("%s: Error in setting trip %d\n",
- KBUILD_MODNAME, threshold->trip);
+ pr_err("sensor:%u Error in setting trip:%d. err:%d\n",
+ sensor_id, threshold->trip, ret);
goto set_done;
}
ret = sensor_activate_trip(sensor_id, threshold, true);
if (ret != 0) {
- pr_err("%s: Error in enabling trip %d\n",
- KBUILD_MODNAME, threshold->trip);
+ pr_err("sensor:%u Error in enabling trip:%d. err:%d\n",
+ sensor_id, threshold->trip, ret);
goto set_done;
}
@@ -857,7 +872,7 @@
ret = tsens_get_temp(&tsens_dev, temp);
if (ret) {
- pr_err("Unable to read TSENS sensor %d\n",
+ pr_err("Unable to read TSENS sensor:%d\n",
tsens_dev.sensor_num);
goto get_temp_exit;
}
@@ -873,14 +888,17 @@
long temp;
if ((!threshold) || (zone_id >= max_tsens_num)) {
- pr_err("%s: Invalid input\n", KBUILD_MODNAME);
+ pr_err("Invalid input\n");
ret = -EINVAL;
goto set_threshold_exit;
}
ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp);
- if (ret)
+ if (ret) {
+ pr_err("Unable to read temperature for zone:%d. err:%d\n",
+ zone_id, ret);
goto set_threshold_exit;
+ }
while (i < MAX_THRESHOLD) {
switch (threshold[i].trip) {
@@ -901,6 +919,8 @@
}
break;
default:
+ pr_err("zone:%u Invalid trip:%d\n", zone_id,
+ threshold[i].trip);
break;
}
i++;
@@ -926,12 +946,12 @@
continue;
if (cpus_offlined & BIT(i) && !cpu_online(i))
continue;
- pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
- KBUILD_MODNAME, i, temp);
+ pr_info("Set Offline: CPU%d Temp: %ld\n",
+ i, temp);
ret = cpu_down(i);
if (ret)
- pr_err("%s: Error %d offline core %d\n",
- KBUILD_MODNAME, ret, i);
+ pr_err("Error %d offline core %d\n",
+ ret, i);
cpus_offlined |= BIT(i);
break;
}
@@ -942,8 +962,8 @@
if (!(cpus_offlined & BIT(i)))
continue;
cpus_offlined &= ~BIT(i);
- pr_info("%s: Allow Online CPU%d Temp: %ld\n",
- KBUILD_MODNAME, i, temp);
+ pr_info("Allow Online CPU%d Temp: %ld\n",
+ i, temp);
/*
* If this core is already online, then bring up the
* next offlined core.
@@ -952,8 +972,8 @@
continue;
ret = cpu_up(i);
if (ret)
- pr_err("%s: Error %d online core %d\n",
- KBUILD_MODNAME, ret, i);
+ pr_err("Error %d online core %d\n",
+ ret, i);
break;
}
}
@@ -977,8 +997,10 @@
continue;
ret = cpu_down(cpu);
if (ret)
- pr_err("%s: Unable to offline cpu%d\n",
- KBUILD_MODNAME, cpu);
+ pr_err("Unable to offline CPU%d. err:%d\n",
+ cpu, ret);
+ else
+ pr_debug("Offlined CPU%d\n", cpu);
}
return ret;
}
@@ -988,8 +1010,10 @@
int ret = 0;
uint32_t cpu = 0, mask = 0;
- if (!core_control_enabled)
+ if (!core_control_enabled) {
+ pr_debug("Core control disabled\n");
return -EINVAL;
+ }
while (!kthread_should_stop()) {
while (wait_for_completion_interruptible(
@@ -1056,7 +1080,11 @@
ocr_rails[j].init = OPTIMUM_CURRENT_NR;
ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
if (ret)
- pr_err("Error setting max optimum current\n");
+ pr_err("Error setting max ocr. err:%d\n",
+ ret);
+ else
+				pr_debug("Requested MAX OCR. tsens:%d Temp:%ld\n",
+ tsens_id_map[i], temp);
goto do_ocr_exit;
} else if (temp <= (msm_thermal_info.ocr_temp_degC -
msm_thermal_info.ocr_temp_hyst_degC))
@@ -1077,6 +1105,8 @@
if (ret) {
pr_err("Error setting min optimum current\n");
goto do_ocr_exit;
+ } else {
+			pr_debug("Requested MIN OCR. Temp:%ld\n", temp);
}
}
@@ -1104,8 +1134,8 @@
for (i = 0; i < max_tsens_num; i++) {
ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
if (ret) {
- pr_debug("Unable to read TSENS sensor %d\n",
- tsens_id_map[i]);
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ tsens_id_map[i], ret);
dis_cnt++;
continue;
}
@@ -1113,9 +1143,13 @@
ret = vdd_restriction_apply_all(1);
if (ret) {
pr_err( \
- "Enable vdd rstr votlage for all failed\n");
+ "Enable vdd rstr for all failed. err:%d\n",
+ ret);
goto exit;
}
+ pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%ld\n",
+ thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+ temp);
goto exit;
} else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
dis_cnt++;
@@ -1123,9 +1157,11 @@
if (dis_cnt == max_tsens_num) {
ret = vdd_restriction_apply_all(0);
if (ret) {
- pr_err("Disable vdd rstr votlage for all failed\n");
+ pr_err("Disable vdd rstr for all failed. err:%d\n",
+ ret);
goto exit;
}
+ pr_debug("Disabled Vdd Restriction\n");
}
exit:
mutex_unlock(&vdd_rstr_mutex);
@@ -1143,8 +1179,8 @@
for (i = 0; i < max_tsens_num; i++) {
ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
if (ret) {
- pr_debug("%s: Unable to read TSENS sensor %d\n",
- __func__, tsens_id_map[i]);
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ tsens_id_map[i], ret);
auto_cnt++;
continue;
}
@@ -1157,9 +1193,12 @@
if (temp > msm_thermal_info.psm_temp_degC) {
ret = psm_set_mode_all(PMIC_PWM_MODE);
if (ret) {
- pr_err("Set pwm mode for all failed\n");
+ pr_err("Set pwm mode for all failed. err:%d\n",
+ ret);
goto exit;
}
+ pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%ld\n",
+ tsens_id_map[i], temp);
break;
} else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
auto_cnt++;
@@ -1168,9 +1207,10 @@
if (auto_cnt == max_tsens_num) {
ret = psm_set_mode_all(PMIC_AUTO_MODE);
if (ret) {
- pr_err("Set auto mode for all failed\n");
+ pr_err("Set auto mode for all failed. err:%d\n", ret);
goto exit;
}
+ pr_debug("Requested PMIC AUTO Mode\n");
}
exit:
@@ -1212,6 +1252,8 @@
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
continue;
+ pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n",
+ cpu, max_freq, temp);
cpus[cpu].limited_max_freq = max_freq;
update_cpu_freq(cpu);
}
@@ -1226,8 +1268,8 @@
ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID, &temp);
if (ret) {
- pr_debug("Unable to read TSENS sensor %d\n",
- msm_thermal_info.sensor_id);
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ msm_thermal_info.sensor_id, ret);
goto reschedule;
}
@@ -1260,14 +1302,13 @@
if (core_control_enabled &&
(msm_thermal_info.core_control_mask & BIT(cpu)) &&
(cpus_offlined & BIT(cpu))) {
- pr_debug(
- "%s: Preventing cpu%d from coming online.\n",
- KBUILD_MODNAME, cpu);
+ pr_debug("Preventing CPU%d from coming online.\n",
+ cpu);
return NOTIFY_BAD;
}
}
-
+ pr_debug("voting for CPU%d to be online\n", cpu);
return NOTIFY_OK;
}
@@ -1312,8 +1353,7 @@
{
struct cpu_info *cpu_node = (struct cpu_info *)data;
- pr_info("%s: %s reach temp threshold: %d\n", KBUILD_MODNAME,
- cpu_node->sensor_type, temp);
+ pr_info("%s reach temp threshold: %d\n", cpu_node->sensor_type, temp);
if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
return 0;
@@ -1333,7 +1373,7 @@
cpu_node->hotplug_thresh_clear = true;
complete(&hotplug_notify_complete);
} else {
- pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
}
return 0;
}
@@ -1352,8 +1392,8 @@
continue;
if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type,
&temp)) {
- pr_err("%s: Unable to read TSENS sensor %d\n",
- KBUILD_MODNAME, cpus[cpu].sensor_id);
+ pr_err("Unable to read TSENS sensor:%d.\n",
+ cpus[cpu].sensor_id);
mutex_unlock(&core_control_mutex);
return -EINVAL;
}
@@ -1369,8 +1409,7 @@
if (hotplug_task)
complete(&hotplug_notify_complete);
else {
- pr_err("%s: Hotplug task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
return -EINVAL;
}
return 0;
@@ -1410,8 +1449,8 @@
init_completion(&hotplug_notify_complete);
hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
if (IS_ERR(hotplug_task)) {
- pr_err("%s: Failed to create do_hotplug thread\n",
- KBUILD_MODNAME);
+ pr_err("Failed to create do_hotplug thread. err:%ld\n",
+ PTR_ERR(hotplug_task));
return;
}
/*
@@ -1471,7 +1510,7 @@
{
struct cpu_info *cpu_node = (struct cpu_info *) data;
- pr_debug("%s: %s reached temp threshold: %d\n", KBUILD_MODNAME,
+ pr_debug("%s reached temp threshold: %d\n",
cpu_node->sensor_type, temp);
if (!(msm_thermal_info.freq_mitig_control_mask &
@@ -1481,8 +1520,8 @@
switch (type) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (!cpu_node->max_freq) {
- pr_info("%s: Mitigating cpu %d frequency to %d\n",
- KBUILD_MODNAME, cpu_node->cpu,
+ pr_info("Mitigating CPU%d frequency to %d\n",
+ cpu_node->cpu,
msm_thermal_info.freq_limit);
cpu_node->max_freq = true;
@@ -1490,8 +1529,8 @@
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (cpu_node->max_freq) {
- pr_info("%s: Removing frequency mitigation for cpu%d\n",
- KBUILD_MODNAME, cpu_node->cpu);
+ pr_info("Removing frequency mitigation for CPU%d\n",
+ cpu_node->cpu);
cpu_node->max_freq = false;
}
@@ -1504,8 +1543,7 @@
cpu_node->freq_thresh_clear = true;
complete(&freq_mitigation_complete);
} else {
- pr_err("%s: Frequency mitigation task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Frequency mitigation task is not initialized\n");
}
return 0;
@@ -1544,8 +1582,8 @@
"msm_thermal:freq_mitig");
if (IS_ERR(freq_mitigation_task)) {
- pr_err("%s: Failed to create frequency mitigation thread\n",
- KBUILD_MODNAME);
+ pr_err("Failed to create frequency mitigation thread. err:%ld\n",
+ PTR_ERR(freq_mitigation_task));
return;
}
}
@@ -1555,11 +1593,13 @@
int ret = 0;
if (cpu >= num_possible_cpus()) {
- pr_err("%s: Invalid input\n", KBUILD_MODNAME);
+ pr_err("Invalid input\n");
ret = -EINVAL;
goto set_freq_exit;
}
+ pr_debug("Userspace requested %s frequency %u for CPU%u\n",
+ (is_max) ? "Max" : "Min", freq, cpu);
if (is_max) {
if (cpus[cpu].user_max_freq == freq)
goto set_freq_exit;
@@ -1575,8 +1615,7 @@
if (freq_mitigation_task) {
complete(&freq_mitigation_complete);
} else {
- pr_err("%s: Frequency mitigation task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Frequency mitigation task is not initialized\n");
ret = -ESRCH;
goto set_freq_exit;
}
@@ -1591,8 +1630,7 @@
struct therm_threshold *thresh_ptr;
if (!thresh_inp) {
- pr_err("%s: %s: Invalid input\n",
- KBUILD_MODNAME, __func__);
+ pr_err("Invalid input\n");
ret = -EINVAL;
goto therm_set_exit;
}
@@ -1621,16 +1659,17 @@
if (!vdd_rstr_enabled)
return;
if (!trig_thresh) {
- pr_err("%s:%s Invalid input\n", KBUILD_MODNAME, __func__);
+ pr_err("Invalid input\n");
return;
}
if (trig_thresh->trip_triggered < 0)
goto set_and_exit;
mutex_lock(&vdd_rstr_mutex);
- pr_debug("%s: sensor%d reached %d thresh for Vdd restriction\n",
- KBUILD_MODNAME, trig_thresh->sensor_id,
- trig_thresh->trip_triggered);
+ pr_debug("sensor:%d reached %s thresh for Vdd restriction\n",
+ tsens_id_map[trig_thresh->sensor_id],
+ (trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
+ "high" : "low");
switch (trig_thresh->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (vdd_sens_status & BIT(trig_thresh->sensor_id))
@@ -1640,8 +1679,7 @@
vdd_sens_status |= BIT(trig_thresh->sensor_id);
break;
default:
- pr_err("%s:%s: Unsupported trip type\n",
- KBUILD_MODNAME, __func__);
+ pr_err("Unsupported trip type\n");
goto unlock_and_exit;
break;
}
@@ -1697,8 +1735,8 @@
thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
"msm_thermal:therm_monitor");
if (IS_ERR(thermal_monitor_task)) {
- pr_err("%s: Failed to create thermal monitor thread\n",
- KBUILD_MODNAME);
+ pr_err("Failed to create thermal monitor thread. err:%ld\n",
+ PTR_ERR(thermal_monitor_task));
goto init_exit;
}
@@ -1718,8 +1756,7 @@
thresh_data->parent->thresh_triggered = true;
complete(&thermal_monitor_complete);
} else {
- pr_err("%s: Thermal monitor task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Thermal monitor task is not initialized\n");
}
return 0;
}
@@ -1733,14 +1770,13 @@
if (!callback || index >= MSM_LIST_MAX_NR || index < 0
|| sensor_id == -ENODEV) {
- pr_err("%s: Invalid input to init_threshold\n",
- KBUILD_MODNAME);
+ pr_err("Invalid input. sensor:%d. index:%d\n",
+ sensor_id, index);
ret = -EINVAL;
goto init_thresh_exit;
}
if (thresh[index].thresh_list) {
- pr_err("%s: threshold already initialized\n",
- KBUILD_MODNAME);
+ pr_err("threshold id:%d already initialized\n", index);
ret = -EEXIST;
goto init_thresh_exit;
}
@@ -1751,7 +1787,7 @@
thresh[index].thresh_list = kzalloc(sizeof(struct therm_threshold) *
thresh[index].thresh_ct, GFP_KERNEL);
if (!thresh[index].thresh_list) {
- pr_err("%s: kzalloc failed\n", KBUILD_MODNAME);
+ pr_err("kzalloc failed for thresh index:%d\n", index);
ret = -ENOMEM;
goto init_thresh_exit;
}
@@ -1813,6 +1849,7 @@
if (cpus[cpu].limited_max_freq == UINT_MAX &&
cpus[cpu].limited_min_freq == 0)
continue;
+ pr_info("Max frequency reset for CPU%d\n", cpu);
cpus[cpu].limited_max_freq = UINT_MAX;
cpus[cpu].limited_min_freq = 0;
update_cpu_freq(cpu);
@@ -1827,7 +1864,7 @@
return;
}
if (polling_enabled) {
- pr_info("%s: Interrupt mode init\n", KBUILD_MODNAME);
+ pr_info("Interrupt mode init\n");
polling_enabled = 0;
disable_msm_thermal();
hotplug_init();
@@ -1844,10 +1881,10 @@
if (!enabled)
interrupt_mode_init();
else
- pr_info("%s: no action for enabled = %d\n",
- KBUILD_MODNAME, enabled);
+ pr_info("no action for enabled = %d\n",
+ enabled);
- pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);
+ pr_info("enabled = %d\n", enabled);
return ret;
}
@@ -1874,7 +1911,7 @@
ret = kstrtoint(buf, 10, &val);
if (ret) {
- pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ pr_err("Invalid input %s. err:%d\n", buf, ret);
goto done_store_cc;
}
@@ -1883,15 +1920,14 @@
core_control_enabled = !!val;
if (core_control_enabled) {
- pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
+ pr_info("Core control enabled\n");
register_cpu_notifier(&msm_thermal_cpu_notifier);
if (hotplug_task)
complete(&hotplug_notify_complete);
else
- pr_err("%s: Hotplug task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
} else {
- pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
+ pr_info("Core control disabled\n");
unregister_cpu_notifier(&msm_thermal_cpu_notifier);
}
@@ -1915,13 +1951,12 @@
mutex_lock(&core_control_mutex);
ret = kstrtouint(buf, 10, &val);
if (ret) {
- pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ pr_err("Invalid input %s. err:%d\n", buf, ret);
goto done_cc;
}
if (polling_enabled) {
- pr_err("%s: Ignoring request; polling thread is enabled.\n",
- KBUILD_MODNAME);
+ pr_err("Ignoring request; polling thread is enabled.\n");
goto done_cc;
}
@@ -1929,12 +1964,15 @@
if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
continue;
cpus[cpu].user_offline = !!(val & BIT(cpu));
+ pr_debug("\"%s\"(PID:%i) requests %s CPU%d.\n", current->comm,
+ current->pid, (cpus[cpu].user_offline) ? "offline" :
+ "online", cpu);
}
if (hotplug_task)
complete(&hotplug_notify_complete);
else
- pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
done_cc:
mutex_unlock(&core_control_mutex);
return count;
@@ -2009,23 +2047,21 @@
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
- pr_err("%s: cannot find kobject for module\n",
- KBUILD_MODNAME);
+ pr_err("cannot find kobject\n");
ret = -ENOENT;
goto done_cc_nodes;
}
cc_kobj = kobject_create_and_add("core_control", module_kobj);
if (!cc_kobj) {
- pr_err("%s: cannot create core control kobj\n",
- KBUILD_MODNAME);
+ pr_err("cannot create core control kobj\n");
ret = -ENOMEM;
goto done_cc_nodes;
}
ret = sysfs_create_group(cc_kobj, &cc_attr_group);
if (ret) {
- pr_err("%s: cannot create group\n", KBUILD_MODNAME);
+ pr_err("cannot create sysfs group. err:%d\n", ret);
goto done_cc_nodes;
}
@@ -2078,6 +2114,7 @@
tsens_get_max_sensor_num(&max_tsens_num);
if (create_sensor_id_map()) {
+ pr_err("Creating sensor id map failed\n");
ret = -EINVAL;
goto pre_init_exit;
}
@@ -2087,8 +2124,7 @@
sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
GFP_KERNEL);
if (!thresh) {
- pr_err("%s:%s: kzalloc failed\n",
- KBUILD_MODNAME, __func__);
+ pr_err("kzalloc failed\n");
ret = -ENOMEM;
goto pre_init_exit;
}
@@ -2119,16 +2155,19 @@
BUG_ON(!pdata);
memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
- if (check_sensor_id(msm_thermal_info.sensor_id))
+ if (check_sensor_id(msm_thermal_info.sensor_id)) {
+ pr_err("Invalid sensor:%d for polling\n",
+ msm_thermal_info.sensor_id);
return -EINVAL;
+ }
enabled = 1;
polling_enabled = 1;
ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
- pr_err("%s: cannot register cpufreq notifier\n",
- KBUILD_MODNAME);
+ pr_err("cannot register cpufreq notifier. err:%d\n", ret);
+
INIT_DELAYED_WORK(&check_temp_work, check_temp);
schedule_delayed_work(&check_temp_work, 0);
@@ -2190,8 +2229,7 @@
if (freq_table_get)
ret = vdd_restriction_apply_freq(&rails[i], 0);
else
- pr_info("%s:Defer vdd rstr freq init\n",
- __func__);
+ pr_info("Defer vdd rstr freq init.\n");
} else {
rails[i].reg = devm_regulator_get(&pdev->dev,
rails[i].name);
@@ -2199,12 +2237,14 @@
ret = PTR_ERR(rails[i].reg);
if (ret != -EPROBE_DEFER) {
pr_err( \
- "%s, could not get regulator: %s\n",
- rails[i].name, __func__);
+ "could not get regulator: %s. err:%d\n",
+ rails[i].name, ret);
rails[i].reg = NULL;
rails[i].curr_level = -2;
return ret;
}
+ pr_info("Defer regulator %s probe\n",
+ rails[i].name);
return ret;
}
/*
@@ -2230,11 +2270,13 @@
if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
ret = PTR_ERR(psm_rails[i].reg);
if (ret != -EPROBE_DEFER) {
- pr_err("%s, could not get rpm regulator: %s\n",
- psm_rails[i].name, __func__);
+ pr_err("couldn't get rpm regulator %s. err%d\n",
+ psm_rails[i].name, ret);
psm_rails[i].reg = NULL;
goto psm_reg_exit;
}
+ pr_info("Defer regulator %s probe\n",
+ psm_rails[i].name);
return ret;
}
/* Apps default vote for PWM mode */
@@ -2242,7 +2284,7 @@
ret = rpm_regulator_set_mode(psm_rails[i].reg,
psm_rails[i].init);
if (ret) {
- pr_err("%s: Cannot set PMIC PWM mode\n", __func__);
+ pr_err("Cannot set PMIC PWM mode. err:%d\n", ret);
return ret;
} else
psm_rails[i].mode = PMIC_PWM_MODE;
@@ -2311,22 +2353,21 @@
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
- pr_err("%s: cannot find kobject for module %s\n",
- __func__, KBUILD_MODNAME);
+ pr_err("cannot find kobject\n");
rc = -ENOENT;
goto thermal_sysfs_add_exit;
}
vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
if (!vdd_rstr_kobj) {
- pr_err("%s: cannot create vdd_restriction kobject\n", __func__);
+ pr_err("cannot create vdd_restriction kobject\n");
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
if (rc) {
- pr_err("%s: cannot create kobject attribute group\n", __func__);
+ pr_err("cannot create kobject attribute group. err:%d\n", rc);
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
@@ -2335,8 +2376,8 @@
vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
vdd_rstr_kobj);
if (!vdd_rstr_reg_kobj[i]) {
- pr_err("%s: cannot create for kobject for %s\n",
- __func__, rails[i].name);
+ pr_err("cannot create kobject for %s\n",
+ rails[i].name);
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
@@ -2344,6 +2385,7 @@
rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
GFP_KERNEL);
if (!rails[i].attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
@@ -2355,8 +2397,8 @@
rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
&rails[i].attr_gp);
if (rc) {
- pr_err("%s: cannot create attribute group for %s\n",
- __func__, rails[i].name);
+ pr_err("cannot create attribute group for %s. err:%d\n",
+ rails[i].name, rc);
goto thermal_sysfs_add_exit;
}
}
@@ -2467,15 +2509,14 @@
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
- pr_err("%s: cannot find kobject for module %s\n",
- __func__, KBUILD_MODNAME);
+ pr_err("cannot find kobject\n");
rc = -ENOENT;
goto psm_node_exit;
}
psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
if (!psm_kobj) {
- pr_err("%s: cannot create psm kobject\n", KBUILD_MODNAME);
+ pr_err("cannot create psm kobject\n");
rc = -ENOMEM;
goto psm_node_exit;
}
@@ -2484,14 +2525,15 @@
psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
psm_kobj);
if (!psm_reg_kobj[i]) {
- pr_err("%s: cannot create for kobject for %s\n",
- KBUILD_MODNAME, psm_rails[i].name);
+ pr_err("cannot create kobject for %s\n",
+ psm_rails[i].name);
rc = -ENOMEM;
goto psm_node_exit;
}
psm_rails[i].attr_gp.attrs = kzalloc( \
sizeof(struct attribute *) * 2, GFP_KERNEL);
if (!psm_rails[i].attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
rc = -ENOMEM;
goto psm_node_exit;
}
@@ -2501,8 +2543,8 @@
rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
if (rc) {
- pr_err("%s: cannot create attribute group for %s\n",
- KBUILD_MODNAME, psm_rails[i].name);
+ pr_err("cannot create attribute group for %s. err:%d\n",
+ psm_rails[i].name, rc);
goto psm_node_exit;
}
}
@@ -2549,14 +2591,14 @@
if (rails_cnt == 0)
goto read_node_fail;
if (rails_cnt >= MAX_RAILS) {
- pr_err("%s: Too many rails.\n", __func__);
+ pr_err("Too many rails:%d.\n", rails_cnt);
return -EFAULT;
}
rails = kzalloc(sizeof(struct rail) * rails_cnt,
GFP_KERNEL);
if (!rails) {
- pr_err("%s: Fail to allocate memory for rails.\n", __func__);
+ pr_err("Fail to allocate memory for rails.\n");
return -ENOMEM;
}
@@ -2573,7 +2615,8 @@
rails[i].num_levels = arr_size/sizeof(__be32);
if (rails[i].num_levels >
sizeof(rails[i].levels)/sizeof(uint32_t)) {
- pr_err("%s: Array size too large\n", __func__);
+ pr_err("Array size:%d too large for index:%d\n",
+ rails[i].num_levels, i);
return -EFAULT;
}
ret = of_property_read_u32_array(child_node, key,
@@ -2601,23 +2644,26 @@
if (rails_cnt) {
ret = vdd_restriction_reg_init(pdev);
if (ret) {
- pr_info("%s:Failed to get regulators. KTM continues.\n",
- __func__);
+ pr_err("Err regulator init. err:%d. KTM continues.\n",
+ ret);
goto read_node_fail;
}
ret = init_threshold(MSM_VDD_RESTRICTION, MONITOR_ALL_TSENS,
data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
vdd_restriction_notify);
- if (ret)
+ if (ret) {
+ pr_err("Error in initializing thresholds. err:%d\n",
+ ret);
goto read_node_fail;
+ }
vdd_rstr_enabled = true;
}
read_node_fail:
vdd_rstr_probed = true;
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- __func__, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
kfree(rails);
rails_cnt = 0;
}
@@ -2890,7 +2936,7 @@
psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
GFP_KERNEL);
if (!psm_rails) {
- pr_err("%s: Fail to allocate memory for psm rails\n", __func__);
+ pr_err("Fail to allocate memory for psm rails\n");
psm_rails_cnt = 0;
return -ENOMEM;
}
@@ -2905,8 +2951,8 @@
if (psm_rails_cnt) {
ret = psm_reg_init(pdev);
if (ret) {
- pr_info("%s:Failed to get regulators. KTM continues.\n",
- __func__);
+ pr_err("Err regulator init. err:%d. KTM continues.\n",
+ ret);
goto read_node_fail;
}
psm_enabled = true;
@@ -2916,8 +2962,8 @@
psm_probed = true;
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- __func__, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
kfree(psm_rails);
psm_rails_cnt = 0;
}
@@ -2968,7 +3014,7 @@
key = "qcom,cpu-sensors";
cpu_cnt = of_property_count_strings(node, key);
if (cpu_cnt < num_possible_cpus()) {
- pr_err("%s: Wrong number of cpu sensors\n", KBUILD_MODNAME);
+ pr_err("Wrong number of cpu sensors:%d\n", cpu_cnt);
ret = -EINVAL;
goto hotplug_node_fail;
}
@@ -2983,8 +3029,8 @@
read_node_fail:
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- KBUILD_MODNAME, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
core_control_enabled = 0;
}
@@ -2993,8 +3039,8 @@
hotplug_node_fail:
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- KBUILD_MODNAME, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
hotplug_enabled = 0;
}
@@ -3034,8 +3080,8 @@
PROBE_FREQ_EXIT:
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- __func__, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
freq_mitigation_enabled = 0;
}
return ret;
@@ -3050,8 +3096,10 @@
memset(&data, 0, sizeof(struct msm_thermal_data));
ret = msm_thermal_pre_init();
- if (ret)
+ if (ret) {
+ pr_err("thermal pre init failed. err:%d\n", ret);
goto fail;
+ }
key = "qcom,sensor-id";
ret = of_property_read_u32(node, key, &data.sensor_id);
@@ -3135,8 +3183,8 @@
return ret;
fail:
if (ret)
- pr_err("%s: Failed reading node=%s, key=%s\n",
- __func__, node->full_name, key);
+ pr_err("Failed reading node=%s, key=%s. err:%d\n",
+ node->full_name, key, ret);
return ret;
}
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 74d2739..457deb7 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -882,6 +882,7 @@
if (rlen <= 2) {
short_response = 1;
+ pkt_size = rlen;
rx_byte = 4;
} else {
short_response = 0;
@@ -905,32 +906,30 @@
while (!end) {
pr_debug("%s: rlen=%d pkt_size=%d rx_byte=%d\n",
__func__, rlen, pkt_size, rx_byte);
- if (!short_response) {
- max_pktsize[0] = pkt_size;
- mdss_dsi_buf_init(tp);
- ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
- if (!ret) {
- pr_err("%s: failed to add max_pkt_size\n",
- __func__);
- rp->len = 0;
- goto end;
- }
-
- mdss_dsi_wait4video_eng_busy(ctrl);
-
- mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
- ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
- if (IS_ERR_VALUE(ret)) {
- mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
- pr_err("%s: failed to tx max_pkt_size\n",
- __func__);
- rp->len = 0;
- goto end;
- }
- pr_debug("%s: max_pkt_size=%d sent\n",
- __func__, pkt_size);
+ max_pktsize[0] = pkt_size;
+ mdss_dsi_buf_init(tp);
+ ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+ if (!ret) {
+ pr_err("%s: failed to add max_pkt_size\n",
+ __func__);
+ rp->len = 0;
+ goto end;
}
+ mdss_dsi_wait4video_eng_busy(ctrl);
+
+ mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+ ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+ if (IS_ERR_VALUE(ret)) {
+ mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+ pr_err("%s: failed to tx max_pkt_size\n",
+ __func__);
+ rp->len = 0;
+ goto end;
+ }
+ pr_debug("%s: max_pkt_size=%d sent\n",
+ __func__, pkt_size);
+
mdss_dsi_buf_init(tp);
ret = mdss_dsi_cmd_dma_add(tp, cmds);
if (!ret) {
@@ -1381,8 +1380,10 @@
if (todo & DSI_EV_MDP_FIFO_UNDERFLOW) {
if (ctrl->recovery) {
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
mdss_dsi_sw_reset_restore(ctrl);
ctrl->recovery->fxn(ctrl->recovery->data);
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
}
}
@@ -1397,7 +1398,9 @@
spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
/* enable dsi error interrupt */
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
}
}
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index c14f936..da2ae5f 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -454,6 +454,7 @@
mfd->ext_ad_ctrl = -1;
mfd->bl_level = 0;
+ mfd->bl_level_prev_scaled = 0;
mfd->bl_scale = 1024;
mfd->bl_min_lvl = 30;
mfd->fb_imgType = MDP_RGBA_8888;
@@ -782,6 +783,7 @@
pdata = dev_get_platdata(&mfd->pdev->dev);
if ((pdata) && (pdata->set_backlight)) {
+ mfd->bl_level_prev_scaled = mfd->bl_level_scaled;
if (!IS_CALIB_MODE_BL(mfd))
mdss_fb_scale_bl(mfd, &temp);
/*
@@ -792,13 +794,13 @@
* as well as setting bl_level to bkl_lvl even though the
* backlight has been set to the scaled value.
*/
- if (mfd->bl_level_old == temp) {
+ if (mfd->bl_level_scaled == temp) {
mfd->bl_level = bkl_lvl;
return;
}
pdata->set_backlight(pdata, temp);
mfd->bl_level = bkl_lvl;
- mfd->bl_level_old = temp;
+ mfd->bl_level_scaled = temp;
if (mfd->mdp.update_ad_input) {
update_ad_input = mfd->mdp.update_ad_input;
@@ -821,7 +823,7 @@
if ((pdata) && (pdata->set_backlight)) {
mfd->bl_level = mfd->unset_bl_level;
pdata->set_backlight(pdata, mfd->bl_level);
- mfd->bl_level_old = mfd->unset_bl_level;
+ mfd->bl_level_scaled = mfd->unset_bl_level;
mfd->bl_updated = 1;
}
}
@@ -858,6 +860,13 @@
schedule_delayed_work(&mfd->idle_notify_work,
msecs_to_jiffies(mfd->idle_time));
}
+
+ mutex_lock(&mfd->bl_lock);
+ if (!mfd->bl_updated) {
+ mfd->bl_updated = 1;
+ mdss_fb_set_backlight(mfd, mfd->bl_level_prev_scaled);
+ }
+ mutex_unlock(&mfd->bl_lock);
break;
case FB_BLANK_VSYNC_SUSPEND:
@@ -879,8 +888,9 @@
mfd->op_enable = false;
curr_pwr_state = mfd->panel_power_on;
- mfd->panel_power_on = false;
mutex_lock(&mfd->bl_lock);
+ mdss_fb_set_backlight(mfd, 0);
+ mfd->panel_power_on = false;
mfd->bl_updated = 0;
mutex_unlock(&mfd->bl_lock);
@@ -1687,7 +1697,11 @@
u32 wait_for_finish = disp_commit->wait_for_finish;
int ret = 0;
- if (!mfd || (!mfd->op_enable) || (!mfd->panel_power_on))
+ if (!mfd || (!mfd->op_enable))
+ return -EPERM;
+
+ if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL)))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
@@ -1739,7 +1753,11 @@
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- if ((!mfd->op_enable) || (!mfd->panel_power_on))
+ if (!mfd->op_enable)
+ return -EPERM;
+
+ if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL)))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 3416b9e..ce0a7f9 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -189,7 +189,8 @@
u32 bl_min_lvl;
u32 unset_bl_level;
u32 bl_updated;
- u32 bl_level_old;
+ u32 bl_level_scaled;
+ u32 bl_level_prev_scaled;
struct mutex bl_lock;
struct mutex lock;
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 52c220f..fdfa2b0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1605,7 +1605,8 @@
if (mutex_lock_interruptible(&mdp5_data->ov_lock))
return;
- if (!mfd->panel_power_on) {
+ if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL))) {
mutex_unlock(&mdp5_data->ov_lock);
return;
}
diff --git a/fs/namei.c b/fs/namei.c
index c427919..a87e323 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2072,6 +2072,13 @@
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, nd);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -2547,6 +2554,13 @@
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
diff --git a/fs/open.c b/fs/open.c
index 5720854..56c8810 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1054,6 +1054,7 @@
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
}
+ security_file_close(filp);
fput(filp);
return retval;
}
diff --git a/include/linux/security.h b/include/linux/security.h
index b62f396..0fe0a70 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -26,6 +26,7 @@
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/bio.h>
struct linux_binprm;
struct cred;
@@ -1453,6 +1454,8 @@
void **value, size_t *len);
int (*inode_create) (struct inode *dir,
struct dentry *dentry, umode_t mode);
+ int (*inode_post_create) (struct inode *dir,
+ struct dentry *dentry, umode_t mode);
int (*inode_link) (struct dentry *old_dentry,
struct inode *dir, struct dentry *new_dentry);
int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
@@ -1503,6 +1506,8 @@
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
int (*dentry_open) (struct file *file, const struct cred *cred);
+ int (*file_close) (struct file *file);
+ bool (*allow_merge_bio)(struct bio *bio1, struct bio *bio2);
int (*task_create) (unsigned long clone_flags);
void (*task_free) (struct task_struct *task);
@@ -1722,6 +1727,9 @@
const struct qstr *qstr, char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -1766,6 +1774,9 @@
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_dentry_open(struct file *file, const struct cred *cred);
+int security_file_close(struct file *file);
+bool security_allow_merge_bio(struct bio *bio1, struct bio *bio2);
+
int security_task_create(unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -2060,6 +2071,13 @@
return 0;
}
+static inline int security_inode_post_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
@@ -2262,6 +2280,16 @@
return 0;
}
+static inline int security_file_close(struct file *file)
+{
+ return 0;
+}
+
+static inline bool security_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ return true; /* The default is to allow it for performance */
+}
+
static inline int security_task_create(unsigned long clone_flags)
{
return 0;
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index e8e932e..b581de8 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -382,6 +382,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_reset(struct spmi_controller *ctrl, u8 sid);
@@ -397,6 +398,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_sleep(struct spmi_controller *ctrl, u8 sid);
@@ -413,6 +415,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_wakeup(struct spmi_controller *ctrl, u8 sid);
@@ -428,6 +431,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_shutdown(struct spmi_controller *ctrl, u8 sid);
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 30e7d06..e627977 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -154,6 +154,7 @@
uint8_t buf_divert; /* if TRUE no vb2 buf done. */
/*Return values*/
uint32_t axi_stream_handle;
+ uint32_t burst_len;
};
struct msm_vfe_axi_stream_release_cmd {
@@ -225,6 +226,7 @@
uint8_t num_streams;
uint32_t stream_handle[MSM_ISP_STATS_MAX];
uint8_t enable;
+ uint32_t stats_burst_len;
};
enum msm_vfe_reg_cfg_type {
@@ -242,6 +244,7 @@
GET_MAX_CLK_RATE,
VFE_HW_UPDATE_LOCK,
VFE_HW_UPDATE_UNLOCK,
+ SET_WM_UB_SIZE,
};
struct msm_vfe_cfg_cmd2 {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cf82dbd..5bc3663 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1963,8 +1963,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -ENOBUFS;
cookie.msg = msg;
cookie.idx = key_idx;
@@ -5409,6 +5409,9 @@
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
+ if (!hdr)
+ break;
+
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
genlmsg_cancel(skb, hdr);
break;
@@ -5817,9 +5820,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_REMAIN_ON_CHANNEL);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -6100,9 +6102,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_FRAME);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
}
@@ -6662,9 +6663,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_PROBE_CLIENT);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 87547ca..94c06df 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -65,11 +65,45 @@
return is_all_idle;
}
+
+static bool cfg80211_is_all_countryie_ignore(void)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ bool is_all_countryie_ignore = true;
+
+ mutex_lock(&cfg80211_mutex);
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ cfg80211_lock_rdev(rdev);
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ wdev_lock(wdev);
+ if (!(wdev->wiphy->country_ie_pref &
+ NL80211_COUNTRY_IE_IGNORE_CORE)) {
+ is_all_countryie_ignore = false;
+ wdev_unlock(wdev);
+ cfg80211_unlock_rdev(rdev);
+ goto out;
+ }
+ wdev_unlock(wdev);
+ }
+ cfg80211_unlock_rdev(rdev);
+ }
+out:
+ mutex_unlock(&cfg80211_mutex);
+
+ return is_all_countryie_ignore;
+}
+
+
static void disconnect_work(struct work_struct *work)
{
if (!cfg80211_is_all_idle())
return;
+ if (cfg80211_is_all_countryie_ignore())
+ return;
+
regulatory_hint_disconnect();
}
diff --git a/security/security.c b/security/security.c
index cecd55e..cc355c0 100644
--- a/security/security.c
+++ b/security/security.c
@@ -471,6 +471,16 @@
}
EXPORT_SYMBOL_GPL(security_inode_create);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ if (security_ops->inode_post_create == NULL)
+ return 0;
+ return security_ops->inode_post_create(dir, dentry, mode);
+}
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
@@ -732,6 +742,22 @@
return fsnotify_perm(file, MAY_OPEN);
}
+int security_file_close(struct file *file)
+{
+ if (security_ops->file_close)
+ return security_ops->file_close(file);
+
+ return 0;
+}
+
+bool security_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ if (security_ops->allow_merge_bio)
+ return security_ops->allow_merge_bio(bio1, bio2);
+
+ return true;
+}
+
int security_task_create(unsigned long clone_flags)
{
return security_ops->task_create(clone_flags);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c868a74..50b003a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -82,6 +82,7 @@
#include <linux/export.h>
#include <linux/msg.h>
#include <linux/shm.h>
+#include <linux/pft.h>
#include "avc.h"
#include "objsec.h"
@@ -1617,9 +1618,15 @@
if (rc)
return rc;
- return avc_has_perm(newsid, sbsec->sid,
- SECCLASS_FILESYSTEM,
- FILESYSTEM__ASSOCIATE, &ad);
+ rc = avc_has_perm(newsid, sbsec->sid,
+ SECCLASS_FILESYSTEM,
+ FILESYSTEM__ASSOCIATE, &ad);
+ if (rc)
+ return rc;
+
+ rc = pft_inode_mknod(dir, dentry, 0, 0);
+
+ return rc;
}
/* Check whether a task can create a key. */
@@ -1678,6 +1685,12 @@
}
rc = avc_has_perm(sid, isec->sid, isec->sclass, av, &ad);
+ if (rc)
+ return rc;
+
+ if (kind == MAY_UNLINK)
+ rc = pft_inode_unlink(dir, dentry);
+
return rc;
}
@@ -2684,9 +2697,25 @@
static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ int ret;
+
+ ret = pft_inode_create(dir, dentry, mode);
+ if (ret < 0)
+ return ret;
+
return may_create(dir, dentry, SECCLASS_FILE);
}
+static int selinux_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ int ret;
+
+ ret = pft_inode_post_create(dir, dentry, mode);
+
+ return ret;
+}
+
static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
return may_link(dir, old_dentry, MAY_LINK);
@@ -2720,6 +2749,12 @@
static int selinux_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
struct inode *new_inode, struct dentry *new_dentry)
{
+ int rc;
+
+ rc = pft_inode_rename(old_inode, old_dentry, new_inode, new_dentry);
+ if (rc)
+ return rc;
+
return may_rename(old_inode, old_dentry, new_inode, new_dentry);
}
@@ -2800,6 +2835,10 @@
{
const struct cred *cred = current_cred();
+ if (pft_inode_set_xattr(dentry, name) < 0)
+ return -EACCES;
+
+
if (!strncmp(name, XATTR_SECURITY_PREFIX,
sizeof XATTR_SECURITY_PREFIX - 1)) {
if (!strcmp(name, XATTR_NAME_CAPS)) {
@@ -3023,11 +3062,16 @@
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
u32 sid = current_sid();
+ int ret;
if (!mask)
/* No permission to check. Existence test. */
return 0;
+ ret = pft_file_permission(file, mask);
+ if (ret < 0)
+ return ret;
+
if (sid == fsec->sid && fsec->isid == isec->sid &&
fsec->pseqno == avc_policy_seqno())
/* No change since dentry_open check. */
@@ -3294,6 +3338,11 @@
struct file_security_struct *fsec;
struct inode *inode;
struct inode_security_struct *isec;
+ int ret;
+
+ ret = pft_file_open(file, cred);
+ if (ret < 0)
+ return ret;
inode = file->f_path.dentry->d_inode;
fsec = file->f_security;
@@ -3318,6 +3367,16 @@
return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
}
+static int selinux_file_close(struct file *file)
+{
+ return pft_file_close(file);
+}
+
+static bool selinux_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ return pft_allow_merge_bio(bio1, bio2);
+}
+
/* task security operations */
static int selinux_task_create(unsigned long clone_flags)
@@ -5629,6 +5688,7 @@
.inode_free_security = selinux_inode_free_security,
.inode_init_security = selinux_inode_init_security,
.inode_create = selinux_inode_create,
+ .inode_post_create = selinux_inode_post_create,
.inode_link = selinux_inode_link,
.inode_unlink = selinux_inode_unlink,
.inode_symlink = selinux_inode_symlink,
@@ -5664,6 +5724,8 @@
.file_receive = selinux_file_receive,
.dentry_open = selinux_dentry_open,
+ .file_close = selinux_file_close,
+ .allow_merge_bio = selinux_allow_merge_bio,
.task_create = selinux_task_create,
.cred_alloc_blank = selinux_cred_alloc_blank,
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 4c5d327..5602dd1 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -2555,9 +2555,10 @@
WCD9XXX_CLSH_STATE_LO,
WCD9XXX_CLSH_REQ_ENABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
- pr_debug("%s: sleeping 3 ms after %s PA turn on\n",
+ pr_debug("%s: sleeping 5 ms after %s PA turn on\n",
__func__, w->name);
- usleep_range(3000, 3000);
+ /* Wait for CnP time after PA enable */
+ usleep_range(5000, 5100);
break;
case SND_SOC_DAPM_POST_PMD:
wcd9xxx_clsh_fsm(codec, &taiko->clsh_d,
@@ -2565,6 +2566,10 @@
WCD9XXX_CLSH_REQ_DISABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x00);
+ pr_debug("%s: sleeping 5 ms after %s PA turn off\n",
+ __func__, w->name);
+ /* Wait for CnP time after PA disable */
+ usleep_range(5000, 5100);
break;
}
return 0;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index fce1940..f6702c5 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -129,6 +129,8 @@
uint32_t stream_available;
uint32_t next_stream;
+ uint64_t marker_timestamp;
+
struct msm_compr_gapless_state gapless_state;
atomic_t start;
@@ -1064,6 +1066,7 @@
prtd->app_pointer = 0;
prtd->bytes_received = 0;
prtd->bytes_sent = 0;
+ prtd->marker_timestamp = 0;
atomic_set(&prtd->xrun, 0);
spin_unlock_irqrestore(&prtd->lock, flags);
@@ -1196,6 +1199,8 @@
prtd->first_buffer = 1;
prtd->last_buffer = 0;
prtd->gapless_state.gapless_transition = 1;
+ prtd->marker_timestamp = 0;
+
/*
Don't reset these as these vars map to
total_bytes_transferred and total_bytes_available
@@ -1251,23 +1256,23 @@
q6asm_stream_cmd_nowait(ac, CMD_PAUSE, ac->stream_id);
prtd->cmd_ack = 0;
spin_unlock_irqrestore(&prtd->lock, flags);
- pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
- __func__, ac->stream_id);
- q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
- wait_event_timeout(prtd->flush_wait,
- prtd->cmd_ack, 1 * HZ / 4);
+ /*
+ * Cache the DSP session time as the last known timestamp
+ */
+ q6asm_get_session_time(prtd->audio_client,
+ &prtd->marker_timestamp);
spin_lock_irqsave(&prtd->lock, flags);
/*
- Don't reset these as these vars map to
- total_bytes_transferred and total_bytes_available
- directly, only total_bytes_transferred will be updated
- in the next avail() ioctl
- prtd->copied_total = 0;
- prtd->bytes_received = 0;
- do not reset prtd->bytes_sent as well as the same
- session is used for gapless playback
- */
+ * Don't reset these as these vars map to
+ * total_bytes_transferred and total_bytes_available.
+ * Just total_bytes_transferred will be updated
+ * in the next avail() ioctl.
+ * prtd->copied_total = 0;
+ * prtd->bytes_received = 0;
+ * do not reset prtd->bytes_sent as well as the same
+ * session is used for gapless playback
+ */
prtd->byte_offset = 0;
prtd->app_pointer = 0;
@@ -1275,8 +1280,15 @@
prtd->last_buffer = 0;
atomic_set(&prtd->drain, 0);
atomic_set(&prtd->xrun, 1);
- q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
spin_unlock_irqrestore(&prtd->lock, flags);
+
+ pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
+ __func__, ac->stream_id);
+ q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
+ wait_event_timeout(prtd->flush_wait,
+ prtd->cmd_ack, 1 * HZ / 4);
+
+ q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
}
prtd->cmd_interrupt = 0;
break;
@@ -1404,6 +1416,8 @@
__func__, timestamp);
return -EAGAIN;
}
+ } else {
+ timestamp = prtd->marker_timestamp;
}
/* DSP returns timestamp in usec */
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 4893990..1553d1c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -3971,50 +3971,36 @@
{"AUDIO_REF_EC_UL1 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL1 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL1 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL1 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL1 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL2 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL2 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL2 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL2 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL2 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL2 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL4 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL4 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL4 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL4 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL4 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL4 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL5 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL5 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL5 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL5 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL5 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL5 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL6 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL6 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL6 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL6 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL6 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL6 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL8 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL8 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL8 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL8 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL8 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL8 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL9 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL9 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL9 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 7c6f0ea..a5d42d5 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1703,25 +1703,22 @@
}
ret = dpcm_be_dai_prepare(fe, substream->stream);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
+ fe->dai_link->name);
goto out;
+ }
/* call prepare on the frontend */
if (!fe->fe_compr) {
ret = soc_pcm_prepare(substream);
if (ret < 0) {
- dev_err(fe->dev,"ASoC: prepare FE %s failed\n",
+ dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
fe->dai_link->name);
goto out;
}
}
- ret = soc_pcm_prepare(substream);
- if (ret < 0) {
- dev_err(fe->dev,"dpcm: prepare FE %s failed\n", fe->dai_link->name);
- goto out;
- }
-
/* run the stream event for each BE */
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
dpcm_dapm_stream_event(fe, stream,