Merge "rcu: Update RCU_FAST_NO_HZ tracing for lazy callbacks"
diff --git a/arch/arm/mach-msm/qdsp6v2/voice_svc.c b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
old mode 100644
new mode 100755
index 92b3003..5bf86dc
--- a/arch/arm/mach-msm/qdsp6v2/voice_svc.c
+++ b/arch/arm/mach-msm/qdsp6v2/voice_svc.c
@@ -56,8 +56,15 @@
static struct voice_svc_device *voice_svc_dev;
static struct class *voice_svc_class;
+static bool reg_dummy_sess;
+static void *dummy_q6_mvm;
+static void *dummy_q6_cvs;
dev_t device_num;
+static int voice_svc_dummy_reg(void);
+static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data,
+ void *priv);
+
static int32_t qdsp_apr_callback(struct apr_client_data *data, void *priv)
{
struct voice_svc_prvt *prtd;
@@ -127,6 +134,12 @@
return 0;
}
+static int32_t qdsp_dummy_apr_callback(struct apr_client_data *data, void *priv)
+{
+ /* Do Nothing */
+ return 0;
+}
+
static void voice_svc_update_hdr(struct voice_svc_cmd_request* apr_req_data,
struct apr_data *aprdata,
struct voice_svc_prvt *prtd)
@@ -223,6 +236,13 @@
goto done;
}
+ if (src_port == (APR_MAX_PORTS - 1)) {
+ pr_err("%s: SRC port reserved for dummy session\n", __func__);
+ pr_err("%s: Unable to register %s\n", __func__, svc);
+ ret = -EINVAL;
+ goto done;
+ }
+
*handle = apr_register("ADSP",
svc, qdsp_apr_callback,
((src_port) << 8 | 0x0001),
@@ -449,6 +469,37 @@
return ret;
}
+static int voice_svc_dummy_reg(void)
+{
+ uint32_t src_port = APR_MAX_PORTS - 1;
+
+ pr_debug("%s\n", __func__);
+ dummy_q6_mvm = apr_register("ADSP", "MVM",
+ qdsp_dummy_apr_callback,
+ src_port,
+ NULL);
+ if (dummy_q6_mvm == NULL) {
+ pr_err("%s: Unable to register dummy MVM\n", __func__);
+ goto err;
+ }
+
+ dummy_q6_cvs = apr_register("ADSP", "CVS",
+ qdsp_dummy_apr_callback,
+ src_port,
+ NULL);
+ if (dummy_q6_cvs == NULL) {
+ pr_err("%s: Unable to register dummy CVS\n", __func__);
+ goto err;
+ }
+ return 0;
+err:
+ if (dummy_q6_mvm != NULL) {
+ apr_deregister(dummy_q6_mvm);
+ dummy_q6_mvm = NULL;
+ }
+ return -EINVAL;
+}
+
static int voice_svc_open(struct inode *inode, struct file *file)
{
struct voice_svc_prvt *prtd = NULL;
@@ -472,6 +523,16 @@
file->private_data = (void*)prtd;
+ /* Current APR implementation doesn't support session based
+ * multiple service registrations. The apr_deregister()
+ * function sets the destination and client IDs to zero, if
+ * deregister is called for a single service instance.
+ * To avoid this, register for additional services.
+ */
+ if (!reg_dummy_sess) {
+ voice_svc_dummy_reg();
+ reg_dummy_sess = true;
+ }
return 0;
}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index aa2551a..0b193a0 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
#include "ion_priv.h"
void *ion_heap_map_kernel(struct ion_heap *heap,
@@ -107,16 +108,15 @@
* chunks to minimize the number of memsets and vmaps/vunmaps.
*
* Note that the `pages' array should be composed of all 4K pages.
+ *
+ * NOTE: This function does not guarantee synchronization of the caches
+ * and thus caller is responsible for handling any cache maintenance
+ * operations needed.
*/
int ion_heap_pages_zero(struct page **pages, int num_pages)
{
- int i, j, k, npages_to_vmap;
+ int i, j, npages_to_vmap;
void *ptr = NULL;
- /*
- * It's cheaper just to use writecombine memory and skip the
- * cache vs. using a cache memory and trying to flush it afterwards
- */
- pgprot_t pgprot = pgprot_writecombine(pgprot_kernel);
/*
* As an optimization, we manually zero out all of the pages
@@ -132,7 +132,7 @@
for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
++j) {
ptr = vmap(&pages[i], npages_to_vmap,
- VM_IOREMAP, pgprot);
+ VM_IOREMAP, PAGE_KERNEL);
if (ptr)
break;
else
@@ -141,21 +141,6 @@
if (!ptr)
return -ENOMEM;
- /*
- * We have to invalidate the cache here because there
- * might be dirty lines to these physical pages (which
- * we don't care about) that could get written out at
- * any moment.
- */
- for (k = 0; k < npages_to_vmap; k++) {
- void *p = kmap_atomic(pages[i + k]);
- phys_addr_t phys = page_to_phys(
- pages[i + k]);
-
- dmac_inv_range(p, p + PAGE_SIZE);
- outer_inv_range(phys, phys + PAGE_SIZE);
- kunmap_atomic(p);
- }
memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
vunmap(ptr);
}
@@ -163,11 +148,12 @@
return 0;
}
-static int ion_heap_alloc_pages_mem(int page_tbl_size,
- struct pages_mem *pages_mem)
+int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
{
struct page **pages;
+ unsigned int page_tbl_size;
pages_mem->free_fn = kfree;
+ page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
if (page_tbl_size > SZ_8K) {
/*
* Do fallback to ensure we have a balance between
@@ -191,7 +177,7 @@
return 0;
}
-static void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
+void ion_heap_free_pages_mem(struct pages_mem *pages_mem)
{
pages_mem->free_fn(pages_mem->pages);
}
@@ -201,15 +187,17 @@
int i, ret;
struct pages_mem pages_mem;
int npages = 1 << order;
- int page_tbl_size = sizeof(struct page *) * npages;
+ pages_mem.size = npages * PAGE_SIZE;
- if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
+ if (ion_heap_alloc_pages_mem(&pages_mem))
return -ENOMEM;
for (i = 0; i < (1 << order); ++i)
pages_mem.pages[i] = page + i;
ret = ion_heap_pages_zero(pages_mem.pages, npages);
+ dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size,
+ DMA_BIDIRECTIONAL);
ion_heap_free_pages_mem(&pages_mem);
return ret;
}
@@ -218,16 +206,12 @@
{
struct sg_table *table = buffer->sg_table;
struct scatterlist *sg;
- int i, j, ret = 0, npages = 0, page_tbl_size = 0;
+ int i, j, ret = 0, npages = 0;
struct pages_mem pages_mem;
- for_each_sg(table->sgl, sg, table->nents, i) {
- unsigned long len = sg_dma_len(sg);
- int nrpages = len >> PAGE_SHIFT;
- page_tbl_size += sizeof(struct page *) * nrpages;
- }
+ pages_mem.size = PAGE_ALIGN(buffer->size);
- if (ion_heap_alloc_pages_mem(page_tbl_size, &pages_mem))
+ if (ion_heap_alloc_pages_mem(&pages_mem))
return -ENOMEM;
for_each_sg(table->sgl, sg, table->nents, i) {
@@ -239,6 +223,8 @@
}
ret = ion_heap_pages_zero(pages_mem.pages, npages);
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
ion_heap_free_pages_mem(&pages_mem);
return ret;
}
diff --git a/drivers/gpu/ion/ion_page_pool.c b/drivers/gpu/ion/ion_page_pool.c
index cc2a36d..a1845de 100644
--- a/drivers/gpu/ion/ion_page_pool.c
+++ b/drivers/gpu/ion/ion_page_pool.c
@@ -32,7 +32,6 @@
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
struct page *page;
- struct scatterlist sg;
page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
@@ -43,11 +42,6 @@
if (ion_heap_high_order_page_zero(page, pool->order))
goto error_free_pages;
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
- sg_dma_address(&sg) = sg_phys(&sg);
- dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
-
return page;
error_free_pages:
__free_pages(page, pool->order);
@@ -104,22 +98,25 @@
return page;
}
-void *ion_page_pool_alloc(struct ion_page_pool *pool)
+void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
{
struct page *page = NULL;
BUG_ON(!pool);
- mutex_lock(&pool->mutex);
- if (pool->high_count)
- page = ion_page_pool_remove(pool, true);
- else if (pool->low_count)
- page = ion_page_pool_remove(pool, false);
- mutex_unlock(&pool->mutex);
+ *from_pool = true;
- if (!page)
+ if (mutex_trylock(&pool->mutex)) {
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+ }
+ if (!page) {
page = ion_page_pool_alloc_pages(pool);
-
+ *from_pool = false;
+ }
return page;
}
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index c57efc1..1f78cb1 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -2,7 +2,7 @@
* drivers/gpu/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -223,6 +223,7 @@
struct pages_mem {
struct page **pages;
+ u32 size;
void (*free_fn) (const void *);
};
@@ -237,6 +238,8 @@
int ion_heap_pages_zero(struct page **pages, int num_pages);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_high_order_page_zero(struct page *page, int order);
+int ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
+void ion_heap_free_pages_mem(struct pages_mem *pages_mem);
/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
@@ -374,7 +377,7 @@
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
-void *ion_page_pool_alloc(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool);
void ion_page_pool_free(struct ion_page_pool *, struct page *);
/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index cfdd5f4..b7ad01f 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -29,10 +29,10 @@
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+static unsigned int high_order_gfp_flags = (GFP_HIGHUSER |
__GFP_NOWARN | __GFP_NORETRY |
__GFP_NO_KSWAPD) & ~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
+static unsigned int low_order_gfp_flags = (GFP_HIGHUSER |
__GFP_NOWARN);
static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
@@ -59,13 +59,15 @@
struct page_info {
struct page *page;
+ bool from_pool;
unsigned int order;
struct list_head list;
};
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
- unsigned long order)
+ unsigned long order,
+ bool *from_pool)
{
bool cached = ion_buffer_cached(buffer);
bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -76,7 +78,7 @@
pool = heap->uncached_pools[order_to_index(order)];
else
pool = heap->cached_pools[order_to_index(order)];
- page = ion_page_pool_alloc(pool);
+ page = ion_page_pool_alloc(pool, from_pool);
if (!page)
return 0;
@@ -119,14 +121,14 @@
struct page *page;
struct page_info *info;
int i;
-
+ bool from_pool;
for (i = 0; i < num_orders; i++) {
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
continue;
- page = alloc_buffer_page(heap, buffer, orders[i]);
+ page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
if (!page)
continue;
@@ -134,11 +136,39 @@
if (info) {
info->page = page;
info->order = orders[i];
+ info->from_pool = from_pool;
}
return info;
}
return NULL;
}
+static unsigned int process_info(struct page_info *info,
+ struct scatterlist *sg,
+ struct scatterlist *sg_sync,
+ struct pages_mem *data, unsigned int i)
+{
+ struct page *page = info->page;
+ unsigned int j;
+
+ if (sg_sync) {
+ sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg_dma_address(sg_sync) = page_to_phys(page);
+ }
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t
+ * that is valid for the the targeted device, but this works
+ * on the currently targeted hardware.
+ */
+ sg_dma_address(sg) = page_to_phys(page);
+ if (data) {
+ for (j = 0; j < (1 << info->order); ++j)
+ data->pages[i++] = nth_page(page, j);
+ }
+ list_del(&info->list);
+ kfree(info);
+ return i;
+}
static int ion_system_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
@@ -149,29 +179,51 @@
struct ion_system_heap,
heap);
struct sg_table *table;
+ struct sg_table table_sync = {0};
struct scatterlist *sg;
+ struct scatterlist *sg_sync;
int ret;
struct list_head pages;
+ struct list_head pages_from_pool;
struct page_info *info, *tmp_info;
int i = 0;
+ unsigned int nents_sync = 0;
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
+ struct pages_mem data;
+ unsigned int sz;
bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ data.size = 0;
INIT_LIST_HEAD(&pages);
+ INIT_LIST_HEAD(&pages_from_pool);
while (size_remaining > 0) {
info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
if (!info)
goto err;
- list_add_tail(&info->list, &pages);
- size_remaining -= (1 << info->order) * PAGE_SIZE;
+
+ sz = (1 << info->order) * PAGE_SIZE;
+
+ if (info->from_pool) {
+ list_add_tail(&info->list, &pages_from_pool);
+ } else {
+ list_add_tail(&info->list, &pages);
+ data.size += sz;
+ ++nents_sync;
+ }
+ size_remaining -= sz;
max_order = info->order;
i++;
}
+ ret = ion_heap_alloc_pages_mem(&data);
+
+ if (ret)
+ goto err;
+
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
- goto err;
+ goto err_free_data_pages;
if (split_pages)
ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
@@ -182,32 +234,91 @@
if (ret)
goto err1;
- sg = table->sgl;
- list_for_each_entry_safe(info, tmp_info, &pages, list) {
- struct page *page = info->page;
- if (split_pages) {
- for (i = 0; i < (1 << info->order); i++) {
- sg_set_page(sg, page + i, PAGE_SIZE, 0);
- sg = sg_next(sg);
- }
- } else {
- sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
- 0);
- sg = sg_next(sg);
- }
- list_del(&info->list);
- kfree(info);
+ if (nents_sync) {
+ ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
+ if (ret)
+ goto err_free_sg;
}
+ i = 0;
+ sg = table->sgl;
+ sg_sync = table_sync.sgl;
+
+ /*
+ * We now have two separate lists. One list contains pages from the
+ * pool and the other pages from buddy. We want to merge these
+ * together while preserving the ordering of the pages (higher order
+ * first).
+ */
+ do {
+ if (!list_empty(&pages))
+ info = list_first_entry(&pages, struct page_info, list);
+ else
+ info = NULL;
+ if (!list_empty(&pages_from_pool))
+ tmp_info = list_first_entry(&pages_from_pool,
+ struct page_info, list);
+ else
+ tmp_info = NULL;
+
+ if (info && tmp_info) {
+ if (info->order >= tmp_info->order) {
+ i = process_info(info, sg, sg_sync, &data, i);
+ sg_sync = sg_next(sg_sync);
+ } else {
+ i = process_info(tmp_info, sg, NULL, NULL, i);
+ }
+ } else if (info) {
+ i = process_info(info, sg, sg_sync, &data, i);
+ sg_sync = sg_next(sg_sync);
+ } else if (tmp_info) {
+ i = process_info(tmp_info, sg, NULL, NULL, i);
+ } else {
+ BUG();
+ }
+ sg = sg_next(sg);
+
+ } while (sg);
+
+ ret = ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
+ if (ret) {
+ pr_err("Unable to zero pages\n");
+ goto err_free_sg2;
+ }
+
+ if (nents_sync)
+ dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents,
+ DMA_BIDIRECTIONAL);
+
buffer->priv_virt = table;
+ if (nents_sync)
+ sg_free_table(&table_sync);
+ ion_heap_free_pages_mem(&data);
return 0;
+err_free_sg2:
+ /* We failed to zero buffers. Bypass pool */
+ buffer->flags |= ION_FLAG_FREED_FROM_SHRINKER;
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
+ if (nents_sync)
+ sg_free_table(&table_sync);
+err_free_sg:
+ sg_free_table(table);
err1:
kfree(table);
+err_free_data_pages:
+ ion_heap_free_pages_mem(&data);
err:
list_for_each_entry_safe(info, tmp_info, &pages, list) {
free_buffer_page(sys_heap, buffer, info->page, info->order);
kfree(info);
}
+ list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
return -ENOMEM;
}
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index a563f68..0e38b18 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1447,7 +1447,6 @@
hfi->ltrcount = hal->ltrcount;
hfi->trustmode = hal->trustmode;
pkt->size += sizeof(u32) + sizeof(struct hfi_ltrmode);
- pr_err("SET LTR\n");
break;
}
case HAL_CONFIG_VENC_USELTRFRAME:
@@ -1461,7 +1460,6 @@
hfi->refltr = hal->refltr;
hfi->useconstrnt = hal->useconstrnt;
pkt->size += sizeof(u32) + sizeof(struct hfi_ltruse);
- pr_err("USE LTR\n");
break;
}
case HAL_CONFIG_VENC_MARKLTRFRAME:
@@ -1473,7 +1471,6 @@
hfi = (struct hfi_ltrmark *) &pkt->rg_property_data[1];
hfi->markframe = hal->markframe;
pkt->size += sizeof(u32) + sizeof(struct hfi_ltrmark);
- pr_err("MARK LTR\n");
break;
}
case HAL_PARAM_VENC_HIER_P_MAX_ENH_LAYERS:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index bcd13b8..701a8cc 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -723,7 +723,7 @@
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME,
.name = "H264 Use LTR",
- .type = V4L2_CTRL_TYPE_BUTTON,
+ .type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
.maximum = (MAX_LTR_FRAME_COUNT - 1),
.default_value = 0,
@@ -753,7 +753,7 @@
{
.id = V4L2_CID_MPEG_VIDC_VIDEO_MARKLTRFRAME,
.name = "H264 Mark LTR",
- .type = V4L2_CTRL_TYPE_BUTTON,
+ .type = V4L2_CTRL_TYPE_INTEGER,
.minimum = 0,
.maximum = (MAX_LTR_FRAME_COUNT - 1),
.default_value = 0,
@@ -2179,7 +2179,7 @@
}
case V4L2_CID_MPEG_VIDC_VIDEO_USELTRFRAME:
property_id = HAL_CONFIG_VENC_USELTRFRAME;
- useltr.refltr = ctrl->val;
+ useltr.refltr = (1 << ctrl->val);
useltr.useconstrnt = false;
useltr.frames = 0;
pdata = &useltr;
@@ -2302,7 +2302,6 @@
rc = call_hfi_op(hdev, session_set_property,
(void *)inst->session, property_id, pdata);
}
- pr_err("Returning from %s\n", __func__);
return rc;
}
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index d06ec85..2d260be 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -207,7 +207,7 @@
/* max 20mhz channel count */
#define WCNSS_MAX_CH_NUM 45
-#define WCNSS_MAX_PIL_RETRY 3
+#define WCNSS_MAX_PIL_RETRY 2
#define VALID_VERSION(version) \
((strncmp(version, "INVALID", WCNSS_VERSION_LEN)) ? 1 : 0)
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 94d48e6..7963ed4 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -2465,11 +2465,6 @@
int rc;
u8 chgr_sts, bat_if_sts;
- if ((qpnp_chg_is_usb_chg_plugged_in(chip) ||
- qpnp_chg_is_dc_chg_plugged_in(chip)) && chip->chg_done) {
- return POWER_SUPPLY_STATUS_FULL;
- }
-
rc = qpnp_chg_read(chip, &chgr_sts, INT_RT_STS(chip->chgr_base), 1);
if (rc) {
pr_err("failed to read interrupt sts %d\n", rc);
@@ -2487,10 +2482,14 @@
if (chgr_sts & FAST_CHG_ON_IRQ && bat_if_sts & BAT_FET_ON_IRQ)
return POWER_SUPPLY_STATUS_CHARGING;
- /* report full if state of charge is 100 and a charger is connected */
+ /*
+ * Report full if state of charge is 100 or chg_done is true
+ * when a charger is connected and boost is disabled
+ */
if ((qpnp_chg_is_usb_chg_plugged_in(chip) ||
- qpnp_chg_is_dc_chg_plugged_in(chip))
- && get_batt_capacity(chip) == 100) {
+ qpnp_chg_is_dc_chg_plugged_in(chip)) &&
+ (chip->chg_done || get_batt_capacity(chip) == 100)
+ && qpnp_chg_is_boost_en_set(chip) == 0) {
return POWER_SUPPLY_STATUS_FULL;
}
@@ -3242,10 +3241,10 @@
qpnp_chg_regulator_boost_enable(struct regulator_dev *rdev)
{
struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
+ int usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
int rc;
- if (qpnp_chg_is_usb_chg_plugged_in(chip) &&
- (chip->flags & BOOST_FLASH_WA)) {
+ if (usb_present && (chip->flags & BOOST_FLASH_WA)) {
if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
pr_debug("EXT OVP IC ISNS disabled\n");
@@ -3273,10 +3272,24 @@
}
}
- return qpnp_chg_masked_write(chip,
+ rc = qpnp_chg_masked_write(chip,
chip->boost_base + BOOST_ENABLE_CONTROL,
BOOST_PWR_EN,
BOOST_PWR_EN, 1);
+ if (rc) {
+ pr_err("failed to enable boost rc = %d\n", rc);
+ return rc;
+ }
+ /*
+ * update battery status when charger is connected and state is full
+ */
+ if (usb_present && (chip->chg_done
+ || (get_batt_capacity(chip) == 100)
+ || (get_prop_batt_status(chip) ==
+ POWER_SUPPLY_STATUS_FULL)))
+ power_supply_changed(&chip->batt_psy);
+
+ return rc;
}
/* Boost regulator operations */
@@ -3369,6 +3382,20 @@
qpnp_chg_usb_suspend_enable(chip, 0);
}
+ /*
+ * When a charger is connected,if state of charge is not full
+ * resumeing charging else update battery status
+ */
+ if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
+ if (get_batt_capacity(chip) < 100 || !chip->chg_done) {
+ chip->chg_done = false;
+ chip->resuming_charging = true;
+ qpnp_chg_set_appropriate_vbatdet(chip);
+ } else if (chip->chg_done) {
+ power_supply_changed(&chip->batt_psy);
+ }
+ }
+
if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
pr_debug("EXT OVP IC ISNS enable\n");
gpio_direction_output(chip->ext_ovp_isns_gpio, 1);
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 5686107..b570937 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -3102,6 +3102,7 @@
spin_lock_init(&dd->queue_lock);
mutex_init(&dd->core_lock);
+ init_waitqueue_head(&dd->continue_suspend);
if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
dd->mem_size, SPI_DRV_NAME)) {
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
index 653b187..4fc3909 100644
--- a/drivers/video/msm/mdss/dsi_v2.c
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -133,8 +133,8 @@
pdata->panel_info.panel_power_on = 0;
if (!pdata->panel_info.dynamic_switch_pending) {
if (pdata->panel_info.type == MIPI_CMD_PANEL)
- mdss_dsi_panel_reset(pdata, 0);
- dsi_ctrl_gpio_free(ctrl_pdata);
+ dsi_ctrl_gpio_free(ctrl_pdata);
+ mdss_dsi_panel_reset(pdata, 0);
}
}
return rc;
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index e619e6b..6808313 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -405,14 +405,6 @@
return;
}
- mutex_lock(&ctx->clk_mtx);
- if (ctx->clk_enabled) {
- mutex_unlock(&ctx->clk_mtx);
- pr_warn("Cannot enter ulps mode if DSI clocks are on\n");
- return;
- }
- mutex_unlock(&ctx->clk_mtx);
-
if (!ctx->panel_on) {
pr_err("Panel is off. skipping ULPS configuration\n");
return;