Merge "qseecom: Check the return value of "ion_phys""
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 344b57e..969ddcb 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,6 +63,29 @@
8 - SIGSEGV faults
16 - SIGBUS faults
+config FORCE_PAGES
+ bool "Force lowmem to be mapped with 4K pages"
+ help
+ There are some advanced debug features that can only be done when
+ memory is mapped with pages instead of sections. Enable this option
+ to always map lowmem with pages. This may have a performance
+ cost due to increased TLB pressure.
+
+ If unsure say N.
+
+config FREE_PAGES_RDONLY
+ bool "Set pages as read only while on the buddy list"
+ select FORCE_PAGES
+ select PAGE_POISONING
+ help
+ Pages are always mapped in the kernel. This means that anyone
+ can write to the page if they have the address. Enable this option
+ to mark pages as read only to trigger a fault if any code attempts
+ to write to a page on the buddy list. This may have a performance
+ impact.
+
+ If unsure, say N.
+
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/boot/dts/msm-pma8084.dtsi b/arch/arm/boot/dts/msm-pma8084.dtsi
index c070443..93f05c4 100644
--- a/arch/arm/boot/dts/msm-pma8084.dtsi
+++ b/arch/arm/boot/dts/msm-pma8084.dtsi
@@ -837,5 +837,53 @@
status = "disabled";
};
};
+
+ pma8084_lpg1: pwm@b100 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb100 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <0>;
+ };
+
+ pma8084_lpg2: pwm@b200 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb200 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <1>;
+ };
+
+ pma8084_lpg3: pwm@b300 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb300 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <2>;
+ };
+
+ pma8084_lpg4: pwm@b400 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb400 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <3>;
+ };
+
+ pma8084_lpg5: pwm@b500 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb500 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <4>;
+ };
+
+ pma8084_lpg6: pwm@b600 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xb600 0x100>,
+ <0xb042 0x7e>;
+ reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base";
+ qcom,channel-id = <5>;
+ };
};
};
diff --git a/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941.dtsi b/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941.dtsi
index d679880..df04f82 100644
--- a/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-ac-pma8084-pm8941.dtsi
@@ -74,6 +74,54 @@
qcom,auto-mode-enable = <0>;
status = "okay";
};
+
+ pwm@b100 {
+ status = "disabled";
+ };
+
+ pwm@b200 {
+ status = "disabled";
+ };
+
+ pwm@b300 {
+ status = "disabled";
+ };
+
+ pwm@b400 {
+ status = "disabled";
+ };
+
+ pwm@b500 {
+ status = "disabled";
+ };
+
+ pwm@b600 {
+ status = "disabled";
+ };
+
+ pwm@b700 {
+ status = "disabled";
+ };
+
+ pwm@b800 {
+ status = "disabled";
+ };
+
+ pwm@e400 {
+ status = "disabled";
+ };
+
+ pwm@e500 {
+ status = "disabled";
+ };
+
+ pwm@e600 {
+ status = "disabled";
+ };
+
+ pwm@e700 {
+ status = "disabled";
+ };
};
&pma8084_mvs1 {
diff --git a/arch/arm/boot/dts/msm8974pro-pma8084-mtp.dtsi b/arch/arm/boot/dts/msm8974pro-pma8084-mtp.dtsi
index 680674d..12ed7d4 100644
--- a/arch/arm/boot/dts/msm8974pro-pma8084-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pma8084-mtp.dtsi
@@ -200,9 +200,12 @@
};
gpio@c700 { /* GPIO 8 */
- /* Unused */
- qcom,mode = <0>; /* Digital input */
- qcom,pull = <0>; /* Pull up 30 uA */
+ qcom,mode = <1>; /* Digital output */
+ qcom,output-type = <0>; /* CMOS logic */
+ qcom,invert = <0>; /* Do not invert the output */
+ qcom,vin-sel = <2>; /* PMA8084 S4 = 1.8V */
+ qcom,src-sel = <2>; /* Special function 1=LPG 3 */
+ qcom,out-strength = <3>; /* High drive Strength*/
qcom,master-en = <1>;
};
diff --git a/arch/arm/boot/dts/msm8974pro-pma8084.dtsi b/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
index c06ebf8..ab4ffb5 100644
--- a/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pma8084.dtsi
@@ -210,3 +210,7 @@
status = "ok";
};
};
+
+&dsi_generic_720p_cmd {
+ qcom,mdss-dsi-bl-pmic-bank-select = <3>;
+};
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 0e8f4916..0fd3191 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -385,4 +385,12 @@
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+#ifdef CONFIG_FREE_PAGES_RDONLY
+#define mark_addr_rdonly(a) set_memory_ro((unsigned long)a, 1);
+#define mark_addr_rdwrite(a) set_memory_rw((unsigned long)a, 1);
+#else
+#define mark_addr_rdonly(a)
+#define mark_addr_rdwrite(a)
+#endif
+
#endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 269ae80..3d52735 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -344,11 +344,13 @@
unsigned long size = PAGE_SIZE*numpages; \
unsigned end = start + size; \
\
- if (start < MODULES_VADDR || start >= MODULES_END) \
- return -EINVAL;\
+ if (!IS_ENABLED(CONFIG_FORCE_PAGES)) { \
+ if (start < MODULES_VADDR || start >= MODULES_END) \
+ return -EINVAL;\
\
- if (end < MODULES_VADDR || end >= MODULES_END) \
- return -EINVAL; \
+ if (end < MODULES_VADDR || end >= MODULES_END) \
+ return -EINVAL; \
+ } \
\
apply_to_page_range(&init_mm, start, size, callback, NULL); \
flush_tlb_kernel_range(start, end); \
@@ -1507,6 +1509,100 @@
}
}
+#ifdef CONFIG_FORCE_PAGES
+/*
+ * remap a PMD into pages
+ * We split a single pmd here none of this two pmd nonsense
+ */
+static noinline void split_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn,
+ const struct mem_type *type)
+{
+ pte_t *pte, *start_pte;
+
+ start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+
+ pte = start_pte;
+
+ do {
+ set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+ *pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
+ mb();
+ flush_pmd_entry(pmd);
+ flush_tlb_all();
+}
+
+/*
+ * It's significantly easier to remap as pages later after all memory is
+ * mapped. Everything is sections so all we have to do is split
+ */
+static void __init remap_pages(void)
+{
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg) {
+ phys_addr_t phys_start = reg->base;
+ phys_addr_t phys_end = reg->base + reg->size;
+ unsigned long addr = (unsigned long)__va(phys_start);
+ unsigned long end = (unsigned long)__va(phys_end);
+ pmd_t *pmd = NULL;
+ unsigned long next;
+ unsigned long pfn = __phys_to_pfn(phys_start);
+ bool fixup = false;
+ unsigned long saved_start = addr;
+
+ if (phys_end > arm_lowmem_limit)
+ end = (unsigned long)__va(arm_lowmem_limit);
+ if (phys_start >= phys_end)
+ break;
+
+ pmd = pmd_offset(
+ pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+#ifndef CONFIG_ARM_LPAE
+ if (addr & SECTION_SIZE) {
+ fixup = true;
+ pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
+ pmd++;
+ }
+
+ if (end & SECTION_SIZE)
+ pmd_empty_section_gap(end);
+#endif
+
+ do {
+ next = addr + SECTION_SIZE;
+
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ split_pmd(pmd, addr, next, pfn,
+ &mem_types[MT_MEMORY]);
+ pmd++;
+ pfn += SECTION_SIZE >> PAGE_SHIFT;
+
+ } while (addr = next, addr < end);
+
+ if (fixup) {
+ /*
+ * Put a faulting page table here to avoid detecting no
+ * pmd when accessing an odd section boundary. This
+ * needs to be faulting to help catch errors and avoid
+ * speculation
+ */
+ pmd = pmd_off_k(saved_start);
+ pmd[0] = pmd[1] & ~1;
+ }
+ }
+}
+#else
+static void __init remap_pages(void)
+{
+
+}
+#endif
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1521,6 +1617,7 @@
prepare_page_table();
map_lowmem();
dma_contiguous_remap();
+ remap_pages();
devicemaps_init(mdesc);
kmap_init();
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index ab21404..87a4ab9 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -45,6 +45,7 @@
#define AES_XTS_IV_LEN 16
#define DM_REQ_CRYPT_ERROR -1
+#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2
struct req_crypt_result {
struct completion completion;
@@ -105,9 +106,10 @@
atomic_dec(&io->pending);
- if (error < 0)
+ if (error < 0) {
dm_kill_unmapped_request(clone, error);
- else
+ mempool_free(io, req_io_pool);
+ } else
dm_dispatch_request(clone);
}
@@ -158,8 +160,6 @@
struct req_crypt_result result;
struct scatterlist *req_sg_read = NULL;
int err = 0;
- struct req_iterator iter2;
- struct bio_vec *bvec = NULL;
u8 IV[AES_XTS_IV_LEN];
if (io) {
@@ -217,11 +217,12 @@
goto ablkcipher_req_alloc_failure;
}
-
- /* total bytes to copy */
- bvec = NULL;
- rq_for_each_segment(bvec, clone, iter2) {
- total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
+ total_bytes_in_req = clone->__data_len;
+ if (total_bytes_in_req > REQ_DM_512_KB) {
+ DMERR("%s total_bytes_in_req > 512 KB %d",
+ __func__, total_bytes_in_req);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
}
memset(IV, 0, AES_XTS_IV_LEN);
@@ -263,7 +264,8 @@
kfree(req_sg_read);
submit_request:
- io->error = err;
+ if (io)
+ io->error = err;
req_crypt_dec_pending_decrypt(io);
}
@@ -277,7 +279,8 @@
struct bio *bio_src = NULL;
unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
- struct req_iterator iter;
+ struct req_iterator iter = {0, NULL};
+ struct req_iterator iter1 = {0, NULL};
struct ablkcipher_request *req = NULL;
struct req_crypt_result result;
struct bio_vec *bvec = NULL;
@@ -350,20 +353,28 @@
goto ablkcipher_req_alloc_failure;
}
+ total_bytes_in_req = clone->__data_len;
+ if (total_bytes_in_req > REQ_DM_512_KB) {
+ DMERR("%s total_bytes_in_req > 512 KB %d",
+ __func__, total_bytes_in_req);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
rq_for_each_segment(bvec, clone, iter) {
-try_again:
if (bvec->bv_len > remaining_size) {
page = NULL;
- page = mempool_alloc(req_page_pool, gfp_mask);
- if (!page) {
- DMERR("%s Crypt page alloc failed", __func__);
- congestion_wait(BLK_RW_ASYNC, HZ/100);
- goto try_again;
+ while (page == NULL) {
+ page = mempool_alloc(req_page_pool, gfp_mask);
+ if (!page) {
+ DMERR("%s Crypt page alloc failed",
+ __func__);
+ congestion_wait(BLK_RW_ASYNC, HZ/100);
+ }
}
+
bvec->bv_page = page;
bvec->bv_offset = 0;
- total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
remaining_size = PAGE_SIZE - bvec->bv_len;
if (remaining_size < 0)
BUG();
@@ -371,7 +382,6 @@
bvec->bv_page = page;
bvec->bv_offset = PAGE_SIZE - remaining_size;
remaining_size = remaining_size - bvec->bv_len;
- total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
}
}
@@ -379,7 +389,7 @@
if ((total_sg_len_req_out <= 0) ||
(total_sg_len_req_out > MAX_SG_LIST)) {
DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
- error = DM_REQ_CRYPT_ERROR;
+ error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
goto ablkcipher_req_alloc_failure;
}
@@ -405,13 +415,13 @@
if (result.err) {
DMERR("%s error = %d encrypting the request\n",
__func__, result.err);
- error = DM_REQ_CRYPT_ERROR;
+ error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
goto ablkcipher_req_alloc_failure;
}
break;
default:
- error = DM_REQ_CRYPT_ERROR;
+ error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
goto ablkcipher_req_alloc_failure;
}
@@ -428,13 +438,25 @@
if (req)
ablkcipher_request_free(req);
+ if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
+ bvec = NULL;
+ rq_for_each_segment(bvec, clone, iter1) {
+ if (bvec->bv_offset == 0) {
+ mempool_free(bvec->bv_page, req_page_pool);
+ bvec->bv_page = NULL;
+ } else
+ bvec->bv_page = NULL;
+ }
+ }
+
kfree(req_sg_in);
kfree(req_sg_out);
submit_request:
- io->error = error;
+ if (io)
+ io->error = error;
req_crypt_dec_pending_encrypt(io);
}
@@ -449,7 +471,7 @@
else if (rq_data_dir(io->cloned_request) == READ)
req_cryptd_crypt_read_convert(io);
else
- DMERR("%s received non-write request for Clone %u\n",
+ DMERR("%s received non-read/write request for Clone %u\n",
__func__, (unsigned int)io->cloned_request);
}
@@ -484,6 +506,11 @@
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
+ /*
+ * Check for integer overflow, should never happen.
+ */
+ if (p->start_sect > (UINT_MAX - bio->bi_sector))
+ BUG();
bio->bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
@@ -543,9 +570,16 @@
union map_info *map_context)
{
struct req_dm_crypt_io *req_io = NULL;
- int error = DM_MAPIO_REMAPPED, copy_bio_sector_to_req = 0;
+ int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0;
struct bio *bio_src = NULL;
+ if ((rq_data_dir(clone) != READ) &&
+ (rq_data_dir(clone) != WRITE)) {
+ error = DM_REQ_CRYPT_ERROR;
+ DMERR("%s Unknown request\n", __func__);
+ goto submit_request;
+ }
+
req_io = mempool_alloc(req_io_pool, GFP_NOWAIT);
if (!req_io) {
DMERR("%s req_io allocation failed\n", __func__);
@@ -598,9 +632,6 @@
req_cryptd_queue_crypt(req_io);
error = DM_MAPIO_SUBMITTED;
goto submit_request;
- } else {
- error = DM_REQ_CRYPT_ERROR;
- DMERR("%s Unknown request\n", __func__);
}
submit_request:
@@ -608,22 +639,24 @@
}
-static int req_crypt_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned maxlen)
-{
- return 0;
-}
-
static void req_crypt_dtr(struct dm_target *ti)
{
- if (req_crypt_queue)
+ if (req_crypt_queue) {
destroy_workqueue(req_crypt_queue);
- if (req_io_pool)
+ req_crypt_queue = NULL;
+ }
+ if (req_io_pool) {
mempool_destroy(req_io_pool);
- if (req_page_pool)
+ req_io_pool = NULL;
+ }
+ if (req_page_pool) {
mempool_destroy(req_page_pool);
- if (tfm)
+ req_page_pool = NULL;
+ }
+ if (tfm) {
crypto_free_ablkcipher(tfm);
+ tfm = NULL;
+ }
}
@@ -635,71 +668,96 @@
{
unsigned long long tmpll;
char dummy;
+ int err = DM_REQ_CRYPT_ERROR;
- if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &dev)) {
- DMERR(" %s Device Lookup failed\n", __func__);
- return DM_REQ_CRYPT_ERROR;
+ if (argc < 5) {
+ DMERR(" %s Not enough args\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
}
- if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
- DMERR("%s Invalid device sector\n", __func__);
- return DM_REQ_CRYPT_ERROR;
+ if (argv[3]) {
+ if (dm_get_device(ti, argv[3],
+ dm_table_get_mode(ti->table), &dev)) {
+ DMERR(" %s Device Lookup failed\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ } else {
+ DMERR(" %s Arg[3] invalid\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
}
+
+ if (argv[4]) {
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+ DMERR("%s Invalid device sector\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+ } else {
+ DMERR(" %s Arg[4] invalid\n", __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
+ }
+
start_sector_orig = tmpll;
req_crypt_queue = alloc_workqueue("req_cryptd",
- WQ_HIGHPRI |
- WQ_CPU_INTENSIVE|
- WQ_MEM_RECLAIM,
- 1);
+ WQ_NON_REENTRANT |
+ WQ_HIGHPRI |
+ WQ_CPU_INTENSIVE|
+ WQ_MEM_RECLAIM,
+ 1);
if (!req_crypt_queue) {
DMERR("%s req_crypt_queue not allocated\n", __func__);
- return DM_REQ_CRYPT_ERROR;
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
}
/* Allocate the crypto alloc blk cipher and keep the handle */
tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
if (IS_ERR(tfm)) {
- DMERR("%s ablkcipher tfm allocation failed : error = %lu\n",
- __func__, PTR_ERR(tfm));
- return DM_REQ_CRYPT_ERROR;
+ DMERR("%s ablkcipher tfm allocation failed : error\n",
+ __func__);
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
}
req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
if (!req_io_pool) {
DMERR("%s req_io_pool not allocated\n", __func__);
- return DM_REQ_CRYPT_ERROR;
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
}
req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!req_page_pool) {
DMERR("%s req_page_pool not allocated\n", __func__);
- return DM_REQ_CRYPT_ERROR;
+ err = DM_REQ_CRYPT_ERROR;
+ goto ctr_exit;
}
-
- return 0;
-}
-
-static void req_crypt_postsuspend(struct dm_target *ti)
-{
-}
-
-static int req_crypt_preresume(struct dm_target *ti)
-{
- return 0;
-}
-
-static void req_crypt_resume(struct dm_target *ti)
-{
-}
-
-/* Message interface
- * key set <key>
- * key wipe
- */
-static int req_crypt_message(struct dm_target *ti, unsigned argc, char **argv)
-{
- return 0;
+ err = 0;
+ctr_exit:
+ if (err != 0) {
+ if (req_crypt_queue) {
+ destroy_workqueue(req_crypt_queue);
+ req_crypt_queue = NULL;
+ }
+ if (req_io_pool) {
+ mempool_destroy(req_io_pool);
+ req_io_pool = NULL;
+ }
+ if (req_page_pool) {
+ mempool_destroy(req_page_pool);
+ req_page_pool = NULL;
+ }
+ if (tfm) {
+ crypto_free_ablkcipher(tfm);
+ tfm = NULL;
+ }
+ }
+ return err;
}
static int req_crypt_iterate_devices(struct dm_target *ti,
@@ -716,11 +774,6 @@
.dtr = req_crypt_dtr,
.map_rq = req_crypt_map,
.rq_end_io = req_crypt_endio,
- .status = req_crypt_status,
- .postsuspend = req_crypt_postsuspend,
- .preresume = req_crypt_preresume,
- .resume = req_crypt_resume,
- .message = req_crypt_message,
.iterate_devices = req_crypt_iterate_devices,
};
@@ -733,8 +786,10 @@
return -ENOMEM;
r = dm_register_target(&req_crypt_target);
- if (r < 0)
+ if (r < 0) {
DMERR("register failed %d", r);
+ kmem_cache_destroy(_req_crypt_io_pool);
+ }
return r;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index cb63d12..bf66442 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -181,7 +181,7 @@
uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
int32_t rc = 0;
uint32_t cmd = 0, delay = 0;
- uint8_t data[10];
+ uint8_t data[11];
uint16_t reg_addr = 0;
struct msm_camera_i2c_reg_setting *i2c_msg =
&c_ctrl->cfg.cci_i2c_write_cfg;
@@ -618,7 +618,7 @@
msm_cci_flush_queue(cci_dev, master);
goto ERROR;
} else {
- rc = 0;
+ rc = cci_dev->cci_master_info[master].status;
}
CDBG("%s:%d X wait_for_completion_interruptible\n", __func__,
__LINE__);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index a8f4ca7..8a0dfc2 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -802,6 +802,26 @@
f->fmt.pix_mp.plane_fmt[0].reserved[0] =
(__u16)inst->prop.height[CAPTURE_PORT];
}
+
+ if (msm_comm_get_stream_output_mode(inst) ==
+ HAL_VIDEO_DECODER_SECONDARY) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ f->fmt.pix_mp.height =
+ inst->prop.height[CAPTURE_PORT];
+ f->fmt.pix_mp.width =
+ inst->prop.width[CAPTURE_PORT];
+ } else if (f->type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f->fmt.pix_mp.height =
+ inst->prop.height[OUTPUT_PORT];
+ f->fmt.pix_mp.width =
+ inst->prop.width[OUTPUT_PORT];
+ f->fmt.pix_mp.plane_fmt[0].bytesperline =
+ (__u16)inst->prop.width[OUTPUT_PORT];
+ f->fmt.pix_mp.plane_fmt[0].reserved[0] =
+ (__u16)inst->prop.height[OUTPUT_PORT];
+ }
+ }
} else {
dprintk(VIDC_ERR,
"Buf type not recognized, type = %d\n",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 24bed94..c8cd75e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -588,10 +588,16 @@
} else {
dprintk(VIDC_DBG,
"V4L2_EVENT_SEQ_CHANGED_SUFFICIENT\n");
- inst->prop.height[CAPTURE_PORT] = event_notify->height;
- inst->prop.width[CAPTURE_PORT] = event_notify->width;
- if (!msm_comm_get_stream_output_mode(inst) ==
+ if (msm_comm_get_stream_output_mode(inst) !=
HAL_VIDEO_DECODER_SECONDARY) {
+ dprintk(VIDC_DBG,
+ "event_notify->height = %d event_notify->width = %d\n",
+ event_notify->height,
+ event_notify->width);
+ inst->prop.height[CAPTURE_PORT] =
+ event_notify->height;
+ inst->prop.width[CAPTURE_PORT] =
+ event_notify->width;
inst->prop.height[OUTPUT_PORT] =
event_notify->height;
inst->prop.width[OUTPUT_PORT] =
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index ba5791b..9cf745e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -49,6 +49,11 @@
writel_relaxed(val, mixer->base + reg);
}
+static inline u32 mdp_mixer_read(struct mdss_mdp_mixer *mixer, u32 reg)
+{
+ return readl_relaxed(mixer->base + reg);
+}
+
static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
{
struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
@@ -1954,6 +1959,7 @@
int stage, secure = 0;
int screen_state;
int outsize = 0;
+ u32 op_mode;
screen_state = ctl->force_screen_state;
@@ -2087,6 +2093,11 @@
else
ctl->flush_bits |= BIT(6) << mixer->num;
+ op_mode = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_OP_MODE);
+ /* Read GC enable/disable status on LM */
+ op_mode = (op_mode & BIT(0));
+ blend_color_out |= op_mode;
+
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OP_MODE, blend_color_out);
off = __mdss_mdp_ctl_get_mixer_off(mixer);
mdss_mdp_ctl_write(ctl, off, mixercfg);
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index 4b9ea20..c11b438 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -109,7 +109,8 @@
#define MDSS_MDP_REG_CTL_OFFSET(ctl) (0x00600 + ((ctl) * \
MDSS_MDP_CTL_ADDRESS_OFFSET))
-#define MDSS_MDP_REG_CTL_LAYER(lm) ((lm) * 0x004)
+#define MDSS_MDP_REG_CTL_LAYER(lm) \
+ ((lm == 5) ? (0x024) : ((lm) * 0x004))
#define MDSS_MDP_REG_CTL_TOP 0x014
#define MDSS_MDP_REG_CTL_FLUSH 0x018
#define MDSS_MDP_REG_CTL_START 0x01C
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index d1e5b52..6b497bb 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -181,6 +181,7 @@
#define MDSS_MDP_GAMUT_SIZE 0x5C
#define MDSS_MDP_IGC_DSPP_SIZE 0x28
#define MDSS_MDP_IGC_SSPP_SIZE 0x88
+#define MDSS_MDP_VIG_QSEED2_SHARP_SIZE 0x0C
#define TOTAL_BLEND_STAGES 0x4
#define PP_FLAGS_DIRTY_PA 0x1
@@ -789,9 +790,15 @@
unsigned long flags = 0;
char __iomem *offset;
struct mdss_data_type *mdata;
+ u32 current_opmode;
+ u32 csc_reset;
+ u32 dcm_state = DCM_UNINIT;
pr_debug("pnum=%x\n", pipe->num);
+ if (pipe->mixer && pipe->mixer->ctl && pipe->mixer->ctl->mfd)
+ dcm_state = pipe->mixer->ctl->mfd->dcm_state;
+
mdata = mdss_mdp_get_mdata();
if ((pipe->flags & MDP_OVERLAY_PP_CFG_EN) &&
(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG)) {
@@ -825,6 +832,16 @@
pp_histogram_setup(&opmode, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer);
+ /* Update CSC state only if tuning mode is enable */
+ if (dcm_state == DTM_ENTER) {
+ /* Reset bit 16 to 19 for CSC_STATE in VIG_OP_MODE */
+ csc_reset = 0xFFF0FFFF;
+ current_opmode = readl_relaxed(pipe->base +
+ MDSS_MDP_REG_VIG_OP_MODE);
+ *op |= ((current_opmode & csc_reset) | opmode);
+ return 0;
+ }
+
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) &&
(mdata->mdp_rev < MDSS_MDP_HW_REV_103)) {
@@ -910,10 +927,15 @@
u32 filter_mode;
struct mdss_data_type *mdata;
u32 src_w, src_h;
+ u32 dcm_state = DCM_UNINIT;
pr_debug("pipe=%d, change pxl ext=%d\n", pipe->num,
pipe->scale.enable_pxl_ext);
mdata = mdss_mdp_get_mdata();
+
+ if (pipe->mixer && pipe->mixer->ctl && pipe->mixer->ctl->mfd)
+ dcm_state = pipe->mixer->ctl->mfd->dcm_state;
+
if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
filter_mode = MDSS_MDP_SCALE_FILTER_CA;
else
@@ -948,12 +970,13 @@
pipe->pp_cfg.sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
}
- if ((pipe->src_fmt->is_yuv) &&
- !((pipe->dst.w < src_w) || (pipe->dst.h < src_h))) {
- pp_sharp_config(pipe->base +
- MDSS_MDP_REG_VIG_QSEED2_SHARP,
- &pipe->pp_res.pp_sts,
- &pipe->pp_cfg.sharp_cfg);
+ if (dcm_state != DTM_ENTER &&
+ ((pipe->src_fmt->is_yuv) &&
+ !((pipe->dst.w < src_w) || (pipe->dst.h < src_h)))) {
+ pp_sharp_config(pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_SHARP,
+ &pipe->pp_res.pp_sts,
+ &pipe->pp_cfg.sharp_cfg);
}
if ((src_h != pipe->dst.h) ||
@@ -1155,10 +1178,23 @@
char __iomem *pipe_base;
u32 pipe_num;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ u32 current_opmode;
+ u32 dcm_state = DCM_UNINIT;
if (pipe == NULL)
return -EINVAL;
+ if (pipe->mixer && pipe->mixer->ctl && pipe->mixer->ctl->mfd)
+ dcm_state = pipe->mixer->ctl->mfd->dcm_state;
+
+ /* Read IGC state and update the same if tuning mode is enable */
+ if (dcm_state == DTM_ENTER) {
+ current_opmode = readl_relaxed(pipe->base +
+ MDSS_MDP_REG_SSPP_SRC_OP_MODE);
+ *op |= (current_opmode & BIT(16));
+ return ret;
+ }
+
/*
* TODO: should this function be responsible for masking multiple
* pipes to be written in dual pipe case?
@@ -4662,7 +4698,8 @@
break;
}
- for (stage = 0; stage < mdss_res->nmixers_intf; stage++)
+ for (stage = 0; stage < (mdss_res->nmixers_intf +
+ mdss_res->nmixers_wb); stage++)
if (ptr == base + MDSS_MDP_REG_CTL_LAYER(stage)) {
ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
goto End;
@@ -4753,7 +4790,10 @@
} else if (ptr == base + MDSS_MDP_REG_SSPP_SRC_OP_MODE) {
ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
break;
- } else if ((ptr == base + MDSS_MDP_REG_VIG_QSEED2_SHARP)) {
+ /* QSEED2 range */
+ } else if ((ptr >= base + MDSS_MDP_REG_VIG_QSEED2_SHARP) &&
+ (ptr <= base + MDSS_MDP_REG_VIG_QSEED2_SHARP +
+ MDSS_MDP_VIG_QSEED2_SHARP_SIZE)) {
ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE;
break;
/* PA range */
@@ -4838,7 +4878,8 @@
int stage = 0;
struct mdss_mdp_mixer *mixer;
- for (counter = 0; counter < mdss_res->nmixers_intf; counter++) {
+ for (counter = 0; counter < (mdss_res->nmixers_intf +
+ mdss_res->nmixers_wb); counter++) {
mixer = mdss_res->mixer_intf + counter;
base = mixer->base;
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index bc91cba..87dde36 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -6,6 +6,14 @@
#include <linux/poison.h>
#include <linux/ratelimit.h>
+#ifndef mark_addr_rdonly
+#define mark_addr_rdonly(a)
+#endif
+
+#ifndef mark_addr_rdwrite
+#define mark_addr_rdwrite(a)
+#endif
+
static inline void set_page_poison(struct page *page)
{
__set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
@@ -27,6 +35,7 @@
set_page_poison(page);
memset(addr, PAGE_POISON, PAGE_SIZE);
+ mark_addr_rdonly(addr);
kunmap_atomic(addr);
}
@@ -82,6 +91,7 @@
addr = kmap_atomic(page);
check_poison_mem(addr, PAGE_SIZE);
+ mark_addr_rdwrite(addr);
clear_page_poison(page);
kunmap_atomic(addr);
}
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
index 0cbbf10..699075a 100644
--- a/net/ipv4/sysfs_net_ipv4.c
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -53,6 +53,9 @@
CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+CREATE_IPV4_FILE(tcp_delack_seg, sysctl_tcp_delack_seg);
+CREATE_IPV4_FILE(tcp_use_userconfig, sysctl_tcp_use_userconfig);
+
static struct attribute *ipv4_attrs[] = {
&tcp_wmem_min_attr.attr,
&tcp_wmem_def_attr.attr,
@@ -60,6 +63,8 @@
&tcp_rmem_min_attr.attr,
&tcp_rmem_def_attr.attr,
&tcp_rmem_max_attr.attr,
+ &tcp_delack_seg_attr.attr,
+ &tcp_use_userconfig_attr.attr,
NULL
};