Merge "msm: camera: sensor: remove unnecessary logs"
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index f256d78..da54138 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -171,6 +171,22 @@
- 0 -> MASTER 0
- 1 -> MASTER 1
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+ actuator
+ - "cam_vaf"
+- qcom,cam-vreg-type : should contain regulator type for regulators mentioned in
+ qcom,cam-vreg-name property (in the same order)
+ - 0 for LDO and 1 for LVS
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+ for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+ for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+ required from the regulators mentioned in the qcom,cam-vreg-name property
+ (in the same order).
+- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+
Example:
qcom,cci@0xfda0c000 {
@@ -200,8 +216,14 @@
actuator0: qcom,actuator@18 {
cell-index = <0>;
reg = <0x18>;
- compatible = "qcom,actuator";
- qcom,cci-master = <0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pm8941_l23>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-type = <0>;
+ qcom,cam-vreg-min-voltage = <3000000>;
+ qcom,cam-vreg-max-voltage = <3000000>;
+ qcom,cam-vreg-op-mode = <100000>;
};
qcom,s5k3l1yx@6e {
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index 90dae06..d243b78 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -895,6 +895,9 @@
qcom,hotplug-temp-hysteresis = <20>;
qcom,cpu-sensors = "tsens_tz_sensor5", "tsens_tz_sensor5",
"tsens_tz_sensor5", "tsens_tz_sensor5";
+ qcom,default-temp = <80>;
+ qcom,efuse-data = <0xfc4b8000 0x1000 23 30 0x3>;
+ qcom,efuse-temperature-map = <0x1 70>;
qcom,vdd-restriction-temp = <5>;
qcom,vdd-restriction-temp-hysteresis = <10>;
vdd-dig-supply = <&pm8110_s1_floor_corner>;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b4574aa..3f40def 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -48,6 +48,7 @@
#include <asm/system.h>
#include <mach/socinfo.h>
+#include <mach/msm_rtb.h>
union gic_base {
void __iomem *common_base;
@@ -310,7 +311,7 @@
if (gic->need_access_lock)
raw_spin_lock(&irq_controller_lock);
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+ writel_relaxed_no_log(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
if (gic->need_access_lock)
raw_spin_unlock(&irq_controller_lock);
}
@@ -388,8 +389,8 @@
bit = 1 << (cpu_logical_map(cpu) + shift);
raw_spin_lock(&irq_controller_lock);
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
+ val = readl_relaxed_no_log(reg) & ~mask;
+ writel_relaxed_no_log(val | bit, reg);
raw_spin_unlock(&irq_controller_lock);
return IRQ_SET_MASK_OK;
@@ -434,7 +435,7 @@
do {
if (gic->need_access_lock)
raw_spin_lock(&irq_controller_lock);
- irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqstat = readl_relaxed_no_log(cpu_base + GIC_CPU_INTACK);
if (gic->need_access_lock)
raw_spin_unlock(&irq_controller_lock);
irqnr = irqstat & ~0x1c00;
@@ -442,16 +443,18 @@
if (likely(irqnr > 15 && irqnr < 1021)) {
irqnr = irq_find_mapping(gic->domain, irqnr);
handle_IRQ(irqnr, regs);
+ uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
continue;
}
if (irqnr < 16) {
if (gic->need_access_lock)
raw_spin_lock(&irq_controller_lock);
- writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ writel_relaxed_no_log(irqstat, cpu_base + GIC_CPU_EOI);
if (gic->need_access_lock)
raw_spin_unlock(&irq_controller_lock);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
+ uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
#endif
continue;
}
@@ -713,19 +716,22 @@
if (!dist_base || !cpu_base)
return;
- saved_cpu_ctrl = readl_relaxed(cpu_base + GIC_CPU_CTRL);
+ saved_cpu_ctrl = readl_relaxed_no_log(cpu_base + GIC_CPU_CTRL);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
- gic_data[gic_nr].saved_dist_pri[i] = readl_relaxed(dist_base +
+ gic_data[gic_nr].saved_dist_pri[i] = readl_relaxed_no_log(
+ dist_base +
GIC_DIST_PRI + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
- ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+ ptr[i] = readl_relaxed_no_log(dist_base +
+ GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
- ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+ ptr[i] = readl_relaxed_no_log(dist_base +
+ GIC_DIST_CONFIG + i * 4);
}
@@ -747,18 +753,20 @@
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
- writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+ writel_relaxed_no_log(ptr[i], dist_base +
+ GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
- writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+ writel_relaxed_no_log(ptr[i], dist_base +
+ GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
- writel_relaxed(gic_data[gic_nr].saved_dist_pri[i],
+ writel_relaxed_no_log(gic_data[gic_nr].saved_dist_pri[i],
dist_base + GIC_DIST_PRI + i * 4);
- writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
- writel_relaxed(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
+ writel_relaxed_no_log(0xf0, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed_no_log(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
@@ -993,7 +1001,7 @@
if (gic->need_access_lock)
raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* this always happens on GIC0 */
- writel_relaxed(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
+ writel_relaxed_no_log(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
if (gic->need_access_lock)
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
mb();
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 478e766..b9f0f8b 100755
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -503,6 +503,7 @@
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_PFT=y
CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_MSM_RDBG=m
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index c5c16c2..e843739 100755
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -561,6 +561,7 @@
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_PFT=y
CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_MSM_RDBG=m
diff --git a/arch/arm/mach-msm/include/mach/msm_rtb.h b/arch/arm/mach-msm/include/mach/msm_rtb.h
index b33e8b6..c419a3d 100644
--- a/arch/arm/mach-msm/include/mach/msm_rtb.h
+++ b/arch/arm/mach-msm/include/mach/msm_rtb.h
@@ -27,6 +27,7 @@
LOGK_TIMESTAMP = 6,
LOGK_L2CPREAD = 7,
LOGK_L2CPWRITE = 8,
+ LOGK_IRQ = 9,
};
#define LOGTYPE_NOPC 0x80
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5d74cc3..9fa7765 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -6,6 +6,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
+#include <linux/security.h>
#include "blk.h"
@@ -509,6 +510,10 @@
if (bio_integrity(bio) != blk_integrity_rq(rq))
return false;
+ /* Don't merge bios of files with different encryption */
+ if (!security_allow_merge_bio(rq->bio, bio))
+ return false;
+
return true;
}
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 0edfdad..653e299 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -22,7 +22,10 @@
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
#include <asm/current.h>
+#include <mach/restart.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <mach/usbdiag.h>
#endif
@@ -33,6 +36,10 @@
#include "diagfwd_cntl.h"
#include "diag_dci.h"
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
unsigned int dci_max_reg = 100;
unsigned int dci_max_clients = 10;
unsigned char dci_cumulative_log_mask[DCI_LOG_MASK_SIZE];
@@ -47,14 +54,15 @@
/* Number of milliseconds anticipated to process the DCI data */
#define DCI_WAKEUP_TIMEOUT 1
-#define DCI_CHK_CAPACITY(entry, new_data_len) \
-((entry->data_len + new_data_len > entry->total_capacity) ? 1 : 0) \
+#define DCI_CAN_ADD_BUF_TO_LIST(buf) \
+ (buf && buf->data && !buf->in_busy && buf->data_len > 0) \
#ifdef CONFIG_DEBUG_FS
struct diag_dci_data_info *dci_data_smd;
struct mutex dci_stat_mutex;
-void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type)
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral)
{
static int curr_dci_data_smd;
static unsigned long iteration;
@@ -67,6 +75,7 @@
temp_data += curr_dci_data_smd;
temp_data->iteration = iteration + 1;
temp_data->data_size = read_bytes;
+ temp_data->peripheral = peripheral;
temp_data->ch_type = ch_type;
diag_get_timestamp(temp_data->time_stamp);
curr_dci_data_smd++;
@@ -74,50 +83,393 @@
mutex_unlock(&dci_stat_mutex);
}
#else
-void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type) { }
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral) { }
#endif
+static void dci_drain_data(unsigned long data)
+{
+ queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
+static void dci_check_drain_timer(void)
+{
+ if (!dci_timer_in_progress) {
+ dci_timer_in_progress = 1;
+ mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(500));
+ }
+}
+
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+ if (!buffer || buffer->data)
+ return -EINVAL;
+
+ switch (type) {
+ case DCI_BUF_PRIMARY:
+ buffer->data = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+ if (!buffer->data)
+ return -ENOMEM;
+ buffer->capacity = IN_BUF_SIZE;
+ break;
+ case DCI_BUF_SECONDARY:
+ buffer->data = NULL;
+ buffer->capacity = IN_BUF_SIZE;
+ break;
+ case DCI_BUF_CMD:
+ buffer->data = kzalloc(PKT_SIZE, GFP_KERNEL);
+ if (!buffer->data)
+ return -ENOMEM;
+ buffer->capacity = PKT_SIZE;
+ break;
+ default:
+ pr_err("diag: In %s, unknown type %d", __func__, type);
+ return -EINVAL;
+ }
+
+ buffer->data_len = 0;
+ buffer->in_busy = 0;
+ buffer->buf_type = type;
+ mutex_init(&buffer->data_mutex);
+
+ return 0;
+}
+
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+ if (!buf)
+ return -EINVAL;
+
+ /* Return 1 if the buffer is not busy and can hold new data */
+ if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+ return 1;
+
+ return 0;
+}
+
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+ struct diag_dci_buffer_t *buf)
+{
+ if (!buf || !client || !buf->data)
+ return;
+
+ if (buf->in_list || buf->data_len == 0)
+ return;
+
+ mutex_lock(&client->write_buf_mutex);
+ list_add_tail(&buf->buf_track, &client->list_write_buf);
+ mutex_lock(&buf->data_mutex);
+ buf->in_busy = 1;
+ buf->in_list = 1;
+ mutex_unlock(&buf->data_mutex);
+ mutex_unlock(&client->write_buf_mutex);
+}
+
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+ int data_source, int len)
+{
+ struct diag_dci_buffer_t *buf_primary = NULL;
+ struct diag_dci_buffer_t *buf_temp = NULL;
+ struct diag_dci_buffer_t *curr = NULL;
+
+ if (!client)
+ return -EINVAL;
+ if (len < 0 || len > IN_BUF_SIZE)
+ return -EINVAL;
+
+ curr = client->buffers[data_source].buf_curr;
+ buf_primary = client->buffers[data_source].buf_primary;
+
+ if (curr && diag_dci_check_buffer(curr, len) == 1)
+ return 0;
+
+ dci_add_buffer_to_list(client, curr);
+ client->buffers[data_source].buf_curr = NULL;
+
+ if (diag_dci_check_buffer(buf_primary, len) == 1) {
+ client->buffers[data_source].buf_curr = buf_primary;
+ return 0;
+ }
+
+ buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+ if (!buf_temp)
+ return -EIO;
+
+ if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+ buf_temp->data = diagmem_alloc(driver, driver->itemsize_dci,
+ POOL_TYPE_DCI);
+ if (!buf_temp->data) {
+ kfree(buf_temp);
+ buf_temp = NULL;
+ return -ENOMEM;
+ }
+ client->buffers[data_source].buf_curr = buf_temp;
+ return 0;
+ }
+
+ kfree(buf_temp);
+ buf_temp = NULL;
+ return -EIO;
+}
+
+void diag_dci_wakeup_clients()
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+ /*
+ * Don't wake up the client when there is no pending buffer to
+ * write or when it is writing to user space
+ */
+ if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+ mutex_lock(&entry->write_buf_mutex);
+ entry->in_service = 1;
+ mutex_unlock(&entry->write_buf_mutex);
+ diag_update_sleeping_process(entry->client->tgid,
+ DCI_DATA_TYPE);
+ }
+ }
+}
+
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+ int i;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ struct diag_dci_buffer_t *buf_temp = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ for (i = 0; i < NUM_DCI_PROC; i++) {
+ proc_buf = &entry->buffers[i];
+
+ buf_temp = proc_buf->buf_primary;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+ dci_add_buffer_to_list(entry, buf_temp);
+
+ buf_temp = proc_buf->buf_cmd;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+ dci_add_buffer_to_list(entry, buf_temp);
+
+ buf_temp = proc_buf->buf_curr;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+ dci_add_buffer_to_list(entry, buf_temp);
+ mutex_lock(&proc_buf->buf_mutex);
+ proc_buf->buf_curr = NULL;
+ mutex_unlock(&proc_buf->buf_mutex);
+ }
+ }
+ if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+ mutex_lock(&entry->write_buf_mutex);
+ entry->in_service = 1;
+ mutex_unlock(&entry->write_buf_mutex);
+ diag_update_sleeping_process(entry->client->tgid,
+ DCI_DATA_TYPE);
+ }
+ }
+ dci_timer_in_progress = 0;
+}
+
+/* Process the data read from apps userspace client */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+ uint8_t cmd_code;
+
+ if (!buf) {
+ pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+ return;
+ }
+
+ if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+ && data_type != DCI_PKT_TYPE) {
+ pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+ __func__, (unsigned int)data_type);
+ return;
+ }
+
+ cmd_code = *(uint8_t *)buf;
+
+ switch (cmd_code) {
+ case LOG_CMD_CODE:
+ extract_dci_log(buf, recd_bytes, APPS_DATA);
+ break;
+ case EVENT_CMD_CODE:
+ extract_dci_events(buf, recd_bytes, APPS_DATA);
+ break;
+ case DCI_PKT_RSP_CODE:
+ case DCI_DELAYED_RSP_CODE:
+ extract_dci_pkt_rsp(buf, recd_bytes, APPS_DATA, NULL);
+ break;
+ default:
+ pr_err("diag: In %s, unsupported command code: 0x%x, not log or event\n",
+ __func__, cmd_code);
+ return;
+
+ }
+
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+}
+
/* Process the data read from the smd dci channel */
int diag_process_smd_dci_read_data(struct diag_smd_info *smd_info, void *buf,
int recd_bytes)
{
- int read_bytes, dci_pkt_len, i;
+ int read_bytes, dci_pkt_len;
uint8_t recv_pkt_cmd_code;
- diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type);
+ /*
+ * Release wakeup source when there are no more clients to
+ * process DCI data
+ */
+ if (driver->num_dci_client == 0) {
+ diag_dci_try_deactivate_wakeup_source();
+ return 0;
+ }
+
+ diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type,
+ (uint8_t)smd_info->peripheral);
/* Each SMD read can have multiple DCI packets */
read_bytes = 0;
while (read_bytes < recd_bytes) {
/* read actual length of dci pkt */
dci_pkt_len = *(uint16_t *)(buf+2);
+
+ /* Check if the length of the current packet is lesser than the
+ * remaining bytes in the received buffer. This includes space
+ * for the Start byte (1), Version byte (1), length bytes (2)
+ * and End byte (1)
+ */
+ if ((dci_pkt_len+5) > (recd_bytes-read_bytes)) {
+ pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+ __func__, recd_bytes, dci_pkt_len);
+ diag_dci_try_deactivate_wakeup_source();
+ return 0;
+ }
/* process one dci packet */
- pr_debug("diag: bytes read = %d, single dci pkt len = %d\n",
- read_bytes, dci_pkt_len);
+ pr_debug("diag: dci: peripheral = %d bytes read = %d, single dci pkt len = %d\n",
+ smd_info->peripheral, read_bytes, dci_pkt_len);
/* print_hex_dump(KERN_DEBUG, "Single DCI packet :",
DUMP_PREFIX_ADDRESS, 16, 1, buf, 5 + dci_pkt_len, 1); */
recv_pkt_cmd_code = *(uint8_t *)(buf+4);
- if (recv_pkt_cmd_code == LOG_CMD_CODE)
- extract_dci_log(buf+4);
- else if (recv_pkt_cmd_code == EVENT_CMD_CODE)
- extract_dci_events(buf+4);
- else
- extract_dci_pkt_rsp(smd_info, buf); /* pkt response */
+ if (recv_pkt_cmd_code == LOG_CMD_CODE) {
+ /* Don't include the 4 bytes for command code */
+ extract_dci_log(buf + 4, recd_bytes - 4,
+ smd_info->peripheral);
+ } else if (recv_pkt_cmd_code == EVENT_CMD_CODE) {
+ /* Don't include the 4 bytes for command code */
+ extract_dci_events(buf + 4, recd_bytes - 4,
+ smd_info->peripheral);
+ } else
+ extract_dci_pkt_rsp(buf + 4, dci_pkt_len,
+ smd_info->peripheral, smd_info);
read_bytes += 5 + dci_pkt_len;
buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
}
- /* Release wakeup source when there are no more clients to
- process DCI data */
- if (driver->num_dci_client == 0)
- diag_dci_try_deactivate_wakeup_source(smd_info->ch);
/* wake up all sleeping DCI clients which have some data */
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client &&
- driver->dci_client_tbl[i].data_len) {
- smd_info->in_busy_1 = 1;
- diag_update_sleeping_process(
- driver->dci_client_tbl[i].client->tgid,
- DCI_DATA_TYPE);
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+ diag_dci_try_deactivate_wakeup_source();
+ return 0;
+}
+
+static inline struct diag_dci_client_tbl *__diag_dci_get_client_entry(
+ int client_id)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client->tgid == client_id)
+ return entry;
+ }
+ return NULL;
+}
+
+static inline int __diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+ uint16_t log_code)
+{
+ uint16_t item_num;
+ uint8_t equip_id, *log_mask_ptr, byte_mask;
+ int byte_index, offset;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+
+ equip_id = LOG_GET_EQUIP_ID(log_code);
+ item_num = LOG_GET_ITEM_NUM(log_code);
+ byte_index = item_num/8 + 2;
+ byte_mask = 0x01 << (item_num % 8);
+ offset = equip_id * 514;
+
+ if (offset + byte_index > DCI_LOG_MASK_SIZE) {
+ pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+ __func__, offset, log_code, byte_index);
+ return 0;
+ }
+
+ log_mask_ptr = entry->dci_log_mask;
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
+
+static inline int __diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+ uint16_t event_id)
+{
+ uint8_t *event_mask_ptr, byte_mask;
+ int byte_index, bit_index;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+
+ byte_index = event_id/8;
+ bit_index = event_id % 8;
+ byte_mask = 0x1 << bit_index;
+
+ if (byte_index > DCI_EVENT_MASK_SIZE) {
+ pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+ __func__, event_id, byte_index);
+ return 0;
+ }
+
+ event_mask_ptr = entry->dci_event_mask;
+ event_mask_ptr = event_mask_ptr + byte_index;
+ return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+ if (!header)
+ return -ENOMEM;
+
+ switch (header->cmd_code) {
+ case 0x7d: /* Msg Mask Configuration */
+ case 0x73: /* Log Mask Configuration */
+ case 0x81: /* Event Mask Configuration */
+ case 0x82: /* Event Mask Change */
+ case 0x60: /* Event Mask Toggle */
+ return 1;
+ }
+
+ if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+ switch (header->subsys_cmd_code) {
+ case 0x60: /* Extended Event Mask Config */
+ case 0x61: /* Extended Msg Mask Config */
+ case 0x62: /* Extended Log Mask Config */
+ case 0x20C: /* Set current Preset ID */
+ case 0x20D: /* Get current Preset ID */
+ return 1;
}
}
@@ -204,96 +556,181 @@
return 0;
}
-void extract_dci_pkt_rsp(struct diag_smd_info *smd_info, unsigned char *buf)
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+ struct diag_smd_info *smd_info)
{
- int i = 0, cmd_code_len = 1;
- int curr_client_pid = 0, write_len, *tag = NULL;
- struct diag_dci_client_tbl *entry;
+ int tag, curr_client_pid = 0;
+ struct diag_dci_client_tbl *entry = NULL;
void *temp_buf = NULL;
- uint8_t recv_pkt_cmd_code, delete_flag = 0;
+ uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
+ uint32_t rsp_len = 0;
+ struct diag_dci_buffer_t *rsp_buf = NULL;
struct dci_pkt_req_entry_t *req_entry = NULL;
- recv_pkt_cmd_code = *(uint8_t *)(buf+4);
- if (recv_pkt_cmd_code != DCI_PKT_RSP_CODE)
- cmd_code_len = 4; /* delayed response */
- write_len = (int)(*(uint16_t *)(buf+2)) - cmd_code_len;
- if (write_len <= 0) {
- pr_err("diag: Invalid length in %s, write_len: %d",
- __func__, write_len);
+ unsigned char *temp = buf;
+
+ if (!buf) {
+ pr_err("diag: Invalid pointer in %s\n", __func__);
return;
}
- pr_debug("diag: len = %d\n", write_len);
- tag = (int *)(buf + (4 + cmd_code_len)); /* Retrieve the Tag field */
- req_entry = diag_dci_get_request_entry(*tag);
+ dci_cmd_code = *(uint8_t *)(temp);
+ if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+ cmd_code_len = sizeof(uint8_t);
+ } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+ cmd_code_len = sizeof(uint32_t);
+ } else {
+ pr_err("diag: In %s, invalid command code %d\n", __func__,
+ dci_cmd_code);
+ return;
+ }
+ temp += cmd_code_len;
+ tag = *(int *)temp;
+ temp += sizeof(int);
+
+ /*
+ * The size of the response is (total length) - (length of the command
+ * code and the tag (int))
+ */
+ rsp_len = len - (cmd_code_len + sizeof(int));
+ /*
+ * Check if the length embedded in the packet is correct.
+ * Include the start (1), version (1), length (2) and the end
+ * (1) bytes while checking. Total = 5 bytes
+ */
+ if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+ pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
+ __func__, len, rsp_len);
+ return;
+ }
+
+ req_entry = diag_dci_get_request_entry(tag);
if (!req_entry) {
- pr_alert("diag: No matching PID for DCI data\n");
+ pr_err("diag: No matching PID for DCI data\n");
return;
}
- *tag = req_entry->uid;
curr_client_pid = req_entry->pid;
/* Remove the headers and send only the response to this function */
- delete_flag = diag_dci_remove_req_entry(buf + 8 + cmd_code_len,
- write_len - 4,
- req_entry);
+ delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
if (delete_flag < 0)
return;
- /* Using PID of client process, find client buffer */
- i = diag_dci_find_client_index(curr_client_pid);
- if (i != DCI_CLIENT_INDEX_INVALID) {
- /* copy pkt rsp in client buf */
- entry = &(driver->dci_client_tbl[i]);
- mutex_lock(&entry->data_mutex);
- /*
- * Check if we can fit the data in the rsp buffer. The total
- * length of the rsp is the rsp length (write_len) +
- * DCI_PKT_RSP_TYPE header (int) + field for length (int) +
- * delete_flag (uint8_t)
- */
- if (DCI_CHK_CAPACITY(entry, 9+write_len)) {
- pr_alert("diag: create capacity for pkt rsp\n");
- entry->total_capacity += 9+write_len;
- temp_buf = krealloc(entry->dci_data,
- entry->total_capacity, GFP_KERNEL);
- if (!temp_buf) {
- pr_err("diag: DCI realloc failed\n");
- mutex_unlock(&entry->data_mutex);
- return;
- } else {
- entry->dci_data = temp_buf;
- }
- }
- *(int *)(entry->dci_data+entry->data_len) =
- DCI_PKT_RSP_TYPE;
- entry->data_len += 4;
- *(int *)(entry->dci_data+entry->data_len)
- = write_len;
- entry->data_len += 4;
- *(uint8_t *)(entry->dci_data + entry->data_len) = delete_flag;
- entry->data_len += sizeof(uint8_t);
- memcpy(entry->dci_data+entry->data_len,
- buf+4+cmd_code_len, write_len);
- entry->data_len += write_len;
- mutex_unlock(&entry->data_mutex);
+ entry = __diag_dci_get_client_entry(curr_client_pid);
+ if (!entry) {
+ pr_err("diag: In %s, couldn't find entry\n", __func__);
+ return;
}
+
+ rsp_buf = entry->buffers[data_source].buf_cmd;
+
+ mutex_lock(&rsp_buf->data_mutex);
+ /*
+ * Check if we can fit the data in the rsp buffer. The total length of
+ * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
+ * + field for length (int) + delete_flag (uint8_t)
+ */
+ if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
+ pr_alert("diag: create capacity for pkt rsp\n");
+ rsp_buf->capacity += 9 + rsp_len;
+ temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+ GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err("diag: DCI realloc failed\n");
+ mutex_unlock(&rsp_buf->data_mutex);
+ return;
+ } else {
+ rsp_buf->data = temp_buf;
+ }
+ }
+
+ /* Fill in packet response header information */
+ *(int *)(rsp_buf->data + rsp_buf->data_len) = DCI_PKT_RSP_TYPE;
+ rsp_buf->data_len += sizeof(int);
+ /* Packet Length = Response Length + Length of uid field (int) */
+ *(int *)(rsp_buf->data + rsp_buf->data_len) = rsp_len + sizeof(int);
+ rsp_buf->data_len += sizeof(int);
+ *(uint8_t *)(rsp_buf->data + rsp_buf->data_len) = delete_flag;
+ rsp_buf->data_len += sizeof(uint8_t);
+ *(int *)(rsp_buf->data + rsp_buf->data_len) = req_entry->uid;
+ rsp_buf->data_len += sizeof(int);
+ memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+ rsp_buf->data_len += rsp_len;
+ rsp_buf->data_source = data_source;
+ if (smd_info)
+ smd_info->in_busy_1 = 1;
+ mutex_unlock(&rsp_buf->data_mutex);
+
+
+ /*
+ * Add directly to the list for writing responses to the
+ * userspace as these shouldn't be buffered and shouldn't wait
+ * for log and event buffers to be full
+ */
+ dci_add_buffer_to_list(entry, rsp_buf);
}
-void extract_dci_events(unsigned char *buf)
+static void copy_dci_event(unsigned char *buf, int len,
+ struct diag_dci_client_tbl *client, int data_source)
+{
+ struct diag_dci_buffer_t *data_buffer = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ int err = 0, total_len = 0;
+
+ if (!buf || !client) {
+ pr_err("diag: Invalid pointers in %s", __func__);
+ return;
+ }
+
+ total_len = sizeof(int) + len;
+
+ proc_buf = &client->buffers[data_source];
+ mutex_lock(&proc_buf->buf_mutex);
+ mutex_lock(&proc_buf->health_mutex);
+ err = diag_dci_get_buffer(client, data_source, total_len);
+ if (err) {
+ if (err == -ENOMEM)
+ proc_buf->health.dropped_events++;
+ else
+ pr_err("diag: In %s, invalid packet\n", __func__);
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+ return;
+ }
+
+ data_buffer = proc_buf->buf_curr;
+
+ proc_buf->health.received_events++;
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+
+ mutex_lock(&data_buffer->data_mutex);
+ *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+ data_buffer->data_len += len;
+ data_buffer->data_source = data_source;
+ mutex_unlock(&data_buffer->data_mutex);
+
+}
+
+void extract_dci_events(unsigned char *buf, int len, int data_source)
{
uint16_t event_id, event_id_packet, length, temp_len;
- uint8_t *event_mask_ptr, byte_mask, payload_len, payload_len_field;
- uint8_t timestamp[8] = {0}, bit_index, timestamp_len;
- uint8_t event_data[MAX_EVENT_SIZE];
- unsigned int byte_index, total_event_len, i;
- struct diag_dci_client_tbl *entry;
+ uint8_t payload_len, payload_len_field;
+ uint8_t timestamp[8], timestamp_len;
+ unsigned char event_data[MAX_EVENT_SIZE];
+ unsigned int total_event_len;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
length = *(uint16_t *)(buf + 1); /* total length of event series */
if (length == 0) {
pr_err("diag: Incoming dci event length is invalid\n");
return;
}
- temp_len = 0;
- buf = buf + 3; /* start of event series */
+ /* Move directly to the start of the event series. 1 byte for
+ * event code and 2 bytes for the length field.
+ */
+ temp_len = 3;
while (temp_len < (length - 1)) {
event_id_packet = *(uint16_t *)(buf + temp_len);
event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
@@ -334,6 +771,22 @@
memcpy(event_data + 12, buf + temp_len + 2 +
timestamp_len, payload_len);
}
+
+ /* Before copying the data to userspace, check if we are still
+ * within the buffer limit. This is an error case, don't count
+ * it towards the health statistics.
+ *
+ * Here, the offset of 2 bytes(uint16_t) is for the
+ * event_id_packet length
+ */
+ temp_len += sizeof(uint16_t) + timestamp_len +
+ payload_len_field + payload_len;
+ if (temp_len > len) {
+ pr_err("diag: Invalid length in %s, len: %d, read: %d",
+ __func__, len, temp_len);
+ return;
+ }
+
/* 2 bytes for the event id & timestamp len is hard coded to 8,
as individual events have full timestamp */
*(uint16_t *)(event_data) = 10 +
@@ -343,108 +796,114 @@
/* 2 bytes for the event length field which is added to
the event data */
total_event_len = 2 + 10 + payload_len_field + payload_len;
- byte_index = event_id / 8;
- bit_index = event_id % 8;
- byte_mask = 0x1 << bit_index;
/* parse through event mask tbl of each client and check mask */
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client) {
- entry = &(driver->dci_client_tbl[i]);
- event_mask_ptr = entry->dci_event_mask +
- byte_index;
- mutex_lock(&dci_health_mutex);
- mutex_lock(&entry->data_mutex);
- if (*event_mask_ptr & byte_mask) {
- /* copy to client buffer */
- if (DCI_CHK_CAPACITY(entry,
- 4 + total_event_len)) {
- pr_err("diag: DCI event drop\n");
- driver->dci_client_tbl[i].
- dropped_events++;
- mutex_unlock(
- &entry->data_mutex);
- mutex_unlock(
- &dci_health_mutex);
- break;
- }
- driver->dci_client_tbl[i].
- received_events++;
- *(int *)(entry->dci_data+
- entry->data_len) = DCI_EVENT_TYPE;
- /* 4 bytes for DCI_EVENT_TYPE */
- memcpy(entry->dci_data +
- entry->data_len + 4, event_data
- , total_event_len);
- entry->data_len += 4 + total_event_len;
- }
- mutex_unlock(&entry->data_mutex);
- mutex_unlock(&dci_health_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl,
+ track);
+ if (__diag_dci_query_event_mask(entry, event_id)) {
+ /* copy to client buffer */
+ copy_dci_event(event_data, total_event_len,
+ entry, data_source);
}
}
- temp_len += 2 + timestamp_len + payload_len_field + payload_len;
}
}
-void extract_dci_log(unsigned char *buf)
+static void copy_dci_log(unsigned char *buf, int len,
+ struct diag_dci_client_tbl *client, int data_source)
{
- uint16_t log_code, item_num, log_length;
- uint8_t equip_id, *log_mask_ptr, byte_mask;
- unsigned int i, byte_index, byte_offset = 0;
- struct diag_dci_client_tbl *entry;
+ uint16_t log_length = 0;
+ struct diag_dci_buffer_t *data_buffer = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ int err = 0, total_len = 0;
+
+ if (!buf || !client) {
+ pr_err("diag: Invalid pointers in %s", __func__);
+ return;
+ }
log_length = *(uint16_t *)(buf + 2);
- log_code = *(uint16_t *)(buf + 6);
- equip_id = LOG_GET_EQUIP_ID(log_code);
- item_num = LOG_GET_ITEM_NUM(log_code);
- byte_index = item_num/8 + 2;
- byte_mask = 0x01 << (item_num % 8);
-
if (log_length > USHRT_MAX - 4) {
- pr_err("diag: Integer overflow in %s, log_len:%d",
+ pr_err("diag: Integer overflow in %s, log_len: %d",
__func__, log_length);
return;
}
- byte_offset = (equip_id * 514) + byte_index;
- if (byte_offset >= DCI_LOG_MASK_SIZE) {
- pr_err("diag: Invalid byte_offset %d in dci log\n",
- byte_offset);
+ total_len = sizeof(int) + log_length;
+
+	/* Check that we are within the buffer length. The check includes
+	 * the first 4 bytes for the log code (2) and the length bytes (2)
+ */
+ if ((log_length + sizeof(uint16_t) + 2) > len) {
+ pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+ __func__, log_length, len);
+ return;
+ }
+
+ proc_buf = &client->buffers[data_source];
+ mutex_lock(&proc_buf->buf_mutex);
+ mutex_lock(&proc_buf->health_mutex);
+ err = diag_dci_get_buffer(client, data_source, total_len);
+ if (err) {
+ if (err == -ENOMEM)
+ proc_buf->health.dropped_logs++;
+ else
+ pr_err("diag: In %s, invalid packet\n", __func__);
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+ return;
+ }
+
+ data_buffer = proc_buf->buf_curr;
+ proc_buf->health.received_logs++;
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+
+ mutex_lock(&data_buffer->data_mutex);
+ if (!data_buffer->data) {
+ mutex_unlock(&data_buffer->data_mutex);
+ return;
+ }
+
+ *(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+ log_length);
+ data_buffer->data_len += log_length;
+ data_buffer->data_source = data_source;
+ mutex_unlock(&data_buffer->data_mutex);
+}
+
+void extract_dci_log(unsigned char *buf, int len, int data_source)
+{
+ uint16_t log_code, read_bytes = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
+ return;
+ }
+
+	/* The first six bytes of the incoming log packet contain the
+	 * command code (2), the length of the packet (2) and the length
+ * of the log (2)
+ */
+ log_code = *(uint16_t *)(buf + 6);
+ read_bytes += sizeof(uint16_t) + 6;
+ if (read_bytes > len) {
+ pr_err("diag: Invalid length in %s, len: %d, read: %d",
+ __func__, len, read_bytes);
return;
}
/* parse through log mask table of each client and check mask */
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client) {
- entry = &(driver->dci_client_tbl[i]);
- log_mask_ptr = entry->dci_log_mask;
- if (!log_mask_ptr)
- return;
- log_mask_ptr = log_mask_ptr + byte_offset;
- mutex_lock(&dci_health_mutex);
- mutex_lock(&entry->data_mutex);
- if (*log_mask_ptr & byte_mask) {
- pr_debug("\t log code %x needed by client %d",
- log_code, entry->client->tgid);
- /* copy to client buffer */
- if (DCI_CHK_CAPACITY(entry,
- 4 + *(uint16_t *)(buf + 2))) {
- pr_err("diag: DCI log drop\n");
- driver->dci_client_tbl[i].
- dropped_logs++;
- mutex_unlock(
- &entry->data_mutex);
- mutex_unlock(
- &dci_health_mutex);
- return;
- }
- driver->dci_client_tbl[i].received_logs++;
- *(int *)(entry->dci_data+entry->data_len) =
- DCI_LOG_TYPE;
- memcpy(entry->dci_data + entry->data_len + 4,
- buf + 4, log_length);
- entry->data_len += 4 + log_length;
- }
- mutex_unlock(&entry->data_mutex);
- mutex_unlock(&dci_health_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (__diag_dci_query_log_mask(entry, log_code)) {
+ pr_debug("\t log code %x needed by client %d",
+ log_code, entry->client->tgid);
+ /* copy to client buffer */
+ copy_dci_log(buf, len, entry, data_source);
}
}
}
@@ -459,14 +918,10 @@
uint8_t *client_log_mask_ptr;
uint8_t *log_mask_ptr;
int ret;
- int index = smd_info->peripheral;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
- /* Update the peripheral(s) with the dci log and event masks */
-
- /* If the cntl channel is not up, we can't update logs and events */
- if (!driver->smd_cntl[index].ch)
- return;
-
+ /* Update apps and peripheral(s) with the dci log and event masks */
memset(dirty_bits, 0, 16 * sizeof(uint8_t));
/*
@@ -474,15 +929,13 @@
* which log entries in the cumulative logs that need
* to be updated on the peripheral.
*/
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client) {
- client_log_mask_ptr =
- driver->dci_client_tbl[i].dci_log_mask;
- for (j = 0; j < 16; j++) {
- if (*(client_log_mask_ptr+1))
- dirty_bits[j] = 1;
- client_log_mask_ptr += 514;
- }
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ client_log_mask_ptr = entry->dci_log_mask;
+ for (j = 0; j < 16; j++) {
+ if (*(client_log_mask_ptr+1))
+ dirty_bits[j] = 1;
+ client_log_mask_ptr += 514;
}
}
@@ -497,41 +950,48 @@
}
mutex_unlock(&dci_log_mask_mutex);
- ret = diag_send_dci_log_mask(&driver->smd_cntl[index]);
+ /* Send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* Send updated log mask to peripherals */
+ ret = diag_send_dci_log_mask();
- ret = diag_send_dci_event_mask(&driver->smd_cntl[index]);
+ /* Send updated event mask to userspace clients */
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* Send updated event mask to peripheral */
+ ret = diag_send_dci_event_mask();
smd_info->notify_context = 0;
}
void diag_dci_notify_client(int peripheral_mask, int data)
{
- int i, stat;
+ int stat;
struct siginfo info;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
memset(&info, 0, sizeof(struct siginfo));
info.si_code = SI_QUEUE;
info.si_int = (peripheral_mask | data);
/* Notify the DCI process that the peripheral DCI Channel is up */
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (!driver->dci_client_tbl[i].client)
- continue;
- if (driver->dci_client_tbl[i].list & peripheral_mask) {
- info.si_signo = driver->dci_client_tbl[i].signal_type;
- stat = send_sig_info(
- driver->dci_client_tbl[i].signal_type,
- &info, driver->dci_client_tbl[i].client);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.notification_list & peripheral_mask) {
+ info.si_signo = entry->client_info.signal_type;
+ stat = send_sig_info(entry->client_info.signal_type,
+ &info, entry->client);
if (stat)
pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
- info.si_int, stat);
+ info.si_int, stat);
}
- } /* end of loop for all DCI clients */
+ }
}
-static int diag_send_dci_pkt(struct diag_master_table entry, unsigned char *buf,
- int len, int tag)
+static int diag_send_dci_pkt(struct diag_master_table entry,
+ unsigned char *buf, int len, int tag)
{
- int i, status = 0;
+ int i, status = DIAG_DCI_NO_ERROR;
unsigned int read_len = 0;
/* The first 4 bytes is the uid tag and the next four bytes is
@@ -565,48 +1025,289 @@
mutex_unlock(&driver->dci_mutex);
return -EIO;
}
-
- for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
- struct diag_smd_info *smd_info = driver->separate_cmdrsp[i] ?
- &driver->smd_dci_cmd[i] :
- &driver->smd_dci[i];
- if (entry.client_id == smd_info->peripheral) {
- if (smd_info->ch) {
- mutex_lock(&smd_info->smd_ch_mutex);
- smd_write(smd_info->ch,
- driver->apps_dci_buf, len + 10);
- mutex_unlock(&smd_info->smd_ch_mutex);
- status = DIAG_DCI_NO_ERROR;
- }
- break;
- }
+ /* This command is registered locally on the Apps */
+ if (entry.client_id == APPS_DATA) {
+ driver->dci_pkt_length = len + 10;
+ diag_update_pkt_buffer(driver->apps_dci_buf, DCI_PKT_TYPE);
+ diag_update_sleeping_process(entry.process_id, DCI_PKT_TYPE);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_ERROR;
}
- if (status != DIAG_DCI_NO_ERROR) {
- pr_alert("diag: check DCI channel\n");
+ for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
+ if (entry.client_id == i) {
+ status = 1;
+ break;
+ }
+
+ if (status) {
+ status = diag_dci_write_proc(entry.client_id,
+ DIAG_DATA_TYPE,
+ driver->apps_dci_buf,
+ len + 10);
+ } else {
+ pr_err("diag: Cannot send packet to peripheral %d",
+ entry.client_id);
status = DIAG_DCI_SEND_DATA_FAIL;
}
mutex_unlock(&driver->dci_mutex);
return status;
}
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+ unsigned char *req_buf, int tag)
+{
+ uint8_t cmd_code, subsys_id, i, goto_download = 0;
+ uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+ uint16_t ss_cmd_code;
+ uint32_t write_len = 0;
+ unsigned char *dest_buf = driver->apps_dci_buf;
+ unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+ struct diag_dci_pkt_header_t dci_header;
+
+ if (!pkt_header || !req_buf || tag < 0)
+ return -EIO;
+
+ cmd_code = pkt_header->cmd_code;
+ subsys_id = pkt_header->subsys_id;
+ ss_cmd_code = pkt_header->subsys_cmd_code;
+
+ if (cmd_code == DIAG_CMD_DOWNLOAD) {
+ *payload_ptr = DIAG_CMD_DOWNLOAD;
+ write_len = sizeof(uint8_t);
+ goto_download = 1;
+ goto fill_buffer;
+ } else if (cmd_code == DIAG_CMD_VERSION) {
+ if (chk_polling_response()) {
+ for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+ *(payload_ptr) = 0;
+ goto fill_buffer;
+ }
+ } else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+ if (chk_polling_response()) {
+ *payload_ptr = DIAG_CMD_EXT_BUILD;
+ write_len = sizeof(uint8_t);
+ payload_ptr += sizeof(uint8_t);
+ for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+ *(payload_ptr) = 0;
+ *(int *)(payload_ptr) = chk_config_get_id();
+ write_len += sizeof(int);
+ goto fill_buffer;
+ }
+ } else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+ if (driver->log_on_demand_support) {
+ *payload_ptr = DIAG_CMD_LOG_ON_DMND;
+ write_len = sizeof(uint8_t);
+ payload_ptr += sizeof(uint8_t);
+ *(uint16_t *)(payload_ptr) = *(uint16_t *)(req_buf + 1);
+ write_len += sizeof(uint16_t);
+ payload_ptr += sizeof(uint16_t);
+ *payload_ptr = 0x1; /* Unknown */
+ write_len += sizeof(uint8_t);
+ goto fill_buffer;
+ }
+ } else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+ return DIAG_DCI_TABLE_ERR;
+ }
+
+ if (subsys_id == DIAG_SS_DIAG) {
+ if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(uint32_t *)(payload_ptr + write_len) = PKT_SIZE;
+ write_len += sizeof(uint32_t);
+ } else if (ss_cmd_code == DIAG_DIAG_STM) {
+ write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+ }
+ } else if (subsys_id == DIAG_SS_PARAMS) {
+ if (ss_cmd_code == DIAG_DIAG_POLL) {
+ if (chk_polling_response()) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ payload_ptr += write_len;
+ for (i = 0; i < 12; i++, write_len++) {
+ *(payload_ptr) = 0;
+ payload_ptr++;
+ }
+ }
+ } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(int *)(payload_ptr + write_len) = wrap_enabled;
+ write_len += sizeof(int);
+ } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+ wrap_enabled = true;
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(uint16_t *)(payload_ptr + write_len) = wrap_count;
+ write_len += sizeof(uint16_t);
+ }
+ }
+
+fill_buffer:
+ if (write_len > 0) {
+		/* Check if we are within the range of the buffer */
+ if (write_len + header_len > PKT_SIZE) {
+ pr_err("diag: In %s, invalid length %d\n", __func__,
+ write_len + header_len);
+ return -ENOMEM;
+ }
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /*
+ * Length of the rsp pkt = actual data len + pkt rsp code
+ * (uint8_t) + tag (int)
+ */
+ dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+ dci_header.pkt_code = DCI_PKT_RSP_CODE;
+ dci_header.tag = tag;
+ driver->in_busy_dcipktdata = 1;
+ memcpy(dest_buf, &dci_header, header_len);
+ diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+ dci_header.len);
+ driver->in_busy_dcipktdata = 0;
+
+ if (goto_download) {
+ /*
+ * Sleep for sometime so that the response reaches the
+ * client. The value 5000 empirically as an optimum
+ * time for the response to reach the client.
+ */
+ usleep_range(5000, 5100);
+ /* call download API */
+ msm_set_restart_mode(RESTART_DLOAD);
+ pr_alert("diag: download mode set, Rebooting SoC..\n");
+ kernel_restart(NULL);
+ }
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ return DIAG_DCI_TABLE_ERR;
+}
+
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+ int req_uid, ret = DIAG_DCI_TABLE_ERR, i;
+ struct diag_pkt_header_t *header = NULL;
+ unsigned char *temp = buf;
+ unsigned char *req_buf = NULL;
+ uint8_t retry_count = 0, max_retries = 3, found = 0;
+ uint32_t read_len = 0;
+ struct diag_master_table entry;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+
+ if (!buf)
+ return -EIO;
+
+ if (len < DCI_PKT_REQ_MIN_LEN || len > USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
+ return -EIO;
+ }
+
+ req_uid = *(int *)temp; /* UID of the request */
+ temp += sizeof(int);
+ req_buf = temp; /* Start of the Request */
+ header = (struct diag_pkt_header_t *)temp;
+ temp += sizeof(struct diag_pkt_header_t);
+ read_len = sizeof(int) + sizeof(struct diag_pkt_header_t);
+ if (read_len >= USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length in %s\n", __func__);
+ return -EIO;
+ }
+
+ /* Check if the command is allowed on DCI */
+ if (diag_dci_filter_commands(header)) {
+ pr_debug("diag: command not supported %d %d %d",
+ header->cmd_code, header->subsys_id,
+ header->subsys_cmd_code);
+ return DIAG_DCI_SEND_DATA_FAIL;
+ }
+
+ /*
+ * Previous packet is yet to be consumed by the client. Wait
+ * till the buffer is free.
+ */
+ while (retry_count < max_retries) {
+ retry_count++;
+ if (driver->in_busy_dcipktdata)
+ usleep_range(10000, 10100);
+ else
+ break;
+ }
+ /* The buffer is still busy */
+ if (driver->in_busy_dcipktdata) {
+ pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ /* Register this new DCI packet */
+ req_entry = diag_register_dci_transaction(req_uid);
+ if (!req_entry) {
+ pr_alert("diag: registering new DCI transaction failed\n");
+ return DIAG_DCI_NO_REG;
+ }
+
+ /* Check if it is a dedicated Apps command */
+ ret = diag_dci_process_apps_pkt(header, req_buf, req_entry->tag);
+ if (ret == DIAG_DCI_NO_ERROR || ret < 0)
+ return ret;
+
+ /* Check the registration table for command entries */
+ for (i = 0; i < diag_max_reg && !found; i++) {
+ entry = driver->table[i];
+ if (entry.process_id == NO_PROCESS)
+ continue;
+ if (entry.cmd_code == header->cmd_code &&
+ entry.subsys_id == header->subsys_id &&
+ entry.cmd_code_lo <= header->subsys_cmd_code &&
+ entry.cmd_code_hi >= header->subsys_cmd_code) {
+ ret = diag_send_dci_pkt(entry, buf, len,
+ req_entry->tag);
+ found = 1;
+ } else if (entry.cmd_code == 255 && header->cmd_code == 75) {
+ if (entry.subsys_id == header->subsys_id &&
+ entry.cmd_code_lo <= header->subsys_cmd_code &&
+ entry.cmd_code_hi >= header->subsys_cmd_code) {
+ ret = diag_send_dci_pkt(entry, buf, len,
+ req_entry->tag);
+ found = 1;
+ }
+ } else if (entry.cmd_code == 255 && entry.subsys_id == 255) {
+ if (entry.cmd_code_lo <= header->cmd_code &&
+ entry.cmd_code_hi >= header->cmd_code) {
+ /*
+ * If its a Mode reset command, make sure it is
+ * registered on the Apps Processor
+ */
+ if (entry.cmd_code_lo == MODE_CMD &&
+ entry.cmd_code_hi == MODE_CMD)
+ if (entry.client_id != APPS_DATA)
+ continue;
+ ret = diag_send_dci_pkt(entry, buf, len,
+ req_entry->tag);
+ found = 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
int diag_process_dci_transaction(unsigned char *buf, int len)
{
unsigned char *temp = buf;
- uint16_t subsys_cmd_code, log_code, item_num;
- int subsys_id, cmd_code, ret = -1, found = 0;
- struct diag_master_table entry;
- int count, set_mask, num_codes, bit_index, event_id, offset = 0, i;
+ uint16_t log_code, item_num;
+ int ret = -1, found = 0;
+ int count, set_mask, num_codes, bit_index, event_id, offset = 0;
unsigned int byte_index, read_len = 0;
uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
uint8_t *event_mask_ptr;
- struct dci_pkt_req_entry_t *req_entry = NULL;
-
- if (!driver->smd_dci[MODEM_DATA].ch) {
- pr_err("diag: DCI smd channel for peripheral %d not valid for dci updates\n",
- driver->smd_dci[MODEM_DATA].peripheral);
- return DIAG_DCI_SEND_DATA_FAIL;
- }
+ struct diag_dci_client_tbl *dci_entry = NULL;
if (!temp) {
pr_err("diag: Invalid buffer in %s\n", __func__);
@@ -615,68 +1316,7 @@
/* This is Pkt request/response transaction */
if (*(int *)temp > 0) {
- if (len < DCI_PKT_REQ_MIN_LEN || len > USER_SPACE_DATA) {
- pr_err("diag: dci: Invalid length %d len in %s", len,
- __func__);
- return -EIO;
- }
- /* enter this UID into kernel table and return index */
- req_entry = diag_register_dci_transaction(*(int *)temp);
- if (!req_entry) {
- pr_alert("diag: registering new DCI transaction failed\n");
- return DIAG_DCI_NO_REG;
- }
- temp += sizeof(int);
- /*
- * Check for registered peripheral and fwd pkt to
- * appropriate proc
- */
- cmd_code = (int)(*(char *)temp);
- temp++;
- subsys_id = (int)(*(char *)temp);
- temp++;
- subsys_cmd_code = *(uint16_t *)temp;
- temp += sizeof(uint16_t);
- read_len += sizeof(int) + 2 + sizeof(uint16_t);
- if (read_len >= USER_SPACE_DATA) {
- pr_err("diag: dci: Invalid length in %s\n", __func__);
- return -EIO;
- }
- pr_debug("diag: %d %d %d", cmd_code, subsys_id,
- subsys_cmd_code);
- for (i = 0; i < diag_max_reg; i++) {
- entry = driver->table[i];
- if (entry.process_id != NO_PROCESS) {
- if (entry.cmd_code == cmd_code &&
- entry.subsys_id == subsys_id &&
- entry.cmd_code_lo <= subsys_cmd_code &&
- entry.cmd_code_hi >= subsys_cmd_code) {
- ret = diag_send_dci_pkt(entry, buf,
- len,
- req_entry->tag);
- } else if (entry.cmd_code == 255
- && cmd_code == 75) {
- if (entry.subsys_id == subsys_id &&
- entry.cmd_code_lo <=
- subsys_cmd_code &&
- entry.cmd_code_hi >=
- subsys_cmd_code) {
- ret = diag_send_dci_pkt(entry,
- buf, len,
- req_entry->tag);
- }
- } else if (entry.cmd_code == 255 &&
- entry.subsys_id == 255) {
- if (entry.cmd_code_lo <= cmd_code &&
- entry.cmd_code_hi >=
- cmd_code) {
- ret = diag_send_dci_pkt(entry,
- buf, len,
- req_entry->tag);
- }
- }
- }
- }
+ return diag_process_dci_pkt_rsp(buf, len);
} else if (*(int *)temp == DCI_LOG_TYPE) {
/* Minimum length of a log mask config is 12 + 2 bytes for
atleast one log code to be set or reset */
@@ -684,12 +1324,13 @@
pr_err("diag: dci: Invalid length in %s\n", __func__);
return -EIO;
}
- /* find client id and table */
- i = diag_dci_find_client_index(current->tgid);
- if (i == DCI_CLIENT_INDEX_INVALID) {
- pr_err("diag: dci client not registered/found\n");
+ /* find client table entry */
+ dci_entry = diag_dci_get_client_entry();
+ if (!dci_entry) {
+ pr_err("diag: In %s, invalid client\n", __func__);
return ret;
}
+
/* Extract each log code and put in client table */
temp += sizeof(int);
read_len += sizeof(int);
@@ -706,7 +1347,7 @@
return -EIO;
}
- head_log_mask_ptr = driver->dci_client_tbl[i].dci_log_mask;
+ head_log_mask_ptr = dci_entry->dci_log_mask;
if (!head_log_mask_ptr) {
pr_err("diag: dci: Invalid Log mask pointer in %s\n",
__func__);
@@ -768,8 +1409,10 @@
count++;
ret = DIAG_DCI_NO_ERROR;
}
+ /* send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
/* send updated mask to peripherals */
- ret = diag_send_dci_log_mask(&driver->smd_cntl[MODEM_DATA]);
+ ret = diag_send_dci_log_mask();
} else if (*(int *)temp == DCI_EVENT_TYPE) {
/* Minimum length of a event mask config is 12 + 4 bytes for
atleast one event id to be set or reset. */
@@ -777,10 +1420,10 @@
pr_err("diag: dci: Invalid length in %s\n", __func__);
return -EIO;
}
- /* find client id and table */
- i = diag_dci_find_client_index(current->tgid);
- if (i == DCI_CLIENT_INDEX_INVALID) {
- pr_err("diag: dci client not registered/found\n");
+ /* find client table entry */
+ dci_entry = diag_dci_get_client_entry();
+ if (!dci_entry) {
+ pr_err("diag: In %s, invalid client\n", __func__);
return ret;
}
/* Extract each log code and put in client table */
@@ -802,7 +1445,7 @@
return -EIO;
}
- event_mask_ptr = driver->dci_client_tbl[i].dci_event_mask;
+ event_mask_ptr = dci_entry->dci_event_mask;
if (!event_mask_ptr) {
pr_err("diag: dci: Invalid event mask pointer in %s\n",
__func__);
@@ -839,58 +1482,35 @@
count++;
ret = DIAG_DCI_NO_ERROR;
}
+ /* send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
/* send updated mask to peripherals */
- ret = diag_send_dci_event_mask(&driver->smd_cntl[MODEM_DATA]);
+ ret = diag_send_dci_event_mask();
} else {
pr_alert("diag: Incorrect DCI transaction\n");
}
return ret;
}
-int diag_dci_find_client_index_health(int client_id)
+
+struct diag_dci_client_tbl *diag_dci_get_client_entry()
{
- int i, ret = DCI_CLIENT_INDEX_INVALID;
-
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client != NULL) {
- if (driver->dci_client_tbl[i].client_id ==
- client_id) {
- ret = i;
- break;
- }
- }
- }
- return ret;
-}
-
-int diag_dci_find_client_index(int client_id)
-{
- int i, ret = DCI_CLIENT_INDEX_INVALID;
-
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client != NULL) {
- if (driver->dci_client_tbl[i].client->tgid ==
- client_id) {
- ret = i;
- break;
- }
- }
- }
- return ret;
+ return __diag_dci_get_client_entry(current->tgid);
}
void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask)
{
- int i;
uint8_t *event_mask_ptr;
uint8_t *update_ptr = dci_cumulative_event_mask;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
bool is_set = false;
mutex_lock(&dci_event_mask_mutex);
update_ptr += offset;
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- event_mask_ptr =
- driver->dci_client_tbl[i].dci_event_mask;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ event_mask_ptr = entry->dci_event_mask;
event_mask_ptr += offset;
if ((*event_mask_ptr & byte_mask) == byte_mask) {
is_set = true;
@@ -905,75 +1525,29 @@
mutex_unlock(&dci_event_mask_mutex);
}
-void clear_client_dci_cumulative_event_mask(int client_index)
+void diag_dci_invalidate_cumulative_event_mask()
{
- int i, j;
- uint8_t *update_ptr = dci_cumulative_event_mask;
- uint8_t *event_mask_ptr, *client_event_mask_ptr, byte_mask = 0;
- bool is_set = false;
-
- event_mask_ptr =
- (driver->dci_client_tbl[client_index].dci_event_mask);
+ int i = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ uint8_t *update_ptr, *event_mask_ptr;
+ update_ptr = dci_cumulative_event_mask;
mutex_lock(&dci_event_mask_mutex);
- for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
- is_set = false;
- /* Already cleared event masks need not to be considered */
- if (*event_mask_ptr != 0) {
- byte_mask = *event_mask_ptr;
- } else {
- update_ptr++;
- event_mask_ptr++;
- continue;
- }
- for (j = 0; j < MAX_DCI_CLIENTS; j++) {
- /* continue searching for valid client */
- if (driver->dci_client_tbl[j].client == NULL ||
- client_index == j)
- continue;
- client_event_mask_ptr =
- (driver->dci_client_tbl[j].dci_event_mask);
- client_event_mask_ptr += i;
- if (*client_event_mask_ptr & byte_mask) {
- /*
- * Break if another client has same
- * event mask set
- */
- if ((*client_event_mask_ptr &
- byte_mask) == byte_mask) {
- is_set = true;
- break;
- } else {
- byte_mask =
- (~(*client_event_mask_ptr) &
- byte_mask);
- is_set = false;
- }
- }
- }
- /*
- * Clear only if this client has event mask set else
- * don't update cumulative event mask ptr
- */
- if (is_set == false)
- *update_ptr &= ~byte_mask;
-
- update_ptr++;
- event_mask_ptr++;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ event_mask_ptr = entry->dci_event_mask;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+ *(update_ptr+i) |= *(event_mask_ptr+i);
}
- event_mask_ptr =
- (driver->dci_client_tbl[client_index].dci_event_mask);
- memset(event_mask_ptr, 0, DCI_EVENT_MASK_SIZE);
mutex_unlock(&dci_event_mask_mutex);
}
-
-int diag_send_dci_event_mask(struct diag_smd_info *smd_info)
+int diag_send_dci_event_mask()
{
void *buf = driver->buf_event_mask_update;
int header_size = sizeof(struct diag_ctrl_event_mask);
- int wr_size = -ENOMEM, retry_count = 0, timer;
- int ret = DIAG_DCI_NO_ERROR, i;
+ int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
mutex_lock(&driver->diag_cntl_mutex);
/* send event mask update */
@@ -991,28 +1565,18 @@
}
memcpy(buf, driver->event_mask, header_size);
memcpy(buf+header_size, dci_cumulative_event_mask, DCI_EVENT_MASK_SIZE);
- if (smd_info && smd_info->ch) {
- while (retry_count < 3) {
- mutex_lock(&smd_info->smd_ch_mutex);
- wr_size = smd_write(smd_info->ch, buf,
- header_size + DCI_EVENT_MASK_SIZE);
- mutex_unlock(&smd_info->smd_ch_mutex);
- if (wr_size == -ENOMEM) {
- retry_count++;
- for (timer = 0; timer < 5; timer++)
- udelay(2000);
- } else {
- break;
- }
- }
- if (wr_size != header_size + DCI_EVENT_MASK_SIZE) {
- pr_err("diag: error writing dci event mask %d, tried %d\n",
- wr_size, header_size + DCI_EVENT_MASK_SIZE);
+ for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
+ /*
+ * Don't send to peripheral if its regular channel
+ * is down. It may also mean that the peripheral doesn't
+ * support DCI.
+ */
+ if (!driver->smd_dci[i].ch)
+ continue;
+ err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+ header_size + DCI_EVENT_MASK_SIZE);
+ if (err != DIAG_DCI_NO_ERROR)
ret = DIAG_DCI_SEND_DATA_FAIL;
- }
- } else {
- pr_err("diag: ch not valid for dci event mask update\n");
- ret = DIAG_DCI_SEND_DATA_FAIL;
}
mutex_unlock(&driver->diag_cntl_mutex);
@@ -1026,6 +1590,8 @@
uint8_t *update_ptr = dci_cumulative_log_mask;
uint8_t *log_mask_ptr;
bool is_set = false;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
mutex_lock(&dci_log_mask_mutex);
*update_ptr = 0;
@@ -1037,9 +1603,9 @@
/* update the dirty bit */
*(update_ptr+1) = 1;
update_ptr = update_ptr + byte_index;
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- log_mask_ptr =
- (driver->dci_client_tbl[i].dci_log_mask);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ log_mask_ptr = entry->dci_log_mask;
log_mask_ptr = log_mask_ptr + offset + byte_index;
if ((*log_mask_ptr & byte_mask) == byte_mask) {
is_set = true;
@@ -1055,103 +1621,35 @@
mutex_unlock(&dci_log_mask_mutex);
}
-void clear_client_dci_cumulative_log_mask(int client_index)
+void diag_dci_invalidate_cumulative_log_mask()
{
- int i, j, k;
- uint8_t *update_ptr = dci_cumulative_log_mask;
- uint8_t *log_mask_ptr, *client_log_mask_ptr, byte_mask = 0;
- bool is_set = false;
-
- log_mask_ptr = driver->dci_client_tbl[client_index].dci_log_mask;
+ int i = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ uint8_t *update_ptr, *log_mask_ptr;
+ update_ptr = dci_cumulative_log_mask;
mutex_lock(&dci_log_mask_mutex);
- *update_ptr = 0;
- /* set the equipment IDs */
- for (i = 0; i < 16; i++)
- *(update_ptr + (i*514)) = i;
-
- /* update cumulative log mask ptr*/
- update_ptr += 2;
- log_mask_ptr += 2;
- for (i = 0; i < 16; i++) {
- for (j = 0; j < 512; j++) {
- is_set = false;
- /*
- * Already cleared log masks need
- * not to be considered
- */
- if (*log_mask_ptr != 0) {
- byte_mask = *log_mask_ptr;
- } else {
- update_ptr++;
- log_mask_ptr++;
- continue;
- }
- for (k = 0; k < MAX_DCI_CLIENTS; k++) {
- /* continue searching for valid client */
- if (driver->dci_client_tbl[k].client == NULL ||
- client_index == k)
- continue;
- client_log_mask_ptr =
- (driver->dci_client_tbl[k].dci_log_mask);
- client_log_mask_ptr += (i*514) + 2 + j;
- if (*client_log_mask_ptr & byte_mask) {
- /*
- * Break if another client has same
- * log mask set
- */
- if ((*client_log_mask_ptr &
- byte_mask) == byte_mask) {
- is_set = true;
- break;
- } else {
- byte_mask =
- (~(*client_log_mask_ptr) &
- byte_mask);
- is_set = false;
- }
- }
- }
- /*
- * Clear only if this client has log mask set else
- * don't update cumulative log mask ptr
- */
- if (is_set == false) {
- /*
- * Update the dirty bit for the equipment
- * whose mask is changing
- */
- dci_cumulative_log_mask[1+(i*514)] = 1;
- *update_ptr &= ~byte_mask;
- }
-
- update_ptr++;
- log_mask_ptr++;
- }
- update_ptr += 2;
- log_mask_ptr += 2;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ log_mask_ptr = entry->dci_log_mask;
+ for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+ *(update_ptr+i) |= *(log_mask_ptr+i);
}
- log_mask_ptr = driver->dci_client_tbl[client_index].dci_log_mask;
- memset(log_mask_ptr, 0, DCI_LOG_MASK_SIZE);
mutex_unlock(&dci_log_mask_mutex);
}
-int diag_send_dci_log_mask(struct diag_smd_info *smd_info)
+int diag_send_dci_log_mask()
{
void *buf = driver->buf_log_mask_update;
int header_size = sizeof(struct diag_ctrl_log_mask);
uint8_t *log_mask_ptr = dci_cumulative_log_mask;
- int i, wr_size = -ENOMEM, retry_count = 0, timer;
- int ret = DIAG_DCI_NO_ERROR;
-
- if (!smd_info || !smd_info->ch) {
- pr_err("diag: ch not valid for dci log mask update\n");
- return DIAG_DCI_SEND_DATA_FAIL;
- }
+ int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ int updated;
mutex_lock(&driver->diag_cntl_mutex);
for (i = 0; i < 16; i++) {
- retry_count = 0;
+ updated = 1;
driver->log_mask->cmd_type = DIAG_CTRL_MSG_LOG_MASK;
driver->log_mask->num_items = 512;
driver->log_mask->data_len = 11 + 512;
@@ -1162,31 +1660,26 @@
memcpy(buf, driver->log_mask, header_size);
memcpy(buf+header_size, log_mask_ptr+2, 512);
/* if dirty byte is set and channel is valid */
- if (smd_info->ch && *(log_mask_ptr+1)) {
- while (retry_count < 3) {
- mutex_lock(&smd_info->smd_ch_mutex);
- wr_size = smd_write(smd_info->ch, buf,
- header_size + 512);
- mutex_unlock(&smd_info->smd_ch_mutex);
- if (wr_size == -ENOMEM) {
- retry_count++;
- for (timer = 0; timer < 5; timer++)
- udelay(2000);
- } else
- break;
- }
- if (wr_size != header_size + 512) {
- pr_err("diag: dci log mask update failed %d, tried %d for equip_id %d\n",
- wr_size, header_size + 512,
- driver->log_mask->equip_id);
- ret = DIAG_DCI_SEND_DATA_FAIL;
+ for (j = 0; j < NUM_SMD_DCI_CHANNELS; j++) {
+ /*
+ * Don't send to peripheral if its regular channel
+ * is down. It may also mean that the peripheral
+ * doesn't support DCI.
+ */
+ if (!driver->smd_dci[j].ch)
+ continue;
- } else {
- *(log_mask_ptr+1) = 0; /* clear dirty byte */
- pr_debug("diag: updated dci log equip ID %d\n",
- *log_mask_ptr);
+ if (!(*(log_mask_ptr+1)))
+ continue;
+ err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
+ header_size + DCI_MAX_ITEMS_PER_LOG_CODE);
+ if (err != DIAG_DCI_NO_ERROR) {
+ updated = 0;
+ ret = DIAG_DCI_SEND_DATA_FAIL;
}
}
+ if (updated)
+ *(log_mask_ptr+1) = 0; /* clear dirty byte */
log_mask_ptr += 514;
}
mutex_unlock(&driver->diag_cntl_mutex);
@@ -1198,6 +1691,9 @@
{
uint8_t i; int count = 0;
+ if (!tbl_buf)
+ return;
+
/* create hard coded table for log mask with 16 categories */
for (i = 0; i < 16; i++) {
*(uint8_t *)tbl_buf = i;
@@ -1220,17 +1716,15 @@
if (pdev->id == SMD_APPS_MODEM) {
index = MODEM_DATA;
- err = smd_open("DIAG_2",
+ err = smd_named_open_on_edge("DIAG_2",
+ SMD_APPS_MODEM,
&driver->smd_dci[index].ch,
&driver->smd_dci[index],
diag_smd_notify);
driver->smd_dci[index].ch_save =
driver->smd_dci[index].ch;
- driver->dci_device = &pdev->dev;
- driver->dci_device->power.wakeup = wakeup_source_register
- ("DIAG_DCI_WS");
if (err)
- pr_err("diag: In %s, cannot open DCI port, Id = %d, err: %d\n",
+ pr_err("diag: In %s, cannot open DCI Modem port, Id = %d, err: %d\n",
__func__, pdev->id, err);
}
@@ -1251,11 +1745,8 @@
diag_smd_notify);
driver->smd_dci_cmd[index].ch_save =
driver->smd_dci_cmd[index].ch;
- driver->dci_cmd_device = &pdev->dev;
- driver->dci_cmd_device->power.wakeup = wakeup_source_register
- ("DIAG_DCI_CMD_WS");
if (err)
- pr_err("diag: In %s, cannot open DCI port, Id = %d, err: %d\n",
+ pr_err("diag: In %s, cannot open DCI Modem CMD port, Id = %d, err: %d\n",
__func__, pdev->id, err);
}
@@ -1305,8 +1796,6 @@
driver->dci_tag = 0;
driver->dci_client_id = 0;
driver->num_dci_client = 0;
- driver->dci_device = NULL;
- driver->dci_cmd_device = NULL;
mutex_init(&driver->dci_mutex);
mutex_init(&dci_log_mask_mutex);
mutex_init(&dci_event_mask_mutex);
@@ -1328,19 +1817,17 @@
goto err;
}
}
+
if (driver->apps_dci_buf == NULL) {
driver->apps_dci_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL);
if (driver->apps_dci_buf == NULL)
goto err;
}
- if (driver->dci_client_tbl == NULL) {
- driver->dci_client_tbl = kzalloc(MAX_DCI_CLIENTS *
- sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
- if (driver->dci_client_tbl == NULL)
- goto err;
- }
- driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+ INIT_LIST_HEAD(&driver->dci_client_list);
INIT_LIST_HEAD(&driver->dci_req_list);
+
+ driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+ INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
success = platform_driver_register(&msm_diag_dci_driver);
if (success) {
pr_err("diag: Could not register DCI driver\n");
@@ -1353,10 +1840,10 @@
goto err;
}
}
+ setup_timer(&dci_drain_timer, dci_drain_data, 0);
return DIAG_DCI_NO_ERROR;
err:
pr_err("diag: Could not initialize diag DCI buffers");
- kfree(driver->dci_client_tbl);
kfree(driver->apps_dci_buf);
for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
diag_smd_destructor(&driver->smd_dci[i]);
@@ -1383,21 +1870,12 @@
platform_driver_unregister(&msm_diag_dci_driver);
- if (driver->dci_client_tbl) {
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- kfree(driver->dci_client_tbl[i].dci_data);
- mutex_destroy(&driver->dci_client_tbl[i].data_mutex);
- }
- }
-
if (driver->supports_separate_cmdrsp) {
for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
diag_smd_destructor(&driver->smd_dci_cmd[i]);
platform_driver_unregister(&msm_diag_dci_cmd_driver);
}
-
- kfree(driver->dci_client_tbl);
kfree(driver->apps_dci_buf);
mutex_destroy(&driver->dci_mutex);
mutex_destroy(&dci_log_mask_mutex);
@@ -1408,153 +1886,483 @@
int diag_dci_clear_log_mask()
{
- int i, j, k, err = DIAG_DCI_NO_ERROR;
+ int j, k, err = DIAG_DCI_NO_ERROR;
uint8_t *log_mask_ptr, *update_ptr;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
- i = diag_dci_find_client_index(current->tgid);
- if (i == DCI_CLIENT_INDEX_INVALID)
+ entry = diag_dci_get_client_entry();
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
return DIAG_DCI_TABLE_ERR;
+ }
mutex_lock(&dci_log_mask_mutex);
- create_dci_log_mask_tbl(
- driver->dci_client_tbl[i].dci_log_mask);
- memset(dci_cumulative_log_mask,
- 0x0, DCI_LOG_MASK_SIZE);
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ create_dci_log_mask_tbl(entry->dci_log_mask);
+ memset(dci_cumulative_log_mask, 0x0, DCI_LOG_MASK_SIZE);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
update_ptr = dci_cumulative_log_mask;
- if (driver->dci_client_tbl[i].client) {
- log_mask_ptr =
- driver->dci_client_tbl[i].dci_log_mask;
- for (j = 0; j < 16; j++) {
- *update_ptr = j;
- *(update_ptr + 1) = 1;
- update_ptr += 2;
- log_mask_ptr += 2;
- for (k = 0; k < 513; k++) {
- *update_ptr |= *log_mask_ptr;
- update_ptr++;
- log_mask_ptr++;
- }
+ log_mask_ptr = entry->dci_log_mask;
+ for (j = 0; j < 16; j++) {
+ *update_ptr = j;
+ *(update_ptr + 1) = 1;
+ update_ptr += 2;
+ log_mask_ptr += 2;
+ for (k = 0; k < 513; k++) {
+ *update_ptr |= *log_mask_ptr;
+ update_ptr++;
+ log_mask_ptr++;
}
}
}
mutex_unlock(&dci_log_mask_mutex);
- err = diag_send_dci_log_mask(&driver->smd_cntl[MODEM_DATA]);
+ /* send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* Send updated mask to peripherals */
+ err = diag_send_dci_log_mask();
return err;
}
int diag_dci_clear_event_mask()
{
- int i, j, err = DIAG_DCI_NO_ERROR;
+ int j, err = DIAG_DCI_NO_ERROR;
uint8_t *event_mask_ptr, *update_ptr;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
- i = diag_dci_find_client_index(current->tgid);
- if (i == DCI_CLIENT_INDEX_INVALID)
+ entry = diag_dci_get_client_entry();
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
return DIAG_DCI_TABLE_ERR;
+ }
mutex_lock(&dci_event_mask_mutex);
- memset(driver->dci_client_tbl[i].dci_event_mask,
- 0x0, DCI_EVENT_MASK_SIZE);
- memset(dci_cumulative_event_mask,
- 0x0, DCI_EVENT_MASK_SIZE);
+ memset(entry->dci_event_mask, 0x0, DCI_EVENT_MASK_SIZE);
+ memset(dci_cumulative_event_mask, 0x0, DCI_EVENT_MASK_SIZE);
update_ptr = dci_cumulative_event_mask;
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- event_mask_ptr =
- driver->dci_client_tbl[i].dci_event_mask;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ event_mask_ptr = entry->dci_event_mask;
for (j = 0; j < DCI_EVENT_MASK_SIZE; j++)
*(update_ptr + j) |= *(event_mask_ptr + j);
}
mutex_unlock(&dci_event_mask_mutex);
- err = diag_send_dci_event_mask(&driver->smd_cntl[MODEM_DATA]);
+ /* send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* Send updated mask to peripherals */
+ err = diag_send_dci_event_mask();
return err;
}
int diag_dci_query_log_mask(uint16_t log_code)
{
- uint16_t item_num;
- uint8_t equip_id, *log_mask_ptr, byte_mask;
- int i, byte_index, offset;
-
- equip_id = LOG_GET_EQUIP_ID(log_code);
- item_num = LOG_GET_ITEM_NUM(log_code);
- byte_index = item_num/8 + 2;
- byte_mask = 0x01 << (item_num % 8);
- offset = equip_id * 514;
-
- i = diag_dci_find_client_index(current->tgid);
- if (i != DCI_CLIENT_INDEX_INVALID) {
- log_mask_ptr = driver->dci_client_tbl[i].dci_log_mask;
- log_mask_ptr = log_mask_ptr + offset + byte_index;
- return ((*log_mask_ptr & byte_mask) == byte_mask) ?
- 1 : 0;
- }
- return 0;
+ return __diag_dci_query_log_mask(diag_dci_get_client_entry(),
+ log_code);
}
-
int diag_dci_query_event_mask(uint16_t event_id)
{
- uint8_t *event_mask_ptr, byte_mask;
- int i, byte_index, bit_index;
- byte_index = event_id/8;
- bit_index = event_id % 8;
- byte_mask = 0x1 << bit_index;
-
- i = diag_dci_find_client_index(current->tgid);
- if (i != DCI_CLIENT_INDEX_INVALID) {
- event_mask_ptr =
- driver->dci_client_tbl[i].dci_event_mask;
- event_mask_ptr = event_mask_ptr + byte_index;
- if ((*event_mask_ptr & byte_mask) == byte_mask)
- return 1;
- else
- return 0;
- }
- return 0;
+ return __diag_dci_query_event_mask(diag_dci_get_client_entry(),
+ event_id);
}
uint8_t diag_dci_get_cumulative_real_time()
{
- uint8_t real_time = MODE_NONREALTIME, i;
- for (i = 0; i < MAX_DCI_CLIENTS; i++)
- if (driver->dci_client_tbl[i].client &&
- driver->dci_client_tbl[i].real_time ==
- MODE_REALTIME) {
+ uint8_t real_time = MODE_NONREALTIME;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->real_time == MODE_REALTIME) {
real_time = 1;
break;
}
+ }
return real_time;
}
-int diag_dci_set_real_time(int client_id, uint8_t real_time)
+int diag_dci_set_real_time(uint8_t real_time)
{
- int i = DCI_CLIENT_INDEX_INVALID;
- i = diag_dci_find_client_index(client_id);
-
- if (i != DCI_CLIENT_INDEX_INVALID)
- driver->dci_client_tbl[i].real_time = real_time;
- return i;
-}
-
-void diag_dci_try_activate_wakeup_source(smd_channel_t *channel)
-{
- spin_lock_irqsave(&ws_lock, ws_lock_flags);
- if (channel == driver->smd_dci[MODEM_DATA].ch) {
- pm_wakeup_event(driver->dci_device, DCI_WAKEUP_TIMEOUT);
- pm_stay_awake(driver->dci_device);
- } else if (channel == driver->smd_dci_cmd[MODEM_DATA].ch) {
- pm_wakeup_event(driver->dci_cmd_device, DCI_WAKEUP_TIMEOUT);
- pm_stay_awake(driver->dci_cmd_device);
+ struct diag_dci_client_tbl *entry = NULL;
+ entry = diag_dci_get_client_entry();
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
}
+ entry->real_time = real_time;
+ return 1;
+}
+
+void diag_dci_try_activate_wakeup_source()
+{
+ spin_lock_irqsave(&ws_lock, ws_lock_flags);
+ pm_wakeup_event(driver->diag_dev, DCI_WAKEUP_TIMEOUT);
+ pm_stay_awake(driver->diag_dev);
spin_unlock_irqrestore(&ws_lock, ws_lock_flags);
}
-void diag_dci_try_deactivate_wakeup_source(smd_channel_t *channel)
+void diag_dci_try_deactivate_wakeup_source()
{
spin_lock_irqsave(&ws_lock, ws_lock_flags);
- if (channel == driver->smd_dci[MODEM_DATA].ch)
- pm_relax(driver->dci_device);
- else if (channel == driver->smd_dci_cmd[MODEM_DATA].ch)
- pm_relax(driver->dci_cmd_device);
+ pm_relax(driver->diag_dev);
spin_unlock_irqrestore(&ws_lock, ws_lock_flags);
}
+
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+	int i, err = 0;
+	struct diag_dci_client_tbl *new_entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+	if (!reg_entry)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->dci_state == DIAG_DCI_NO_REG)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+		return DIAG_DCI_NO_REG;
+
+	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+	if (new_entry == NULL) {
+		pr_err("diag: unable to alloc memory\n");
+		return DIAG_DCI_NO_REG;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	if (!(driver->num_dci_client)) {
+		for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
+			driver->smd_dci[i].in_busy_1 = 0;
+		if (driver->supports_separate_cmdrsp)
+			for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
+				driver->smd_dci_cmd[i].in_busy_1 = 0;
+	}
+
+	new_entry->client = current;
+	new_entry->client_info.notification_list =
+				reg_entry->notification_list;
+	new_entry->client_info.signal_type =
+				reg_entry->signal_type;
+	new_entry->real_time = MODE_REALTIME;
+	new_entry->in_service = 0;
+	INIT_LIST_HEAD(&new_entry->list_write_buf);
+	mutex_init(&new_entry->write_buf_mutex);
+	new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_log_mask) {
+		pr_err("diag: Unable to create log mask for client, %d",
+			driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_log_mask_tbl(new_entry->dci_log_mask);
+
+	new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_event_mask) {
+		pr_err("diag: Unable to create event mask for client, %d",
+			driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+	for (i = 0; i < NUM_DCI_PROC; i++) {
+		proc_buf = &new_entry->buffers[i];
+		mutex_init(&proc_buf->health_mutex);
+		mutex_init(&proc_buf->buf_mutex);
+		proc_buf->health.dropped_events = 0;
+		proc_buf->health.dropped_logs = 0;
+		proc_buf->health.received_events = 0;
+		proc_buf->health.received_logs = 0;
+		proc_buf->buf_primary = kzalloc(
+					sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_primary)
+			goto fail_alloc;
+		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_cmd)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_primary,
+					DCI_BUF_PRIMARY);
+		if (err)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+		if (err)
+			goto fail_alloc;
+		proc_buf->buf_curr = proc_buf->buf_primary;
+	}
+
+	list_add_tail(&new_entry->track, &driver->dci_client_list);
+	driver->dci_client_id++;
+	new_entry->client_info.client_id = driver->dci_client_id;
+	reg_entry->client_id = driver->dci_client_id;
+	driver->num_dci_client++;
+	if (driver->num_dci_client == 1)
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP);
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	mutex_unlock(&driver->dci_mutex);
+
+	return driver->dci_client_id;
+
+fail_alloc:
+	if (new_entry) {
+		for (i = 0; i < NUM_DCI_PROC; i++) {
+			proc_buf = &new_entry->buffers[i];
+			mutex_destroy(&proc_buf->health_mutex);
+			if (proc_buf->buf_primary) {
+				mutex_destroy(&proc_buf->buf_primary->data_mutex);
+				kfree(proc_buf->buf_primary->data);
+			}
+			kfree(proc_buf->buf_primary);
+			if (proc_buf->buf_cmd) {
+				mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+				kfree(proc_buf->buf_cmd->data);
+			}
+			kfree(proc_buf->buf_cmd);
+		}
+		kfree(new_entry->dci_event_mask);
+		kfree(new_entry->dci_log_mask);
+	}
+	kfree(new_entry);
+	mutex_unlock(&driver->dci_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+int diag_dci_deinit_client()
+{
+	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_client_tbl *entry = diag_dci_get_client_entry();
+	struct diag_dci_buffer_t *buf_entry, *temp;
+	struct list_head *start, *req_temp;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	mutex_lock(&driver->dci_mutex);
+	/*
+	 * Remove the entry from the list before freeing the buffers
+	 * to ensure that we don't have any invalid access.
+	 */
+	list_del(&entry->track);
+	driver->num_dci_client--;
+	/*
+	 * Clear the client's log and event masks, update the cumulative
+	 * masks and send the masks to peripherals
+	 */
+	kfree(entry->dci_log_mask);
+	diag_dci_invalidate_cumulative_log_mask();
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	ret = diag_send_dci_log_mask();
+	if (ret != DIAG_DCI_NO_ERROR) {
+		mutex_unlock(&driver->dci_mutex);
+		return ret;
+	}
+	kfree(entry->dci_event_mask);
+	diag_dci_invalidate_cumulative_event_mask();
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	ret = diag_send_dci_event_mask();
+	if (ret != DIAG_DCI_NO_ERROR) {
+		mutex_unlock(&driver->dci_mutex);
+		return ret;
+	}
+
+	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+				       track);
+		if (req_entry->pid == current->tgid) {
+			list_del(&req_entry->track);
+			kfree(req_entry);
+		}
+	}
+
+	/* Clean up any buffer that is pending write */
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+							buf_track) {
+		list_del(&buf_entry->buf_track);
+		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			kfree(buf_entry);
+		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
+			peripheral = buf_entry->data_source;
+			if (peripheral == APPS_DATA)
+				continue;
+			mutex_lock(&buf_entry->data_mutex);
+			smd_info = driver->separate_cmdrsp[peripheral] ?
+					&driver->smd_dci_cmd[peripheral] :
+					&driver->smd_dci[peripheral];
+			smd_info->in_busy_1 = 0;
+			mutex_unlock(&buf_entry->data_mutex);
+		}
+		diag_dci_try_deactivate_wakeup_source();
+	}
+	mutex_unlock(&entry->write_buf_mutex);
+
+	for (i = 0; i < NUM_DCI_PROC; i++) {
+		proc_buf = &entry->buffers[i];
+		buf_entry = proc_buf->buf_curr;
+		mutex_lock(&proc_buf->buf_mutex);
+		/* Clean up secondary buffer from mempool that is active */
+		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			mutex_destroy(&buf_entry->data_mutex);
+			kfree(buf_entry);
+		}
+
+		mutex_lock(&proc_buf->buf_primary->data_mutex);
+		kfree(proc_buf->buf_primary->data);
+		mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+		mutex_lock(&proc_buf->buf_cmd->data_mutex);
+		kfree(proc_buf->buf_cmd->data);
+		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+		mutex_destroy(&proc_buf->health_mutex);
+		mutex_destroy(&proc_buf->buf_primary->data_mutex);
+		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+		kfree(proc_buf->buf_primary);
+		kfree(proc_buf->buf_cmd);
+		mutex_unlock(&proc_buf->buf_mutex);
+	}
+	mutex_destroy(&entry->write_buf_mutex);
+
+	kfree(entry);
+
+	if (driver->num_dci_client == 0) {
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN);
+	} else {
+		real_time = diag_dci_get_cumulative_real_time();
+		diag_update_real_time_vote(DIAG_PROC_DCI, real_time);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+	mutex_unlock(&driver->dci_mutex);
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+int diag_dci_write_proc(int peripheral, int pkt_type, char *buf, int len)
+{
+	struct diag_smd_info *smd_info = NULL;
+	int wr_size = 0, retry = 0, err = -EAGAIN, timer = 0, i;
+
+	if (!buf || (peripheral < 0 || peripheral >= NUM_SMD_DCI_CHANNELS)
+		|| len < 0) {
+		pr_err("diag: In %s, invalid data 0x%p, peripheral: %d, len: %d\n",
+			__func__, buf, peripheral, len);
+		return -EINVAL;
+	}
+
+	if (pkt_type == DIAG_DATA_TYPE) {
+		for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
+			if (peripheral == i)
+				smd_info = &driver->smd_dci_cmd[peripheral];
+		/*
+		 * This peripheral doesn't support separate channel for
+		 * command response.
+		 */
+		if (!smd_info)
+			smd_info = &driver->smd_dci[peripheral];
+	} else if (pkt_type == DIAG_CNTL_TYPE) {
+		smd_info = &driver->smd_cntl[peripheral];
+	} else {
+		pr_err("diag: Invalid DCI pkt type in %s", __func__);
+		return -EINVAL;
+	}
+
+	if (!smd_info || !smd_info->ch)
+		return -EINVAL;
+
+	while (retry < 3) {
+		mutex_lock(&smd_info->smd_ch_mutex);
+		wr_size = smd_write(smd_info->ch, buf, len);
+		if (wr_size == len) {
+			pr_debug("diag: successfully wrote pkt_type %d of len %d to %d in trial %d",
+				 pkt_type, len, peripheral, (retry+1));
+			err = DIAG_DCI_NO_ERROR;
+			mutex_unlock(&smd_info->smd_ch_mutex);
+			break;
+		}
+		pr_debug("diag: cannot write pkt_type %d of len %d to %d in trial %d",
+			 pkt_type, len, peripheral, (retry+1));
+		retry++;
+		mutex_unlock(&smd_info->smd_ch_mutex);
+
+		/*
+		 * Sleep for sometime before retrying. The delay of 2000 was
+		 * determined empirically as best value to use.
+		 */
+		for (timer = 0; timer < 5; timer++)
+			usleep(2000);
+	}
+	return err;
+}
+
+int diag_dci_copy_health_stats(struct diag_dci_health_stats *stats, int proc)
+{
+ struct diag_dci_client_tbl *entry = NULL;
+ struct diag_dci_health_t *health = NULL;
+ int i;
+
+ if (!stats)
+ return -EINVAL;
+
+ if (proc < ALL_PROC || proc > APPS_DATA)
+ return -EINVAL;
+
+ entry = diag_dci_get_client_entry();
+ if (!entry)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ stats->stats.dropped_logs = 0;
+ stats->stats.dropped_events = 0;
+ stats->stats.received_logs = 0;
+ stats->stats.received_events = 0;
+
+ if (proc != ALL_PROC) {
+ health = &entry->buffers[proc].health;
+ stats->stats.dropped_logs = health->dropped_logs;
+ stats->stats.dropped_events = health->dropped_events;
+ stats->stats.received_logs = health->received_logs;
+ stats->stats.received_events = health->received_events;
+ if (stats->reset_status) {
+ mutex_lock(&entry->buffers[proc].health_mutex);
+ health->dropped_logs = 0;
+ health->dropped_events = 0;
+ health->received_logs = 0;
+ health->received_events = 0;
+ mutex_unlock(&entry->buffers[proc].health_mutex);
+ }
+ return DIAG_DCI_NO_ERROR;
+ }
+
+
+ for (i = 0; i < NUM_DCI_PROC; i++) {
+ health = &entry->buffers[i].health;
+ stats->stats.dropped_logs += health->dropped_logs;
+ stats->stats.dropped_events += health->dropped_events;
+ stats->stats.received_logs += health->received_logs;
+ stats->stats.received_events += health->received_events;
+ if (stats->reset_status) {
+ mutex_lock(&entry->buffers[i].health_mutex);
+ health->dropped_logs = 0;
+ health->dropped_events = 0;
+ health->received_logs = 0;
+ health->received_events = 0;
+ mutex_unlock(&entry->buffers[i].health_mutex);
+ }
+ }
+ return DIAG_DCI_NO_ERROR;
+}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 870b0f3..ccd1a71 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -28,6 +28,13 @@
#define DCI_LOG_CON_MIN_LEN 14
#define DCI_EVENT_CON_MIN_LEN 16
+#define DIAG_DATA_TYPE 1
+#define DIAG_CNTL_TYPE 2
+
+#define DCI_BUF_PRIMARY 1
+#define DCI_BUF_SECONDARY 2
+#define DCI_BUF_CMD 3
+
#ifdef CONFIG_DEBUG_FS
#define DIAG_DCI_DEBUG_CNT 100
#define DIAG_DCI_DEBUG_LEN 100
@@ -46,6 +53,8 @@
extern unsigned int dci_max_reg;
extern unsigned int dci_max_clients;
+extern unsigned char dci_cumulative_log_mask[DCI_LOG_MASK_SIZE];
+extern unsigned char dci_cumulative_event_mask[DCI_EVENT_MASK_SIZE];
extern struct mutex dci_health_mutex;
struct dci_pkt_req_entry_t {
@@ -55,34 +64,63 @@
struct list_head track;
} __packed;
-struct diag_dci_client_tbl {
+struct diag_dci_reg_tbl_t {
uint32_t client_id;
- struct task_struct *client;
- uint16_t list; /* bit mask */
+ uint16_t notification_list;
int signal_type;
- unsigned char dci_log_mask[DCI_LOG_MASK_SIZE];
- unsigned char dci_event_mask[DCI_EVENT_MASK_SIZE];
- unsigned char *dci_data;
- int data_len;
- int total_capacity;
- int dropped_logs;
- int dropped_events;
- int received_logs;
- int received_events;
- struct mutex data_mutex;
- uint8_t real_time;
};
-/* This is used for DCI health stats */
-struct diag_dci_health_stats {
- int client_id;
+struct diag_dci_health_t {
int dropped_logs;
int dropped_events;
int received_logs;
int received_events;
+};
+
+struct diag_dci_buffer_t {
+ unsigned char *data;
+ unsigned int data_len;
+ struct mutex data_mutex;
+ uint8_t in_busy;
+ uint8_t buf_type;
+ int data_source;
+ int capacity;
+ uint8_t in_list;
+ struct list_head buf_track;
+};
+
+struct diag_dci_buf_peripheral_t {
+ struct diag_dci_buffer_t *buf_curr;
+ struct diag_dci_buffer_t *buf_primary;
+ struct diag_dci_buffer_t *buf_cmd;
+ struct diag_dci_health_t health;
+ struct mutex health_mutex;
+ struct mutex buf_mutex;
+};
+
+struct diag_dci_client_tbl {
+ struct diag_dci_reg_tbl_t client_info;
+ struct task_struct *client;
+ unsigned char *dci_log_mask;
+ unsigned char *dci_event_mask;
+ uint8_t real_time;
+ struct list_head track;
+ struct diag_dci_buf_peripheral_t buffers[NUM_DCI_PROC];
+ uint8_t in_service;
+ struct list_head list_write_buf;
+ struct mutex write_buf_mutex;
+};
+
+struct diag_dci_health_stats {
+ struct diag_dci_health_t stats;
int reset_status;
};
+struct diag_dci_health_stats_proc {
+ struct diag_dci_health_stats *health;
+ int proc;
+};
+
/* This is used for querying DCI Log
or Event Mask */
struct diag_log_event_stats {
@@ -90,6 +128,14 @@
int is_set;
};
+struct diag_dci_pkt_header_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t len;
+ uint8_t pkt_code;
+ int tag;
+} __packed;
+
enum {
DIAG_DCI_NO_ERROR = 1001, /* No error */
DIAG_DCI_NO_REG, /* Could not register */
@@ -106,6 +152,7 @@
unsigned long iteration;
int data_size;
char time_stamp[DIAG_TS_SIZE];
+ uint8_t peripheral;
uint8_t ch_type;
};
@@ -115,36 +162,42 @@
int diag_dci_init(void);
void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(void);
void diag_update_smd_dci_work_fn(struct work_struct *);
void diag_dci_notify_client(int peripheral_mask, int data);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
int diag_process_smd_dci_read_data(struct diag_smd_info *smd_info, void *buf,
int recd_bytes);
int diag_process_dci_transaction(unsigned char *buf, int len);
-void extract_dci_pkt_rsp(struct diag_smd_info *smd_info, unsigned char *buf);
-
-int diag_dci_find_client_index_health(int client_id);
-int diag_dci_find_client_index(int client_id);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+ struct diag_smd_info *smd_info);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(void);
/* DCI Log streaming functions */
void create_dci_log_mask_tbl(unsigned char *tbl_buf);
void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
uint8_t byte_mask);
-void clear_client_dci_cumulative_log_mask(int client_index);
-int diag_send_dci_log_mask(struct diag_smd_info *smd_info);
-void extract_dci_log(unsigned char *buf);
+void diag_dci_invalidate_cumulative_log_mask(void);
+int diag_send_dci_log_mask(void);
+void extract_dci_log(unsigned char *buf, int len, int data_source);
int diag_dci_clear_log_mask(void);
int diag_dci_query_log_mask(uint16_t log_code);
/* DCI event streaming functions */
void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask);
-void clear_client_dci_cumulative_event_mask(int client_index);
-int diag_send_dci_event_mask(struct diag_smd_info *smd_info);
-void extract_dci_events(unsigned char *buf);
+void diag_dci_invalidate_cumulative_event_mask(void);
+int diag_send_dci_event_mask(void);
+void extract_dci_events(unsigned char *buf, int len, int data_source);
void create_dci_event_mask_tbl(unsigned char *tbl_buf);
int diag_dci_clear_event_mask(void);
int diag_dci_query_event_mask(uint16_t event_id);
-void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type);
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral);
uint8_t diag_dci_get_cumulative_real_time(void);
-int diag_dci_set_real_time(int client_id, uint8_t real_time);
+int diag_dci_set_real_time(uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats *stats, int proc);
/* Functions related to DCI wakeup sources */
-void diag_dci_try_activate_wakeup_source(smd_channel_t *channel);
-void diag_dci_try_deactivate_wakeup_source(smd_channel_t *channel);
+void diag_dci_try_activate_wakeup_source(void);
+void diag_dci_try_deactivate_wakeup_source(void);
+int diag_dci_write_proc(int peripheral, int pkt_type, char *buf, int len);
#endif
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 3a1c96b..4dd0845 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -233,38 +233,30 @@
bytes_in_buf += bytes_written;
bytes_remaining -= bytes_written;
#endif
- if (driver->dci_device) {
- bytes_written = scnprintf(buf+bytes_in_buf,
- bytes_remaining,
- "dci power active, relax: %lu, %lu\n",
- driver->dci_device->power.wakeup->active_count,
- driver->dci_device->power.wakeup->relax_count);
- bytes_in_buf += bytes_written;
- bytes_remaining -= bytes_written;
- }
- if (driver->dci_cmd_device) {
- bytes_written = scnprintf(buf+bytes_in_buf,
- bytes_remaining,
- "dci cmd power active, relax: %lu, %lu\n",
- driver->dci_cmd_device->power.wakeup->
- active_count,
- driver->dci_cmd_device->power.wakeup->
- relax_count);
- bytes_in_buf += bytes_written;
- bytes_remaining -= bytes_written;
- }
+ bytes_written = scnprintf(buf+bytes_in_buf,
+ bytes_remaining,
+ "dci power: active, relax: %lu, %lu\n",
+ driver->diag_dev->power.wakeup->
+ active_count,
+ driver->diag_dev->
+ power.wakeup->relax_count);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+
}
temp_data += diag_dbgfs_dci_data_index;
for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
if (temp_data->iteration != 0) {
bytes_written = scnprintf(
buf + bytes_in_buf, bytes_remaining,
- "i %-10ld\t"
- "s %-10d\t"
- "c %-10d\t"
+ "i %-5ld\t"
+ "s %-5d\t"
+ "p %-5d\t"
+ "c %-5d\t"
"t %-15s\n",
temp_data->iteration,
temp_data->data_size,
+ temp_data->peripheral,
temp_data->ch_type,
temp_data->time_stamp);
bytes_in_buf += bytes_written;
@@ -446,7 +438,8 @@
"POOL_TYPE_COPY: [0x%p : 0x%p] count = %d\n"
"POOL_TYPE_HDLC: [0x%p : 0x%p] count = %d\n"
"POOL_TYPE_USER: [0x%p : 0x%p] count = %d\n"
- "POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n",
+ "POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n"
+ "POOL_TYPE_DCI: [0x%p : 0x%p] count = %d\n",
driver->diagpool,
diag_pools_array[POOL_COPY_IDX],
driver->count,
@@ -458,7 +451,10 @@
driver->count_user_pool,
driver->diag_write_struct_pool,
diag_pools_array[POOL_WRITE_STRUCT_IDX],
- driver->count_write_struct_pool);
+ driver->count_write_struct_pool,
+ driver->diag_dci_pool,
+ diag_pools_array[POOL_DCI_IDX],
+ driver->count_dci_pool);
for (i = 0; i < MAX_HSIC_CH; i++) {
if (!diag_hsic[i].hsic_inited)
@@ -506,7 +502,8 @@
"POOL_TYPE_COPY: [0x%p : 0x%p] count = %d\n"
"POOL_TYPE_HDLC: [0x%p : 0x%p] count = %d\n"
"POOL_TYPE_USER: [0x%p : 0x%p] count = %d\n"
- "POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n",
+ "POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n"
+ "POOL_TYPE_DCI: [0x%p : 0x%p] count = %d\n",
driver->diagpool,
diag_pools_array[POOL_COPY_IDX],
driver->count,
@@ -518,7 +515,10 @@
driver->count_user_pool,
driver->diag_write_struct_pool,
diag_pools_array[POOL_WRITE_STRUCT_IDX],
- driver->count_write_struct_pool);
+ driver->count_write_struct_pool,
+ driver->diag_dci_pool,
+ diag_pools_array[POOL_DCI_IDX],
+ driver->count_dci_pool);
ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 95d90b3..eda745f 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,13 +17,13 @@
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/mutex.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wakelock.h>
#include <mach/msm_smd.h>
#include <asm/atomic.h>
-#include <asm/mach-types.h>
/* Size of the USB buffers used for read and write*/
#define USB_MAX_OUT_BUF 4096
@@ -44,28 +44,32 @@
#define POOL_TYPE_HSIC_WRITE 11
#define POOL_TYPE_HSIC_2_WRITE 12
#define POOL_TYPE_ALL 10
+#define POOL_TYPE_DCI 20
#define POOL_COPY_IDX 0
#define POOL_HDLC_IDX 1
#define POOL_USER_IDX 2
#define POOL_WRITE_STRUCT_IDX 3
-#define POOL_HSIC_IDX 4
-#define POOL_HSIC_2_IDX 5
-#define POOL_HSIC_3_IDX 6
-#define POOL_HSIC_4_IDX 7
-#define POOL_HSIC_WRITE_IDX 8
-#define POOL_HSIC_2_WRITE_IDX 9
-#define POOL_HSIC_3_WRITE_IDX 10
-#define POOL_HSIC_4_WRITE_IDX 11
+#define POOL_DCI_IDX 4
+#define POOL_BRIDGE_BASE POOL_DCI_IDX
+#define POOL_HSIC_IDX (POOL_BRIDGE_BASE + 1)
+#define POOL_HSIC_2_IDX (POOL_BRIDGE_BASE + 2)
+#define POOL_HSIC_3_IDX (POOL_BRIDGE_BASE + 3)
+#define POOL_HSIC_4_IDX (POOL_BRIDGE_BASE + 4)
+#define POOL_HSIC_WRITE_IDX (POOL_BRIDGE_BASE + 5)
+#define POOL_HSIC_2_WRITE_IDX (POOL_BRIDGE_BASE + 6)
+#define POOL_HSIC_3_WRITE_IDX (POOL_BRIDGE_BASE + 7)
+#define POOL_HSIC_4_WRITE_IDX (POOL_BRIDGE_BASE + 8)
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-#define NUM_MEMORY_POOLS 12
+#define NUM_MEMORY_POOLS 13
#else
-#define NUM_MEMORY_POOLS 4
+#define NUM_MEMORY_POOLS 5
#endif
#define MAX_SSID_PER_RANGE 200
+#define ALL_PROC -1
#define MODEM_DATA 0
#define LPASS_DATA 1
#define WCNSS_DATA 2
@@ -107,10 +111,26 @@
#define DIAG_STM_WCNSS 0x04
#define DIAG_STM_APPS 0x08
-#define DIAG_DIAG_STM 0x214
-
#define BAD_PARAM_RESPONSE_MESSAGE 20
+#define DIAG_CMD_VERSION 0
+#define DIAG_CMD_DOWNLOAD 0x3A
+#define DIAG_CMD_DIAG_SUBSYS 0x4B
+#define DIAG_CMD_LOG_ON_DMND 0x78
+#define DIAG_CMD_EXT_BUILD 0x7c
+
+#define DIAG_SS_DIAG 0x12
+#define DIAG_SS_PARAMS 0x32
+
+#define DIAG_DIAG_MAX_PKT_SZ 0x55
+#define DIAG_DIAG_STM 0x20E
+#define DIAG_DIAG_POLL 0x03
+#define DIAG_DEL_RSP_WRAP 0x04
+#define DIAG_DEL_RSP_WRAP_CNT 0x05
+
+#define MODE_CMD 41
+#define RESET_ID 2
+
/*
* The status bit masks when received in a signal handler are to be
* used in conjunction with the peripheral list bit mask to determine the
@@ -129,6 +149,13 @@
#define NUM_SMD_CMD_CHANNELS 1
#define NUM_SMD_DCI_CMD_CHANNELS 1
+/*
+ * Number of processors that can support DCI: the DCI-capable
+ * peripherals plus the Apps processor. Being counted here does
+ * not guarantee that a given peripheral supports the feature.
+ */
+#define NUM_DCI_PROC (NUM_SMD_DATA_CHANNELS + 1)
+
#define SMD_DATA_TYPE 0
#define SMD_CNTL_TYPE 1
#define SMD_DCI_TYPE 2
@@ -165,6 +192,12 @@
QSC = 5,
};
+struct diag_pkt_header_t {
+ uint8_t cmd_code;
+ uint8_t subsys_id;
+ uint16_t subsys_cmd_code;
+} __packed;
+
struct diag_master_table {
uint16_t cmd_code;
uint16_t subsys_id;
@@ -286,6 +319,7 @@
char *name;
int dropped_count;
struct class *diagchar_class;
+ struct device *diag_dev;
int ref_count;
struct mutex diagchar_mutex;
wait_queue_head_t wait_q;
@@ -307,7 +341,7 @@
int peripheral_supports_stm[NUM_SMD_CONTROL_CHANNELS];
/* DCI related variables */
struct list_head dci_req_list;
- struct diag_dci_client_tbl *dci_client_tbl;
+ struct list_head dci_client_list;
int dci_tag;
int dci_client_id;
struct mutex dci_mutex;
@@ -324,17 +358,21 @@
unsigned int poolsize_user;
unsigned int itemsize_write_struct;
unsigned int poolsize_write_struct;
+ unsigned int itemsize_dci;
+ unsigned int poolsize_dci;
unsigned int debug_flag;
/* State for the mempool for the char driver */
mempool_t *diagpool;
mempool_t *diag_hdlc_pool;
mempool_t *diag_user_pool;
mempool_t *diag_write_struct_pool;
+ mempool_t *diag_dci_pool;
spinlock_t diag_mem_lock;
int count;
int count_hdlc_pool;
int count_user_pool;
int count_write_struct_pool;
+ int count_dci_pool;
int used;
/* Buffers for masks */
struct mutex diag_cntl_mutex;
@@ -365,8 +403,6 @@
unsigned hdlc_count;
unsigned hdlc_escape;
int in_busy_pktdata;
- struct device *dci_device;
- struct device *dci_cmd_device;
/* Variables for non real time mode */
int real_time_mode;
int real_time_update_busy;
@@ -397,6 +433,9 @@
struct diag_master_table *table;
uint8_t *pkt_buf;
int pkt_length;
+ uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+ uint32_t dci_pkt_length;
+ int in_busy_dcipktdata;
struct diag_request *usb_read_ptr;
struct diag_request *write_ptr_svc;
int logging_mode;
@@ -441,5 +480,6 @@
void diag_get_timestamp(char *time_str);
int diag_find_polling_reg(int i);
+void check_drain_timer(void);
#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 93e932a..263715d 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -71,6 +71,9 @@
/* for write structure buffer */
static unsigned int itemsize_write_struct = 20; /*Size of item in the mempool */
static unsigned int poolsize_write_struct = 10;/* Num of items in the mempool */
+/* For the dci memory pool */
+static unsigned int itemsize_dci = IN_BUF_SIZE; /*Size of item in the mempool */
+static unsigned int poolsize_dci = 10; /*Number of items in the mempool */
/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
static unsigned int threshold_client_limit = 30;
@@ -144,6 +147,16 @@
mutex_unlock(&driver->diagchar_mutex);
}
+void check_drain_timer(void)
+{
+ int ret = 0;
+
+ if (!timer_in_progress) {
+ timer_in_progress = 1;
+ ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
+ }
+}
+
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
void diag_clear_hsic_tbl(void)
{
@@ -266,9 +279,7 @@
* This will specially help in case of ungraceful exit of any DCI client
* This call will remove any pending registrations of such client
*/
- if (diag_dci_find_client_index(current->tgid) !=
- DCI_CLIENT_INDEX_INVALID)
- diagchar_ioctl(NULL, DIAG_IOCTL_DCI_DEINIT, 0);
+ diag_dci_deinit_client();
/* If the exiting process is the socket process */
mutex_lock(&driver->diagchar_mutex);
if (driver->socket_process &&
@@ -303,11 +314,12 @@
mutex_lock(&driver->diagchar_mutex);
driver->ref_count--;
- /* On Client exit, try to destroy all 4 pools */
+ /* On Client exit, try to destroy all 5 pools */
diagmem_exit(driver, POOL_TYPE_COPY);
diagmem_exit(driver, POOL_TYPE_HDLC);
diagmem_exit(driver, POOL_TYPE_USER);
diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT);
+ diagmem_exit(driver, POOL_TYPE_DCI);
for (i = 0; i < driver->num_clients; i++) {
if (NULL != diagpriv_data && diagpriv_data->pid ==
driver->client_map[i].pid) {
@@ -552,6 +564,87 @@
int *pnum_data) { return 0; }
#endif
+static int diag_copy_dci(char __user *buf, size_t count,
+ struct diag_dci_client_tbl *entry, int *pret)
+{
+ int total_data_len = 0;
+ int ret = 0;
+ int exit_stat = 1;
+ struct diag_dci_buffer_t *buf_entry, *temp;
+ struct diag_smd_info *smd_info = NULL;
+
+ if (!buf || !entry || !pret)
+ return exit_stat;
+
+ ret = *pret;
+
+ ret += 4;
+
+ mutex_lock(&entry->write_buf_mutex);
+ list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+ buf_track) {
+ list_del(&buf_entry->buf_track);
+ mutex_lock(&buf_entry->data_mutex);
+ if ((buf_entry->data_len > 0) &&
+ (buf_entry->in_busy) &&
+ (buf_entry->data)) {
+ if (copy_to_user(buf+ret, (void *)buf_entry->data,
+ buf_entry->data_len))
+ goto drop;
+ ret += buf_entry->data_len;
+ total_data_len += buf_entry->data_len;
+drop:
+ buf_entry->in_busy = 0;
+ buf_entry->data_len = 0;
+ buf_entry->in_list = 0;
+ if (buf_entry->buf_type == DCI_BUF_CMD) {
+ if (buf_entry->data_source == APPS_DATA) {
+ mutex_unlock(&buf_entry->data_mutex);
+ continue;
+ }
+ if (driver->separate_cmdrsp[
+ buf_entry->data_source]) {
+ smd_info = &driver->smd_dci_cmd[
+ buf_entry->data_source];
+ } else {
+ smd_info = &driver->smd_dci[
+ buf_entry->data_source];
+ }
+ smd_info->in_busy_1 = 0;
+ mutex_unlock(&buf_entry->data_mutex);
+ continue;
+ } else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ diagmem_free(driver, buf_entry->data,
+ POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ continue;
+ }
+
+ }
+ mutex_unlock(&buf_entry->data_mutex);
+ }
+
+ if (total_data_len > 0) {
+ /* Copy the total data length */
+ COPY_USER_SPACE_OR_EXIT(buf+4, total_data_len, 4);
+ ret -= 4;
+ } else {
+ pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+ __func__, total_data_len);
+ }
+
+ entry->in_service = 0;
+ mutex_unlock(&entry->write_buf_mutex);
+
+ exit_stat = 0;
+exit:
+ *pret = ret;
+
+ return exit_stat;
+}
+
int diag_command_reg(unsigned long ioarg)
{
int i = 0, success = -EINVAL, j;
@@ -877,13 +970,11 @@
int i, result = -EINVAL, interim_size = 0, client_id = 0, real_time = 0;
int retry_count = 0, timer = 0;
uint16_t support_list = 0, interim_rsp_id, remote_dev;
- struct diag_dci_client_tbl *dci_params;
- struct diag_dci_health_stats stats;
+ struct diag_dci_reg_tbl_t *dci_reg_params;
+ struct diag_dci_health_stats_proc stats;
struct diag_log_event_stats le_stats;
struct diagpkt_delay_params delay_params;
struct real_time_vote_t rt_vote;
- struct list_head *start, *req_temp;
- struct dci_pkt_req_entry_t *req_entry = NULL;
switch (iocmd) {
case DIAG_IOCTL_COMMAND_REG:
@@ -909,127 +1000,25 @@
}
break;
case DIAG_IOCTL_DCI_REG:
- if (driver->dci_state == DIAG_DCI_NO_REG)
- return DIAG_DCI_NO_REG;
- if (driver->num_dci_client >= MAX_DCI_CLIENTS)
- return DIAG_DCI_NO_REG;
- dci_params = kzalloc(sizeof(struct diag_dci_client_tbl),
+ dci_reg_params = kzalloc(sizeof(struct diag_dci_reg_tbl_t),
GFP_KERNEL);
- if (dci_params == NULL) {
+ if (dci_reg_params == NULL) {
pr_err("diag: unable to alloc memory\n");
return -ENOMEM;
}
- if (copy_from_user(dci_params, (void *)ioarg,
- sizeof(struct diag_dci_client_tbl))) {
- kfree(dci_params);
+ if (copy_from_user(dci_reg_params, (void *)ioarg,
+ sizeof(struct diag_dci_reg_tbl_t))) {
+ kfree(dci_reg_params);
return -EFAULT;
}
- mutex_lock(&driver->dci_mutex);
- if (!(driver->num_dci_client)) {
- for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
- driver->smd_dci[i].in_busy_1 = 0;
- if (driver->supports_separate_cmdrsp)
- for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
- driver->smd_dci_cmd[i].in_busy_1 = 0;
- }
- driver->num_dci_client++;
- if (driver->num_dci_client == 1)
- diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP);
- queue_work(driver->diag_real_time_wq,
- &driver->diag_real_time_work);
- pr_debug("diag: In %s, id = %d\n",
- __func__, driver->dci_client_id);
- driver->dci_client_id++;
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- if (driver->dci_client_tbl[i].client == NULL) {
- driver->dci_client_tbl[i].client = current;
- driver->dci_client_tbl[i].client_id =
- driver->dci_client_id;
- driver->dci_client_tbl[i].list =
- dci_params->list;
- driver->dci_client_tbl[i].signal_type =
- dci_params->signal_type;
- create_dci_log_mask_tbl(driver->
- dci_client_tbl[i].dci_log_mask);
- create_dci_event_mask_tbl(driver->
- dci_client_tbl[i].dci_event_mask);
- driver->dci_client_tbl[i].data_len = 0;
- driver->dci_client_tbl[i].dci_data =
- kzalloc(IN_BUF_SIZE, GFP_KERNEL);
- driver->dci_client_tbl[i].total_capacity =
- IN_BUF_SIZE;
- driver->dci_client_tbl[i].dropped_logs = 0;
- driver->dci_client_tbl[i].dropped_events = 0;
- driver->dci_client_tbl[i].received_logs = 0;
- driver->dci_client_tbl[i].received_events = 0;
- driver->dci_client_tbl[i].real_time = 1;
- mutex_init(&driver->dci_client_tbl[i].
- data_mutex);
- break;
- }
- }
- kfree(dci_params);
- mutex_unlock(&driver->dci_mutex);
- result = driver->dci_client_id;
+ result = diag_dci_register_client(dci_reg_params);
+ kfree(dci_reg_params);
break;
case DIAG_IOCTL_DCI_DEINIT:
- result = -EIO;
- /* Delete this process from DCI table */
- mutex_lock(&driver->dci_mutex);
- i = diag_dci_find_client_index(current->tgid);
- if (i == DCI_CLIENT_INDEX_INVALID) {
- result = DIAG_DCI_NOT_SUPPORTED;
- } else {
- /* clear respective cumulative log masks */
- clear_client_dci_cumulative_log_mask(i);
- /* send updated log mask to peripherals */
- result =
- diag_send_dci_log_mask(&driver->smd_cntl[MODEM_DATA]);
- if (result != DIAG_DCI_NO_ERROR) {
- mutex_unlock(&driver->dci_mutex);
- return result;
- }
- /* clear respective cumulative event masks */
- clear_client_dci_cumulative_event_mask(i);
- /* send updated event mask to peripherals */
- result =
- diag_send_dci_event_mask(
- &driver->smd_cntl[MODEM_DATA]);
- if (result != DIAG_DCI_NO_ERROR) {
- mutex_unlock(&driver->dci_mutex);
- return result;
- }
- result = i;
- /* Delete this process from DCI table */
- list_for_each_safe(start, req_temp,
- &driver->dci_req_list) {
- req_entry = list_entry(start,
- struct dci_pkt_req_entry_t,
- track);
- if (req_entry->pid == current->tgid) {
- list_del(&req_entry->track);
- kfree(req_entry);
- }
- }
- driver->dci_client_tbl[result].client = NULL;
- kfree(driver->dci_client_tbl[result].dci_data);
- driver->dci_client_tbl[result].dci_data = NULL;
- mutex_destroy(&driver->dci_client_tbl[result].
- data_mutex);
- driver->num_dci_client--;
- if (driver->num_dci_client == 0) {
- diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN);
- } else {
- real_time = diag_dci_get_cumulative_real_time();
- diag_update_real_time_vote(DIAG_PROC_DCI,
- real_time);
- }
- queue_work(driver->diag_real_time_wq,
- &driver->diag_real_time_work);
- }
- mutex_unlock(&driver->dci_mutex);
+ result = diag_dci_deinit_client();
break;
case DIAG_IOCTL_DCI_SUPPORT:
+ support_list |= DIAG_CON_APSS;
for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
if (driver->smd_dci[i].ch)
support_list |=
@@ -1042,29 +1031,16 @@
break;
case DIAG_IOCTL_DCI_HEALTH_STATS:
if (copy_from_user(&stats, (void *)ioarg,
- sizeof(struct diag_dci_health_stats)))
+ sizeof(struct diag_dci_health_stats_proc)))
return -EFAULT;
- mutex_lock(&dci_health_mutex);
- i = diag_dci_find_client_index_health(stats.client_id);
- if (i != DCI_CLIENT_INDEX_INVALID) {
- dci_params = &(driver->dci_client_tbl[i]);
- stats.dropped_logs = dci_params->dropped_logs;
- stats.dropped_events =
- dci_params->dropped_events;
- stats.received_logs =
- dci_params->received_logs;
- stats.received_events =
- dci_params->received_events;
- if (stats.reset_status) {
- dci_params->dropped_logs = 0;
- dci_params->dropped_events = 0;
- dci_params->received_logs = 0;
- dci_params->received_events = 0;
- }
- }
- mutex_unlock(&dci_health_mutex);
+
+ result = diag_dci_copy_health_stats(stats.health,
+ stats.proc);
+ if (result != DIAG_DCI_NO_ERROR)
+ break;
+
if (copy_to_user((void *)ioarg, &stats,
- sizeof(struct diag_dci_health_stats)))
+ sizeof(struct diag_dci_health_stats_proc)))
return -EFAULT;
result = DIAG_DCI_NO_ERROR;
break;
@@ -1126,8 +1102,7 @@
return -EFAULT;
driver->real_time_update_busy++;
if (rt_vote.proc == DIAG_PROC_DCI) {
- diag_dci_set_real_time(current->tgid,
- rt_vote.real_time_vote);
+ diag_dci_set_real_time(rt_vote.real_time_vote);
real_time = diag_dci_get_cumulative_real_time();
} else {
real_time = rt_vote.real_time_vote;
@@ -1181,7 +1156,10 @@
pr_err("diag: Client PID not found in table");
return -EINVAL;
}
-
+ if (!buf) {
+ pr_err("diag: bad address from user side\n");
+ return -EFAULT;
+ }
wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
mutex_lock(&driver->diagchar_mutex);
@@ -1389,32 +1367,55 @@
goto exit;
}
+ if (driver->data_ready[index] & DCI_PKT_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
+ driver->dci_pkt_length);
+ driver->data_ready[index] ^= DCI_PKT_TYPE;
+ driver->in_busy_dcipktdata = 0;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+ /*Copy the type of data being passed*/
+ data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+8, *(dci_cumulative_event_mask),
+ DCI_EVENT_MASK_SIZE);
+ driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+ /*Copy the type of data being passed*/
+ data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+ COPY_USER_SPACE_OR_EXIT(buf+8, *(dci_cumulative_log_mask),
+ DCI_LOG_MASK_SIZE);
+ driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ goto exit;
+ }
+
if (driver->data_ready[index] & DCI_DATA_TYPE) {
/* Copy the type of data being passed */
data_type = driver->data_ready[index] & DCI_DATA_TYPE;
- COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
- /* check the current client and copy its data */
- for (i = 0; i < MAX_DCI_CLIENTS; i++) {
- entry = &(driver->dci_client_tbl[i]);
- if (entry && entry->client) {
- if (current->tgid == entry->client->tgid) {
- mutex_lock(&entry->data_mutex);
- COPY_USER_SPACE_OR_EXIT(buf+4,
- entry->data_len, 4);
- COPY_USER_SPACE_OR_EXIT(buf+8,
- *(entry->dci_data), entry->data_len);
- entry->data_len = 0;
- mutex_unlock(&entry->data_mutex);
- break;
- }
- }
- }
driver->data_ready[index] ^= DCI_DATA_TYPE;
+ /* check the current client and copy its data */
+ entry = diag_dci_get_client_entry();
+ if (entry) {
+ if (!entry->in_service)
+ goto exit;
+ COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+ exit_stat = diag_copy_dci(buf, count, entry, &ret);
+ if (exit_stat == 1)
+ goto exit;
+ }
for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
- driver->smd_dci[i].in_busy_1 = 0;
if (driver->smd_dci[i].ch) {
- diag_dci_try_deactivate_wakeup_source(
- driver->smd_dci[i].ch);
queue_work(driver->diag_dci_wq,
&(driver->smd_dci[i].diag_read_smd_work));
}
@@ -1423,10 +1424,7 @@
for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++) {
if (!driver->separate_cmdrsp[i])
continue;
- driver->smd_dci_cmd[i].in_busy_1 = 0;
if (driver->smd_dci_cmd[i].ch) {
- diag_dci_try_deactivate_wakeup_source(
- driver->smd_dci_cmd[i].ch);
queue_work(driver->diag_dci_wq,
&(driver->smd_dci_cmd[i].
diag_read_smd_work));
@@ -1449,7 +1447,7 @@
size_t count, loff_t *ppos)
{
int err, ret = 0, pkt_type, token_offset = 0;
- int remote_proc = 0;
+ int remote_proc = 0, data_type;
uint8_t index;
#ifdef DIAG_DEBUG
int length = 0, i;
@@ -1480,9 +1478,11 @@
return -EBADMSG;
}
#ifdef CONFIG_DIAG_OVER_USB
- if (((pkt_type != DCI_DATA_TYPE) && (driver->logging_mode == USB_MODE)
- && (!driver->usb_connected)) ||
- (driver->logging_mode == NO_LOGGING_MODE)) {
+ if (driver->logging_mode == NO_LOGGING_MODE ||
+ (!((pkt_type == DCI_DATA_TYPE) ||
+ ((pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) == 0))
+ && (driver->logging_mode == USB_MODE) &&
+ (!driver->usb_connected))) {
/*Drop the diag payload */
return -EIO;
}
@@ -1541,9 +1541,9 @@
}
/* The packet is for the remote processor */
if (payload_size <= MIN_SIZ_ALLOW) {
- pr_err("diag: Integer underflow in %s, payload size: %d",
- __func__, payload_size);
- return -EBADMSG;
+ pr_err("diag: Integer underflow in %s, payload size: %d",
+ __func__, payload_size);
+ return -EBADMSG;
}
token_offset = 4;
payload_size -= 4;
@@ -1761,20 +1761,47 @@
return -EBADMSG;
}
- mutex_lock(&driver->diagchar_mutex);
buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
if (!buf_copy) {
driver->dropped_count++;
- mutex_unlock(&driver->diagchar_mutex);
return -ENOMEM;
}
err = copy_from_user(buf_copy, buf + 4, payload_size);
if (err) {
printk(KERN_INFO "diagchar : copy_from_user failed\n");
- ret = -EFAULT;
- goto fail_free_copy;
+ diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+ buf_copy = NULL;
+ return -EFAULT;
}
+
+ data_type = pkt_type &
+ (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT | DCI_PKT_TYPE);
+ if (data_type) {
+ diag_process_apps_dci_read_data(data_type, buf_copy,
+ payload_size);
+ if (pkt_type & DATA_TYPE_DCI_LOG)
+ pkt_type ^= DATA_TYPE_DCI_LOG;
+ else if (pkt_type & DATA_TYPE_DCI_EVENT) {
+ pkt_type ^= DATA_TYPE_DCI_EVENT;
+ } else {
+ pkt_type ^= DCI_PKT_TYPE;
+ diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+ return 0;
+ }
+
+ /*
+ * If the data is not headed for normal processing or the usb
+ * is unplugged and we are in usb mode
+ */
+ if ((pkt_type != DATA_TYPE_LOG && pkt_type != DATA_TYPE_EVENT)
+ || ((driver->logging_mode == USB_MODE) &&
+ (!driver->usb_connected))) {
+ diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+ return 0;
+ }
+ }
+
if (driver->stm_state[APPS_DATA] &&
(pkt_type >= DATA_TYPE_EVENT && pkt_type <= DATA_TYPE_LOG)) {
int stm_size = 0;
@@ -1809,6 +1836,7 @@
length++;
}
#endif
+ mutex_lock(&driver->diagchar_mutex);
if (!buf_hdlc)
buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
POOL_TYPE_HDLC);
@@ -1882,10 +1910,9 @@
diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
buf_copy = NULL;
mutex_unlock(&driver->diagchar_mutex);
- if (!timer_in_progress) {
- timer_in_progress = 1;
- ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
- }
+
+ check_drain_timer();
+
return 0;
fail_free_hdlc:
@@ -2017,9 +2044,13 @@
return -1;
}
- device_create(driver->diagchar_class, NULL, devno,
- (void *)driver, "diag");
+ driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+ (void *)driver, "diag");
+ if (!driver->diag_dev)
+ return -EIO;
+
+ driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
return 0;
}
@@ -2110,12 +2141,15 @@
driver->poolsize_user = poolsize_user;
driver->itemsize_write_struct = itemsize_write_struct;
driver->poolsize_write_struct = poolsize_write_struct;
+ driver->itemsize_dci = itemsize_dci;
+ driver->poolsize_dci = poolsize_dci;
driver->num_clients = max_clients;
driver->logging_mode = USB_MODE;
driver->socket_process = NULL;
driver->callback_process = NULL;
driver->mask_check = 0;
driver->in_busy_pktdata = 0;
+ driver->in_busy_dcipktdata = 0;
mutex_init(&driver->diagchar_mutex);
init_waitqueue_head(&driver->wait_q);
init_waitqueue_head(&driver->smd_wait_q);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f7e720f..eb8a75b 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,9 +43,6 @@
#include "diag_masks.h"
#include "diagfwd_bridge.h"
-#define MODE_CMD 41
-#define RESET_ID 2
-
#define STM_CMD_VERSION_OFFSET 4
#define STM_CMD_MASK_OFFSET 5
#define STM_CMD_DATA_OFFSET 6
@@ -695,15 +692,14 @@
if (!buf && (smd_info->type == SMD_DCI_TYPE ||
smd_info->type == SMD_DCI_CMD_TYPE))
- diag_dci_try_deactivate_wakeup_source(smd_info->ch);
+ diag_dci_try_deactivate_wakeup_source();
if (smd_info->ch && buf) {
- pkt_len = smd_cur_packet_size(smd_info->ch);
+ pkt_len = smd_cur_packet_size(smd_info->ch);
if (pkt_len == 0 && (smd_info->type == SMD_DCI_TYPE ||
smd_info->type == SMD_DCI_CMD_TYPE))
- diag_dci_try_deactivate_wakeup_source(smd_info->ch);
-
+ diag_dci_try_deactivate_wakeup_source();
if (pkt_len > buf_size)
resize_success = diag_smd_resize_buf(smd_info, &buf,
&buf_size, pkt_len);
@@ -811,7 +807,7 @@
fail_return:
if (smd_info->type == SMD_DCI_TYPE ||
smd_info->type == SMD_DCI_CMD_TYPE)
- diag_dci_try_deactivate_wakeup_source(smd_info->ch);
+ diag_dci_try_deactivate_wakeup_source();
return;
}
@@ -1049,17 +1045,46 @@
return err;
}
-static void diag_update_pkt_buffer(unsigned char *buf)
+void diag_update_pkt_buffer(unsigned char *buf, int type)
{
- unsigned char *ptr = driver->pkt_buf;
+ unsigned char *ptr = NULL;
unsigned char *temp = buf;
+ unsigned int length;
+ int *in_busy = NULL;
+ if (!buf) {
+ pr_err("diag: Invalid buffer in %s\n", __func__);
+ return;
+ }
+
+ switch (type) {
+ case PKT_TYPE:
+ ptr = driver->pkt_buf;
+ length = driver->pkt_length;
+ in_busy = &driver->in_busy_pktdata;
+ break;
+ case DCI_PKT_TYPE:
+ ptr = driver->dci_pkt_buf;
+ length = driver->dci_pkt_length;
+ in_busy = &driver->in_busy_dcipktdata;
+ break;
+ default:
+ pr_err("diag: Invalid type %d in %s\n", type, __func__);
+ return;
+ }
+
+ if (!ptr || length == 0) {
+ pr_err("diag: Invalid ptr %p and length %d in %s",
+ ptr, length, __func__);
+ return;
+ }
mutex_lock(&driver->diagchar_mutex);
- if (CHK_OVERFLOW(ptr, ptr, ptr + PKT_SIZE, driver->pkt_length)) {
- memcpy(ptr, temp , driver->pkt_length);
- driver->in_busy_pktdata = 1;
- } else
+ if (CHK_OVERFLOW(ptr, ptr, ptr + PKT_SIZE, length)) {
+ memcpy(ptr, temp , length);
+ *in_busy = 1;
+ } else {
printk(KERN_CRIT " Not enough buffer space for PKT_RESP\n");
+ }
mutex_unlock(&driver->diagchar_mutex);
}
@@ -1108,7 +1133,7 @@
if (entry.process_id != NON_APPS_PROC) {
/* If the message is to be sent to the apps process */
if (type != MODEM_DATA) {
- diag_update_pkt_buffer(buf);
+ diag_update_pkt_buffer(buf, PKT_TYPE);
diag_update_sleeping_process(entry.process_id,
PKT_TYPE);
}
@@ -2191,7 +2216,7 @@
if (smd_info->type == SMD_DCI_TYPE ||
smd_info->type == SMD_DCI_CMD_TYPE) {
if (event == SMD_EVENT_DATA)
- diag_dci_try_activate_wakeup_source(smd_info->ch);
+ diag_dci_try_activate_wakeup_source();
queue_work(driver->diag_dci_wq,
&(smd_info->diag_read_smd_work));
} else if (smd_info->type == SMD_DATA_TYPE) {
@@ -2659,6 +2684,12 @@
GFP_KERNEL)) == NULL)
goto err;
kmemleak_not_leak(driver->pkt_buf);
+ if (driver->dci_pkt_buf == NULL) {
+ driver->dci_pkt_buf = kzalloc(PKT_SIZE, GFP_KERNEL);
+ if (!driver->dci_pkt_buf)
+ goto err;
+ }
+ kmemleak_not_leak(driver->dci_pkt_buf);
if (driver->apps_rsp_buf == NULL) {
driver->apps_rsp_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL);
if (driver->apps_rsp_buf == NULL)
@@ -2709,6 +2740,7 @@
kfree(driver->data_ready);
kfree(driver->table);
kfree(driver->pkt_buf);
+ kfree(driver->dci_pkt_buf);
kfree(driver->usb_read_ptr);
kfree(driver->apps_rsp_buf);
kfree(driver->user_space_data_buf);
@@ -2749,6 +2781,7 @@
kfree(driver->data_ready);
kfree(driver->table);
kfree(driver->pkt_buf);
+ kfree(driver->dci_pkt_buf);
kfree(driver->usb_read_ptr);
kfree(driver->apps_rsp_buf);
kfree(driver->user_space_data_buf);
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 1a4601a..7b2ded3 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,8 @@
int diag_process_apps_pkt(unsigned char *buf, int len);
void diag_reset_smd_data(int queue);
int diag_apps_responds(void);
+void diag_update_pkt_buffer(unsigned char *buf, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
/* State for diag forwarding */
#ifdef CONFIG_DIAG_OVER_USB
int diagfwd_connect(void);
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 4ceca4f..db01e9b 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,16 @@
driver->diag_write_struct_pool, GFP_ATOMIC);
}
}
+ } else if (pool_type == POOL_TYPE_DCI) {
+ if (driver->diag_dci_pool) {
+ if ((driver->count_dci_pool < driver->poolsize_dci) &&
+ (size <= driver->itemsize_dci)) {
+ atomic_add(1,
+ (atomic_t *)&driver->count_dci_pool);
+ buf = mempool_alloc(driver->diag_dci_pool,
+ GFP_ATOMIC);
+ }
+ }
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC ||
pool_type == POOL_TYPE_HSIC_2) {
@@ -155,6 +165,16 @@
pr_err("diag: Unable to destroy STRUCT mempool");
}
}
+
+ if (driver->diag_dci_pool) {
+ if (driver->count_dci_pool == 0 && driver->ref_count == 0) {
+ mempool_destroy(driver->diag_dci_pool);
+ driver->diag_dci_pool = NULL;
+ } else if (driver->ref_count == 0 && pool_type ==
+ POOL_TYPE_ALL) {
+ pr_err("diag: Unable to destroy DCI mempool");
+ }
+ }
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
for (index = 0; index < MAX_HSIC_CH; index++) {
if (diag_hsic[index].diag_hsic_pool &&
@@ -231,6 +251,15 @@
} else
pr_err("diag: Attempt to free up DIAG driver USB structure mempool which is already free %d ",
driver->count_write_struct_pool);
+ } else if (pool_type == POOL_TYPE_DCI) {
+ if (driver->diag_dci_pool != NULL &&
+ driver->count_dci_pool > 0) {
+ mempool_free(buf, driver->diag_dci_pool);
+ atomic_add(-1,
+ (atomic_t *)&driver->count_dci_pool);
+ } else
+ pr_err("diag: Attempt to free up DIAG driver DCI mempool which is already free %d ",
+ driver->count_dci_pool);
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
} else if (pool_type == POOL_TYPE_HSIC ||
pool_type == POOL_TYPE_HSIC_2) {
@@ -294,6 +323,12 @@
driver->diag_write_struct_pool;
}
+ if (driver->count_dci_pool == 0) {
+ driver->diag_dci_pool = mempool_create_kmalloc_pool(
+ driver->poolsize_dci, driver->itemsize_dci);
+ diag_pools_array[POOL_DCI_IDX] = driver->diag_dci_pool;
+ }
+
if (!driver->diagpool)
pr_err("diag: Cannot allocate diag mempool\n");
@@ -305,6 +340,10 @@
if (!driver->diag_write_struct_pool)
pr_err("diag: Cannot allocate diag USB struct mempool\n");
+
+ if (!driver->diag_dci_pool)
+ pr_err("diag: Cannot allocate diag DCI mempool\n");
+
}
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 12fa799..d163317 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -461,6 +461,7 @@
struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
{
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct kgsl_device *device = &adreno_dev->dev;
struct adreno_perfcount_group *group;
struct kgsl_perfcounter_read_group *list = NULL;
unsigned int i, j;
@@ -488,6 +489,13 @@
goto done;
}
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ ret = kgsl_active_count_get(device);
+ if (ret) {
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ goto done;
+ }
+
/* list iterator */
for (j = 0; j < count; j++) {
@@ -496,7 +504,7 @@
/* Verify that the group ID is within range */
if (list[j].groupid >= counters->group_count) {
ret = -EINVAL;
- goto done;
+ break;
}
group = &(counters->groups[list[j].groupid]);
@@ -512,11 +520,13 @@
}
}
+ kgsl_active_count_put(device);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
/* write the data */
- if (copy_to_user(reads, list,
- sizeof(struct kgsl_perfcounter_read_group) *
- count) != 0)
- ret = -EFAULT;
+ if (ret == 0)
+ ret = copy_to_user(reads, list,
+ sizeof(struct kgsl_perfcounter_read_group) * count);
done:
kfree(list);
@@ -592,8 +602,11 @@
unsigned int *max_counters)
{
struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct kgsl_device *device = &adreno_dev->dev;
struct adreno_perfcount_group *group;
- unsigned int i;
+ unsigned int i, t;
+ int ret;
+ unsigned int *buf;
*max_counters = 0;
@@ -603,6 +616,8 @@
if (groupid >= counters->group_count)
return -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
group = &(counters->groups[groupid]);
*max_counters = group->reg_count;
@@ -610,20 +625,28 @@
* if NULL countable or *count of zero, return max reg_count in
* *max_counters and return success
*/
- if (countables == NULL || count == 0)
+ if (countables == NULL || count == 0) {
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return 0;
-
- /*
- * Go through all available counters. Write upto *count * countable
- * values.
- */
- for (i = 0; i < group->reg_count && i < count; i++) {
- if (copy_to_user(&countables[i], &(group->regs[i].countable),
- sizeof(unsigned int)) != 0)
- return -EFAULT;
}
- return 0;
+ t = min_t(int, group->reg_count, count);
+
+ buf = kmalloc(t * sizeof(unsigned int), GFP_KERNEL);
+ if (buf == NULL) {
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < t; i++)
+ buf[i] = group->regs[i].countable;
+
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
+ ret = copy_to_user(countables, buf, sizeof(unsigned int) * t);
+ kfree(buf);
+
+ return ret;
}
static inline void refcount_group(struct adreno_perfcount_group *group,
@@ -2832,6 +2855,8 @@
break;
}
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
if (enable) {
device->pwrctrl.ctrl_flags = 0;
adreno_dev->fast_hang_detect = 1;
@@ -2851,6 +2876,7 @@
kgsl_pwrscale_disable(device);
}
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
status = 0;
}
break;
@@ -2869,10 +2895,13 @@
context = kgsl_context_get_owner(dev_priv,
constraint.context_id);
+
if (context == NULL)
break;
+
status = adreno_set_constraint(device, context,
&constraint);
+
kgsl_context_put(context);
}
break;
@@ -3354,24 +3383,28 @@
}
case IOCTL_KGSL_PERFCOUNTER_GET: {
struct kgsl_perfcounter_get *get = data;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
/*
* adreno_perfcounter_get() is called by kernel clients
* during start(), so it is not safe to take an
* active count inside this function.
*/
result = kgsl_active_count_get(device);
- if (result)
- break;
- result = adreno_perfcounter_get(adreno_dev, get->groupid,
- get->countable, &get->offset, &get->offset_hi,
- PERFCOUNTER_FLAG_NONE);
- kgsl_active_count_put(device);
+ if (result == 0) {
+ result = adreno_perfcounter_get(adreno_dev,
+ get->groupid, get->countable, &get->offset,
+ &get->offset_hi, PERFCOUNTER_FLAG_NONE);
+ kgsl_active_count_put(device);
+ }
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
break;
}
case IOCTL_KGSL_PERFCOUNTER_PUT: {
struct kgsl_perfcounter_put *put = data;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
result = adreno_perfcounter_put(adreno_dev, put->groupid,
put->countable, PERFCOUNTER_FLAG_NONE);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
break;
}
case IOCTL_KGSL_PERFCOUNTER_QUERY: {
@@ -3383,12 +3416,8 @@
}
case IOCTL_KGSL_PERFCOUNTER_READ: {
struct kgsl_perfcounter_read *read = data;
- result = kgsl_active_count_get(device);
- if (result)
- break;
result = adreno_perfcounter_read_group(adreno_dev,
read->reads, read->count);
- kgsl_active_count_put(device);
break;
}
default:
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 588c243..5f80fcf 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -155,6 +155,7 @@
struct adreno_context *drawctxt)
{
struct kgsl_cmdbatch *cmdbatch = NULL;
+ int pending;
mutex_lock(&drawctxt->mutex);
if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
@@ -164,7 +165,32 @@
* Don't dequeue a cmdbatch that is still waiting for other
* events
*/
- if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
+
+ spin_lock(&cmdbatch->lock);
+ pending = list_empty(&cmdbatch->synclist) ? 0 : 1;
+
+ /*
+	 * If syncpoints are pending and the canary timer hasn't been
+	 * started yet, start it
+ */
+ if (pending) {
+ /*
+ * If syncpoints are pending start the canary timer if
+ * it hasn't already been started
+ */
+ if (!timer_pending(&cmdbatch->timer))
+ mod_timer(&cmdbatch->timer, jiffies + (5 * HZ));
+ spin_unlock(&cmdbatch->lock);
+ } else {
+ /*
+ * Otherwise, delete the timer to make sure it is good
+ * and dead before queuing the buffer
+ */
+ spin_unlock(&cmdbatch->lock);
+ del_timer_sync(&cmdbatch->timer);
+ }
+
+ if (pending) {
cmdbatch = ERR_PTR(-EAGAIN);
goto done;
}
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 45075a5..7785f3b 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -680,6 +680,15 @@
if (adreno_is_a2xx(adreno_dev))
return -ENOSPC;
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, len)) {
+ size = -EFAULT;
+ goto error_free;
+ }
+
kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
if (adreno_profile_enabled(profile)) {
@@ -688,8 +697,10 @@
}
ret = kgsl_active_count_get(device);
- if (ret)
- return -EINVAL;
+ if (ret) {
+ size = ret;
+ goto error_unlock;
+ }
/*
* When adding/removing assignments, ensure that the GPU is done with
@@ -697,19 +708,13 @@
* GPU and avoid racey conditions.
*/
if (adreno_idle(device)) {
- size = -EINVAL;
+ size = -ETIMEDOUT;
goto error_put;
}
/* clear all shared buffer results */
adreno_profile_process_results(device);
- buf = kmalloc(len + 1, GFP_KERNEL);
- if (!buf) {
- size = -EINVAL;
- goto error_put;
- }
-
pbuf = buf;
/* clear the log buffer */
@@ -718,10 +723,6 @@
profile->log_tail = profile->log_buffer;
}
- if (copy_from_user(buf, user_buf, len)) {
- size = -EFAULT;
- goto error_free;
- }
/* for sanity and parsing, ensure it is null terminated */
buf[len] = '\0';
@@ -741,12 +742,12 @@
size = len;
-error_free:
- kfree(buf);
error_put:
kgsl_active_count_put(device);
error_unlock:
kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+error_free:
+ kfree(buf);
return size;
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 5d78879..5a33d9d 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,52 +105,108 @@
}
EXPORT_SYMBOL(kgsl_trace_regwrite);
-int kgsl_memfree_hist_init(void)
-{
- void *base;
+/*
+ * The memfree list contains the last N blocks of memory that have been freed.
+ * On a GPU fault we walk the list to see if the faulting address had been
+ * recently freed and print out a message to that effect
+ */
- base = kzalloc(KGSL_MEMFREE_HIST_SIZE, GFP_KERNEL);
- kgsl_driver.memfree_hist.base_hist_rb = base;
- if (base == NULL)
- return -ENOMEM;
- kgsl_driver.memfree_hist.size = KGSL_MEMFREE_HIST_SIZE;
- kgsl_driver.memfree_hist.wptr = base;
+#define MEMFREE_ENTRIES 512
+
+static DEFINE_SPINLOCK(memfree_lock);
+
+struct memfree_entry {
+ unsigned long gpuaddr;
+ unsigned long size;
+ pid_t pid;
+ unsigned int flags;
+};
+
+static struct {
+ struct memfree_entry *list;
+ int head;
+ int tail;
+} memfree;
+
+static int kgsl_memfree_init(void)
+{
+ memfree.list = kzalloc(MEMFREE_ENTRIES * sizeof(struct memfree_entry),
+ GFP_KERNEL);
+
+ return (memfree.list) ? 0 : -ENOMEM;
+}
+
+static void kgsl_memfree_exit(void)
+{
+ kfree(memfree.list);
+ memset(&memfree, 0, sizeof(memfree));
+}
+
+int kgsl_memfree_find_entry(pid_t pid, unsigned long *gpuaddr,
+ unsigned long *size, unsigned int *flags)
+{
+ int ptr;
+
+ if (memfree.list == NULL)
+ return 0;
+
+ spin_lock(&memfree_lock);
+
+ ptr = memfree.head - 1;
+ if (ptr < 0)
+ ptr = MEMFREE_ENTRIES - 1;
+
+ /* Walk backwards through the list looking for the last match */
+ while (ptr != memfree.tail) {
+ struct memfree_entry *entry = &memfree.list[ptr];
+
+ if ((entry->pid == pid) &&
+ (*gpuaddr >= entry->gpuaddr &&
+ *gpuaddr < (entry->gpuaddr + entry->size))) {
+ *gpuaddr = entry->gpuaddr;
+ *flags = entry->flags;
+ *size = entry->size;
+
+ spin_unlock(&memfree_lock);
+ return 1;
+ }
+
+ ptr = ptr - 1;
+
+ if (ptr < 0)
+ ptr = MEMFREE_ENTRIES - 1;
+ }
+
+ spin_unlock(&memfree_lock);
return 0;
}
-void kgsl_memfree_hist_exit(void)
+static void kgsl_memfree_add(pid_t pid, unsigned int gpuaddr,
+ unsigned int size, int flags)
+
{
- kfree(kgsl_driver.memfree_hist.base_hist_rb);
- kgsl_driver.memfree_hist.base_hist_rb = NULL;
-}
+ struct memfree_entry *entry;
-void kgsl_memfree_hist_set_event(unsigned int pid, unsigned int gpuaddr,
- unsigned int size, int flags)
-{
- struct kgsl_memfree_hist_elem *p;
-
- void *base = kgsl_driver.memfree_hist.base_hist_rb;
- int rbsize = kgsl_driver.memfree_hist.size;
-
- if (base == NULL)
+ if (memfree.list == NULL)
return;
- mutex_lock(&kgsl_driver.memfree_hist_mutex);
- p = kgsl_driver.memfree_hist.wptr;
- p->pid = pid;
- p->gpuaddr = gpuaddr;
- p->size = size;
- p->flags = flags;
+ spin_lock(&memfree_lock);
- kgsl_driver.memfree_hist.wptr++;
- if ((void *)kgsl_driver.memfree_hist.wptr >= base+rbsize) {
- kgsl_driver.memfree_hist.wptr =
- (struct kgsl_memfree_hist_elem *)base;
- }
- mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+ entry = &memfree.list[memfree.head];
+
+ entry->pid = pid;
+ entry->gpuaddr = gpuaddr;
+ entry->size = size;
+ entry->flags = flags;
+
+ memfree.head = (memfree.head + 1) % MEMFREE_ENTRIES;
+
+ if (memfree.head == memfree.tail)
+ memfree.tail = (memfree.tail + 1) % MEMFREE_ENTRIES;
+
+ spin_unlock(&memfree_lock);
}
-
/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
* @device - Pointer to the device structure
* @ptbase - the pagetable base of the object
@@ -557,6 +613,13 @@
write_lock(&device->context_lock);
if (context->id != KGSL_CONTEXT_INVALID) {
+
+ /* Clear the timestamps in the memstore during destroy */
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), 0);
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), 0);
+
idr_remove(&device->context_idr, context->id);
context->id = KGSL_CONTEXT_INVALID;
}
@@ -1415,7 +1478,6 @@
kgsl_readtimestamp(device, context,
KGSL_TIMESTAMP_RETIRED),
result);
-
return result;
}
@@ -1424,9 +1486,14 @@
void *data)
{
struct kgsl_device_waittimestamp *param = data;
+ struct kgsl_device *device = dev_priv->device;
+ long result = -EINVAL;
- return _device_waittimestamp(dev_priv, NULL,
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ result = _device_waittimestamp(dev_priv, NULL,
param->timestamp, param->timeout);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ return result;
}
static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
@@ -1435,8 +1502,10 @@
{
struct kgsl_device_waittimestamp_ctxtid *param = data;
struct kgsl_context *context;
+ struct kgsl_device *device = dev_priv->device;
long result = -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context)
@@ -1444,6 +1513,7 @@
param->timestamp, param->timeout);
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -1484,6 +1554,49 @@
struct kref refcount;
};
+static void _kgsl_cmdbatch_timer(unsigned long data)
+{
+ struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
+ struct kgsl_cmdbatch_sync_event *event;
+
+ if (cmdbatch == NULL || cmdbatch->context == NULL)
+ return;
+
+ spin_lock(&cmdbatch->lock);
+ if (list_empty(&cmdbatch->synclist))
+ goto done;
+
+ pr_err("kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
+ pr_err(" Active sync points:\n");
+
+ /* Print all the pending sync objects */
+ list_for_each_entry(event, &cmdbatch->synclist, node) {
+
+ switch (event->type) {
+ case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
+ unsigned int retired;
+
+ retired = kgsl_readtimestamp(event->device,
+ event->context, KGSL_TIMESTAMP_RETIRED);
+
+ pr_err(" [timestamp] context %d timestamp %d (retired %d)\n",
+ event->context->id, event->timestamp,
+ retired);
+ break;
+ }
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ pr_err(" fence: [%p] %s\n", event->handle,
+ (event->handle && event->handle->fence)
+ ? event->handle->fence->name : "NULL");
+ break;
+ }
+ }
+
+done:
+ spin_unlock(&cmdbatch->lock);
+}
+
/**
* kgsl_cmdbatch_sync_event_destroy() - Destroy a sync event object
* @kref: Pointer to the kref structure for this object
@@ -1558,6 +1671,10 @@
sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0;
spin_unlock(&event->cmdbatch->lock);
+ /* If the list is empty delete the canary timer */
+ if (sched)
+ del_timer_sync(&event->cmdbatch->timer);
+
/*
* if this is the last event in the list then tell
* the GPU device that the cmdbatch can be submitted
@@ -1600,10 +1717,12 @@
struct kgsl_cmdbatch_sync_event *event, *tmp;
LIST_HEAD(cancel_synclist);
- /*
- * Empty the synclist before canceling events
- */
+ /* Zap the canary timer */
+ del_timer_sync(&cmdbatch->timer);
+
spin_lock(&cmdbatch->lock);
+
+ /* Empty the synclist before canceling events */
list_splice_init(&cmdbatch->synclist, &cancel_synclist);
spin_unlock(&cmdbatch->lock);
@@ -1782,6 +1901,7 @@
event->cmdbatch = cmdbatch;
event->context = context;
event->timestamp = sync->timestamp;
+ event->device = device;
/*
* Two krefs are required to support events. The first kref is for
@@ -1917,6 +2037,10 @@
cmdbatch->context = context;
cmdbatch->flags = flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
+ /* Add a timer to help debug sync deadlocks */
+ setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
+ (unsigned long) cmdbatch);
+
return cmdbatch;
}
@@ -2179,9 +2303,14 @@
void *data)
{
struct kgsl_cmdstream_readtimestamp *param = data;
+ struct kgsl_device *device = dev_priv->device;
+ long result = -EINVAL;
- return _cmdstream_readtimestamp(dev_priv, NULL,
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ result = _cmdstream_readtimestamp(dev_priv, NULL,
param->type, ¶m->timestamp);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ return result;
}
static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
@@ -2189,9 +2318,11 @@
void *data)
{
struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
long result = -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context)
@@ -2199,6 +2330,7 @@
param->type, ¶m->timestamp);
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2250,9 +2382,14 @@
void *data)
{
struct kgsl_cmdstream_freememontimestamp *param = data;
+ struct kgsl_device *device = dev_priv->device;
+ long result = -EINVAL;
- return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+ result = _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
NULL, param->timestamp, param->type);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+ return result;
}
static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
@@ -2262,13 +2399,16 @@
{
struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
struct kgsl_context *context;
+ struct kgsl_device *device = dev_priv->device;
long result = -EINVAL;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context)
result = _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
context, param->timestamp, param->type);
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2280,6 +2420,7 @@
struct kgsl_context *context = NULL;
struct kgsl_device *device = dev_priv->device;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = device->ftbl->drawctxt_create(dev_priv, ¶m->flags);
if (IS_ERR(context)) {
result = PTR_ERR(context);
@@ -2288,6 +2429,7 @@
trace_kgsl_context_create(dev_priv->device, context, param->flags);
param->drawctxt_id = context->id;
done:
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
@@ -2295,18 +2437,44 @@
unsigned int cmd, void *data)
{
struct kgsl_drawctxt_destroy *param = data;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
long result;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
result = kgsl_context_detach(context);
kgsl_context_put(context);
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
return result;
}
-static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
+static long _sharedmem_free_entry(struct kgsl_mem_entry *entry)
+{
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
+ trace_kgsl_mem_free(entry);
+
+ kgsl_memfree_add(entry->priv->pid, entry->memdesc.gpuaddr,
+ entry->memdesc.size, entry->memdesc.flags);
+
+ /*
+ * First kgsl_mem_entry_put is for the reference that we took in
+ * this function when calling kgsl_sharedmem_find, second one is
+ * to free the memory since this is a free ioctl
+ */
+ kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put(entry);
+
+ return 0;
+}
+
+long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_sharedmem_free *param = data;
@@ -2319,29 +2487,11 @@
param->gpuaddr);
return -EINVAL;
}
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
- trace_kgsl_mem_free(entry);
-
- kgsl_memfree_hist_set_event(entry->priv->pid,
- entry->memdesc.gpuaddr,
- entry->memdesc.size,
- entry->memdesc.flags);
-
- /*
- * First kgsl_mem_entry_put is for the reference that we took in
- * this function when calling kgsl_sharedmem_find, second one is
- * to free the memory since this is a free ioctl
- */
- kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
- return 0;
+ return _sharedmem_free_entry(entry);
}
-static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
+long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_gpumem_free_id *param = data;
@@ -2355,26 +2505,7 @@
return -EINVAL;
}
- if (!kgsl_mem_entry_set_pend(entry)) {
- kgsl_mem_entry_put(entry);
- return -EBUSY;
- }
-
- trace_kgsl_mem_free(entry);
-
- kgsl_memfree_hist_set_event(entry->priv->pid,
- entry->memdesc.gpuaddr,
- entry->memdesc.size,
- entry->memdesc.flags);
-
- /*
- * First kgsl_mem_entry_put is for the reference that we took in
- * this function when calling kgsl_sharedmem_find_id, second one is
- * to free the memory since this is a free ioctl
- */
- kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
- return 0;
+ return _sharedmem_free_entry(entry);
}
static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
@@ -3385,78 +3516,65 @@
typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
unsigned int, void *);
-#define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \
+#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
- { .cmd = (_cmd), .func = (_func), .flags = (_flags) }
+ { .cmd = (_cmd), .func = (_func) }
-#define KGSL_IOCTL_LOCK BIT(0)
static const struct {
unsigned int cmd;
kgsl_ioctl_func_t func;
- unsigned int flags;
} kgsl_ioctl_funcs[] = {
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
- kgsl_ioctl_device_getproperty,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_getproperty),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
- kgsl_ioctl_device_waittimestamp,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_waittimestamp),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
- kgsl_ioctl_device_waittimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_waittimestamp_ctxtid),
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
- kgsl_ioctl_rb_issueibcmds, 0),
+ kgsl_ioctl_rb_issueibcmds),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
- kgsl_ioctl_submit_commands, 0),
+ kgsl_ioctl_submit_commands),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
- kgsl_ioctl_cmdstream_readtimestamp,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_readtimestamp),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
- kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_readtimestamp_ctxtid),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
- kgsl_ioctl_cmdstream_freememontimestamp,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_freememontimestamp),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
- kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_cmdstream_freememontimestamp_ctxtid),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
- kgsl_ioctl_drawctxt_create,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_drawctxt_create),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
- kgsl_ioctl_drawctxt_destroy,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_drawctxt_destroy),
KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
- kgsl_ioctl_map_user_mem, 0),
+ kgsl_ioctl_map_user_mem),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
- kgsl_ioctl_map_user_mem, 0),
+ kgsl_ioctl_map_user_mem),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
- kgsl_ioctl_sharedmem_free, 0),
+ kgsl_ioctl_sharedmem_free),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
- kgsl_ioctl_sharedmem_flush_cache, 0),
+ kgsl_ioctl_sharedmem_flush_cache),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
- kgsl_ioctl_gpumem_alloc, 0),
+ kgsl_ioctl_gpumem_alloc),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
- kgsl_ioctl_cff_syncmem, 0),
+ kgsl_ioctl_cff_syncmem),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
- kgsl_ioctl_cff_user_event, 0),
+ kgsl_ioctl_cff_user_event),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
- kgsl_ioctl_timestamp_event,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_timestamp_event),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
- kgsl_ioctl_device_setproperty,
- KGSL_IOCTL_LOCK),
+ kgsl_ioctl_device_setproperty),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
- kgsl_ioctl_gpumem_alloc_id, 0),
+ kgsl_ioctl_gpumem_alloc_id),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
- kgsl_ioctl_gpumem_free_id, 0),
+ kgsl_ioctl_gpumem_free_id),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
- kgsl_ioctl_gpumem_get_info, 0),
+ kgsl_ioctl_gpumem_get_info),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
- kgsl_ioctl_gpumem_sync_cache, 0),
+ kgsl_ioctl_gpumem_sync_cache),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK,
- kgsl_ioctl_gpumem_sync_cache_bulk, 0),
+ kgsl_ioctl_gpumem_sync_cache_bulk),
};
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
@@ -3464,7 +3582,7 @@
struct kgsl_device_private *dev_priv = filep->private_data;
unsigned int nr;
kgsl_ioctl_func_t func;
- int lock, ret;
+ int ret;
char ustack[64];
void *uptr = NULL;
@@ -3521,7 +3639,6 @@
}
func = kgsl_ioctl_funcs[nr].func;
- lock = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK;
} else {
func = dev_priv->device->ftbl->ioctl;
if (!func) {
@@ -3530,19 +3647,10 @@
ret = -ENOIOCTLCMD;
goto done;
}
- lock = 1;
}
- if (lock)
- kgsl_mutex_lock(&dev_priv->device->mutex,
- &dev_priv->device->mutex_owner);
-
ret = func(dev_priv, cmd, uptr);
- if (lock)
- kgsl_mutex_unlock(&dev_priv->device->mutex,
- &dev_priv->device->mutex_owner);
-
/*
* Still copy back on failure, but assume function took
* all necessary precautions sanitizing the return values.
@@ -3902,8 +4010,6 @@
.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
- .memfree_hist_mutex =
- __MUTEX_INITIALIZER(kgsl_driver.memfree_hist_mutex),
/*
* Full cache flushes are faster than line by line on at least
* 8064 and 8974 once the region to be flushed is > 16mb.
@@ -4247,7 +4353,7 @@
kgsl_driver.class = NULL;
}
- kgsl_memfree_hist_exit();
+ kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
}
@@ -4319,8 +4425,7 @@
goto err;
}
- if (kgsl_memfree_hist_init())
- KGSL_CORE_ERR("failed to init memfree_hist");
+ kgsl_memfree_init();
return 0;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 6da4a86..0bd71cb 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,25 +75,8 @@
#define KGSL_STATS_ADD(_size, _stat, _max) \
do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
-
-#define KGSL_MEMFREE_HIST_SIZE ((int)(PAGE_SIZE * 2))
-
#define KGSL_MAX_NUMIBS 100000
-struct kgsl_memfree_hist_elem {
- unsigned int pid;
- unsigned int gpuaddr;
- unsigned int size;
- unsigned int flags;
-};
-
-struct kgsl_memfree_hist {
- void *base_hist_rb;
- unsigned int size;
- struct kgsl_memfree_hist_elem *wptr;
-};
-
-
struct kgsl_device;
struct kgsl_context;
@@ -122,9 +105,6 @@
void *ptpool;
- struct mutex memfree_hist_mutex;
- struct kgsl_memfree_hist memfree_hist;
-
struct {
unsigned int vmalloc;
unsigned int vmalloc_max;
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 5645628..ccb2312 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -123,52 +123,6 @@
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
-static int memfree_hist_print(struct seq_file *s, void *unused)
-{
- void *base = kgsl_driver.memfree_hist.base_hist_rb;
-
- struct kgsl_memfree_hist_elem *wptr = kgsl_driver.memfree_hist.wptr;
- struct kgsl_memfree_hist_elem *p;
- char str[16];
-
- seq_printf(s, "%8s %8s %8s %11s\n",
- "pid", "gpuaddr", "size", "flags");
-
- mutex_lock(&kgsl_driver.memfree_hist_mutex);
- p = wptr;
- for (;;) {
- kgsl_get_memory_usage(str, sizeof(str), p->flags);
- /*
- * if the ring buffer is not filled up yet
- * all its empty elems have size==0
- * just skip them ...
- */
- if (p->size)
- seq_printf(s, "%8d %08x %8d %11s\n",
- p->pid, p->gpuaddr, p->size, str);
- p++;
- if ((void *)p >= base + kgsl_driver.memfree_hist.size)
- p = (struct kgsl_memfree_hist_elem *) base;
-
- if (p == kgsl_driver.memfree_hist.wptr)
- break;
- }
- mutex_unlock(&kgsl_driver.memfree_hist_mutex);
- return 0;
-}
-
-static int memfree_hist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, memfree_hist_print, inode->i_private);
-}
-
-static const struct file_operations memfree_hist_fops = {
- .open = memfree_hist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
void kgsl_device_debugfs_init(struct kgsl_device *device)
{
if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
@@ -188,8 +142,6 @@
&mem_log_fops);
debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
&pwr_log_fops);
- debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
- &memfree_hist_fops);
/* Create postmortem dump control files */
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 1e6fbc9..6f17d56c 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -173,10 +173,9 @@
* @ibcount: Number of IBs in the command list
* @ibdesc: Pointer to the list of IBs
* @expires: Point in time when the cmdbatch is considered to be hung
- * @invalid: non-zero if the dispatcher determines the command and the owning
- * context should be invalidated
* @refcount: kref structure to maintain the reference count
* @synclist: List of context/timestamp tuples to wait for before issuing
+ * @timer: a timer used to track possible sync timeouts for this cmdbatch
*
* This struture defines an atomic batch of command buffers issued from
* userspace.
@@ -193,9 +192,9 @@
uint32_t ibcount;
struct kgsl_ibdesc *ibdesc;
unsigned long expires;
- int invalid;
struct kref refcount;
struct list_head synclist;
+ struct timer_list timer;
};
/**
@@ -548,6 +547,9 @@
*context);
int kgsl_context_detach(struct kgsl_context *context);
+int kgsl_memfree_find_entry(pid_t pid, unsigned long *gpuaddr,
+ unsigned long *size, unsigned int *flags);
+
/**
* kgsl_context_put() - Release context reference count
* @context: Pointer to the KGSL context to be released
@@ -728,27 +730,6 @@
}
/**
- * kgsl_cmdbatch_sync_pending() - return true if the cmdbatch is waiting
- * @cmdbatch: Pointer to the command batch object to check
- *
- * Return non-zero if the specified command batch is still waiting for sync
- * point dependencies to be satisfied
- */
-static inline int kgsl_cmdbatch_sync_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- int ret;
-
- if (cmdbatch == NULL)
- return 0;
-
- spin_lock(&cmdbatch->lock);
- ret = list_empty(&cmdbatch->synclist) ? 0 : 1;
- spin_unlock(&cmdbatch->lock);
-
- return ret;
-}
-
-/**
* kgsl_sysfs_store() - parse a string from a sysfs store function
* @buf: Incoming string to parse
* @ptr: Pointer to an unsigned int to store the value
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 488e5a8..c4fa8af 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -281,39 +281,20 @@
static void _check_if_freed(struct kgsl_iommu_device *iommu_dev,
unsigned long addr, unsigned int pid)
{
- void *base = kgsl_driver.memfree_hist.base_hist_rb;
- struct kgsl_memfree_hist_elem *wptr;
- struct kgsl_memfree_hist_elem *p;
+ unsigned long gpuaddr = addr;
+ unsigned long size = 0;
+ unsigned int flags = 0;
+
char name[32];
memset(name, 0, sizeof(name));
- mutex_lock(&kgsl_driver.memfree_hist_mutex);
- wptr = kgsl_driver.memfree_hist.wptr;
- p = wptr;
- for (;;) {
- if (p->size && p->pid == pid)
- if (addr >= p->gpuaddr &&
- addr < (p->gpuaddr + p->size)) {
-
- kgsl_get_memory_usage(name, sizeof(name) - 1,
- p->flags);
- KGSL_LOG_DUMP(iommu_dev->kgsldev,
- "---- premature free ----\n");
- KGSL_LOG_DUMP(iommu_dev->kgsldev,
- "[%8.8X-%8.8X] (%s) was already freed by pid %d\n",
- p->gpuaddr,
- p->gpuaddr + p->size,
- name,
- p->pid);
- }
- p++;
- if ((void *)p >= base + kgsl_driver.memfree_hist.size)
- p = (struct kgsl_memfree_hist_elem *) base;
-
- if (p == kgsl_driver.memfree_hist.wptr)
- break;
+ if (kgsl_memfree_find_entry(pid, &gpuaddr, &size, &flags)) {
+ kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- premature free ----\n");
+ KGSL_LOG_DUMP(iommu_dev->kgsldev,
+ "[%8.8lX-%8.8lX] (%s) was already freed by pid %d\n",
+ gpuaddr, gpuaddr + size, name, pid);
}
- mutex_unlock(&kgsl_driver.memfree_hist_mutex);
}
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
old mode 100644
new mode 100755
index 65e607b..4591165
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -85,8 +85,17 @@
return status;
}
-static void _kgsl_destroy_pagetable(struct kgsl_pagetable *pagetable)
+static void kgsl_destroy_pagetable(struct kref *kref)
{
+ struct kgsl_pagetable *pagetable = container_of(kref,
+ struct kgsl_pagetable, refcount);
+
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_del(&pagetable->list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
pagetable_remove_sysfs_objects(pagetable);
kgsl_cleanup_pt(pagetable);
@@ -101,29 +110,6 @@
kfree(pagetable);
}
-static void kgsl_destroy_pagetable(struct kref *kref)
-{
- struct kgsl_pagetable *pagetable = container_of(kref,
- struct kgsl_pagetable, refcount);
- unsigned long flags;
-
- spin_lock_irqsave(&kgsl_driver.ptlock, flags);
- list_del(&pagetable->list);
- spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
- _kgsl_destroy_pagetable(pagetable);
-}
-
-static void kgsl_destroy_pagetable_locked(struct kref *kref)
-{
- struct kgsl_pagetable *pagetable = container_of(kref,
- struct kgsl_pagetable, refcount);
-
- list_del(&pagetable->list);
-
- _kgsl_destroy_pagetable(pagetable);
-}
-
static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
if (pagetable)
@@ -138,12 +124,9 @@
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kref_get_unless_zero(&pt->refcount)) {
- if (pt->name == name) {
- ret = pt;
- break;
- }
- kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
+ if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
+ ret = pt;
+ break;
}
}
@@ -340,14 +323,9 @@
return KGSL_MMU_GLOBAL_PT;
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kref_get_unless_zero(&pt->refcount)) {
- if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
- ptid = (int) pt->name;
- kref_put(&pt->refcount,
- kgsl_destroy_pagetable_locked);
- break;
- }
- kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ ptid = (int) pt->name;
+ break;
}
}
spin_unlock(&kgsl_driver.ptlock);
@@ -367,23 +345,16 @@
return 0;
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kref_get_unless_zero(&pt->refcount)) {
- if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
- if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
- ret = 1;
- kref_put(&pt->refcount,
- kgsl_destroy_pagetable_locked);
- break;
- } else {
- pt->fault_addr =
- (addr & ~(PAGE_SIZE-1));
- ret = 0;
- kref_put(&pt->refcount,
- kgsl_destroy_pagetable_locked);
- break;
- }
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
+ ret = 1;
+ break;
+ } else {
+ pt->fault_addr =
+ (addr & ~(PAGE_SIZE-1));
+ ret = 0;
+ break;
}
- kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
}
}
spin_unlock(&kgsl_driver.ptlock);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index dc3ad21..cef052d 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -118,6 +118,9 @@
struct sync_pt *pt;
struct sync_fence *fence = NULL;
int ret = -EINVAL;
+ char fence_name[sizeof(fence->name)] = {};
+
+ priv.fence_fd = -1;
if (len != sizeof(priv))
return -EINVAL;
@@ -126,10 +129,12 @@
if (event == NULL)
return -ENOMEM;
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
context = kgsl_context_get_owner(owner, context_id);
if (context == NULL)
- goto fail_pt;
+ goto unlock;
event->context = context;
event->timestamp = timestamp;
@@ -138,51 +143,67 @@
if (pt == NULL) {
KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
ret = -ENOMEM;
- goto fail_pt;
+ goto unlock;
}
+ snprintf(fence_name, sizeof(fence_name),
+ "%s-pid-%d-ctx-%d-ts-%d",
+ device->name, current->group_leader->pid,
+ context_id, timestamp);
- fence = sync_fence_create("kgsl-fence", pt);
+
+ fence = sync_fence_create(fence_name, pt);
if (fence == NULL) {
/* only destroy pt when not added to fence */
kgsl_sync_pt_destroy(pt);
KGSL_DRV_ERR(device, "sync_fence_create failed\n");
ret = -ENOMEM;
- goto fail_fence;
+ goto unlock;
}
priv.fence_fd = get_unused_fd_flags(0);
if (priv.fence_fd < 0) {
- KGSL_DRV_ERR(device, "invalid fence fd\n");
- ret = -EINVAL;
- goto fail_fd;
+ KGSL_DRV_ERR(device, "Unable to get a file descriptor: %d\n",
+ priv.fence_fd);
+ ret = priv.fence_fd;
+ goto unlock;
}
sync_fence_install(fence, priv.fence_fd);
+ /* Unlock the mutex before copying to user */
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
if (copy_to_user(data, &priv, sizeof(priv))) {
ret = -EFAULT;
- goto fail_copy_fd;
+ goto out;
}
/*
* Hold the context ref-count for the event - it will get released in
* the callback
*/
+
+ kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
ret = kgsl_add_event(device, context_id, timestamp,
kgsl_fence_event_cb, event, owner);
+
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
if (ret)
- goto fail_event;
+ goto out;
return 0;
-fail_event:
-fail_copy_fd:
- /* clean up sync_fence_install */
- put_unused_fd(priv.fence_fd);
-fail_fd:
- /* clean up sync_fence_create */
- sync_fence_put(fence);
-fail_fence:
-fail_pt:
+unlock:
+ kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
+out:
+ if (priv.fence_fd >= 0)
+ put_unused_fd(priv.fence_fd);
+
+ if (fence)
+ sync_fence_put(fence);
+
kgsl_context_put(context);
kfree(event);
return ret;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 1704105..545e68f 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -193,6 +193,7 @@
int wr_sz;
struct msm_i2c_platform_data *pdata;
enum msm_i2c_state pwr_state;
+ atomic_t xfer_progress;
struct mutex mlock;
void *complete;
int i2c_gpios[ARRAY_SIZE(i2c_rsrcs)];
@@ -230,8 +231,10 @@
uint32_t op_flgs = 0;
int err = 0;
- if (pm_runtime_suspended(dev->dev))
+ if (atomic_read(&dev->xfer_progress) != 1) {
+ dev_err(dev->dev, "irq:%d when PM suspended\n", irq);
return IRQ_NONE;
+ }
status = readl_relaxed(dev->base + QUP_I2C_STATUS);
status1 = readl_relaxed(dev->base + QUP_ERROR_FLAGS);
@@ -1002,6 +1005,7 @@
if (dev->pdata->clk_ctl_xfer)
i2c_qup_pm_resume_clk(dev);
+ atomic_set(&dev->xfer_progress, 1);
/* Initialize QUP registers during first transfer */
if (dev->clk_ctl == 0) {
int fs_div;
@@ -1305,6 +1309,7 @@
dev->cnt = 0;
if (dev->pdata->clk_ctl_xfer)
i2c_qup_pm_suspend_clk(dev);
+ atomic_set(&dev->xfer_progress, 0);
mutex_unlock(&dev->mlock);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1655,6 +1660,7 @@
mutex_init(&dev->mlock);
dev->pwr_state = MSM_I2C_PM_SUSPENDED;
+ atomic_set(&dev->xfer_progress, 0);
/* If the same AHB clock is used on Modem side
* switch it on here itself and don't switch it
* on and off during suspend and resume.
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index 87a4ab9..799fc21 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -23,6 +23,10 @@
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
+#include <linux/device-mapper.h>
+#include <linux/printk.h>
+#include <linux/pft.h>
+
#include <crypto/scatterwalk.h>
#include <asm/page.h>
#include <asm/unaligned.h>
@@ -31,9 +35,6 @@
#include <crypto/algapi.h>
#include <mach/qcrypto.h>
-#include <linux/device-mapper.h>
-
-
#define DM_MSG_PREFIX "req-crypt"
#define MAX_SG_LIST 1024
@@ -52,13 +53,17 @@
int err;
};
-struct dm_dev *dev;
+#define FDE_KEY_ID 0
+#define PFE_KEY_ID 1
+
+static struct dm_dev *dev;
static struct kmem_cache *_req_crypt_io_pool;
-sector_t start_sector_orig;
-struct workqueue_struct *req_crypt_queue;
-mempool_t *req_io_pool;
-mempool_t *req_page_pool;
-struct crypto_ablkcipher *tfm;
+static sector_t start_sector_orig;
+static struct workqueue_struct *req_crypt_queue;
+static mempool_t *req_io_pool;
+static mempool_t *req_page_pool;
+static bool is_fde_enabled;
+static struct crypto_ablkcipher *tfm;
struct req_dm_crypt_io {
struct work_struct work;
@@ -66,12 +71,83 @@
int error;
atomic_t pending;
struct timespec start_time;
+ bool should_encrypt;
+ bool should_decrypt;
+ u32 key_id;
};
static void req_crypt_cipher_complete
(struct crypto_async_request *req, int err);
+static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
+{
+ int ret;
+ bool should_encrypt = false;
+ struct bio *bio = NULL;
+ struct inode *inode = NULL;
+ u32 key_id = 0;
+ bool is_encrypted = false;
+ bool is_inplace = false;
+
+ if (!req || !req->cloned_request || !req->cloned_request->bio)
+ return false;
+
+ bio = req->cloned_request->bio;
+
+ if (!bio->bi_io_vec || !bio->bi_io_vec->bv_page ||
+ !bio->bi_io_vec->bv_page->mapping)
+ return false;
+
+ inode = bio->bi_io_vec->bv_page->mapping->host;
+
+ ret = pft_get_key_index(inode, &key_id, &is_encrypted, &is_inplace);
+ /* req->key_id = key_id; @todo support more than 1 pfe key */
+ if ((ret == 0) && (is_encrypted || is_inplace)) {
+ should_encrypt = true;
+ req->key_id = PFE_KEY_ID;
+ } else if (is_fde_enabled) {
+ should_encrypt = true;
+ req->key_id = FDE_KEY_ID;
+ }
+
+ return should_encrypt;
+}
+
+static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
+{
+ int ret;
+ bool should_deccrypt = false;
+ struct bio *bio = NULL;
+ struct inode *inode = NULL;
+ u32 key_id = 0;
+ bool is_encrypted = false;
+ bool is_inplace = false;
+
+ if (!req || !req->cloned_request || !req->cloned_request->bio)
+ return false;
+
+ bio = req->cloned_request->bio;
+
+ if (!bio->bi_io_vec || !bio->bi_io_vec->bv_page ||
+ !bio->bi_io_vec->bv_page->mapping)
+ return false;
+
+ inode = bio->bi_io_vec->bv_page->mapping->host;
+
+ ret = pft_get_key_index(inode, &key_id, &is_encrypted, &is_inplace);
+ /* req->key_id = key_id; @todo support more than 1 pfe key */
+ if ((ret == 0) && (is_encrypted && !is_inplace)) {
+ should_deccrypt = true;
+ req->key_id = PFE_KEY_ID;
+ } else if (is_fde_enabled) {
+ should_deccrypt = true;
+ req->key_id = FDE_KEY_ID;
+ }
+
+ return should_deccrypt;
+}
+
static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
{
atomic_inc(&io->pending);
@@ -196,6 +272,13 @@
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req_crypt_cipher_complete, &result);
init_completion(&result.completion);
+ err = qcrypto_cipher_set_device(req, io->key_id);
+ if (err != 0) {
+ DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
+ __func__, err);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
qcrypto_cipher_set_flag(req,
QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
crypto_ablkcipher_clear_flags(tfm, ~0);
@@ -270,6 +353,26 @@
}
/*
+ * This callback is called by the worker queue to perform non-decrypt reads
+ * and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+ int error = 0;
+
+ if (!io || !io->cloned_request) {
+ DMERR("%s io is invalid\n", __func__);
+ BUG(); /* should not happen */
+ }
+
+ clone = io->cloned_request;
+
+ dm_end_request(clone, error);
+ mempool_free(io, req_io_pool);
+}
+
+/*
* The callback that will be called by the worker queue to perform Encryption
* for writes and submit the request using the elevelator.
*/
@@ -291,6 +394,7 @@
struct page *page = NULL;
u8 IV[AES_XTS_IV_LEN];
int remaining_size = 0;
+ int err = 0;
if (io) {
if (io->cloned_request) {
@@ -322,6 +426,13 @@
req_crypt_cipher_complete, &result);
init_completion(&result.completion);
+ err = qcrypto_cipher_set_device(req, io->key_id);
+ if (err != 0) {
+ DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
+ __func__, err);
+ error = DM_REQ_CRYPT_ERROR;
+ goto ablkcipher_req_alloc_failure;
+ }
qcrypto_cipher_set_flag(req,
QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
crypto_ablkcipher_clear_flags(tfm, ~0);
@@ -460,19 +571,44 @@
req_crypt_dec_pending_encrypt(io);
}
+/*
+ * This callback is called by the worker queue to perform non-encrypted writes
+ * and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
+{
+ struct request *clone = NULL;
+
+ if (!io || !io->cloned_request) {
+ DMERR("%s io is invalid\n", __func__);
+ BUG(); /* should not happen */
+ }
+
+ clone = io->cloned_request;
+ io->error = 0;
+ dm_dispatch_request(clone);
+}
+
/* Queue callback function that will get triggered */
static void req_cryptd_crypt(struct work_struct *work)
{
struct req_dm_crypt_io *io =
container_of(work, struct req_dm_crypt_io, work);
- if (rq_data_dir(io->cloned_request) == WRITE)
- req_cryptd_crypt_write_convert(io);
- else if (rq_data_dir(io->cloned_request) == READ)
- req_cryptd_crypt_read_convert(io);
- else
- DMERR("%s received non-read/write request for Clone %u\n",
+ if (rq_data_dir(io->cloned_request) == WRITE) {
+ if (io->should_encrypt)
+ req_cryptd_crypt_write_convert(io);
+ else
+ req_cryptd_crypt_write_plain(io);
+ } else if (rq_data_dir(io->cloned_request) == READ) {
+ if (io->should_decrypt)
+ req_cryptd_crypt_read_convert(io);
+ else
+ req_cryptd_crypt_read_plain(io);
+ } else {
+ DMERR("%s received non-read/write request for Clone %u\n",
__func__, (unsigned int)io->cloned_request);
+ }
}
static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
@@ -537,7 +673,7 @@
bvec = NULL;
if (rq_data_dir(clone) == WRITE) {
rq_for_each_segment(bvec, clone, iter1) {
- if (bvec->bv_offset == 0) {
+ if (req_io->should_encrypt && bvec->bv_offset == 0) {
mempool_free(bvec->bv_page, req_page_pool);
bvec->bv_page = NULL;
} else
@@ -565,7 +701,6 @@
* For a read request no pre-processing is required the request
* is returned to dm once mapping is done
*/
-
static int req_crypt_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
@@ -594,6 +729,11 @@
map_context->ptr = req_io;
atomic_set(&req_io->pending, 0);
+ if (rq_data_dir(clone) == WRITE)
+ req_io->should_encrypt = req_crypt_should_encrypt(req_io);
+ if (rq_data_dir(clone) == READ)
+ req_io->should_decrypt = req_crypt_should_deccrypt(req_io);
+
/* Get the queue of the underlying original device */
clone->q = bdev_get_queue(dev->bdev);
clone->rq_disk = dev->bdev->bd_disk;
@@ -641,6 +781,8 @@
static void req_crypt_dtr(struct dm_target *ti)
{
+ DMDEBUG("dm-req-crypt Destructor.\n");
+
if (req_crypt_queue) {
destroy_workqueue(req_crypt_queue);
req_crypt_queue = NULL;
@@ -670,6 +812,8 @@
char dummy;
int err = DM_REQ_CRYPT_ERROR;
+ DMDEBUG("dm-req-crypt Constructor.\n");
+
if (argc < 5) {
DMERR(" %s Not enough args\n", __func__);
err = DM_REQ_CRYPT_ERROR;
@@ -696,13 +840,24 @@
goto ctr_exit;
}
} else {
- DMERR(" %s Arg[4]invalid\n", __func__);
+ DMERR(" %s Arg[4] invalid\n", __func__);
err = DM_REQ_CRYPT_ERROR;
goto ctr_exit;
}
start_sector_orig = tmpll;
+ if (argv[5]) {
+ if (!strcmp(argv[5], "fde_enabled"))
+ is_fde_enabled = true;
+ else
+ is_fde_enabled = false;
+ } else {
+ DMERR(" %s Arg[5] invalid, set FDE enabled.\n", __func__);
+ is_fde_enabled = true; /* backward compatible */
+ }
+ DMDEBUG("%s is_fde_enabled=%d\n", __func__, is_fde_enabled);
+
req_crypt_queue = alloc_workqueue("req_cryptd",
WQ_NON_REENTRANT |
WQ_HIGHPRI |
@@ -725,6 +880,7 @@
}
req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+ BUG_ON(!req_io_pool);
if (!req_io_pool) {
DMERR("%s req_io_pool not allocated\n", __func__);
err = DM_REQ_CRYPT_ERROR;
@@ -791,6 +947,8 @@
kmem_cache_destroy(_req_crypt_io_pool);
}
+ DMINFO("dm-req-crypt successfully initialized.\n");
+
return r;
}
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 937fb8c..3a3c370 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -3871,7 +3871,12 @@
if (ret > 0) {
dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
spin_lock_irq(&dmxdevfilter->dev->lock);
- dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
+ /*
+ * Updating the events in case of overflow might remove the
+ * overflow event, so avoid that.
+ */
+ if (dmxdevfilter->buffer.error != -EOVERFLOW)
+ dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
spin_unlock_irq(&dmxdevfilter->dev->lock);
/*
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 69c5190..e8702e4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -355,6 +355,7 @@
struct msm_vfe_src_info src_info[VFE_SRC_MAX];
uint16_t stream_handle_cnt;
unsigned long event_mask;
+ uint32_t burst_len;
};
struct msm_vfe_stats_hardware_info {
@@ -397,6 +398,7 @@
uint16_t stream_handle_cnt;
atomic_t stats_update;
uint32_t stats_mask;
+ uint32_t stats_burst_len;
};
struct msm_vfe_tasklet_queue_cmd {
@@ -480,8 +482,7 @@
struct list_head tasklet_q;
struct tasklet_struct vfe_tasklet;
struct msm_vfe_tasklet_queue_cmd
- tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
-
+ tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
uint32_t soc_hw_version;
uint32_t vfe_hw_version;
struct msm_vfe_hardware_info *hw_info;
@@ -498,6 +499,7 @@
void __iomem *p_avtimer_lsw;
uint8_t ignore_error;
struct msm_isp_statistics *stats;
+ uint32_t vfe_ub_size;
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index e817680..353b55f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -36,10 +36,10 @@
#define VFE40_8x26_VERSION 0x20000013
#define VFE40_8x26V2_VERSION 0x20010014
-#define VFE40_BURST_LEN 1
-#define VFE40_STATS_BURST_LEN 1
-#define VFE40_UB_SIZE 1536
-#define VFE40_EQUAL_SLICE_UB 228
+
+/* STATS_SIZE (BE + BG + BF+ RS + CS + IHIST + BHIST ) = 392 */
+#define VFE40_STATS_SIZE 392
+
#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
@@ -971,6 +971,11 @@
uint8_t plane_idx)
{
uint32_t val;
+
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t burst_len = axi_data->burst_len;
+
uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
if (!stream_info->frame_based) {
@@ -992,7 +997,7 @@
plane_idx].output_stride) << 16 |
(stream_info->plane_cfg[
plane_idx].output_height - 1) << 4 |
- VFE40_BURST_LEN;
+ burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
} else {
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
@@ -1002,7 +1007,7 @@
plane_idx].output_width) << 16 |
(stream_info->plane_cfg[
plane_idx].output_height - 1) << 4 |
- VFE40_BURST_LEN;
+ burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
}
@@ -1117,6 +1122,7 @@
uint8_t num_used_wms = 0;
uint32_t prop_size = 0;
uint32_t wm_ub_size;
+ uint32_t axi_wm_ub;
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i] > 0) {
@@ -1124,7 +1130,9 @@
total_image_size += axi_data->wm_image_size[i];
}
}
- prop_size = MSM_ISP40_TOTAL_WM_UB -
+ axi_wm_ub = vfe_dev->vfe_ub_size - VFE40_STATS_SIZE;
+
+ prop_size = axi_wm_ub -
axi_data->hw_info->min_wm_ub * num_used_wms;
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i]) {
@@ -1149,10 +1157,14 @@
int i;
uint32_t ub_offset = 0;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t axi_equal_slice_ub =
+ (vfe_dev->vfe_ub_size - VFE40_STATS_SIZE)/
+ (axi_data->hw_info->num_wm - 1);
+
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
- msm_camera_io_w(ub_offset << 16 | (VFE40_EQUAL_SLICE_UB - 1),
+ msm_camera_io_w(ub_offset << 16 | (axi_equal_slice_ub - 1),
vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
- ub_offset += VFE40_EQUAL_SLICE_UB;
+ ub_offset += axi_equal_slice_ub;
}
}
@@ -1334,7 +1346,11 @@
static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
{
int i;
- uint32_t ub_offset = VFE40_UB_SIZE;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t ub_offset = vfe_dev->vfe_ub_size;
+ uint32_t stats_burst_len = stats_data->stats_burst_len;
+
+
uint32_t ub_size[VFE40_NUM_STATS_TYPE] = {
64, /*MSM_ISP_STATS_BE*/
128, /*MSM_ISP_STATS_BG*/
@@ -1348,7 +1364,7 @@
for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
ub_offset -= ub_size[i];
- msm_camera_io_w(VFE40_STATS_BURST_LEN << 30 |
+ msm_camera_io_w(stats_burst_len << 30 |
ub_offset << 16 | (ub_size[i] - 1),
vfe_dev->vfe_base + VFE40_STATS_BASE(i) + 0xC);
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 4c3a3d5..d11ea68 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -524,7 +524,9 @@
stream_info->format_factor / ISP_Q2;
} else {
int rdi = SRC_TO_INTF(stream_info->stream_src);
- stream_info->bandwidth = axi_data->src_info[rdi].pixel_clock;
+ if (rdi < VFE_SRC_MAX)
+ stream_info->bandwidth =
+ axi_data->src_info[rdi].pixel_clock;
}
}
@@ -534,6 +536,7 @@
uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
rc = msm_isp_axi_create_stream(
&vfe_dev->axi_data, stream_cfg_cmd);
@@ -581,6 +584,8 @@
msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
stream_info->vt_enable = stream_cfg_cmd->vt_enable;
+ axi_data->burst_len = stream_cfg_cmd->burst_len;
+
if (stream_info->vt_enable) {
vfe_dev->vt_enable = stream_info->vt_enable;
#ifdef CONFIG_MSM_AVTIMER
@@ -853,8 +858,11 @@
struct msm_isp_event_data buf_event;
struct timeval *time_stamp;
uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
- uint32_t frame_id = vfe_dev->axi_data.
- src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+ uint32_t src_intf = SRC_TO_INTF(stream_info->stream_src);
+ uint32_t frame_id = 0;
+ if (src_intf < VFE_SRC_MAX) {
+ frame_id = vfe_dev->axi_data.src_info[src_intf].frame_id;
+ }
if (buf && ts) {
if (vfe_dev->vt_enable) {
@@ -1196,7 +1204,7 @@
enum msm_isp_camif_update_state camif_update)
{
int i, rc = 0;
- uint8_t src_state, wait_for_complete = 0;
+ uint8_t src_state = 0, wait_for_complete = 0;
uint32_t wm_reload_mask = 0x0;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -1212,8 +1220,9 @@
}
stream_info = &axi_data->stream_info[
HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- src_state = axi_data->src_info[
- SRC_TO_INTF(stream_info->stream_src)].active;
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ src_state = axi_data->src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
msm_isp_calculate_bandwidth(axi_data, stream_info);
msm_isp_reset_framedrop(vfe_dev, stream_info);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 6bd7585..d4c86a5 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -490,6 +490,9 @@
{
int rc = 0;
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ stats_data->stats_burst_len = stream_cfg_cmd->stats_burst_len;
+
if (vfe_dev->stats_data.num_active_stream == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index b1521df..a81c7bb 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -701,6 +701,10 @@
}
break;
}
+ case SET_WM_UB_SIZE: {
+ vfe_dev->vfe_ub_size = *cfg_data;
+ break;
+ }
}
return 0;
}
@@ -1143,6 +1147,11 @@
vfe_dev->hw_info->vfe_ops.irq_ops.
read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+ if ((irq_status0 == 0) && (irq_status1 == 0)) {
+ pr_err_ratelimited("%s: irq_status0 & 1 are both 0\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
msm_isp_process_overflow_irq(vfe_dev,
&irq_status0, &irq_status1);
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -1158,7 +1167,7 @@
if ((irq_status0 == 0) && (irq_status1 == 0) &&
(!((error_mask0 != 0) || (error_mask1 != 0)) &&
vfe_dev->error_info.error_count == 1)) {
- ISP_DBG("%s: irq_status0 & 1 are both 0!\n", __func__);
+ ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
return IRQ_HANDLED;
}
@@ -1211,7 +1220,7 @@
spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
if (atomic_read(&vfe_dev->error_info.overflow_state) !=
NO_OVERFLOW) {
- pr_err("There is Overflow, kicking up recovery !!!!");
+ pr_err_ratelimited("There is Overflow, kicking up recovery !!!!\n");
msm_isp_process_overflow_recovery(vfe_dev,
irq_status0, irq_status1);
continue;
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
index 407b81f..2f943a4 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
@@ -347,8 +347,12 @@
JPEG_DBG("%s:%d]", __func__, __LINE__);
}
#endif
+ if (pgmn_dev->jpeg_bus_client) {
+ msm_bus_scale_client_update_request(
+ pgmn_dev->jpeg_bus_client, 0);
+ msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
+ }
- msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);
JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index 50f37db..cce6525 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -27,6 +27,10 @@
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
#endif
+
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl);
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl);
+
static struct msm_actuator msm_vcm_actuator_table;
static struct msm_actuator msm_piezo_actuator_table;
@@ -447,6 +451,31 @@
return rc;
}
+static int32_t msm_actuator_vreg_control(struct msm_actuator_ctrl_t *a_ctrl,
+ int config)
+{
+ int rc = 0, i, cnt;
+ struct msm_actuator_vreg *vreg_cfg;
+
+ vreg_cfg = &a_ctrl->vreg_cfg;
+ cnt = vreg_cfg->num_vreg;
+ if (!cnt)
+ return 0;
+
+ if (cnt >= MSM_ACTUATOT_MAX_VREGS) {
+ pr_err("%s failed %d cnt %d\n", __func__, __LINE__, cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ rc = msm_camera_config_single_vreg(&(a_ctrl->pdev->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ }
+ return rc;
+}
+
static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
{
int32_t rc = 0;
@@ -456,6 +485,11 @@
if (!rc)
gpio_free(a_ctrl->vcm_pwd);
}
+ rc = msm_actuator_vreg_control(a_ctrl, 0);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
kfree(a_ctrl->step_position_table);
a_ctrl->step_position_table = NULL;
@@ -671,6 +705,13 @@
if (rc < 0)
pr_err("actuator_set_position failed %d\n", rc);
break;
+
+ case CFG_ACTUATOR_POWERUP:
+ rc = msm_actuator_power_up(a_ctrl);
+ if (rc < 0)
+ pr_err("Failed actuator power up%d\n", rc);
+ break;
+
default:
break;
}
@@ -794,6 +835,13 @@
CDBG("vcm info: %x %x\n", a_ctrl->vcm_pwd,
a_ctrl->vcm_enable);
+
+ rc = msm_actuator_vreg_control(a_ctrl, 1);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
if (a_ctrl->vcm_enable) {
rc = gpio_request(a_ctrl->vcm_pwd, "msm_actuator");
if (!rc) {
@@ -909,6 +957,7 @@
int32_t rc = 0;
struct msm_camera_cci_client *cci_client = NULL;
struct msm_actuator_ctrl_t *msm_actuator_t = NULL;
+ struct msm_actuator_vreg *vreg_cfg;
CDBG("Enter\n");
if (!pdev->dev.of_node) {
@@ -940,6 +989,18 @@
return rc;
}
+ if (of_find_property((&pdev->dev)->of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &msm_actuator_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data((&pdev->dev)->of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+ }
+
msm_actuator_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops;
msm_actuator_t->actuator_mutex = &msm_actuator_mutex;
msm_actuator_t->cam_name = pdev->id;
@@ -952,6 +1013,7 @@
msm_actuator_t->i2c_client.cci_client = kzalloc(sizeof(
struct msm_camera_cci_client), GFP_KERNEL);
if (!msm_actuator_t->i2c_client.cci_client) {
+ kfree(msm_actuator_t->vreg_cfg.cam_vreg);
kfree(msm_actuator_t);
pr_err("failed no memory\n");
return -ENOMEM;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
index 772b12e..b0d9430 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
@@ -18,10 +18,15 @@
#include <media/v4l2-subdev.h>
#include <media/msmb_camera.h>
#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+
#define DEFINE_MSM_MUTEX(mutexname) \
static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+#define MSM_ACTUATOT_MAX_VREGS (10)
+
struct msm_actuator_ctrl_t;
struct msm_actuator_func_tbl {
@@ -52,6 +57,12 @@
struct msm_actuator_func_tbl func_tbl;
};
+struct msm_actuator_vreg {
+ struct camera_vreg_t *cam_vreg;
+ void *data[MSM_ACTUATOT_MAX_VREGS];
+ int num_vreg;
+};
+
struct msm_actuator_ctrl_t {
struct i2c_driver *i2c_driver;
struct platform_driver *pdriver;
@@ -83,6 +94,7 @@
uint16_t i2c_tbl_index;
enum cci_i2c_master_t cci_master;
uint32_t subdev_id;
+ struct msm_actuator_vreg vreg_cfg;
};
#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
index 4650df7..c848287 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
@@ -48,21 +48,19 @@
static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init)
{
int rc;
-
+ int tm = 10000;
if (s_init->module_init_status == 1) {
CDBG("msm_cam_get_module_init_status -2\n");
return 0;
}
+ rc = wait_event_interruptible_timeout(s_init->state_wait,
+ (s_init->module_init_status == 1), msecs_to_jiffies(tm));
+ if (rc < 0)
+ pr_err("%s:%d wait failed\n", __func__, __LINE__);
+ else if (rc == 0)
+ pr_err("%s:%d wait timeout\n", __func__, __LINE__);
- while (1) {
- rc = wait_event_interruptible(s_init->state_wait,
- (s_init->module_init_status == 1));
- if (rc == -ETIMEDOUT)
- continue;
- else if (rc == 0)
- break;
- }
- return 0;
+ return rc;
}
/* Static function definition */
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index c6fb382..0eee530 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -221,6 +221,7 @@
case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+ case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
dprintk(VIDC_INFO, "Non Fatal : HFI_EVENT_SESSION_ERROR\n");
break;
default:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
old mode 100644
new mode 100755
index 7429466..9f0dac4
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2148,7 +2148,7 @@
int rc = 0;
struct msm_smem *handle;
struct internal_buf *binfo;
- struct vidc_buffer_addr_info buffer_info;
+ struct vidc_buffer_addr_info buffer_info = {0};
u32 smem_flags = 0, buffer_size;
struct hal_buffer_requirements *output_buf, *extradata_buf;
int i;
@@ -2168,19 +2168,21 @@
output_buf->buffer_count_actual,
output_buf->buffer_size);
+ buffer_size = output_buf->buffer_size;
+
extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
- if (!extradata_buf) {
+ if (extradata_buf) {
+ dprintk(VIDC_DBG,
+ "extradata: num = %d, size = %d\n",
+ extradata_buf->buffer_count_actual,
+ extradata_buf->buffer_size);
+ buffer_size += extradata_buf->buffer_size;
+ } else {
dprintk(VIDC_DBG,
"This extradata buffer not required, buffer_type: %x\n",
buffer_type);
- return 0;
}
- dprintk(VIDC_DBG,
- "extradata: num = %d, size = %d\n",
- extradata_buf->buffer_count_actual,
- extradata_buf->buffer_size);
- buffer_size = output_buf->buffer_size + extradata_buf->buffer_size;
if (inst->flags & VIDC_SECURE)
smem_flags |= SMEM_SECURE;
@@ -2218,7 +2220,10 @@
buffer_info.align_device_addr = handle->device_addr;
buffer_info.extradata_addr = handle->device_addr +
output_buf->buffer_size;
- buffer_info.extradata_size = extradata_buf->buffer_size;
+ if (extradata_buf) {
+ buffer_info.extradata_size =
+ extradata_buf->buffer_size;
+ }
dprintk(VIDC_DBG, "Output buffer address: %x",
buffer_info.align_device_addr);
dprintk(VIDC_DBG, "Output extradata address: %x",
@@ -3462,20 +3467,14 @@
capability->height.min);
rc = -ENOTSUPP;
}
- if (msm_vp8_low_tier &&
- inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_VP8) {
- capability->width.max = DEFAULT_WIDTH;
- capability->width.max = DEFAULT_HEIGHT;
- }
if (!rc && (inst->prop.width[CAPTURE_PORT] >
capability->width.max)) {
dprintk(VIDC_ERR,
- "Unsupported width = %u supported max width = %u\n",
+ "Unsupported width = %u supported max width = %u",
inst->prop.width[CAPTURE_PORT],
capability->width.max);
rc = -ENOTSUPP;
}
-
if (!rc && (inst->prop.height[CAPTURE_PORT]
* inst->prop.width[CAPTURE_PORT] >
capability->width.max * capability->height.max)) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
old mode 100644
new mode 100755
index 475683c..0ac6fc4
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -20,7 +20,6 @@
int msm_fw_debug = 0x18;
int msm_fw_debug_mode = 0x1;
int msm_fw_low_power_mode = 0x1;
-int msm_vp8_low_tier = 0x1;
int msm_vidc_hw_rsp_timeout = 1000;
struct debug_buffer {
@@ -184,11 +183,6 @@
dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
goto failed_create_dir;
}
- if (!debugfs_create_u32("vp8_low_tier", S_IRUGO | S_IWUSR,
- parent, &msm_vp8_low_tier)) {
- dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
- goto failed_create_dir;
- }
if (!debugfs_create_u32("debug_output", S_IRUGO | S_IWUSR,
parent, &msm_vidc_debug_out)) {
dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
old mode 100644
new mode 100755
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
old mode 100644
new mode 100755
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
old mode 100644
new mode 100755
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 7c62e77..b3c20b6 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -70,6 +70,7 @@
#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE (HFI_COMMON_BASE + 0x1010)
#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL (HFI_COMMON_BASE + 0x1011)
#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR (HFI_COMMON_BASE + 0x1012)
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED (HFI_COMMON_BASE + 0x1013)
#define HFI_EVENT_SYS_ERROR (HFI_COMMON_BASE + 0x1)
#define HFI_EVENT_SESSION_ERROR (HFI_COMMON_BASE + 0x2)
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 88e8d43..11b064a 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -120,6 +120,16 @@
This driver gets the Q6 out of power collapsed state and
exposes ioctl control to read avtimer tick.
+config PFT
+ bool "Per-File-Tagger driver"
+ default n
+ help
+ This driver is used for tagging enterprise files.
+ It is part of the Per-File-Encryption (PFE) feature.
+ The driver is tagging files when created by
+ registered application.
+ Tagged files are encrypted using the dm-req-crypt driver.
+
config SSM
tristate "Qualcomm Secure Service Module"
depends on QSEECOM
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index e6493b1..e2be59d 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -3,7 +3,7 @@
#
ccflags-y += -Idrivers/misc/
-
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
obj-$(CONFIG_MSM_SSBI) += ssbi.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
@@ -16,4 +16,5 @@
obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o
obj-$(CONFIG_MSM_AVTIMER) += avtimer.o
obj-$(CONFIG_SSM) += ssm.o
+obj-$(CONFIG_PFT) += pft.o
obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
diff --git a/drivers/platform/msm/pft.c b/drivers/platform/msm/pft.c
new file mode 100644
index 0000000..14a6092
--- /dev/null
+++ b/drivers/platform/msm/pft.c
@@ -0,0 +1,1744 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Per-File-Tagger (PFT).
+ *
+ * This driver tags enterprise file for encryption/decryption,
+ * as part of the Per-File-Encryption (PFE) feature.
+ *
+ * Enterprise registered applications are identified by their UID.
+ *
+ * The PFT exposes character-device interface to the user-space application,
+ * to handle the following commands:
+ * 1. Update registered applications list
+ * 2. Encryption (in-place) of a file that was created before.
+ * 3. Set State - update the state.
+ *
+ * The PFT exposes kernel API hooks that are intercepting file operations
+ * like create/open/read/write for tagging files and also for access control.
+ * It utilizes the existing security framework hooks
+ * that calls the selinux hooks.
+ *
+ * The PFT exposes kernel API to the dm-req-crypt driver to provide the info
+ * if a file is tagged or not. The dm-req-crypt driver is doing the
+ * actual encryption/decryption.
+ *
+ * Tagging the file:
+ * 1. Non-volatile tagging on storage using file extra-attribute (xattr).
+ * 2. Volatile tagging on the file's inode, for fast access.
+ *
+ */
+
+/* Uncomment the line below to enable debug messages */
+/* #define DEBUG 1 */
+
+#define pr_fmt(fmt) "pft [%s]: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/cred.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/fdtable.h>
+#include <linux/selinux.h>
+
+#include <linux/pft.h>
+#include <linux/msm_pft.h>
+
+#include "objsec.h"
+
+/* File tagging as encrypted/non-encrypted is valid */
+#define PFT_TAG_MAGIC ((u32)(0xABC00000))
+
+/* File tagged as encrypted */
+#define PFT_TAG_ENCRYPTED BIT(16)
+
+#define PFT_TAG_MAGIC_MASK 0xFFF00000
+#define PFT_TAG_FLAGS_MASK 0x000F0000
+#define PFT_TAG_KEY_MASK 0x0000FFFF
+
+/* The default encryption key index */
+#define PFT_DEFAULT_KEY_INDEX 1
+
+/* The default key index for non-encrypted files */
+#define PFT_NO_KEY 0
+
+/* PFT extended attribute name */
+#define XATTR_NAME_PFE "security.pfe"
+
+/* PFT driver requested major number */
+#define PFT_REQUESTED_MAJOR 213
+
+/* PFT driver name */
+#define DEVICE_NAME "pft"
+
+/* Maximum registered applications */
+#define PFT_MAX_APPS 1000
+
+/* Maximum command size */
+#define PFT_MAX_COMMAND_SIZE (PAGE_SIZE)
+
+/* Current Process ID */
+#define current_pid() ((u32)(current->pid))
+
+static const char *pft_state_name[PFT_STATE_MAX_INDEX] = {
+ "deactivated",
+ "deactivating",
+ "key_removed",
+ "removing_key",
+ "key_loaded",
+};
+
+/**
+ * struct pft_file_info - pft file node info.
+ * @file: pointer to file structure.
+ * @pid: process ID.
+ * @list: next list item.
+ *
+ * A node in the list of the current open encrypted files.
+ */
+struct pft_file_info {
+ struct file *file;
+ pid_t pid;
+ struct list_head list;
+};
+
+/**
+ * struct pft_device - device state structure.
+ *
+ * @open_count: device open count.
+ * @major: device major number.
+ * @state: Per-File-Encryption state.
+ * @response: command response.
+ * @pfm_pid: PFM process id.
+ * @inplace_file: file for in-place encryption.
+ * @uid_table: registered application array (UID).
+ * @uid_count: number of registered applications.
+ * @open_file_list: open encrypted file list.
+ * @lock: lock protect list access.
+ *
+ * The open_count purpose is to ensure that only one user space
+ * application uses this driver.
+ * The open_file_list is used to close open encrypted files
+ * after the key is removed from the encryption hardware.
+ */
+struct pft_device {
+ struct cdev cdev;
+ dev_t device_no;
+ struct class *driver_class;
+ int open_count;
+ int major;
+ enum pft_state state;
+ struct pft_command_response response;
+ u32 pfm_pid;
+ struct file *inplace_file;
+ u32 *uid_table;
+ u32 uid_count;
+ struct list_head open_file_list;
+ struct mutex lock;
+};
+
+/* Device Driver State */
+static struct pft_device *pft_dev;
+
+/**
+ * pft_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static bool pft_is_ready(void)
+{
+ return (pft_dev != NULL);
+}
+
+/**
+ * file_to_filename() - get the filename from file pointer.
+ * @filp: file pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+static char *file_to_filename(struct file *filp)
+{
+ struct dentry *dentry = NULL;
+ char *filename = NULL;
+
+ if (!filp || !filp->f_dentry)
+ return "unknown";
+
+ dentry = filp->f_dentry;
+ filename = dentry->d_iname;
+
+ return filename;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * it is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+static char *inode_to_filename(struct inode *inode)
+{
+ struct dentry *dentry = NULL;
+ char *filename = NULL;
+
+ if (list_empty(&inode->i_dentry))
+ return "unknown";
+
+ dentry = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
+
+ filename = dentry->d_iname;
+
+ return filename;
+}
+
+/**
+ * pft_set_response() - set response error code.
+ *
+ * @error_code: The error code to return on response.
+ */
+static inline void pft_set_response(u32 error_code)
+{
+ pft_dev->response.error_code = error_code;
+}
+
+/**
+ * pft_add_file()- Add the file to the list of opened encrypted
+ * files.
+ * @filp: file to add.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int pft_add_file(struct file *filp)
+{
+ struct pft_file_info *node = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ pr_err("malloc failure\n");
+ return -ENOMEM;
+ }
+
+ node->file = filp;
+ INIT_LIST_HEAD(&node->list);
+
+ mutex_lock(&pft_dev->lock);
+ list_add(&node->list, &pft_dev->open_file_list);
+ pr_debug("adding file %s to open list.\n", file_to_filename(filp));
+ mutex_unlock(&pft_dev->lock);
+
+ return 0;
+}
+
+/**
+ * pft_remove_file()- Remove the given file from the list of
+ * open encrypted files.
+ * @filp: file to remove.
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int pft_remove_file(struct file *filp)
+{
+ int ret = -ENOENT;
+ struct pft_file_info *tmp = NULL;
+ struct list_head *pos = NULL;
+ struct list_head *next = NULL;
+ bool found = false;
+
+ mutex_lock(&pft_dev->lock);
+ list_for_each_safe(pos, next, &pft_dev->open_file_list) {
+ tmp = list_entry(pos, struct pft_file_info, list);
+ if (filp == tmp->file) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ pr_debug("remove file %s. from open list.\n ",
+ file_to_filename(filp));
+ list_del(&tmp->list);
+ kfree(tmp);
+ ret = 0;
+ }
+ mutex_unlock(&pft_dev->lock);
+
+ return ret;
+}
+
+/**
+ * pft_is_current_process_registered()- Check if current process
+ * is registered.
+ *
+ * Return: true if current process is registered.
+ */
+static bool pft_is_current_process_registered(void)
+{
+ int is_registered = false;
+ int i;
+ u32 uid = current_uid();
+
+ mutex_lock(&pft_dev->lock);
+ for (i = 0; i < pft_dev->uid_count; i++) {
+ if (pft_dev->uid_table[i] == uid) {
+ pr_debug("current UID [%u] is registerd.\n", uid);
+ is_registered = true;
+ break;
+ }
+ }
+ mutex_unlock(&pft_dev->lock);
+
+ return is_registered;
+}
+
+/**
+ * pft_is_xattr_supported() - Check if the filesystem supports
+ * extended attributes.
+ * @inode: pointer to the file inode
+ *
+ * Return: true if supported, false if not.
+ */
+static bool pft_is_xattr_supported(struct inode *inode)
+{
+ if (inode == NULL) {
+ pr_err("invalid argument inode passed as NULL");
+ return false;
+ }
+
+ if (inode->i_security == NULL) {
+ pr_debug("i_security is NULL, not ready yet\n");
+ return false;
+ }
+
+ if (inode->i_op == NULL) {
+ pr_debug("i_op is NULL\n");
+ return false;
+ }
+
+ if (inode->i_op->getxattr == NULL) {
+ pr_debug_once("getxattr() not supported , filename=%s\n",
+ inode_to_filename(inode));
+ return false;
+ }
+
+ if (inode->i_op->setxattr == NULL) {
+ pr_debug("setxattr() not supported\n");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * pft_get_inode_tag() - get the file tag.
+ * @inode: pointer to the file inode
+ *
+ * Return: tag
+ */
+static u32 pft_get_inode_tag(struct inode *inode)
+{
+ struct inode_security_struct *isec = inode->i_security;
+
+ if (isec == NULL)
+ return 0;
+
+ return isec->tag;
+}
+
+/**
+ * pft_get_inode_key_index() - get the file key.
+ * @inode: pointer to the file inode
+ *
+ * Return: key index
+ */
+static inline u32 pft_get_inode_key_index(struct inode *inode)
+{
+ return pft_get_inode_tag(inode) & PFT_TAG_KEY_MASK;
+}
+
+/**
+ * pft_is_tag_valid() - is the tag valid
+ * @inode: pointer to the file inode
+ *
+ * The tagging is set to valid when an enterprise file is created
+ * or when a file is opened for the first time after power up and the
+ * xattr was checked to see if the file is encrypted or not.
+ *
+ * Return: true if the tag is valid.
+ */
+static inline bool pft_is_tag_valid(struct inode *inode)
+{
+ struct inode_security_struct *isec = inode->i_security;
+
+ if (isec == NULL)
+ return false;
+
+ return ((isec->tag & PFT_TAG_MAGIC_MASK) == PFT_TAG_MAGIC) ?
+ true : false;
+}
+
+/**
+ * pft_is_file_encrypted() - is inode tagged as encrypted.
+ *
+ * @tag: holds the key index and tagging flags.
+ *
+ * Return: true if the file is encrypted.
+ */
+static inline bool pft_is_file_encrypted(u32 tag)
+{
+ return (tag & PFT_TAG_ENCRYPTED) ? true : false;
+}
+
+/**
+ * pft_tag_inode_non_encrypted() - Tag the inode as
+ * non-encrypted.
+ * @inode: pointer to the file inode
+ *
+ * Tag file as non-encrypted, only the valid bit is set,
+ * the encrypted bit is not set.
+ */
+static inline void pft_tag_inode_non_encrypted(struct inode *inode)
+{
+ struct inode_security_struct *isec = inode->i_security;
+
+ isec->tag = (u32)(PFT_TAG_MAGIC);
+}
+
+/**
+ * pft_tag_inode_encrypted() - Tag the inode as encrypted.
+ * @inode: pointer to the file inode
+ *
+ * Set the valid bit, the encrypted bit, and the key index.
+ */
+static void pft_tag_inode_encrypted(struct inode *inode, u32 key_index)
+{
+ struct inode_security_struct *isec = inode->i_security;
+
+ isec->tag = key_index | PFT_TAG_ENCRYPTED | PFT_TAG_MAGIC;
+}
+
+/**
+ * pft_get_file_tag()- get the file tag.
+ * @dentry: pointer to file dentry.
+ * @tag_ptr: pointer to tag.
+ *
+ * This is the major function for detecting tagged files.
+ * Get the tag from the inode if tag is valid,
+ * or from the xattr if this is the 1st time after power up.
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int pft_get_file_tag(struct dentry *dentry, u32 *tag_ptr)
+{
+ ssize_t size = 0;
+ struct inode *inode;
+ const char *xattr_name = XATTR_NAME_PFE;
+ u32 key;
+
+ if (!dentry || !dentry->d_inode || !tag_ptr) {
+ pr_err("invalid param");
+ return -EINVAL;
+ }
+
+ inode = dentry->d_inode;
+ if (pft_is_tag_valid(inode)) {
+ *tag_ptr = pft_get_inode_tag(inode);
+ return 0;
+ }
+
+ /*
+ * For the first time reading the tag, the tag is not valid, hence
+ * get xattr.
+ */
+ size = inode->i_op->getxattr(dentry, xattr_name, &key, sizeof(key));
+
+ if (size == -ENODATA || size == -EOPNOTSUPP) {
+ pft_tag_inode_non_encrypted(inode);
+ *tag_ptr = pft_get_inode_tag(inode);
+ } else if (size > 0) {
+ pr_debug("First time file %s opened, found xattr = %u.\n",
+ inode_to_filename(inode), key);
+ pft_tag_inode_encrypted(inode, key);
+ *tag_ptr = pft_get_inode_tag(inode);
+ } else {
+ pr_err("getxattr() failure, ret=%d.\n", size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pft_tag_file() - Tag the file saving the key_index.
+ * @dentry: file dentry.
+ * @key_index: encryption key index.
+ *
+ * This is the major function for tagging a file.
+ * Tag the file on both the xattr and the inode.
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int pft_tag_file(struct dentry *dentry, u32 key_index)
+{
+ int size = 0;
+ const char *xattr_name = XATTR_NAME_PFE;
+
+ if (!dentry || !dentry->d_inode) {
+ pr_err("invalid NULL param");
+ return -EINVAL;
+ }
+
+ if (!pft_is_xattr_supported(dentry->d_inode)) {
+ pr_err("set xattr for file %s is not support.\n",
+ dentry->d_iname);
+ return -EINVAL;
+ }
+
+ size = dentry->d_inode->i_op->setxattr(dentry, xattr_name, &key_index,
+ sizeof(key_index), 0);
+ if (size < 0) {
+ pr_err("failed to set xattr for file %s, ret =%d.\n",
+ dentry->d_iname, size);
+ return -EFAULT;
+ }
+
+ pft_tag_inode_encrypted(dentry->d_inode, key_index);
+ pr_debug("file %s tagged encrypted\n", dentry->d_iname);
+
+ return 0;
+}
+
+/**
+ * pft_get_app_key_index() - get the application key index.
+ * @uid: registered application UID
+ *
+ * Get key index based on the given registered application UID.
+ * Currently only one key is supported.
+ *
+ * Return: encryption key index.
+ */
+static inline u32 pft_get_app_key_index(u32 uid)
+{
+ return PFT_DEFAULT_KEY_INDEX;
+}
+
+/**
+ * pft_is_encrypted_file() - is the file encrypted.
+ * @dentry: file pointer.
+ *
+ * Return: true if the file is encrypted, false otherwise.
+ */
+static bool pft_is_encrypted_file(struct dentry *dentry)
+{
+ int rc;
+ u32 tag;
+
+ if (!pft_is_ready())
+ return false;
+
+ if (!pft_is_xattr_supported(dentry->d_inode))
+ return false;
+
+ rc = pft_get_file_tag(dentry, &tag);
+ if (rc < 0)
+ return false;
+
+ return pft_is_file_encrypted(tag);
+}
+
+/**
+ * pft_is_inplace_inode() - is this the inode of file for
+ * in-place encryption.
+ * @inode: inode of file to check.
+ *
+ * Return: true if this file is being encrypted, false
+ * otherwise.
+ */
+static bool pft_is_inplace_inode(struct inode *inode)
+{
+ if (!pft_dev->inplace_file || !pft_dev->inplace_file->f_path.dentry)
+ return false;
+
+ return (pft_dev->inplace_file->f_path.dentry->d_inode == inode);
+}
+
+/**
+ * pft_is_inplace_file() - is this the file for in-place
+ * encryption.
+ * @filp: file to check.
+ *
+ * A file struct might be allocated per process, inode should be
+ * only one.
+ *
+ * Return: true if this file is being encrypted, false
+ * otherwise.
+ */
+static inline bool pft_is_inplace_file(struct file *filp)
+{
+ if (!filp || !filp->f_path.dentry || !filp->f_path.dentry->d_inode)
+ return false;
+
+ return pft_is_inplace_inode(filp->f_path.dentry->d_inode);
+}
+
+/**
+ * pft_get_key_index() - get the key index and other indications
+ * @inode: Pointer to inode struct
+ * @key_index: Pointer to the return value of key index
+ * @is_encrypted: Pointer to the return value.
+ * @is_inplace: Pointer to the return value.
+ *
+ * Provides the given inode's encryption key index, as well as
+ * indications whether the file is encrypted or is it currently
+ * being in-placed encrypted.
+ * This API is called by the dm-req-crypt to decide if to
+ * encrypt/decrypt the file.
+ * File tagging depends on the hooks to be called from selinux,
+ * so if selinux is disabled then tagging is also not
+ * valid.
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int pft_get_key_index(struct inode *inode, u32 *key_index,
+ bool *is_encrypted, bool *is_inplace)
+{
+ u32 tag = 0;
+
+ if (!pft_is_ready())
+ return -ENODEV;
+
+ if (!selinux_is_enabled())
+ return -ENODEV;
+
+ if (!inode)
+ return -EPERM;
+
+ if (!is_encrypted) {
+ pr_err("is_encrypted is NULL\n");
+ return -EPERM;
+ }
+ if (!is_inplace) {
+ pr_err("is_inplace is NULL\n");
+ return -EPERM;
+ }
+ if (!key_index) {
+ pr_err("key_index is NULL\n");
+ return -EPERM;
+ }
+
+ if (!pft_is_tag_valid(inode)) {
+ pr_debug("file %s, Tag not valid\n", inode_to_filename(inode));
+ return -EINVAL;
+ }
+
+ if (!pft_is_xattr_supported(inode)) {
+ *is_encrypted = false;
+ *is_inplace = false;
+ *key_index = 0;
+ return 0;
+ }
+
+ tag = pft_get_inode_tag(inode);
+
+ *is_encrypted = pft_is_file_encrypted(tag);
+ *key_index = pft_get_inode_key_index(inode);
+ *is_inplace = pft_is_inplace_inode(inode);
+
+ if (*is_encrypted)
+ pr_debug("file %s is encrypted\n", inode_to_filename(inode));
+
+ return 0;
+}
+EXPORT_SYMBOL(pft_get_key_index);
+
+/**
+ * pft_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ */
+static struct inode *pft_bio_get_inode(struct bio *bio)
+{
+ if (!bio || !bio->bi_io_vec || !bio->bi_io_vec->bv_page ||
+ !bio->bi_io_vec->bv_page->mapping)
+ return NULL;
+
+ return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+/**
+ * pft_allow_merge_bio()- Check if 2 BIOs can be merged.
+ * @bio1: Pointer to first BIO structure.
+ * @bio2: Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or files encrypted with different key.
+ * This API is called by the file system block layer.
+ *
+ * Return: true if the BIOs allowed to be merged, false
+ * otherwise.
+ */
+bool pft_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ u32 key_index1 = 0, key_index2 = 0;
+ bool is_encrypted1 = false, is_encrypted2 = false;
+ bool allow = false;
+ bool is_inplace = false; /* N.A. */
+ int ret;
+
+ if (!pft_is_ready())
+ return true;
+
+ ret = pft_get_key_index(pft_bio_get_inode(bio1), &key_index1,
+ &is_encrypted1, &is_inplace);
+ if (ret)
+ is_encrypted1 = false;
+
+ ret = pft_get_key_index(pft_bio_get_inode(bio2), &key_index2,
+ &is_encrypted2, &is_inplace);
+ if (ret)
+ is_encrypted2 = false;
+
+ allow = ((is_encrypted1 == is_encrypted2) &&
+ (key_index1 == key_index2));
+
+ return allow;
+}
+EXPORT_SYMBOL(pft_allow_merge_bio);
+
+/**
+ * pft_inode_create() - file creation callback.
+ * @dir: directory inode pointer
+ * @dentry: file dentry pointer
+ * @mode: flags
+ *
+ * This hook is called when file is created by VFS.
+ * This hook is called from the selinux driver.
+ * This hooks check file creation permission for enterprise
+ * applications.
+ * Call path:
+ * vfs_create()->security_inode_create()->selinux_inode_create()
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int pft_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ if (!dir || !dentry)
+ return 0;
+
+ if (!pft_is_ready())
+ return 0;
+
+ switch (pft_dev->state) {
+ case PFT_STATE_DEACTIVATED:
+ case PFT_STATE_KEY_LOADED:
+ break;
+ case PFT_STATE_KEY_REMOVED:
+ case PFT_STATE_DEACTIVATING:
+ case PFT_STATE_REMOVING_KEY:
+ /* At this state no new encrypted files can be created */
+ if (pft_is_current_process_registered()) {
+ pr_debug("key removed, registered uid %u is denied from creating new file %s\n",
+ current_uid(), dentry->d_iname);
+ return -EACCES;
+ }
+ break;
+ default:
+ BUG(); /* State is set by "set state" command */
+ break;
+ }
+
+ return 0;
+
+}
+EXPORT_SYMBOL(pft_inode_create);
+
+/**
+ * pft_inode_post_create() - file creation callback.
+ * @dir: directory inode pointer
+ * @dentry: file dentry pointer
+ * @mode: flags
+ *
+ * This hook is called when file is created by VFS.
+ * This hook is called from the selinux driver.
+ * This hooks tags new files as encrypted when created by
+ * enterprise applications.
+ * Call path:
+ * vfs_create()->security_inode_post_create()->selinux_inode_post_create()
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int pft_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ int ret;
+
+ if (!dir || !dentry)
+ return 0;
+
+ if (!pft_is_ready())
+ return 0;
+
+ switch (pft_dev->state) {
+ case PFT_STATE_DEACTIVATED:
+ case PFT_STATE_KEY_REMOVED:
+ case PFT_STATE_DEACTIVATING:
+ case PFT_STATE_REMOVING_KEY:
+ break;
+ case PFT_STATE_KEY_LOADED:
+ /* Check whether the new file should be encrypted */
+ if (pft_is_current_process_registered()) {
+ u32 key_index = pft_get_app_key_index(current_uid());
+ ret = pft_tag_file(dentry, key_index);
+ if (ret == 0)
+ pr_debug("key loaded, pid [%u] uid [%d] is creating file %s\n",
+ current_pid(), current_uid(),
+ dentry->d_iname);
+ else {
+ pr_err("Failed to tag file %s by pid %d\n",
+ dentry->d_iname, current_pid());
+ return -EFAULT;
+ }
+ }
+ break;
+ default:
+ BUG(); /* State is set by "set state" command */
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pft_inode_post_create);
+
+/**
+ * pft_inode_mknod() - mknod file hook (callback)
+ * @dir: directory inode pointer
+ * @dentry: file dentry pointer
+ * @mode: flags
+ * @dev:
+ *
+ * This hook checks encrypted file access permission by
+ * enterprise application.
+ * Call path:
+ * vfs_mknod()->security_inode_mknod()->selinux_inode_mknod()->pft_inode_mknod()
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+int pft_inode_mknod(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev)
+{
+ int rc;
+
+ /* Check if allowed to create new encrypted files */
+ rc = pft_inode_create(dir, dentry, mode);
+
+ return rc;
+}
+EXPORT_SYMBOL(pft_inode_mknod);
+
+/**
+ * pft_inode_rename() - file rename hook.
+ * @inode: directory inode
+ * @dentry: file dentry
+ * @new_inode
+ * @new_dentry
+ *
+ * Block attempt to rename enterprise file.
+ *
+ * Return: 0 on allowed operation, negative value otherwise.
+ */
+int pft_inode_rename(struct inode *inode, struct dentry *dentry,
+ struct inode *new_inode, struct dentry *new_dentry)
+{
+ if (!inode || !dentry || !new_inode || !new_dentry || !dentry->d_inode)
+ return 0;
+
+ if (!pft_is_ready())
+ return 0;
+
+ /* do nothing for non-encrypted files */
+ if (!pft_is_encrypted_file(dentry))
+ return 0;
+
+ pr_debug("attempt to rename encrypted file [%s]\n", dentry->d_iname);
+
+ if (pft_is_inplace_inode(dentry->d_inode)) {
+ pr_err("access in-place-encryption file %s by uid [%d] pid [%d] is blocked.\n",
+ inode_to_filename(inode), current_uid(), current_pid());
+ return -EACCES;
+ }
+
+ if (!pft_is_current_process_registered()) {
+ pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
+ current_uid(), current_pid(), dentry->d_iname);
+ return -EACCES;
+ } else
+ pr_debug("rename file %s\n", dentry->d_iname);
+
+ return 0;
+}
+EXPORT_SYMBOL(pft_inode_rename);
+
+/**
+ * pft_file_open() - file open hook (callback).
+ * @filp: file pointer
+ * @cred: credentials pointer
+ *
+ * This hook is called when file is opened by VFS.
+ * It is called from the selinux driver.
+ * It checks enterprise file xattr when first opened.
+ * It adds encrypted file to the list of open files.
+ * Call path:
+ * do_filp_open()->security_dentry_open()->selinux_dentry_open()
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int pft_file_open(struct file *filp, const struct cred *cred)
+{
+ int ret;
+
+ if (!filp || !filp->f_path.dentry)
+ return 0;
+
+ if (!pft_is_ready())
+ return 0;
+
+ /* do nothing for non-encrypted files */
+ if (!pft_is_encrypted_file(filp->f_dentry))
+ return 0;
+
+ /*
+ * Only PFM allowed to access in-place-encryption-file
+ * during in-place-encryption process
+ */
+ if (pft_is_inplace_file(filp) && current_pid() != pft_dev->pfm_pid) {
+ pr_err("Access in-place-encryption file %s by uid %d pid %d is blocked.\n",
+ file_to_filename(filp), current_uid(), current_pid());
+ return -EACCES;
+ }
+
+ switch (pft_dev->state) {
+ case PFT_STATE_DEACTIVATED:
+ case PFT_STATE_KEY_REMOVED:
+ case PFT_STATE_DEACTIVATING:
+ case PFT_STATE_REMOVING_KEY:
+ /* Block any access for encrypted files when key not loaded */
+ pr_debug("key not loaded. uid (%u) can not access file %s\n",
+ current_uid(), file_to_filename(filp));
+ return -EACCES;
+ case PFT_STATE_KEY_LOADED:
+ /* Only registered apps may access encrypted files. */
+ if (!pft_is_current_process_registered()) {
+ pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
+ current_uid(), current_pid(),
+ file_to_filename(filp));
+ return -EACCES;
+ }
+
+ ret = pft_add_file(filp);
+ if (ret) {
+ pr_err("failed to add file %s to the list.\n",
+ file_to_filename(filp));
+ return -EFAULT;
+ }
+ break;
+ default:
+ BUG(); /* State is set by "set state" command */
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pft_file_open);
+
+/**
+ * pft_file_permission() - check file access permission.
+ * @filp: file pointer
+ * @mask: flags
+ *
+ * This hook is called when file is read/write by VFS.
+ * This hook is called from the selinux driver.
+ * This hook checks encrypted file access permission by
+ * enterprise application.
+ * Call path:
+ * vfs_read()->security_file_permission()->selinux_file_permission()
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+int pft_file_permission(struct file *filp, int mask)
+{
+ if (!filp)
+ return 0;
+
+ if (!pft_is_ready())
+ return 0;
+
+ /* do nothing for non-encrypted files */
+ if (!pft_is_encrypted_file(filp->f_dentry))
+ return 0;
+
+ /*
+ * Only PFM allowed to access in-place-encryption-file
+ * during in-place encryption process
+ */
+ if (pft_is_inplace_file(filp)) {
+ if (current_pid() == pft_dev->pfm_pid) {
+ /* mask MAY_WRITE=2 / MAY_READ=4 */
+ pr_debug("r/w [mask 0x%x] in-place-encryption file %s by PFM (UID %d, PID %d).\n",
+ mask, file_to_filename(filp),
+ current_uid(), current_pid());
+ return 0;
+ } else {
+ pr_err("Access in-place-encryption file %s by App (UID %d, PID %d) is blocked.\n",
+ file_to_filename(filp),
+ current_uid(), current_pid());
+ return -EACCES;
+ }
+ }
+
+ switch (pft_dev->state) {
+ case PFT_STATE_DEACTIVATED:
+ case PFT_STATE_KEY_REMOVED:
+ case PFT_STATE_DEACTIVATING:
+ case PFT_STATE_REMOVING_KEY:
+ /* Block any access for encrypted files when key not loaded */
+ pr_debug("key not loaded. uid (%u) can not access file %s\n",
+ current_uid(), file_to_filename(filp));
+ return -EACCES;
+ case PFT_STATE_KEY_LOADED:
+ /* Only registered apps can access encrypted files. */
+ if (!pft_is_current_process_registered()) {
+ pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
+ current_uid(), current_pid(),
+ file_to_filename(filp));
+ return -EACCES;
+ }
+ break;
+ default:
+ BUG(); /* State is set by "set state" command */
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pft_file_permission);
+
+/**
+ * pft_sync_file() - sync the file.
+ * @filp: file pointer
+ *
+ * Complete writing any pending write request of encrypted data
+ * before key is removed, to avoid writing garbage to
+ * enterprise files.
+ */
+static void pft_sync_file(struct file *filp)
+{
+ int ret;
+
+ ret = vfs_fsync(filp, false);
+
+ if (ret)
+ pr_debug("failed to sync file %s, ret = %d.\n",
+ file_to_filename(filp), ret);
+ else
+ pr_debug("Sync file %s ok.\n", file_to_filename(filp));
+
+}
+
+/**
+ * pft_file_close()- handle file close event
+ * @filp: file pointer
+ *
+ * This hook is called when file is closed by VFS.
+ * This hook is called from the selinux driver.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+int pft_file_close(struct file *filp)
+{
+	if (!filp)
+		return 0;
+
+	/* Driver not fully initialized yet - nothing is tracked */
+	if (!pft_is_ready())
+		return 0;
+
+	/* do nothing for non-encrypted files */
+	if (!pft_is_encrypted_file(filp->f_dentry))
+		return 0;
+
+	/*
+	 * Closing the in-place-encryption file ends the single
+	 * allowed in-place-encryption session.
+	 */
+	if (pft_is_inplace_file(filp)) {
+		pr_debug("pid [%u] uid [%u] is closing in-place-encryption file %s\n",
+			current_pid(), current_uid(), file_to_filename(filp));
+		pft_dev->inplace_file = NULL;
+	}
+
+	/* Flush pending writes, then drop it from the open-file list */
+	pft_sync_file(filp);
+	pft_remove_file(filp);
+
+	return 0;
+}
+EXPORT_SYMBOL(pft_file_close);
+
+/**
+ * pft_inode_unlink() - Delete file hook.
+ * @dir: directory inode pointer
+ * @dentry: file dentry pointer
+ *
+ * call path: vfs_unlink()->security_inode_unlink().
+ *
+ * Blocks deleting the file that is currently being encrypted
+ * in-place, and blocks unregistered apps from deleting
+ * encrypted files.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+int pft_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = NULL;
+
+	if (!dir || !dentry || !dentry->d_inode)
+		return 0;
+
+	if (!pft_is_ready())
+		return 0;
+
+	inode = dentry->d_inode;
+
+	/* do nothing for non-encrypted files */
+	if (!pft_is_encrypted_file(dentry))
+		return 0;
+
+	/* Deleting a file while it is being encrypted would corrupt it */
+	if (pft_is_inplace_inode(inode)) {
+		pr_err("block delete in-place-encryption file %s by uid [%d] pid [%d], while encryption in progress.\n",
+			inode_to_filename(inode), current_uid(), current_pid());
+		return -EBUSY;
+	}
+
+	/* Only registered (enterprise) apps may delete encrypted files */
+	if (!pft_is_current_process_registered()) {
+		pr_err("unregistered app (uid %u pid %u) is trying to access encrypted file %s\n",
+			current_uid(), current_pid(), inode_to_filename(inode));
+		return -EACCES;
+	} else
+		pr_debug("delete file %s\n", inode_to_filename(inode));
+
+	return 0;
+}
+EXPORT_SYMBOL(pft_inode_unlink);
+
+/**
+ * pft_inode_set_xattr() - set/remove xattr callback.
+ * @dentry: file dentry pointer
+ * @name: xattr name.
+ *
+ * This hook checks attempt to set/remove PFE xattr.
+ * Only this kernel driver allows to set the PFE xattr, so block
+ * any attempt to do it from user space. Allow access for other
+ * xattr.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+int pft_inode_set_xattr(struct dentry *dentry, const char *name)
+{
+	struct inode *inode = NULL;
+
+	if (!dentry || !dentry->d_inode)
+		return 0;
+
+	inode = dentry->d_inode;
+
+	if (strcmp(name, XATTR_NAME_PFE) != 0) {
+		pr_debug("xattr name=%s file %s\n", name,
+			inode_to_filename(inode));
+		return 0; /* Not PFE xattr so it is ok */
+	}
+
+	/* Fixed typo in the error message ("Attemp" -> "Attempt") */
+	pr_err("Attempt to set/remove PFE xattr for file %s\n",
+		inode_to_filename(inode));
+
+	/* Only PFT kernel driver allows to set the PFE xattr */
+	return -EACCES;
+}
+EXPORT_SYMBOL(pft_inode_set_xattr);
+
+/**
+ * pft_close_opened_enc_files() - Close all the currently open
+ * encrypted files
+ *
+ * Close all open encrypted file when removing key or
+ * deactivating.
+ */
+static void pft_close_opened_enc_files(void)
+{
+	struct pft_file_info *tmp = NULL;
+	struct list_head *pos = NULL;
+	struct list_head *next = NULL;
+
+	/*
+	 * The _safe iterator is mandatory here: filp_close() ends up
+	 * in pft_file_close(), which removes the entry from this list
+	 * while we are walking it.
+	 */
+	list_for_each_safe(pos, next, &pft_dev->open_file_list) {
+		struct file *filp;
+		tmp = list_entry(pos, struct pft_file_info, list);
+		filp = tmp->file;
+		pr_debug("closing file %s.\n", file_to_filename(filp));
+		/* filp_close() eventually calls pft_file_close() */
+		filp_close(filp, NULL);
+	}
+}
+
+/**
+ * pft_set_state() - Handle "Set State" command
+ * @command: command buffer.
+ * @size: size of command buffer.
+ *
+ * The command execution status is reported by the response.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int pft_set_state(struct pft_command *command, int size)
+{
+	u32 state = command->set_state.state;
+	int expected_size = sizeof(command->opcode) +
+		sizeof(command->set_state);
+
+	if (size != expected_size) {
+		pr_err("Invalid buffer size\n");
+		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
+		return -EINVAL;
+	}
+
+	if (state >= PFT_STATE_MAX_INDEX) {
+		pr_err("Invalid state %d\n", state);
+		pft_set_response(PFT_CMD_RESP_INVALID_STATE);
+		return 0;
+	}
+
+	pr_debug("Set State %d [%s].\n", state, pft_state_name[state]);
+
+	/* Consistency: use the already-read 'state' everywhere below */
+	switch (state) {
+	case PFT_STATE_DEACTIVATING:
+	case PFT_STATE_REMOVING_KEY:
+		/* Encrypted files must not stay open without a key */
+		pft_close_opened_enc_files();
+		/* Fall through */
+	case PFT_STATE_DEACTIVATED:
+	case PFT_STATE_KEY_LOADED:
+	case PFT_STATE_KEY_REMOVED:
+		pft_dev->state = state;
+		pft_set_response(PFT_CMD_RESP_SUCCESS);
+		break;
+	default:
+		pr_err("Invalid state %d\n", state);
+		pft_set_response(PFT_CMD_RESP_INVALID_STATE);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * pft_get_process_open_file() - get file pointer using file
+ * descriptor index.
+ * @index: file descriptor index.
+ *
+ * NOTE(review): no rcu/file_lock is taken around the fd table
+ * access here - assumes caller context pins it; confirm.
+ *
+ * Return: file pointer on success, NULL on failure.
+ */
+static struct file *pft_get_process_open_file(int index)
+{
+	struct fdtable *files_table;
+
+	files_table = files_fdtable(current->files);
+	if (files_table == NULL)
+		return NULL;
+
+	/*
+	 * The fd comes from user space as a u32; once cast to int it
+	 * may be negative. Reject that before indexing fd[].
+	 */
+	if (index < 0 || index >= files_table->max_fds)
+		return NULL;
+
+	return files_table->fd[index];
+}
+
+/**
+ * pft_set_inplace_file() - handle "inplace file encryption"
+ * command.
+ * @command: command buffer.
+ * @size: size of command buffer.
+ *
+ * Tags an already-open, not-yet-encrypted file so PFM can
+ * encrypt it in place. Only one in-place encryption may be in
+ * progress at a time.
+ * The command execution status is reported by the response.
+ *
+ * Return: 0 if command is valid, negative value otherwise.
+ */
+static int pft_set_inplace_file(struct pft_command *command, int size)
+{
+	int expected_size;
+	u32 fd;
+	int rc;
+	struct file *filp = NULL;
+	struct inode *inode = NULL;
+	int writecount;
+
+	expected_size = sizeof(command->opcode) +
+		sizeof(command->preform_in_place_file_enc.file_descriptor);
+
+	if (size != expected_size) {
+		pr_err("invalid command size %d expected %d.\n",
+			size, expected_size);
+		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
+		return -EINVAL;
+	}
+
+	/* In-place encryption only makes sense with a loaded key */
+	if (pft_dev->state != (u32) PFT_STATE_KEY_LOADED) {
+		pr_err("Key not loaded, state [%d], In-place-encryption is not allowed.\n",
+			pft_dev->state);
+		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
+		return 0;
+	}
+
+	/* allow only one in-place file encryption at a time */
+	if (pft_dev->inplace_file != NULL) {
+		pr_err("file %s in-place-encryption in progress.\n",
+			file_to_filename(pft_dev->inplace_file));
+		/* @todo - use new error code */
+		pft_set_response(PFT_CMD_RESP_INPLACE_FILE_IS_OPEN);
+		return 0;
+	}
+
+	/* Resolve the fd PFM passed into a struct file of this process */
+	fd = command->preform_in_place_file_enc.file_descriptor;
+	filp = pft_get_process_open_file(fd);
+
+	if (filp == NULL) {
+		pr_err("failed to find file by fd %d.\n", fd);
+		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
+		return 0;
+	}
+
+	/* Verify the file is not already open by other than PFM */
+	if (!filp->f_path.dentry || !filp->f_path.dentry->d_inode) {
+		pr_err("failed to get inode of inplace-file.\n");
+		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
+		return 0;
+	}
+
+	/* i_writecount > 1 means some other opener can still write */
+	inode = filp->f_path.dentry->d_inode;
+	writecount = atomic_read(&inode->i_writecount);
+	if (writecount > 1) {
+		pr_err("file %s is opened %d times for write.\n",
+			file_to_filename(filp), writecount);
+		pft_set_response(PFT_CMD_RESP_INPLACE_FILE_IS_OPEN);
+		return 0;
+	}
+
+	/*
+	 * Check if the file was already encryprted.
+	 * In practice, it is unlikely to happen,
+	 * because PFM is not an enterprise application
+	 * it won't be able to open encrypted file.
+	 */
+	if (pft_is_encrypted_file(filp->f_dentry)) {
+		pr_err("file %s is already encrypted.\n",
+			file_to_filename(filp));
+		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
+		return 0;
+	}
+
+
+	/* Update the current in-place-encryption file */
+	pft_dev->inplace_file = filp;
+
+	/*
+	 * Now, any new access to this file is allowed only to PFM.
+	 * Lets make sure that all pending writes are completed
+	 * before encrypting the file.
+	 */
+	pft_sync_file(filp);
+
+	/* Tag the file with the key index of the requesting uid */
+	rc = pft_tag_file(pft_dev->inplace_file->f_dentry,
+		pft_get_app_key_index(current_uid()));
+
+	if (!rc) {
+		pr_debug("tagged file %s to be encrypted.\n",
+			file_to_filename(pft_dev->inplace_file));
+		pft_set_response(PFT_CMD_RESP_SUCCESS);
+	} else {
+		pr_err("failed to tag file %s for encryption.\n",
+			file_to_filename(pft_dev->inplace_file));
+		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
+	}
+
+	return 0;
+}
+
+/**
+ * pft_update_reg_apps() - Update the registered application
+ * list.
+ * @command: command buffer.
+ * @size: size of command buffer.
+ *
+ * The command execution status is reported by the response.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int pft_update_reg_apps(struct pft_command *command, int size)
+{
+	int i;
+	int expected_size;
+	void *buf;
+	int buf_size;
+	u32 items_count;
+
+	/*
+	 * The caller only guarantees the opcode is present; make sure
+	 * the items_count field is inside the buffer before reading it
+	 * (previously this over-read the command buffer).
+	 */
+	expected_size = sizeof(command->opcode) +
+		sizeof(command->update_app_list.items_count);
+	if (size < expected_size) {
+		pr_err("invalid command size %d expected at least %d.\n",
+			size, expected_size);
+		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
+		return -EINVAL;
+	}
+
+	items_count = command->update_app_list.items_count;
+
+	if (items_count > PFT_MAX_APPS) {
+		pr_err("Number of apps [%d] > max apps [%d]\n",
+			items_count, PFT_MAX_APPS);
+		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
+		return -EINVAL;
+	}
+
+	expected_size += items_count * sizeof(u32);
+
+	if (size != expected_size) {
+		pr_err("invalid command size %d expected %d.\n",
+			size, expected_size);
+		pft_set_response(PFT_CMD_RESP_INVALID_CMD_PARAMS);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pft_dev->lock);
+
+	/* Free old table */
+	kfree(pft_dev->uid_table);
+	pft_dev->uid_table = NULL;
+	pft_dev->uid_count = 0;
+
+	if (items_count == 0) {
+		pr_info("empty app list - clear list.\n");
+		/* Set a response so PFM does not read a stale one */
+		pft_set_response(PFT_CMD_RESP_SUCCESS);
+		mutex_unlock(&pft_dev->lock);
+		return 0;
+	}
+
+	buf_size = items_count * sizeof(u32);
+	buf = kzalloc(buf_size, GFP_KERNEL);
+
+	if (!buf) {
+		pr_err("malloc failure\n");
+		pft_set_response(PFT_CMD_RESP_GENERAL_ERROR);
+		mutex_unlock(&pft_dev->lock);
+		return 0;
+	}
+
+	pft_dev->uid_table = buf;
+	pft_dev->uid_count = items_count;
+	pr_debug("uid_count = %d\n", pft_dev->uid_count);
+	for (i = 0; i < pft_dev->uid_count; i++)
+		pft_dev->uid_table[i] = command->update_app_list.table[i];
+	pft_set_response(PFT_CMD_RESP_SUCCESS);
+	mutex_unlock(&pft_dev->lock);
+
+	return 0;
+}
+
+/**
+ * pft_handle_command() - Handle user space app commands.
+ * @buf: command buffer.
+ * @buf_size: command buffer size.
+ *
+ * Dispatches a raw command buffer written by PFM to the proper
+ * handler by opcode. Per-command handlers set the response; for
+ * an unknown opcode the response is set here.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int pft_handle_command(void *buf, int buf_size)
+{
+	int ret = 0;
+	struct pft_command *command = NULL;
+
+	/* opcode field is the minimum length of command */
+	if (buf_size < sizeof(command->opcode)) {
+		pr_err("Invalid argument used buffer size\n");
+		return -EINVAL;
+	}
+
+	command = (struct pft_command *)buf;
+
+	/* Echo the opcode so PFM can match the response to the request */
+	pft_dev->response.command_id = command->opcode;
+
+	switch (command->opcode) {
+	case PFT_CMD_OPCODE_SET_STATE:
+		ret = pft_set_state(command, buf_size);
+		break;
+	case PFT_CMD_OPCODE_UPDATE_REG_APP_UID:
+		ret = pft_update_reg_apps(command, buf_size);
+		break;
+	case PFT_CMD_OPCODE_PERFORM_IN_PLACE_FILE_ENC:
+		ret = pft_set_inplace_file(command, buf_size);
+		break;
+	default:
+		pr_err("Invalid command_op_code %u\n", command->opcode);
+		pft_set_response(PFT_CMD_RESP_INVALID_COMMAND);
+		return 0;
+	}
+
+	return ret;
+}
+
+/* Allow a single opener (PFM); remember its pid for access checks */
+static int pft_device_open(struct inode *inode, struct file *file)
+{
+	int rc = 0;
+
+	mutex_lock(&pft_dev->lock);
+	if (pft_dev->open_count == 0) {
+		pft_dev->open_count++;
+		pft_dev->pfm_pid = current_pid();
+		pr_debug("PFT device opened by %d (%d)\n",
+			pft_dev->pfm_pid, pft_dev->open_count);
+	} else {
+		pr_err("PFT device is already opened (%d)\n",
+			pft_dev->open_count);
+		rc = -EBUSY;
+	}
+	mutex_unlock(&pft_dev->lock);
+
+	pr_debug("device opened, count %d\n", pft_dev->open_count);
+
+	return rc;
+}
+
+/* Release the control device; forget the PFM pid */
+static int pft_device_release(struct inode *inode, struct file *file)
+{
+	mutex_lock(&pft_dev->lock);
+	if (pft_dev->open_count)
+		pft_dev->open_count--;
+	pft_dev->pfm_pid = UINT_MAX;
+	mutex_unlock(&pft_dev->lock);
+
+	pr_debug("device released, count %d\n", pft_dev->open_count);
+
+	return 0;
+}
+
+/**
+ * pft_device_write() - Get commands from user space.
+ * @filp: file pointer
+ * @user_buff: command buffer written by PFM
+ * @size: command buffer size
+ * @f_pos: file position (must be non-NULL, otherwise unused)
+ *
+ * Return: number of bytes written on success to get the
+ * command buffer, negative value on failure.
+ * The error code for handling the command should be retrieved by
+ * reading the response.
+ * Note: any return value of 0..size-1 will cause retry by the
+ * OS, so avoid it.
+ */
+static ssize_t pft_device_write(struct file *filp, const char __user *user_buff,
+		size_t size, loff_t *f_pos)
+{
+	int ret;
+	char *cmd_buf;
+
+	/* Reject empty writes too: kzalloc(0) returns ZERO_SIZE_PTR */
+	if (size == 0 || size > PFT_MAX_COMMAND_SIZE || !user_buff || !f_pos) {
+		pr_err("invalid parameters.\n");
+		return -EINVAL;
+	}
+
+	cmd_buf = kzalloc(size, GFP_KERNEL);
+	if (cmd_buf == NULL) {
+		pr_err("malloc failure for command buffer\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(cmd_buf, user_buff, size);
+	if (ret) {
+		/* copy_from_user() returns the number of bytes NOT copied */
+		pr_err("Unable to copy from user (err %d)\n", ret);
+		kfree(cmd_buf);
+		return -EFAULT;
+	}
+
+	ret = pft_handle_command(cmd_buf, size);
+	kfree(cmd_buf);
+	if (ret)
+		return -EFAULT;
+
+	return size;
+}
+
+/**
+ * pft_device_read() - return response of last command.
+ * @filp: file pointer
+ * @buffer: user space buffer for the response struct
+ * @length: user buffer length; must fit the whole response
+ * @f_pos: file position (must be non-NULL, otherwise unused)
+ *
+ * Return: number of bytes to read on success, negative value on
+ * failure.
+ */
+static ssize_t pft_device_read(struct file *filp, char __user *buffer,
+		size_t length, loff_t *f_pos)
+{
+	int ret = 0;
+
+	/* Bad parameters are -EINVAL (was -EFAULT) */
+	if (!buffer || !f_pos || length < sizeof(pft_dev->response)) {
+		pr_err("invalid parameters.\n");
+		return -EINVAL;
+	}
+
+	ret = copy_to_user(buffer, &(pft_dev->response),
+		sizeof(pft_dev->response));
+	if (ret) {
+		/* copy_to_user() failure is conventionally -EFAULT */
+		pr_err("Unable to copy to user, err = %d.\n", ret);
+		return -EFAULT;
+	}
+
+	return sizeof(pft_dev->response);
+}
+
+
+/* File operations of the PFT control character device (used by PFM) */
+static const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.read = pft_device_read,
+	.write = pft_device_write,
+	.open = pft_device_open,
+	.release = pft_device_release,
+};
+
+/**
+ * pft_register_chardev() - create the PFT control character device.
+ *
+ * Allocates a char device region, creates the device class/node
+ * and registers the cdev. On any failure, everything created so
+ * far is torn down via the goto cleanup chain.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int __init pft_register_chardev(void)
+{
+	int rc;
+	unsigned baseminor = 0;
+	unsigned count = 1;
+	struct device *class_dev;
+
+	rc = alloc_chrdev_region(&pft_dev->device_no, baseminor, count,
+		DEVICE_NAME);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+	pft_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(pft_dev->driver_class)) {
+		/* Propagate the real error instead of a blanket -ENOMEM */
+		rc = PTR_ERR(pft_dev->driver_class);
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(pft_dev->driver_class, NULL,
+		pft_dev->device_no, NULL,
+		DEVICE_NAME);
+	/* device_create() returns ERR_PTR() on failure, never NULL */
+	if (IS_ERR(class_dev)) {
+		rc = PTR_ERR(class_dev);
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&pft_dev->cdev, &fops);
+	pft_dev->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&pft_dev->cdev, MKDEV(MAJOR(pft_dev->device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(pft_dev->driver_class, pft_dev->device_no);
+exit_destroy_class:
+	class_destroy(pft_dev->driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(pft_dev->device_no, 1);
+	return rc;
+}
+
+/* Tear down the control device, in reverse order of registration */
+static void __exit pft_unregister_chrdev(void)
+{
+	cdev_del(&pft_dev->cdev);
+	device_destroy(pft_dev->driver_class, pft_dev->device_no);
+	class_destroy(pft_dev->driver_class);
+	unregister_chrdev_region(pft_dev->device_no, 1);
+
+}
+
+/* Free every entry of the open-file tracking list on module exit */
+static void __exit pft_free_open_files_list(void)
+{
+	struct pft_file_info *info = NULL;
+	struct pft_file_info *next = NULL;
+
+	mutex_lock(&pft_dev->lock);
+	list_for_each_entry_safe(info, next, &pft_dev->open_file_list, list) {
+		list_del(&info->list);
+		kfree(info);
+	}
+	mutex_unlock(&pft_dev->lock);
+}
+
+/* Module exit: release device, tracked files, and all allocations */
+static void __exit pft_exit(void)
+{
+	/* init failed (or never ran) - nothing to clean up */
+	if (pft_dev == NULL)
+		return;
+
+	pft_unregister_chrdev();
+	pft_free_open_files_list();
+
+	kfree(pft_dev->uid_table);
+	kfree(pft_dev);
+	pft_dev = NULL;
+}
+
+/**
+ * pft_init() - module init: allocate device state and register
+ * the control character device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int __init pft_init(void)
+{
+	int ret;
+	struct pft_device *dev = NULL;
+
+	dev = kzalloc(sizeof(struct pft_device), GFP_KERNEL);
+	if (dev == NULL) {
+		pr_err("No memory for device structure\n");
+		return -ENOMEM;
+	}
+	pft_dev = dev;
+
+	dev->state = PFT_STATE_DEACTIVATED;
+	dev->pfm_pid = UINT_MAX;	/* no PFM attached yet */
+
+	INIT_LIST_HEAD(&dev->open_file_list);
+	mutex_init(&dev->lock);
+
+	ret = pft_register_chardev();
+	if (ret) {
+		pr_err("create character device failed.\n");
+		goto fail;
+	}
+
+	/* Fixed "Drivr" typo and the broken ".n" (missing newline escape) */
+	pr_info("Driver initialized successfully %s %s.\n",
+		__DATE__, __TIME__);
+
+	return 0;
+
+fail:
+	pr_err("Failed to init driver.\n");
+	kfree(dev);
+	pft_dev = NULL;
+
+	/* Propagate the real error instead of masking it as -ENODEV */
+	return ret;
+}
+
+/* Module registration and metadata */
+module_init(pft_init);
+module_exit(pft_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Tagger driver");
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index d5c753f..3b8fba8 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -1814,7 +1814,8 @@
chip->dc_present = dc_present;
if (qpnp_chg_is_otg_en_set(chip))
qpnp_chg_force_run_on_batt(chip, !dc_present ? 1 : 0);
- if (!dc_present && !qpnp_chg_is_usb_chg_plugged_in(chip)) {
+ if (!dc_present && (!qpnp_chg_is_usb_chg_plugged_in(chip) ||
+ qpnp_chg_is_otg_en_set(chip))) {
chip->delta_vddmax_mv = 0;
qpnp_chg_set_appropriate_vddmax(chip);
chip->chg_done = false;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index e3284d5..81640a0 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -33,7 +33,9 @@
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_INT_EN 0x0004
-
+#define PMIC_ARB_PROTOCOL_IRQ_STATUS (0x700 + 0x820)
+#define PMIC_ARB_GENI_CTRL 0x0024
+#define PMIC_ARB_GENI_STATUS 0x0028
/* PMIC Arbiter channel registers */
#define PMIC_ARB_CMD(N) (0x0800 + (0x80 * (N)))
#define PMIC_ARB_CONFIG(N) (0x0804 + (0x80 * (N)))
@@ -125,6 +127,7 @@
u8 max_apid;
u16 periph_id_map[PMIC_ARB_MAX_PERIPHS];
u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
+ u32 prev_prtcl_irq_stat;
};
static struct spmi_pmic_arb_dev *the_pmic_arb;
@@ -143,6 +146,37 @@
writel_relaxed(val, dev->base + offset);
}
+/* Snapshot protocol IRQ status so error dumps can show before/after */
+static void pmic_arb_save_stat_before_txn(struct spmi_pmic_arb_dev *dev)
+{
+	dev->prev_prtcl_irq_stat =
+		readl_relaxed(dev->cnfg + PMIC_ARB_PROTOCOL_IRQ_STATUS);
+}
+
+/**
+ * pmic_arb_diagnosis() - map SPMI transaction status bits to errno
+ * @dev: pmic-arb device
+ * @status: channel status register value (caller saw DONE set)
+ *
+ * Return: 0 when no error bit is set, -EPERM / -EIO / -EAGAIN for
+ * denied / failed / dropped transactions respectively.
+ */
+static int pmic_arb_diagnosis(struct spmi_pmic_arb_dev *dev, u32 status)
+{
+	if (status & PMIC_ARB_STATUS_DENIED) {
+		dev_err(dev->dev,
+			"wait_for_done: transaction denied by SPMI master (0x%x)\n",
+			status);
+		return -EPERM;
+	}
+
+	if (status & PMIC_ARB_STATUS_FAILURE) {
+		dev_err(dev->dev,
+			"wait_for_done: transaction failed (0x%x)\n", status);
+		return -EIO;
+	}
+
+	/* Dropped means the arbiter was busy: worth retrying (-EAGAIN) */
+	if (status & PMIC_ARB_STATUS_DROPPED) {
+		dev_err(dev->dev,
+			"wait_for_done: transaction dropped pmic-arb busy (0x%x)\n",
+			status);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
static int pmic_arb_wait_for_done(struct spmi_pmic_arb_dev *dev)
{
u32 status = 0;
@@ -152,34 +186,13 @@
while (timeout--) {
status = pmic_arb_read(dev, offset);
- if (status & PMIC_ARB_STATUS_DONE) {
- if (status & PMIC_ARB_STATUS_DENIED) {
- dev_err(dev->dev,
- "%s: transaction denied (0x%x)\n",
- __func__, status);
- return -EPERM;
- }
+ if (status & PMIC_ARB_STATUS_DONE)
+ return pmic_arb_diagnosis(dev, status);
- if (status & PMIC_ARB_STATUS_FAILURE) {
- dev_err(dev->dev,
- "%s: transaction failed (0x%x)\n",
- __func__, status);
- return -EIO;
- }
-
- if (status & PMIC_ARB_STATUS_DROPPED) {
- dev_err(dev->dev,
- "%s: transaction dropped (0x%x)\n",
- __func__, status);
- return -EIO;
- }
-
- return 0;
- }
udelay(1);
}
- dev_err(dev->dev, "%s: timeout, status 0x%x\n", __func__, status);
+ dev_err(dev->dev, "wait_for_done:: timeout, status 0x%x\n", status);
return -ETIMEDOUT;
}
@@ -209,6 +222,29 @@
pmic_arb_write(dev, reg, data);
}
+/* Dump transaction details and arbiter registers after a failure */
+static void pmic_arb_dbg_err_dump(struct spmi_pmic_arb_dev *pmic_arb, int ret,
+		const char *msg, u8 opc, u8 sid, u16 addr, u8 bc, u8 *buf)
+{
+	u32 irq_stat = readl_relaxed(pmic_arb->cnfg
+			+ PMIC_ARB_PROTOCOL_IRQ_STATUS);
+	u32 geni_stat = readl_relaxed(pmic_arb->cnfg + PMIC_ARB_GENI_STATUS);
+	u32 geni_ctrl = readl_relaxed(pmic_arb->cnfg + PMIC_ARB_GENI_CTRL);
+
+	bc += 1; /* actual byte count */
+
+	if (buf)
+		dev_err(pmic_arb->dev,
+			"error:%d on data %s opcode:0x%x sid:%d addr:0x%x bc:%d buf:%*phC\n",
+			ret, msg, opc, sid, addr, bc, bc, buf);
+	else
+		dev_err(pmic_arb->dev,
+			"error:%d on non-data-cmd opcode:0x%x sid:%d\n",
+			ret, opc, sid);
+	/*
+	 * Fixed argument order: prev_prtcl_irq_stat was saved before the
+	 * transaction ("before"); irq_stat was just read ("after").
+	 */
+	dev_err(pmic_arb->dev,
+		"PROTOCOL_IRQ_STATUS before:0x%x after:0x%x GENI_STATUS:0x%x GENI_CTRL:0x%x\n",
+		pmic_arb->prev_prtcl_irq_stat, irq_stat, geni_stat, geni_ctrl);
+}
+
/* Non-data command */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
@@ -228,10 +264,13 @@
cmd = (opc << 27) | ((sid & 0xf) << 20);
spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_save_stat_before_txn(pmic_arb);
pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
rc = pmic_arb_wait_for_done(pmic_arb);
spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ if (rc)
+ pmic_arb_dbg_err_dump(pmic_arb, rc, "cmd", opc, sid, 0, 0, 0);
return rc;
}
@@ -249,7 +288,8 @@
, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
return -EINVAL;
}
- pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
+ dev_dbg(pmic_arb->dev, "client-rd op:0x%x sid:%d addr:0x%x bc:%d\n",
+ opc, sid, addr, bc + 1);
/* Check the opcode */
if (opc >= 0x60 && opc <= 0x7F)
@@ -264,6 +304,7 @@
cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_save_stat_before_txn(pmic_arb);
pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
rc = pmic_arb_wait_for_done(pmic_arb);
if (rc)
@@ -279,6 +320,9 @@
done:
spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ if (rc)
+ pmic_arb_dbg_err_dump(pmic_arb, rc, "read", opc, sid, addr, bc,
+ buf);
return rc;
}
@@ -296,7 +340,8 @@
, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
return -EINVAL;
}
- pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
+ dev_dbg(pmic_arb->dev, "client-wr op:0x%x sid:%d addr:0x%x bc:%d\n",
+ opc, sid, addr, bc + 1);
/* Check the opcode */
if (opc >= 0x40 && opc <= 0x5F)
@@ -314,6 +359,7 @@
/* Write data to FIFOs */
spin_lock_irqsave(&pmic_arb->lock, flags);
+ pmic_arb_save_stat_before_txn(pmic_arb);
pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
, min_t(u8, bc, 3));
if (bc > 3)
@@ -325,6 +371,10 @@
rc = pmic_arb_wait_for_done(pmic_arb);
spin_unlock_irqrestore(&pmic_arb->lock, flags);
+ if (rc)
+ pmic_arb_dbg_err_dump(pmic_arb, rc, "write", opc, sid, addr, bc,
+ buf);
+
return rc;
}
@@ -501,7 +551,9 @@
int i;
if (!is_apid_valid(pmic_arb, apid)) {
- dev_err(pmic_arb->dev, "unknown peripheral id 0x%x\n", ppid);
+ dev_err(pmic_arb->dev,
+ "periph_interrupt(apid:0x%x sid:0x%x pid:0x%x) unknown peripheral\n",
+ apid, sid, pid);
/* return IRQ_NONE; */
}
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index b7bf74e..159121f 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2847,9 +2847,15 @@
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
+/* Binder manages its own page mappings; any user fault gets SIGBUS */
+static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
static struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
.close = binder_vma_close,
+ .fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 81b683d..e5eb9b2 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -257,8 +259,8 @@
switch (event) {
case CPUFREQ_INCOMPATIBLE:
- pr_debug("%s: mitigating cpu %d to freq max: %u min: %u\n",
- KBUILD_MODNAME, policy->cpu, max_freq_req, min_freq_req);
+ pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
+ policy->cpu, max_freq_req, min_freq_req);
cpufreq_verify_within_limits(policy, min_freq_req,
max_freq_req);
@@ -283,7 +285,7 @@
table = cpufreq_frequency_get_table(0);
if (!table) {
- pr_debug("%s: error reading cpufreq table\n", __func__);
+ pr_debug("error reading cpufreq table\n");
return -EINVAL;
}
freq_table_get = 1;
@@ -293,9 +295,13 @@
static void update_cpu_freq(int cpu)
{
+ int ret = 0;
+
if (cpu_online(cpu)) {
- if (cpufreq_update_policy(cpu))
- pr_err("Unable to update policy for cpu:%d\n", cpu);
+ ret = cpufreq_update_policy(cpu);
+ if (ret)
+ pr_err("Unable to update policy for cpu:%d. err:%d\n",
+ cpu, ret);
}
}
@@ -307,13 +313,14 @@
if (!freq_table_get) {
ret = check_freq_table();
if (ret) {
- pr_err("%s:Fail to get freq table\n", KBUILD_MODNAME);
+ pr_err("Fail to get freq table. err:%d\n", ret);
return ret;
}
}
/* If min is larger than allowed max */
min = min(min, table[limit_idx_high].frequency);
+ pr_debug("Requesting min freq:%u for all CPU's\n", min);
if (freq_mitigation_task) {
min_freq_limit = min;
complete(&freq_mitigation_complete);
@@ -362,7 +369,7 @@
int ret = 0;
if (r->reg == NULL) {
- pr_info("Do not have regulator handle:%s, can't apply vdd\n",
+ pr_err("%s don't have regulator handle. can't apply vdd\n",
r->name);
return -EFAULT;
}
@@ -375,11 +382,15 @@
r->levels[r->num_levels - 1]);
if (!ret)
r->curr_level = -1;
+ pr_debug("Requested min level for %s. curr level: %d\n",
+ r->name, r->curr_level);
} else if (level >= 0 && level < (r->num_levels)) {
ret = regulator_set_voltage(r->reg, r->levels[level],
r->levels[r->num_levels - 1]);
if (!ret)
r->curr_level = level;
+ pr_debug("Requesting level %d for %s. curr level: %d\n",
+ r->levels[level], r->name, r->levels[r->curr_level]);
} else {
pr_err("level input:%d is not within range\n", level);
return -EINVAL;
@@ -395,12 +406,13 @@
int fail_cnt = 0;
int ret = 0;
+ pr_debug("Requesting PMIC Mode: %d\n", mode);
for (i = 0; i < psm_rails_cnt; i++) {
if (psm_rails[i].mode != mode) {
ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
if (ret) {
- pr_err("Cannot set mode:%d for %s",
- mode, psm_rails[i].name);
+ pr_err("Cannot set mode:%d for %s. err:%d",
+ mode, psm_rails[i].name, ret);
fail_cnt++;
} else
psm_rails[i].mode = mode;
@@ -473,6 +485,8 @@
en->enabled = 1;
else if (!val && (dis_cnt == rails_cnt))
en->enabled = 0;
+ pr_debug("%s vdd restriction. curr: %d\n",
+ (val) ? "Enable" : "Disable", en->enabled);
done_vdd_rstr_en:
mutex_unlock(&vdd_rstr_mutex);
@@ -547,12 +561,14 @@
ret = vdd_restriction_apply_voltage(reg, val);
if (ret) {
pr_err( \
- "Set vdd restriction for regulator %s failed\n",
- reg->name);
+ "Set vdd restriction for regulator %s failed. err:%d\n",
+ reg->name, ret);
goto done_store_level;
}
}
reg->curr_level = val;
+ pr_debug("Request level %d for %s\n",
+ reg->curr_level, reg->name);
}
done_store_level:
@@ -668,16 +684,15 @@
}
if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
- pr_err(" Invalid number %d for mode\n", val);
+ pr_err("Invalid number %d for mode\n", val);
goto done_psm_store;
}
if (val != reg->mode) {
ret = rpm_regulator_set_mode(reg->reg, val);
if (ret) {
- pr_err( \
- "Fail to set PMIC SW Mode:%d for %s\n",
- val, reg->name);
+ pr_err("Fail to set Mode:%d for %s. err:%d\n",
+ val, reg->name, ret);
goto done_psm_store;
}
reg->mode = val;
@@ -701,7 +716,7 @@
}
}
if (!hw_id_found) {
- pr_err("%s: Invalid sensor hw id :%d\n", __func__, sensor_id);
+ pr_err("Invalid sensor hw id:%d\n", sensor_id);
return -EINVAL;
}
@@ -716,8 +731,7 @@
tsens_id_map = kzalloc(sizeof(int) * max_tsens_num,
GFP_KERNEL);
if (!tsens_id_map) {
- pr_err("%s: Cannot allocate memory for tsens_id_map\n",
- __func__);
+ pr_err("Cannot allocate memory for tsens_id_map\n");
return -ENOMEM;
}
@@ -729,9 +743,8 @@
tsens_id_map[i] = i;
ret = 0;
} else {
- pr_err( \
- "%s: Failed to get hw id for sw id %d\n",
- __func__, i);
+ pr_err("Failed to get hw id for id:%d.err:%d\n",
+ i, ret);
goto fail;
}
}
@@ -760,7 +773,9 @@
ret = vdd_restriction_apply_voltage(&rails[i],
en ? 0 : -1);
if (ret) {
- pr_err("Cannot set voltage for %s", rails[i].name);
+ pr_err("Failed to %s for %s. err:%d",
+ (en) ? "enable" : "disable",
+ rails[i].name, ret);
fail_cnt++;
} else {
if (en)
@@ -792,7 +807,7 @@
table = cpufreq_frequency_get_table(0);
if (table == NULL) {
- pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
+ pr_err("error reading cpufreq table\n");
ret = -EINVAL;
goto fail;
}
@@ -814,15 +829,15 @@
ret = sensor_set_trip(sensor_id, threshold);
if (ret != 0) {
- pr_err("%s: Error in setting trip %d\n",
- KBUILD_MODNAME, threshold->trip);
+ pr_err("sensor:%u Error in setting trip:%d. err:%d\n",
+ sensor_id, threshold->trip, ret);
goto set_done;
}
ret = sensor_activate_trip(sensor_id, threshold, true);
if (ret != 0) {
- pr_err("%s: Error in enabling trip %d\n",
- KBUILD_MODNAME, threshold->trip);
+ pr_err("sensor:%u Error in enabling trip:%d. err:%d\n",
+ sensor_id, threshold->trip, ret);
goto set_done;
}
@@ -857,7 +872,7 @@
ret = tsens_get_temp(&tsens_dev, temp);
if (ret) {
- pr_err("Unable to read TSENS sensor %d\n",
+ pr_err("Unable to read TSENS sensor:%d\n",
tsens_dev.sensor_num);
goto get_temp_exit;
}
@@ -873,14 +888,17 @@
long temp;
if ((!threshold) || (zone_id >= max_tsens_num)) {
- pr_err("%s: Invalid input\n", KBUILD_MODNAME);
+ pr_err("Invalid input\n");
ret = -EINVAL;
goto set_threshold_exit;
}
ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp);
- if (ret)
+ if (ret) {
+ pr_err("Unable to read temperature for zone:%d. err:%d\n",
+ zone_id, ret);
goto set_threshold_exit;
+ }
while (i < MAX_THRESHOLD) {
switch (threshold[i].trip) {
@@ -901,6 +919,8 @@
}
break;
default:
+ pr_err("zone:%u Invalid trip:%d\n", zone_id,
+ threshold[i].trip);
break;
}
i++;
@@ -926,12 +946,12 @@
continue;
if (cpus_offlined & BIT(i) && !cpu_online(i))
continue;
- pr_info("%s: Set Offline: CPU%d Temp: %ld\n",
- KBUILD_MODNAME, i, temp);
+ pr_info("Set Offline: CPU%d Temp: %ld\n",
+ i, temp);
ret = cpu_down(i);
if (ret)
- pr_err("%s: Error %d offline core %d\n",
- KBUILD_MODNAME, ret, i);
+ pr_err("Error %d offline core %d\n",
+ ret, i);
cpus_offlined |= BIT(i);
break;
}
@@ -942,8 +962,8 @@
if (!(cpus_offlined & BIT(i)))
continue;
cpus_offlined &= ~BIT(i);
- pr_info("%s: Allow Online CPU%d Temp: %ld\n",
- KBUILD_MODNAME, i, temp);
+ pr_info("Allow Online CPU%d Temp: %ld\n",
+ i, temp);
/*
* If this core is already online, then bring up the
* next offlined core.
@@ -952,8 +972,8 @@
continue;
ret = cpu_up(i);
if (ret)
- pr_err("%s: Error %d online core %d\n",
- KBUILD_MODNAME, ret, i);
+ pr_err("Error %d online core %d\n",
+ ret, i);
break;
}
}
@@ -977,8 +997,10 @@
continue;
ret = cpu_down(cpu);
if (ret)
- pr_err("%s: Unable to offline cpu%d\n",
- KBUILD_MODNAME, cpu);
+ pr_err("Unable to offline CPU%d. err:%d\n",
+ cpu, ret);
+ else
+ pr_debug("Offlined CPU%d\n", cpu);
}
return ret;
}
@@ -988,8 +1010,10 @@
int ret = 0;
uint32_t cpu = 0, mask = 0;
- if (!core_control_enabled)
+ if (!core_control_enabled) {
+ pr_debug("Core control disabled\n");
return -EINVAL;
+ }
while (!kthread_should_stop()) {
while (wait_for_completion_interruptible(
@@ -1056,7 +1080,11 @@
ocr_rails[j].init = OPTIMUM_CURRENT_NR;
ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
if (ret)
- pr_err("Error setting max optimum current\n");
+ pr_err("Error setting max ocr. err:%d\n",
+ ret);
+ else
+ pr_debug("Requested MAX OCR. tsens:%d Temp:%ld",
+ tsens_id_map[i], temp);
goto do_ocr_exit;
} else if (temp <= (msm_thermal_info.ocr_temp_degC -
msm_thermal_info.ocr_temp_hyst_degC))
@@ -1077,6 +1105,8 @@
if (ret) {
pr_err("Error setting min optimum current\n");
goto do_ocr_exit;
+ } else {
+ pr_debug("Requested MIN OCR. Temp:%ld", temp);
}
}
@@ -1104,8 +1134,8 @@
for (i = 0; i < max_tsens_num; i++) {
ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
if (ret) {
- pr_debug("Unable to read TSENS sensor %d\n",
- tsens_id_map[i]);
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ tsens_id_map[i], ret);
dis_cnt++;
continue;
}
@@ -1113,9 +1143,13 @@
ret = vdd_restriction_apply_all(1);
if (ret) {
pr_err( \
- "Enable vdd rstr votlage for all failed\n");
+ "Enable vdd rstr for all failed. err:%d\n",
+ ret);
goto exit;
}
+ pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%ld\n",
+ thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+ temp);
goto exit;
} else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
dis_cnt++;
@@ -1123,9 +1157,11 @@
if (dis_cnt == max_tsens_num) {
ret = vdd_restriction_apply_all(0);
if (ret) {
- pr_err("Disable vdd rstr votlage for all failed\n");
+ pr_err("Disable vdd rstr for all failed. err:%d\n",
+ ret);
goto exit;
}
+ pr_debug("Disabled Vdd Restriction\n");
}
exit:
mutex_unlock(&vdd_rstr_mutex);
@@ -1143,8 +1179,8 @@
for (i = 0; i < max_tsens_num; i++) {
ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
if (ret) {
- pr_debug("%s: Unable to read TSENS sensor %d\n",
- __func__, tsens_id_map[i]);
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ tsens_id_map[i], ret);
auto_cnt++;
continue;
}
@@ -1157,9 +1193,12 @@
if (temp > msm_thermal_info.psm_temp_degC) {
ret = psm_set_mode_all(PMIC_PWM_MODE);
if (ret) {
- pr_err("Set pwm mode for all failed\n");
+ pr_err("Set pwm mode for all failed. err:%d\n",
+ ret);
goto exit;
}
+ pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%ld\n",
+ tsens_id_map[i], temp);
break;
} else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
auto_cnt++;
@@ -1168,9 +1207,10 @@
if (auto_cnt == max_tsens_num) {
ret = psm_set_mode_all(PMIC_AUTO_MODE);
if (ret) {
- pr_err("Set auto mode for all failed\n");
+ pr_err("Set auto mode for all failed. err:%d\n", ret);
goto exit;
}
+ pr_debug("Requested PMIC AUTO Mode\n");
}
exit:
@@ -1212,6 +1252,8 @@
for_each_possible_cpu(cpu) {
if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
continue;
+ pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n",
+ cpu, max_freq, temp);
cpus[cpu].limited_max_freq = max_freq;
update_cpu_freq(cpu);
}
@@ -1226,8 +1268,8 @@
ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID, &temp);
if (ret) {
- pr_debug("Unable to read TSENS sensor %d\n",
- msm_thermal_info.sensor_id);
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ msm_thermal_info.sensor_id, ret);
goto reschedule;
}
@@ -1260,14 +1302,13 @@
if (core_control_enabled &&
(msm_thermal_info.core_control_mask & BIT(cpu)) &&
(cpus_offlined & BIT(cpu))) {
- pr_debug(
- "%s: Preventing cpu%d from coming online.\n",
- KBUILD_MODNAME, cpu);
+ pr_debug("Preventing CPU%d from coming online.\n",
+ cpu);
return NOTIFY_BAD;
}
}
-
+ pr_debug("voting for CPU%d to be online\n", cpu);
return NOTIFY_OK;
}
@@ -1312,8 +1353,7 @@
{
struct cpu_info *cpu_node = (struct cpu_info *)data;
- pr_info("%s: %s reach temp threshold: %d\n", KBUILD_MODNAME,
- cpu_node->sensor_type, temp);
+ pr_info("%s reach temp threshold: %d\n", cpu_node->sensor_type, temp);
if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
return 0;
@@ -1333,7 +1373,7 @@
cpu_node->hotplug_thresh_clear = true;
complete(&hotplug_notify_complete);
} else {
- pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
}
return 0;
}
@@ -1352,8 +1392,8 @@
continue;
if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type,
&temp)) {
- pr_err("%s: Unable to read TSENS sensor %d\n",
- KBUILD_MODNAME, cpus[cpu].sensor_id);
+ pr_err("Unable to read TSENS sensor:%d.\n",
+ cpus[cpu].sensor_id);
mutex_unlock(&core_control_mutex);
return -EINVAL;
}
@@ -1369,8 +1409,7 @@
if (hotplug_task)
complete(&hotplug_notify_complete);
else {
- pr_err("%s: Hotplug task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
return -EINVAL;
}
return 0;
@@ -1410,8 +1449,8 @@
init_completion(&hotplug_notify_complete);
hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
if (IS_ERR(hotplug_task)) {
- pr_err("%s: Failed to create do_hotplug thread\n",
- KBUILD_MODNAME);
+ pr_err("Failed to create do_hotplug thread. err:%ld\n",
+ PTR_ERR(hotplug_task));
return;
}
/*
@@ -1471,7 +1510,7 @@
{
struct cpu_info *cpu_node = (struct cpu_info *) data;
- pr_debug("%s: %s reached temp threshold: %d\n", KBUILD_MODNAME,
+ pr_debug("%s reached temp threshold: %d\n",
cpu_node->sensor_type, temp);
if (!(msm_thermal_info.freq_mitig_control_mask &
@@ -1481,8 +1520,8 @@
switch (type) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (!cpu_node->max_freq) {
- pr_info("%s: Mitigating cpu %d frequency to %d\n",
- KBUILD_MODNAME, cpu_node->cpu,
+ pr_info("Mitigating CPU%d frequency to %d\n",
+ cpu_node->cpu,
msm_thermal_info.freq_limit);
cpu_node->max_freq = true;
@@ -1490,8 +1529,8 @@
break;
case THERMAL_TRIP_CONFIGURABLE_LOW:
if (cpu_node->max_freq) {
- pr_info("%s: Removing frequency mitigation for cpu%d\n",
- KBUILD_MODNAME, cpu_node->cpu);
+ pr_info("Removing frequency mitigation for CPU%d\n",
+ cpu_node->cpu);
cpu_node->max_freq = false;
}
@@ -1504,8 +1543,7 @@
cpu_node->freq_thresh_clear = true;
complete(&freq_mitigation_complete);
} else {
- pr_err("%s: Frequency mitigation task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Frequency mitigation task is not initialized\n");
}
return 0;
@@ -1544,8 +1582,8 @@
"msm_thermal:freq_mitig");
if (IS_ERR(freq_mitigation_task)) {
- pr_err("%s: Failed to create frequency mitigation thread\n",
- KBUILD_MODNAME);
+ pr_err("Failed to create frequency mitigation thread. err:%ld\n",
+ PTR_ERR(freq_mitigation_task));
return;
}
}
@@ -1555,11 +1593,13 @@
int ret = 0;
if (cpu >= num_possible_cpus()) {
- pr_err("%s: Invalid input\n", KBUILD_MODNAME);
+ pr_err("Invalid input\n");
ret = -EINVAL;
goto set_freq_exit;
}
+ pr_debug("Userspace requested %s frequency %u for CPU%u\n",
+ (is_max) ? "Max" : "Min", freq, cpu);
if (is_max) {
if (cpus[cpu].user_max_freq == freq)
goto set_freq_exit;
@@ -1575,8 +1615,7 @@
if (freq_mitigation_task) {
complete(&freq_mitigation_complete);
} else {
- pr_err("%s: Frequency mitigation task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Frequency mitigation task is not initialized\n");
ret = -ESRCH;
goto set_freq_exit;
}
@@ -1591,8 +1630,7 @@
struct therm_threshold *thresh_ptr;
if (!thresh_inp) {
- pr_err("%s: %s: Invalid input\n",
- KBUILD_MODNAME, __func__);
+ pr_err("Invalid input\n");
ret = -EINVAL;
goto therm_set_exit;
}
@@ -1621,16 +1659,17 @@
if (!vdd_rstr_enabled)
return;
if (!trig_thresh) {
- pr_err("%s:%s Invalid input\n", KBUILD_MODNAME, __func__);
+ pr_err("Invalid input\n");
return;
}
if (trig_thresh->trip_triggered < 0)
goto set_and_exit;
mutex_lock(&vdd_rstr_mutex);
- pr_debug("%s: sensor%d reached %d thresh for Vdd restriction\n",
- KBUILD_MODNAME, trig_thresh->sensor_id,
- trig_thresh->trip_triggered);
+ pr_debug("sensor:%d reached %s thresh for Vdd restriction\n",
+ tsens_id_map[trig_thresh->sensor_id],
+ (trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
+ "high" : "low");
switch (trig_thresh->trip_triggered) {
case THERMAL_TRIP_CONFIGURABLE_HI:
if (vdd_sens_status & BIT(trig_thresh->sensor_id))
@@ -1640,8 +1679,7 @@
vdd_sens_status |= BIT(trig_thresh->sensor_id);
break;
default:
- pr_err("%s:%s: Unsupported trip type\n",
- KBUILD_MODNAME, __func__);
+ pr_err("Unsupported trip type\n");
goto unlock_and_exit;
break;
}
@@ -1697,8 +1735,8 @@
thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
"msm_thermal:therm_monitor");
if (IS_ERR(thermal_monitor_task)) {
- pr_err("%s: Failed to create thermal monitor thread\n",
- KBUILD_MODNAME);
+ pr_err("Failed to create thermal monitor thread. err:%ld\n",
+ PTR_ERR(thermal_monitor_task));
goto init_exit;
}
@@ -1718,8 +1756,7 @@
thresh_data->parent->thresh_triggered = true;
complete(&thermal_monitor_complete);
} else {
- pr_err("%s: Thermal monitor task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Thermal monitor task is not initialized\n");
}
return 0;
}
@@ -1733,14 +1770,13 @@
if (!callback || index >= MSM_LIST_MAX_NR || index < 0
|| sensor_id == -ENODEV) {
- pr_err("%s: Invalid input to init_threshold\n",
- KBUILD_MODNAME);
+ pr_err("Invalid input. sensor:%d. index:%d\n",
+ sensor_id, index);
ret = -EINVAL;
goto init_thresh_exit;
}
if (thresh[index].thresh_list) {
- pr_err("%s: threshold already initialized\n",
- KBUILD_MODNAME);
+ pr_err("threshold id:%d already initialized\n", index);
ret = -EEXIST;
goto init_thresh_exit;
}
@@ -1751,7 +1787,7 @@
thresh[index].thresh_list = kzalloc(sizeof(struct therm_threshold) *
thresh[index].thresh_ct, GFP_KERNEL);
if (!thresh[index].thresh_list) {
- pr_err("%s: kzalloc failed\n", KBUILD_MODNAME);
+ pr_err("kzalloc failed for thresh index:%d\n", index);
ret = -ENOMEM;
goto init_thresh_exit;
}
@@ -1813,6 +1849,7 @@
if (cpus[cpu].limited_max_freq == UINT_MAX &&
cpus[cpu].limited_min_freq == 0)
continue;
+ pr_info("Max frequency reset for CPU%d\n", cpu);
cpus[cpu].limited_max_freq = UINT_MAX;
cpus[cpu].limited_min_freq = 0;
update_cpu_freq(cpu);
@@ -1827,7 +1864,7 @@
return;
}
if (polling_enabled) {
- pr_info("%s: Interrupt mode init\n", KBUILD_MODNAME);
+ pr_info("Interrupt mode init\n");
polling_enabled = 0;
disable_msm_thermal();
hotplug_init();
@@ -1844,10 +1881,10 @@
if (!enabled)
interrupt_mode_init();
else
- pr_info("%s: no action for enabled = %d\n",
- KBUILD_MODNAME, enabled);
+ pr_info("no action for enabled = %d\n",
+ enabled);
- pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);
+ pr_info("enabled = %d\n", enabled);
return ret;
}
@@ -1874,7 +1911,7 @@
ret = kstrtoint(buf, 10, &val);
if (ret) {
- pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ pr_err("Invalid input %s. err:%d\n", buf, ret);
goto done_store_cc;
}
@@ -1883,15 +1920,14 @@
core_control_enabled = !!val;
if (core_control_enabled) {
- pr_info("%s: Core control enabled\n", KBUILD_MODNAME);
+ pr_info("Core control enabled\n");
register_cpu_notifier(&msm_thermal_cpu_notifier);
if (hotplug_task)
complete(&hotplug_notify_complete);
else
- pr_err("%s: Hotplug task is not initialized\n",
- KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
} else {
- pr_info("%s: Core control disabled\n", KBUILD_MODNAME);
+ pr_info("Core control disabled\n");
unregister_cpu_notifier(&msm_thermal_cpu_notifier);
}
@@ -1915,13 +1951,12 @@
mutex_lock(&core_control_mutex);
ret = kstrtouint(buf, 10, &val);
if (ret) {
- pr_err("%s: Invalid input %s\n", KBUILD_MODNAME, buf);
+ pr_err("Invalid input %s. err:%d\n", buf, ret);
goto done_cc;
}
if (polling_enabled) {
- pr_err("%s: Ignoring request; polling thread is enabled.\n",
- KBUILD_MODNAME);
+ pr_err("Ignoring request; polling thread is enabled.\n");
goto done_cc;
}
@@ -1929,12 +1964,15 @@
if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
continue;
cpus[cpu].user_offline = !!(val & BIT(cpu));
+ pr_debug("\"%s\"(PID:%i) requests %s CPU%d.\n", current->comm,
+ current->pid, (cpus[cpu].user_offline) ? "offline" :
+ "online", cpu);
}
if (hotplug_task)
complete(&hotplug_notify_complete);
else
- pr_err("%s: Hotplug task is not initialized\n", KBUILD_MODNAME);
+ pr_err("Hotplug task is not initialized\n");
done_cc:
mutex_unlock(&core_control_mutex);
return count;
@@ -2009,23 +2047,21 @@
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
- pr_err("%s: cannot find kobject for module\n",
- KBUILD_MODNAME);
+ pr_err("cannot find kobject\n");
ret = -ENOENT;
goto done_cc_nodes;
}
cc_kobj = kobject_create_and_add("core_control", module_kobj);
if (!cc_kobj) {
- pr_err("%s: cannot create core control kobj\n",
- KBUILD_MODNAME);
+ pr_err("cannot create core control kobj\n");
ret = -ENOMEM;
goto done_cc_nodes;
}
ret = sysfs_create_group(cc_kobj, &cc_attr_group);
if (ret) {
- pr_err("%s: cannot create group\n", KBUILD_MODNAME);
+ pr_err("cannot create sysfs group. err:%d\n", ret);
goto done_cc_nodes;
}
@@ -2078,6 +2114,7 @@
tsens_get_max_sensor_num(&max_tsens_num);
if (create_sensor_id_map()) {
+ pr_err("Creating sensor id map failed\n");
ret = -EINVAL;
goto pre_init_exit;
}
@@ -2087,8 +2124,7 @@
sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
GFP_KERNEL);
if (!thresh) {
- pr_err("%s:%s: kzalloc failed\n",
- KBUILD_MODNAME, __func__);
+ pr_err("kzalloc failed\n");
ret = -ENOMEM;
goto pre_init_exit;
}
@@ -2119,16 +2155,19 @@
BUG_ON(!pdata);
memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
- if (check_sensor_id(msm_thermal_info.sensor_id))
+ if (check_sensor_id(msm_thermal_info.sensor_id)) {
+ pr_err("Invalid sensor:%d for polling\n",
+ msm_thermal_info.sensor_id);
return -EINVAL;
+ }
enabled = 1;
polling_enabled = 1;
ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
- pr_err("%s: cannot register cpufreq notifier\n",
- KBUILD_MODNAME);
+ pr_err("cannot register cpufreq notifier. err:%d\n", ret);
+
INIT_DELAYED_WORK(&check_temp_work, check_temp);
schedule_delayed_work(&check_temp_work, 0);
@@ -2190,8 +2229,7 @@
if (freq_table_get)
ret = vdd_restriction_apply_freq(&rails[i], 0);
else
- pr_info("%s:Defer vdd rstr freq init\n",
- __func__);
+ pr_info("Defer vdd rstr freq init.\n");
} else {
rails[i].reg = devm_regulator_get(&pdev->dev,
rails[i].name);
@@ -2199,12 +2237,14 @@
ret = PTR_ERR(rails[i].reg);
if (ret != -EPROBE_DEFER) {
pr_err( \
- "%s, could not get regulator: %s\n",
- rails[i].name, __func__);
+ "could not get regulator: %s. err:%d\n",
+ rails[i].name, ret);
rails[i].reg = NULL;
rails[i].curr_level = -2;
return ret;
}
+ pr_info("Defer regulator %s probe\n",
+ rails[i].name);
return ret;
}
/*
@@ -2230,11 +2270,13 @@
if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
ret = PTR_ERR(psm_rails[i].reg);
if (ret != -EPROBE_DEFER) {
- pr_err("%s, could not get rpm regulator: %s\n",
- psm_rails[i].name, __func__);
+ pr_err("couldn't get rpm regulator %s. err:%d\n",
+ psm_rails[i].name, ret);
psm_rails[i].reg = NULL;
goto psm_reg_exit;
}
+ pr_info("Defer regulator %s probe\n",
+ psm_rails[i].name);
return ret;
}
/* Apps default vote for PWM mode */
@@ -2242,7 +2284,7 @@
ret = rpm_regulator_set_mode(psm_rails[i].reg,
psm_rails[i].init);
if (ret) {
- pr_err("%s: Cannot set PMIC PWM mode\n", __func__);
+ pr_err("Cannot set PMIC PWM mode. err:%d\n", ret);
return ret;
} else
psm_rails[i].mode = PMIC_PWM_MODE;
@@ -2311,22 +2353,21 @@
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
- pr_err("%s: cannot find kobject for module %s\n",
- __func__, KBUILD_MODNAME);
+ pr_err("cannot find kobject\n");
rc = -ENOENT;
goto thermal_sysfs_add_exit;
}
vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
if (!vdd_rstr_kobj) {
- pr_err("%s: cannot create vdd_restriction kobject\n", __func__);
+ pr_err("cannot create vdd_restriction kobject\n");
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
if (rc) {
- pr_err("%s: cannot create kobject attribute group\n", __func__);
+ pr_err("cannot create kobject attribute group. err:%d\n", rc);
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
@@ -2335,8 +2376,8 @@
vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
vdd_rstr_kobj);
if (!vdd_rstr_reg_kobj[i]) {
- pr_err("%s: cannot create for kobject for %s\n",
- __func__, rails[i].name);
+ pr_err("cannot create kobject for %s\n",
+ rails[i].name);
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
@@ -2344,6 +2385,7 @@
rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
GFP_KERNEL);
if (!rails[i].attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
rc = -ENOMEM;
goto thermal_sysfs_add_exit;
}
@@ -2355,8 +2397,8 @@
rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
&rails[i].attr_gp);
if (rc) {
- pr_err("%s: cannot create attribute group for %s\n",
- __func__, rails[i].name);
+ pr_err("cannot create attribute group for %s. err:%d\n",
+ rails[i].name, rc);
goto thermal_sysfs_add_exit;
}
}
@@ -2467,15 +2509,14 @@
module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
if (!module_kobj) {
- pr_err("%s: cannot find kobject for module %s\n",
- __func__, KBUILD_MODNAME);
+ pr_err("cannot find kobject\n");
rc = -ENOENT;
goto psm_node_exit;
}
psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
if (!psm_kobj) {
- pr_err("%s: cannot create psm kobject\n", KBUILD_MODNAME);
+ pr_err("cannot create psm kobject\n");
rc = -ENOMEM;
goto psm_node_exit;
}
@@ -2484,14 +2525,15 @@
psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
psm_kobj);
if (!psm_reg_kobj[i]) {
- pr_err("%s: cannot create for kobject for %s\n",
- KBUILD_MODNAME, psm_rails[i].name);
+ pr_err("cannot create kobject for %s\n",
+ psm_rails[i].name);
rc = -ENOMEM;
goto psm_node_exit;
}
psm_rails[i].attr_gp.attrs = kzalloc( \
sizeof(struct attribute *) * 2, GFP_KERNEL);
if (!psm_rails[i].attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
rc = -ENOMEM;
goto psm_node_exit;
}
@@ -2501,8 +2543,8 @@
rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
if (rc) {
- pr_err("%s: cannot create attribute group for %s\n",
- KBUILD_MODNAME, psm_rails[i].name);
+ pr_err("cannot create attribute group for %s. err:%d\n",
+ psm_rails[i].name, rc);
goto psm_node_exit;
}
}
@@ -2549,14 +2591,14 @@
if (rails_cnt == 0)
goto read_node_fail;
if (rails_cnt >= MAX_RAILS) {
- pr_err("%s: Too many rails.\n", __func__);
+ pr_err("Too many rails:%d.\n", rails_cnt);
return -EFAULT;
}
rails = kzalloc(sizeof(struct rail) * rails_cnt,
GFP_KERNEL);
if (!rails) {
- pr_err("%s: Fail to allocate memory for rails.\n", __func__);
+ pr_err("Fail to allocate memory for rails.\n");
return -ENOMEM;
}
@@ -2573,7 +2615,8 @@
rails[i].num_levels = arr_size/sizeof(__be32);
if (rails[i].num_levels >
sizeof(rails[i].levels)/sizeof(uint32_t)) {
- pr_err("%s: Array size too large\n", __func__);
+ pr_err("Array size:%d too large for index:%d\n",
+ rails[i].num_levels, i);
return -EFAULT;
}
ret = of_property_read_u32_array(child_node, key,
@@ -2601,23 +2644,26 @@
if (rails_cnt) {
ret = vdd_restriction_reg_init(pdev);
if (ret) {
- pr_info("%s:Failed to get regulators. KTM continues.\n",
- __func__);
+ pr_err("Err regulator init. err:%d. KTM continues.\n",
+ ret);
goto read_node_fail;
}
ret = init_threshold(MSM_VDD_RESTRICTION, MONITOR_ALL_TSENS,
data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
vdd_restriction_notify);
- if (ret)
+ if (ret) {
+ pr_err("Error in initializing thresholds. err:%d\n",
+ ret);
goto read_node_fail;
+ }
vdd_rstr_enabled = true;
}
read_node_fail:
vdd_rstr_probed = true;
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- __func__, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
kfree(rails);
rails_cnt = 0;
}
@@ -2890,7 +2936,7 @@
psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
GFP_KERNEL);
if (!psm_rails) {
- pr_err("%s: Fail to allocate memory for psm rails\n", __func__);
+ pr_err("Fail to allocate memory for psm rails\n");
psm_rails_cnt = 0;
return -ENOMEM;
}
@@ -2905,8 +2951,8 @@
if (psm_rails_cnt) {
ret = psm_reg_init(pdev);
if (ret) {
- pr_info("%s:Failed to get regulators. KTM continues.\n",
- __func__);
+ pr_err("Err regulator init. err:%d. KTM continues.\n",
+ ret);
goto read_node_fail;
}
psm_enabled = true;
@@ -2916,8 +2962,8 @@
psm_probed = true;
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- __func__, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
kfree(psm_rails);
psm_rails_cnt = 0;
}
@@ -2968,7 +3014,7 @@
key = "qcom,cpu-sensors";
cpu_cnt = of_property_count_strings(node, key);
if (cpu_cnt < num_possible_cpus()) {
- pr_err("%s: Wrong number of cpu sensors\n", KBUILD_MODNAME);
+ pr_err("Wrong number of cpu sensors:%d\n", cpu_cnt);
ret = -EINVAL;
goto hotplug_node_fail;
}
@@ -2983,8 +3029,8 @@
read_node_fail:
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- KBUILD_MODNAME, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
core_control_enabled = 0;
}
@@ -2993,8 +3039,8 @@
hotplug_node_fail:
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- KBUILD_MODNAME, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
hotplug_enabled = 0;
}
@@ -3034,8 +3080,8 @@
PROBE_FREQ_EXIT:
if (ret) {
dev_info(&pdev->dev,
- "%s:Failed reading node=%s, key=%s. KTM continues\n",
- __func__, node->full_name, key);
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
freq_mitigation_enabled = 0;
}
return ret;
@@ -3050,8 +3096,10 @@
memset(&data, 0, sizeof(struct msm_thermal_data));
ret = msm_thermal_pre_init();
- if (ret)
+ if (ret) {
+ pr_err("thermal pre init failed. err:%d\n", ret);
goto fail;
+ }
key = "qcom,sensor-id";
ret = of_property_read_u32(node, key, &data.sensor_id);
@@ -3135,8 +3183,8 @@
return ret;
fail:
if (ret)
- pr_err("%s: Failed reading node=%s, key=%s\n",
- __func__, node->full_name, key);
+ pr_err("Failed reading node=%s, key=%s. err:%d\n",
+ node->full_name, key, ret);
return ret;
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index a701ec8..6078ef1 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -2938,6 +2938,7 @@
return ERR_PTR(-ENOMEM);
}
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
/* UART TX GPIO */
pdata->uart_tx_gpio = of_get_named_gpio(node,
"qcom,tx-gpio", 0);
@@ -3201,13 +3202,47 @@
return rc;
}
-#define BLSP_UART_NR 12
-static int deviceid[BLSP_UART_NR] = {0};
-static atomic_t msm_serial_hs_next_id = ATOMIC_INIT(0);
+
+static bool deviceid[UARTDM_NR] = {0};
+/*
+ * The mutex synchronizes grabbing next free device number
+ * both in case of an alias being used or not. When alias is
+ * used, the msm_hs_dt_to_pdata gets it and the boolean array
+ * is accordingly updated with device_id_set_used. If no alias
+ * is used, then device_id_grab_next_free sets that array.
+ */
+static DEFINE_MUTEX(mutex_next_device_id);
+
+static int device_id_grab_next_free(void)
+{
+ int i;
+ int ret = -ENODEV;
+ mutex_lock(&mutex_next_device_id);
+ for (i = 0; i < UARTDM_NR; i++)
+ if (!deviceid[i]) {
+ ret = i;
+ deviceid[i] = true;
+ break;
+ }
+ mutex_unlock(&mutex_next_device_id);
+ return ret;
+}
+
+static int device_id_set_used(int index)
+{
+ int ret = 0;
+ mutex_lock(&mutex_next_device_id);
+ if (deviceid[index])
+ ret = -ENODEV;
+ else
+ deviceid[index] = true;
+ mutex_unlock(&mutex_next_device_id);
+ return ret;
+}
static int __devinit msm_hs_probe(struct platform_device *pdev)
{
- int ret = 0, alias_num = -1;
+ int ret = 0;
struct uart_port *uport;
struct msm_hs_port *msm_uport;
struct resource *core_resource;
@@ -3224,33 +3259,21 @@
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- if (pdev->id == -1) {
- pdev->id = atomic_inc_return(&msm_serial_hs_next_id)-1;
- deviceid[pdev->id] = 1;
- }
-
- /* Use alias from device tree if present
- * Alias is used as an optional property
- */
- alias_num = of_alias_get_id(pdev->dev.of_node, "uart");
- if (alias_num >= 0) {
- /* If alias_num is between 0 and 11, check that it not
- * equal to previous incremented pdev-ids. If it is
- * equal to previous pdev.ids , fail deviceprobe.
- */
- if (alias_num < BLSP_UART_NR) {
- if (deviceid[alias_num] == 0) {
- pdev->id = alias_num;
- } else {
- MSM_HS_ERR("alias_num=%d already used\n",
- alias_num);
- return -EINVAL;
- }
- } else {
- pdev->id = alias_num;
+ if (pdev->id < 0) {
+ pdev->id = device_id_grab_next_free();
+ if (pdev->id < 0) {
+ dev_err(&pdev->dev,
+ "Error grabbing next free device id\n");
+ return pdev->id;
+ }
+ } else {
+ ret = device_id_set_used(pdev->id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%d alias taken\n",
+ pdev->id);
+ return ret;
}
}
-
pdev->dev.platform_data = pdata;
}
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index e373b9b..e3d339c 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -552,7 +552,7 @@
dotg->charger->chg_type == DWC3_PROPRIETARY_CHARGER)
power_supply_type = POWER_SUPPLY_TYPE_USB_DCP;
else
- power_supply_type = POWER_SUPPLY_TYPE_BATTERY;
+ power_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
power_supply_set_supply_type(dotg->psy, power_supply_type);
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 6b01472..4de97f56 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -2723,6 +2723,8 @@
struct android_configuration *conf;
int value = -EOPNOTSUPP;
unsigned long flags;
+ bool do_work = false;
+ bool prev_configured = false;
req->zero = 0;
req->complete = composite_setup_complete;
@@ -2741,6 +2743,12 @@
}
}
+ /*
+ * skip the work when 2nd set config arrives
+ * with same value from the host.
+ */
+ if (cdev->config)
+ prev_configured = true;
/* Special case the accessory function.
* It needs to handle control requests before it is enabled.
*/
@@ -2753,13 +2761,15 @@
spin_lock_irqsave(&cdev->lock, flags);
if (!dev->connected) {
dev->connected = 1;
- schedule_work(&dev->work);
+ do_work = true;
} else if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
cdev->config) {
- schedule_work(&dev->work);
+ if (!prev_configured)
+ do_work = true;
}
spin_unlock_irqrestore(&cdev->lock, flags);
-
+ if (do_work)
+ schedule_work(&dev->work);
return value;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 7d63bf9..b13e8e5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -604,6 +604,16 @@
unsigned power = gadget_is_otg(gadget) ? 8 : 100;
int tmp;
+ /*
+ * ignore 2nd time SET_CONFIGURATION
+ * only for same config value twice.
+ */
+ if (cdev->config && (cdev->config->bConfigurationValue == number)) {
+ DBG(cdev, "already in the same config with value %d\n",
+ number);
+ return 0;
+ }
+
if (number) {
list_for_each_entry(c, &cdev->configs, list) {
if (c->bConfigurationValue == number) {
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index ebb226c..f319238 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -82,6 +82,7 @@
bool wakeup_irq_enabled;
int wakeup_irq;
enum usb_vdd_type vdd_type;
+ void __iomem *usb_phy_ctrl_reg;
};
static inline struct msm_hcd *hcd_to_mhcd(struct usb_hcd *hcd)
@@ -497,6 +498,9 @@
if (time_after(jiffies, timeout)) {
dev_err(mhcd->dev, "msm_ulpi_read: timeout %08x\n",
readl_relaxed(USB_ULPI_VIEWPORT));
+ dev_err(mhcd->dev, "PORTSC: %08x USBCMD: %08x\n",
+ readl_relaxed(USB_PORTSC),
+ readl_relaxed(USB_USBCMD));
return -ETIMEDOUT;
}
udelay(1);
@@ -521,6 +525,9 @@
while (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN) {
if (time_after(jiffies, timeout)) {
dev_err(mhcd->dev, "msm_ulpi_write: timeout\n");
+ dev_err(mhcd->dev, "PORTSC: %08x USBCMD: %08x\n",
+ readl_relaxed(USB_PORTSC),
+ readl_relaxed(USB_USBCMD));
return -ETIMEDOUT;
}
udelay(1);
@@ -572,13 +579,13 @@
struct msm_usb_host_platform_data *pdata;
u32 val;
int ret;
- int retries;
ret = msm_ehci_link_clk_reset(mhcd, 1);
if (ret)
return ret;
- usleep_range(10, 12);
+ /* Minimum 10msec delay for block reset as per hardware spec */
+ usleep_range(10000, 12000);
ret = msm_ehci_link_clk_reset(mhcd, 0);
if (ret)
@@ -592,29 +599,34 @@
val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK;
writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC);
- for (retries = 3; retries > 0; retries--) {
- ret = msm_ulpi_write(mhcd, ULPI_FUNC_CTRL_SUSPENDM,
- ULPI_CLR(ULPI_FUNC_CTRL));
- if (!ret)
- break;
- }
- if (!retries)
- return -ETIMEDOUT;
-
- /* Wakeup the PHY with a reg-access for calibration */
- for (retries = 3; retries > 0; retries--) {
- ret = msm_ulpi_read(mhcd, ULPI_DEBUG);
- if (ret != -ETIMEDOUT)
- break;
- }
- if (!retries)
- return -ETIMEDOUT;
-
dev_info(mhcd->dev, "phy_reset: success\n");
return 0;
}
+static void usb_phy_reset(struct msm_hcd *mhcd)
+{
+ u32 val;
+
+ /* Assert USB PHY_PON */
+ val = readl_relaxed(mhcd->usb_phy_ctrl_reg);
+ val &= ~PHY_POR_BIT_MASK;
+ val |= PHY_POR_ASSERT;
+ writel_relaxed(val, mhcd->usb_phy_ctrl_reg);
+
+ /* wait for minimum 10 microseconds as suggested in hardware spec */
+ usleep_range(10, 15);
+
+ /* Deassert USB PHY_PON */
+ val = readl_relaxed(mhcd->usb_phy_ctrl_reg);
+ val &= ~PHY_POR_BIT_MASK;
+ val |= PHY_POR_DEASSERT;
+ writel_relaxed(val, mhcd->usb_phy_ctrl_reg);
+
+ /* Ensure that RESET operation is completed. */
+ mb();
+}
+
#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
static int msm_hsusb_reset(struct msm_hcd *mhcd)
{
@@ -649,6 +661,9 @@
writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16),
USB_PHY_CTRL2);
+ /* Reset USB PHY after performing USB Link RESET */
+ usb_phy_reset(mhcd);
+
msleep(100);
writel_relaxed(0x0, USB_AHBBURST);
@@ -1523,6 +1538,13 @@
goto disable_ldo;
}
+ pdata = mhcd->dev->platform_data;
+
+ if (pdata && pdata->use_sec_phy)
+ mhcd->usb_phy_ctrl_reg = USB_PHY_CTRL2;
+ else
+ mhcd->usb_phy_ctrl_reg = USB_PHY_CTRL;
+
ret = msm_hsusb_reset(mhcd);
if (ret) {
dev_err(&pdev->dev, "hsusb PHY initialization failed\n");
@@ -1535,7 +1557,6 @@
goto vbus_deinit;
}
- pdata = mhcd->dev->platform_data;
if (pdata && (!pdata->dock_connect_irq ||
!irq_read_line(pdata->dock_connect_irq)))
msm_ehci_vbus_power(mhcd, 1);
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index 72cceaa..b1c0343 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -198,6 +198,7 @@
int handoff_pending;
struct mdss_prefill_data prefill_data;
bool ulps;
+ int iommu_ref_cnt;
};
extern struct mdss_data_type *mdss_res;
@@ -221,6 +222,7 @@
void mdss_disable_irq(struct mdss_hw *hw);
void mdss_disable_irq_nosync(struct mdss_hw *hw);
void mdss_bus_bandwidth_ctrl(int enable);
+int mdss_iommu_ctrl(int enable);
static inline struct ion_client *mdss_get_ionclient(void)
{
diff --git a/drivers/video/msm/mdss/mdss_debug.h b/drivers/video/msm/mdss/mdss_debug.h
index e2c9edd..684bd45 100644
--- a/drivers/video/msm/mdss/mdss_debug.h
+++ b/drivers/video/msm/mdss/mdss_debug.h
@@ -16,6 +16,7 @@
#include <stdarg.h>
#include "mdss.h"
+#include "mdss_mdp_trace.h"
#define MISR_POLL_SLEEP 2000
#define MISR_POLL_TIMEOUT 32000
@@ -30,6 +31,13 @@
#define MDSS_XLOG_TOUT_HANDLER(...) \
mdss_xlog_tout_handler(__func__, ##__VA_ARGS__, XLOG_TOUT_DATA_LIMITER)
+#define ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define ATRACE_FUNC() ATRACE_BEGIN(__func__)
+
+#define ATRACE_INT(name, value) \
+ trace_mdp_trace_counter(current->tgid, name, value)
+
#ifdef CONFIG_DEBUG_FS
struct mdss_debug_base {
struct mdss_debug_data *mdd;
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 46ca84f..56701d4 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -882,6 +882,7 @@
if (rlen <= 2) {
short_response = 1;
+ pkt_size = rlen;
rx_byte = 4;
} else {
short_response = 0;
@@ -905,32 +906,30 @@
while (!end) {
pr_debug("%s: rlen=%d pkt_size=%d rx_byte=%d\n",
__func__, rlen, pkt_size, rx_byte);
- if (!short_response) {
- max_pktsize[0] = pkt_size;
- mdss_dsi_buf_init(tp);
- ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
- if (!ret) {
- pr_err("%s: failed to add max_pkt_size\n",
- __func__);
- rp->len = 0;
- goto end;
- }
-
- mdss_dsi_wait4video_eng_busy(ctrl);
-
- mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
- ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
- if (IS_ERR_VALUE(ret)) {
- mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
- pr_err("%s: failed to tx max_pkt_size\n",
- __func__);
- rp->len = 0;
- goto end;
- }
- pr_debug("%s: max_pkt_size=%d sent\n",
- __func__, pkt_size);
+ max_pktsize[0] = pkt_size;
+ mdss_dsi_buf_init(tp);
+ ret = mdss_dsi_cmd_dma_add(tp, &pkt_size_cmd);
+ if (!ret) {
+ pr_err("%s: failed to add max_pkt_size\n",
+ __func__);
+ rp->len = 0;
+ goto end;
}
+ mdss_dsi_wait4video_eng_busy(ctrl);
+
+ mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
+ ret = mdss_dsi_cmd_dma_tx(ctrl, tp);
+ if (IS_ERR_VALUE(ret)) {
+ mdss_dsi_disable_irq(ctrl, DSI_CMD_TERM);
+ pr_err("%s: failed to tx max_pkt_size\n",
+ __func__);
+ rp->len = 0;
+ goto end;
+ }
+ pr_debug("%s: max_pkt_size=%d sent\n",
+ __func__, pkt_size);
+
mdss_dsi_buf_init(tp);
ret = mdss_dsi_cmd_dma_add(tp, cmds);
if (!ret) {
@@ -1032,7 +1031,7 @@
if (is_mdss_iommu_attached()) {
- int ret = msm_iommu_map_contig_buffer(tp->dmap,
+ ret = msm_iommu_map_contig_buffer(tp->dmap,
mdss_get_iommu_domain(domain), 0,
size, SZ_4K, 0, &(addr));
if (IS_ERR_VALUE(ret)) {
@@ -1241,7 +1240,7 @@
{
struct dcs_cmd_req *req;
int ret = -EINVAL;
-
+ int rc = 0;
mutex_lock(&ctrl->cmd_mutex);
req = mdss_dsi_cmdlist_get(ctrl);
@@ -1265,15 +1264,20 @@
* fetch dcs commands from axi bus
*/
mdss_bus_bandwidth_ctrl(1);
-
pr_debug("%s: from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("IOMMU attach failed\n");
+ mutex_unlock(&ctrl->cmd_mutex);
+ return rc;
+ }
if (req->flags & CMD_REQ_RX)
ret = mdss_dsi_cmdlist_rx(ctrl, req);
else
ret = mdss_dsi_cmdlist_tx(ctrl, req);
-
+ mdss_iommu_ctrl(0);
mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
mdss_bus_bandwidth_ctrl(0);
@@ -1377,8 +1381,10 @@
if (todo & DSI_EV_MDP_FIFO_UNDERFLOW) {
if (ctrl->recovery) {
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
mdss_dsi_sw_reset_restore(ctrl);
ctrl->recovery->fxn(ctrl->recovery->data);
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
}
}
@@ -1393,7 +1399,9 @@
spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
/* enable dsi error interrupt */
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
+ mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
}
}
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index c14f936..da2ae5f 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -454,6 +454,7 @@
mfd->ext_ad_ctrl = -1;
mfd->bl_level = 0;
+ mfd->bl_level_prev_scaled = 0;
mfd->bl_scale = 1024;
mfd->bl_min_lvl = 30;
mfd->fb_imgType = MDP_RGBA_8888;
@@ -782,6 +783,7 @@
pdata = dev_get_platdata(&mfd->pdev->dev);
if ((pdata) && (pdata->set_backlight)) {
+ mfd->bl_level_prev_scaled = mfd->bl_level_scaled;
if (!IS_CALIB_MODE_BL(mfd))
mdss_fb_scale_bl(mfd, &temp);
/*
@@ -792,13 +794,13 @@
* as well as setting bl_level to bkl_lvl even though the
* backlight has been set to the scaled value.
*/
- if (mfd->bl_level_old == temp) {
+ if (mfd->bl_level_scaled == temp) {
mfd->bl_level = bkl_lvl;
return;
}
pdata->set_backlight(pdata, temp);
mfd->bl_level = bkl_lvl;
- mfd->bl_level_old = temp;
+ mfd->bl_level_scaled = temp;
if (mfd->mdp.update_ad_input) {
update_ad_input = mfd->mdp.update_ad_input;
@@ -821,7 +823,7 @@
if ((pdata) && (pdata->set_backlight)) {
mfd->bl_level = mfd->unset_bl_level;
pdata->set_backlight(pdata, mfd->bl_level);
- mfd->bl_level_old = mfd->unset_bl_level;
+ mfd->bl_level_scaled = mfd->unset_bl_level;
mfd->bl_updated = 1;
}
}
@@ -858,6 +860,13 @@
schedule_delayed_work(&mfd->idle_notify_work,
msecs_to_jiffies(mfd->idle_time));
}
+
+ mutex_lock(&mfd->bl_lock);
+ if (!mfd->bl_updated) {
+ mfd->bl_updated = 1;
+ mdss_fb_set_backlight(mfd, mfd->bl_level_prev_scaled);
+ }
+ mutex_unlock(&mfd->bl_lock);
break;
case FB_BLANK_VSYNC_SUSPEND:
@@ -879,8 +888,9 @@
mfd->op_enable = false;
curr_pwr_state = mfd->panel_power_on;
- mfd->panel_power_on = false;
mutex_lock(&mfd->bl_lock);
+ mdss_fb_set_backlight(mfd, 0);
+ mfd->panel_power_on = false;
mfd->bl_updated = 0;
mutex_unlock(&mfd->bl_lock);
@@ -1687,7 +1697,11 @@
u32 wait_for_finish = disp_commit->wait_for_finish;
int ret = 0;
- if (!mfd || (!mfd->op_enable) || (!mfd->panel_power_on))
+ if (!mfd || (!mfd->op_enable))
+ return -EPERM;
+
+ if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL)))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
@@ -1739,7 +1753,11 @@
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- if ((!mfd->op_enable) || (!mfd->panel_power_on))
+ if (!mfd->op_enable)
+ return -EPERM;
+
+ if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL)))
return -EPERM;
if (var->xoffset > (info->var.xres_virtual - info->var.xres))
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 3416b9e..ce0a7f9 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -189,7 +189,8 @@
u32 bl_min_lvl;
u32 unset_bl_level;
u32 bl_updated;
- u32 bl_level_old;
+ u32 bl_level_scaled;
+ u32 bl_level_prev_scaled;
struct mutex bl_lock;
struct mutex lock;
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 037a183..097d568 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -620,6 +620,37 @@
return clk_rate;
}
+int mdss_iommu_ctrl(int enable)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int rc = 0;
+
+ mutex_lock(&mdp_iommu_lock);
+ pr_debug("%pS: enable %d mdata->iommu_ref_cnt %d\n",
+ __builtin_return_address(0), enable, mdata->iommu_ref_cnt);
+
+ if (enable) {
+
+ if (mdata->iommu_ref_cnt == 0)
+ rc = mdss_iommu_attach(mdata);
+ mdata->iommu_ref_cnt++;
+ } else {
+ if (mdata->iommu_ref_cnt) {
+ mdata->iommu_ref_cnt--;
+ if (mdata->iommu_ref_cnt == 0)
+ rc = mdss_iommu_dettach(mdata);
+ } else {
+ pr_err("unbalanced iommu ref\n");
+ }
+ }
+ mutex_unlock(&mdp_iommu_lock);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+ else
+ return mdata->iommu_ref_cnt;
+}
+
/**
* mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
* @enable: value of enable or disable
@@ -627,7 +658,7 @@
* Function place bus bandwidth request to allocate saved bandwidth
* if enabled or free bus bandwidth allocation if disabled.
* Bus bandwidth is required by mdp.For dsi, it only requires to send
- * dcs coammnd.
+ * dcs command. It returns an error if the bandwidth request fails.
*/
void mdss_bus_bandwidth_ctrl(int enable)
{
@@ -657,14 +688,11 @@
if (!enable) {
msm_bus_scale_client_update_request(
mdata->bus_hdl, 0);
- mdss_iommu_dettach(mdata);
pm_runtime_put(&mdata->pdev->dev);
} else {
pm_runtime_get_sync(&mdata->pdev->dev);
msm_bus_scale_client_update_request(
mdata->bus_hdl, mdata->curr_bw_uc_idx);
- if (!mdata->handoff_pending)
- mdss_iommu_attach(mdata);
}
}
@@ -794,15 +822,13 @@
{
struct iommu_domain *domain;
struct mdss_iommu_map_type *iomap;
- int i;
+ int i, rc = 0;
- mutex_lock(&mdp_iommu_lock);
MDSS_XLOG(mdata->iommu_attached);
if (mdata->iommu_attached) {
pr_debug("mdp iommu already attached\n");
- mutex_unlock(&mdp_iommu_lock);
- return 0;
+ goto end;
}
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
@@ -814,13 +840,21 @@
iomap->client_name, iomap->ctx_name);
continue;
}
- iommu_attach_device(domain, iomap->ctx);
+
+ rc = iommu_attach_device(domain, iomap->ctx);
+ if (rc) {
+ WARN(1, "mdp::iommu device attach failed rc:%d\n", rc);
+ for (i--; i >= 0; i--) {
+ iomap = mdata->iommu_map + i;
+ iommu_detach_device(domain, iomap->ctx);
+ }
+ goto end;
+ }
}
mdata->iommu_attached = true;
- mutex_unlock(&mdp_iommu_lock);
-
- return 0;
+end:
+ return rc;
}
int mdss_iommu_dettach(struct mdss_data_type *mdata)
@@ -829,12 +863,10 @@
struct mdss_iommu_map_type *iomap;
int i;
- mutex_lock(&mdp_iommu_lock);
MDSS_XLOG(mdata->iommu_attached);
if (!mdata->iommu_attached) {
pr_debug("mdp iommu already dettached\n");
- mutex_unlock(&mdp_iommu_lock);
return 0;
}
@@ -851,7 +883,6 @@
}
mdata->iommu_attached = false;
- mutex_unlock(&mdp_iommu_lock);
return 0;
}
@@ -2538,7 +2569,6 @@
mdata->fs_ena = true;
} else {
pr_debug("Disable MDP FS\n");
- mdss_iommu_dettach(mdata);
if (mdata->fs_ena) {
regulator_disable(mdata->fs);
if (!mdata->ulps) {
@@ -2558,22 +2588,31 @@
* MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
* mode displays with Ultra-Low Power State (ULPS) feature enabled. Upon
* subsequent frame update, MDSS GDSC needs to turned back on and hw state
- * needs to be restored.
+ * needs to be restored. It returns an error if the footswitch control
+ * API fails.
*/
-void mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev)
+int mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int rc = 0;
pr_debug("called on=%d\n", on);
if (on) {
pm_runtime_get_sync(dev);
- mdss_iommu_attach(mdata);
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("mdss iommu attach failed rc=%d\n", rc);
+ return rc;
+ }
mdss_hw_init(mdata);
mdata->ulps = false;
+ mdss_iommu_ctrl(0);
} else {
mdata->ulps = true;
pm_runtime_put_sync(dev);
}
+
+ return 0;
}
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 0c10d8c..8e83f51 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -736,7 +736,7 @@
int mdss_mdp_wb_set_secure(struct msm_fb_data_type *mfd, int enable);
int mdss_mdp_wb_get_secure(struct msm_fb_data_type *mfd, uint8_t *enable);
void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl);
-void mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev);
+int mdss_mdp_footswitch_ctrl_ulps(int on, struct device *dev);
int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe);
#define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index 307d247..4b7f89d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -24,6 +24,7 @@
#include "mdss_mdp.h"
#include "mdss_debug.h"
#include "mdss_mdp_trace.h"
+#include "mdss_debug.h"
static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl);
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
@@ -830,7 +831,7 @@
if (!ctl || !ctl->mdata)
return;
-
+ ATRACE_BEGIN(__func__);
mdata = ctl->mdata;
for (i = 0; i < mdata->nctl; i++) {
struct mdss_mdp_ctl *ctl;
@@ -845,8 +846,10 @@
bus_ab_quota = apply_fudge_factor(bw_sum_of_intfs,
&mdss_res->ab_factor);
trace_mdp_perf_update_bus(bus_ab_quota, bus_ib_quota);
+ ATRACE_INT("bus_quota", bus_ib_quota);
mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
pr_debug("ab=%llu ib=%llu\n", bus_ab_quota, bus_ib_quota);
+ ATRACE_END(__func__);
}
/**
@@ -929,7 +932,7 @@
if (!ctl || !ctl->mdata)
return;
-
+ ATRACE_BEGIN(__func__);
mutex_lock(&mdss_mdp_ctl_lock);
mdata = ctl->mdata;
@@ -988,11 +991,13 @@
}
clk_rate = mdss_mdp_select_clk_lvl(ctl, clk_rate);
+ ATRACE_INT("mdp_clk", clk_rate);
mdss_mdp_set_clk_rate(clk_rate);
pr_debug("update clk rate = %d HZ\n", clk_rate);
}
mutex_unlock(&mdss_mdp_ctl_lock);
+ ATRACE_END(__func__);
}
static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
@@ -2499,8 +2504,10 @@
return 0;
}
+ ATRACE_BEGIN("wait_fnc");
if (ctl->wait_fnc)
ret = ctl->wait_fnc(ctl, NULL);
+ ATRACE_END("wait_fnc");
trace_mdp_commit(ctl);
@@ -2569,13 +2576,16 @@
if (is_bw_released || mixer1_changed || mixer2_changed
|| ctl->force_screen_state) {
+ ATRACE_BEGIN("prepare_fnc");
if (ctl->prepare_fnc)
ret = ctl->prepare_fnc(ctl, arg);
+ ATRACE_END("prepare_fnc");
if (ret) {
pr_err("error preparing display\n");
goto done;
}
+ ATRACE_BEGIN("mixer_programming");
mdss_mdp_ctl_perf_update(ctl, 1);
if (mixer1_changed)
@@ -2591,21 +2601,29 @@
sctl->opmode);
sctl->flush_bits |= BIT(17);
}
+ ATRACE_END("mixer_programming");
}
+ ATRACE_BEGIN("frame_ready");
if (!ctl->shared_lock)
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
+ ATRACE_END("frame_ready");
+ ATRACE_BEGIN("wait_pingpong");
if (ctl->wait_pingpong)
ctl->wait_pingpong(ctl, NULL);
+ ATRACE_END("wait_pingpong");
ctl->roi_bkup.w = ctl->roi.w;
ctl->roi_bkup.h = ctl->roi.h;
+ ATRACE_BEGIN("postproc_programming");
if (ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER)
/* postprocessing setup, including dspp */
mdss_mdp_pp_setup_locked(ctl);
+ ATRACE_END("postproc_programming");
+ ATRACE_BEGIN("flush_kickoff");
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl->flush_bits);
if (sctl) {
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
@@ -2622,6 +2640,7 @@
pr_warn("error displaying frame\n");
ctl->play_cnt++;
+ ATRACE_END("flush_kickoff");
done:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index 991eb06..350ea1b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -189,6 +189,7 @@
{
unsigned long flags;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int rc;
if (!ctx->panel_on)
return;
@@ -201,6 +202,10 @@
if (cancel_delayed_work_sync(&ctx->ulps_work))
pr_debug("deleted pending ulps work\n");
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc))
+ pr_err("IOMMU attach failed\n");
+
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
if (ctx->ulps) {
@@ -243,6 +248,7 @@
mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
mdss_mdp_ctl_intf_event
(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
+ mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
if (ctx->panel_on)
schedule_delayed_work(&ctx->ulps_work, ULPS_ENTER_TIME);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index eff708c..e5bba31 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -333,6 +333,8 @@
frame_rate = 24;
msleep((1000/frame_rate) + 1);
}
+
+ mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
ctx->timegen_en = false;
@@ -717,6 +719,12 @@
pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("IOMMU attach failed\n");
+ return rc;
+ }
+
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 02e7b75..7bf03eb 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -442,6 +442,7 @@
rc = 0;
}
+ mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); /* clock off */
ctx->comp_cnt--;
@@ -501,6 +502,11 @@
INIT_COMPLETION(ctx->wb_comp);
mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
+ ret = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("IOMMU attach failed\n");
+ return ret;
+ }
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
wmb();
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 53eccc6..2141659 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -277,7 +277,8 @@
* mdp clock requirement
*/
if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
- && !pipe->bwc_mode && !pipe->src_fmt->tile)
+ && !pipe->bwc_mode && !pipe->src_fmt->tile &&
+ !pipe->scale.enable_pxl_ext)
pipe->vert_deci++;
else
return -EPERM;
@@ -409,17 +410,33 @@
}
if (req->id == MSMFB_NEW_REQUEST) {
- if (req->flags & MDP_OV_PIPE_FORCE_DMA)
- pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
- else if (fmt->is_yuv || (req->flags & MDP_OV_PIPE_SHARE))
- pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
- else
- pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+ switch (req->pipe_type) {
+ case PIPE_TYPE_VIG:
+ pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+ break;
+ case PIPE_TYPE_RGB:
+ pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+ break;
+ case PIPE_TYPE_DMA:
+ pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+ break;
+ case PIPE_TYPE_AUTO:
+ default:
+ if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+ pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+ else if (fmt->is_yuv ||
+ (req->flags & MDP_OV_PIPE_SHARE))
+ pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
+ else
+ pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+ break;
+ }
pipe = mdss_mdp_pipe_alloc(mixer, pipe_type);
/* VIG pipes can also support RGB format */
- if (!pipe && pipe_type == MDSS_MDP_PIPE_TYPE_RGB) {
+ if ((req->pipe_type == PIPE_TYPE_AUTO) && !pipe &&
+ (pipe_type == MDSS_MDP_PIPE_TYPE_RGB)) {
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
pipe = mdss_mdp_pipe_alloc(mixer, pipe_type);
}
@@ -573,14 +590,21 @@
}
}
- if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scale.enable_pxl_ext) {
+ /*
+ * When scaling is enabled src crop and image
+ * width and height is modified by user
+ */
+ if ((pipe->flags & MDP_DEINTERLACE)) {
if (pipe->flags & MDP_SOURCE_ROTATED_90) {
pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
pipe->src.x &= ~1;
- pipe->src.w /= 2;
- pipe->img_width /= 2;
+ if (!pipe->scale.enable_pxl_ext) {
+ pipe->src.w /= 2;
+ pipe->img_width /= 2;
+ }
} else {
- pipe->src.h /= 2;
+ if (!pipe->scale.enable_pxl_ext)
+ pipe->src.h /= 2;
pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
pipe->src.y &= ~1;
}
@@ -678,12 +702,17 @@
int num_planes,
u32 flags)
{
- int i, rc = 0;
+ int i, rc;
if ((num_planes <= 0) || (num_planes > MAX_PLANES))
return -EINVAL;
- mdss_bus_bandwidth_ctrl(1);
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("Iommu attach failed");
+ goto end;
+ }
+
memset(data, 0, sizeof(*data));
for (i = 0; i < num_planes; i++) {
data->p[i].flags = flags;
@@ -697,24 +726,28 @@
break;
}
}
- mdss_bus_bandwidth_ctrl(0);
+ mdss_iommu_ctrl(0);
data->num_planes = i;
-
+end:
return rc;
}
int mdss_mdp_overlay_free_buf(struct mdss_mdp_data *data)
{
- int i;
+ int i, rc;
- mdss_bus_bandwidth_ctrl(1);
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("Iommu attach failed");
+ return rc;
+ }
+
for (i = 0; i < data->num_planes && data->p[i].len; i++)
mdss_mdp_put_img(&data->p[i]);
- mdss_bus_bandwidth_ctrl(0);
+ mdss_iommu_ctrl(0);
data->num_planes = 0;
-
return 0;
}
@@ -874,14 +907,18 @@
if (ctl->power_on) {
if (mdp5_data->mdata->ulps) {
- mdss_mdp_footswitch_ctrl_ulps(1, &mfd->pdev->dev);
+ rc = mdss_mdp_footswitch_ctrl_ulps(1, &mfd->pdev->dev);
+ if (rc) {
+ pr_err("footswtich control power on failed rc=%d\n",
+ rc);
+ goto end;
+ }
+
mdss_mdp_ctl_restore(ctl);
}
if (!mdp5_data->mdata->batfet)
mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
- if (!mfd->panel_info->cont_splash_enabled)
- mdss_iommu_attach(mdp5_data->mdata);
mdss_mdp_release_splash_pipe(mfd);
return 0;
}
@@ -891,7 +928,7 @@
rc = pm_runtime_get_sync(&mfd->pdev->dev);
if (IS_ERR_VALUE(rc)) {
pr_err("unable to resume with pm_runtime_get_sync rc=%d\n", rc);
- return rc;
+ goto end;
}
/*
@@ -903,9 +940,15 @@
* we would have called in to TZ to restore security configs from LK.
*/
if (!is_mdss_iommu_attached()) {
- if (!mfd->panel_info->cont_splash_enabled)
- mdss_iommu_attach(mdss_res);
- mdss_hw_init(mdss_res);
+ if (!mfd->panel_info->cont_splash_enabled) {
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("iommu attach failed rc=%d\n", rc);
+ goto pm_error;
+ }
+ mdss_hw_init(mdss_res);
+ mdss_iommu_ctrl(0);
+ }
}
rc = mdss_mdp_ctl_start(ctl, false);
@@ -916,17 +959,19 @@
&mfd->mdp_sync_pt_data.notifier);
} else {
pr_err("mdp ctl start failed.\n");
- goto error;
+ goto ctl_error;
}
rc = mdss_mdp_splash_cleanup(mfd, true);
-error:
- if (rc) {
- mdss_mdp_ctl_destroy(ctl);
- mdp5_data->ctl = NULL;
- pm_runtime_put(&mfd->pdev->dev);
- }
+ if (!rc)
+ goto end;
+ctl_error:
+ mdss_mdp_ctl_destroy(ctl);
+ mdp5_data->ctl = NULL;
+pm_error:
+ pm_runtime_put(&mfd->pdev->dev);
+end:
return rc;
}
@@ -1033,7 +1078,9 @@
mdss_mdp_display_commit(ctl, NULL);
mdss_mdp_display_wait4comp(ctl);
+ ATRACE_BEGIN("sspp_programming");
__overlay_queue_pipes(mfd);
+ ATRACE_END("sspp_programming");
mdss_mdp_display_commit(ctl, NULL);
mdss_mdp_display_wait4comp(ctl);
@@ -1049,6 +1096,7 @@
int sd_in_pipe = 0;
bool need_cleanup = false;
+ ATRACE_BEGIN(__func__);
if (ctl->shared_lock) {
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
@@ -1095,12 +1143,19 @@
need_cleanup = true;
}
+ ATRACE_BEGIN("sspp_programming");
ret = __overlay_queue_pipes(mfd);
+ ATRACE_END("sspp_programming");
- if (mfd->panel.type == WRITEBACK_PANEL)
+ if (mfd->panel.type == WRITEBACK_PANEL) {
+ ATRACE_BEGIN("wb_kickoff");
ret = mdss_mdp_wb_kickoff(mfd);
- else
+ ATRACE_END("wb_kickoff");
+ } else {
+ ATRACE_BEGIN("display_commit");
ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL);
+ ATRACE_END("display_commit");
+ }
if (!need_cleanup) {
atomic_set(&mfd->kickoff_pending, 0);
@@ -1114,7 +1169,9 @@
mutex_unlock(&mdp5_data->ov_lock);
mdss_mdp_overlay_update_pm(mdp5_data);
+ ATRACE_BEGIN("display_wait4comp");
ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
+ ATRACE_END("display_wait4comp");
mutex_lock(&mdp5_data->ov_lock);
if (ret == 0) {
@@ -1129,7 +1186,9 @@
mdss_fb_update_notify_update(mfd);
commit_fail:
+ ATRACE_BEGIN("overlay_cleanup");
mdss_mdp_overlay_cleanup(mfd);
+ ATRACE_END("overlay_cleanup");
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
if (need_cleanup) {
@@ -1139,7 +1198,7 @@
mutex_unlock(&mdp5_data->ov_lock);
if (ctl->shared_lock)
mutex_unlock(ctl->shared_lock);
-
+ ATRACE_END(__func__);
return ret;
}
@@ -1320,9 +1379,6 @@
flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
flags |= (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION);
- if (!mfd->panel_info->cont_splash_enabled)
- mdss_iommu_attach(mdata);
-
src_data = &pipe->back_buf;
if (src_data->num_planes) {
pr_warn("dropped buffer pnum=%d play=%d addr=0x%x\n",
@@ -1334,6 +1390,7 @@
if (IS_ERR_VALUE(ret)) {
pr_err("src_data pmem error\n");
}
+
mdss_mdp_pipe_unmap(pipe);
return ret;
@@ -1533,12 +1590,20 @@
if (mutex_lock_interruptible(&mdp5_data->ov_lock))
return;
- if (!mfd->panel_power_on) {
+ if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+ (mfd->panel.type == MIPI_CMD_PANEL))) {
mutex_unlock(&mdp5_data->ov_lock);
return;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+ ret = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("IOMMU attach failed\n");
+ goto pan_display_error;
+ }
+
bpp = fbi->var.bits_per_pixel / 8;
offset = fbi->var.xoffset * bpp +
fbi->var.yoffset * fbi->fix.line_length;
@@ -1605,10 +1670,12 @@
(fbi->var.activate & FB_ACTIVATE_FORCE))
mfd->mdp.kickoff_fnc(mfd, NULL);
+ mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return;
pan_display_error:
+ mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&mdp5_data->ov_lock);
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 42e7ed2..7159a0a 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -1094,39 +1094,51 @@
}
}
- if (pipe->scale.enable_pxl_ext &&
- pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+ if (pipe->scale.enable_pxl_ext) {
+ if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+ /*program x,y initial phase and phase step*/
+ writel_relaxed(pipe->scale.init_phase_x[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
+ writel_relaxed(pipe->scale.phase_step_x[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
+ writel_relaxed(pipe->scale.init_phase_x[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
+ writel_relaxed(pipe->scale.phase_step_x[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
- /*program x,y initial phase and phase step*/
- writel_relaxed(pipe->scale.init_phase_x[0],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEX);
- writel_relaxed(pipe->scale.phase_step_x[0],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPX);
- writel_relaxed(pipe->scale.init_phase_x[1],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX);
- writel_relaxed(pipe->scale.phase_step_x[1],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+ writel_relaxed(pipe->scale.init_phase_y[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
+ writel_relaxed(pipe->scale.phase_step_y[0],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
+ writel_relaxed(pipe->scale.init_phase_y[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
+ writel_relaxed(pipe->scale.phase_step_y[1],
+ pipe->base +
+ MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+ } else {
- writel_relaxed(pipe->scale.init_phase_y[0],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY);
- writel_relaxed(pipe->scale.phase_step_y[0],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C03_PHASESTEPY);
- writel_relaxed(pipe->scale.init_phase_y[1],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY);
- writel_relaxed(pipe->scale.phase_step_y[1],
- pipe->base + MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
-
+ writel_relaxed(pipe->scale.phase_step_x[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+ writel_relaxed(pipe->scale.phase_step_y[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+ writel_relaxed(pipe->scale.init_phase_x[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_INIT_PHASE_X);
+ writel_relaxed(pipe->scale.init_phase_y[0],
+ pipe->base +
+ MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
+ }
/*program pixel extn values for the SSPP*/
mdss_mdp_pipe_program_pixel_extn(pipe);
- } else if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
- writel_relaxed(phasex_step, pipe->base +
- MDSS_MDP_REG_SCALE_PHASE_STEP_X);
- writel_relaxed(phasey_step, pipe->base +
- MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
- writel_relaxed(init_phasex, pipe->base +
- MDSS_MDP_REG_SCALE_INIT_PHASE_X);
- writel_relaxed(init_phasey, pipe->base +
- MDSS_MDP_REG_SCALE_INIT_PHASE_Y);
} else {
writel_relaxed(phasex_step, pipe->base +
MDSS_MDP_REG_SCALE_PHASE_STEP_X);
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index ee892e2..ff8c6b8 100755
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -23,6 +23,7 @@
#include "mdss_mdp.h"
#include "mdss_mdp_rotator.h"
#include "mdss_fb.h"
+#include "mdss_debug.h"
#define MAX_ROTATOR_SESSIONS 8
@@ -281,8 +282,9 @@
pr_err("unable to queue rot data\n");
goto error;
}
-
+ ATRACE_BEGIN("rotator_kickoff");
ret = mdss_mdp_rotator_kickoff(rot_ctl, rot, dst_data);
+ ATRACE_END("rotator_kickoff");
return ret;
error:
@@ -690,7 +692,6 @@
struct msmfb_overlay_data *req)
{
struct mdss_mdp_rotator_session *rot;
- struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int ret;
u32 flgs;
@@ -710,9 +711,6 @@
goto dst_buf_fail;
}
- if (!mfd->panel_info->cont_splash_enabled)
- mdss_iommu_attach(mdp5_data->mdata);
-
mdss_mdp_overlay_free_buf(&rot->src_buf);
ret = mdss_mdp_overlay_get_buf(mfd, &rot->src_buf, &req->data, 1, flgs);
if (ret) {
diff --git a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
index 838f58f..829806d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
+++ b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
@@ -109,7 +109,7 @@
{
struct iommu_domain *domain;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
- int rc;
+ int rc, ret;
/*
* iommu dynamic attach for following conditions.
@@ -139,9 +139,9 @@
if (rc) {
pr_debug("iommu memory mapping failed rc=%d\n", rc);
} else {
- rc = mdss_iommu_attach(mdss_res);
- if (rc) {
- pr_debug("mdss iommu attach failed\n");
+ ret = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("mdss iommu attach failed\n");
iommu_unmap(domain, mdp5_data->splash_mem_addr,
mdp5_data->splash_mem_size);
} else {
@@ -167,6 +167,8 @@
iommu_unmap(domain, mdp5_data->splash_mem_addr,
mdp5_data->splash_mem_size);
+ mdss_iommu_ctrl(0);
+
mfd->splash_info.iommu_dynamic_attached = false;
}
}
@@ -246,12 +248,6 @@
}
mdss_mdp_footswitch_ctrl_splash(0);
- if (!is_mdss_iommu_attached()) {
- rc = mdss_iommu_attach(mdss_res);
- if (rc)
- pr_err("iommu attach failed rc=%d\n", rc);
- }
-
end:
return rc;
}
diff --git a/drivers/video/msm/mdss/mdss_mdp_trace.h b/drivers/video/msm/mdss/mdss_mdp_trace.h
index 0e0c1e7..33fe3a4 100644
--- a/drivers/video/msm/mdss/mdss_mdp_trace.h
+++ b/drivers/video/msm/mdss/mdss_mdp_trace.h
@@ -215,6 +215,40 @@
__entry->kickoff_cnt)
);
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+);
+
+TRACE_EVENT(mdp_trace_counter,
+	TP_PROTO(int pid, const char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+);
+
#endif /* if !defined(TRACE_MDSS_MDP_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index aa17472..6086e28 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -427,11 +427,20 @@
buf = &node->buf_data.p[0];
if (wb->is_secure)
buf->flags |= MDP_SECURE_OVERLAY_SESSION;
+
+ ret = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("IOMMU attach failed\n");
+ goto register_fail;
+ }
ret = mdss_mdp_get_img(data, buf);
if (IS_ERR_VALUE(ret)) {
pr_err("error getting buffer info\n");
+ mdss_iommu_ctrl(0);
goto register_fail;
}
+ mdss_iommu_ctrl(0);
+
memcpy(&node->buf_info, data, sizeof(*data));
ret = mdss_mdp_wb_register_node(wb, node);
@@ -471,7 +480,6 @@
{
struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
struct mdss_mdp_wb_data *node = NULL;
- struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int ret = 0;
if (!wb) {
@@ -481,9 +489,6 @@
pr_debug("fb%d queue\n", wb->fb_ndx);
- if (!mfd->panel_info->cont_splash_enabled)
- mdss_iommu_attach(mdp5_data->mdata);
-
mutex_lock(&wb->lock);
if (local)
node = get_local_node(wb, data);
@@ -736,7 +741,13 @@
}
break;
case MSMFB_WRITEBACK_TERMINATE:
+ ret = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("IOMMU attach failed\n");
+ return ret;
+ }
ret = mdss_mdp_wb_terminate(mfd);
+ mdss_iommu_ctrl(0);
break;
case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
if (!copy_from_user(&hint, arg, sizeof(hint))) {
@@ -849,19 +860,25 @@
EXPORT_SYMBOL(msm_fb_writeback_set_secure);
/**
- * msm_fb_writeback_iommu_ref() - Power ON/OFF mdp clock
- * @enable - true/false to Power ON/OFF mdp clock
+ * msm_fb_writeback_iommu_ref() - Add/Remove vote on MDSS IOMMU being attached.
+ * @enable - true adds vote on MDSS IOMMU, false removes the vote.
*
- * Call to enable mdp clock at start of mdp_mmap/mdp_munmap API and
- * to disable mdp clock at end of these API's to ensure iommu is in
- * proper state while driver map/un-map any buffers.
+ * Call to vote on the MDSS IOMMU being enabled, ensuring buffers are
+ * properly mapped to the IOMMU context bank.
*/
int msm_fb_writeback_iommu_ref(struct fb_info *info, int enable)
{
- if (enable)
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
- else
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+ int ret;
+
+ if (enable) {
+ ret = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("IOMMU attach failed\n");
+ return ret;
+ }
+ } else {
+ mdss_iommu_ctrl(0);
+ }
return 0;
}
diff --git a/fs/namei.c b/fs/namei.c
index c427919..a87e323 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2072,6 +2072,13 @@
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, nd);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -2547,6 +2554,13 @@
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
+ if (error)
+ return error;
+
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
+
if (!error)
fsnotify_create(dir, dentry);
return error;
diff --git a/fs/open.c b/fs/open.c
index 5720854..56c8810 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -1054,6 +1054,7 @@
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
}
+ security_file_close(filp);
fput(filp);
return retval;
}
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 6d18d0f..c5adf38 100755
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -454,4 +454,5 @@
header-y += msm_audio_amrwbplus.h
header-y += avtimer.h
header-y += msm_ipa.h
+header-y += msm_pft.h
header-y += msm_thermal_ioctl.h
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index f78d418..1e15415 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,14 +13,18 @@
#ifndef DIAGCHAR_SHARED
#define DIAGCHAR_SHARED
-#define MSG_MASKS_TYPE 1
-#define LOG_MASKS_TYPE 2
-#define EVENT_MASKS_TYPE 4
-#define PKT_TYPE 8
-#define DEINIT_TYPE 16
-#define USER_SPACE_DATA_TYPE 32
-#define DCI_DATA_TYPE 64
-#define CALLBACK_DATA_TYPE 128
+#define MSG_MASKS_TYPE 0x00000001
+#define LOG_MASKS_TYPE 0x00000002
+#define EVENT_MASKS_TYPE 0x00000004
+#define PKT_TYPE 0x00000008
+#define DEINIT_TYPE 0x00000010
+#define USER_SPACE_DATA_TYPE 0x00000020
+#define DCI_DATA_TYPE 0x00000040
+#define CALLBACK_DATA_TYPE 0x00000080
+#define DCI_LOG_MASKS_TYPE 0x00000100
+#define DCI_EVENT_MASKS_TYPE 0x00000200
+#define DCI_PKT_TYPE 0x00000400
+
#define USB_MODE 1
#define MEMORY_DEVICE_MODE 2
#define NO_LOGGING_MODE 3
@@ -32,6 +36,8 @@
#define DATA_TYPE_F3 1
#define DATA_TYPE_LOG 2
#define DATA_TYPE_RESPONSE 3
+#define DATA_TYPE_DCI_LOG 0x00000100
+#define DATA_TYPE_DCI_EVENT 0x00000200
/* Different IOCTL values */
#define DIAG_IOCTL_COMMAND_REG 0
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 00eba66..fd2cc6c 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -555,6 +555,23 @@
};
/**
+ * enum mdp_overlay_pipe_type - Different pipe type set by userspace
+ *
+ * @PIPE_TYPE_AUTO: Not specified, pipe will be selected according to flags.
+ * @PIPE_TYPE_VIG: VIG pipe.
+ * @PIPE_TYPE_RGB: RGB pipe.
+ * @PIPE_TYPE_DMA: DMA pipe.
+ * @PIPE_TYPE_MAX: Used to track maximum number of pipe type.
+ */
+enum mdp_overlay_pipe_type {
+ PIPE_TYPE_AUTO = 0,
+ PIPE_TYPE_VIG,
+ PIPE_TYPE_RGB,
+ PIPE_TYPE_DMA,
+ PIPE_TYPE_MAX,
+};
+
+/**
* struct mdp_overlay - overlay surface structure
* @src: Source image information (width, height, format).
* @src_rect: Source crop rectangle, portion of image that will be fetched.
@@ -576,6 +593,7 @@
* The color should be in same format as the source image format.
* @flags: This is used to customize operation of overlay. See MDP flags
* for more information.
+ * @pipe_type: Used to specify the type of overlay pipe.
* @user_data: DEPRECATED* Used to store user application specific information.
* @bg_color: Solid color used to fill the overlay surface when no source
* buffer is provided.
@@ -608,6 +626,7 @@
uint32_t blend_op;
uint32_t transp_mask;
uint32_t flags;
+ uint32_t pipe_type;
uint32_t id;
uint32_t user_data[6];
uint32_t bg_color;
diff --git a/include/linux/msm_pft.h b/include/linux/msm_pft.h
new file mode 100644
index 0000000..4daf46b
--- /dev/null
+++ b/include/linux/msm_pft.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_PFT_H_
+#define MSM_PFT_H_
+
+#include <linux/types.h>
+
+/**
+ * enum pft_command_opcode - PFT driver command ID
+ *
+ * @PFT_CMD_OPCODE_SET_STATE -
+ * command ID to set PFT driver state
+ * @PFT_CMD_OPCODE_UPDATE_REG_APP_UID -
+ * command ID to update the list of registered application
+ * UID
+ * @PFT_CMD_OPCODE_PERFORM_IN_PLACE_FILE_ENC -
+ * command ID to perform in-place file encryption
+ */
+enum pft_command_opcode {
+ PFT_CMD_OPCODE_SET_STATE,
+ PFT_CMD_OPCODE_UPDATE_REG_APP_UID,
+ PFT_CMD_OPCODE_PERFORM_IN_PLACE_FILE_ENC,
+ /* */
+ PFT_CMD_OPCODE_MAX_COMMAND_INDEX
+};
+
+/**
+ * enum pft_state - PFT driver operational states
+ *
+ * @PFT_STATE_DEACTIVATED - driver is deactivated.
+ * @PFT_STATE_DEACTIVATING - driver is in the process of being deactivated.
+ * @PFT_STATE_KEY_REMOVED - driver is active but no encryption key is loaded.
+ * @PFT_STATE_REMOVING_KEY - driver is active, but the encryption key is being
+ * removed.
+ * @PFT_STATE_KEY_LOADED - driver is active, and the encryption key is loaded
+ * to encryption block, hence registered apps can perform file operations
+ * on encrypted files.
+ */
+enum pft_state {
+ PFT_STATE_DEACTIVATED,
+ PFT_STATE_DEACTIVATING,
+ PFT_STATE_KEY_REMOVED,
+ PFT_STATE_REMOVING_KEY,
+ PFT_STATE_KEY_LOADED,
+ /* Internal */
+ PFT_STATE_MAX_INDEX
+};
+
+/**
+ * enum pft_command_response_code - PFT response on the previous
+ * command
+ *
+ * @PFT_CMD_RESP_SUCCESS - The command was properly processed
+ * without an error.
+ * @PFT_CMD_RESP_GENERAL_ERROR -
+ * Indicates an error that cannot be better described by a
+ * more specific errors below.
+ * @PFT_CMD_RESP_INVALID_COMMAND - Invalid or unsupported
+ * command id.
+ * @PFT_CMD_RESP_INVALID_CMD_PARAMS - Invalid command
+ * parameters.
+ * @PFT_CMD_RESP_INVALID_STATE - Invalid state
+ * @PFT_CMD_RESP_ALREADY_IN_STATE - Used to indicate that
+ * the new state is equal to the existing one.
+ * @PFT_CMD_RESP_INPLACE_FILE_IS_OPEN - Used to indicate
+ * that the file that should be encrypted is already open
+ * and hence cannot be in-place encrypted.
+ * @PFT_CMD_RESP_ENT_FILES_CLOSING_FAILURE -
+ * Indicates that the PFT failed to close enterprise files
+ * @PFT_CMD_RESP_MAX_INDEX
+ */
+enum pft_command_response_code {
+ PFT_CMD_RESP_SUCCESS,
+ PFT_CMD_RESP_GENERAL_ERROR,
+ PFT_CMD_RESP_INVALID_COMMAND,
+ PFT_CMD_RESP_INVALID_CMD_PARAMS,
+ PFT_CMD_RESP_INVALID_STATE,
+ PFT_CMD_RESP_ALREADY_IN_STATE,
+ PFT_CMD_RESP_INPLACE_FILE_IS_OPEN,
+ PFT_CMD_RESP_ENT_FILES_CLOSING_FAILURE,
+ /* Internal */
+ PFT_CMD_RESP_MAX_INDEX
+};
+
+/**
+ * struct pft_command_response - response structure
+ *
+ * @command_id - ID of the command this response refers to, see enum pft_command_opcode
+ * @error_code - see enum pft_command_response_code
+ */
+struct pft_command_response {
+ __u32 command_id;
+ __u32 error_code;
+};
+
+/**
+ * struct pft_command - pft command
+ *
+ * @opcode - see enum pft_command_opcode.
+ * @set_state.state - see enum pft_state.
+ * @update_app_list.count - number of items in the
+ * registered applications list.
+ * @update_app_list.table - registered applications array
+ * @preform_in_place_file_enc.file_descriptor - file descriptor
+ * of the opened file to be encrypted in-place.
+ */
+struct pft_command {
+ __u32 opcode;
+ union {
+ struct {
+ /* @see pft_state */
+ __u32 state;
+ } set_state;
+ struct {
+ __u32 items_count; /* number of items */
+ __u32 table[0]; /* array of UIDs */
+ } update_app_list;
+ struct {
+ __u32 file_descriptor;
+ } preform_in_place_file_enc;
+ };
+};
+
+#endif /* MSM_PFT_H_ */
diff --git a/include/linux/pft.h b/include/linux/pft.h
new file mode 100644
index 0000000..36e9612
--- /dev/null
+++ b/include/linux/pft.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef PFT_H_
+#define PFT_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+
+#ifdef CONFIG_PFT
+
+/* dm-req-crypt API */
+int pft_get_key_index(struct inode *inode, u32 *key_index,
+ bool *is_encrypted, bool *is_inplace);
+
+/* block layer API */
+bool pft_allow_merge_bio(struct bio *bio1, struct bio *bio2);
+
+/* --- security hooks , called from selinux --- */
+int pft_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+int pft_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+
+int pft_file_open(struct file *filp, const struct cred *cred);
+
+int pft_file_permission(struct file *file, int mask);
+
+int pft_file_close(struct file *filp);
+
+int pft_inode_unlink(struct inode *dir, struct dentry *dentry);
+
+int pft_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t dev);
+
+int pft_inode_rename(struct inode *inode, struct dentry *dentry,
+ struct inode *new_inode, struct dentry *new_dentry);
+
+int pft_inode_set_xattr(struct dentry *dentry, const char *name);
+
+
+#else
+static inline int pft_get_key_index(struct inode *inode, u32 *key_index,
+ bool *is_encrypted, bool *is_inplace)
+{ return -ENODEV; }
+
+static inline bool pft_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{ return true; }
+
+static inline int pft_file_permission(struct file *file, int mask)
+{ return 0; }
+
+static inline int pft_inode_create(
+ struct inode *dir, struct dentry *dentry, umode_t mode)
+{ return 0; }
+
+static inline int pft_inode_post_create(
+ struct inode *dir, struct dentry *dentry, umode_t mode)
+{ return 0; }
+
+static inline int pft_file_open(struct file *filp, const struct cred *cred)
+{ return 0; }
+
+static inline int pft_file_close(struct file *filp)
+{ return 0; }
+
+static inline int pft_inode_unlink(struct inode *dir, struct dentry *dentry)
+{ return 0; }
+
+static inline int pft_inode_mknod(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev)
+{ return 0; }
+
+static inline int pft_inode_rename(struct inode *inode, struct dentry *dentry,
+ struct inode *new_inode, struct dentry *new_dentry)
+{ return 0; }
+
+static inline int pft_inode_set_xattr(struct dentry *dentry, const char *name)
+{ return 0; }
+
+#endif /* CONFIG_PFT */
+
+#endif /* PFT_H_ */
diff --git a/include/linux/security.h b/include/linux/security.h
index b62f396..0fe0a70 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -26,6 +26,7 @@
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/bio.h>
struct linux_binprm;
struct cred;
@@ -1453,6 +1454,8 @@
void **value, size_t *len);
int (*inode_create) (struct inode *dir,
struct dentry *dentry, umode_t mode);
+ int (*inode_post_create) (struct inode *dir,
+ struct dentry *dentry, umode_t mode);
int (*inode_link) (struct dentry *old_dentry,
struct inode *dir, struct dentry *new_dentry);
int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
@@ -1503,6 +1506,8 @@
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
int (*dentry_open) (struct file *file, const struct cred *cred);
+ int (*file_close) (struct file *file);
+ bool (*allow_merge_bio)(struct bio *bio1, struct bio *bio2);
int (*task_create) (unsigned long clone_flags);
void (*task_free) (struct task_struct *task);
@@ -1722,6 +1727,9 @@
const struct qstr *qstr, char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -1766,6 +1774,9 @@
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_dentry_open(struct file *file, const struct cred *cred);
+int security_file_close(struct file *file);
+bool security_allow_merge_bio(struct bio *bio1, struct bio *bio2);
+
int security_task_create(unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -2060,6 +2071,13 @@
return 0;
}
+static inline int security_inode_post_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
@@ -2262,6 +2280,16 @@
return 0;
}
+static inline int security_file_close(struct file *file)
+{
+ return 0;
+}
+
+static inline bool security_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ return true; /* The default is to allow it for performance */
+}
+
static inline int security_task_create(unsigned long clone_flags)
{
return 0;
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index e8e932e..b581de8 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -382,6 +382,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_reset(struct spmi_controller *ctrl, u8 sid);
@@ -397,6 +398,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_sleep(struct spmi_controller *ctrl, u8 sid);
@@ -413,6 +415,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_wakeup(struct spmi_controller *ctrl, u8 sid);
@@ -428,6 +431,7 @@
* -EPERM if the SPMI transaction is denied due to permission issues.
* -EIO if the SPMI transaction fails (parity errors, etc).
* -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable
*/
extern int spmi_command_shutdown(struct spmi_controller *ctrl, u8 sid);
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index b1b54cb..d583601 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -486,6 +486,8 @@
CFG_SET_DEFAULT_FOCUS,
CFG_MOVE_FOCUS,
CFG_SET_POSITION,
+ CFG_ACTUATOR_POWERDOWN,
+ CFG_ACTUATOR_POWERUP,
};
enum actuator_type {
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 30e7d06..e627977 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -154,6 +154,7 @@
uint8_t buf_divert; /* if TRUE no vb2 buf done. */
/*Return values*/
uint32_t axi_stream_handle;
+ uint32_t burst_len;
};
struct msm_vfe_axi_stream_release_cmd {
@@ -225,6 +226,7 @@
uint8_t num_streams;
uint32_t stream_handle[MSM_ISP_STATS_MAX];
uint8_t enable;
+ uint32_t stats_burst_len;
};
enum msm_vfe_reg_cfg_type {
@@ -242,6 +244,7 @@
GET_MAX_CLK_RATE,
VFE_HW_UPDATE_LOCK,
VFE_HW_UPDATE_UNLOCK,
+ SET_WM_UB_SIZE,
};
struct msm_vfe_cfg_cmd2 {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7..cada6a7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -368,7 +368,7 @@
static inline void free_leaf(struct leaf *l)
{
- call_rcu_bh(&l->rcu, __leaf_free_rcu);
+ call_rcu(&l->rcu, __leaf_free_rcu);
}
static inline void free_leaf_info(struct leaf_info *leaf)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a8d7ed0..87f05b2 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -879,10 +879,12 @@
/* Copy the address and add cmsg data. */
if (family == AF_INET) {
sin = (struct sockaddr_in *) msg->msg_name;
- sin->sin_family = AF_INET;
- sin->sin_port = 0 /* skb->h.uh->source */;
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ if (sin) {
+ sin->sin_family = AF_INET;
+ sin->sin_port = 0 /* skb->h.uh->source */;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
if (isk->cmsg_flags)
ip_cmsg_recv(msg, skb);
@@ -892,17 +894,19 @@
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *ip6 = ipv6_hdr(skb);
sin6 = (struct sockaddr_in6 *) msg->msg_name;
- sin6->sin6_family = AF_INET6;
- sin6->sin6_port = 0;
- sin6->sin6_addr = ip6->saddr;
+ if (sin6) {
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
+ sin6->sin6_addr = ip6->saddr;
- sin6->sin6_flowinfo = 0;
- if (np->sndflow)
- sin6->sin6_flowinfo =
- *(__be32 *)ip6 & IPV6_FLOWINFO_MASK;
+ sin6->sin6_flowinfo = 0;
+ if (np->sndflow)
+ sin6->sin6_flowinfo =
+ *(__be32 *)ip6 & IPV6_FLOWINFO_MASK;
- sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
- IP6CB(skb)->iif);
+ sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
+ IP6CB(skb)->iif);
+ }
if (inet6_sk(sk)->rxopt.all)
pingv6_ops.datagram_recv_ctl(sk, msg, skb);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cf82dbd..4ff640a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1963,8 +1963,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ goto nla_put_failure;
cookie.msg = msg;
cookie.idx = key_idx;
@@ -4245,6 +4245,10 @@
err = -EINVAL;
goto out_free;
}
+
+ if (!wiphy->bands[band])
+ continue;
+
err = ieee80211_get_ratemask(wiphy->bands[band],
nla_data(attr),
nla_len(attr),
@@ -5409,6 +5413,9 @@
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
+ if (!hdr)
+ break;
+
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
genlmsg_cancel(skb, hdr);
break;
@@ -5817,9 +5824,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_REMAIN_ON_CHANNEL);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -6100,9 +6106,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_FRAME);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
}
@@ -6662,9 +6667,8 @@
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_PROBE_CLIENT);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 87547ca..94c06df 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -65,11 +65,45 @@
return is_all_idle;
}
+
+static bool cfg80211_is_all_countryie_ignore(void)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ bool is_all_countryie_ignore = true;
+
+ mutex_lock(&cfg80211_mutex);
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ cfg80211_lock_rdev(rdev);
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ wdev_lock(wdev);
+ if (!(wdev->wiphy->country_ie_pref &
+ NL80211_COUNTRY_IE_IGNORE_CORE)) {
+ is_all_countryie_ignore = false;
+ wdev_unlock(wdev);
+ cfg80211_unlock_rdev(rdev);
+ goto out;
+ }
+ wdev_unlock(wdev);
+ }
+ cfg80211_unlock_rdev(rdev);
+ }
+out:
+ mutex_unlock(&cfg80211_mutex);
+
+ return is_all_countryie_ignore;
+}
+
+
static void disconnect_work(struct work_struct *work)
{
if (!cfg80211_is_all_idle())
return;
+ if (cfg80211_is_all_countryie_ignore())
+ return;
+
regulatory_hint_disconnect();
}
diff --git a/security/security.c b/security/security.c
index cecd55e..cc355c0 100644
--- a/security/security.c
+++ b/security/security.c
@@ -471,6 +471,16 @@
}
EXPORT_SYMBOL_GPL(security_inode_create);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ if (security_ops->inode_post_create == NULL)
+ return 0;
+ return security_ops->inode_post_create(dir, dentry, mode);
+}
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
@@ -732,6 +742,22 @@
return fsnotify_perm(file, MAY_OPEN);
}
+int security_file_close(struct file *file)
+{
+ if (security_ops->file_close)
+ return security_ops->file_close(file);
+
+ return 0;
+}
+
+bool security_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ if (security_ops->allow_merge_bio)
+ return security_ops->allow_merge_bio(bio1, bio2);
+
+ return true;
+}
+
int security_task_create(unsigned long clone_flags)
{
return security_ops->task_create(clone_flags);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c868a74..50b003a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -82,6 +82,7 @@
#include <linux/export.h>
#include <linux/msg.h>
#include <linux/shm.h>
+#include <linux/pft.h>
#include "avc.h"
#include "objsec.h"
@@ -1617,9 +1618,15 @@
if (rc)
return rc;
- return avc_has_perm(newsid, sbsec->sid,
- SECCLASS_FILESYSTEM,
- FILESYSTEM__ASSOCIATE, &ad);
+ rc = avc_has_perm(newsid, sbsec->sid,
+ SECCLASS_FILESYSTEM,
+ FILESYSTEM__ASSOCIATE, &ad);
+ if (rc)
+ return rc;
+
+ rc = pft_inode_mknod(dir, dentry, 0, 0);
+
+ return rc;
}
/* Check whether a task can create a key. */
@@ -1678,6 +1685,12 @@
}
rc = avc_has_perm(sid, isec->sid, isec->sclass, av, &ad);
+ if (rc)
+ return rc;
+
+ if (kind == MAY_UNLINK)
+ rc = pft_inode_unlink(dir, dentry);
+
return rc;
}
@@ -2684,9 +2697,25 @@
static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
+ int ret;
+
+ ret = pft_inode_create(dir, dentry, mode);
+ if (ret < 0)
+ return ret;
+
return may_create(dir, dentry, SECCLASS_FILE);
}
+static int selinux_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ int ret;
+
+ ret = pft_inode_post_create(dir, dentry, mode);
+
+ return ret;
+}
+
static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
return may_link(dir, old_dentry, MAY_LINK);
@@ -2720,6 +2749,12 @@
static int selinux_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
struct inode *new_inode, struct dentry *new_dentry)
{
+ int rc;
+
+ rc = pft_inode_rename(old_inode, old_dentry, new_inode, new_dentry);
+ if (rc)
+ return rc;
+
return may_rename(old_inode, old_dentry, new_inode, new_dentry);
}
@@ -2800,6 +2835,10 @@
{
const struct cred *cred = current_cred();
+ if (pft_inode_set_xattr(dentry, name) < 0)
+ return -EACCES;
+
+
if (!strncmp(name, XATTR_SECURITY_PREFIX,
sizeof XATTR_SECURITY_PREFIX - 1)) {
if (!strcmp(name, XATTR_NAME_CAPS)) {
@@ -3023,11 +3062,16 @@
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
u32 sid = current_sid();
+ int ret;
if (!mask)
/* No permission to check. Existence test. */
return 0;
+ ret = pft_file_permission(file, mask);
+ if (ret < 0)
+ return ret;
+
if (sid == fsec->sid && fsec->isid == isec->sid &&
fsec->pseqno == avc_policy_seqno())
/* No change since dentry_open check. */
@@ -3294,6 +3338,11 @@
struct file_security_struct *fsec;
struct inode *inode;
struct inode_security_struct *isec;
+ int ret;
+
+ ret = pft_file_open(file, cred);
+ if (ret < 0)
+ return ret;
inode = file->f_path.dentry->d_inode;
fsec = file->f_security;
@@ -3318,6 +3367,16 @@
return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
}
+static int selinux_file_close(struct file *file)
+{
+ return pft_file_close(file);
+}
+
+static bool selinux_allow_merge_bio(struct bio *bio1, struct bio *bio2)
+{
+ return pft_allow_merge_bio(bio1, bio2);
+}
+
/* task security operations */
static int selinux_task_create(unsigned long clone_flags)
@@ -5629,6 +5688,7 @@
.inode_free_security = selinux_inode_free_security,
.inode_init_security = selinux_inode_init_security,
.inode_create = selinux_inode_create,
+ .inode_post_create = selinux_inode_post_create,
.inode_link = selinux_inode_link,
.inode_unlink = selinux_inode_unlink,
.inode_symlink = selinux_inode_symlink,
@@ -5664,6 +5724,8 @@
.file_receive = selinux_file_receive,
.dentry_open = selinux_dentry_open,
+ .file_close = selinux_file_close,
+ .allow_merge_bio = selinux_allow_merge_bio,
.task_create = selinux_task_create,
.cred_alloc_blank = selinux_cred_alloc_blank,
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 26c7eee..d160760 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -43,6 +43,7 @@
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
unsigned char initialized; /* initialization flag */
+ u32 tag; /* Per-File-Encryption tag */
struct mutex lock;
};
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 185f849..72b20b1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1229,6 +1229,10 @@
struct context context;
int rc = 0;
+ /* An empty security context is never valid. */
+ if (!scontext_len)
+ return -EINVAL;
+
if (!ss_initialized) {
int i;
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 38d7901..314c4f9 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -4139,6 +4139,12 @@
core = dev_get_drvdata(codec->dev->parent);
+ if(core == NULL) {
+ dev_err(codec->dev, "%s: core is null\n",
+ __func__);
+ return -EINVAL;
+ }
+
dev_dbg(codec->dev, "%s: event called! codec name %s\n",
__func__, w->codec->name);
dev_dbg(codec->dev, "%s: num_dai %d stream name %s event %d\n",
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 4c5d327..5602dd1 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -2555,9 +2555,10 @@
WCD9XXX_CLSH_STATE_LO,
WCD9XXX_CLSH_REQ_ENABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
- pr_debug("%s: sleeping 3 ms after %s PA turn on\n",
+ pr_debug("%s: sleeping 5 ms after %s PA turn on\n",
__func__, w->name);
- usleep_range(3000, 3000);
+ /* Wait for CnP time after PA enable */
+ usleep_range(5000, 5100);
break;
case SND_SOC_DAPM_POST_PMD:
wcd9xxx_clsh_fsm(codec, &taiko->clsh_d,
@@ -2565,6 +2566,10 @@
WCD9XXX_CLSH_REQ_DISABLE,
WCD9XXX_CLSH_EVENT_POST_PA);
snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x00);
+ pr_debug("%s: sleeping 5 ms after %s PA turn off\n",
+ __func__, w->name);
+ /* Wait for CnP time after PA disable */
+ usleep_range(5000, 5100);
break;
}
return 0;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index fce1940..f6702c5 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -129,6 +129,8 @@
uint32_t stream_available;
uint32_t next_stream;
+ uint64_t marker_timestamp;
+
struct msm_compr_gapless_state gapless_state;
atomic_t start;
@@ -1064,6 +1066,7 @@
prtd->app_pointer = 0;
prtd->bytes_received = 0;
prtd->bytes_sent = 0;
+ prtd->marker_timestamp = 0;
atomic_set(&prtd->xrun, 0);
spin_unlock_irqrestore(&prtd->lock, flags);
@@ -1196,6 +1199,8 @@
prtd->first_buffer = 1;
prtd->last_buffer = 0;
prtd->gapless_state.gapless_transition = 1;
+ prtd->marker_timestamp = 0;
+
/*
Don't reset these as these vars map to
total_bytes_transferred and total_bytes_available
@@ -1251,23 +1256,23 @@
q6asm_stream_cmd_nowait(ac, CMD_PAUSE, ac->stream_id);
prtd->cmd_ack = 0;
spin_unlock_irqrestore(&prtd->lock, flags);
- pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
- __func__, ac->stream_id);
- q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
- wait_event_timeout(prtd->flush_wait,
- prtd->cmd_ack, 1 * HZ / 4);
+ /*
+ * Cache this time as last known time
+ */
+ q6asm_get_session_time(prtd->audio_client,
+ &prtd->marker_timestamp);
spin_lock_irqsave(&prtd->lock, flags);
/*
- Don't reset these as these vars map to
- total_bytes_transferred and total_bytes_available
- directly, only total_bytes_transferred will be updated
- in the next avail() ioctl
- prtd->copied_total = 0;
- prtd->bytes_received = 0;
- do not reset prtd->bytes_sent as well as the same
- session is used for gapless playback
- */
+ * Don't reset these as these vars map to
+ * total_bytes_transferred and total_bytes_available.
+ * Just total_bytes_transferred will be updated
+ * in the next avail() ioctl.
+ * prtd->copied_total = 0;
+ * prtd->bytes_received = 0;
+ * do not reset prtd->bytes_sent as well as the same
+ * session is used for gapless playback
+ */
prtd->byte_offset = 0;
prtd->app_pointer = 0;
@@ -1275,8 +1280,15 @@
prtd->last_buffer = 0;
atomic_set(&prtd->drain, 0);
atomic_set(&prtd->xrun, 1);
- q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
spin_unlock_irqrestore(&prtd->lock, flags);
+
+ pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
+ __func__, ac->stream_id);
+ q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
+ wait_event_timeout(prtd->flush_wait,
+ prtd->cmd_ack, 1 * HZ / 4);
+
+ q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
}
prtd->cmd_interrupt = 0;
break;
@@ -1404,6 +1416,8 @@
__func__, timestamp);
return -EAGAIN;
}
+ } else {
+ timestamp = prtd->marker_timestamp;
}
/* DSP returns timestamp in usec */
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 4893990..1553d1c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -3971,50 +3971,36 @@
{"AUDIO_REF_EC_UL1 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL1 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL1 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL1 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL1 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL2 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL2 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL2 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL2 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL2 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL2 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL4 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL4 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL4 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL4 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL4 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL4 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL5 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL5 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL5 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL5 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL5 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL5 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL6 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL6 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL6 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL6 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL6 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL6 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL8 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL8 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL8 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL8 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL8 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL8 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"AUDIO_REF_EC_UL9 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
- {"AUDIO_REF_EC_UL9 MUX", "I2S_RX" , "PRI_I2S_TX"},
- {"AUDIO_REF_EC_UL9 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 7c6f0ea..a5d42d5 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1703,25 +1703,22 @@
}
ret = dpcm_be_dai_prepare(fe, substream->stream);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
+ fe->dai_link->name);
goto out;
+ }
/* call prepare on the frontend */
if (!fe->fe_compr) {
ret = soc_pcm_prepare(substream);
if (ret < 0) {
- dev_err(fe->dev,"ASoC: prepare FE %s failed\n",
+ dev_err(fe->dev, "ASoC: prepare FE %s failed\n",
fe->dai_link->name);
goto out;
}
}
- ret = soc_pcm_prepare(substream);
- if (ret < 0) {
- dev_err(fe->dev,"dpcm: prepare FE %s failed\n", fe->dai_link->name);
- goto out;
- }
-
/* run the stream event for each BE */
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
dpcm_dapm_stream_event(fe, stream,