Merge "msm: lpm_levels: Adjust latency for Multi core cases"
diff --git a/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts b/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
index 4cdbada..0a3148b 100644
--- a/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
+++ b/arch/arm/boot/dts/msm8226-v2-qrd-dvt.dts
@@ -13,7 +13,6 @@
/dts-v1/;
/include/ "msm8226-v2.dtsi"
/include/ "msm8226-qrd.dtsi"
-/include/ "msm8226-camera-sensor-cdp.dtsi"
/include/ "dsi-panel-hx8394a-720p-video.dtsi"
/ {
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index fc32a59..35917c3 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -232,6 +232,83 @@
.release = seq_release,
};
+#define clock_debug_output(m, c, fmt, ...) \
+do { \
+ if (m) \
+ seq_printf(m, fmt, ##__VA_ARGS__); \
+ else if (c) \
+ pr_cont(fmt, ##__VA_ARGS__); \
+ else \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+ char *start = "";
+
+ if (!c || !c->prepare_count)
+ return 0;
+
+ clock_debug_output(m, 0, "\t");
+ do {
+ if (c->vdd_class)
+ clock_debug_output(m, 1, "%s%s:%u:%u [%ld, %lu]", start,
+ c->dbg_name, c->prepare_count, c->count,
+ c->rate, c->vdd_class->cur_level);
+ else
+ clock_debug_output(m, 1, "%s%s:%u:%u [%ld]", start,
+ c->dbg_name, c->prepare_count, c->count,
+ c->rate);
+ start = " -> ";
+ } while ((c = clk_get_parent(c)));
+
+ clock_debug_output(m, 1, "\n");
+
+ return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ *
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+ struct clk_table *table;
+ unsigned long flags;
+ int i, cnt = 0;
+
+ clock_debug_output(m, 0, "Enabled clocks:\n");
+ spin_lock_irqsave(&clk_list_lock, flags);
+ list_for_each_entry(table, &clk_list, node) {
+ for (i = 0; i < table->num_clocks; i++)
+ cnt += clock_debug_print_clock(table->clocks[i].clk, m);
+ }
+ spin_unlock_irqrestore(&clk_list_lock, flags);
+
+ if (cnt)
+ clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+ else
+ clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+ clock_debug_print_enabled_clocks(m);
+ return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+ .open = enabled_clocks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int list_rates_show(struct seq_file *m, void *unused)
{
struct clk *clock = m->private;
@@ -415,6 +492,10 @@
return -ENOMEM;
}
+ if (!debugfs_create_file("enabled_clocks", S_IRUGO, debugfs_base, NULL,
+ &enabled_clocks_fops))
+ return -ENOMEM;
+
measure = clk_get_sys("debug", "measure");
if (IS_ERR(measure))
measure = NULL;
@@ -461,55 +542,13 @@
return ret;
}
-static int clock_debug_print_clock(struct clk *c)
-{
- char *start = "";
-
- if (!c || !c->prepare_count)
- return 0;
-
- pr_info("\t");
- do {
- if (c->vdd_class)
- pr_cont("%s%s:%u:%u [%ld, %lu]", start, c->dbg_name,
- c->prepare_count, c->count, c->rate,
- c->vdd_class->cur_level);
- else
- pr_cont("%s%s:%u:%u [%ld]", start, c->dbg_name,
- c->prepare_count, c->count, c->rate);
- start = " -> ";
- } while ((c = clk_get_parent(c)));
-
- pr_cont("\n");
-
- return 1;
-}
-
-/**
- * clock_debug_print_enabled() - Print names of enabled clocks for suspend debug
- *
+/*
* Print the names of enabled clocks and their parents if debug_suspend is set
*/
void clock_debug_print_enabled(void)
{
- struct clk_table *table;
- unsigned long flags;
- int i, cnt = 0;
-
if (likely(!debug_suspend))
return;
- pr_info("Enabled clocks:\n");
- spin_lock_irqsave(&clk_list_lock, flags);
- list_for_each_entry(table, &clk_list, node) {
- for (i = 0; i < table->num_clocks; i++)
- cnt += clock_debug_print_clock(table->clocks[i].clk);
- }
- spin_unlock_irqrestore(&clk_list_lock, flags);
-
- if (cnt)
- pr_info("Enabled clock count: %d\n", cnt);
- else
- pr_info("No clocks enabled.\n");
-
+ clock_debug_print_enabled_clocks(NULL);
}
diff --git a/arch/arm/mach-msm/clock-mdss-8974.c b/arch/arm/mach-msm/clock-mdss-8974.c
index aeb4e48..47332a4 100644
--- a/arch/arm/mach-msm/clock-mdss-8974.c
+++ b/arch/arm/mach-msm/clock-mdss-8974.c
@@ -1135,7 +1135,7 @@
return rc;
}
-static int vco_enable(struct clk *c)
+static int dsi_pll_enable(struct clk *c)
{
int i, rc = 0;
struct dsi_pll_vco_clk *vco = to_vco_clk(c);
@@ -1163,7 +1163,7 @@
return rc;
}
-static void vco_disable(struct clk *c)
+static void dsi_pll_disable(struct clk *c)
{
int rc = 0;
@@ -1384,19 +1384,32 @@
static int vco_prepare(struct clk *c)
{
- return vco_set_rate(c, vco_cached_rate);
+ int rc = 0;
+
+ if (vco_cached_rate != 0) {
+ rc = vco_set_rate(c, vco_cached_rate);
+ if (rc) {
+ pr_err("%s: vco_set_rate failed. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+ }
+
+ rc = dsi_pll_enable(c);
+
+error:
+ return rc;
}
static void vco_unprepare(struct clk *c)
{
vco_cached_rate = c->rate;
+ dsi_pll_disable(c);
}
/* Op structures */
static struct clk_ops clk_ops_dsi_vco = {
- .enable = vco_enable,
- .disable = vco_disable,
.set_rate = vco_set_rate,
.round_rate = vco_round_rate,
.handoff = vco_handoff,
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 0c67ed8..a779b24 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -54,7 +54,7 @@
struct diag_dci_data_info *dci_data_smd;
struct mutex dci_stat_mutex;
-void diag_dci_smd_record_info(int read_bytes)
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type)
{
static int curr_dci_data_smd;
static unsigned long iteration;
@@ -67,13 +67,14 @@
temp_data += curr_dci_data_smd;
temp_data->iteration = iteration + 1;
temp_data->data_size = read_bytes;
+ temp_data->ch_type = ch_type;
diag_get_timestamp(temp_data->time_stamp);
curr_dci_data_smd++;
iteration++;
mutex_unlock(&dci_stat_mutex);
}
#else
-void diag_dci_smd_record_info(int read_bytes) { }
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type) { }
#endif
/* Process the data read from the smd dci channel */
@@ -83,7 +84,7 @@
int read_bytes, dci_pkt_len, i;
uint8_t recv_pkt_cmd_code;
- diag_dci_smd_record_info(recd_bytes);
+ diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type);
/* Each SMD read can have multiple DCI packets */
read_bytes = 0;
while (read_bytes < recd_bytes) {
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 520995b..e2c4158 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -101,6 +101,7 @@
unsigned long iteration;
int data_size;
char time_stamp[DIAG_TS_SIZE];
+ uint8_t ch_type;
};
extern struct diag_dci_data_info *dci_data_smd;
@@ -135,7 +136,7 @@
void create_dci_event_mask_tbl(unsigned char *tbl_buf);
int diag_dci_clear_event_mask(void);
int diag_dci_query_event_mask(uint16_t event_id);
-void diag_dci_smd_record_info(int read_bytes);
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type);
uint8_t diag_dci_get_cumulative_real_time(void);
int diag_dci_set_real_time(int client_id, uint8_t real_time);
/* Functions related to DCI wakeup sources */
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 3d1a6cd..a24fc54 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -81,6 +81,11 @@
"LPASS STM requested state: %d\n"
"RIVA STM requested state: %d\n"
"APPS STM requested state: %d\n"
+ "supports apps hdlc encoding: %d\n"
+ "Modem hdlc encoding: %d\n"
+ "Lpass hdlc encoding: %d\n"
+ "RIVA hdlc encoding: %d\n"
+ "Modem CMD hdlc encoding: %d\n"
"logging_mode: %d\n"
"real_time_mode: %d\n",
(unsigned int)driver->smd_data[MODEM_DATA].ch,
@@ -123,6 +128,11 @@
driver->stm_state_requested[LPASS_DATA],
driver->stm_state_requested[WCNSS_DATA],
driver->stm_state_requested[APPS_DATA],
+ driver->supports_apps_hdlc_encoding,
+ driver->smd_data[MODEM_DATA].encode_hdlc,
+ driver->smd_data[LPASS_DATA].encode_hdlc,
+ driver->smd_data[WCNSS_DATA].encode_hdlc,
+ driver->smd_cmd[MODEM_DATA].encode_hdlc,
driver->logging_mode,
driver->real_time_mode);
@@ -202,11 +212,13 @@
if (temp_data->iteration != 0) {
bytes_written = scnprintf(
buf + bytes_in_buf, bytes_remaining,
- "i %-20ld\t"
- "s %-20d\t"
- "t %-20s\n",
+ "i %-10ld\t"
+ "s %-10d\t"
+ "c %-10d\t"
+ "t %-15s\n",
temp_data->iteration,
temp_data->data_size,
+ temp_data->ch_type,
temp_data->time_stamp);
bytes_in_buf += bytes_written;
bytes_remaining -= bytes_written;
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index aa1d847..c91095e 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -491,6 +491,8 @@
feature_bytes[0] |= F_DIAG_LOG_ON_DEMAND_RSP_ON_MASTER;
feature_bytes[0] |= driver->supports_separate_cmdrsp ?
F_DIAG_REQ_RSP_CHANNEL : 0;
+ feature_bytes[0] |= driver->supports_apps_hdlc_encoding ?
+ F_DIAG_HDLC_ENCODE_IN_APPS_MASK : 0;
feature_bytes[1] |= F_DIAG_OVER_STM;
memcpy(buf+header_size, &feature_bytes, FEATURE_MASK_LEN_BYTES);
total_len = header_size + FEATURE_MASK_LEN_BYTES;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 7154942..cf21b82 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -217,6 +217,7 @@
int peripheral; /* The peripheral this smd channel communicates with */
int type; /* The type of smd channel (data, control, dci) */
uint16_t peripheral_mask;
+ int encode_hdlc; /* Whether data is raw and needs to be hdlc encoded */
smd_channel_t *ch;
smd_channel_t *ch_save;
@@ -229,11 +230,16 @@
unsigned char *buf_in_1;
unsigned char *buf_in_2;
+ unsigned char *buf_in_1_raw;
+ unsigned char *buf_in_2_raw;
+
struct diag_request *write_ptr_1;
struct diag_request *write_ptr_2;
struct diag_nrt_wake_lock nrt_lock;
+ struct workqueue_struct *wq;
+
struct work_struct diag_read_smd_work;
struct work_struct diag_notify_update_smd_work;
int notify_context;
@@ -270,6 +276,7 @@
unsigned int buf_tbl_size;
int use_device_tree;
int supports_separate_cmdrsp;
+ int supports_apps_hdlc_encoding;
/* The state requested in the STM command */
int stm_state_requested[NUM_STM_PROCESSORS];
/* The current STM state */
@@ -384,7 +391,6 @@
struct diag_request *write_ptr_mdm;
#endif
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
- spinlock_t hsic_ready_spinlock;
/* common for all bridges */
struct work_struct diag_connect_work;
struct work_struct diag_disconnect_work;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 6e70062..099dc09 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -543,58 +543,10 @@
return exit_stat;
}
-static void diag_update_data_ready(int index)
-{
- int clear_bit = 1;
- unsigned long hsic_lock_flags;
- unsigned long ready_lock_flags;
- int i;
-
- /*
- * Determine whether the data_ready USER_SPACE_DATA_TYPE bit
- * should be updated/cleared or not. There is a race condition that
- * can occur when in MEMORY_DEVICE_MODE with the hsic data.
- * When new hsic data arrives we prepare the data so it can
- * later be copied to userspace. We set the USER_SPACE_DATA_TYPE
- * bit in data ready at that time. We later copy the hsic data
- * to userspace and clear the USER_SPACE_DATA_TYPE bit in
- * data ready. The race condition occurs if new data arrives (bit set)
- * while we are processing the current data and sending
- * it to userspace (bit clear). The clearing of the bit can
- * overwrite the setting of the bit.
- */
-
- spin_lock_irqsave(&driver->hsic_ready_spinlock, ready_lock_flags);
- for (i = 0; i < MAX_HSIC_CH; i++) {
- if (diag_hsic[i].hsic_inited) {
- spin_lock_irqsave(&diag_hsic[i].hsic_spinlock,
- hsic_lock_flags);
- if ((diag_hsic[i].num_hsic_buf_tbl_entries > 0) &&
- diag_hsic[i].hsic_device_enabled &&
- diag_hsic[i].hsic_ch) {
- /* New data do not clear the bit */
- clear_bit = 0;
- }
- spin_unlock_irqrestore(&diag_hsic[i].hsic_spinlock,
- hsic_lock_flags);
- if (!clear_bit)
- break;
- }
- }
-
- if (clear_bit)
- driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-
- spin_unlock_irqrestore(&driver->hsic_ready_spinlock, ready_lock_flags);
-}
#else
inline uint16_t diag_get_remote_device_mask(void) { return 0; }
inline int diag_copy_remote(char __user *buf, size_t count, int *pret,
int *pnum_data) { return 0; }
-static void diag_update_data_ready(int index)
-{
- driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
-}
#endif
int diag_command_reg(unsigned long ioarg)
@@ -1216,8 +1168,8 @@
return -EINVAL;
}
- wait_event_interruptible(driver->wait_q,
- driver->data_ready[index]);
+ wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
+
mutex_lock(&driver->diagchar_mutex);
clear_read_wakelock = 0;
@@ -1227,6 +1179,7 @@
pr_debug("diag: process woken up\n");
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
/* place holder for number of data field */
ret += 4;
@@ -1362,10 +1315,9 @@
/* copy number of data fields */
COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4);
ret -= 4;
- diag_update_data_ready(index);
for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) {
if (driver->smd_data[i].ch)
- queue_work(driver->diag_wq,
+ queue_work(driver->smd_data[i].wq,
&(driver->smd_data[i].diag_read_smd_work));
}
#ifdef CONFIG_DIAG_SDIO_PIPE
@@ -2176,7 +2128,6 @@
diag_masks_init();
diagfwd_init();
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
- spin_lock_init(&driver->hsic_ready_spinlock);
diagfwd_bridge_init(HSIC);
diagfwd_bridge_init(HSIC_2);
/* register HSIC device */
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 379dc4d..609d6cd 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -61,13 +61,13 @@
/* Number of entries in table of buffers */
static unsigned int buf_tbl_size = 10;
struct diag_master_table entry;
-struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
-struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
int wrap_enabled;
uint16_t wrap_count;
void encode_rsp_and_send(int buf_length)
{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
struct diag_smd_info *data = &(driver->smd_data[MODEM_DATA]);
if (buf_length > APPS_BUF_SIZE) {
@@ -244,6 +244,124 @@
}
}
}
+int diag_add_hdlc_encoding(struct diag_smd_info *smd_info, void *buf,
+ int total_recd, uint8_t *encode_buf,
+ int *encoded_length)
+{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ struct data_header {
+ uint8_t control_char;
+ uint8_t version;
+ uint16_t length;
+ };
+ struct data_header *header;
+ int header_size = sizeof(struct data_header);
+ uint8_t *end_control_char;
+ uint8_t *payload;
+ uint8_t *temp_buf;
+ uint8_t *temp_encode_buf;
+ int src_pkt_len;
+ int encoded_pkt_length;
+ int max_size;
+ int total_processed = 0;
+ int bytes_remaining;
+ int success = 1;
+
+ temp_buf = buf;
+ temp_encode_buf = encode_buf;
+ bytes_remaining = *encoded_length;
+ while (total_processed < total_recd) {
+ header = (struct data_header *)temp_buf;
+ /* Perform initial error checking */
+ if (header->control_char != CONTROL_CHAR ||
+ header->version != 1) {
+ success = 0;
+ break;
+ }
+ payload = temp_buf + header_size;
+ end_control_char = payload + header->length;
+ if (*end_control_char != CONTROL_CHAR) {
+ success = 0;
+ break;
+ }
+
+ max_size = 2 * header->length + 3;
+ if (bytes_remaining < max_size) {
+ pr_err("diag: In %s, Not enough room to encode remaining data for peripheral: %d, bytes available: %d, max_size: %d\n",
+ __func__, smd_info->peripheral,
+ bytes_remaining, max_size);
+ success = 0;
+ break;
+ }
+
+ /* Prepare for encoding the data */
+ send.state = DIAG_STATE_START;
+ send.pkt = payload;
+ send.last = (void *)(payload + header->length - 1);
+ send.terminate = 1;
+
+ enc.dest = temp_encode_buf;
+ enc.dest_last = (void *)(temp_encode_buf + max_size);
+ enc.crc = 0;
+ diag_hdlc_encode(&send, &enc);
+
+ /* Prepare for next packet */
+ src_pkt_len = (header_size + header->length + 1);
+ total_processed += src_pkt_len;
+ temp_buf += src_pkt_len;
+
+ encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+ bytes_remaining -= encoded_pkt_length;
+ temp_encode_buf = enc.dest;
+ }
+
+ *encoded_length = (int)(temp_encode_buf - encode_buf);
+
+ return success;
+}
+
+static int check_bufsize_for_encoding(struct diag_smd_info *smd_info, void *buf,
+ int total_recd)
+{
+ int buf_size = IN_BUF_SIZE;
+ int max_size = 2 * total_recd + 3;
+ unsigned char *temp_buf;
+
+ if (max_size > IN_BUF_SIZE) {
+ if (max_size < MAX_IN_BUF_SIZE) {
+ pr_err("diag: In %s, SMD sending packet of %d bytes that may expand to %d bytes, peripheral: %d\n",
+ __func__, total_recd, max_size,
+ smd_info->peripheral);
+ if (buf == smd_info->buf_in_1_raw) {
+ temp_buf = krealloc(smd_info->buf_in_1,
+ max_size, GFP_KERNEL);
+ if (temp_buf) {
+ smd_info->buf_in_1 = temp_buf;
+ buf_size = max_size;
+ } else {
+ buf_size = 0;
+ }
+ } else {
+ temp_buf = krealloc(smd_info->buf_in_2,
+ max_size, GFP_KERNEL);
+ if (temp_buf) {
+ smd_info->buf_in_2 = temp_buf;
+ buf_size = max_size;
+ } else {
+ buf_size = 0;
+ }
+ }
+ } else {
+			pr_err("diag: In %s, SMD sending packet whose HDLC encoding can expand to %d bytes, more than %d bytes, peripheral: %d. Discarding.\n",
+				__func__, max_size, MAX_IN_BUF_SIZE,
+				smd_info->peripheral);
+ buf_size = 0;
+ }
+ }
+
+ return buf_size;
+}
void process_lock_enabling(struct diag_nrt_wake_lock *lock, int real_time)
{
@@ -334,7 +452,7 @@
/* Process the data read from the smd data channel */
int diag_process_smd_read_data(struct diag_smd_info *smd_info, void *buf,
- int total_recd)
+ int total_recd)
{
struct diag_request *write_ptr_modem = NULL;
int *in_busy_ptr = 0;
@@ -352,26 +470,74 @@
return 0;
}
- if (smd_info->buf_in_1 == buf) {
- write_ptr_modem = smd_info->write_ptr_1;
- in_busy_ptr = &smd_info->in_busy_1;
- } else if (smd_info->buf_in_2 == buf) {
- write_ptr_modem = smd_info->write_ptr_2;
- in_busy_ptr = &smd_info->in_busy_2;
- } else {
- pr_err("diag: In %s, no match for in_busy_1\n", __func__);
- }
+ /* If the data is already hdlc encoded */
+ if (!smd_info->encode_hdlc) {
+ if (smd_info->buf_in_1 == buf) {
+ write_ptr_modem = smd_info->write_ptr_1;
+ in_busy_ptr = &smd_info->in_busy_1;
+ } else if (smd_info->buf_in_2 == buf) {
+ write_ptr_modem = smd_info->write_ptr_2;
+ in_busy_ptr = &smd_info->in_busy_2;
+ } else {
+ pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n",
+ __func__, smd_info->peripheral);
+ }
- if (write_ptr_modem) {
- write_ptr_modem->length = total_recd;
- *in_busy_ptr = 1;
- err = diag_device_write(buf, smd_info->peripheral,
- write_ptr_modem);
- if (err) {
- /* Free up the buffer for future use */
- *in_busy_ptr = 0;
- pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
- __func__, err);
+ if (write_ptr_modem) {
+ write_ptr_modem->length = total_recd;
+ *in_busy_ptr = 1;
+ err = diag_device_write(buf, smd_info->peripheral,
+ write_ptr_modem);
+ if (err) {
+ /* Free up the buffer for future use */
+ *in_busy_ptr = 0;
+ pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
+ __func__, err);
+ }
+ }
+ } else {
+ /* The data is raw and needs to be hdlc encoded */
+ if (smd_info->buf_in_1_raw == buf) {
+ write_ptr_modem = smd_info->write_ptr_1;
+ in_busy_ptr = &smd_info->in_busy_1;
+ } else if (smd_info->buf_in_2_raw == buf) {
+ write_ptr_modem = smd_info->write_ptr_2;
+ in_busy_ptr = &smd_info->in_busy_2;
+ } else {
+ pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n",
+ __func__, smd_info->peripheral);
+ }
+
+ if (write_ptr_modem) {
+ int success = 0;
+ int write_length = 0;
+ unsigned char *write_buf = NULL;
+
+ write_length = check_bufsize_for_encoding(smd_info, buf,
+ total_recd);
+ if (write_length) {
+ write_buf = (buf == smd_info->buf_in_1_raw) ?
+ smd_info->buf_in_1 : smd_info->buf_in_2;
+ success = diag_add_hdlc_encoding(smd_info, buf,
+ total_recd, write_buf,
+ &write_length);
+ if (success) {
+ write_ptr_modem->length = write_length;
+ *in_busy_ptr = 1;
+ err = diag_device_write(write_buf,
+ smd_info->peripheral,
+ write_ptr_modem);
+ if (err) {
+ /*
+ * Free up the buffer for
+ * future use
+ */
+ *in_busy_ptr = 0;
+ pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n",
+ __func__, err);
+ }
+ }
+ }
}
}
@@ -391,11 +557,32 @@
return;
}
- if (!smd_info->in_busy_1)
+ /* Determine the buffer to read the data into. */
+ if (smd_info->type == SMD_DATA_TYPE) {
+ /* If the data is raw and not hdlc encoded */
+ if (smd_info->encode_hdlc) {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1_raw;
+ else if (!smd_info->in_busy_2)
+ buf = smd_info->buf_in_2_raw;
+ } else {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1;
+ else if (!smd_info->in_busy_2)
+ buf = smd_info->buf_in_2;
+ }
+ } else if (smd_info->type == SMD_CMD_TYPE) {
+ /* If the data is raw and not hdlc encoded */
+ if (smd_info->encode_hdlc) {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1_raw;
+ } else {
+ if (!smd_info->in_busy_1)
+ buf = smd_info->buf_in_1;
+ }
+ } else if (!smd_info->in_busy_1) {
buf = smd_info->buf_in_1;
- else if (!smd_info->in_busy_2 &&
- (smd_info->type == SMD_DATA_TYPE))
- buf = smd_info->buf_in_2;
+ }
if (!buf && (smd_info->type == SMD_DCI_TYPE ||
smd_info->type == SMD_DCI_CMD_TYPE))
@@ -494,32 +681,12 @@
diag_smd_send_req(smd_info);
}
-#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-static void diag_mem_dev_mode_ready_update(int index, int hsic_updated)
-{
- if (hsic_updated) {
- unsigned long flags;
- spin_lock_irqsave(&driver->hsic_ready_spinlock, flags);
- driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
- spin_unlock_irqrestore(&driver->hsic_ready_spinlock, flags);
- } else {
- driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
- }
-}
-#else
-static void diag_mem_dev_mode_ready_update(int index, int hsic_updated)
-{
- (void) hsic_updated;
- driver->data_ready[index] |= USER_SPACE_DATA_TYPE;
-}
-#endif
int diag_device_write(void *buf, int data_type, struct diag_request *write_ptr)
{
int i, err = 0, index;
index = 0;
if (driver->logging_mode == MEMORY_DEVICE_MODE) {
- int hsic_updated = 0;
if (data_type == APPS_DATA) {
for (i = 0; i < driver->buf_tbl_size; i++)
if (driver->buf_tbl[i].length == 0) {
@@ -540,7 +707,6 @@
else if (data_type == HSIC_DATA || data_type == HSIC_2_DATA) {
unsigned long flags;
int foundIndex = -1;
- hsic_updated = 1;
index = data_type - HSIC_DATA;
spin_lock_irqsave(&diag_hsic[index].hsic_spinlock,
flags);
@@ -573,8 +739,8 @@
driver->logging_process_id)
break;
if (i < driver->num_clients) {
- diag_mem_dev_mode_ready_update(i, hsic_updated);
pr_debug("diag: wake up logging process\n");
+ driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
wake_up_interruptible(&driver->wait_q);
} else
return -EINVAL;
@@ -582,7 +748,7 @@
if ((data_type >= MODEM_DATA) && (data_type <= WCNSS_DATA)) {
driver->smd_data[data_type].in_busy_1 = 0;
driver->smd_data[data_type].in_busy_2 = 0;
- queue_work(driver->diag_wq,
+ queue_work(driver->smd_data[data_type].wq,
&(driver->smd_data[data_type].
diag_read_smd_work));
if (data_type == MODEM_DATA &&
@@ -1439,7 +1605,7 @@
driver->smd_data[i].in_busy_2 = 0;
if (queue)
/* Poll SMD data channels to check for data */
- queue_work(driver->diag_wq,
+ queue_work(driver->smd_data[i].wq,
&(driver->smd_data[i].diag_read_smd_work));
}
@@ -1549,19 +1715,24 @@
for (i = 0; i < num_channels; i++) {
if (buf == (void *)data[i].buf_in_1) {
data[i].in_busy_1 = 0;
- queue_work(driver->diag_wq,
- &(data[i].diag_read_smd_work));
found_it = 1;
break;
} else if (buf == (void *)data[i].buf_in_2) {
data[i].in_busy_2 = 0;
- queue_work(driver->diag_wq,
- &(data[i].diag_read_smd_work));
found_it = 1;
break;
}
}
+ if (found_it) {
+ if (data[i].type == SMD_DATA_TYPE)
+ queue_work(data[i].wq,
+ &(data[i].diag_read_smd_work));
+ else
+ queue_work(driver->diag_wq,
+ &(data[i].diag_read_smd_work));
+ }
+
return found_it;
}
@@ -1734,8 +1905,12 @@
diag_dci_try_activate_wakeup_source(smd_info->ch);
queue_work(driver->diag_dci_wq,
&(smd_info->diag_read_smd_work));
- } else
+ } else if (smd_info->type == SMD_DATA_TYPE) {
+ queue_work(smd_info->wq,
+ &(smd_info->diag_read_smd_work));
+ } else {
queue_work(driver->diag_wq, &(smd_info->diag_read_smd_work));
+ }
}
static int diag_smd_probe(struct platform_device *pdev)
@@ -1863,8 +2038,10 @@
void diag_smd_destructor(struct diag_smd_info *smd_info)
{
- if (smd_info->type == SMD_DATA_TYPE)
+ if (smd_info->type == SMD_DATA_TYPE) {
wake_lock_destroy(&smd_info->nrt_lock.read_lock);
+ destroy_workqueue(smd_info->wq);
+ }
if (smd_info->ch)
smd_close(smd_info->ch);
@@ -1875,6 +2052,8 @@
kfree(smd_info->buf_in_2);
kfree(smd_info->write_ptr_1);
kfree(smd_info->write_ptr_2);
+ kfree(smd_info->buf_in_1_raw);
+ kfree(smd_info->buf_in_2_raw);
}
int diag_smd_constructor(struct diag_smd_info *smd_info, int peripheral,
@@ -1882,6 +2061,7 @@
{
smd_info->peripheral = peripheral;
smd_info->type = type;
+ smd_info->encode_hdlc = 0;
mutex_init(&smd_info->smd_ch_mutex);
switch (peripheral) {
@@ -1934,6 +2114,58 @@
goto err;
kmemleak_not_leak(smd_info->write_ptr_2);
}
+ if (driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (smd_info->buf_in_1_raw == NULL) {
+ smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE,
+ GFP_KERNEL);
+ if (smd_info->buf_in_1_raw == NULL)
+ goto err;
+ kmemleak_not_leak(smd_info->buf_in_1_raw);
+ }
+ if (smd_info->buf_in_2_raw == NULL) {
+ smd_info->buf_in_2_raw = kzalloc(IN_BUF_SIZE,
+ GFP_KERNEL);
+ if (smd_info->buf_in_2_raw == NULL)
+ goto err;
+ kmemleak_not_leak(smd_info->buf_in_2_raw);
+ }
+ }
+ }
+
+ if (smd_info->type == SMD_CMD_TYPE &&
+ driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (smd_info->buf_in_1_raw == NULL) {
+ smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE,
+ GFP_KERNEL);
+ if (smd_info->buf_in_1_raw == NULL)
+ goto err;
+ kmemleak_not_leak(smd_info->buf_in_1_raw);
+ }
+ }
+
+ /* The smd data type needs separate work queues for reads */
+ if (type == SMD_DATA_TYPE) {
+ switch (peripheral) {
+ case MODEM_DATA:
+ smd_info->wq = create_singlethread_workqueue(
+ "diag_modem_data_read_wq");
+ break;
+ case LPASS_DATA:
+ smd_info->wq = create_singlethread_workqueue(
+ "diag_lpass_data_read_wq");
+ break;
+ case WCNSS_DATA:
+ smd_info->wq = create_singlethread_workqueue(
+ "diag_wcnss_data_read_wq");
+ break;
+ default:
+ smd_info->wq = NULL;
+ break;
+ }
+ } else {
+ smd_info->wq = NULL;
}
INIT_WORK(&(smd_info->diag_read_smd_work), diag_read_smd_work_fn);
@@ -2026,6 +2258,8 @@
kfree(smd_info->buf_in_2);
kfree(smd_info->write_ptr_1);
kfree(smd_info->write_ptr_2);
+ kfree(smd_info->buf_in_1_raw);
+ kfree(smd_info->buf_in_2_raw);
return 0;
}
@@ -2048,6 +2282,7 @@
driver->buf_tbl_size = (buf_tbl_size < driver->poolsize_hdlc) ?
driver->poolsize_hdlc : buf_tbl_size;
driver->supports_separate_cmdrsp = device_supports_separate_cmdrsp();
+ driver->supports_apps_hdlc_encoding = 0;
mutex_init(&driver->diag_hdlc_mutex);
mutex_init(&driver->diag_cntl_mutex);
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index a832cb3..e0deef3 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -89,6 +89,31 @@
}
}
+static void process_hdlc_encoding_feature(struct diag_smd_info *smd_info,
+ uint8_t feature_mask)
+{
+ /*
+ * Check if apps supports hdlc encoding and the
+ * peripheral supports apps hdlc encoding
+ */
+ if (driver->supports_apps_hdlc_encoding &&
+ (feature_mask & F_DIAG_HDLC_ENCODE_IN_APPS_MASK)) {
+ driver->smd_data[smd_info->peripheral].encode_hdlc =
+ ENABLE_APPS_HDLC_ENCODING;
+ if (driver->separate_cmdrsp[smd_info->peripheral] &&
+ smd_info->peripheral < NUM_SMD_CMD_CHANNELS)
+ driver->smd_cmd[smd_info->peripheral].encode_hdlc =
+ ENABLE_APPS_HDLC_ENCODING;
+ } else {
+ driver->smd_data[smd_info->peripheral].encode_hdlc =
+ DISABLE_APPS_HDLC_ENCODING;
+ if (driver->separate_cmdrsp[smd_info->peripheral] &&
+ smd_info->peripheral < NUM_SMD_CMD_CHANNELS)
+ driver->smd_cmd[smd_info->peripheral].encode_hdlc =
+ DISABLE_APPS_HDLC_ENCODING;
+ }
+}
+
/* Process the data read from the smd control channel */
int diag_process_smd_cntl_read_data(struct diag_smd_info *smd_info, void *buf,
int total_recd)
@@ -187,6 +212,12 @@
else
driver->separate_cmdrsp[periph] =
DISABLE_SEPARATE_CMDRSP;
+ /*
+ * Check if apps supports hdlc encoding and the
+ * peripheral supports apps hdlc encoding
+ */
+ process_hdlc_encoding_feature(smd_info,
+ feature_mask);
if (feature_mask_len > 1) {
feature_mask = *(uint8_t *)(buf+13);
process_stm_feature(smd_info,
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index c90c132..d79195c 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -48,6 +48,9 @@
/* Denotes we support diag over stm */
#define F_DIAG_OVER_STM 0x02
+ /* Perform hdlc encoding of data coming from smd channel */
+#define F_DIAG_HDLC_ENCODE_IN_APPS_MASK 0x40
+
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
@@ -57,6 +60,9 @@
#define UPDATE_PERIPHERAL_STM_STATE 1
#define CLEAR_PERIPHERAL_STM_STATE 2
+#define ENABLE_APPS_HDLC_ENCODING 1
+#define DISABLE_APPS_HDLC_ENCODING 0
+
struct cmd_code_range {
uint16_t cmd_code_lo;
uint16_t cmd_code_hi;
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index a4d2c1b..1ea3cd2 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -52,7 +52,7 @@
#define MAX_VMAP_RETRIES 10
#define BAD_ORDER -1
-static const unsigned int orders[] = {8, 4, 0};
+static const unsigned int orders[] = {9, 8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static unsigned int low_gfp_flags = __GFP_HIGHMEM | GFP_KERNEL | __GFP_ZERO;
static unsigned int high_gfp_flags = (__GFP_HIGHMEM | __GFP_NORETRY
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 792eb79..118e033 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -33,7 +33,7 @@
adreno_a3xx_snapshot.o \
adreno.o
-msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o adreno_profile.o
msm_z180-y += \
z180.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 0c90efb..b964620 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -274,7 +274,7 @@
}
/**
- * adreno_perfcounter_read_group: Determine which countables are in counters
+ * adreno_perfcounter_read_group() - Determine which countables are in counters
* @adreno_dev: Adreno device to configure
* @reads: List of kgsl_perfcounter_read_groups
* @count: Length of list
@@ -351,6 +351,61 @@
}
/**
+ * adreno_perfcounter_get_groupid() - Get the performance counter ID
+ * @adreno_dev: Adreno device
+ * @name: Performance counter group name string
+ *
+ * Get the groupid based on the name and return this ID
+ */
+
+int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
+ const char *name)
+{
+
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ int i;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ /* perfcounter get/put/query not allowed on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return -EINVAL;
+
+ for (i = 0; i < counters->group_count; ++i) {
+ group = &(counters->groups[i]);
+ if (!strcmp(group->name, name))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * adreno_perfcounter_get_name() - Get the group name
+ * @adreno_dev: Adreno device
+ * @groupid: Desired performance counter groupid
+ *
+ * Get the name based on the groupid and return it
+ */
+
+const char *adreno_perfcounter_get_name(struct adreno_device *adreno_dev,
+ unsigned int groupid)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+
+ /* perfcounter get/put/query not allowed on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return NULL;
+
+ if (groupid >= counters->group_count)
+ return NULL;
+
+ return counters->groups[groupid].name;
+}
+
+/**
* adreno_perfcounter_query_group: Determine which countables are in counters
* @adreno_dev: Adreno device to configure
* @groupid: Desired performance counter group
@@ -574,6 +629,8 @@
kgsl_mmu_unmap(pagetable, &device->memstore);
+ kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
+
kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
}
@@ -596,6 +653,11 @@
if (result)
goto unmap_memptrs_desc;
+ result = kgsl_mmu_map_global(pagetable,
+ &adreno_dev->profile.shared_buffer);
+ if (result)
+ goto unmap_profile_shared;
+
result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
if (result)
goto unmap_memstore_desc;
@@ -609,6 +671,9 @@
device->mmu.setstate_memory.size;
return result;
+unmap_profile_shared:
+ kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
+
unmap_memstore_desc:
kgsl_mmu_unmap(pagetable, &device->memstore);
@@ -1522,6 +1587,7 @@
goto error_close_rb;
adreno_debugfs_init(device);
+ adreno_profile_init(device);
adreno_ft_init_sysfs(device);
@@ -1552,6 +1618,7 @@
adreno_dev = ADRENO_DEVICE(device);
adreno_coresight_remove(pdev);
+ adreno_profile_close(device);
kgsl_pwrscale_detach_policy(device);
kgsl_pwrscale_close(device);
@@ -3194,6 +3261,9 @@
int status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ /* process any profiling results that are available */
+ adreno_profile_process_results(device);
+
/* switch to NULL ctxt */
if (adreno_dev->drawctxt_active != NULL) {
adreno_drawctxt_switch(adreno_dev, NULL, 0);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index c4ead4a..72f15e7 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -16,6 +16,7 @@
#include "kgsl_device.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
+#include "adreno_profile.h"
#include "kgsl_iommu.h"
#include <mach/ocmem.h>
@@ -38,6 +39,7 @@
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
#define KGSL_CMD_FLAGS_GET_INT 0x00000004
+#define KGSL_CMD_FLAGS_PROFILE 0x00000008
#define KGSL_CMD_FLAGS_EOF 0x00000100
/* Command identifiers */
@@ -48,6 +50,8 @@
#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
#define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F
#define KGSL_NOP_IB_IDENTIFIER 0x20F20F20
+#define KGSL_START_OF_PROFILE_IDENTIFIER 0x2DEFADE1
+#define KGSL_END_OF_PROFILE_IDENTIFIER 0x2DEFADE2
#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
@@ -131,6 +135,7 @@
struct ocmem_buf *ocmem_hdl;
unsigned int ocmem_base;
unsigned int gpu_cycles;
+ struct adreno_profile profile;
};
#define PERFCOUNTER_FLAG_NONE 0x0
@@ -156,10 +161,12 @@
* struct adreno_perfcount_group: registers for a hardware group
* @regs: available registers for this group
* @reg_count: total registers for this group
+ * @name: group name for this group
*/
struct adreno_perfcount_group {
struct adreno_perfcount_register *regs;
unsigned int reg_count;
+ const char *name;
};
/**
@@ -414,6 +421,12 @@
int adreno_ft_init_sysfs(struct kgsl_device *device);
void adreno_ft_uninit_sysfs(struct kgsl_device *device);
+int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
+ const char *name);
+
+const char *adreno_perfcounter_get_name(struct adreno_device
+ *adreno_dev, unsigned int groupid);
+
int adreno_perfcounter_get(struct adreno_device *adreno_dev,
unsigned int groupid, unsigned int countable, unsigned int *offset,
unsigned int flags);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 7882dc5..d96965c 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -3245,22 +3245,25 @@
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
};
+#define A3XX_PERFCOUNTER_GROUP(name) { a3xx_perfcounters_##name, \
+ ARRAY_SIZE(a3xx_perfcounters_##name), __stringify(name) }
+
static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
- { a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
- { a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
- { a3xx_perfcounters_pc, ARRAY_SIZE(a3xx_perfcounters_pc) },
- { a3xx_perfcounters_vfd, ARRAY_SIZE(a3xx_perfcounters_vfd) },
- { a3xx_perfcounters_hlsq, ARRAY_SIZE(a3xx_perfcounters_hlsq) },
- { a3xx_perfcounters_vpc, ARRAY_SIZE(a3xx_perfcounters_vpc) },
- { a3xx_perfcounters_tse, ARRAY_SIZE(a3xx_perfcounters_tse) },
- { a3xx_perfcounters_ras, ARRAY_SIZE(a3xx_perfcounters_ras) },
- { a3xx_perfcounters_uche, ARRAY_SIZE(a3xx_perfcounters_uche) },
- { a3xx_perfcounters_tp, ARRAY_SIZE(a3xx_perfcounters_tp) },
- { a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
- { a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
- { a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
- { a3xx_perfcounters_vbif, ARRAY_SIZE(a3xx_perfcounters_vbif) },
- { a3xx_perfcounters_vbif_pwr, ARRAY_SIZE(a3xx_perfcounters_vbif_pwr) },
+ A3XX_PERFCOUNTER_GROUP(cp),
+ A3XX_PERFCOUNTER_GROUP(rbbm),
+ A3XX_PERFCOUNTER_GROUP(pc),
+ A3XX_PERFCOUNTER_GROUP(vfd),
+ A3XX_PERFCOUNTER_GROUP(hlsq),
+ A3XX_PERFCOUNTER_GROUP(vpc),
+ A3XX_PERFCOUNTER_GROUP(tse),
+ A3XX_PERFCOUNTER_GROUP(ras),
+ A3XX_PERFCOUNTER_GROUP(uche),
+ A3XX_PERFCOUNTER_GROUP(tp),
+ A3XX_PERFCOUNTER_GROUP(sp),
+ A3XX_PERFCOUNTER_GROUP(rb),
+ A3XX_PERFCOUNTER_GROUP(pwr),
+ A3XX_PERFCOUNTER_GROUP(vbif),
+ A3XX_PERFCOUNTER_GROUP(vbif_pwr),
};
static struct adreno_perfcounters a3xx_perfcounters = {
@@ -3304,6 +3307,9 @@
/* Reserve and start countable 1 in the PWR perfcounter group */
adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
NULL, PERFCOUNTER_FLAG_KERNEL);
+
+ /* Default performance counter profiling to false */
+ adreno_dev->profile.enabled = false;
}
/**
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index bf173a7..0af4c12e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -249,6 +249,8 @@
if (device->state != KGSL_STATE_HUNG)
adreno_idle(device);
+ adreno_profile_process_results(device);
+
kgsl_sharedmem_free(&drawctxt->gpustate);
kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 88d1b8c..3088099 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -61,7 +61,13 @@
{ KGSL_CONTEXT_TYPE_GL, "GL" }, \
{ KGSL_CONTEXT_TYPE_CL, "CL" }, \
{ KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
- { KGSL_CONTEXT_TYPE_RS, "RS" }
+ { KGSL_CONTEXT_TYPE_RS, "RS" }, \
+ { KGSL_CONTEXT_TYPE_UNKNOWN, "UNKNOWN" }
+
+struct adreno_context_type {
+ unsigned int type;
+ const char *str;
+};
struct kgsl_device;
struct adreno_device;
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 7a070a6..32dbd51 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -79,6 +79,8 @@
{KGSL_CMD_INTERNAL_IDENTIFIER, "CMD__INT"},
{KGSL_START_OF_IB_IDENTIFIER, "IB_START"},
{KGSL_END_OF_IB_IDENTIFIER, "IB___END"},
+ {KGSL_START_OF_PROFILE_IDENTIFIER, "PRO_STRT"},
+ {KGSL_END_OF_PROFILE_IDENTIFIER, "PRO__END"},
};
static uint32_t adreno_is_pm4_len(uint32_t word)
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
new file mode 100644
index 0000000..896b6e8
--- /dev/null
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -0,0 +1,1161 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+
+#include "adreno.h"
+#include "adreno_profile.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#define ASSIGNS_STR_FORMAT "%.8s:%u "
+
+/*
+ * Raw Data for processing later:
+ * : 3 - timestamp, count, context id
+ * [per counter] - data for each counter
+ * : 1 - Register offset
+ * : 2 - Pre IB register hi/lo value
+ * : 2 - Post IB register hi/lo value
+ * [per counter end]
+ */
+#define SIZE_DATA(cnt) (3 + (cnt) * 5)
+
+/*
+ * Pre-IB command size (in dwords):
+ * : 2 - NOP start identifier
+ * : 3 - timestamp
+ * : 3 - count
+ * : 3 - context id
+ * [loop count start] - for each counter to watch
+ * : 3 - Register offset
+ * : 3 - Register read lo
+ * : 3 - Register read high
+ * [loop end]
+ * : 2 - NOP end identifier
+ */
+#define SIZE_PREIB(cnt) (13 + (cnt) * 9)
+
+/*
+ * Post-IB command size (in dwords):
+ * : 2 - NOP start identifier
+ * [loop count start] - for each counter to watch
+ * : 3 - Register read lo
+ * : 3 - Register read high
+ * [loop end]
+ * : 2 - NOP end identifier
+ */
+#define SIZE_POSTIB(cnt) (4 + (cnt) * 6)
+
+/* Counter data + Pre size + post size = total size */
+#define SIZE_SHARED_ENTRY(cnt) (SIZE_DATA(cnt) + SIZE_PREIB(cnt) \
+ + SIZE_POSTIB(cnt))
+
+/*
+ * Space for following string :"%u %u %u %.5s %u "
+ * [count iterations]: "%.8s:%u %llu %llu%c"
+ */
+#define SIZE_PIPE_ENTRY(cnt) (50 + (cnt) * 62)
+#define SIZE_LOG_ENTRY(cnt) (5 + (cnt) * 5)
+
+static struct adreno_context_type ctxt_type_table[] = {ADRENO_DRAWCTXT_TYPES};
+
+static const char *get_api_type_str(unsigned int type)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(ctxt_type_table) - 1; i++) {
+ if (ctxt_type_table[i].type == type)
+ break;
+ }
+ return ctxt_type_table[i].str;
+}
+
+static inline void _create_ib_ref(struct kgsl_memdesc *memdesc,
+ unsigned int *cmd, unsigned int cnt, unsigned int off)
+{
+ cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
+ cmd[1] = memdesc->gpuaddr + off;
+ cmd[2] = cnt;
+}
+
+#define IB_START(cmd) do { \
+ *cmd++ = cp_nop_packet(1); \
+ *cmd++ = KGSL_START_OF_PROFILE_IDENTIFIER; \
+ } while (0);
+
+#define IB_END(cmd) do { \
+ *cmd++ = cp_nop_packet(1); \
+ *cmd++ = KGSL_END_OF_PROFILE_IDENTIFIER; \
+ } while (0);
+
+#define IB_CMD(cmd, type, val1, val2, off) do { \
+ *cmd++ = cp_type3_packet(type, 2); \
+ *cmd++ = val1; \
+ *cmd++ = val2; \
+ off += sizeof(unsigned int); \
+ } while (0);
+
+static void _build_pre_ib_cmds(struct adreno_profile *profile,
+ unsigned int *rbcmds, unsigned int head,
+ unsigned int timestamp, unsigned int ctxt_id)
+{
+ struct adreno_profile_assigns_list *entry;
+ unsigned int *start, *ibcmds;
+ unsigned int count = profile->assignment_count;
+ unsigned int gpuaddr = profile->shared_buffer.gpuaddr;
+ unsigned int ib_offset = head + SIZE_DATA(count);
+ unsigned int data_offset = head * sizeof(unsigned int);
+
+ ibcmds = ib_offset + ((unsigned int *) profile->shared_buffer.hostptr);
+ start = ibcmds;
+
+ /* start of profile identifier */
+ IB_START(ibcmds);
+
+ /* timestamp */
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ timestamp, data_offset);
+
+ /* count: number of perf counters pairs GPU will write */
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ profile->assignment_count, data_offset);
+
+ /* context id */
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ ctxt_id, data_offset);
+
+ /* loop for each countable assigned */
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ IB_CMD(ibcmds, CP_MEM_WRITE, gpuaddr + data_offset,
+ entry->offset, data_offset);
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
+ gpuaddr + data_offset, data_offset);
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+ gpuaddr + data_offset, data_offset);
+
+ /* skip over post_ib counter data */
+ data_offset += sizeof(unsigned int) * 2;
+ }
+
+ /* end of profile identifier */
+ IB_END(ibcmds);
+
+ _create_ib_ref(&profile->shared_buffer, rbcmds,
+ ibcmds - start, ib_offset * sizeof(unsigned int));
+}
+
+static void _build_post_ib_cmds(struct adreno_profile *profile,
+ unsigned int *rbcmds, unsigned int head)
+{
+ struct adreno_profile_assigns_list *entry;
+ unsigned int *start, *ibcmds;
+ unsigned int count = profile->assignment_count;
+ unsigned int gpuaddr = profile->shared_buffer.gpuaddr;
+ unsigned int ib_offset = head + SIZE_DATA(count) + SIZE_PREIB(count);
+ unsigned int data_offset = head * sizeof(unsigned int);
+
+ ibcmds = ib_offset + ((unsigned int *) profile->shared_buffer.hostptr);
+ start = ibcmds;
+ /* end of profile identifier */
+ IB_END(ibcmds);
+
+ /* skip over pre_ib preamble */
+ data_offset += sizeof(unsigned int) * 3;
+
+ /* loop for each countable assigned */
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ /* skip over pre_ib counter data */
+ data_offset += sizeof(unsigned int) * 3;
+
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset,
+ gpuaddr + data_offset, data_offset);
+ IB_CMD(ibcmds, CP_REG_TO_MEM, entry->offset + 1,
+ gpuaddr + data_offset, data_offset);
+ }
+
+ /* end of profile identifier */
+ IB_END(ibcmds);
+
+ _create_ib_ref(&profile->shared_buffer, rbcmds,
+ ibcmds - start, ib_offset * sizeof(unsigned int));
+}
+
+static bool shared_buf_empty(struct adreno_profile *profile)
+{
+ if (profile->shared_buffer.hostptr == NULL ||
+ profile->shared_buffer.size == 0)
+ return true;
+
+ if (profile->shared_head == profile->shared_tail)
+ return true;
+
+ return false;
+}
+
+static inline void shared_buf_inc(unsigned int max_size,
+ unsigned int *offset, size_t inc)
+{
+ *offset = (*offset + inc) % max_size;
+}
+
+static inline void log_buf_wrapcnt(unsigned int cnt, unsigned int *off)
+{
+ *off = (*off + cnt) % ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS;
+}
+
+static inline void log_buf_wrapinc(unsigned int *profile_log_buffer,
+ unsigned int **ptr)
+{
+ *ptr += 1;
+ if (*ptr >= (profile_log_buffer +
+ ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS))
+ *ptr -= ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS;
+}
+
+static inline unsigned int log_buf_available(struct adreno_profile *profile,
+ unsigned int *head_ptr)
+{
+ unsigned int tail, head;
+
+ tail = (unsigned int) profile->log_tail -
+ (unsigned int) profile->log_buffer;
+ head = (unsigned int) head_ptr - (unsigned int) profile->log_buffer;
+ if (tail > head)
+ return (tail - head) / sizeof(unsigned int);
+ else
+ return ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS - ((head - tail) /
+ sizeof(unsigned int));
+}
+
+static inline unsigned int shared_buf_available(struct adreno_profile *profile)
+{
+ if (profile->shared_tail > profile->shared_head)
+ return profile->shared_tail - profile->shared_head;
+ else
+ return profile->shared_size -
+ (profile->shared_head - profile->shared_tail);
+}
+
+static struct adreno_profile_assigns_list *_find_assignment_by_offset(
+ struct adreno_profile *profile, unsigned int offset)
+{
+ struct adreno_profile_assigns_list *entry;
+
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ if (entry->offset == offset)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static bool _in_assignments_list(struct adreno_profile *profile,
+ unsigned int groupid, unsigned int countable)
+{
+ struct adreno_profile_assigns_list *entry;
+
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ if (entry->groupid == groupid && entry->countable ==
+ countable)
+ return true;
+ }
+
+ return false;
+}
+
+static bool _add_to_assignments_list(struct adreno_profile *profile,
+ const char *str, unsigned int groupid, unsigned int countable,
+ unsigned int offset)
+{
+ struct adreno_profile_assigns_list *entry;
+
+ /* first make sure we can alloc memory */
+ entry = kmalloc(sizeof(struct adreno_profile_assigns_list), GFP_KERNEL);
+ if (!entry)
+ return false;
+
+ list_add_tail(&entry->list, &profile->assignments_list);
+
+ entry->countable = countable;
+ entry->groupid = groupid;
+ entry->offset = offset;
+
+ strlcpy(entry->name, str, sizeof(entry->name));
+
+ profile->assignment_count++;
+
+ return true;
+}
+
+static void check_close_profile(struct adreno_profile *profile)
+{
+ if (profile->log_buffer == NULL)
+ return;
+
+ if (!adreno_profile_enabled(profile) && shared_buf_empty(profile)) {
+ if (profile->log_head == profile->log_tail) {
+ vfree(profile->log_buffer);
+ profile->log_buffer = NULL;
+ profile->log_head = NULL;
+ profile->log_tail = NULL;
+ }
+ }
+}
+
+static bool results_available(struct kgsl_device *device,
+ unsigned int *shared_buf_tail)
+{
+ unsigned int global_eop;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int off = profile->shared_tail;
+ unsigned int *shared_ptr = (unsigned int *)
+ profile->shared_buffer.hostptr;
+ unsigned int ts, cnt;
+ int ts_cmp;
+
+ /*
+ * If shared_buffer empty or Memstore EOP timestamp is less than
+ * outstanding counter buffer timestamps then no results available
+ */
+ if (shared_buf_empty(profile))
+ return false;
+
+ global_eop = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+ do {
+ cnt = *(shared_ptr + off + 1);
+ if (cnt == 0)
+ return false;
+
+ ts = *(shared_ptr + off);
+ ts_cmp = timestamp_cmp(ts, global_eop);
+ if (ts_cmp >= 0) {
+ *shared_buf_tail = off;
+ if (off == profile->shared_tail)
+ return false;
+ else
+ return true;
+ }
+ shared_buf_inc(profile->shared_size, &off,
+ SIZE_SHARED_ENTRY(cnt));
+ } while (off != profile->shared_head);
+
+ *shared_buf_tail = profile->shared_head;
+
+ return true;
+}
+
+static void transfer_results(struct kgsl_device *device,
+ unsigned int shared_buf_tail)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int buf_off;
+ unsigned int ts, cnt, ctxt_id, pid, tid, client_type;
+ unsigned int *ptr = (unsigned int *) profile->shared_buffer.hostptr;
+ struct kgsl_context *k_ctxt;
+ unsigned int *log_ptr, *log_base;
+ struct adreno_profile_assigns_list *assigns_list;
+ int i;
+
+ log_ptr = profile->log_head;
+ log_base = profile->log_buffer;
+ if (log_ptr == NULL)
+ return;
+
+ /*
+ * go through counter buffers and format for write into log_buffer
+ * if log buffer doesn't have space just overwrite it circularly
+ * shared_buf is guaranteed to not wrap within an entry so can use
+ * ptr increment
+ */
+ while (profile->shared_tail != shared_buf_tail) {
+ buf_off = profile->shared_tail;
+ /*
+ * format: timestamp, count, context_id
+ * count entries: pc_off, pc_start, pc_end
+ */
+ ts = *(ptr + buf_off);
+ cnt = *(ptr + buf_off + 1);
+ ctxt_id = *(ptr + buf_off + 2);
+ /*
+ * if entry overwrites the tail of log_buffer then adjust tail
+ * ptr to make room for the new entry, discarding old entry
+ */
+ while (log_buf_available(profile, log_ptr) <=
+ SIZE_LOG_ENTRY(cnt)) {
+ unsigned int size_tail, boff;
+ size_tail = SIZE_LOG_ENTRY(0xffff &
+ *(profile->log_tail));
+ boff = ((unsigned int) profile->log_tail -
+ (unsigned int) log_base) / sizeof(unsigned int);
+ log_buf_wrapcnt(size_tail, &boff);
+ profile->log_tail = log_base + boff;
+ }
+
+ /* find Adreno ctxt struct */
+ k_ctxt = idr_find(&device->context_idr, ctxt_id);
+ if (k_ctxt == NULL) {
+ shared_buf_inc(profile->shared_size,
+ &profile->shared_tail,
+ SIZE_SHARED_ENTRY(cnt));
+ continue;
+ } else {
+ struct adreno_context *adreno_ctxt =
+ ADRENO_CONTEXT(k_ctxt);
+ pid = k_ctxt->pid; /* pid */
+ tid = k_ctxt->tid; /* tid creator */
+ client_type = adreno_ctxt->type << 16;
+ }
+
+ buf_off += 3;
+ *log_ptr = client_type | cnt;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = pid;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = tid;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = ctxt_id;
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = ts;
+ log_buf_wrapinc(log_base, &log_ptr);
+
+ for (i = 0; i < cnt; i++) {
+ assigns_list = _find_assignment_by_offset(
+ profile, *(ptr + buf_off++));
+ if (assigns_list == NULL) {
+ *log_ptr = (unsigned int) -1;
+ goto err;
+ } else {
+ *log_ptr = assigns_list->groupid << 16 |
+ (assigns_list->countable & 0xffff);
+ }
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr start hi */
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr start lo */
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr end hi */
+ log_buf_wrapinc(log_base, &log_ptr);
+ *log_ptr = *(ptr + buf_off++); /* perf cntr end lo */
+ log_buf_wrapinc(log_base, &log_ptr);
+
+ }
+ shared_buf_inc(profile->shared_size,
+ &profile->shared_tail,
+ SIZE_SHARED_ENTRY(cnt));
+
+ }
+ profile->log_head = log_ptr;
+ return;
+err:
+ /* reset head/tail to same on error in hopes we work correctly later */
+ profile->log_head = profile->log_tail;
+}
+
+static int profile_enable_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ mutex_lock(&device->mutex);
+ *val = adreno_profile_enabled(&adreno_dev->profile);
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
+static int profile_enable_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ mutex_unlock(&device->mutex);
+ return 0;
+ }
+
+ profile->enabled = val;
+
+ check_close_profile(profile);
+
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
+static ssize_t profile_assignments_read(struct file *filep,
+ char __user *ubuf, size_t max, loff_t *ppos)
+{
+ struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct adreno_profile_assigns_list *entry;
+ int len = 0, max_size = PAGE_SIZE;
+ char *buf, *pos;
+ ssize_t size = 0;
+
+ if (adreno_is_a2xx(adreno_dev))
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+
+ buf = kmalloc(max_size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&device->mutex);
+ return -ENOMEM;
+ }
+
+ pos = buf;
+
+ /* copy all assignments from list to str */
+ list_for_each_entry(entry, &profile->assignments_list, list) {
+ len = snprintf(pos, max_size, ASSIGNS_STR_FORMAT,
+ entry->name, entry->countable);
+
+ max_size -= len;
+ pos += len;
+ }
+
+ size = simple_read_from_buffer(ubuf, max, ppos, buf,
+ strlen(buf));
+
+ kfree(buf);
+
+ mutex_unlock(&device->mutex);
+ return size;
+}
+
+static void _remove_assignment(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable)
+{
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct adreno_profile_assigns_list *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &profile->assignments_list, list) {
+ if (entry->groupid == groupid &&
+ entry->countable == countable) {
+ list_del(&entry->list);
+
+ profile->assignment_count--;
+
+ kfree(entry);
+
+ /* remove from perf counter allocation */
+ adreno_perfcounter_put(adreno_dev, groupid, countable,
+ PERFCOUNTER_FLAG_KERNEL);
+ }
+ }
+}
+
+static void _add_assignment(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable)
+{
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int offset;
+ const char *name = NULL;
+
+ name = adreno_perfcounter_get_name(adreno_dev, groupid);
+ if (!name)
+ return;
+
+ /* if already in assigned list skip it */
+ if (_in_assignments_list(profile, groupid, countable))
+ return;
+
+ /* add to perf counter allocation, if fail skip it */
+ if (adreno_perfcounter_get(adreno_dev, groupid,
+ countable, &offset, PERFCOUNTER_FLAG_NONE))
+ return;
+
+ /* add to assignments list, put counter back if error */
+ if (!_add_to_assignments_list(profile, name, groupid,
+ countable, offset))
+ adreno_perfcounter_put(adreno_dev, groupid,
+ countable, PERFCOUNTER_FLAG_KERNEL);
+}
+
+static char *_parse_next_assignment(struct adreno_device *adreno_dev,
+ char *str, int *groupid, int *countable, bool *remove)
+{
+ char *groupid_str, *countable_str;
+ int ret;
+
+ *groupid = -EINVAL;
+ *countable = -EINVAL;
+ *remove = false;
+
+ /* remove spaces */
+ while (*str == ' ')
+ str++;
+
+ /* check if it's a remove assignment */
+ if (*str == '-') {
+ *remove = true;
+ str++;
+ }
+
+ /* get the groupid string */
+ groupid_str = str;
+ while (*str != ':') {
+ if (*str == '\0')
+ return NULL;
+ *str = tolower(*str);
+ str++;
+ }
+ if (groupid_str == str)
+ return NULL;
+
+ *str = '\0';
+ str++;
+
+ /* get the countable string */
+ countable_str = str;
+ while (*str != ' ' && *str != '\0')
+ str++;
+ if (countable_str == str)
+ return NULL;
+
+ *str = '\0';
+ str++;
+
+ /* set results */
+ *groupid = adreno_perfcounter_get_groupid(adreno_dev,
+ groupid_str);
+ if (*groupid < 0)
+ return NULL;
+ ret = kstrtou32(countable_str, 10, countable);
+ if (ret)
+ return NULL;
+
+ return str;
+}
+
+static ssize_t profile_assignments_write(struct file *filep,
+ const char __user *user_buf, size_t len, loff_t *off)
+{
+ struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ size_t size = 0;
+ char *buf, *pbuf;
+ bool remove_assignment = false;
+ int groupid, countable;
+
+ if (len >= PAGE_SIZE || len == 0)
+ return -EINVAL;
+
+ if (adreno_is_a2xx(adreno_dev))
+ return -ENOSPC;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_profile_enabled(profile)) {
+ size = -EINVAL;
+ goto error_unlock;
+ }
+
+ kgsl_active_count_get(device);
+
+ /*
+ * When adding/removing assignments, ensure that the GPU is done with
+ * all its work. This helps to synchronize the work flow to the
+ * GPU and avoid racy conditions.
+ */
+ if (adreno_idle(device)) {
+ size = -EINVAL;
+ goto error_put;
+ }
+
+ /* clear all shared buffer results */
+ adreno_profile_process_results(device);
+
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf) {
+ size = -EINVAL;
+ goto error_put;
+ }
+
+ pbuf = buf;
+
+ /* clear the log buffer */
+ if (profile->log_buffer != NULL) {
+ profile->log_head = profile->log_buffer;
+ profile->log_tail = profile->log_buffer;
+ }
+
+ if (copy_from_user(buf, user_buf, len)) {
+ size = -EFAULT;
+ goto error_free;
+ }
+
+ /* for sanity and parsing, ensure it is null terminated */
+ buf[len] = '\0';
+
+ /* parse file buf and add(remove) to(from) appropriate lists */
+ while (1) {
+ pbuf = _parse_next_assignment(adreno_dev, pbuf, &groupid,
+ &countable, &remove_assignment);
+ if (pbuf == NULL)
+ break;
+
+ if (remove_assignment)
+ _remove_assignment(adreno_dev, groupid, countable);
+ else
+ _add_assignment(adreno_dev, groupid, countable);
+ }
+
+ size = len;
+
+error_free:
+ kfree(buf);
+error_put:
+ kgsl_active_count_put(device);
+error_unlock:
+ mutex_unlock(&device->mutex);
+ return size;
+}
+
+static int _pipe_print_pending(char *ubuf, size_t max)
+{
+ loff_t unused = 0;
+ char str[] = "Operation Would Block!";
+
+ return simple_read_from_buffer(ubuf, max,
+ &unused, str, strlen(str));
+}
+
+static int _pipe_print_results(struct adreno_device *adreno_dev,
+ char *ubuf, size_t max)
+{
+ struct adreno_profile *profile = &adreno_dev->profile;
+ const char *grp_name;
+ char *usr_buf = ubuf;
+ unsigned int *log_ptr = NULL;
+ int len, i;
+ int status = 0;
+ ssize_t size, total_size = 0;
+ unsigned int cnt, api_type, ctxt_id, pid, tid, ts, cnt_reg;
+ unsigned long long pc_start, pc_end;
+ const char *api_str;
+ char format_space;
+ loff_t unused = 0;
+ char pipe_hdr_buf[51]; /* 4 uint32 + 5 space + 5 API type + '\0' */
+ char pipe_cntr_buf[63]; /* 2 uint64 + 1 uint32 + 4 spaces + 8 group */
+
+ /* convert unread entries to ASCII, copy to user-space */
+ log_ptr = profile->log_tail;
+
+ do {
+ cnt = *log_ptr & 0xffff;
+ if (SIZE_PIPE_ENTRY(cnt) > max) {
+ status = 0;
+ goto err;
+ }
+ if ((max - (usr_buf - ubuf)) < SIZE_PIPE_ENTRY(cnt))
+ break;
+
+ api_type = *log_ptr >> 16;
+ api_str = get_api_type_str(api_type);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ pid = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ tid = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ ctxt_id = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ ts = *log_ptr;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ len = snprintf(pipe_hdr_buf, sizeof(pipe_hdr_buf) - 1,
+ "%u %u %u %.5s %u ",
+ pid, tid, ctxt_id, api_str, ts);
+ size = simple_read_from_buffer(usr_buf,
+ max - (usr_buf - ubuf),
+ &unused, pipe_hdr_buf, len);
+ if (size < 0) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ unused = 0;
+ usr_buf += size;
+ total_size += size;
+
+ for (i = 0; i < cnt; i++) {
+ grp_name = adreno_perfcounter_get_name(
+ adreno_dev, *log_ptr >> 16);
+ if (grp_name == NULL) {
+ status = -EFAULT;
+ goto err;
+ }
+
+ if (i == cnt - 1)
+ format_space = '\n';
+ else
+ format_space = ' ';
+
+ cnt_reg = *log_ptr & 0xffff;
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ pc_start = *((unsigned long long *) log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ pc_end = *((unsigned long long *) log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+ log_buf_wrapinc(profile->log_buffer, &log_ptr);
+
+ len = snprintf(pipe_cntr_buf,
+ sizeof(pipe_cntr_buf) - 1,
+ "%.8s:%u %llu %llu%c",
+ grp_name, cnt_reg, pc_start,
+ pc_end, format_space);
+
+ size = simple_read_from_buffer(usr_buf,
+ max - (usr_buf - ubuf),
+ &unused, pipe_cntr_buf, len);
+ if (size < 0) {
+ status = size;
+ goto err;
+ }
+ unused = 0;
+ usr_buf += size;
+ total_size += size;
+ }
+ } while (log_ptr != profile->log_head);
+
+ status = total_size;
+err:
+ profile->log_tail = log_ptr;
+
+ return status;
+}
+
+static int profile_pipe_print(struct file *filep, char __user *ubuf,
+ size_t max, loff_t *ppos)
+{
+ struct kgsl_device *device = (struct kgsl_device *) filep->private_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ char *usr_buf = ubuf;
+ int status = 0;
+
+ if (adreno_is_a2xx(adreno_dev))
+ return 0;
+
+ /*
+ * this file not seekable since it only supports streaming, ignore
+ * ppos <> 0
+ */
+ /*
+ * format <pid> <tid> <context id> <cnt<<16 | client type> <timestamp>
+ * for each perf counter <cntr_reg_off> <start hi & lo> <end hi & low>
+ */
+
+ mutex_lock(&device->mutex);
+
+ while (1) {
+ /* process any results that are available into the log_buffer */
+ status = adreno_profile_process_results(device);
+ if (status > 0) {
+ /* if we have results, print them and exit */
+ status = _pipe_print_results(adreno_dev, usr_buf, max);
+ break;
+ }
+
+ /* there are no unread results, act accordingly */
+ if (filep->f_flags & O_NONBLOCK) {
+ if (profile->shared_tail != profile->shared_head) {
+ status = _pipe_print_pending(usr_buf, max);
+ break;
+ } else {
+ status = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&device->mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ mutex_lock(&device->mutex);
+
+ if (signal_pending(current)) {
+ status = 0;
+ break;
+ }
+ }
+
+ check_close_profile(profile);
+ mutex_unlock(&device->mutex);
+
+ return status;
+}
+
+static int profile_groups_print(struct seq_file *s, void *unused)
+{
+ struct kgsl_device *device = (struct kgsl_device *) s->private;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ int i, j, used;
+
+ /* perfcounter list not allowed on a2xx */
+ if (adreno_is_a2xx(adreno_dev))
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+
+ for (i = 0; i < counters->group_count; ++i) {
+ group = &(counters->groups[i]);
+ /* get number of counters used for this group */
+ used = 0;
+ for (j = 0; j < group->reg_count; j++) {
+ if (group->regs[j].countable !=
+ KGSL_PERFCOUNTER_NOT_USED)
+ used++;
+ }
+
+ seq_printf(s, "%s %d %d\n", group->name,
+ group->reg_count, used);
+ }
+
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
+static int profile_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, profile_groups_print, inode->i_private);
+}
+
+static const struct file_operations profile_groups_fops = {
+ .owner = THIS_MODULE,
+ .open = profile_groups_open,
+ .read = seq_read,
+ .llseek = noop_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations profile_pipe_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = profile_pipe_print,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations profile_assignments_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = profile_assignments_read,
+ .write = profile_assignments_write,
+ .llseek = noop_llseek,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(profile_enable_fops,
+ profile_enable_get,
+ profile_enable_set, "%llu\n");
+
+void adreno_profile_init(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct dentry *profile_dir;
+ int ret;
+
+ profile->enabled = false;
+
+ /* allocate shared_buffer, which includes pre_ib and post_ib */
+ profile->shared_size = ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS;
+ ret = kgsl_allocate_contiguous(&profile->shared_buffer,
+ profile->shared_size * sizeof(unsigned int));
+ if (ret) {
+ profile->shared_buffer.hostptr = NULL;
+ profile->shared_size = 0;
+ }
+
+ INIT_LIST_HEAD(&profile->assignments_list);
+
+ /* Create perf counter debugfs */
+ profile_dir = debugfs_create_dir("profiling", device->d_debugfs);
+ if (IS_ERR(profile_dir))
+ return;
+
+ debugfs_create_file("enable", 0644, profile_dir, device,
+ &profile_enable_fops);
+ debugfs_create_file("blocks", 0444, profile_dir, device,
+ &profile_groups_fops);
+ debugfs_create_file("pipe", 0444, profile_dir, device,
+ &profile_pipe_fops);
+ debugfs_create_file("assignments", 0644, profile_dir, device,
+ &profile_assignments_fops);
+}
+
+void adreno_profile_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ struct adreno_profile_assigns_list *entry, *tmp;
+
+ profile->enabled = false;
+ vfree(profile->log_buffer);
+ profile->log_buffer = NULL;
+ profile->log_head = NULL;
+ profile->log_tail = NULL;
+ profile->shared_head = 0;
+ profile->shared_tail = 0;
+ kgsl_sharedmem_free(&profile->shared_buffer);
+ profile->shared_buffer.hostptr = NULL;
+ profile->shared_size = 0;
+
+ profile->assignment_count = 0;
+
+ list_for_each_entry_safe(entry, tmp, &profile->assignments_list, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+int adreno_profile_process_results(struct kgsl_device *device)
+{
+
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ unsigned int shared_buf_tail = profile->shared_tail;
+
+ if (!results_available(device, &shared_buf_tail)) {
+ check_close_profile(profile);
+ return 0;
+ }
+
+ /* allocate profile_log_buffer if needed */
+ if (profile->log_buffer == NULL) {
+ profile->log_buffer = vmalloc(ADRENO_PROFILE_LOG_BUF_SIZE);
+ if (profile->log_buffer == NULL)
+ return -ENOMEM;
+ profile->log_tail = profile->log_buffer;
+ profile->log_head = profile->log_buffer;
+ }
+
+ /*
+ * transfer retired results to log_buffer
+ * update shared_buffer tail ptr
+ */
+ transfer_results(device, shared_buf_tail);
+
+ /* check for any cleanup */
+ check_close_profile(profile);
+
+ return 1;
+}
+
+void adreno_profile_preib_processing(struct kgsl_device *device,
+ unsigned int context_id, unsigned int *cmd_flags,
+ unsigned int **rbptr, unsigned int *cmds_gpu)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ int count = profile->assignment_count;
+ unsigned int entry_head = profile->shared_head;
+ unsigned int *shared_ptr;
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int rbcmds[3] = { cp_nop_packet(2),
+ KGSL_NOP_IB_IDENTIFIER, KGSL_NOP_IB_IDENTIFIER };
+
+ *cmd_flags &= ~KGSL_CMD_FLAGS_PROFILE;
+
+ if (!adreno_profile_assignments_ready(profile))
+ goto done;
+
+ /*
+ * check if space available, include the post_ib in space available
+	 * check so we don't have to handle undoing the pre_ib insertion in
+	 * the ringbuffer in the case where only the post_ib lacks enough space
+ */
+ if (SIZE_SHARED_ENTRY(count) >= shared_buf_available(profile))
+ goto done;
+
+ if (entry_head + SIZE_SHARED_ENTRY(count) > profile->shared_size) {
+ /* entry_head would wrap, start entry_head at 0 in buffer */
+ entry_head = 0;
+ profile->shared_size = profile->shared_head;
+ profile->shared_head = 0;
+ if (profile->shared_tail == profile->shared_size)
+ profile->shared_tail = 0;
+
+ /* recheck space available */
+ if (SIZE_SHARED_ENTRY(count) >= shared_buf_available(profile))
+ goto done;
+ }
+
+ /* zero out the counter area of shared_buffer entry_head */
+ shared_ptr = entry_head + ((unsigned int *)
+ profile->shared_buffer.hostptr);
+ memset(shared_ptr, 0, SIZE_SHARED_ENTRY(count) * sizeof(unsigned int));
+
+ /* reserve space for the pre ib shared buffer */
+ shared_buf_inc(profile->shared_size, &profile->shared_head,
+ SIZE_SHARED_ENTRY(count));
+
+ /* create the shared ibdesc */
+ _build_pre_ib_cmds(profile, rbcmds, entry_head,
+ rb->global_ts + 1, context_id);
+
+ /* set flag to sync with post ib commands */
+ *cmd_flags |= KGSL_CMD_FLAGS_PROFILE;
+
+done:
+ /* write the ibdesc to the ringbuffer */
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[0]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[1]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[2]);
+}
+
+void adreno_profile_postib_processing(struct kgsl_device *device,
+ unsigned int *cmd_flags, unsigned int **rbptr,
+ unsigned int *cmds_gpu)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_profile *profile = &adreno_dev->profile;
+ int count = profile->assignment_count;
+ unsigned int entry_head = profile->shared_head -
+ SIZE_SHARED_ENTRY(count);
+ unsigned int rbcmds[3] = { cp_nop_packet(2),
+ KGSL_NOP_IB_IDENTIFIER, KGSL_NOP_IB_IDENTIFIER };
+
+ if (!adreno_profile_assignments_ready(profile))
+ goto done;
+
+ if (!(*cmd_flags & KGSL_CMD_FLAGS_PROFILE))
+ goto done;
+
+ /* create the shared ibdesc */
+ _build_post_ib_cmds(profile, rbcmds, entry_head);
+
+done:
+ /* write the ibdesc to the ringbuffer */
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[0]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[1]);
+ GSL_RB_WRITE(device, (*rbptr), (*cmds_gpu), rbcmds[2]);
+
+ /* reset the sync flag */
+ *cmd_flags &= ~KGSL_CMD_FLAGS_PROFILE;
+}
+
diff --git a/drivers/gpu/msm/adreno_profile.h b/drivers/gpu/msm/adreno_profile.h
new file mode 100644
index 0000000..d91b09b
--- /dev/null
+++ b/drivers/gpu/msm/adreno_profile.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PROFILE_H
+#define __ADRENO_PROFILE_H
+#include <linux/seq_file.h>
+
+/**
+ * struct adreno_profile_assigns_list: linked list for assigned perf counters
+ * @list: linkage for nodes in list
+ * @name: group name or GPU name
+ * @groupid: group id
+ * @countable: countable assigned to perfcounter
+ * @offset: perfcounter register address offset
+ */
+struct adreno_profile_assigns_list {
+ struct list_head list;
+ char name[25];
+ unsigned int groupid;
+ unsigned int countable;
+ unsigned int offset; /* LO offset, HI offset is +1 */
+};
+
+struct adreno_profile {
+ struct list_head assignments_list; /* list of all assignments */
+ unsigned int assignment_count; /* Number of assigned counters */
+ unsigned int *log_buffer;
+ unsigned int *log_head;
+ unsigned int *log_tail;
+ bool enabled;
+ /* counter, pre_ib, and post_ib held in one large circular buffer
+ * shared between kgsl and GPU
+ * counter entry 0
+ * pre_ib entry 0
+ * post_ib entry 0
+ * ...
+ * counter entry N
+ * pre_ib entry N
+ * post_ib entry N
+ */
+ struct kgsl_memdesc shared_buffer;
+ unsigned int shared_head;
+ unsigned int shared_tail;
+ unsigned int shared_size;
+};
+
+#define ADRENO_PROFILE_SHARED_BUF_SIZE_DWORDS (48 * 4096 / sizeof(uint))
+/* sized @ 48 pages should allow for over 50 outstanding IBs minimum, 1755 max*/
+
+#define ADRENO_PROFILE_LOG_BUF_SIZE (1024 * 920)
+/* sized for 1024 entries of fully assigned 45 counters in log buf, 230 pages*/
+#define ADRENO_PROFILE_LOG_BUF_SIZE_DWORDS (ADRENO_PROFILE_LOG_BUF_SIZE / \
+ sizeof(unsigned int))
+
+void adreno_profile_init(struct kgsl_device *device);
+void adreno_profile_close(struct kgsl_device *device);
+int adreno_profile_process_results(struct kgsl_device *device);
+void adreno_profile_preib_processing(struct kgsl_device *device,
+ unsigned int context_id, unsigned int *cmd_flags,
+ unsigned int **rbptr, unsigned int *cmds_gpu);
+void adreno_profile_postib_processing(struct kgsl_device *device,
+ unsigned int *cmd_flags, unsigned int **rbptr,
+ unsigned int *cmds_gpu);
+
+static inline bool adreno_profile_enabled(struct adreno_profile *profile)
+{
+ return profile->enabled;
+}
+
+static inline bool adreno_profile_has_assignments(
+ struct adreno_profile *profile)
+{
+ return list_empty(&profile->assignments_list) ? false : true;
+}
+
+static inline bool adreno_profile_assignments_ready(
+ struct adreno_profile *profile)
+{
+ return adreno_profile_enabled(profile) &&
+ adreno_profile_has_assignments(profile);
+}
+
+#endif
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index e03f708..188ef62 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -584,6 +584,19 @@
unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
unsigned int timestamp;
+ bool profile_ready;
+
+ /*
+ * If in stream ib profiling is enabled and there are counters
+ * assigned, then space needs to be reserved for profiling. This
+	 * space in the ringbuffer is always consumed (it might be filled with
+	 * NOPs in the error case). profile_ready needs to be consistent through
+ * the _addcmds call since it is allocating additional ringbuffer
+ * command space.
+ */
+ profile_ready = !adreno_is_a2xx(adreno_dev) &&
+ adreno_profile_assignments_ready(&adreno_dev->profile) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
/*
* if the context was not created with per context timestamp
@@ -632,6 +645,9 @@
if (flags & KGSL_CMD_FLAGS_EOF)
total_sizedwords += 2;
+ if (profile_ready)
+ total_sizedwords += 6; /* space for pre_ib and post_ib */
+
ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
if (!ringcmds)
return -ENOSPC;
@@ -648,6 +664,11 @@
KGSL_CMD_INTERNAL_IDENTIFIER);
}
+ /* Add any IB required for profiling if it is enabled */
+ if (profile_ready)
+ adreno_profile_preib_processing(rb->device, context->base.id,
+ &flags, &ringcmds, &rcmd_gpu);
+
/* always increment the global timestamp. once. */
rb->global_ts++;
@@ -714,6 +735,12 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00);
}
+ /* Add any postIB required for profiling if it is enabled and has
+ assigned counters */
+ if (profile_ready)
+ adreno_profile_postib_processing(rb->device, &flags,
+ &ringcmds, &rcmd_gpu);
+
/*
* end-of-pipeline timestamp. If per context timestamps is not
* enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
@@ -1056,6 +1083,9 @@
goto done;
}
+ /* process any profiling results that are available into the log_buf */
+ adreno_profile_process_results(device);
+
/*When preamble is enabled, the preamble buffer with state restoration
commands are stored in the first node of the IB chain. We can skip that
if a context switch hasn't occured */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index a066004..fdd19e9 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -499,7 +499,8 @@
context->device = dev_priv->device;
context->pagetable = dev_priv->process_priv->pagetable;
context->dev_priv = dev_priv;
- context->pid = dev_priv->process_priv->pid;
+ context->pid = task_tgid_nr(current);
+ context->tid = task_pid_nr(current);
ret = kgsl_sync_timeline_create(context);
if (ret)
@@ -812,7 +813,7 @@
list_del(&private->list);
mutex_unlock(&kgsl_driver.process_mutex);
- if (private->kobj.ktype)
+ if (private->kobj.state_in_sysfs)
kgsl_process_uninit_sysfs(private);
if (private->debug_root)
debugfs_remove_recursive(private->debug_root);
@@ -926,21 +927,23 @@
pt_name = task_tgid_nr(current);
private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name);
- if (private->pagetable == NULL) {
- mutex_unlock(&private->process_private_mutex);
- kgsl_put_process_private(cur_dev_priv->device,
- private);
- return NULL;
- }
+ if (private->pagetable == NULL)
+ goto error;
}
- kgsl_process_init_sysfs(private);
- kgsl_process_init_debugfs(private);
+ if (kgsl_process_init_sysfs(cur_dev_priv->device, private))
+ goto error;
+ if (kgsl_process_init_debugfs(private))
+ goto error;
done:
mutex_unlock(&private->process_private_mutex);
-
return private;
+
+error:
+ mutex_unlock(&private->process_private_mutex);
+ kgsl_put_process_private(cur_dev_priv->device, private);
+ return NULL;
}
int kgsl_close_device(struct kgsl_device *device)
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 2a77632..9ab8d22 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -323,16 +323,53 @@
.release = single_release,
};
-void
+
+/**
+ * kgsl_process_init_debugfs() - Initialize debugfs for a process
+ * @private: Pointer to process private structure created for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_debugfs() is called at the time of creating the
+ * process struct when a process opens kgsl device for the first time.
+ * The function creates the debugfs files for the process. If debugfs is
+ * disabled in the kernel, we ignore that error and return as successful.
+ */
+int
kgsl_process_init_debugfs(struct kgsl_process_private *private)
{
unsigned char name[16];
+ int ret = 0;
+ struct dentry *dentry;
snprintf(name, sizeof(name), "%d", private->pid);
private->debug_root = debugfs_create_dir(name, proc_d_debugfs);
- debugfs_create_file("mem", 0400, private->debug_root, private,
+
+ if (!private->debug_root)
+ return -EINVAL;
+
+ /*
+ * debugfs_create_dir() and debugfs_create_file() both
+ * return -ENODEV if debugfs is disabled in the kernel.
+ * We make a distinction between these two functions
+ * failing and debugfs being disabled in the kernel.
+ * In the first case, we abort process private struct
+ * creation, in the second we continue without any changes.
+ * So if debugfs is disabled in kernel, return as
+ * success.
+ */
+ dentry = debugfs_create_file("mem", 0400, private->debug_root, private,
&process_mem_fops);
+
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+
+ return ret;
}
void kgsl_core_debugfs_init(void)
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
index ae5601f..b2f137c 100644
--- a/drivers/gpu/msm/kgsl_debugfs.h
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -21,7 +21,7 @@
void kgsl_core_debugfs_init(void);
void kgsl_core_debugfs_close(void);
-void kgsl_device_debugfs_init(struct kgsl_device *device);
+int kgsl_device_debugfs_init(struct kgsl_device *device);
extern struct dentry *kgsl_debugfs_dir;
static inline struct dentry *kgsl_get_debugfs_dir(void)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index eb92c82..09a31c9 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -293,6 +293,7 @@
* @events: list head of pending events for this context
* @events_list: list node for the list of all contexts that have pending events
* @pid: process that owns this context.
+ * @tid: task that created this context.
* @pagefault: flag set if this context caused a pagefault.
* @pagefault_ts: global timestamp of the pagefault, if KGSL_CONTEXT_PAGEFAULT
* is set.
@@ -301,6 +302,7 @@
struct kref refcount;
uint32_t id;
pid_t pid;
+ pid_t tid;
struct kgsl_device_private *dev_priv;
unsigned long priv;
struct kgsl_device *device;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index c56d00d..5950451 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -170,17 +170,32 @@
kobject_put(&private->kobj);
}
-void
-kgsl_process_init_sysfs(struct kgsl_process_private *private)
+/**
+ * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
+ *
+ * @device: Pointer to kgsl device struct
+ * @private: Pointer to the structure for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_sysfs() is called at the time of creating the
+ * process struct when a process opens the kgsl device for the first time.
+ * This function creates the sysfs files for the process.
+ */
+int
+kgsl_process_init_sysfs(struct kgsl_device *device,
+ struct kgsl_process_private *private)
{
unsigned char name[16];
- int i, ret;
+ int i, ret = 0;
snprintf(name, sizeof(name), "%d", private->pid);
- if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
- kgsl_driver.prockobj, name))
- return;
+ ret = kobject_init_and_add(&private->kobj, &ktype_mem_entry,
+ kgsl_driver.prockobj, name);
+
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
/* We need to check the value of sysfs_create_file, but we
@@ -191,6 +206,7 @@
ret = sysfs_create_file(&private->kobj,
&mem_stats[i].max_attr.attr);
}
+ return ret;
}
static int kgsl_drv_memstat_show(struct device *dev,
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 9f84690..3986c61 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -67,7 +67,8 @@
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
-void kgsl_process_init_sysfs(struct kgsl_process_private *private);
+int kgsl_process_init_sysfs(struct kgsl_device *device,
+ struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
int kgsl_sharedmem_init_sysfs(void);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 5379670..b7d7235 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -220,6 +220,15 @@
snprintf(str, size, "%u", kpt->timestamp);
}
+static void kgsl_sync_timeline_release_obj(struct sync_timeline *sync_timeline)
+{
+ /*
+ * Make sure to free the timeline only after destroy flag is set.
+ * This is to avoid further accessing to the timeline from KGSL and
+ * also to catch any unbalanced kref of timeline.
+ */
+ BUG_ON(sync_timeline && (sync_timeline->destroyed != true));
+}
static const struct sync_timeline_ops kgsl_sync_timeline_ops = {
.driver_name = "kgsl-timeline",
.dup = kgsl_sync_pt_dup,
@@ -227,6 +236,7 @@
.compare = kgsl_sync_pt_compare,
.timeline_value_str = kgsl_sync_timeline_value_str,
.pt_value_str = kgsl_sync_pt_value_str,
+ .release_obj = kgsl_sync_timeline_release_obj,
};
int kgsl_sync_timeline_create(struct kgsl_context *context)
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index f0793b2..a453159 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -146,9 +146,10 @@
bool iadc_mode_sel;
struct qpnp_iadc_comp iadc_comp;
struct sensor_device_attribute sens_attr[0];
+ bool skip_auto_calibrations;
};
-struct qpnp_iadc_drv *qpnp_iadc;
+static struct qpnp_iadc_drv *qpnp_iadc;
static int32_t qpnp_iadc_read_reg(uint32_t reg, u8 *data)
{
@@ -635,13 +636,15 @@
struct qpnp_iadc_drv *iadc = qpnp_iadc;
int rc = 0;
- rc = qpnp_iadc_calibrate_for_trim(true);
- if (rc)
- pr_debug("periodic IADC calibration failed\n");
- else
- schedule_delayed_work(&iadc->iadc_work,
- round_jiffies_relative(msecs_to_jiffies
- (QPNP_IADC_CALIB_SECONDS)));
+ if (!iadc->skip_auto_calibrations) {
+ rc = qpnp_iadc_calibrate_for_trim(true);
+ if (rc)
+ pr_debug("periodic IADC calibration failed\n");
+ }
+
+ schedule_delayed_work(&iadc->iadc_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (QPNP_IADC_CALIB_SECONDS)));
return;
}
@@ -731,9 +734,11 @@
if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
iadc->die_temp = result_pmic_therm.physical;
- rc = qpnp_iadc_calibrate_for_trim(true);
- if (rc)
- pr_err("periodic IADC calibration failed\n");
+ if (!iadc->skip_auto_calibrations) {
+ rc = qpnp_iadc_calibrate_for_trim(true);
+ if (rc)
+ pr_err("IADC calibration failed rc = %d\n", rc);
+ }
}
return rc;
@@ -833,6 +838,30 @@
}
EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
+int qpnp_iadc_skip_calibration(void)
+{
+ struct qpnp_iadc_drv *iadc = qpnp_iadc;
+
+ if (!iadc || !iadc->iadc_initialized)
+ return -EPROBE_DEFER;
+
+ iadc->skip_auto_calibrations = true;
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
+
+int qpnp_iadc_resume_calibration(void)
+{
+ struct qpnp_iadc_drv *iadc = qpnp_iadc;
+
+ if (!iadc || !iadc->iadc_initialized)
+ return -EPROBE_DEFER;
+
+ iadc->skip_auto_calibrations = false;
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
+
int32_t qpnp_iadc_vadc_sync_read(
enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index 8de6b1e..25228a6 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -490,6 +490,7 @@
{
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
char txbuf[2], i;
+ int err;
if (data->loading_fw) {
dev_info(dev, "Firmware loading in process...\n");
@@ -517,20 +518,58 @@
ft5x06_i2c_write(data->client, txbuf, sizeof(txbuf));
}
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(false);
+ if (err) {
+ dev_err(dev, "power off failed");
+ goto pwr_off_fail;
+ }
+ } else {
+ err = ft5x06_power_on(data, false);
+ if (err) {
+ dev_err(dev, "power off failed");
+ goto pwr_off_fail;
+ }
+ }
+
data->suspended = true;
return 0;
+
+pwr_off_fail:
+ if (gpio_is_valid(data->pdata->reset_gpio)) {
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
+ msleep(FT_RESET_DLY);
+ gpio_set_value_cansleep(data->pdata->reset_gpio, 1);
+ }
+ enable_irq(data->client->irq);
+ return err;
}
static int ft5x06_ts_resume(struct device *dev)
{
struct ft5x06_ts_data *data = dev_get_drvdata(dev);
+ int err;
if (!data->suspended) {
dev_info(dev, "Already in awake state\n");
return 0;
}
+ if (data->pdata->power_on) {
+ err = data->pdata->power_on(true);
+ if (err) {
+ dev_err(dev, "power on failed");
+ return err;
+ }
+ } else {
+ err = ft5x06_power_on(data, true);
+ if (err) {
+ dev_err(dev, "power on failed");
+ return err;
+ }
+ }
+
if (gpio_is_valid(data->pdata->reset_gpio)) {
gpio_set_value_cansleep(data->pdata->reset_gpio, 0);
msleep(FT_RESET_DLY);
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.c b/drivers/media/platform/msm/wfd/vsg-subdev.c
index 6ffaffa..0f2fbbb 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.c
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.c
@@ -424,7 +424,8 @@
struct timespec diff = timespec_sub(buf_info->time,
context->last_buffer->time);
struct timespec temp = ns_to_timespec(
- context->frame_interval);
+ context->frame_interval -
+ context->frame_interval_variance);
if (timespec_compare(&diff, &temp) >= 0)
push = true;
@@ -633,6 +634,61 @@
return 0;
}
+static long vsg_set_frame_interval_variance(struct v4l2_subdev *sd, void *arg)
+{
+ struct vsg_context *context = NULL;
+ int64_t variance;
+
+ if (!arg || !sd) {
+ WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
+ return -EINVAL;
+ }
+
+ context = (struct vsg_context *)sd->dev_priv;
+ variance = *(int64_t *)arg;
+
+ if (variance < 0 || variance > 100) {
+ WFD_MSG_ERR("ERROR, invalid variance %lld%% into %s\n",
+ variance, __func__);
+ return -EINVAL;
+ } else if (context->mode == VSG_MODE_CFR) {
+ WFD_MSG_ERR("Setting FPS variance not supported in CFR mode\n");
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&context->mutex);
+
+ /* Convert from percentage to a value in nano seconds */
+ variance *= context->frame_interval;
+ do_div(variance, 100);
+
+ context->frame_interval_variance = variance;
+ mutex_unlock(&context->mutex);
+
+ return 0;
+}
+
+static long vsg_get_frame_interval_variance(struct v4l2_subdev *sd, void *arg)
+{
+ struct vsg_context *context = NULL;
+ int64_t variance;
+
+ if (!arg || !sd) {
+ WFD_MSG_ERR("ERROR, invalid arguments into %s\n", __func__);
+ return -EINVAL;
+ }
+
+ context = (struct vsg_context *)sd->dev_priv;
+
+ mutex_lock(&context->mutex);
+ variance = context->frame_interval_variance * 100;
+ do_div(variance, context->frame_interval);
+ *(int64_t *)arg = variance;
+ mutex_unlock(&context->mutex);
+
+ return 0;
+}
+
static long vsg_set_mode(struct v4l2_subdev *sd, void *arg)
{
struct vsg_context *context = NULL;
@@ -702,6 +758,12 @@
case VSG_SET_FRAME_INTERVAL:
rc = vsg_set_frame_interval(sd, arg);
break;
+ case VSG_SET_FRAME_INTERVAL_VARIANCE:
+ rc = vsg_set_frame_interval_variance(sd, arg);
+ break;
+ case VSG_GET_FRAME_INTERVAL_VARIANCE:
+ rc = vsg_get_frame_interval_variance(sd, arg);
+ break;
case VSG_GET_MAX_FRAME_INTERVAL:
rc = vsg_get_max_frame_interval(sd, arg);
break;
diff --git a/drivers/media/platform/msm/wfd/vsg-subdev.h b/drivers/media/platform/msm/wfd/vsg-subdev.h
index f5e4f5d..3347e5b 100644
--- a/drivers/media/platform/msm/wfd/vsg-subdev.h
+++ b/drivers/media/platform/msm/wfd/vsg-subdev.h
@@ -59,7 +59,7 @@
struct vsg_buf_info free_queue, busy_queue;
struct vsg_msg_ops vmops;
/* All time related values below in nanosecs */
- int64_t frame_interval, max_frame_interval;
+ int64_t frame_interval, max_frame_interval, frame_interval_variance;
struct workqueue_struct *work_queue;
struct hrtimer threshold_timer;
struct mutex mutex;
@@ -90,9 +90,11 @@
/* Time related arguments for frame interval ioctls are always in nanosecs*/
#define VSG_SET_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 9, int64_t *)
#define VSG_GET_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 10, int64_t *)
-#define VSG_SET_MAX_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 11, int64_t *)
-#define VSG_GET_MAX_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 12, int64_t *)
-#define VSG_SET_MODE _IOW(VSG_MAGIC_IOCTL, 13, enum vsg_modes *)
+#define VSG_SET_FRAME_INTERVAL_VARIANCE _IOW(VSG_MAGIC_IOCTL, 11, int64_t *)
+#define VSG_GET_FRAME_INTERVAL_VARIANCE _IOR(VSG_MAGIC_IOCTL, 12, int64_t *)
+#define VSG_SET_MAX_FRAME_INTERVAL _IOW(VSG_MAGIC_IOCTL, 13, int64_t *)
+#define VSG_GET_MAX_FRAME_INTERVAL _IOR(VSG_MAGIC_IOCTL, 14, int64_t *)
+#define VSG_SET_MODE _IOW(VSG_MAGIC_IOCTL, 15, enum vsg_modes *)
extern int vsg_init(struct v4l2_subdev *sd, u32 val);
extern long vsg_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
diff --git a/drivers/media/platform/msm/wfd/wfd-ioctl.c b/drivers/media/platform/msm/wfd/wfd-ioctl.c
index 6554947..58e008d 100644
--- a/drivers/media/platform/msm/wfd/wfd-ioctl.c
+++ b/drivers/media/platform/msm/wfd/wfd-ioctl.c
@@ -1124,7 +1124,9 @@
struct wfd_device *wfd_dev = video_drvdata(filp);
struct wfd_inst *inst = file_to_inst(filp);
struct v4l2_qcom_frameskip frameskip;
- int64_t frame_interval, max_frame_interval;
+ int64_t frame_interval = 0,
+ max_frame_interval = 0,
+ frame_interval_variance = 0;
void *extendedmode = NULL;
enum vsg_modes vsg_mode = VSG_MODE_VFR;
enum venc_framerate_modes venc_mode = VENC_MODE_VFR;
@@ -1177,6 +1179,7 @@
goto set_parm_fail;
max_frame_interval = (int64_t)frameskip.maxframeinterval;
+ frame_interval_variance = frameskip.fpsvariance;
vsg_mode = VSG_MODE_VFR;
venc_mode = VENC_MODE_VFR;
@@ -1206,6 +1209,16 @@
goto set_parm_fail;
}
+ if (frame_interval_variance) {
+ rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
+ ioctl, VSG_SET_FRAME_INTERVAL_VARIANCE,
+ &frame_interval_variance);
+ if (rc) {
+ WFD_MSG_ERR("Setting FR variance for VSG failed\n");
+ goto set_parm_fail;
+ }
+ }
+
set_parm_fail:
return rc;
}
diff --git a/drivers/media/radio/radio-iris.c b/drivers/media/radio/radio-iris.c
index 7934234..b9eb8f9 100644
--- a/drivers/media/radio/radio-iris.c
+++ b/drivers/media/radio/radio-iris.c
@@ -1635,8 +1635,9 @@
if (status)
return;
- if (radio->mode != FM_CALIB)
+ if ((radio->mode != FM_CALIB) && (radio->mode != FM_OFF))
iris_q_event(radio, IRIS_EVT_RADIO_DISABLED);
+ radio->mode = FM_OFF;
radio_hci_req_complete(hdev, status);
}
@@ -2694,7 +2695,7 @@
radio->fm_hdev);
if (retval < 0)
FMDERR("Disable Failed after calibration %d", retval);
- radio->mode = FM_OFF;
+ radio->mode = FM_TURNING_OFF;
return retval;
}
static int iris_vidioc_g_ctrl(struct file *file, void *priv,
@@ -3240,7 +3241,7 @@
" %d\n", retval);
return retval;
}
- radio->mode = FM_OFF;
+ radio->mode = FM_TURNING_OFF;
break;
case FM_TRANS:
retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
@@ -3251,7 +3252,7 @@
" %d\n", retval);
return retval;
}
- radio->mode = FM_OFF;
+ radio->mode = FM_TURNING_OFF;
break;
default:
retval = -EINVAL;
@@ -4042,16 +4043,18 @@
if (radio->mode == FM_OFF)
return 0;
- if (radio->mode == FM_RECV)
+ if (radio->mode == FM_RECV) {
+ radio->mode = FM_OFF;
retval = hci_cmd(HCI_FM_DISABLE_RECV_CMD,
radio->fm_hdev);
- else if (radio->mode == FM_TRANS)
+ } else if (radio->mode == FM_TRANS) {
+ radio->mode = FM_OFF;
retval = hci_cmd(HCI_FM_DISABLE_TRANS_CMD,
radio->fm_hdev);
+ }
if (retval < 0)
FMDERR("Err on disable FM %d\n", retval);
- radio->mode = FM_OFF;
return retval;
}
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 7c73a82..408681c 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -1069,10 +1069,11 @@
info.connect_complete = 1;
spin_unlock(&usb_bam_ipa_handshake_info_lock);
- if (info.cur_cons_state[HSUSB_BAM] == IPA_RM_RESOURCE_GRANTED) {
- pr_debug("%s: Notify CONS_GRANTED\n", __func__);
+ if (info.cur_cons_state[cur_bam] == IPA_RM_RESOURCE_GRANTED) {
+ pr_debug("%s: Notify %s_CONS_GRANTED\n", __func__,
+ bam_enable_strings[cur_bam]);
ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
- ipa_rm_resource_cons[HSUSB_BAM]);
+ ipa_rm_resource_cons[cur_bam]);
}
}
@@ -1505,7 +1506,7 @@
ctx.pipes_enabled_per_bam[cur_bam] += 1;
spin_unlock(&usb_bam_lock);
- if (ipa_params->dir == PEER_PERIPHERAL_TO_USB && cur_bam == HSUSB_BAM)
+ if (ipa_params->dir == PEER_PERIPHERAL_TO_USB)
notify_usb_connected(cur_bam);
if (cur_bam == HSUSB_BAM)
diff --git a/drivers/power/qpnp-bms.c b/drivers/power/qpnp-bms.c
index 86d923d..5e6ba9f 100644
--- a/drivers/power/qpnp-bms.c
+++ b/drivers/power/qpnp-bms.c
@@ -47,6 +47,7 @@
#define BMS1_S1_DELAY_CTL 0x5A
/* OCV interrupt threshold */
#define BMS1_OCV_THR0 0x50
+#define BMS1_S2_SAMP_AVG_CTL 0x61
/* SW CC interrupt threshold */
#define BMS1_SW_CC_THR0 0xA0
/* OCV for r registers */
@@ -71,6 +72,7 @@
#define CHARGE_CYCLE_STORAGE_LSB 0xBE /* LSB=0xBE, MSB=0xBF */
/* IADC Channel Select */
+#define IADC1_BMS_REVISION2 0x01
#define IADC1_BMS_ADC_CH_SEL_CTL 0x48
#define IADC1_BMS_ADC_INT_RSNSN_CTL 0x49
#define IADC1_BMS_FAST_AVG_EN 0x5B
@@ -152,6 +154,7 @@
int battery_present;
int battery_status;
+ bool batfet_closed;
bool new_battery;
bool done_charging;
bool last_soc_invalid;
@@ -176,6 +179,7 @@
struct delayed_work calculate_soc_delayed_work;
struct work_struct recalc_work;
+ struct work_struct batfet_open_work;
struct mutex bms_output_lock;
struct mutex last_ocv_uv_mutex;
@@ -745,6 +749,11 @@
return get_battery_status(chip) == POWER_SUPPLY_STATUS_CHARGING;
}
+static bool is_battery_full(struct qpnp_bms_chip *chip)
+{
+ return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL;
+}
+
static bool is_battery_present(struct qpnp_bms_chip *chip)
{
union power_supply_propval ret = {0,};
@@ -763,9 +772,22 @@
return false;
}
-static bool is_battery_full(struct qpnp_bms_chip *chip)
+static bool is_batfet_closed(struct qpnp_bms_chip *chip)
{
- return get_battery_status(chip) == POWER_SUPPLY_STATUS_FULL;
+ union power_supply_propval ret = {0,};
+
+ if (chip->batt_psy == NULL)
+ chip->batt_psy = power_supply_get_by_name("battery");
+ if (chip->batt_psy) {
+ /* if battery has been registered, use the online property */
+ chip->batt_psy->get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_ONLINE, &ret);
+ return !!ret.intval;
+ }
+
+ /* Default to true if the battery power supply is not registered. */
+ pr_debug("battery power supply is not registered\n");
+ return true;
}
static int get_simultaneous_batt_v_and_i(struct qpnp_bms_chip *chip,
@@ -2335,7 +2357,8 @@
if (chip->use_voltage_soc) {
soc = calculate_soc_from_voltage(chip);
} else {
- qpnp_iadc_calibrate_for_trim(true);
+ if (!chip->batfet_closed)
+ qpnp_iadc_calibrate_for_trim(true);
rc = qpnp_vadc_read(LR_MUX1_BATT_THERM, &result);
if (rc) {
pr_err("error reading vadc LR_MUX1_BATT_THERM = %d, rc = %d\n",
@@ -2909,9 +2932,55 @@
}
}
+#define MAX_CAL_TRIES 200
+#define MIN_CAL_UA 3000
+static void batfet_open_work(struct work_struct *work)
+{
+ int i;
+ int rc;
+ int result_ua;
+ u8 orig_delay, sample_delay;
+ struct qpnp_bms_chip *chip = container_of(work,
+ struct qpnp_bms_chip,
+ batfet_open_work);
+
+ rc = qpnp_read_wrapper(chip, &orig_delay,
+ chip->base + BMS1_S1_DELAY_CTL, 1);
+
+ sample_delay = 0x0;
+ rc = qpnp_write_wrapper(chip, &sample_delay,
+ chip->base + BMS1_S1_DELAY_CTL, 1);
+
+ /*
+ * In certain PMICs there is a coupling issue which causes
+ * bad calibration value that result in a huge battery current
+ * even when the BATFET is open. Do continuous calibrations until
+ * we hit reasonable cal values which result in low battery current
+ */
+
+ for (i = 0; (!chip->batfet_closed) && i < MAX_CAL_TRIES; i++) {
+ rc = qpnp_iadc_calibrate_for_trim(false);
+ /*
+ * Wait 20mS after calibration and before reading battery
+ * current. The BMS h/w uses calibration values in the
+ * next sampling of vsense.
+ */
+ msleep(20);
+ rc |= get_battery_current(chip, &result_ua);
+ if (rc == 0 && abs(result_ua) <= MIN_CAL_UA) {
+ pr_debug("good cal at %d attempt\n", i);
+ break;
+ }
+ }
+ pr_debug("batfet_closed = %d i = %d result_ua = %d\n",
+ chip->batfet_closed, i, result_ua);
+
+ rc = qpnp_write_wrapper(chip, &orig_delay,
+ chip->base + BMS1_S1_DELAY_CTL, 1);
+}
+
static void charging_began(struct qpnp_bms_chip *chip)
{
-
mutex_lock(&chip->last_soc_mutex);
chip->charge_start_tm_sec = 0;
chip->catch_up_time_sec = 0;
@@ -3002,6 +3071,27 @@
}
#define CALIB_WRKARND_DIG_MAJOR_MAX 0x03
+static void batfet_status_check(struct qpnp_bms_chip *chip)
+{
+ bool batfet_closed;
+
+ if (chip->iadc_bms_revision2 > CALIB_WRKARND_DIG_MAJOR_MAX)
+ return;
+
+ batfet_closed = is_batfet_closed(chip);
+ if (chip->batfet_closed != batfet_closed) {
+ chip->batfet_closed = batfet_closed;
+ if (batfet_closed == false) {
+ /* batfet opened */
+ schedule_work(&chip->batfet_open_work);
+ qpnp_iadc_skip_calibration();
+ } else {
+ /* batfet closed */
+ qpnp_iadc_calibrate_for_trim(true);
+ qpnp_iadc_resume_calibration();
+ }
+ }
+}
static void battery_insertion_check(struct qpnp_bms_chip *chip)
{
@@ -3037,6 +3127,7 @@
bms_psy);
battery_insertion_check(chip);
+ batfet_status_check(chip);
battery_status_check(chip);
}
@@ -3796,6 +3887,7 @@
INIT_DELAYED_WORK(&chip->calculate_soc_delayed_work,
calculate_soc_work);
INIT_WORK(&chip->recalc_work, recalculate_work);
+ INIT_WORK(&chip->batfet_open_work, batfet_open_work);
read_shutdown_soc_and_iavg(chip);
@@ -3833,6 +3925,7 @@
}
battery_insertion_check(chip);
+ batfet_status_check(chip);
battery_status_check(chip);
calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 88e00ba..e93d085 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -275,6 +275,7 @@
struct qpnp_chg_irq chg_vbatdet_lo;
struct qpnp_chg_irq batt_pres;
struct qpnp_chg_irq vchg_loop;
+ struct qpnp_chg_irq batt_temp_ok;
bool bat_is_cool;
bool bat_is_warm;
bool chg_done;
@@ -503,6 +504,23 @@
}
static int
+qpnp_chg_is_batt_temp_ok(struct qpnp_chg_chip *chip)
+{
+ u8 batt_rt_sts;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &batt_rt_sts,
+ INT_RT_STS(chip->bat_if_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->bat_if_base), rc);
+ return rc;
+ }
+
+ return (batt_rt_sts & BAT_TEMP_OK_IRQ) ? 1 : 0;
+}
+
+static int
qpnp_chg_is_batt_present(struct qpnp_chg_chip *chip)
{
u8 batt_pres_rt_sts;
@@ -519,6 +537,23 @@
return (batt_pres_rt_sts & BATT_PRES_IRQ) ? 1 : 0;
}
+static int
+qpnp_chg_is_batfet_closed(struct qpnp_chg_chip *chip)
+{
+ u8 batfet_closed_rt_sts;
+ int rc;
+
+ rc = qpnp_chg_read(chip, &batfet_closed_rt_sts,
+ INT_RT_STS(chip->bat_if_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->bat_if_base), rc);
+ return rc;
+ }
+
+ return (batfet_closed_rt_sts & BAT_FET_ON_IRQ) ? 1 : 0;
+}
+
#define USB_VALID_BIT BIT(7)
static int
qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
@@ -980,6 +1015,19 @@
}
static irqreturn_t
+qpnp_chg_bat_if_batt_temp_irq_handler(int irq, void *_chip)
+{
+ struct qpnp_chg_chip *chip = _chip;
+ int batt_temp_good;
+
+ batt_temp_good = qpnp_chg_is_batt_temp_ok(chip);
+ pr_debug("batt-temp triggered: %d\n", batt_temp_good);
+
+ power_supply_changed(&chip->batt_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
qpnp_chg_bat_if_batt_pres_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
@@ -1217,6 +1265,7 @@
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
@@ -1512,6 +1561,11 @@
return (buck_sts & VCHG_LOOP_IRQ) ? 1 : 0;
}
+static int get_prop_online(struct qpnp_chg_chip *chip)
+{
+ return qpnp_chg_is_batfet_closed(chip);
+}
+
static void
qpnp_batt_external_power_changed(struct power_supply *psy)
{
@@ -1621,6 +1675,9 @@
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
val->intval = qpnp_chg_vinmin_get(chip) * 1000;
break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = get_prop_online(chip);
+ break;
default:
return -EINVAL;
}
@@ -2291,6 +2348,8 @@
if (qpnp_adc_tm_channel_measure(&chip->adc_param))
pr_err("request ADC error\n");
+
+ power_supply_changed(&chip->batt_psy);
}
static int
@@ -2494,6 +2553,25 @@
}
enable_irq_wake(chip->batt_pres.irq);
+
+ chip->batt_temp_ok.irq = spmi_get_irq_byname(spmi,
+ spmi_resource, "bat-temp-ok");
+ if (chip->batt_temp_ok.irq < 0) {
+ pr_err("Unable to get bat-temp-ok irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev, chip->batt_temp_ok.irq,
+ qpnp_chg_bat_if_batt_temp_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "bat-temp-ok", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d bat-temp-ok irq: %d\n",
+ chip->batt_temp_ok.irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->batt_temp_ok.irq);
+
break;
case SMBB_BUCK_SUBTYPE:
case SMBBP_BUCK_SUBTYPE:
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 863339b..b389187 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -2719,7 +2719,7 @@
pdata->use_bam = false;
}
- if (pdata->bam_producer_pipe_index) {
+ if (!pdata->bam_producer_pipe_index) {
dev_warn(&pdev->dev,
"missing qcom,bam-producer-pipe-index entry in device-tree\n");
pdata->use_bam = false;
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index de7fc02..06e3a1b 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -863,7 +863,8 @@
struct msm_otg_platform_data *pdata = motg->pdata;
/* Check if target allows min_vote to be same as no_vote */
- if (vote >= pdata->bus_scale_table->num_usecases)
+ if (pdata->bus_scale_table &&
+ vote >= pdata->bus_scale_table->num_usecases)
vote = USB_NO_PERF_VOTE;
if (motg->bus_perf_client) {
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 2603648..a8c34f3 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -394,9 +394,7 @@
void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
void mdss_dsi_ack_err_status(unsigned char *dsi_base);
-void mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable);
void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
int enable);
void mdss_dsi_controller_cfg(int enable,
@@ -412,8 +410,6 @@
int mdss_dsi_clk_init(struct platform_device *pdev,
struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
-void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
int mdss_dsi_enable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_disable_bus_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 5f5084d..055f233 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -80,34 +80,6 @@
mdss_dsi_buf_alloc(&ctrl->rx_buf, SZ_4K);
}
-/*
- * acquire ctrl->mutex first
- */
-void mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
-{
- mutex_lock(&ctrl->mutex);
- if (enable) {
- if (ctrl->clk_cnt == 0) {
- mdss_dsi_enable_bus_clocks(ctrl);
- mdss_dsi_prepare_clocks(ctrl);
- mdss_dsi_clk_enable(ctrl);
- }
- ctrl->clk_cnt++;
- } else {
- if (ctrl->clk_cnt) {
- ctrl->clk_cnt--;
- if (ctrl->clk_cnt == 0) {
- mdss_dsi_clk_disable(ctrl);
- mdss_dsi_unprepare_clocks(ctrl);
- mdss_dsi_disable_bus_clocks(ctrl);
- }
- }
- }
- pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
- __func__, ctrl->ndx, enable, ctrl->clk_cnt);
- mutex_unlock(&ctrl->mutex);
-}
-
void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
{
if (enable == 0) {
diff --git a/drivers/video/msm/mdss/mhl_msc.c b/drivers/video/msm/mdss/mhl_msc.c
index 15811bb..d0f93cf 100644
--- a/drivers/video/msm/mdss/mhl_msc.c
+++ b/drivers/video/msm/mdss/mhl_msc.c
@@ -224,12 +224,16 @@
case MHL_WRITE_STAT:
if (req->offset == MHL_STATUS_REG_LINK_MODE) {
if (req->payload.data[0]
- & MHL_STATUS_PATH_ENABLED)
+ & MHL_STATUS_PATH_ENABLED) {
/* Enable TMDS output */
mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
- else
+ if (mhl_ctrl->devcap_state == MHL_DEVCAP_ALL)
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
+ } else {
/* Disable TMDS output */
mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+ mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+ }
}
break;
case MHL_READ_DEVCAP:
@@ -245,8 +249,9 @@
pr_debug("%s: devcap pow bit unset\n",
__func__);
break;
- case DEVCAP_OFFSET_MHL_VERSION:
- case DEVCAP_OFFSET_INT_STAT_SIZE:
+ case DEVCAP_OFFSET_RESERVED:
+ mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+ mhl_drive_hpd(mhl_ctrl, HPD_UP);
break;
}
break;
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
index 82b56e3..add15a4 100644
--- a/drivers/video/msm/mdss/mhl_sii8334.c
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -794,12 +794,10 @@
void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
{
struct i2c_client *client = mhl_ctrl->i2c_handle;
- if (on) {
+ if (on)
MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
- mhl_drive_hpd(mhl_ctrl, HPD_UP);
- } else {
+ else
MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
- }
}
void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
index c24f643..7b89eff 100644
--- a/drivers/video/msm/mdss/msm_mdss_io_8974.c
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -209,61 +209,136 @@
clk_disable_unprepare(ctrl_pdata->ahb_clk);
}
-void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static int mdss_dsi_clk_prepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
- clk_prepare(ctrl_pdata->byte_clk);
- clk_prepare(ctrl_pdata->esc_clk);
- clk_prepare(ctrl_pdata->pixel_clk);
-}
+ int rc = 0;
-void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
-{
- clk_unprepare(ctrl_pdata->esc_clk);
- clk_unprepare(ctrl_pdata->pixel_clk);
+ rc = clk_prepare(ctrl_pdata->esc_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ rc = clk_prepare(ctrl_pdata->byte_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ rc = clk_prepare(ctrl_pdata->pixel_clk);
+ if (rc) {
+ pr_err("%s: Failed to prepare dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return rc;
+
+pixel_clk_err:
clk_unprepare(ctrl_pdata->byte_clk);
+byte_clk_err:
+ clk_unprepare(ctrl_pdata->esc_clk);
+esc_clk_err:
+ return rc;
}
-void mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static void mdss_dsi_clk_unprepare(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
- u32 esc_clk_rate = 19200000;
-
if (!ctrl_pdata) {
pr_err("%s: Invalid input data\n", __func__);
return;
}
- if (ctrl_pdata->mdss_dsi_clk_on) {
- pr_info("%s: mdss_dsi_clks already ON\n", __func__);
- return;
+ clk_unprepare(ctrl_pdata->pixel_clk);
+ clk_unprepare(ctrl_pdata->byte_clk);
+ clk_unprepare(ctrl_pdata->esc_clk);
+}
+
+static int mdss_dsi_clk_set_rate(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ u32 esc_clk_rate = 19200000;
+ int rc = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
}
if (!ctrl_pdata->panel_data.panel_info.cont_splash_enabled) {
pr_debug("%s: Set clk rates: pclk=%d, byteclk=%d escclk=%d\n",
__func__, ctrl_pdata->pclk_rate,
ctrl_pdata->byte_clk_rate, esc_clk_rate);
- if (clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate) < 0)
+ rc = clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate);
+ if (rc) {
pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
__func__);
+ goto error;
+ }
- if (clk_set_rate(ctrl_pdata->byte_clk,
- ctrl_pdata->byte_clk_rate) < 0)
+ rc = clk_set_rate(ctrl_pdata->byte_clk,
+ ctrl_pdata->byte_clk_rate);
+ if (rc) {
pr_err("%s: dsi_byte_clk - clk_set_rate failed\n",
__func__);
+ goto error;
+ }
- if (clk_set_rate(ctrl_pdata->pixel_clk,
- ctrl_pdata->pclk_rate) < 0)
+ rc = clk_set_rate(ctrl_pdata->pixel_clk, ctrl_pdata->pclk_rate);
+ if (rc) {
pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n",
__func__);
+ goto error;
+ }
}
- clk_enable(ctrl_pdata->esc_clk);
- clk_enable(ctrl_pdata->byte_clk);
- clk_enable(ctrl_pdata->pixel_clk);
-
- ctrl_pdata->mdss_dsi_clk_on = 1;
+error:
+ return rc;
}
-void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+static int mdss_dsi_clk_enable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+ int rc = 0;
+
+ if (!ctrl_pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ctrl_pdata->mdss_dsi_clk_on) {
+ pr_info("%s: mdss_dsi_clks already ON\n", __func__);
+ return 0;
+ }
+
+ rc = clk_enable(ctrl_pdata->esc_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ rc = clk_enable(ctrl_pdata->byte_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ rc = clk_enable(ctrl_pdata->pixel_clk);
+ if (rc) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ ctrl_pdata->mdss_dsi_clk_on = 1;
+
+ return rc;
+
+pixel_clk_err:
+ clk_disable(ctrl_pdata->byte_clk);
+byte_clk_err:
+ clk_disable(ctrl_pdata->esc_clk);
+esc_clk_err:
+ return rc;
+}
+
+static void mdss_dsi_clk_disable(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
if (!ctrl_pdata) {
pr_err("%s: Invalid input data\n", __func__);
@@ -275,13 +350,71 @@
return;
}
+ clk_disable(ctrl_pdata->esc_clk);
clk_disable(ctrl_pdata->pixel_clk);
clk_disable(ctrl_pdata->byte_clk);
- clk_disable(ctrl_pdata->esc_clk);
ctrl_pdata->mdss_dsi_clk_on = 0;
}
+int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable)
+{
+ int rc = 0;
+
+ mutex_lock(&ctrl->mutex);
+ if (enable) {
+ if (ctrl->clk_cnt == 0) {
+ rc = mdss_dsi_enable_bus_clocks(ctrl);
+ if (rc) {
+ pr_err("%s: failed to enable bus clks. rc=%d\n",
+ __func__, rc);
+ goto error;
+ }
+
+ rc = mdss_dsi_clk_set_rate(ctrl);
+ if (rc) {
+ pr_err("%s: failed to set clk rates. rc=%d\n",
+ __func__, rc);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ goto error;
+ }
+
+ rc = mdss_dsi_clk_prepare(ctrl);
+ if (rc) {
+ pr_err("%s: failed to prepare clks. rc=%d\n",
+ __func__, rc);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ goto error;
+ }
+
+ rc = mdss_dsi_clk_enable(ctrl);
+ if (rc) {
+ pr_err("%s: failed to enable clks. rc=%d\n",
+ __func__, rc);
+ mdss_dsi_clk_unprepare(ctrl);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ goto error;
+ }
+ }
+ ctrl->clk_cnt++;
+ } else {
+ if (ctrl->clk_cnt) {
+ ctrl->clk_cnt--;
+ if (ctrl->clk_cnt == 0) {
+ mdss_dsi_clk_disable(ctrl);
+ mdss_dsi_clk_unprepare(ctrl);
+ mdss_dsi_disable_bus_clocks(ctrl);
+ }
+ }
+ }
+ pr_debug("%s: ctrl ndx=%d enabled=%d clk_cnt=%d\n",
+ __func__, ctrl->ndx, enable, ctrl->clk_cnt);
+
+error:
+ mutex_unlock(&ctrl->mutex);
+ return rc;
+}
+
void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base)
{
/* start phy sw reset */
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 21d836f..668c397 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -10,6 +10,7 @@
#include <linux/namei.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/kmemleak.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@@ -1215,6 +1216,8 @@
if (!header)
return NULL;
+ kmemleak_not_leak(header);
+
node = (struct ctl_node *)(header + 1);
init_header(header, root, set, node, table);
if (sysctl_check_table(path, table))
diff --git a/include/linux/mhl_8334.h b/include/linux/mhl_8334.h
index 560f75b..f0a54eb 100644
--- a/include/linux/mhl_8334.h
+++ b/include/linux/mhl_8334.h
@@ -99,7 +99,7 @@
int mhl_mode;
struct completion rgnd_done;
struct completion msc_cmd_done;
- uint8_t devcap_state;
+ uint16_t devcap_state;
uint8_t path_en_state;
struct work_struct mhl_msc_send_work;
struct list_head list_cmd;
@@ -146,7 +146,7 @@
int current_val;
struct completion msc_cmd_done;
uint8_t devcap[16];
- uint8_t devcap_state;
+ uint16_t devcap_state;
uint8_t status[2];
uint8_t path_en_state;
void *hdmi_mhl_ops;
diff --git a/include/linux/mhl_defs.h b/include/linux/mhl_defs.h
index f5dacfd..6177f07 100644
--- a/include/linux/mhl_defs.h
+++ b/include/linux/mhl_defs.h
@@ -132,6 +132,7 @@
#define MHL_SCRATCHPAD_SIZE 16
#define MAX_SCRATCHPAD_TRANSFER_SIZE 64
#define ADOPTER_ID_SIZE 2
+#define MHL_DEVCAP_ALL 0xffff
/* manually define highest number */
#define MHL_MAX_BUFFER_SIZE MHL_SCRATCHPAD_SIZE
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index f8b78a4..87047d2 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -30,6 +30,7 @@
#define KGSL_CONTEXT_TYPE_CL 2
#define KGSL_CONTEXT_TYPE_C2D 3
#define KGSL_CONTEXT_TYPE_RS 4
+#define KGSL_CONTEXT_TYPE_UNKNOWN 0x1E
#define KGSL_CONTEXT_INVALID 0xffffffff
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 013a778..041aae7 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -1499,6 +1499,20 @@
* has not occured.
*/
int32_t qpnp_adc_tm_is_ready(void);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ * to skip iadc calibrations
+ * @result: 0 on success and -EPROBE_DEFER when probe for the device
+ * has not occurred.
+ */
+int qpnp_iadc_skip_calibration(void);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ * to resume iadc calibrations
+ * @result: 0 on success and -EPROBE_DEFER when probe for the device
+ * has not occurred.
+ */
+int qpnp_iadc_resume_calibration(void);
#else
static inline int32_t qpnp_adc_tm_usbid_configure(
struct qpnp_adc_tm_btm_param *param)
@@ -1512,6 +1526,10 @@
{ return -ENXIO; }
static inline int32_t qpnp_adc_tm_is_ready(void)
{ return -ENXIO; }
+static inline int qpnp_iadc_skip_calibration(void)
+{ return -ENXIO; }
+static inline int qpnp_iadc_resume_calibration(void)
+{ return -ENXIO; }
#endif
#endif
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4404df5..2424518 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -767,7 +767,8 @@
#define V4L2_CAP_QCOM_FRAMESKIP 0x2000 /* frame skipping is supported */
struct v4l2_qcom_frameskip {
- __u64 maxframeinterval;
+ __u64 maxframeinterval;
+ __u8 fpsvariance;
};
struct v4l2_outputparm {
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
index 4cbac7b..419e055 100644
--- a/include/media/radio-iris.h
+++ b/include/media/radio-iris.h
@@ -626,7 +626,8 @@
FM_RECV,
FM_TRANS,
FM_RESET,
- FM_CALIB
+ FM_CALIB,
+ FM_TURNING_OFF
};
enum emphasis_type {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c69f5e2..1438de9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2310,6 +2310,19 @@
} while (memcg);
}
+static bool zone_balanced(struct zone *zone, int order,
+ unsigned long balance_gap, int classzone_idx)
+{
+ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+ balance_gap, classzone_idx, 0))
+ return false;
+
+ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+ return false;
+
+ return true;
+}
+
/*
* pgdat_balanced is used when checking if a node is balanced for high-order
* allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2369,8 +2382,7 @@
continue;
}
- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
- i, 0))
+ if (!zone_balanced(zone, order, 0, i))
all_zones_ok = false;
else
balanced += zone->present_pages;
@@ -2479,8 +2491,7 @@
break;
}
- if (!zone_watermark_ok_safe(zone, order,
- high_wmark_pages(zone), 0, 0)) {
+ if (!zone_balanced(zone, order, 0, 0)) {
end_zone = i;
break;
} else {
@@ -2556,9 +2567,8 @@
testorder = 0;
if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
- !zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone) + balance_gap,
- end_zone, 0)) {
+ !zone_balanced(zone, testorder,
+ balance_gap, end_zone)) {
shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
@@ -2585,8 +2595,7 @@
continue;
}
- if (!zone_watermark_ok_safe(zone, testorder,
- high_wmark_pages(zone), end_zone, 0)) {
+ if (!zone_balanced(zone, testorder, 0, end_zone)) {
all_zones_ok = 0;
/*
* We are still under min water mark. This
@@ -2681,22 +2690,6 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
- continue;
-
- /* Would compaction fail due to lack of free memory? */
- if (COMPACTION_BUILD &&
- compaction_suitable(zone, order) == COMPACT_SKIPPED)
- goto loop_again;
-
- /* Confirm the zone is balanced for order-0 */
- if (!zone_watermark_ok(zone, 0,
- high_wmark_pages(zone), 0, 0)) {
- order = sc.order = 0;
- goto loop_again;
- }
-
/* Check if the memory needs to be defragmented. */
if (zone_watermark_ok(zone, order,
low_wmark_pages(zone), *classzone_idx, 0))
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..247c69b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <net/ip.h>
#include <net/sock.h>
@@ -256,7 +257,7 @@
{
static struct ctl_table empty[1];
- register_sysctl_paths(net_core_path, empty);
+ kmemleak_not_leak(register_sysctl_paths(net_core_path, empty));
register_net_sysctl_rotable(net_core_path, net_core_table);
return register_pernet_subsys(&sysctl_core_ops);
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10..d02a8da 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
+#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 869d642..6a34470 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -542,6 +542,7 @@
}
apr_deregister(ac->apr);
+ ac->apr = NULL;
ac->mmap_apr = NULL;
q6asm_session_free(ac);
q6asm_mmap_apr_dereg();
@@ -550,6 +551,7 @@
/*done:*/
kfree(ac);
+ ac = NULL;
return;
}
@@ -1327,6 +1329,11 @@
{
pr_debug("%s:pkt_size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
cmd_flg, ac->session);
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL", __func__);
+ return;
+ }
+
mutex_lock(&ac->cmd_lock);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(sizeof(struct apr_hdr)),\
@@ -1354,6 +1361,10 @@
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
APR_HDR_LEN(sizeof(struct apr_hdr)),\
APR_PKT_VER);
+ if (ac->apr == NULL) {
+ pr_err("%s: ac->apr is NULL", __func__);
+ return;
+ }
hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
hdr->src_domain = APR_DOMAIN_APPS;
hdr->dest_svc = APR_SVC_ASM;
@@ -2908,6 +2919,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_volume_ctrl_lr_chan_gain);
q6asm_add_hdr_async(ac, &lrgain.hdr, sz, TRUE);
lrgain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -2950,6 +2967,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_volume_ctrl_mute_config);
q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -2991,6 +3014,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_volume_ctrl_master_gain);
q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3034,6 +3063,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_soft_pause_params);
q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3081,6 +3116,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
sz = sizeof(struct asm_soft_step_volume_params);
q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
@@ -3127,6 +3168,12 @@
int sz = 0;
int rc = 0;
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
if (eq_p == NULL) {
pr_err("%s[%d]: Invalid Eq param\n", __func__, ac->session);
rc = -EINVAL;