mmc: add new request stats infrastructure

Add statistics infrastructure for the new_request_notification
mechanism. Counters placed at decision points in the feature's flow
collect information about how the mechanism behaves.

Change-Id: I863911e7ecade609b3b4dff619dd6b059a098fcc
Signed-off-by: Konstantin Dorfman <kdorfman@codeaurora.org>
Signed-off-by: Lee Susman <lsusman@codeaurora.org>
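---
Reviewer note: struct mmc_async_event_stats itself is not touched by this
diff; it is assumed to be added to struct mmc_card in
include/linux/mmc/card.h by an accompanying change. From the fields used
below, the assumed layout is roughly as follows (field names are taken from
this patch; the integer types are inferred from the %d format specifiers):

/*
 * Sketch only, not part of this patch; the real definition is assumed to
 * live in include/linux/mmc/card.h next to struct mmc_wr_pack_stats.
 */
struct mmc_async_event_stats {
	bool enabled;			/* collection on/off (debugfs toggle) */
	int print_in_read;		/* debugfs read dumps data once per open */
	int done_flag;			/* completions handled in the core wait loop */
	int cmd_retry;			/* command retries */
	int null_fetched;		/* queue thread fetched no new request */
	int wakeup_new;			/* waiter woken by a newly arrived request */
	int new_request_flag;		/* wait loop returned MMC_BLK_NEW_REQUEST */
	int q_no_waiting;		/* new request arrived, nobody waiting */
	int no_mmc_request_action;	/* mmc_request() had nothing to do */
	int wakeup_mq_thread;		/* queue thread woken, both req slots idle */
	int fetch_due_to_new_req;	/* queue thread re-fetched after a new request */
	int returned_new_req;		/* issue path returned early on a new request */
	int done_when_new_req_event_on;	/* completion while is_new_req was set */
	int new_req_when_new_marked;	/* new request while is_new_req already set */
};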
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 73c47cd..b748228 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1557,6 +1557,39 @@
}
EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+/**
+ * mmc_blk_init_async_event_statistics() - Init async event
+ * statistics data
+ * @card: The mmc_card in which the async_event_stats
+ * struct is a member
+ *
+ * Initialize the counters for the new request feature and mark the
+ * statistics as enabled.
+ */
+void mmc_blk_init_async_event_statistics(struct mmc_card *card)
+{
+ if (!card)
+ return;
+
+ /* init async event statistics */
+ memset(&card->async_event_stats, 0,
+ sizeof(struct mmc_async_event_stats));
+ card->async_event_stats.null_fetched = 0;
+ card->async_event_stats.wakeup_new = 0;
+ card->async_event_stats.new_request_flag = 0;
+ card->async_event_stats.q_no_waiting = 0;
+ card->async_event_stats.enabled = true;
+ card->async_event_stats.no_mmc_request_action = 0;
+ card->async_event_stats.wakeup_mq_thread = 0;
+ card->async_event_stats.fetch_due_to_new_req = 0;
+ card->async_event_stats.returned_new_req = 0;
+ card->async_event_stats.done_flag = 0;
+ card->async_event_stats.cmd_retry = 0;
+ card->async_event_stats.done_when_new_req_event_on = 0;
+ card->async_event_stats.new_req_when_new_marked = 0;
+}
+EXPORT_SYMBOL(mmc_blk_init_async_event_statistics);
+
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
@@ -1569,7 +1602,12 @@
u8 put_back = 0;
u8 max_packed_rw = 0;
u8 reqs = 0;
- struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
+ struct mmc_wr_pack_stats *stats;
+
+ if (!card)
+ goto no_packed;
+
+ stats = &card->wr_pack_stats;
mmc_blk_clear_packed(mq->mqrq_cur);
@@ -1896,6 +1934,7 @@
struct mmc_async_req *areq;
const u8 packed_num = 2;
u8 reqs = 0;
+ struct mmc_async_event_stats *stats = &card->async_event_stats;
if (!rqc && !mq->mqrq_prev->req)
return 0;
@@ -1918,8 +1957,12 @@
areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status);
if (!areq) {
- if (status == MMC_BLK_NEW_REQUEST)
+ if (status == MMC_BLK_NEW_REQUEST) {
+ if (stats->enabled)
+ stats->returned_new_req++;
+
mq->flags |= MMC_QUEUE_NEW_REQUEST;
+ }
return 0;
}
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index e0b7e35..92603f6 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -278,6 +278,50 @@
spin_unlock(&card->wr_pack_stats.lock);
}
+/**
+ * mmc_print_async_event_stats() - Print async event statistics
+ * @card: The mmc_card in which the async_event_stats
+ * struct is a member
+ */
+void mmc_print_async_event_stats(struct mmc_card *card)
+{
+ struct mmc_async_event_stats *s;
+
+ if (!card)
+ return;
+
+ s = &card->async_event_stats;
+
+ pr_info("%s: new notification & req statistics:\n",
+ mmc_hostname(card->host));
+ pr_info("%s: done_flag:%d", mmc_hostname(card->host),
+ s->done_flag);
+ pr_info("%s: cmd_retry:%d", mmc_hostname(card->host),
+ s->cmd_retry);
+ pr_info("%s: NULL fetched:%d", mmc_hostname(card->host),
+ s->null_fetched);
+ pr_info("%s: wake up new:%d", mmc_hostname(card->host),
+ s->wakeup_new);
+ pr_info("%s: new_request_flag:%d", mmc_hostname(card->host),
+ s->new_request_flag);
+ pr_info("%s: no waiting:%d\n", mmc_hostname(card->host),
+ s->q_no_waiting);
+ pr_info("%s: no_mmc_request_action:%d", mmc_hostname(card->host),
+ s->no_mmc_request_action);
+ pr_info("%s: wakeup_mq_thread:%d", mmc_hostname(card->host),
+ s->wakeup_mq_thread);
+ pr_info("%s: fetch_due_to_new_req:%d", mmc_hostname(card->host),
+ s->fetch_due_to_new_req);
+ pr_info("%s: returned_new_req:%d", mmc_hostname(card->host),
+ s->returned_new_req);
+ pr_info("%s: done_when_new_req_event_on:%d", mmc_hostname(card->host),
+ s->done_when_new_req_event_on);
+ pr_info("%s: new_req_when_new_marked:%d", mmc_hostname(card->host),
+ s->new_req_when_new_marked);
+}
+
/*
* A callback assigned to the packed_test_fn field.
* Called from block layer in mmc_blk_packed_hdr_wrq_prep.
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c762ed1..7b80dfb 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -59,12 +59,19 @@
struct request_queue *q = mq->queue;
struct request *req;
struct mmc_card *card = mq->card;
+ struct mmc_async_event_stats *stats;
+ struct mmc_queue_req *tmp;
+
+ if (!card)
+ return 0;
+
+ stats = &card->async_event_stats;
current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
- struct mmc_queue_req *tmp;
req = NULL; /* Must be set to NULL at each iteration */
spin_lock_irq(q->queue_lock);
@@ -74,9 +81,11 @@
if (!req && mq->mqrq_prev->req &&
!(mq->mqrq_prev->req->cmd_flags & REQ_SANITIZE) &&
!(mq->mqrq_prev->req->cmd_flags & REQ_FLUSH) &&
- !(mq->mqrq_prev->req->cmd_flags & REQ_DISCARD))
+ !(mq->mqrq_prev->req->cmd_flags & REQ_DISCARD)) {
card->host->context_info.is_waiting_last_req = true;
-
+ if (stats && stats->enabled)
+ stats->null_fetched++;
+ }
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
@@ -95,6 +104,8 @@
mq->issue_fn(mq, req);
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (stats && stats->enabled)
+ stats->fetch_due_to_new_req++;
continue; /* fetch again */
}
} else {
@@ -129,6 +140,7 @@
static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
+ struct mmc_async_event_stats *stats;
struct request *req;
unsigned long flags;
struct mmc_context_info *cntx;
@@ -140,22 +152,39 @@
}
return;
}
+ if (!mq->card)
+ return;
+
cntx = &mq->card->host->context_info;
+ stats = &mq->card->async_event_stats;
if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
/*
* New MMC request arrived when MMC thread may be
* blocked on the previous request to be complete
* with no current request fetched
*/
+
spin_lock_irqsave(&cntx->lock, flags);
if (cntx->is_waiting_last_req) {
+ if (stats->enabled) {
+ stats->wakeup_new++;
+ if (cntx->is_new_req)
+ stats->new_req_when_new_marked++;
+ }
cntx->is_new_req = true;
wake_up_interruptible(&cntx->wait);
- }
+ } else if (stats->enabled)
+ stats->q_no_waiting++;
spin_unlock_irqrestore(&cntx->lock, flags);
- } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+ } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) {
wake_up_process(mq->thread);
+ if (stats->enabled)
+ stats->wakeup_mq_thread++;
+ } else if (stats->enabled)
+ stats->no_mmc_request_action++;
}
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ce86105..1ea580e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -633,6 +633,7 @@
struct mmc_context_info *context_info = &host->context_info;
int err;
unsigned long flags;
+ struct mmc_async_event_stats *stats = &host->card->async_event_stats;
while (1) {
wait_io_event_interruptible(context_info->wait,
@@ -645,13 +646,18 @@
context_info->is_done_rcv = false;
context_info->is_new_req = false;
cmd = mrq->cmd;
+ if (stats->enabled) {
+ stats->done_flag++;
+ if (context_info->is_new_req)
+ stats->done_when_new_req_event_on++;
+ }
if (!cmd->error || !cmd->retries ||
mmc_card_removed(host->card)) {
err = host->areq->err_check(host->card,
host->areq);
break; /* return err */
} else {
- pr_info("%s: req failed (CMD%u): %d, retrying...\n",
+ pr_info("%s: req failed (CMD%u):%d, retrying\n",
mmc_hostname(host),
cmd->opcode, cmd->error);
cmd->retries--;
@@ -661,6 +667,8 @@
}
} else if (context_info->is_new_req) {
context_info->is_new_req = false;
+ if (stats->enabled)
+ stats->new_request_flag++;
err = MMC_BLK_NEW_REQUEST;
break; /* return err */
}
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 84a26a1..9642a06 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -528,6 +528,141 @@
.write = mmc_wr_pack_stats_write,
};
+static int mmc_new_req_stats_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_card *card = inode->i_private;
+
+ filp->private_data = card;
+ card->async_event_stats.print_in_read = 1;
+ return 0;
+}
+
+static ssize_t mmc_new_req_stats_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ struct mmc_async_event_stats *s;
+ char *temp_buf;
+
+ if (!card)
+ return cnt;
+
+ s = &card->async_event_stats;
+
+ if (!card->async_event_stats.enabled) {
+ pr_info("%s: New Request statistics are disabled\n",
+ mmc_hostname(card->host));
+ goto exit;
+ }
+
+ temp_buf = kmalloc(2 * TEMP_BUF_SIZE, GFP_KERNEL);
+ if (!temp_buf)
+ goto exit;
+
+ memset(ubuf, 0, cnt);
+ memset(temp_buf, 0, 2 * TEMP_BUF_SIZE);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: new notification & req statistics:\n",
+ mmc_hostname(card->host));
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: done_flag:%d\n", mmc_hostname(card->host), s->done_flag);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: cmd_retry:%d\n", mmc_hostname(card->host), s->cmd_retry);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: NULL fetched:%d\n", mmc_hostname(card->host),
+ s->null_fetched);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: wake up new:%d\n",
+ mmc_hostname(card->host), s->wakeup_new);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: new_request_flag:%d\n", mmc_hostname(card->host),
+ s->new_request_flag);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: no waiting:%d\n", mmc_hostname(card->host),
+ s->q_no_waiting);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: no_mmc_request_action:%d\n", mmc_hostname(card->host),
+ s->no_mmc_request_action);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: wakeup_mq_thread:%d\n", mmc_hostname(card->host),
+ s->wakeup_mq_thread);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: fetch_due_to_new_req:%d\n", mmc_hostname(card->host),
+ s->fetch_due_to_new_req);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: returned_new_req:%d\n", mmc_hostname(card->host),
+ s->returned_new_req);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: done_when_new_req_event_on:%d\n",
+ mmc_hostname(card->host), s->done_when_new_req_event_on);
+ strlcat(ubuf, temp_buf, cnt);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: new_req_when_new_marked:%d\n",
+ mmc_hostname(card->host), s->new_req_when_new_marked);
+ strlcat(ubuf, temp_buf, cnt);
+
+ kfree(temp_buf);
+
+ pr_info("%s", ubuf);
+
+exit:
+ if (card->async_event_stats.print_in_read == 1) {
+ card->async_event_stats.print_in_read = 0;
+ return strnlen(ubuf, cnt);
+ }
+
+ return 0;
+}
+
+static ssize_t mmc_new_req_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ int value;
+
+ if (!card)
+ return cnt;
+
+ sscanf(ubuf, "%d", &value);
+ if (value) {
+ mmc_blk_init_async_event_statistics(card);
+ pr_info("%s: %s: New request statistics are enabled",
+ mmc_hostname(card->host), __func__);
+ } else {
+ card->async_event_stats.enabled = false;
+ pr_info("%s: %s: New request statistics are disabled",
+ mmc_hostname(card->host), __func__);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations mmc_dbg_new_req_stats_fops = {
+ .open = mmc_new_req_stats_open,
+ .read = mmc_new_req_stats_read,
+ .write = mmc_new_req_stats_write,
+};
+
static int mmc_bkops_stats_open(struct inode *inode, struct file *filp)
{
struct mmc_card *card = inode->i_private;
@@ -673,6 +808,10 @@
&mmc_dbg_wr_pack_stats_fops))
goto err;
+ if (!debugfs_create_file("new_req_stats", S_IRUSR, root, card,
+ &mmc_dbg_new_req_stats_fops))
+ goto err;
+
if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
card->ext_csd.bkops_en)
if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
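
For completeness, a minimal user-space sketch of how the new debugfs entry
could be exercised. The path below is an assumption (the card directory name
depends on the host and card instance), debugfs is assumed to be mounted at
/sys/kernel/debug, and the program must run as root:

/* Hypothetical usage example, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; the "mmc0:0001" component differs per card. */
	const char *path = "/sys/kernel/debug/mmc0/mmc0:0001/new_req_stats";
	char buf[4096];
	ssize_t len;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open new_req_stats");
		return 1;
	}

	/* Writing a non-zero value resets and enables the counters. */
	if (write(fd, "1", 1) < 0)
		perror("write");

	/* ... generate eMMC traffic here, then dump the counters ... */
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		fputs(buf, stdout);
	}

	close(fd);
	return 0;
}

Writing a non-zero value invokes mmc_blk_init_async_event_statistics() to
reset and enable the counters, while writing zero disables collection, as
implemented in mmc_new_req_stats_write() above.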