mmc: Allow periodic BKOPS on devices of different sizes

Remove the check of the number of written or discarded sectors as the
trigger for queueing the delayed BKOPS status check, so the delayed work
is scheduled whenever the queue becomes idle, regardless of how much data
has changed.
Move the call to mmc_stop_bkops into mmc/block, where the host is already
claimed for the first request, so the host does not have to be claimed a
second time for this operation (re-claiming the host can cause performance
degradation for random I/O workloads).
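
Since mmc_stop_bkops() no longer claims and releases the host internally,
any caller that is not already under mmc_claim_host() must take the claim
itself. A minimal sketch of the intended calling convention (illustrative
summary of the changes below, not additional code):

	/* mmc/block: host is already claimed for the first request */
	mmc_claim_host(card->host);
	if (card->ext_csd.bkops_en &&
	    card->bkops_info.started_delayed_bkops) {
		card->bkops_info.started_delayed_bkops = false;
		mmc_stop_bkops(card);	/* runs under the existing claim */
	}

	/* other callers, e.g. the PM notifier, wrap the call themselves */
	mmc_claim_host(host);
	err = mmc_stop_bkops(host->card);
	mmc_release_host(host);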

Change-Id: I9986387bc6c7e89ddfed603dbf8fce7607537c5b
Signed-off-by: Maya Erez <merez@codeaurora.org>
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 73c47cd..b6ba00f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -331,48 +331,6 @@
 	return ret;
 }
 
-static ssize_t
-min_sectors_to_check_bkops_status_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	unsigned int min_sectors_to_check_bkops_status;
-	struct mmc_card *card = md->queue.card;
-	int ret;
-
-	if (!card)
-		ret = -EINVAL;
-	else {
-	    min_sectors_to_check_bkops_status =
-		    card->bkops_info.min_sectors_to_queue_delayed_work;
-	    ret = snprintf(buf, PAGE_SIZE, "%d\n",
-			   min_sectors_to_check_bkops_status);
-	}
-
-	mmc_blk_put(md);
-	return ret;
-}
-
-static ssize_t
-min_sectors_to_check_bkops_status_store(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	int value;
-	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	struct mmc_card *card = md->queue.card;
-
-	if (!card)
-		return -EINVAL;
-
-	sscanf(buf, "%d", &value);
-	if (value >= 0)
-		card->bkops_info.min_sectors_to_queue_delayed_work = value;
-
-	mmc_blk_put(md);
-	return count;
-}
-
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -1000,9 +958,6 @@
 	from = blk_rq_pos(req);
 	nr = blk_rq_sectors(req);
 
-	if (card->ext_csd.bkops_en)
-		card->bkops_info.sectors_changed += blk_rq_sectors(req);
-
 	if (mmc_can_discard(card))
 		arg = MMC_DISCARD_ARG;
 	else if (mmc_can_trim(card))
@@ -1664,12 +1619,8 @@
 			break;
 		}
 
-		if (rq_data_dir(next) == WRITE) {
+		if (rq_data_dir(next) == WRITE)
 			mq->num_of_potential_packed_wr_reqs++;
-			if (card->ext_csd.bkops_en)
-				card->bkops_info.sectors_changed +=
-					blk_rq_sectors(next);
-		}
 		list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
 		cur = next;
 		reqs++;
@@ -1901,8 +1852,6 @@
 		return 0;
 
 	if (rqc) {
-		if ((card->ext_csd.bkops_en) && (rq_data_dir(rqc) == WRITE))
-			card->bkops_info.sectors_changed += blk_rq_sectors(rqc);
 		reqs = mmc_blk_prep_packed_list(mq, rqc);
 	}
 
@@ -2067,9 +2016,15 @@
 	}
 #endif
 
-	if (req && !mq->mqrq_prev->req)
+	if (req && !mq->mqrq_prev->req) {
 		/* claim host only for the first request */
 		mmc_claim_host(card->host);
+		if (card->ext_csd.bkops_en &&
+		    card->bkops_info.started_delayed_bkops) {
+			card->bkops_info.started_delayed_bkops = false;
+			mmc_stop_bkops(card);
+		}
+	}
 
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
@@ -2399,24 +2354,8 @@
 	if (ret)
 		goto num_wr_reqs_to_start_packing_fail;
 
-	md->min_sectors_to_check_bkops_status.show =
-		min_sectors_to_check_bkops_status_show;
-	md->min_sectors_to_check_bkops_status.store =
-		min_sectors_to_check_bkops_status_store;
-	sysfs_attr_init(&md->min_sectors_to_check_bkops_status.attr);
-	md->min_sectors_to_check_bkops_status.attr.name =
-		"min_sectors_to_check_bkops_status";
-	md->min_sectors_to_check_bkops_status.attr.mode = S_IRUGO | S_IWUSR;
-	ret = device_create_file(disk_to_dev(md->disk),
-				 &md->min_sectors_to_check_bkops_status);
-	if (ret)
-		goto min_sectors_to_check_bkops_status_fails;
-
 	return ret;
 
-min_sectors_to_check_bkops_status_fails:
-	device_remove_file(disk_to_dev(md->disk),
-			   &md->num_wr_reqs_to_start_packing);
 num_wr_reqs_to_start_packing_fail:
 	device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
 power_ro_lock_fail:
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index 610a822..3062f64 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -1840,8 +1840,6 @@
 	case BKOPS_DELAYED_WORK_LEVEL_1:
 		bkops_stat->ignore_card_bkops_status = true;
 		card->ext_csd.raw_bkops_status = 1;
-		card->bkops_info.sectors_changed =
-			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
 		mbtd->bkops_stage = BKOPS_STAGE_1;
 
 		__blk_run_queue(q);
@@ -1858,8 +1856,6 @@
 	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
 		bkops_stat->ignore_card_bkops_status = true;
 		card->ext_csd.raw_bkops_status = 1;
-		card->bkops_info.sectors_changed =
-			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
 		mbtd->bkops_stage = BKOPS_STAGE_1;
 
 		__blk_run_queue(q);
@@ -1890,8 +1886,6 @@
 	case BKOPS_CANCEL_DELAYED_WORK:
 		bkops_stat->ignore_card_bkops_status = true;
 		card->ext_csd.raw_bkops_status = 1;
-		card->bkops_info.sectors_changed =
-			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
 		mbtd->bkops_stage = BKOPS_STAGE_1;
 
 		__blk_run_queue(q);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c762ed1..bf1203a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -80,17 +80,6 @@
 		spin_unlock_irq(q->queue_lock);
 
 		if (req || mq->mqrq_prev->req) {
-			/*
-			 * If this is the first request, BKOPs might be in
-			 * progress and needs to be stopped before issuing the
-			 * request
-			 */
-			if (card->ext_csd.bkops_en &&
-			    card->bkops_info.started_delayed_bkops) {
-				card->bkops_info.started_delayed_bkops = false;
-				mmc_stop_bkops(card);
-			}
-
 			set_current_state(TASK_RUNNING);
 			mq->issue_fn(mq, req);
 			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ce86105..59e8f8f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -357,15 +357,9 @@
 	if (!card || !card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
 		return;
 
-	if (card->bkops_info.sectors_changed <
-	    card->bkops_info.min_sectors_to_queue_delayed_work)
-		return;
-
 	pr_debug("%s: %s: queueing delayed_bkops_work\n",
 		 mmc_hostname(card->host), __func__);
 
-	card->bkops_info.sectors_changed = 0;
-
 	/*
 	 * cancel_delayed_bkops_work will prevent a race condition between
 	 * fetching a request by the mmcqd and the delayed work, in case
@@ -923,8 +917,6 @@
 
 	BUG_ON(!card);
 
-	mmc_claim_host(card->host);
-
 	/*
 	 * Notify the delayed work to be cancelled, in case it was already
 	 * removed from the queue, but was not started yet
@@ -949,7 +941,6 @@
 	MMC_UPDATE_BKOPS_STATS_HPI(card->bkops_info.bkops_stats);
 
 out:
-	mmc_release_host(card->host);
 	return err;
 }
 EXPORT_SYMBOL(mmc_stop_bkops);
@@ -3230,7 +3221,9 @@
 	case PM_HIBERNATION_PREPARE:
 	case PM_SUSPEND_PREPARE:
 		if (host->card && mmc_card_mmc(host->card)) {
+			mmc_claim_host(host);
 			err = mmc_stop_bkops(host->card);
+			mmc_release_host(host);
 			if (err) {
 				pr_err("%s: didn't stop bkops\n",
 					mmc_hostname(host));
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c1a6b28..f3653b8 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1413,9 +1413,6 @@
 				card->bkops_info.delay_ms = min(
 					card->bkops_info.delay_ms,
 				      card->bkops_info.host_suspend_tout_ms/2);
-
-			card->bkops_info.min_sectors_to_queue_delayed_work =
-				BKOPS_MIN_SECTORS_TO_QUEUE_DELAYED_WORK;
 		}
 	}
 
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 2895d51..7d71bc7 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -270,14 +270,11 @@
  *        should be cancelled
  * @started_delayed_bkops:  A flag to indicate if the delayed
  *        work was scheduled
- * @sectors_changed:  number of  sectors written or
- *       discard since the last idle BKOPS were scheduled
  */
 struct mmc_bkops_info {
 	struct delayed_work	dw;
 	unsigned int		host_suspend_tout_ms;
 	unsigned int		delay_ms;
-	unsigned int		min_sectors_to_queue_delayed_work;
 	struct mmc_bkops_stats  bkops_stats;    /* BKOPS statistics */
 /*
  * A default time for checking the need for non urgent BKOPS once mmcqd
@@ -290,16 +287,6 @@
 #define BKOPS_COMPLETION_POLLING_INTERVAL_MS 1000 /* in ms */
 	bool			cancel_delayed_work;
 	bool			started_delayed_bkops;
-	unsigned int		sectors_changed;
-/*
- * Since canceling the delayed work might have significant effect on the
- * performance of small requests we won't queue the delayed work every time
- * mmcqd thread is idle.
- * The delayed work for idle BKOPS will be scheduled only after a significant
- * amount of write or discard data.
- * 100MB is chosen based on benchmark tests.
- */
-#define BKOPS_MIN_SECTORS_TO_QUEUE_DELAYED_WORK 204800 /* 100MB */
 };
 
 /*