Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4
AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.
* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
PRNG: Device tree entry for qrng device.
vidc:1080p: Set video core timeout value for Thumbnail mode
msm: sps: improve the debugging support in SPS driver
board-8064 msm: Overlap secure and non secure video firmware heaps.
msm: clock: Add handoff ops for 7x30 and copper XO clocks
msm_fb: display: Wait for external vsync before DTV IOMMU unmap
msm: Fix circular dependency in debug UART settings
msm: gdsc: Add GDSC regulator driver for msm-copper
defconfig: Enable Mobicore Driver.
mobicore: Add mobicore driver.
mobicore: rename variable to lower case.
mobicore: rename folder.
mobicore: add makefiles
mobicore: initial import of kernel driver
ASoC: msm: Add SLIMBUS_2_RX CPU DAI
board-8064-gpio: Update FUNC for EPM SPI CS
msm_fb: display: Remove chicken bit config during video playback
mmc: msm_sdcc: enable the sanitize capability
msm-fb: display: lm2 writeback support on mpq platforms
msm_fb: display: Disable LVDS phy & pll during panel off
...
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 54c8322..990faeb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -58,6 +58,18 @@
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_WR 0x02
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
+ do { \
+ if (stats->enabled) \
+ stats->pack_stop_reason[reason]++; \
+ } while (0)
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -108,6 +120,7 @@
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
+ struct device_attribute num_wr_reqs_to_start_packing;
};
static DEFINE_MUTEX(open_lock);
@@ -123,9 +136,21 @@
MMC_BLK_NOMEDIUM,
};
+enum {
+ MMC_PACKED_N_IDX = -1,
+ MMC_PACKED_N_ZERO,
+ MMC_PACKED_N_SINGLE,
+};
+
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+ mqrq->packed_cmd = MMC_PACKED_NONE;
+ mqrq->packed_num = MMC_PACKED_N_ZERO;
+}
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -259,6 +284,38 @@
return ret;
}
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int num_wr_reqs_to_start_packing;
+ int ret;
+
+ num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+ sscanf(buf, "%d", &value);
+ if (value >= 0)
+ md->queue.num_wr_reqs_to_start_packing = value;
+
+ mmc_blk_put(md);
+ return count;
+}
+
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -873,10 +930,10 @@
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg, trim_arg, erase_arg;
+ unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
- if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+ if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
@@ -884,23 +941,10 @@
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- /* The sanitize operation is supported at v4.5 only */
- if (mmc_can_sanitize(card)) {
- erase_arg = MMC_ERASE_ARG;
- trim_arg = MMC_TRIM_ARG;
- } else {
- erase_arg = MMC_SECURE_ERASE_ARG;
- trim_arg = MMC_SECURE_TRIM1_ARG;
- }
-
- if (mmc_erase_group_aligned(card, from, nr))
- arg = erase_arg;
- else if (mmc_can_trim(card))
- arg = trim_arg;
- else {
- err = -EINVAL;
- goto out;
- }
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -952,6 +996,47 @@
return err ? 0 : 1;
}
+static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int err = 0;
+
+ BUG_ON(!card);
+ BUG_ON(!card->host);
+
+ if (!(mmc_can_sanitize(card) &&
+ (card->host->caps2 & MMC_CAP2_SANITIZE))) {
+ pr_warning("%s: %s - SANITIZE is not supported\n",
+ mmc_hostname(card->host), __func__);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
+ mmc_hostname(card->host), __func__);
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1,
+ MMC_SANITIZE_REQ_TIMEOUT);
+
+ if (err)
+ pr_err("%s: %s - mmc_switch() with "
+ "EXT_CSD_SANITIZE_START failed. err=%d\n",
+ mmc_hostname(card->host), __func__, err);
+
+ pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
+ __func__);
+
+out:
+ spin_lock_irq(&md->lock);
+ __blk_end_request(req, err, blk_rq_bytes(req));
+ spin_unlock_irq(&md->lock);
+
+ return err ? 0 : 1;
+}
+
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
@@ -1086,12 +1171,60 @@
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
+ if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
+
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ int err, check, status;
+ u8 ext_csd[512];
+
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXP_EVENT) {
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ mq_rq->packed_fail_idx =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ return MMC_BLK_PARTIAL;
+ }
+ }
+ }
+
+ return check;
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1246,10 +1379,286 @@
mmc_queue_bounce_pre(mqrq);
}
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
+ int data_dir;
+
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+ return;
+
+ /*
+ * If the host does not support packing control, it must not affect
+ * write packing; therefore, write packing is always enabled.
+ */
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+ mq->wr_packing_enabled = true;
+ return;
+ }
+
+ if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+ return;
+ }
+
+ data_dir = rq_data_dir(req);
+
+ if (data_dir == READ) {
+ mq->num_of_potential_packed_wr_reqs = 0;
+ mq->wr_packing_enabled = false;
+ return;
+ } else if (data_dir == WRITE) {
+ mq->num_of_potential_packed_wr_reqs++;
+ }
+
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+
+}
+
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+ if (!card)
+ return NULL;
+
+ return &card->wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+ int max_num_of_packed_reqs = 0;
+
+ if (!card || !card->wr_pack_stats.packing_events)
+ return;
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ spin_lock(&card->wr_pack_stats.lock);
+ memset(card->wr_pack_stats.packing_events, 0,
+ (max_num_of_packed_reqs + 1) *
+ sizeof(*card->wr_pack_stats.packing_events));
+ memset(&card->wr_pack_stats.pack_stop_reason, 0,
+ sizeof(card->wr_pack_stats.pack_stop_reason));
+ card->wr_pack_stats.enabled = true;
+ spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ u8 put_back = 0;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+ struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
+
+ mmc_blk_clear_packed(mq->mqrq_cur);
+
+ if (!(md->flags & MMC_BLK_CMD23) ||
+ !card->ext_csd.packed_event_en)
+ goto no_packed;
+
+ if (!mq->wr_packing_enabled)
+ goto no_packed;
+
+ if ((rq_data_dir(cur) == WRITE) &&
+ (card->host->caps2 & MMC_CAP2_PACKED_WR))
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) &&
+ !en_rel_wr) {
+ goto no_packed;
+ }
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += cur->nr_phys_segments;
+
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors++;
+ phys_segments++;
+ }
+
+ spin_lock(&stats->lock);
+
+ while (reqs < max_packed_rw - 1) {
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
+ break;
+ }
+
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
+ put_back = 1;
+ break;
+ }
+
+ if (rq_data_dir(cur) != rq_data_dir(next)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
+ put_back = 1;
+ break;
+ }
+
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) &&
+ !en_rel_wr) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
+ put_back = 1;
+ break;
+ }
+
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count) {
+ if (stats->enabled)
+ stats->pack_stop_reason[EXCEEDS_SECTORS]++;
+ put_back = 1;
+ break;
+ }
+
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
+ put_back = 1;
+ break;
+ }
+
+ if (rq_data_dir(next) == WRITE)
+ mq->num_of_potential_packed_wr_reqs++;
+ list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
+ cur = next;
+ reqs++;
+ }
+
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ if (stats->enabled) {
+ if (reqs + 1 <= card->ext_csd.max_packed_writes)
+ stats->packing_events[reqs + 1]++;
+ if (reqs + 1 == max_packed_rw)
+ MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+ }
+
+ spin_unlock(&stats->lock);
+
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
+ mq->mqrq_cur->packed_num = ++reqs;
+ return reqs;
+ }
+
+no_packed:
+ mmc_blk_clear_packed(mq->mqrq_cur);
+ return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ bool do_rel_wr;
+ u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+ u8 i = 1;
+
+ mqrq->packed_cmd = MMC_PACKED_WRITE;
+ mqrq->packed_blocks = 0;
+ mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
+
+ memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
+ packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+
+ /*
+ * Argument for each entry of packed group
+ */
+ list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ /* Argument of CMD23 */
+ packed_cmd_hdr[(i * 2)] =
+ (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] =
+ mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ mqrq->packed_blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ brq->data.blocks = mqrq->packed_blocks + 1;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
int ret)
{
+ struct mmc_queue_req *mq_rq;
+ mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
@@ -1268,13 +1677,48 @@
spin_unlock_irq(&md->lock);
}
} else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
}
return ret;
}
+static int mmc_blk_end_packed_req(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct request *prq;
+ int idx = mq_rq->packed_fail_idx, i = 0;
+ int ret = 0;
+
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ if (idx == i) {
+ /* retry from error index */
+ mq_rq->packed_num -= idx;
+ mq_rq->req = prq;
+ ret = 1;
+
+ if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
+ list_del_init(&prq->queuelist);
+ mmc_blk_clear_packed(mq_rq);
+ }
+ return ret;
+ }
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(&md->lock);
+ __blk_end_request(prq, 0, blk_rq_bytes(prq));
+ spin_unlock_irq(&md->lock);
+ i++;
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+ return ret;
+}
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
@@ -1283,15 +1727,24 @@
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
+ struct request *req, *prq;
struct mmc_async_req *areq;
+ const u8 packed_num = 2;
+ u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
do {
if (rqc) {
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ if (reqs >= packed_num)
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+ card, mq);
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
@@ -1305,6 +1758,13 @@
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
+ /*
+ * Check BKOPS urgency from each R1 response
+ */
+ if (mmc_card_mmc(card) &&
+ (brq->cmd.resp[0] & R1_EXCEPTION_EVENT))
+ mmc_card_set_check_bkops(card);
+
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
@@ -1312,10 +1772,17 @@
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0,
+
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
+ ret = mmc_blk_end_packed_req(mq, mq_rq);
+ break;
+ } else {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0,
brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ spin_unlock_irq(&md->lock);
+ }
+
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1348,7 +1815,8 @@
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV)
+ if (err == -ENODEV ||
+ mq_rq->packed_cmd != MMC_PACKED_NONE)
goto cmd_abort;
/* Fall through */
}
@@ -1377,27 +1845,66 @@
}
if (ret) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
- mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ /*
+ * In case of an incomplete request
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ } else {
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ }
}
} while (ret);
return 1;
cmd_abort:
- spin_lock_irq(&md->lock);
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
- spin_unlock_irq(&md->lock);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ spin_lock_irq(&md->lock);
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = __blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
+ spin_unlock_irq(&md->lock);
+ } else {
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(&md->lock);
+ __blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ spin_unlock_irq(&md->lock);
+ }
+ mmc_blk_clear_packed(mq_rq);
+ }
start_new_req:
if (rqc) {
+ /*
+ * If the current request is packed, it needs to be put back.
+ */
+ if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
+ while (!list_empty(&mq->mqrq_cur->packed_list)) {
+ prq = list_entry_rq(
+ mq->mqrq_cur->packed_list.prev);
+ if (prq->queuelist.prev !=
+ &mq->mqrq_cur->packed_list) {
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(mq->queue->queue_lock);
+ blk_requeue_request(mq->queue, prq);
+ spin_unlock_irq(mq->queue->queue_lock);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+ mmc_blk_clear_packed(mq->mqrq_cur);
+ }
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
}
@@ -1405,9 +1912,6 @@
return 0;
}
-static int
-mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
-
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
@@ -1436,7 +1940,14 @@
goto out;
}
- if (req && req->cmd_flags & REQ_DISCARD) {
+ mmc_blk_write_packing_control(mq, req);
+
+ if (req && req->cmd_flags & REQ_SANITIZE) {
+ /* complete ongoing async transfer before issuing sanitize */
+ if (card->host && card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_sanitize_rq(mq, req);
+ } else if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1662,6 +2173,8 @@
if (md) {
card = md->queue.card;
+ device_remove_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -1728,12 +2241,26 @@
if (ret)
goto power_ro_lock_fail;
}
+
+ md->num_wr_reqs_to_start_packing.show =
+ num_wr_reqs_to_start_packing_show;
+ md->num_wr_reqs_to_start_packing.store =
+ num_wr_reqs_to_start_packing_store;
+ sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+ md->num_wr_reqs_to_start_packing.attr.name =
+ "num_wr_reqs_to_start_packing";
+ md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
+ if (ret)
+ goto power_ro_lock_fail;
+
return ret;
power_ro_lock_fail:
- device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
- del_gendisk(md->disk);
+ del_gendisk(md->disk);
return ret;
}
@@ -1777,6 +2304,10 @@
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
+ /* Some INAND MCP devices advertise incorrect timeout values */
+ MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_INAND_DATA_TIMEOUT),
+
END_FIXUP
};