/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

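/*
 * Per-queue worker thread: pull requests off the dispatch queue and hand
 * them to mmc_blk_issue_rq(). When both the dispatch queue and our own
 * queue (mq->qcnt) are empty, go to sleep until mmc_request_fn() wakes us
 * up; thread_sem is released across the sleep so the queue can be
 * suspended in the meantime.
 */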
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

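/* Allocate and initialise a scatterlist with room for sg_len entries. */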
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

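/*
 * Tell the block layer about the card's discard capabilities: the discard
 * granularity follows the card's preferred erase size, and secure
 * erase/trim is advertised only when the card supports it.
 */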
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

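/**
 * mmc_exit_request() - tear down the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 *
 * Free the scatterlist allocated by mmc_init_request().
 */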
static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

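/**
 * mmc_cleanup_queue - release an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue (a suspended queue would deadlock), stop the worker
 * thread, then drain whatever is left: with queuedata cleared,
 * mmc_request_fn() quietly errors out any remaining requests.
 */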
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
Pierre Ossman98ccf142007-05-12 00:26:16 +0200318}