blob: fe58d31cbc7ec819540623950b7bbb7002d5a8fc [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef MMC_QUEUE_H
2#define MMC_QUEUE_H
3
Mike Christiec2df40d2016-06-05 14:32:17 -05004static inline bool mmc_req_is_special(struct request *req)
5{
Mike Christie3a5e02c2016-06-05 14:32:23 -05006 return req &&
Adrian Hunter7afafc82016-08-16 10:59:35 +03007 (req_op(req) == REQ_OP_FLUSH ||
8 req_op(req) == REQ_OP_DISCARD ||
9 req_op(req) == REQ_OP_SECURE_ERASE);
Mike Christiec2df40d2016-06-05 14:32:17 -050010}
Seungwon Jeonef3a69c72013-03-14 15:17:13 +090011
/* Forward declarations: only pointers are used below, so the full
 * definitions (block layer / scheduler headers) are not needed here. */
struct request;
struct task_struct;
14
/*
 * Everything needed to issue one block-layer request to the MMC core:
 * the composite mmc_request plus the individual commands and data
 * transfer it is assembled from.
 */
struct mmc_blk_request {
	struct mmc_request	mrq;	/* composite request given to the host */
	struct mmc_command	sbc;	/* NOTE(review): presumably SET_BLOCK_COUNT precursor — confirm in block.c */
	struct mmc_command	cmd;	/* the main (data) command */
	struct mmc_command	stop;	/* stop/terminate command */
	struct mmc_data		data;	/* data transfer descriptor */
	int			retune_retry_done;	/* nonzero once a retune-triggered retry was attempted */
};
23
/* Whether (and how) a request has been folded into a packed command. */
enum mmc_packed_type {
	MMC_PACKED_NONE = 0,	/* not a packed command */
	MMC_PACKED_WRITE,	/* packed write command */
};

/* True for any packed command type. */
#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
/* True only for a packed write. */
#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)
31
/*
 * State for one packed (multi-request) command.
 */
struct mmc_packed {
	struct list_head	list;		/* requests folded into this packed command */
	__le32			cmd_hdr[1024];	/* packed command header, little-endian words */
	unsigned int		blocks;		/* total data blocks covered by the packed command */
	u8			nr_entries;	/* number of packed entries */
	u8			retries;	/* retry count for this packed command */
	s16			idx_failure;	/* NOTE(review): appears to hold the index of the failed entry (negative when none) — confirm in block.c */
};
40
/*
 * Per-slot queue state: one block-layer request together with the MMC
 * bookkeeping (scatterlist, optional bounce buffer, async tracking)
 * used to service it.
 */
struct mmc_queue_req {
	struct request		*req;		/* block-layer request being serviced */
	struct mmc_blk_request	brq;		/* MMC commands/data built for @req */
	struct scatterlist	*sg;		/* scatterlist for the data transfer */
	char			*bounce_buf;	/* NOTE(review): bounce buffer, presumably for hosts that cannot do scattered DMA — confirm */
	struct scatterlist	*bounce_sg;	/* scatterlist describing the bounce buffer */
	unsigned int		bounce_sg_len;	/* number of entries in @bounce_sg */
	struct mmc_async_req	mmc_active;	/* handle for asynchronous request tracking */
	enum mmc_packed_type	cmd_type;	/* packed-command classification of this slot */
	struct mmc_packed	*packed;	/* packed-command state, when cmd_type is packed */
};
52
/*
 * One request queue per MMC block device, serviced by a dedicated
 * kernel thread.  Two mmc_queue_req slots are kept so a new request can
 * be prepared while the previous one is in flight (tracked via
 * mqrq_cur/mqrq_prev).
 */
struct mmc_queue {
	struct mmc_card		*card;		/* card this queue belongs to */
	struct task_struct	*thread;	/* queue worker thread */
	struct semaphore	thread_sem;	/* serializes thread wakeup/teardown */
	unsigned int		flags;		/* MMC_QUEUE_* bits below */
#define MMC_QUEUE_SUSPENDED	(1 << 0)	/* queue processing is suspended */
#define MMC_QUEUE_NEW_REQUEST	(1 << 1)	/* a new request arrived while busy */
	void			*data;		/* opaque owner data (set by block driver) */
	struct request_queue	*queue;		/* the block-layer queue */
	struct mmc_queue_req	mqrq[2];	/* the two request slots */
	struct mmc_queue_req	*mqrq_cur;	/* slot currently being prepared */
	struct mmc_queue_req	*mqrq_prev;	/* slot previously issued */
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
	atomic_t		max_write_speed;	/* simulated write-speed cap */
	atomic_t		max_read_speed;		/* simulated read-speed cap */
	atomic_t		cache_size;		/* simulated cache size */
	/* i/o tracking */
	atomic_long_t		cache_used;
	unsigned long		cache_jiffies;
#endif
};
74
Adrian Hunterd09408a2011-06-23 13:40:28 +030075extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
76 const char *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077extern void mmc_cleanup_queue(struct mmc_queue *);
78extern void mmc_queue_suspend(struct mmc_queue *);
79extern void mmc_queue_resume(struct mmc_queue *);
80
Per Forlin97868a22011-07-09 17:12:36 -040081extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
82 struct mmc_queue_req *);
83extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
84extern void mmc_queue_bounce_post(struct mmc_queue_req *);
Pierre Ossman98ccf142007-05-12 00:26:16 +020085
Seungwon Jeonce39f9d2013-02-06 17:02:46 +090086extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
87extern void mmc_packed_clean(struct mmc_queue *);
88
Chuanxiao Dong4e93b9a2014-08-12 12:01:30 +080089extern int mmc_access_rpmb(struct mmc_queue *);
90
Linus Torvalds1da177e2005-04-16 15:20:36 -070091#endif