//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline int virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return 0;
	case VIRTIO_BLK_S_UNSUPP:
		return -ENOTTY;
	default:
		return -EIO;
	}
}

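/*
 * Assemble the descriptor chain for one request: the out header first,
 * then any data segments (driver->device for writes, device->driver for
 * reads), and finally the one-byte status that the device fills in.
 * SCSI packet commands additionally carry the CDB after the header and
 * the sense buffer plus SCSI in-header before the status byte.
 */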
static int __virtblk_add_req(struct virtqueue *vq,
			     struct virtblk_req *vbr,
			     struct scatterlist *data_sg,
			     bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;
	__virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
		sgs[num_out++] = &cmd;
	}

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
		sgs[num_out + num_in++] = &sense;
		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
		sgs[num_out + num_in++] = &inhdr;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	int error = virtblk_result(vbr);

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
		req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
		req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
		req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
	} else if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
		req->errors = (error != 0);
	}

	blk_mq_end_request(req, error);
}

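/*
 * Virtqueue interrupt callback: reap all completed requests under the
 * per-queue lock and restart the hardware queue if it had been stopped.
 */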
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			blk_mq_complete_request(vbr->req, vbr->req->errors);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

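/*
 * blk-mq ->queue_rq handler: fill in the virtio_blk_outhdr for the request,
 * map its data into the per-request scatterlist and add it to this hardware
 * queue's virtqueue, only kicking the host for the last request of a batch.
 */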
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req_op(req) == REQ_OP_FLUSH) {
		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_DRV_PRIV:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}

/* return id (s/n) string for *disk to *id_str */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);
	blk_rq_set_block_pc(req);
	req->cmd_type = REQ_TYPE_DRV_PRIV;

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
out:
	blk_put_request(req);
	return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t virtblk_serial_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
		   (unsigned long long)capacity,
		   queue_logical_block_size(q),
		   cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

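/*
 * Read the number of request queues from config space (VIRTIO_BLK_F_MQ),
 * falling back to a single queue, then find the virtqueues and initialise
 * the per-queue locks and names.
 */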
static int init_vq(struct virtio_blk *vblk)
{
	int err = 0;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
	if (!vblk->vqs) {
		err = -ENOMEM;
		goto out;
	}

	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
	if (!names)
		goto err_names;

	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
	if (!callbacks)
		goto err_callbacks;

	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto err_find_vqs;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

err_find_vqs:
	kfree(vqs);
err_vqs:
	kfree(callbacks);
err_callbacks:
	kfree(names);
err_names:
	if (err)
		kfree(vblk->vqs);
out:
	return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

static int virtblk_init_request(void *data, struct request *rq,
				unsigned int hctx_idx, unsigned int request_idx,
				unsigned int numa_node)
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static struct blk_mq_ops virtio_mq_ops = {
	.queue_rq = virtio_queue_rq,
	.map_queue = blk_mq_map_queue,
	.complete = virtblk_request_done,
	.init_request = virtblk_init_request,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

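/*
 * Probe: set up the virtqueues and blk-mq tag set, apply the limits and
 * topology advertised in config space, and register the gendisk.
 */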
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_stop_hw_queues(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static struct virtio_driver virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = virtblk_remove,
	.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtblk_freeze,
	.restore = virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");