/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"

DEFINE_SPINLOCK(dev_list_lock);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->type == NVME_NS_LIGHTNVM)
		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);

	spin_lock(&dev_list_lock);
	ns->disk->private_data = NULL;
	spin_unlock(&dev_list_lock);

	nvme_put_ctrl(ns->ctrl);
	put_disk(ns->disk);
	kfree(ns);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns && !kref_get_unless_zero(&ns->kref))
		ns = NULL;
	spin_unlock(&dev_list_lock);

	return ns;
}

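/*
 * Allocate a blk-mq request for an NVMe passthrough command.  Bit 0 of the
 * opcode distinguishes data-out (write) from data-in commands and sets the
 * request direction; the nvme_command itself is attached via req->cmd.
 */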
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags)
{
	bool write = cmd->common.opcode & 1;
	struct request *req;

	req = blk_mq_alloc_request(q, write, flags);
	if (IS_ERR(req))
		return req;

	req->cmd_type = REQ_TYPE_DRV_PRIV;
	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	req->__data_len = 0;
	req->__sector = (sector_t) -1;
	req->bio = req->biotail = NULL;

	req->cmd = (unsigned char *)cmd;
	req->cmd_len = sizeof(struct nvme_command);
	req->special = (void *)0;

	return req;
}

/*
 * Returns 0 on success. If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, 0);
	if (result)
		*result = (u32)(uintptr_t)req->special;
	ret = req->errors;
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
}

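/*
 * Map a user space data buffer (and an optional metadata buffer) onto a
 * request and execute it synchronously.  Metadata is bounced through a
 * kernel allocation attached as a bio integrity payload and, for reads,
 * copied back to user space on success.
 */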
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = cmd->common.opcode & 1;
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (!bip) {
				ret = -ENOMEM;
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
 submit:
	blk_execute_rq(req->q, disk, req, 0);
	ret = req->errors;
	if (result)
		*result = (u32)(uintptr_t)req->special;
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
 out_free_meta:
	kfree(meta);
 out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
 out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}

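/*
 * Identify Controller (CNS=1).  On success *id points to a kmalloc'ed
 * 4096 byte identify structure that the caller must kfree.
 */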
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = cpu_to_le32(1);

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

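/*
 * Identify Namespace.  On success *id points to a kmalloc'ed identify
 * structure for the given nsid that the caller must kfree.
 */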
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

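/*
 * Get Features.  The feature's current value is returned through *result,
 * taken from Dword 0 of the completion entry.
 */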
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
}

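/*
 * Read the global SMART / Health Information log page (NSID 0xffffffff).
 * cdw10 encodes the number of dwords to transfer (0's based) in the upper
 * 16 bits and the log page identifier in the low byte.
 */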
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}

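/*
 * NVME_IOCTL_SUBMIT_IO: translate a struct nvme_user_io into a read, write
 * or compare command and submit it with the caller's data and metadata
 * buffers attached.
 */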
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

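/*
 * NVME_IOCTL_ADMIN_CMD / NVME_IOCTL_IO_CMD passthrough.  Requires
 * CAP_SYS_ADMIN; the completion result is copied back into the user's
 * struct nvme_passthru_cmd.
 */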
int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns(disk->private_data);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
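/*
 * Register a T10-PI integrity profile matching the namespace's protection
 * information type, so the block layer generates and verifies the
 * protection fields for us.
 */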
static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	/* zero unset fields (flags, tag_size) instead of passing stack garbage */
	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

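/*
 * Advertise DSM deallocate (discard) support to the block layer, with a
 * granularity and alignment of one logical block.
 */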
static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

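/*
 * Re-read the Identify Namespace data and bring block size, metadata /
 * protection information settings, capacity and discard support back in
 * sync with the device.  Also detects LightNVM namespaces.
 */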
int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id;
	u8 lbaf, pi_type;
	u16 old_ms;
	unsigned short bs;

	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
				__func__, ns->ctrl->instance, ns->ns_id);
		return -ENODEV;
	}
	if (id->ncap == 0) {
		kfree(id);
		return -ENODEV;
	}

	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
			dev_warn(ns->ctrl->dev,
				"%s: LightNVM init failure\n", __func__);
			kfree(id);
			return -ENODEV;
		}
		ns->type = NVME_NS_LIGHTNVM;
	}

	old_ms = ns->ms;
	lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

533 * If identify namespace failed, use default 512 byte block size so
534 * block layer can use before failing read/write for 0 capacity.
535 */
536 if (ns->lba_shift == 0)
537 ns->lba_shift = 9;
538 bs = 1 << ns->lba_shift;
539

	/* XXX: PI implementation requires metadata equal t10 pi tuple size */
	pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
					id->dps & NVME_NS_DPS_PI_MASK : 0;

	blk_mq_freeze_queue(disk->queue);
	if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
				ns->ms != old_ms ||
				bs != queue_logical_block_size(disk->queue) ||
				(ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
	blk_queue_logical_block_size(ns->queue, bs);

	if (ns->ms && !ns->ext)
		nvme_init_integrity(ns);

	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);

	kfree(id);
	return 0;
}

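/*
 * Map a block layer pr_type to the reservation type value used by the NVMe
 * reservation commands; returns 0 for an unknown type.
 */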
static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

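/*
 * Send a reservation command whose 16 byte data buffer carries the current
 * reservation key followed by the new / service action key.
 */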
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	/* parenthesize the ternary: | binds tighter than ?: */
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk = nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}