/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown. The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
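
/*
 * Illustrative use (a sketch, not a complete completion handler): because
 * the value is negative, a transport can tell a driver-cancelled command
 * apart from a controller-reported status with a plain comparison, e.g.
 *
 *	if (req->errors == NVME_SC_CANCELLED)
 *		...
 */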

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

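/*
 * Keep Alive defaults.  NVME_DEFAULT_KATO is the default Keep Alive
 * Timeout and NVME_KATO_GRACE the additional grace period allowed on top
 * of it; both values are understood to be in seconds (the kato field of
 * struct nvme_ctrl below is a plain second count).
 */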
#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern unsigned int nvme_max_retries;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
};

/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000

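/*
 * Controller states.  A rough sketch of the lifecycle (the exact legal
 * transitions are enforced by nvme_change_ctrl_state(), declared below):
 *
 *	NEW -> LIVE <-> RESETTING/RECONNECTING, then DELETING -> DEAD
 */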
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	u16 vid;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct nvm_dev *ndev;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

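/*
 * Trigger an NVM Subsystem Reset by writing 0x4E564D65 (the ASCII string
 * "NVMe") to the NSSR register; the write is only attempted when the
 * controller reported subsystem reset support (ctrl->subsystem).
 */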
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

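/*
 * Convert a 512-byte block layer sector number into a namespace LBA.
 * For example, on a namespace formatted with 4K blocks (lba_shift == 12),
 * sector 8 maps to LBA 8 >> 3 == 1.
 */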
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

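/*
 * Length of the payload that will be mapped for the command.  Discards do
 * not transfer the bio data; they carry a single struct nvme_dsm_range
 * that is allocated while the command is set up (see nvme_setup_cmd()).
 */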
static inline unsigned nvme_map_len(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return sizeof(struct nvme_dsm_range);
	else
		return blk_rq_bytes(rq);
}

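/* Release the DSM range buffer stashed in ->completion_data for discards. */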
static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD)
		kfree(req->completion_data);
}

static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

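/*
 * A failed command is retried unless the controller set the Do Not Retry
 * bit, the block layer marked the request as non-retryable, the request
 * has already outlived its timeout, or the global retry limit
 * (nvme_max_retries) has been reached.
 */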
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout &&
		req->retries < nvme_max_retries;
}

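/*
 * Typical completion-path usage (a sketch only; the transport drivers are
 * the authoritative reference):
 *
 *	if (unlikely(status)) {
 *		if (nvme_req_needs_retry(req, status)) {
 *			nvme_requeue_req(req);
 *			return;
 *		}
 *		error = nvme_error_status(status);
 *	}
 *	blk_mq_end_request(req, error);
 */
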
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
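
/*
 * Example (a sketch only, modelled on the Identify Controller helper in
 * core.c): issue a synchronous admin command and copy the result into a
 * kernel buffer.
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);	// Identify Controller
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */
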
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

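/*
 * LightNVM (Open-Channel SSD) hooks.  When CONFIG_NVM is disabled the
 * stubs below compile the LightNVM support away.
 */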
#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
		      const struct attribute_group *attrs);
void nvme_nvm_unregister(struct nvme_ns *ns);

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	if (dev->type->devnode)
		return dev_to_disk(dev)->private_data;

	return (container_of(dev, struct nvm_dev, dev))->private_data;
}
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node,
				    const struct attribute_group *attrs)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */