/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};
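
/*
 * Because the value is negative, a completion path can tell a cancelled
 * command apart from hardware status codes with a plain comparison.
 * Illustrative sketch only (the variable is made up, not part of this
 * header):
 *
 *	int status = ...;		// completion status
 *	if (status == NVME_SC_CANCELLED)
 *		return;			// cancelled by the driver, not the device
 */
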
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
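
/*
 * A minimal sketch of how a transport might wire these timeouts into its
 * blk-mq tag sets; the field assignment below is illustrative, not a
 * requirement of this header:
 *
 *	tagset->timeout = NVME_IO_TIMEOUT;		// I/O queues
 *	admin_tagset->timeout = ADMIN_TIMEOUT;		// admin queue
 */
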
/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE		= (1 << 0),

	/* The controller doesn't handle Identify CNS values other than 0 or 1. */
	NVME_QUIRK_IDENTIFY_CNS		= (1 << 1),
};
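
/*
 * Quirks form a bitmask, so a driver tests them with a simple AND.
 * Illustrative sketch:
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		... honour the vendor specific stripe size ...
 */
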
struct nvme_ctrl {
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;

	bool subsystem;		/* NVM subsystem reset supported */
	unsigned long quirks;	/* enum nvme_quirks bitmask */
};
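
/*
 * A transport is expected to embed struct nvme_ctrl in its own controller
 * type and recover the container in its callbacks.  Minimal sketch,
 * assuming a hypothetical struct foo_dev:
 *
 *	struct foo_dev {
 *		struct nvme_ctrl ctrl;
 *		...
 *	};
 *
 *	static struct foo_dev *to_foo_dev(struct nvme_ctrl *ctrl)
 *	{
 *		return container_of(ctrl, struct foo_dev, ctrl);
 *	}
 */
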
/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;
	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;

	unsigned ns_id;
	int lba_shift;
	u16 ms;			/* metadata size in bytes */
	u8 pi_type;		/* T10 protection information type */
	unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};
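
/*
 * The NVME_NS_* values are bit numbers within ns->flags, intended for
 * the atomic bitops.  Illustrative sketch:
 *
 *	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 *		return;		// removal already in progress
 */
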
struct nvme_ctrl_ops {
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	bool (*io_incapable)(struct nvme_ctrl *ctrl);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
};
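
/*
 * A register-based transport fills this table with accessors for its
 * register window.  Minimal sketch, assuming hypothetical foo_* helpers:
 *
 *	static const struct nvme_ctrl_ops foo_ctrl_ops = {
 *		.reg_read32	= foo_reg_read32,
 *		.reg_write32	= foo_reg_write32,
 *		.reg_read64	= foo_reg_read64,
 *		.io_incapable	= foo_io_incapable,
 *		.reset_ctrl	= foo_reset_ctrl,
 *		.free_ctrl	= foo_free_ctrl,
 *	};
 */
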
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->io_incapable(ctrl))
		return true;
	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_CFS;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* 0x4E564D65 is the ASCII string "NVMe" */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/* Convert a 512-byte linux sector number to a device logical block address. */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
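
/*
 * Worked example: for a namespace formatted with 4096-byte LBAs
 * (lba_shift == 12), linux sector 128 (byte offset 65536) maps to
 * device LBA 128 >> (12 - 9) == 16.
 */
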
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.command_id = req->tag;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	/* NLB is a zero's based value: 0 means one logical block */
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
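
/*
 * Worked example: a 16 KiB read at byte offset 64 KiB on the 4096-byte
 * LBA namespace above yields slba == 16 and length == 3, since the four
 * logical blocks are encoded as 3 in the zero's based NLB field.
 */
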
static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
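
/*
 * A transport typically brackets a controller reset with these helpers:
 * quiesce I/O first, reinitialise the device, then let requests flow
 * again.  Illustrative sketch only, error handling omitted:
 *
 *	nvme_stop_queues(ctrl);
 *	... disable and reinitialise the controller ...
 *	nvme_start_queues(ctrl);
 */
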
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
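
/*
 * Minimal sketch of issuing an admin command through the sync helper;
 * this mirrors what the identify wrappers above do internally, but the
 * exact field values here are illustrative:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);	// 1 == identify controller
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */
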
extern spinlock_t dev_list_lock;

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}
static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */