/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
        /*
         * Driver internal status code for commands that were cancelled due
         * to timeouts or controller shutdown.  The value is negative so
         * that it a) doesn't overlap with the unsigned hardware error codes,
         * and b) can easily be tested for.
         */
        NVME_SC_CANCELLED = -EINTR,
};

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT (admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
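
/*
 * Sketch of how the timeout variables are expected to be defined (the real
 * definitions live in core.c, not in this header; the value shown is
 * illustrative):
 *
 *      unsigned char nvme_io_timeout = 30;
 *      module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 *
 * The variables hold seconds; multiplying by HZ converts them to jiffies,
 * the unit the block layer uses for request timeouts.
 */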

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
        /*
         * Prefers I/O aligned to a stripe size specified in a vendor
         * specific Identify field.
         */
        NVME_QUIRK_STRIPE_SIZE = (1 << 0),

        /*
         * The controller doesn't handle Identify CNS values other than
         * 0 or 1.
         */
        NVME_QUIRK_IDENTIFY_CNS = (1 << 1),
};
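
/*
 * A quirk is usually attached through the transport driver's PCI id table;
 * a hedged sketch (the device id shown is illustrative, not authoritative):
 *
 *      static const struct pci_device_id nvme_id_table[] = {
 *              { PCI_VDEVICE(INTEL, 0x0953),
 *                      .driver_data = NVME_QUIRK_STRIPE_SIZE, },
 *              ...
 *      };
 *
 * The driver_data bits are then handed to nvme_init_ctrl() as 'quirks'.
 */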

struct nvme_ctrl {
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct blk_mq_tag_set *tagset;
        struct list_head namespaces;
        struct device *device;  /* char device */
        struct list_head node;

        /*
         * Further identification and limit fields are elided in this
         * excerpt; the two below are restored because this header uses
         * them (see nvme_reset_subsystem() and nvme_init_ctrl()).
         */
        bool subsystem;         /* NVM subsystem reset supported */
        unsigned long quirks;   /* enum nvme_quirks bits */
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN.
 */
struct nvme_ns {
        struct list_head list;

        struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;

        /*
         * Several fields are elided in this excerpt; the four below are
         * restored because the inline helpers later in this header read
         * them.
         */
        unsigned ns_id;         /* namespace id */
        int lba_shift;          /* log2 of the logical block size */
        u16 ms;                 /* metadata bytes per LBA, 0 if none */
        u8 pi_type;             /* protection information type */

        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        bool (*io_incapable)(struct nvme_ctrl *ctrl);
        int (*reset_ctrl)(struct nvme_ctrl *ctrl);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
};
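
/*
 * Minimal sketch of a transport filling in nvme_ctrl_ops; the function
 * names are illustrative (modeled on what a PCIe transport would provide):
 *
 *      static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 *              .reg_read32     = nvme_pci_reg_read32,
 *              .reg_write32    = nvme_pci_reg_write32,
 *              .reg_read64     = nvme_pci_reg_read64,
 *              .io_incapable   = nvme_pci_io_incapable,
 *              .reset_ctrl     = nvme_pci_reset_ctrl,
 *              .free_ctrl      = nvme_pci_free_ctrl,
 *      };
 *
 * The indirection keeps this core header free of bus-specific MMIO details.
 */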

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
        u32 val = 0;

        if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
                return false;
        return val & NVME_CSTS_RDY;
}

static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
        u32 val = 0;

        if (ctrl->ops->io_incapable(ctrl))
                return true;
        if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
                return false;
        return val & NVME_CSTS_CFS;
}
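
/*
 * Hedged usage sketch: bring-up paths poll nvme_ctrl_ready() until CSTS.RDY
 * flips (real callers also bound the wait, e.g. by the CAP.TO timeout):
 *
 *      while (!nvme_ctrl_ready(ctrl))
 *              msleep(100);
 */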

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
        if (!ctrl->subsystem)
                return -ENOTTY;
        /* 0x4E564D65 is the required reset magic: ASCII "NVMe" */
        return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
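
/*
 * Convert a 512-byte kernel sector number to a namespace logical block
 * number. Worked example: with 4096-byte LBAs (lba_shift == 12), sector 8
 * maps to block 8 >> (12 - 9) == 1.
 */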
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
        return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
                struct nvme_command *cmnd)
{
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->common.opcode = nvme_cmd_flush;
        cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}

static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
{
        u16 control = 0;
        u32 dsmgmt = 0;

        if (req->cmd_flags & REQ_FUA)
                control |= NVME_RW_FUA;
        if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
                control |= NVME_RW_LR;

        if (req->cmd_flags & REQ_RAHEAD)
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
        cmnd->rw.command_id = req->tag;
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

        /* Only set up protection information if the format has metadata. */
        if (ns->ms) {
                switch (ns->pi_type) {
                case NVME_NS_DPS_PI_TYPE3:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD;
                        break;
                case NVME_NS_DPS_PI_TYPE1:
                case NVME_NS_DPS_PI_TYPE2:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD |
                                        NVME_RW_PRINFO_PRCHK_REF;
                        cmnd->rw.reftag = cpu_to_le32(
                                        nvme_block_nr(ns, blk_rq_pos(req)));
                        break;
                }
                if (!blk_integrity_rq(req))
                        control |= NVME_RW_PRINFO_PRACT;
        }

        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
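
/*
 * Note on the length field above: NVMe encodes the block count as a 0's
 * based value. Example: a 4096-byte request on a 512-byte-LBA namespace
 * covers 8 blocks and is encoded as cpu_to_le16(8 - 1) == 7.
 */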

static inline int nvme_error_status(u16 status)
{
        switch (status & 0x7ff) {
        case NVME_SC_SUCCESS:
                return 0;
        case NVME_SC_CAP_EXCEEDED:
                return -ENOSPC;
        default:
                return -EIO;
        }
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen, u32 *result,
                unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen,
                void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
                u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
                struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
                dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);

extern spinlock_t dev_list_lock;

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */