/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT   (admin_timeout * HZ)

#define NVME_DEFAULT_KATO       5
#define NVME_KATO_GRACE         10

extern struct workqueue_struct *nvme_wq;

enum {
        NVME_NS_LBA             = 0,
        NVME_NS_LIGHTNVM        = 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
        /*
         * Prefers I/O aligned to a stripe size specified in a vendor
         * specific Identify field.
         */
        NVME_QUIRK_STRIPE_SIZE                  = (1 << 0),

        /*
         * The controller doesn't handle Identify values other than 0 or 1
         * correctly.
         */
        NVME_QUIRK_IDENTIFY_CNS                 = (1 << 1),

        /*
         * The controller deterministically returns 0's on reads to
         * logical blocks that deallocate was called on.
         */
        NVME_QUIRK_DEALLOCATE_ZEROES            = (1 << 2),

        /*
         * The controller needs a delay before it starts checking the device
         * readiness, which is done by reading the NVME_CSTS_RDY bit.
         */
        NVME_QUIRK_DELAY_BEFORE_CHK_RDY         = (1 << 3),

        /*
         * APST should not be used.
         */
        NVME_QUIRK_NO_APST                      = (1 << 4),

        /*
         * The deepest sleep state should not be used.
         */
        NVME_QUIRK_NO_DEEPEST_PS                = (1 << 5),

        /*
         * Supports the LightNVM command set if indicated in vs[1].
         */
        NVME_QUIRK_LIGHTNVM                     = (1 << 6),
};

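/*
 * Illustrative sketch (not part of this header): a transport driver
 * typically attaches quirks to a controller through its device ID table
 * and then passes them to nvme_init_ctrl() as the "quirks" argument.
 * The vendor/device IDs and names below are hypothetical.
 *
 *      static const struct pci_device_id example_id_table[] = {
 *              { PCI_DEVICE(0x1234, 0x5678),           // hypothetical IDs
 *                .driver_data = NVME_QUIRK_STRIPE_SIZE |
 *                               NVME_QUIRK_NO_DEEPEST_PS, },
 *              { 0, }
 *      };
 *
 *      // later, from the probe path:
 *      ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &example_ctrl_ops,
 *                           id->driver_data);
 */
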
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
        struct nvme_command *cmd;
        union nvme_result result;
        u8 retries;
        u8 flags;
        u16 status;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH          REQ_DRV

enum {
        NVME_REQ_CANCELLED      = (1 << 0),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
        return blk_mq_rq_to_pdu(req);
}

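/*
 * Illustrative sketch (not part of this header): a transport's per-request
 * private data (the blk-mq PDU) embeds struct nvme_request as its first
 * member, which is what lets nvme_req() recover it from any struct request
 * via blk_mq_rq_to_pdu().  The structure and field names below are
 * hypothetical.
 *
 *      struct example_request {
 *              struct nvme_request     req;    // must be the first member
 *              struct scatterlist      *sg;    // transport-private state
 *      };
 *
 *      // with .cmd_size = sizeof(struct example_request) in the tag_set,
 *      // core and transport code can then use:
 *      nvme_req(rq)->retries++;
 */
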
/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT         2300

enum nvme_ctrl_state {
        NVME_CTRL_NEW,
        NVME_CTRL_LIVE,
        NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
        NVME_CTRL_RESETTING,
        NVME_CTRL_RECONNECTING,
        NVME_CTRL_DELETING,
        NVME_CTRL_DEAD,
};

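/*
 * Illustrative sketch (not part of this header): state changes go through
 * nvme_change_ctrl_state(), which rejects illegal transitions.  A reset
 * path, for example, first moves the controller to NVME_CTRL_RESETTING and
 * bails out if that transition is not currently allowed (roughly what
 * nvme_reset_ctrl() does):
 *
 *      if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *              return -EBUSY;
 *      queue_work(nvme_wq, &ctrl->reset_work);
 */
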
struct nvme_ctrl {
        enum nvme_ctrl_state state;
        bool identified;
        spinlock_t lock;
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct request_queue *connect_q;
        struct device *dev;
        int instance;
        struct blk_mq_tag_set *tagset;
        struct blk_mq_tag_set *admin_tagset;
        struct list_head namespaces;
        struct mutex namespaces_mutex;
        struct device ctrl_device;
        struct device *device; /* char device */
        struct cdev cdev;
        struct work_struct reset_work;
        struct work_struct delete_work;

        struct nvme_subsystem *subsys;
        struct list_head subsys_entry;

        struct opal_dev *opal_dev;

        char name[12];
        u16 cntlid;

        u32 ctrl_config;
        u16 mtfa;
        u32 queue_count;

        u64 cap;
        u32 page_size;
        u32 max_hw_sectors;
        u16 oncs;
        u16 oacs;
        u16 nssa;
        u16 nr_streams;
        atomic_t abort_limit;
        u8 vwc;
        u32 vs;
        u32 sgls;
        u16 kas;
        u8 npss;
        u8 apsta;
        u32 aen_result;
        unsigned int shutdown_timeout;
        unsigned int kato;
        bool subsystem;
        unsigned long quirks;
        struct nvme_id_power_state psd[32];
        struct nvme_effects_log *effects;
        struct work_struct scan_work;
        struct work_struct async_event_work;
        struct delayed_work ka_work;
        struct work_struct fw_act_work;

        /* Power saving configuration */
        u64 ps_max_latency_us;
        bool apst_enabled;

        /* PCIe only: */
        u32 hmpre;
        u32 hmmin;
        u32 hmminds;
        u16 hmmaxd;

        /* Fabrics only */
        u16 sqsize;
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
        u16 maxcmd;
        int nr_reconnects;
        struct nvmf_ctrl_options *opts;
};

struct nvme_subsystem {
        int instance;
        struct device dev;
        /*
         * Because we unregister the device on the last put we need
         * a separate refcount.
         */
        struct kref ref;
        struct list_head entry;
        struct mutex lock;
        struct list_head ctrls;
        struct list_head nsheads;
        char subnqn[NVMF_NQN_SIZE];
        char serial[20];
        char model[40];
        char firmware_rev[8];
        u8 cmic;
        u16 vendor_id;
        struct ida ns_ida;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
        u8 eui64[8];
        u8 nguid[16];
        uuid_t uuid;
};

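/*
 * Illustrative sketch (not part of this header): two namespaces refer to
 * the same storage only if all three identifiers match.  The helper name
 * below is hypothetical; uuid_equal() comes from <linux/uuid.h>.
 *
 *      static bool example_ns_ids_equal(const struct nvme_ns_ids *a,
 *                                       const struct nvme_ns_ids *b)
 *      {
 *              return uuid_equal(&a->uuid, &b->uuid) &&
 *                      memcmp(a->nguid, b->nguid, sizeof(a->nguid)) == 0 &&
 *                      memcmp(a->eui64, b->eui64, sizeof(a->eui64)) == 0;
 *      }
 */
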
/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is,
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
#ifdef CONFIG_NVME_MULTIPATH
        struct gendisk *disk;
        struct nvme_ns __rcu *current_path;
        struct bio_list requeue_list;
        spinlock_t requeue_lock;
        struct work_struct requeue_work;
#endif
        struct list_head list;
        struct srcu_struct srcu;
        struct nvme_subsystem *subsys;
        unsigned ns_id;
        struct nvme_ns_ids ids;
        struct list_head entry;
        struct kref ref;
        int instance;
};

struct nvme_ns {
        struct list_head list;

        struct nvme_ctrl *ctrl;
        struct request_queue *queue;
        struct gendisk *disk;
        struct list_head siblings;
        struct nvm_dev *ndev;
        struct kref kref;
        struct nvme_ns_head *head;

        int lba_shift;
        u16 ms;
        u16 sgs;
        u32 sws;
        bool ext;
        u8 pi_type;
        unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
        u16 noiob;
};

struct nvme_ctrl_ops {
        const char *name;
        struct module *module;
        unsigned int flags;
#define NVME_F_FABRICS                  (1 << 0)
#define NVME_F_METADATA_SUPPORTED       (1 << 1)
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl);
        void (*delete_ctrl)(struct nvme_ctrl *ctrl);
        int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
        int (*reinit_request)(void *data, struct request *rq);
};

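/*
 * Illustrative sketch (not part of this header): a transport driver fills
 * in an nvme_ctrl_ops instance and passes it to nvme_init_ctrl().  For a
 * memory-mapped transport the register accessors reduce to readl()/writel()
 * on the controller's BAR; fabrics transports additionally set
 * NVME_F_FABRICS and provide get_address().  All names below are
 * hypothetical.
 *
 *      static int example_reg_read32(struct nvme_ctrl *ctrl, u32 off,
 *                                    u32 *val)
 *      {
 *              *val = readl(to_example_dev(ctrl)->bar + off);
 *              return 0;
 *      }
 *
 *      static const struct nvme_ctrl_ops example_ctrl_ops = {
 *              .name                   = "example",
 *              .module                 = THIS_MODULE,
 *              .flags                  = NVME_F_METADATA_SUPPORTED,
 *              .reg_read32             = example_reg_read32,
 *              .reg_write32            = example_reg_write32,
 *              .reg_read64             = example_reg_read64,
 *              .free_ctrl              = example_free_ctrl,
 *              .submit_async_event     = example_submit_async_event,
 *              .delete_ctrl            = example_delete_ctrl,
 *      };
 */
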
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
        u32 val = 0;

        if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
                return false;
        return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
        if (!ctrl->subsystem)
                return -ENOTTY;

        /* Writing the ASCII string "NVMe" (0x4E564D65) to the NSSR register
         * triggers an NVM subsystem reset.
         */
        return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
        return (sector >> (ns->lba_shift - 9));
}

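/*
 * Block-layer sectors are always 512 bytes, so nvme_block_nr() rescales a
 * sector_t into this namespace's LBA size.  For example, with a namespace
 * formatted for 4096-byte blocks (lba_shift == 12) the shift is 3 and
 * sector 8 maps to device LBA 1.
 */
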
static inline void nvme_cleanup_cmd(struct request *req)
{
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }
}

static inline void nvme_end_request(struct request *req, __le16 status,
                union nvme_result result)
{
        struct nvme_request *rq = nvme_req(req);

        /* Bit 0 of the completion queue entry's status word is the phase
         * tag; shift it out so ->status holds only the status code.
         */
        rq->status = le16_to_cpu(status) >> 1;
        rq->result = result;
        blk_mq_complete_request(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
        get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
        put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
                const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
                bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
                blk_mq_req_flags_t flags);
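
/*
 * Illustrative sketch (not part of this header): issuing an admin command
 * synchronously on the admin queue.  This mirrors how the core reads the
 * controller Identify data; error handling is trimmed and the local
 * variable names are hypothetical.
 *
 *      struct nvme_command c = { };
 *      struct nvme_id_ctrl *id;
 *      int error;
 *
 *      c.identify.opcode = nvme_admin_identify;
 *      c.identify.cns = NVME_ID_CNS_CTRL;
 *
 *      id = kmalloc(sizeof(*id), GFP_KERNEL);
 *      if (!id)
 *              return -ENOMEM;
 *
 *      error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */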
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);

extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
void nvme_mpath_add_disk_links(struct nvme_ns *ns);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
void nvme_mpath_remove_disk_links(struct nvme_ns *ns);

static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
        struct nvme_ns_head *head = ns->head;

        if (head && ns == srcu_dereference(head->current_path, &head->srcu))
                rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#else
static inline void nvme_failover_req(struct request *req)
{
}
static inline bool nvme_req_needs_failover(struct request *req)
{
        return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
                struct nvme_ns_head *head)
{
        return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
                int node)
{
        return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
                unsigned long arg)
{
        return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
        return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */