// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");

static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_int,
};

static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");

static int write_queues;
module_param(write_queues, int, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static int poll_queues;
module_param(poll_queues, int, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_int(val, kp);
}

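/*
 * Shadow doorbell buffer layout: for each queue the submission entry sits at
 * slot 2*qid and the completion entry at slot 2*qid+1, each scaled by the
 * controller's doorbell stride.
 */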
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	struct nvme_command *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 last_cq_head;
	u16 qid;
	u8 cq_phase;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t meta_dma;
	struct scatterlist *sg;
};

static unsigned int max_io_queues(void)
{
	return num_possible_cpus() + write_queues + poll_queues;
}

static unsigned int max_queue_count(void)
{
	/* IO queues + admin queue */
	return 1 + max_io_queues();
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return (max_queue_count() * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

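/*
 * Returns true when the controller's event index lies between the old and the
 * new doorbell value (16-bit wrap-around arithmetic), i.e. the controller has
 * asked to be notified about this update.
 */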
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(unsigned int num_seg)
{
	return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}

static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg, bool use_sgl)
{
	size_t alloc_size;

	if (use_sgl)
		alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
	else
		alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);

	return alloc_size + sizeof(struct scatterlist) * nseg;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}

/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

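/*
 * Use SGLs only when the controller advertises SGL support, the request is on
 * an I/O queue, and the average segment size is at least sgl_threshold.
 */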
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	if (nseg == 0)
		return false;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
	int i;

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, dma_addr, iod->dma_len, dma_dir);
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	/* P2PDMA requests do not need to be unmapped */
	if (!is_pci_p2pdma_page(sg_page(iod->sg)))
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));


	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      dma_addr);

	for (i = 0; i < iod->npages; i++) {
		void *addr = nvme_pci_iod_list(req)[i];

		if (iod->use_sgl) {
			struct nvme_sgl_desc *sg_list = addr;

			next_dma_addr =
			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
		} else {
			__le64 *prp_list = addr;

			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
		}

		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
		dma_addr = next_dma_addr;
	}

	mempool_free(iod->sg, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return BLK_STS_RESOURCE;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);

	return BLK_STS_OK;

 bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				return BLK_STS_RESOURCE;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}

static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return 0;
}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return 0;
}

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (iod->nvmeq->qid &&
			    dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
	if (!iod->nents)
		goto out;

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
					      rq_dma_dir(req));
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out;

	iod->use_sgl = nvme_pci_use_sgls(dev, req);
	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
out:
	if (ret != BLK_STS_OK)
		nvme_unmap_data(dev, req);
	return ret;
}

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return 0;
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command cmnd;
	blk_status_t ret;

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &cmnd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &cmnd);
		if (ret)
			goto out_unmap_data;
	}

	blk_mq_start_request(req);
	nvme_submit_cmd(nvmeq, &cmnd, bd->last);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_dev *dev = iod->nvmeq->dev;

	nvme_cleanup_cmd(req);
	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
	nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
			nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
	struct request *req;

	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			cqe->command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvmeq->qid == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	nvme_end_request(req, cqe->status, cqe->result);
}

static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
{
	while (start != end) {
		nvme_handle_cqe(nvmeq, start);
		if (++start == nvmeq->q_depth)
			start = 0;
	}
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase = !nvmeq->cq_phase;
	} else {
		nvmeq->cq_head++;
	}
}

static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
				  u16 *end, unsigned int tag)
{
	int found = 0;

	*start = nvmeq->cq_head;
	while (nvme_cqe_pending(nvmeq)) {
		if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
			found++;
		nvme_update_cq_head(nvmeq);
	}
	*end = nvmeq->cq_head;

	if (*start != *end)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	irqreturn_t ret = IRQ_NONE;
	u16 start, end;

	/*
	 * The rmb/wmb pair ensures we see all updates from a previous run of
	 * the irq handler, even if that was on another CPU.
	 */
	rmb();
	if (nvmeq->cq_head != nvmeq->last_cq_head)
		ret = IRQ_HANDLED;
	nvme_process_cq(nvmeq, &start, &end, -1);
	nvmeq->last_cq_head = nvmeq->cq_head;
	wmb();

	if (start != end) {
		nvme_complete_cqes(nvmeq, start, end);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/*
 * Poll for completions any queue, including those not dedicated to polling.
 * Can be called from any context.
 */
static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	u16 start, end;
	int found;

	/*
	 * For a poll queue we need to protect against the polling thread
	 * using the CQ lock.  For normal interrupt driven threads we have
	 * to disable the interrupt to avoid racing with it.
	 */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
		spin_lock(&nvmeq->cq_poll_lock);
		found = nvme_process_cq(nvmeq, &start, &end, tag);
		spin_unlock(&nvmeq->cq_poll_lock);
	} else {
		disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
		found = nvme_process_cq(nvmeq, &start, &end, tag);
		enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	}

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	u16 start, end;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq, &start, &end, -1);
	spin_unlock(&nvmeq->cq_poll_lock);

	nvme_complete_cqes(nvmeq, start, end);
	return found;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	nvme_submit_cmd(nvmeq, &c, true);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_ctrl *ctrl = &dev->ctrl;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{

	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}

static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);
}

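/*
 * Command timeout handler: depending on controller state this polls for a
 * missed completion, sends an Abort command, or escalates to disabling and
 * resetting the controller.
 */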
static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;
	u32 csts = readl(dev->bar + NVME_REG_CSTS);

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_DONE;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (nvme_poll_irqdisable(nvmeq, req->tag)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_DONE;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_DONE.
	 */
	switch (dev->ctrl.state) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		/* fall through */
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, true);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_DONE;
	case NVME_CTRL_RESETTING:
		return BLK_EH_RESET_TIMER;
	default:
		break;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset_ctrl(&dev->ctrl);

		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_DONE;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq: queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return 1;

	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
	mb();

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
	return 0;
}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = &dev->queues[0];

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);

	nvme_poll_irqdisable(nvmeq, -1);
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}

static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
		goto free_cqdma;

	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
 free_nvmeq:
	return -ENOMEM;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	wmb(); /* ensure the first interrupt sees the initialization */
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
	u16 vector = 0;

	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
		set_bit(NVMEQ_POLLED, &nvmeq->flags);

	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
	if (result)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		return result;
	else if (result)
		goto release_cq;

	nvmeq->cq_vector = vector;
	nvme_init_queue(nvmeq, qid);

	if (!polled) {
		nvmeq->cq_vector = vector;
		result = queue_request_irq(nvmeq);
		if (result < 0)
			goto release_sq;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;

release_sq:
	dev->online_queues--;
	adapter_delete_sq(dev, qid);
release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}

d29ec824 1573 .queue_rq = nvme_queue_rq,
77f02a7a 1574 .complete = nvme_pci_complete_rq,
a4aea562 1575 .init_hctx = nvme_admin_init_hctx,
4af0e21c 1576 .exit_hctx = nvme_admin_exit_hctx,
0350815a 1577 .init_request = nvme_init_request,
a4aea562
MB
1578 .timeout = nvme_timeout,
1579};
1580
f363b089 1581static const struct blk_mq_ops nvme_mq_ops = {
376f7ef8
CH
1582 .queue_rq = nvme_queue_rq,
1583 .complete = nvme_pci_complete_rq,
1584 .commit_rqs = nvme_commit_rqs,
1585 .init_hctx = nvme_init_hctx,
1586 .init_request = nvme_init_request,
1587 .map_queues = nvme_pci_map_queues,
1588 .timeout = nvme_timeout,
1589 .poll = nvme_poll,
dabcefab
JA
1590};
1591
ea191d2f
KB
1592static void nvme_dev_remove_admin(struct nvme_dev *dev)
1593{
1c63dc66 1594 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
69d9a99c
KB
1595 /*
1596 * If the controller was reset during removal, it's possible
1597 * user requests may be waiting on a stopped queue. Start the
1598 * queue to flush these to completion.
1599 */
c81545f9 1600 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
1c63dc66 1601 blk_cleanup_queue(dev->ctrl.admin_q);
ea191d2f
KB
1602 blk_mq_free_tag_set(&dev->admin_tagset);
1603 }
1604}
1605
a4aea562
MB
1606static int nvme_alloc_admin_tags(struct nvme_dev *dev)
1607{
1c63dc66 1608 if (!dev->ctrl.admin_q) {
a4aea562
MB
1609 dev->admin_tagset.ops = &nvme_mq_admin_ops;
1610 dev->admin_tagset.nr_hw_queues = 1;
e3e9d50c 1611
38dabe21 1612 dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
a4aea562 1613 dev->admin_tagset.timeout = ADMIN_TIMEOUT;
e75ec752 1614 dev->admin_tagset.numa_node = dev_to_node(dev->dev);
d43f1ccf 1615 dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
d3484991 1616 dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
a4aea562
MB
1617 dev->admin_tagset.driver_data = dev;
1618
1619 if (blk_mq_alloc_tag_set(&dev->admin_tagset))
1620 return -ENOMEM;
34b6c231 1621 dev->ctrl.admin_tagset = &dev->admin_tagset;
a4aea562 1622
1c63dc66
CH
1623 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1624 if (IS_ERR(dev->ctrl.admin_q)) {
a4aea562
MB
1625 blk_mq_free_tag_set(&dev->admin_tagset);
1626 return -ENOMEM;
1627 }
1c63dc66 1628 if (!blk_get_queue(dev->ctrl.admin_q)) {
ea191d2f 1629 nvme_dev_remove_admin(dev);
1c63dc66 1630 dev->ctrl.admin_q = NULL;
ea191d2f
KB
1631 return -ENODEV;
1632 }
0fb59cbc 1633 } else
c81545f9 1634 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
a4aea562
MB
1635
1636 return 0;
1637}
1638
97f6ef64
XY
1639static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
1640{
1641 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
1642}
1643
1644static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
1645{
1646 struct pci_dev *pdev = to_pci_dev(dev->dev);
1647
1648 if (size <= dev->bar_mapped_size)
1649 return 0;
1650 if (size > pci_resource_len(pdev, 0))
1651 return -ENOMEM;
1652 if (dev->bar)
1653 iounmap(dev->bar);
1654 dev->bar = ioremap(pci_resource_start(pdev, 0), size);
1655 if (!dev->bar) {
1656 dev->bar_mapped_size = 0;
1657 return -ENOMEM;
1658 }
1659 dev->bar_mapped_size = size;
1660 dev->dbs = dev->bar + NVME_REG_DBS;
1661
1662 return 0;
1663}
1664
01ad0990 1665static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
b60503ba 1666{
ba47e386 1667 int result;
b60503ba
MW
1668 u32 aqa;
1669 struct nvme_queue *nvmeq;
1670
97f6ef64
XY
1671 result = nvme_remap_bar(dev, db_bar_size(dev, 0));
1672 if (result < 0)
1673 return result;
1674
8ef2074d 1675 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
20d0dfe6 1676 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
dfbac8c7 1677
7a67cbea
CH
1678 if (dev->subsystem &&
1679 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
1680 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
dfbac8c7 1681
20d0dfe6 1682 result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
ba47e386
MW
1683 if (result < 0)
1684 return result;
b60503ba 1685
a6ff7262 1686 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
147b27e4
SG
1687 if (result)
1688 return result;
b60503ba 1689
147b27e4 1690 nvmeq = &dev->queues[0];
b60503ba
MW
1691 aqa = nvmeq->q_depth - 1;
1692 aqa |= aqa << 16;
1693
7a67cbea
CH
1694 writel(aqa, dev->bar + NVME_REG_AQA);
1695 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1696 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
b60503ba 1697
20d0dfe6 1698 result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
025c557a 1699 if (result)
d4875622 1700 return result;
a4aea562 1701
2b25d981 1702 nvmeq->cq_vector = 0;
161b8be2 1703 nvme_init_queue(nvmeq, 0);
dca51e78 1704 result = queue_request_irq(nvmeq);
758dd7fd 1705 if (result) {
7c349dde 1706 dev->online_queues--;
d4875622 1707 return result;
758dd7fd 1708 }
025c557a 1709
4e224106 1710 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
b60503ba
MW
1711 return result;
1712}
1713
749941f2 1714static int nvme_create_io_queues(struct nvme_dev *dev)
42f61420 1715{
4b04cc6a 1716 unsigned i, max, rw_queues;
749941f2 1717 int ret = 0;
42f61420 1718
d858e5f0 1719 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
a6ff7262 1720 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
749941f2 1721 ret = -ENOMEM;
42f61420 1722 break;
749941f2
CH
1723 }
1724 }
42f61420 1725
d858e5f0 1726 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
e20ba6e1
CH
1727 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
1728 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
1729 dev->io_queues[HCTX_TYPE_READ];
4b04cc6a
JA
1730 } else {
1731 rw_queues = max;
1732 }
1733
949928c1 1734 for (i = dev->online_queues; i <= max; i++) {
4b04cc6a
JA
1735 bool polled = i > rw_queues;
1736
1737 ret = nvme_create_queue(&dev->queues[i], i, polled);
d4875622 1738 if (ret)
42f61420 1739 break;
27e8166c 1740 }
749941f2
CH
1741
1742 /*
 1743	 * Ignore failing Create SQ/CQ commands, we can continue with fewer
8adb8c14
MI
 1744	 * than the desired number of queues, and even a controller without
1745 * I/O queues can still be used to issue admin commands. This might
749941f2
CH
 1746	 * be useful to upgrade buggy firmware, for example.
1747 */
1748 return ret >= 0 ? 0 : ret;
b60503ba
MW
1749}
1750
202021c1
SB
1751static ssize_t nvme_cmb_show(struct device *dev,
1752 struct device_attribute *attr,
1753 char *buf)
1754{
1755 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
1756
c965809c 1757 return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
202021c1
SB
1758 ndev->cmbloc, ndev->cmbsz);
1759}
1760static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
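/*
 * The "cmb" attribute is attached to the controller's class device in
 * nvme_map_cmb() below, so it typically shows up as
 * /sys/class/nvme/nvme<n>/cmb (path shown for illustration only) and dumps
 * the raw CMBLOC and CMBSZ register values in hex.
 */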
1761
88de4598 1762static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
8ffaadf7 1763{
88de4598
CH
1764 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
1765
1766 return 1ULL << (12 + 4 * szu);
1767}
1768
1769static u32 nvme_cmb_size(struct nvme_dev *dev)
1770{
1771 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
1772}
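/*
 * CMBSZ decoding, for reference: SZU selects the size unit as
 * 4 KiB << (4 * SZU), i.e. 4 KiB, 64 KiB, 1 MiB, ..., and SZ counts how
 * many of those units the controller exposes.  For example SZU == 2 with
 * SZ == 64 describes a 64 MiB controller memory buffer.
 */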
1773
f65efd6d 1774static void nvme_map_cmb(struct nvme_dev *dev)
8ffaadf7 1775{
88de4598 1776 u64 size, offset;
8ffaadf7
JD
1777 resource_size_t bar_size;
1778 struct pci_dev *pdev = to_pci_dev(dev->dev);
8969f1f8 1779 int bar;
8ffaadf7 1780
9fe5c59f
KB
1781 if (dev->cmb_size)
1782 return;
1783
7a67cbea 1784 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
f65efd6d
CH
1785 if (!dev->cmbsz)
1786 return;
202021c1 1787 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
8ffaadf7 1788
88de4598
CH
1789 size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
1790 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
8969f1f8
CH
1791 bar = NVME_CMB_BIR(dev->cmbloc);
1792 bar_size = pci_resource_len(pdev, bar);
8ffaadf7
JD
1793
1794 if (offset > bar_size)
f65efd6d 1795 return;
8ffaadf7
JD
1796
1797 /*
1798 * Controllers may support a CMB size larger than their BAR,
1799 * for example, due to being behind a bridge. Reduce the CMB to
1800 * the reported size of the BAR
1801 */
1802 if (size > bar_size - offset)
1803 size = bar_size - offset;
1804
0f238ff5
LG
1805 if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
1806 dev_warn(dev->ctrl.device,
1807 "failed to register the CMB\n");
f65efd6d 1808 return;
0f238ff5
LG
1809 }
1810
8ffaadf7 1811 dev->cmb_size = size;
0f238ff5
LG
1812 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
1813
1814 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
1815 (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
1816 pci_p2pmem_publish(pdev, true);
f65efd6d
CH
1817
1818 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1819 &dev_attr_cmb.attr, NULL))
1820 dev_warn(dev->ctrl.device,
1821 "failed to add sysfs attribute for CMB\n");
8ffaadf7
JD
1822}
1823
1824static inline void nvme_release_cmb(struct nvme_dev *dev)
1825{
0f238ff5 1826 if (dev->cmb_size) {
1c78f773
MG
1827 sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1828 &dev_attr_cmb.attr, NULL);
0f238ff5 1829 dev->cmb_size = 0;
8ffaadf7
JD
1830 }
1831}
1832
87ad72a5
CH
1833static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
1834{
4033f35d 1835 u64 dma_addr = dev->host_mem_descs_dma;
87ad72a5 1836 struct nvme_command c;
87ad72a5
CH
1837 int ret;
1838
87ad72a5
CH
1839 memset(&c, 0, sizeof(c));
1840 c.features.opcode = nvme_admin_set_features;
1841 c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
1842 c.features.dword11 = cpu_to_le32(bits);
1843 c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
1844 ilog2(dev->ctrl.page_size));
1845 c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
1846 c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
1847 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
1848
1849 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1850 if (ret) {
1851 dev_warn(dev->ctrl.device,
1852 "failed to set host mem (err %d, flags %#x).\n",
1853 ret, bits);
1854 }
87ad72a5
CH
1855 return ret;
1856}
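/*
 * For reference, the Set Features (Host Memory Buffer) command built above
 * encodes: dword11 = enable/return flags, dword12 = buffer size in
 * controller pages, dword13/14 = lower/upper half of the descriptor list
 * DMA address, and dword15 = the number of descriptors in that list.
 */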
1857
1858static void nvme_free_host_mem(struct nvme_dev *dev)
1859{
1860 int i;
1861
1862 for (i = 0; i < dev->nr_host_mem_descs; i++) {
1863 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1864 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
1865
cc667f6d
LD
1866 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
1867 le64_to_cpu(desc->addr),
1868 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
87ad72a5
CH
1869 }
1870
1871 kfree(dev->host_mem_desc_bufs);
1872 dev->host_mem_desc_bufs = NULL;
4033f35d
CH
1873 dma_free_coherent(dev->dev,
1874 dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
1875 dev->host_mem_descs, dev->host_mem_descs_dma);
87ad72a5 1876 dev->host_mem_descs = NULL;
7e5dd57e 1877 dev->nr_host_mem_descs = 0;
87ad72a5
CH
1878}
1879
92dc6895
CH
1880static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
1881 u32 chunk_size)
9d713c2b 1882{
87ad72a5 1883 struct nvme_host_mem_buf_desc *descs;
92dc6895 1884 u32 max_entries, len;
4033f35d 1885 dma_addr_t descs_dma;
2ee0e4ed 1886 int i = 0;
87ad72a5 1887 void **bufs;
6fbcde66 1888 u64 size, tmp;
87ad72a5 1889
87ad72a5
CH
1890 tmp = (preferred + chunk_size - 1);
1891 do_div(tmp, chunk_size);
1892 max_entries = tmp;
044a9df1
CH
1893
1894 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1895 max_entries = dev->ctrl.hmmaxd;
1896
750afb08
LC
1897 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
1898 &descs_dma, GFP_KERNEL);
87ad72a5
CH
1899 if (!descs)
1900 goto out;
1901
1902 bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
1903 if (!bufs)
1904 goto out_free_descs;
1905
244a8fe4 1906 for (size = 0; size < preferred && i < max_entries; size += len) {
87ad72a5
CH
1907 dma_addr_t dma_addr;
1908
50cdb7c6 1909 len = min_t(u64, chunk_size, preferred - size);
87ad72a5
CH
1910 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
1911 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1912 if (!bufs[i])
1913 break;
1914
1915 descs[i].addr = cpu_to_le64(dma_addr);
1916 descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
1917 i++;
1918 }
1919
92dc6895 1920 if (!size)
87ad72a5 1921 goto out_free_bufs;
87ad72a5 1922
87ad72a5
CH
1923 dev->nr_host_mem_descs = i;
1924 dev->host_mem_size = size;
1925 dev->host_mem_descs = descs;
4033f35d 1926 dev->host_mem_descs_dma = descs_dma;
87ad72a5
CH
1927 dev->host_mem_desc_bufs = bufs;
1928 return 0;
1929
1930out_free_bufs:
1931 while (--i >= 0) {
1932 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
1933
cc667f6d
LD
1934 dma_free_attrs(dev->dev, size, bufs[i],
1935 le64_to_cpu(descs[i].addr),
1936 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
87ad72a5
CH
1937 }
1938
1939 kfree(bufs);
1940out_free_descs:
4033f35d
CH
1941 dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
1942 descs_dma);
87ad72a5 1943out:
87ad72a5
CH
1944 dev->host_mem_descs = NULL;
1945 return -ENOMEM;
1946}
1947
92dc6895
CH
1948static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
1949{
1950 u32 chunk_size;
1951
1952 /* start big and work our way down */
30f92d62 1953 for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
044a9df1 1954 chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
92dc6895
CH
1955 chunk_size /= 2) {
1956 if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
1957 if (!min || dev->host_mem_size >= min)
1958 return 0;
1959 nvme_free_host_mem(dev);
1960 }
1961 }
1962
1963 return -ENOMEM;
1964}
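/*
 * Sketch of the strategy above: start with chunks as large as the page
 * allocator will give us (MAX_ORDER_NR_PAGES pages, capped by the
 * preferred size) and retry with half the chunk size whenever the
 * allocation cannot reach the minimum HMB size, stopping once the chunk
 * would drop below the controller's advertised granularity (hmminds) or
 * two pages.
 */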
1965
9620cfba 1966static int nvme_setup_host_mem(struct nvme_dev *dev)
87ad72a5
CH
1967{
1968 u64 max = (u64)max_host_mem_size_mb * SZ_1M;
1969 u64 preferred = (u64)dev->ctrl.hmpre * 4096;
1970 u64 min = (u64)dev->ctrl.hmmin * 4096;
1971 u32 enable_bits = NVME_HOST_MEM_ENABLE;
6fbcde66 1972 int ret;
87ad72a5
CH
1973
1974 preferred = min(preferred, max);
1975 if (min > max) {
1976 dev_warn(dev->ctrl.device,
1977 "min host memory (%lld MiB) above limit (%d MiB).\n",
1978 min >> ilog2(SZ_1M), max_host_mem_size_mb);
1979 nvme_free_host_mem(dev);
9620cfba 1980 return 0;
87ad72a5
CH
1981 }
1982
1983 /*
1984 * If we already have a buffer allocated check if we can reuse it.
1985 */
1986 if (dev->host_mem_descs) {
1987 if (dev->host_mem_size >= min)
1988 enable_bits |= NVME_HOST_MEM_RETURN;
1989 else
1990 nvme_free_host_mem(dev);
1991 }
1992
1993 if (!dev->host_mem_descs) {
92dc6895
CH
1994 if (nvme_alloc_host_mem(dev, min, preferred)) {
1995 dev_warn(dev->ctrl.device,
1996 "failed to allocate host memory buffer.\n");
9620cfba 1997 return 0; /* controller must work without HMB */
92dc6895
CH
1998 }
1999
2000 dev_info(dev->ctrl.device,
2001 "allocated %lld MiB host memory buffer.\n",
2002 dev->host_mem_size >> ilog2(SZ_1M));
87ad72a5
CH
2003 }
2004
9620cfba
CH
2005 ret = nvme_set_host_mem(dev, enable_bits);
2006 if (ret)
87ad72a5 2007 nvme_free_host_mem(dev);
9620cfba 2008 return ret;
9d713c2b
KB
2009}
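/*
 * Worked example (illustrative values): a controller reporting
 * HMPRE == 0x8000 prefers 0x8000 * 4 KiB = 128 MiB of host memory; with
 * the default max_host_mem_size_mb of 128 that is used as-is, while a
 * larger HMPRE would be clamped to 128 MiB.
 */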
2010
612b7286
ML
2011/*
2012 * nirqs is the number of interrupts available for write and read
2013 * queues. The core already reserved an interrupt for the admin queue.
2014 */
2015static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
3b6592f7 2016{
612b7286
ML
2017 struct nvme_dev *dev = affd->priv;
2018 unsigned int nr_read_queues;
3b6592f7
JA
2019
2020 /*
612b7286
ML
 2021	 * If there is no interrupt available for queues, ensure that
2022 * the default queue is set to 1. The affinity set size is
2023 * also set to one, but the irq core ignores it for this case.
2024 *
 2025	 * If only one interrupt is available or 'write_queues' == 0, combine
2026 * write and read queues.
2027 *
2028 * If 'write_queues' > 0, ensure it leaves room for at least one read
2029 * queue.
3b6592f7 2030 */
612b7286
ML
2031 if (!nrirqs) {
2032 nrirqs = 1;
2033 nr_read_queues = 0;
2034 } else if (nrirqs == 1 || !write_queues) {
2035 nr_read_queues = 0;
2036 } else if (write_queues >= nrirqs) {
2037 nr_read_queues = 1;
3b6592f7 2038 } else {
612b7286 2039 nr_read_queues = nrirqs - write_queues;
3b6592f7 2040 }
612b7286
ML
2041
2042 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2043 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2044 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2045 affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2046 affd->nr_sets = nr_read_queues ? 2 : 1;
3b6592f7
JA
2047}
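/*
 * Example split (illustrative): with nrirqs == 8 and write_queues == 2,
 * nr_read_queues becomes 6, so the default (write) set gets 2 vectors and
 * the read set gets 6.  With write_queues == 0 the sets are combined and
 * all 8 vectors land in the default set.
 */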
2048
6451fe73 2049static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
3b6592f7
JA
2050{
2051 struct pci_dev *pdev = to_pci_dev(dev->dev);
3b6592f7 2052 struct irq_affinity affd = {
9cfef55b 2053 .pre_vectors = 1,
612b7286
ML
2054 .calc_sets = nvme_calc_irq_sets,
2055 .priv = dev,
3b6592f7 2056 };
6451fe73 2057 unsigned int irq_queues, this_p_queues;
dad77d63 2058 unsigned int nr_cpus = num_possible_cpus();
6451fe73
JA
2059
2060 /*
2061 * Poll queues don't need interrupts, but we need at least one IO
2062 * queue left over for non-polled IO.
2063 */
2064 this_p_queues = poll_queues;
2065 if (this_p_queues >= nr_io_queues) {
2066 this_p_queues = nr_io_queues - 1;
2067 irq_queues = 1;
2068 } else {
dad77d63
MI
2069 if (nr_cpus < nr_io_queues - this_p_queues)
2070 irq_queues = nr_cpus + 1;
2071 else
2072 irq_queues = nr_io_queues - this_p_queues + 1;
6451fe73
JA
2073 }
2074 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
3b6592f7 2075
612b7286
ML
2076 /* Initialize for the single interrupt case */
2077 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2078 dev->io_queues[HCTX_TYPE_READ] = 0;
3b6592f7 2079
612b7286
ML
2080 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
2081 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
3b6592f7
JA
2082}
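/*
 * Note on the vector budget above: polled queues never consume an
 * interrupt, so irq_queues budgets one vector for the admin queue
 * (reserved through affd.pre_vectors) plus vectors for the non-polled I/O
 * queues, further capped by the number of possible CPUs.
 */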
2083
8fae268b
KB
2084static void nvme_disable_io_queues(struct nvme_dev *dev)
2085{
2086 if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
2087 __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2088}
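/*
 * The ordering above follows the spec: an I/O submission queue must be
 * deleted before the completion queue it posts to, so the CQ deletions are
 * only attempted once all SQ deletions have completed.
 */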
2089
8d85fce7 2090static int nvme_setup_io_queues(struct nvme_dev *dev)
b60503ba 2091{
147b27e4 2092 struct nvme_queue *adminq = &dev->queues[0];
e75ec752 2093 struct pci_dev *pdev = to_pci_dev(dev->dev);
97f6ef64
XY
2094 int result, nr_io_queues;
2095 unsigned long size;
b60503ba 2096
3b6592f7 2097 nr_io_queues = max_io_queues();
9a0be7ab
CH
2098 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2099 if (result < 0)
1b23484b 2100 return result;
9a0be7ab 2101
f5fa90dc 2102 if (nr_io_queues == 0)
a5229050 2103 return 0;
4e224106
CH
2104
2105 clear_bit(NVMEQ_ENABLED, &adminq->flags);
b60503ba 2106
0f238ff5 2107 if (dev->cmb_use_sqes) {
8ffaadf7
JD
2108 result = nvme_cmb_qdepth(dev, nr_io_queues,
2109 sizeof(struct nvme_command));
2110 if (result > 0)
2111 dev->q_depth = result;
2112 else
0f238ff5 2113 dev->cmb_use_sqes = false;
8ffaadf7
JD
2114 }
2115
97f6ef64
XY
2116 do {
2117 size = db_bar_size(dev, nr_io_queues);
2118 result = nvme_remap_bar(dev, size);
2119 if (!result)
2120 break;
2121 if (!--nr_io_queues)
2122 return -ENOMEM;
2123 } while (1);
2124 adminq->q_db = dev->dbs;
f1938f6e 2125
8fae268b 2126 retry:
9d713c2b 2127 /* Deregister the admin queue's interrupt */
0ff199cb 2128 pci_free_irq(pdev, 0, adminq);
9d713c2b 2129
e32efbfc
JA
2130 /*
 2131	 * If we enabled MSI-X early because INTx is not available, disable it
 2132	 * again before setting up the full range we need.
2133 */
dca51e78 2134 pci_free_irq_vectors(pdev);
3b6592f7
JA
2135
2136 result = nvme_setup_irqs(dev, nr_io_queues);
22b55601 2137 if (result <= 0)
dca51e78 2138 return -EIO;
3b6592f7 2139
22b55601 2140 dev->num_vecs = result;
4b04cc6a 2141 result = max(result - 1, 1);
e20ba6e1 2142 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
fa08a396 2143
063a8096
MW
2144 /*
2145 * Should investigate if there's a performance win from allocating
2146 * more queues than interrupt vectors; it might allow the submission
2147 * path to scale better, even if the receive path is limited by the
2148 * number of interrupts.
2149 */
dca51e78 2150 result = queue_request_irq(adminq);
7c349dde 2151 if (result)
d4875622 2152 return result;
4e224106 2153 set_bit(NVMEQ_ENABLED, &adminq->flags);
8fae268b
KB
2154
2155 result = nvme_create_io_queues(dev);
2156 if (result || dev->online_queues < 2)
2157 return result;
2158
2159 if (dev->online_queues - 1 < dev->max_qid) {
2160 nr_io_queues = dev->online_queues - 1;
2161 nvme_disable_io_queues(dev);
2162 nvme_suspend_io_queues(dev);
2163 goto retry;
2164 }
2165 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2166 dev->io_queues[HCTX_TYPE_DEFAULT],
2167 dev->io_queues[HCTX_TYPE_READ],
2168 dev->io_queues[HCTX_TYPE_POLL]);
2169 return 0;
b60503ba
MW
2170}
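/*
 * Note on the retry above: if fewer queues came online than we sized the
 * interrupt allocation for, the freshly created queues are torn down and
 * the IRQ/queue setup is redone with the smaller count so the vector
 * affinity spread matches the queues that actually exist.
 */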
2171
2a842aca 2172static void nvme_del_queue_end(struct request *req, blk_status_t error)
a5768aa8 2173{
db3cbfff 2174 struct nvme_queue *nvmeq = req->end_io_data;
b5875222 2175
db3cbfff 2176 blk_mq_free_request(req);
d1ed6aa1 2177 complete(&nvmeq->delete_done);
a5768aa8
KB
2178}
2179
2a842aca 2180static void nvme_del_cq_end(struct request *req, blk_status_t error)
a5768aa8 2181{
db3cbfff 2182 struct nvme_queue *nvmeq = req->end_io_data;
a5768aa8 2183
d1ed6aa1
CH
2184 if (error)
2185 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
db3cbfff
KB
2186
2187 nvme_del_queue_end(req, error);
a5768aa8
KB
2188}
2189
db3cbfff 2190static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
bda4e0fb 2191{
db3cbfff
KB
2192 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2193 struct request *req;
2194 struct nvme_command cmd;
bda4e0fb 2195
db3cbfff
KB
2196 memset(&cmd, 0, sizeof(cmd));
2197 cmd.delete_queue.opcode = opcode;
2198 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
bda4e0fb 2199
eb71f435 2200 req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
db3cbfff
KB
2201 if (IS_ERR(req))
2202 return PTR_ERR(req);
bda4e0fb 2203
db3cbfff
KB
2204 req->timeout = ADMIN_TIMEOUT;
2205 req->end_io_data = nvmeq;
2206
d1ed6aa1 2207 init_completion(&nvmeq->delete_done);
db3cbfff
KB
2208 blk_execute_rq_nowait(q, NULL, req, false,
2209 opcode == nvme_admin_delete_cq ?
2210 nvme_del_cq_end : nvme_del_queue_end);
2211 return 0;
bda4e0fb
KB
2212}
2213
8fae268b 2214static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
a5768aa8 2215{
5271edd4 2216 int nr_queues = dev->online_queues - 1, sent = 0;
db3cbfff 2217 unsigned long timeout;
a5768aa8 2218
db3cbfff 2219 retry:
5271edd4
CH
2220 timeout = ADMIN_TIMEOUT;
2221 while (nr_queues > 0) {
2222 if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
2223 break;
2224 nr_queues--;
2225 sent++;
db3cbfff 2226 }
d1ed6aa1
CH
2227 while (sent) {
2228 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
2229
2230 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
5271edd4
CH
2231 timeout);
2232 if (timeout == 0)
2233 return false;
d1ed6aa1
CH
2234
2235 /* handle any remaining CQEs */
2236 if (opcode == nvme_admin_delete_cq &&
2237 !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
2238 nvme_poll_irqdisable(nvmeq, -1);
2239
2240 sent--;
5271edd4
CH
2241 if (nr_queues)
2242 goto retry;
2243 }
2244 return true;
a5768aa8
KB
2245}
2246
422ef0c7 2247/*
2b1b7e78 2248 * return error value only when tagset allocation failed
422ef0c7 2249 */
8d85fce7 2250static int nvme_dev_add(struct nvme_dev *dev)
b60503ba 2251{
2b1b7e78
JW
2252 int ret;
2253
5bae7f73 2254 if (!dev->ctrl.tagset) {
376f7ef8 2255 dev->tagset.ops = &nvme_mq_ops;
ffe7704d 2256 dev->tagset.nr_hw_queues = dev->online_queues - 1;
8fe34be1 2257 dev->tagset.nr_maps = 2; /* default + read */
ed92ad37
CH
2258 if (dev->io_queues[HCTX_TYPE_POLL])
2259 dev->tagset.nr_maps++;
ffe7704d
KB
2260 dev->tagset.timeout = NVME_IO_TIMEOUT;
2261 dev->tagset.numa_node = dev_to_node(dev->dev);
2262 dev->tagset.queue_depth =
a4aea562 2263 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
d43f1ccf 2264 dev->tagset.cmd_size = sizeof(struct nvme_iod);
ffe7704d
KB
2265 dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
2266 dev->tagset.driver_data = dev;
b60503ba 2267
2b1b7e78
JW
2268 ret = blk_mq_alloc_tag_set(&dev->tagset);
2269 if (ret) {
2270 dev_warn(dev->ctrl.device,
2271 "IO queues tagset allocation failed %d\n", ret);
2272 return ret;
2273 }
5bae7f73 2274 dev->ctrl.tagset = &dev->tagset;
949928c1
KB
2275 } else {
2276 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
2277
2278 /* Free previously allocated queues that are no longer usable */
2279 nvme_free_queues(dev, dev->online_queues);
ffe7704d 2280 }
949928c1 2281
e8fd41bb 2282 nvme_dbbuf_set(dev);
e1e5e564 2283 return 0;
b60503ba
MW
2284}
2285
b00a726a 2286static int nvme_pci_enable(struct nvme_dev *dev)
0877cb0d 2287{
b00a726a 2288 int result = -ENOMEM;
e75ec752 2289 struct pci_dev *pdev = to_pci_dev(dev->dev);
0877cb0d
KB
2290
2291 if (pci_enable_device_mem(pdev))
2292 return result;
2293
0877cb0d 2294 pci_set_master(pdev);
0877cb0d 2295
4fe06923 2296 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
052d0efa 2297 goto disable;
0877cb0d 2298
7a67cbea 2299 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
0e53d180 2300 result = -ENODEV;
b00a726a 2301 goto disable;
0e53d180 2302 }
e32efbfc
JA
2303
2304 /*
a5229050
KB
2305 * Some devices and/or platforms don't advertise or work with INTx
2306 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
2307 * adjust this later.
e32efbfc 2308 */
dca51e78
CH
2309 result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
2310 if (result < 0)
2311 return result;
e32efbfc 2312
20d0dfe6 2313 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
7a67cbea 2314
20d0dfe6 2315 dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
b27c1e68 2316 io_queue_depth);
20d0dfe6 2317 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
7a67cbea 2318 dev->dbs = dev->bar + 4096;
1f390c1f
SG
2319
2320 /*
2321 * Temporary fix for the Apple controller found in the MacBook8,1 and
2322 * some MacBook7,1 to avoid controller resets and data loss.
2323 */
2324 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
2325 dev->q_depth = 2;
9bdcfb10
CH
2326 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
2327 "set queue depth=%u to work around controller resets\n",
1f390c1f 2328 dev->q_depth);
d554b5e1
MP
2329 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
2330 (pdev->device == 0xa821 || pdev->device == 0xa822) &&
20d0dfe6 2331 NVME_CAP_MQES(dev->ctrl.cap) == 0) {
d554b5e1
MP
2332 dev->q_depth = 64;
2333 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
2334 "set queue depth=%u\n", dev->q_depth);
1f390c1f
SG
2335 }
2336
f65efd6d 2337 nvme_map_cmb(dev);
202021c1 2338
a0a3408e
KB
2339 pci_enable_pcie_error_reporting(pdev);
2340 pci_save_state(pdev);
0877cb0d
KB
2341 return 0;
2342
2343 disable:
0877cb0d
KB
2344 pci_disable_device(pdev);
2345 return result;
2346}
2347
2348static void nvme_dev_unmap(struct nvme_dev *dev)
b00a726a
KB
2349{
2350 if (dev->bar)
2351 iounmap(dev->bar);
a1f447b3 2352 pci_release_mem_regions(to_pci_dev(dev->dev));
b00a726a
KB
2353}
2354
2355static void nvme_pci_disable(struct nvme_dev *dev)
0877cb0d 2356{
e75ec752
CH
2357 struct pci_dev *pdev = to_pci_dev(dev->dev);
2358
dca51e78 2359 pci_free_irq_vectors(pdev);
0877cb0d 2360
a0a3408e
KB
2361 if (pci_is_enabled(pdev)) {
2362 pci_disable_pcie_error_reporting(pdev);
e75ec752 2363 pci_disable_device(pdev);
4d115420 2364 }
4d115420
KB
2365}
2366
a5cdb68c 2367static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
b60503ba 2368{
e43269e6 2369 bool dead = true, freeze = false;
302ad8cc 2370 struct pci_dev *pdev = to_pci_dev(dev->dev);
22404274 2371
77bf25ea 2372 mutex_lock(&dev->shutdown_lock);
302ad8cc
KB
2373 if (pci_is_enabled(pdev)) {
2374 u32 csts = readl(dev->bar + NVME_REG_CSTS);
2375
ebef7368 2376 if (dev->ctrl.state == NVME_CTRL_LIVE ||
e43269e6
KB
2377 dev->ctrl.state == NVME_CTRL_RESETTING) {
2378 freeze = true;
302ad8cc 2379 nvme_start_freeze(&dev->ctrl);
e43269e6 2380 }
302ad8cc
KB
2381 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
2382 pdev->error_state != pci_channel_io_normal);
c9d3bf88 2383 }
c21377f8 2384
302ad8cc
KB
2385 /*
2386 * Give the controller a chance to complete all entered requests if
2387 * doing a safe shutdown.
2388 */
e43269e6
KB
2389 if (!dead && shutdown && freeze)
2390 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
9a915a5b
JW
2391
2392 nvme_stop_queues(&dev->ctrl);
87ad72a5 2393
64ee0ac0 2394 if (!dead && dev->ctrl.queue_count > 0) {
8fae268b 2395 nvme_disable_io_queues(dev);
a5cdb68c 2396 nvme_disable_admin_queue(dev, shutdown);
4d115420 2397 }
8fae268b
KB
2398 nvme_suspend_io_queues(dev);
2399 nvme_suspend_queue(&dev->queues[0]);
b00a726a 2400 nvme_pci_disable(dev);
07836e65 2401
e1958e65
ML
2402 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
2403 blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
302ad8cc
KB
2404
2405 /*
2406 * The driver will not be starting up queues again if shutting down so
2407 * must flush all entered requests to their failed completion to avoid
2408 * deadlocking blk-mq hot-cpu notifier.
2409 */
c8e9e9b7 2410 if (shutdown) {
302ad8cc 2411 nvme_start_queues(&dev->ctrl);
c8e9e9b7
KB
2412 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
2413 blk_mq_unquiesce_queue(dev->ctrl.admin_q);
2414 }
77bf25ea 2415 mutex_unlock(&dev->shutdown_lock);
b60503ba
MW
2416}
2417
091b6092
MW
2418static int nvme_setup_prp_pools(struct nvme_dev *dev)
2419{
e75ec752 2420 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
091b6092
MW
2421 PAGE_SIZE, PAGE_SIZE, 0);
2422 if (!dev->prp_page_pool)
2423 return -ENOMEM;
2424
99802a7a 2425 /* Optimisation for I/Os between 4k and 128k */
e75ec752 2426 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
99802a7a
MW
2427 256, 256, 0);
2428 if (!dev->prp_small_pool) {
2429 dma_pool_destroy(dev->prp_page_pool);
2430 return -ENOMEM;
2431 }
091b6092
MW
2432 return 0;
2433}
2434
2435static void nvme_release_prp_pools(struct nvme_dev *dev)
2436{
2437 dma_pool_destroy(dev->prp_page_pool);
99802a7a 2438 dma_pool_destroy(dev->prp_small_pool);
091b6092
MW
2439}
2440
1673f1f0 2441static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
5e82e952 2442{
1673f1f0 2443 struct nvme_dev *dev = to_nvme_dev(ctrl);
9ac27090 2444
f9f38e33 2445 nvme_dbbuf_dma_free(dev);
e75ec752 2446 put_device(dev->dev);
4af0e21c
KB
2447 if (dev->tagset.tags)
2448 blk_mq_free_tag_set(&dev->tagset);
1c63dc66
CH
2449 if (dev->ctrl.admin_q)
2450 blk_put_queue(dev->ctrl.admin_q);
5e82e952 2451 kfree(dev->queues);
e286bcfc 2452 free_opal_dev(dev->ctrl.opal_dev);
943e942e 2453 mempool_destroy(dev->iod_mempool);
5e82e952
KB
2454 kfree(dev);
2455}
2456
7c1ce408 2457static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
f58944e2 2458{
d22524a4 2459 nvme_get_ctrl(&dev->ctrl);
69d9a99c 2460 nvme_dev_disable(dev, false);
9f9cafc1 2461 nvme_kill_queues(&dev->ctrl);
03e0f3a6 2462 if (!queue_work(nvme_wq, &dev->remove_work))
f58944e2
KB
2463 nvme_put_ctrl(&dev->ctrl);
2464}
2465
fd634f41 2466static void nvme_reset_work(struct work_struct *work)
5e82e952 2467{
d86c4d8e
CH
2468 struct nvme_dev *dev =
2469 container_of(work, struct nvme_dev, ctrl.reset_work);
a98e58e5 2470 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
e71afda4 2471 int result;
2b1b7e78 2472 enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
5e82e952 2473
e71afda4
CK
2474 if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
2475 result = -ENODEV;
fd634f41 2476 goto out;
e71afda4 2477 }
5e82e952 2478
fd634f41
CH
2479 /*
2480 * If we're called to reset a live controller first shut it down before
2481 * moving on.
2482 */
b00a726a 2483 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
a5cdb68c 2484 nvme_dev_disable(dev, false);
d6135c3a 2485 nvme_sync_queues(&dev->ctrl);
5e82e952 2486
5c959d73 2487 mutex_lock(&dev->shutdown_lock);
b00a726a 2488 result = nvme_pci_enable(dev);
f0b50732 2489 if (result)
4726bcf3 2490 goto out_unlock;
f0b50732 2491
01ad0990 2492 result = nvme_pci_configure_admin_queue(dev);
f0b50732 2493 if (result)
4726bcf3 2494 goto out_unlock;
f0b50732 2495
0fb59cbc
KB
2496 result = nvme_alloc_admin_tags(dev);
2497 if (result)
4726bcf3 2498 goto out_unlock;
b9afca3e 2499
943e942e
JA
2500 /*
2501 * Limit the max command size to prevent iod->sg allocations going
2502 * over a single page.
2503 */
7637de31
CH
2504 dev->ctrl.max_hw_sectors = min_t(u32,
2505 NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
943e942e 2506 dev->ctrl.max_segments = NVME_MAX_SEGS;
a48bc520
CH
2507
2508 /*
2509 * Don't limit the IOMMU merged segment size.
2510 */
2511 dma_set_max_seg_size(dev->dev, 0xffffffff);
2512
5c959d73
KB
2513 mutex_unlock(&dev->shutdown_lock);
2514
2515 /*
2516 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2517 * initializing procedure here.
2518 */
2519 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2520 dev_warn(dev->ctrl.device,
2521 "failed to mark controller CONNECTING\n");
cee6c269 2522 result = -EBUSY;
5c959d73
KB
2523 goto out;
2524 }
943e942e 2525
ce4541f4
CH
2526 result = nvme_init_identify(&dev->ctrl);
2527 if (result)
f58944e2 2528 goto out;
ce4541f4 2529
e286bcfc
SB
2530 if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2531 if (!dev->ctrl.opal_dev)
2532 dev->ctrl.opal_dev =
2533 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2534 else if (was_suspend)
2535 opal_unlock_from_suspend(dev->ctrl.opal_dev);
2536 } else {
2537 free_opal_dev(dev->ctrl.opal_dev);
2538 dev->ctrl.opal_dev = NULL;
4f1244c8 2539 }
a98e58e5 2540
f9f38e33
HK
2541 if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2542 result = nvme_dbbuf_dma_alloc(dev);
2543 if (result)
2544 dev_warn(dev->dev,
2545 "unable to allocate dma for dbbuf\n");
2546 }
2547
9620cfba
CH
2548 if (dev->ctrl.hmpre) {
2549 result = nvme_setup_host_mem(dev);
2550 if (result < 0)
2551 goto out;
2552 }
87ad72a5 2553
f0b50732 2554 result = nvme_setup_io_queues(dev);
badc34d4 2555 if (result)
f58944e2 2556 goto out;
f0b50732 2557
2659e57b
CH
2558 /*
2559 * Keep the controller around but remove all namespaces if we don't have
2560 * any working I/O queue.
2561 */
3cf519b5 2562 if (dev->online_queues < 2) {
1b3c47c1 2563 dev_warn(dev->ctrl.device, "IO queues not created\n");
3b24774e 2564 nvme_kill_queues(&dev->ctrl);
5bae7f73 2565 nvme_remove_namespaces(&dev->ctrl);
2b1b7e78 2566 new_state = NVME_CTRL_ADMIN_ONLY;
3cf519b5 2567 } else {
25646264 2568 nvme_start_queues(&dev->ctrl);
302ad8cc 2569 nvme_wait_freeze(&dev->ctrl);
2b1b7e78
JW
2570 /* hit this only when allocate tagset fails */
2571 if (nvme_dev_add(dev))
2572 new_state = NVME_CTRL_ADMIN_ONLY;
302ad8cc 2573 nvme_unfreeze(&dev->ctrl);
3cf519b5
CH
2574 }
2575
2b1b7e78
JW
2576 /*
2577 * If only admin queue live, keep it to do further investigation or
2578 * recovery.
2579 */
2580 if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
2581 dev_warn(dev->ctrl.device,
2582 "failed to mark controller state %d\n", new_state);
e71afda4 2583 result = -ENODEV;
bb8d261e
CH
2584 goto out;
2585 }
92911a55 2586
d09f2b45 2587 nvme_start_ctrl(&dev->ctrl);
3cf519b5 2588 return;
f0b50732 2589
4726bcf3
KB
2590 out_unlock:
2591 mutex_unlock(&dev->shutdown_lock);
3cf519b5 2592 out:
7c1ce408
CK
2593 if (result)
2594 dev_warn(dev->ctrl.device,
2595 "Removing after probe failure status: %d\n", result);
2596 nvme_remove_dead_ctrl(dev);
f0b50732
KB
2597}
2598
5c8809e6 2599static void nvme_remove_dead_ctrl_work(struct work_struct *work)
9a6b9458 2600{
5c8809e6 2601 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
e75ec752 2602 struct pci_dev *pdev = to_pci_dev(dev->dev);
9a6b9458
KB
2603
2604 if (pci_get_drvdata(pdev))
921920ab 2605 device_release_driver(&pdev->dev);
1673f1f0 2606 nvme_put_ctrl(&dev->ctrl);
9a6b9458
KB
2607}
2608
1c63dc66 2609static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
9ca97374 2610{
1c63dc66 2611 *val = readl(to_nvme_dev(ctrl)->bar + off);
90667892 2612 return 0;
9ca97374
TH
2613}
2614
5fd4ce1b 2615static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
4cc06521 2616{
5fd4ce1b
CH
2617 writel(val, to_nvme_dev(ctrl)->bar + off);
2618 return 0;
2619}
4cc06521 2620
7fd8930f
CH
2621static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2622{
2623 *val = readq(to_nvme_dev(ctrl)->bar + off);
2624 return 0;
4cc06521
KB
2625}
2626
97c12223
KB
2627static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2628{
2629 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
2630
2631 return snprintf(buf, size, "%s", dev_name(&pdev->dev));
2632}
2633
1c63dc66 2634static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
1a353d85 2635 .name = "pcie",
e439bb12 2636 .module = THIS_MODULE,
e0596ab2
LG
2637 .flags = NVME_F_METADATA_SUPPORTED |
2638 NVME_F_PCI_P2PDMA,
1c63dc66 2639 .reg_read32 = nvme_pci_reg_read32,
5fd4ce1b 2640 .reg_write32 = nvme_pci_reg_write32,
7fd8930f 2641 .reg_read64 = nvme_pci_reg_read64,
1673f1f0 2642 .free_ctrl = nvme_pci_free_ctrl,
f866fc42 2643 .submit_async_event = nvme_pci_submit_async_event,
97c12223 2644 .get_address = nvme_pci_get_address,
1c63dc66 2645};
4cc06521 2646
b00a726a
KB
2647static int nvme_dev_map(struct nvme_dev *dev)
2648{
b00a726a
KB
2649 struct pci_dev *pdev = to_pci_dev(dev->dev);
2650
a1f447b3 2651 if (pci_request_mem_regions(pdev, "nvme"))
b00a726a
KB
2652 return -ENODEV;
2653
97f6ef64 2654 if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
b00a726a
KB
2655 goto release;
2656
9fa196e7 2657 return 0;
b00a726a 2658 release:
9fa196e7
MG
2659 pci_release_mem_regions(pdev);
2660 return -ENODEV;
b00a726a
KB
2661}
2662
8427bbc2 2663static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
ff5350a8
AL
2664{
2665 if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2666 /*
2667 * Several Samsung devices seem to drop off the PCIe bus
2668 * randomly when APST is on and uses the deepest sleep state.
2669 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
2670 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
2671 * 950 PRO 256GB", but it seems to be restricted to two Dell
2672 * laptops.
2673 */
2674 if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
2675 (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2676 dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2677 return NVME_QUIRK_NO_DEEPEST_PS;
8427bbc2
KHF
2678 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
2679 /*
2680 * Samsung SSD 960 EVO drops off the PCIe bus after system
467c77d4
JJ
2681 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
 2682		 * within a few minutes after bootup on a Coffee Lake board -
2683 * ASUS PRIME Z370-A
8427bbc2
KHF
2684 */
2685 if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
467c77d4
JJ
2686 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
2687 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
8427bbc2 2688 return NVME_QUIRK_NO_APST;
ff5350a8
AL
2689 }
2690
2691 return 0;
2692}
2693
18119775
KB
2694static void nvme_async_probe(void *data, async_cookie_t cookie)
2695{
2696 struct nvme_dev *dev = data;
80f513b5 2697
18119775
KB
2698 nvme_reset_ctrl_sync(&dev->ctrl);
2699 flush_work(&dev->ctrl.scan_work);
80f513b5 2700 nvme_put_ctrl(&dev->ctrl);
18119775
KB
2701}
2702
8d85fce7 2703static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
b60503ba 2704{
a4aea562 2705 int node, result = -ENOMEM;
b60503ba 2706 struct nvme_dev *dev;
ff5350a8 2707 unsigned long quirks = id->driver_data;
943e942e 2708 size_t alloc_size;
b60503ba 2709
a4aea562
MB
2710 node = dev_to_node(&pdev->dev);
2711 if (node == NUMA_NO_NODE)
2fa84351 2712 set_dev_node(&pdev->dev, first_memory_node);
a4aea562
MB
2713
2714 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
b60503ba
MW
2715 if (!dev)
2716 return -ENOMEM;
147b27e4 2717
3b6592f7
JA
2718 dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
2719 GFP_KERNEL, node);
b60503ba
MW
2720 if (!dev->queues)
2721 goto free;
2722
e75ec752 2723 dev->dev = get_device(&pdev->dev);
9a6b9458 2724 pci_set_drvdata(pdev, dev);
1c63dc66 2725
b00a726a
KB
2726 result = nvme_dev_map(dev);
2727 if (result)
b00c9b7a 2728 goto put_pci;
b00a726a 2729
d86c4d8e 2730 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
5c8809e6 2731 INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
77bf25ea 2732 mutex_init(&dev->shutdown_lock);
b60503ba 2733
091b6092
MW
2734 result = nvme_setup_prp_pools(dev);
2735 if (result)
b00c9b7a 2736 goto unmap;
4cc06521 2737
8427bbc2 2738 quirks |= check_vendor_combination_bug(pdev);
ff5350a8 2739
943e942e
JA
2740 /*
2741 * Double check that our mempool alloc size will cover the biggest
2742 * command we support.
2743 */
2744 alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
2745 NVME_MAX_SEGS, true);
2746 WARN_ON_ONCE(alloc_size > PAGE_SIZE);
2747
2748 dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
2749 mempool_kfree,
2750 (void *) alloc_size,
2751 GFP_KERNEL, node);
2752 if (!dev->iod_mempool) {
2753 result = -ENOMEM;
2754 goto release_pools;
2755 }
2756
b6e44b4c
KB
2757 result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2758 quirks);
2759 if (result)
2760 goto release_mempool;
2761
1b3c47c1
SG
2762 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2763
80f513b5 2764 nvme_get_ctrl(&dev->ctrl);
18119775 2765 async_schedule(nvme_async_probe, dev);
4caff8fc 2766
b60503ba
MW
2767 return 0;
2768
b6e44b4c
KB
2769 release_mempool:
2770 mempool_destroy(dev->iod_mempool);
0877cb0d 2771 release_pools:
091b6092 2772 nvme_release_prp_pools(dev);
b00c9b7a
CJ
2773 unmap:
2774 nvme_dev_unmap(dev);
a96d4f5c 2775 put_pci:
e75ec752 2776 put_device(dev->dev);
b60503ba
MW
2777 free:
2778 kfree(dev->queues);
b60503ba
MW
2779 kfree(dev);
2780 return result;
2781}
2782
775755ed 2783static void nvme_reset_prepare(struct pci_dev *pdev)
f0d54a54 2784{
a6739479 2785 struct nvme_dev *dev = pci_get_drvdata(pdev);
f263fbb8 2786 nvme_dev_disable(dev, false);
775755ed 2787}
f0d54a54 2788
775755ed
CH
2789static void nvme_reset_done(struct pci_dev *pdev)
2790{
f263fbb8 2791 struct nvme_dev *dev = pci_get_drvdata(pdev);
79c48ccf 2792 nvme_reset_ctrl_sync(&dev->ctrl);
f0d54a54
KB
2793}
2794
09ece142
KB
2795static void nvme_shutdown(struct pci_dev *pdev)
2796{
2797 struct nvme_dev *dev = pci_get_drvdata(pdev);
a5cdb68c 2798 nvme_dev_disable(dev, true);
09ece142
KB
2799}
2800
f58944e2
KB
2801/*
2802 * The driver's remove may be called on a device in a partially initialized
2803 * state. This function must not have any dependencies on the device state in
2804 * order to proceed.
2805 */
8d85fce7 2806static void nvme_remove(struct pci_dev *pdev)
b60503ba
MW
2807{
2808 struct nvme_dev *dev = pci_get_drvdata(pdev);
9a6b9458 2809
bb8d261e 2810 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
9a6b9458 2811 pci_set_drvdata(pdev, NULL);
0ff9d4e1 2812
6db28eda 2813 if (!pci_device_is_present(pdev)) {
0ff9d4e1 2814 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
1d39e692 2815 nvme_dev_disable(dev, true);
cb4bfda6 2816 nvme_dev_remove_admin(dev);
6db28eda 2817 }
0ff9d4e1 2818
d86c4d8e 2819 flush_work(&dev->ctrl.reset_work);
d09f2b45
SG
2820 nvme_stop_ctrl(&dev->ctrl);
2821 nvme_remove_namespaces(&dev->ctrl);
a5cdb68c 2822 nvme_dev_disable(dev, true);
9fe5c59f 2823 nvme_release_cmb(dev);
87ad72a5 2824 nvme_free_host_mem(dev);
a4aea562 2825 nvme_dev_remove_admin(dev);
a1a5ef99 2826 nvme_free_queues(dev, 0);
d09f2b45 2827 nvme_uninit_ctrl(&dev->ctrl);
9a6b9458 2828 nvme_release_prp_pools(dev);
b00a726a 2829 nvme_dev_unmap(dev);
1673f1f0 2830 nvme_put_ctrl(&dev->ctrl);
b60503ba
MW
2831}
2832
671a6018 2833#ifdef CONFIG_PM_SLEEP
d916b1be
KB
2834static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
2835{
2836 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
2837}
2838
2839static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
2840{
2841 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
2842}
2843
2844static int nvme_resume(struct device *dev)
2845{
2846 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
2847 struct nvme_ctrl *ctrl = &ndev->ctrl;
2848
2849 if (pm_resume_via_firmware() || !ctrl->npss ||
2850 nvme_set_power_state(ctrl, ndev->last_ps) != 0)
2851 nvme_reset_ctrl(ctrl);
2852 return 0;
2853}
2854
cd638946
KB
2855static int nvme_suspend(struct device *dev)
2856{
2857 struct pci_dev *pdev = to_pci_dev(dev);
2858 struct nvme_dev *ndev = pci_get_drvdata(pdev);
d916b1be
KB
2859 struct nvme_ctrl *ctrl = &ndev->ctrl;
2860 int ret = -EBUSY;
2861
2862 /*
 2863	 * The platform does not remove power for a kernel managed suspend, so
2864 * use host managed nvme power settings for lowest idle power if
2865 * possible. This should have quicker resume latency than a full device
2866 * shutdown. But if the firmware is involved after the suspend or the
2867 * device does not support any non-default power states, shut down the
2868 * device fully.
2869 */
2870 if (pm_suspend_via_firmware() || !ctrl->npss) {
2871 nvme_dev_disable(ndev, true);
2872 return 0;
2873 }
2874
2875 nvme_start_freeze(ctrl);
2876 nvme_wait_freeze(ctrl);
2877 nvme_sync_queues(ctrl);
2878
2879 if (ctrl->state != NVME_CTRL_LIVE &&
2880 ctrl->state != NVME_CTRL_ADMIN_ONLY)
2881 goto unfreeze;
2882
2883 ndev->last_ps = 0;
2884 ret = nvme_get_power_state(ctrl, &ndev->last_ps);
2885 if (ret < 0)
2886 goto unfreeze;
2887
2888 ret = nvme_set_power_state(ctrl, ctrl->npss);
2889 if (ret < 0)
2890 goto unfreeze;
2891
2892 if (ret) {
2893 /*
2894 * Clearing npss forces a controller reset on resume. The
 2895		 * correct value will be rediscovered then.
2896 */
2897 nvme_dev_disable(ndev, true);
2898 ctrl->npss = 0;
2899 ret = 0;
2900 goto unfreeze;
2901 }
2902 /*
2903 * A saved state prevents pci pm from generically controlling the
2904 * device's power. If we're using protocol specific settings, we don't
2905 * want pci interfering.
2906 */
2907 pci_save_state(pdev);
2908unfreeze:
2909 nvme_unfreeze(ctrl);
2910 return ret;
2911}
2912
2913static int nvme_simple_suspend(struct device *dev)
2914{
2915 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
cd638946 2916
a5cdb68c 2917 nvme_dev_disable(ndev, true);
cd638946
KB
2918 return 0;
2919}
2920
d916b1be 2921static int nvme_simple_resume(struct device *dev)
cd638946
KB
2922{
2923 struct pci_dev *pdev = to_pci_dev(dev);
2924 struct nvme_dev *ndev = pci_get_drvdata(pdev);
cd638946 2925
d86c4d8e 2926 nvme_reset_ctrl(&ndev->ctrl);
9a6b9458 2927 return 0;
cd638946
KB
2928}
2929
21774222 2930static const struct dev_pm_ops nvme_dev_pm_ops = {
d916b1be
KB
2931 .suspend = nvme_suspend,
2932 .resume = nvme_resume,
2933 .freeze = nvme_simple_suspend,
2934 .thaw = nvme_simple_resume,
2935 .poweroff = nvme_simple_suspend,
2936 .restore = nvme_simple_resume,
2937};
2938#endif /* CONFIG_PM_SLEEP */
b60503ba 2939
a0a3408e
KB
2940static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
2941 pci_channel_state_t state)
2942{
2943 struct nvme_dev *dev = pci_get_drvdata(pdev);
2944
2945 /*
2946 * A frozen channel requires a reset. When detected, this method will
 2947	 * shut down the controller to quiesce. The controller will be restarted
2948 * after the slot reset through driver's slot_reset callback.
2949 */
a0a3408e
KB
2950 switch (state) {
2951 case pci_channel_io_normal:
2952 return PCI_ERS_RESULT_CAN_RECOVER;
2953 case pci_channel_io_frozen:
d011fb31
KB
2954 dev_warn(dev->ctrl.device,
2955 "frozen state error detected, reset controller\n");
a5cdb68c 2956 nvme_dev_disable(dev, false);
a0a3408e
KB
2957 return PCI_ERS_RESULT_NEED_RESET;
2958 case pci_channel_io_perm_failure:
d011fb31
KB
2959 dev_warn(dev->ctrl.device,
2960 "failure state error detected, request disconnect\n");
a0a3408e
KB
2961 return PCI_ERS_RESULT_DISCONNECT;
2962 }
2963 return PCI_ERS_RESULT_NEED_RESET;
2964}
2965
2966static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
2967{
2968 struct nvme_dev *dev = pci_get_drvdata(pdev);
2969
1b3c47c1 2970 dev_info(dev->ctrl.device, "restart after slot reset\n");
a0a3408e 2971 pci_restore_state(pdev);
d86c4d8e 2972 nvme_reset_ctrl(&dev->ctrl);
a0a3408e
KB
2973 return PCI_ERS_RESULT_RECOVERED;
2974}
2975
2976static void nvme_error_resume(struct pci_dev *pdev)
2977{
72cd4cc2
KB
2978 struct nvme_dev *dev = pci_get_drvdata(pdev);
2979
2980 flush_work(&dev->ctrl.reset_work);
a0a3408e
KB
2981}
2982
1d352035 2983static const struct pci_error_handlers nvme_err_handler = {
b60503ba 2984 .error_detected = nvme_error_detected,
b60503ba
MW
2985 .slot_reset = nvme_slot_reset,
2986 .resume = nvme_error_resume,
775755ed
CH
2987 .reset_prepare = nvme_reset_prepare,
2988 .reset_done = nvme_reset_done,
b60503ba
MW
2989};
2990
6eb0d698 2991static const struct pci_device_id nvme_id_table[] = {
106198ed 2992 { PCI_VDEVICE(INTEL, 0x0953),
08095e70 2993 .driver_data = NVME_QUIRK_STRIPE_SIZE |
e850fd16 2994 NVME_QUIRK_DEALLOCATE_ZEROES, },
99466e70
KB
2995 { PCI_VDEVICE(INTEL, 0x0a53),
2996 .driver_data = NVME_QUIRK_STRIPE_SIZE |
e850fd16 2997 NVME_QUIRK_DEALLOCATE_ZEROES, },
99466e70
KB
2998 { PCI_VDEVICE(INTEL, 0x0a54),
2999 .driver_data = NVME_QUIRK_STRIPE_SIZE |
e850fd16 3000 NVME_QUIRK_DEALLOCATE_ZEROES, },
f99cb7af
DWF
3001 { PCI_VDEVICE(INTEL, 0x0a55),
3002 .driver_data = NVME_QUIRK_STRIPE_SIZE |
3003 NVME_QUIRK_DEALLOCATE_ZEROES, },
50af47d0 3004 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
9abd68ef
JA
3005 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3006 NVME_QUIRK_MEDIUM_PRIO_SQ },
6299358d
JD
3007 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
3008 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
540c801c 3009 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
7b210e4e
CH
3010 .driver_data = NVME_QUIRK_IDENTIFY_CNS |
3011 NVME_QUIRK_DISABLE_WRITE_ZEROES, },
0302ae60
MP
3012 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
3013 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
54adc010
GP
3014 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
3015 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
8c97eecc
JL
3016 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
3017 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
015282c9
WW
3018 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
3019 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
d554b5e1
MP
3020 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
3021 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3022 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
3023 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
608cc4b1
CH
3024 { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
3025 .driver_data = NVME_QUIRK_LIGHTNVM, },
3026 { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
3027 .driver_data = NVME_QUIRK_LIGHTNVM, },
ea48e877
WX
3028 { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
3029 .driver_data = NVME_QUIRK_LIGHTNVM, },
08b903b5
MN
3030 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
3031 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
b60503ba 3032 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
c74dc780 3033 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
124298bd 3034 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
b60503ba
MW
3035 { 0, }
3036};
3037MODULE_DEVICE_TABLE(pci, nvme_id_table);
3038
3039static struct pci_driver nvme_driver = {
3040 .name = "nvme",
3041 .id_table = nvme_id_table,
3042 .probe = nvme_probe,
8d85fce7 3043 .remove = nvme_remove,
09ece142 3044 .shutdown = nvme_shutdown,
d916b1be 3045#ifdef CONFIG_PM_SLEEP
cd638946
KB
3046 .driver = {
3047 .pm = &nvme_dev_pm_ops,
3048 },
d916b1be 3049#endif
74d986ab 3050 .sriov_configure = pci_sriov_configure_simple,
b60503ba
MW
3051 .err_handler = &nvme_err_handler,
3052};
3053
3054static int __init nvme_init(void)
3055{
81101540
CH
3056 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
3057 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
3058 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
612b7286 3059 BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
9a6327d2 3060 return pci_register_driver(&nvme_driver);
b60503ba
MW
3061}
3062
3063static void __exit nvme_exit(void)
3064{
3065 pci_unregister_driver(&nvme_driver);
03e0f3a6 3066 flush_workqueue(nvme_wq);
b60503ba
MW
3067}
3068
3069MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
3070MODULE_LICENSE("GPL");
c78b4713 3071MODULE_VERSION("1.0");
b60503ba
MW
3072module_init(nvme_init);
3073module_exit(nvme_exit);