/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
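/* e.g. with 4 KB pages: 256 KB / 4 KB = 64 scatter-gather entries */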

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
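
/*
 * Illustrative sketch (not part of the driver): round-tripping a
 * connection ID built by the helpers above. With an association_id of
 * 0x123456789abc0000, qid 5 yields connection ID 0x123456789abc0005;
 * masking recovers the association ID and the qid.
 */
static inline bool
nvmet_fc_connid_roundtrips(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	u64 connid = nvmet_fc_makeconnid(assoc, qid);

	return nvmet_fc_getassociationid(connid) == assoc->association_id &&
	       nvmet_fc_getqueueid(connid) == qid;
}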

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
				 fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
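
/*
 * Minimal usage sketch (illustrative only): with a NULL dev, as fcloop
 * passes, the wrappers noop the mapping and report success:
 *
 *	dma_addr_t addr = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *	// addr == 0, and fc_dma_mapping_error(NULL, addr) returns 0
 */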


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	/* iod has walked back past the array base; free the allocation */
	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}

static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_tgt_queue *queue,
		struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * Put all admin commands on hw queue id 0. All io commands go
	 * to their respective hw queue on a modulo basis.
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
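
/*
 * Worked example (illustrative only): with max_hw_queues == 4, nvme
 * queue ids 1..8 map to hw queues 0,1,2,3,0,1,2,3; qid 0 (the admin
 * queue) always maps to hw queue 0.
 */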

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup reference that was taken when the
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
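
/*
 * Worked example (illustrative only): with 4 active cpus, qids 1..4
 * land on the 1st..4th active cpu and qid 5 wraps back to the 1st;
 * qid 0 (the admin queue) always selects the first active cpu.
 */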

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
			(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
			GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}
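
/*
 * Worked example (illustrative only): if get_random_bytes() in
 * nvmet_fc_alloc_target_assoc() above yields 0x123456789abc in the low
 * six bytes, the shift produces the association id 0x123456789abc0000,
 * leaving the low two bytes clear for the qid of each connection id.
 */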

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVMe subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate an nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
					template->max_sgl_segments);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			schedule_work(&assoc->del_work);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVMe subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}
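
/*
 * Usage sketch (illustrative only), mirroring the unknown-LS default
 * case in nvmet_fc_handle_ls_rqst() below:
 *
 *	iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
 *			NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
 *			FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
 */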

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
			(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
			(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * The FC-NVME spec changed: initiators send Create Association
	 * Cmd descriptors of differing lengths because the descriptor's
	 * padding size was originally specified incorrectly.
	 * Accept anything of "minimum" length. Assume the format per the
	 * 1.15 spec (with HOSTID reduced to 16 bytes), and ignore how
	 * long the trailing pad is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
			(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
			(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
		 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);

	/* are we to delete a Connection ID (queue)? */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear the association down if the admin queue terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1608 | /* be preventive: handlers will later set to a valid length */
1609 | iod->lsreq->rsplen = 0; | |
1610 | ||
1611 | iod->assoc = NULL; | |
1612 | ||
1613 | /* | |
1614 | * handlers: | |
1615 | * parse request input, execute the request, and format the | |
1616 | * LS response | |
1617 | */ | |
1618 | switch (w0->ls_cmd) { | |
1619 | case FCNVME_LS_CREATE_ASSOCIATION: | |
1620 | /* Creates Association and initial Admin Queue/Connection */ | |
1621 | nvmet_fc_ls_create_association(tgtport, iod); | |
1622 | break; | |
1623 | case FCNVME_LS_CREATE_CONNECTION: | |
1624 | /* Creates an IO Queue/Connection */ | |
1625 | nvmet_fc_ls_create_connection(tgtport, iod); | |
1626 | break; | |
1627 | case FCNVME_LS_DISCONNECT: | |
1628 | /* Terminate a Queue/Connection or the Association */ | |
1629 | nvmet_fc_ls_disconnect(tgtport, iod); | |
1630 | break; | |
1631 | default: | |
1632 | iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf, | |
1633 | NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd, | |
4083aa98 | 1634 | FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); |
c5343203 JS |
1635 | } |
1636 | ||
1637 | nvmet_fc_xmt_ls_rsp(tgtport, iod); | |
1638 | } | |
1639 | ||
1640 | /*
1641 | * Work-queue wrapper for nvmet_fc_handle_ls_rqst()
1642 | */
1643 | static void | |
1644 | nvmet_fc_handle_ls_rqst_work(struct work_struct *work) | |
1645 | { | |
1646 | struct nvmet_fc_ls_iod *iod = | |
1647 | container_of(work, struct nvmet_fc_ls_iod, work); | |
1648 | struct nvmet_fc_tgtport *tgtport = iod->tgtport; | |
1649 | ||
1650 | nvmet_fc_handle_ls_rqst(tgtport, iod); | |
1651 | } | |
1652 | ||
1653 | ||
1654 | /** | |
1655 | * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD | |
1656 | * upon the reception of an NVME LS request.
1657 | * | |
1658 | * The nvmet-fc layer will copy the payload to an internal structure for
1659 | * processing. As such, upon completion of the routine, the LLDD may
1660 | * immediately free/reuse the LS request buffer passed in the call.
1661 | *
1662 | * If this routine returns an error, the LLDD should abort the exchange.
1663 | * | |
1664 | * @target_port: pointer to the (registered) target port the LS was
1665 | * received on. | |
1666 | * @lsreq: pointer to a lsreq request structure to be used to reference | |
1667 | * the exchange corresponding to the LS. | |
1668 | * @lsreqbuf: pointer to the buffer containing the LS Request | |
1669 | * @lsreqbuf_len: length, in bytes, of the received LS request | |
1670 | */ | |
1671 | int | |
1672 | nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, | |
1673 | struct nvmefc_tgt_ls_req *lsreq, | |
1674 | void *lsreqbuf, u32 lsreqbuf_len) | |
1675 | { | |
1676 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); | |
1677 | struct nvmet_fc_ls_iod *iod; | |
1678 | ||
1679 | if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE) | |
1680 | return -E2BIG; | |
1681 | ||
1682 | if (!nvmet_fc_tgtport_get(tgtport)) | |
1683 | return -ESHUTDOWN; | |
1684 | ||
1685 | iod = nvmet_fc_alloc_ls_iod(tgtport); | |
1686 | if (!iod) { | |
1687 | nvmet_fc_tgtport_put(tgtport); | |
1688 | return -ENOENT; | |
1689 | } | |
1690 | ||
1691 | iod->lsreq = lsreq; | |
1692 | iod->fcpreq = NULL; | |
1693 | memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); | |
1694 | iod->rqstdatalen = lsreqbuf_len; | |
1695 | ||
1696 | schedule_work(&iod->work); | |
1697 | ||
1698 | return 0; | |
1699 | } | |
1700 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req); | |
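/*
 * Illustrative only (not part of this file): a minimal sketch of how an
 * LLDD receive path might hand an LS frame to the transport, per the
 * contract documented above. The example_lport structure, its members,
 * and example_lldd_abort_exchange() are hypothetical; only
 * nvmet_fc_rcv_ls_req() and struct nvmefc_tgt_ls_req are real API here.
 *
 *	static void example_lldd_recv_ls(struct example_lport *lport,
 *			struct nvmefc_tgt_ls_req *lsreq,
 *			void *lsbuf, u32 lslen)
 *	{
 *		if (nvmet_fc_rcv_ls_req(lport->targetport, lsreq,
 *					lsbuf, lslen))
 *			example_lldd_abort_exchange(lport, lsreq);
 *		// on success (0), lsbuf may be freed/reused immediately,
 *		// as the transport copied it to its own iod buffer
 *	}
 */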
1701 | ||
1702 | ||
1703 | /* | |
1704 | * ********************** | |
1705 | * Start of FCP handling | |
1706 | * ********************** | |
1707 | */ | |
1708 | ||
1709 | static int | |
1710 | nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) | |
1711 | { | |
1712 | struct scatterlist *sg; | |
c5343203 | 1713 | unsigned int nent; |
c5343203 | 1714 | |
4442b56f | 1715 | sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); |
c5343203 JS |
1716 | if (!sg) |
1717 | goto out; | |
1718 | ||
c5343203 JS |
1719 | fod->data_sg = sg; |
1720 | fod->data_sg_cnt = nent; | |
1721 | fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, | |
1722 | ((fod->io_dir == NVMET_FCP_WRITE) ? | |
1723 | DMA_FROM_DEVICE : DMA_TO_DEVICE)); | |
1724 | /* note: write from initiator perspective */ | |
1725 | ||
1726 | return 0; | |
1727 | ||
c5343203 JS |
1728 | out: |
1729 | return NVME_SC_INTERNAL; | |
1730 | } | |
1731 | ||
1732 | static void | |
1733 | nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) | |
1734 | { | |
c5343203 JS |
1735 | if (!fod->data_sg || !fod->data_sg_cnt) |
1736 | return; | |
1737 | ||
1738 | fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, | |
1739 | ((fod->io_dir == NVMET_FCP_WRITE) ? | |
1740 | DMA_FROM_DEVICE : DMA_TO_DEVICE)); | |
4442b56f | 1741 | sgl_free(fod->data_sg); |
c820ad4c JS |
1742 | fod->data_sg = NULL; |
1743 | fod->data_sg_cnt = 0; | |
c5343203 JS |
1744 | } |
1745 | ||
1746 | ||
1747 | static bool | |
1748 | queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd) | |
1749 | { | |
1750 | u32 sqtail, used; | |
1751 | ||
1752 | /* egad, this is ugly. And sqtail is just a best guess */ | |
1753 | sqtail = atomic_read(&q->sqtail) % q->sqsize; | |
1754 | ||
1755 | used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); | |
1756 | return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); | |
1757 | } | |
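/*
 * Worked example of the check above (illustrative values only):
 * with q->sqsize = 32, sqhd = 2 and a sampled sqtail of 31,
 * used = 31 - 2 = 29, and (29 * 10) >= ((32 - 1) * 9) becomes
 * 290 >= 279, so the queue is treated as 90% (or more) full.
 */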
1758 | ||
1759 | /* | |
1760 | * Prep RSP payload. | |
1761 | * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op | |
1762 | */ | |
1763 | static void | |
1764 | nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport, | |
1765 | struct nvmet_fc_fcp_iod *fod) | |
1766 | { | |
1767 | struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; | |
1768 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; | |
1769 | struct nvme_completion *cqe = &ersp->cqe; | |
1770 | u32 *cqewd = (u32 *)cqe; | |
1771 | bool send_ersp = false; | |
1772 | u32 rsn, rspcnt, xfr_length; | |
1773 | ||
1774 | if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) | |
5e62d5c9 | 1775 | xfr_length = fod->req.transfer_len; |
c5343203 JS |
1776 | else |
1777 | xfr_length = fod->offset; | |
1778 | ||
1779 | /* | |
1780 | * check to see if we can send a 0's rsp. | |
1781 | * Note: to send a 0's response, the NVME-FC host transport will | |
1782 | * recreate the CQE. The host transport knows: sq id, SQHD (last | |
1783 | * seen in an ersp), and command_id. Thus it will create a | |
1784 | * zero-filled CQE with those known fields filled in. Transport | |
1785 | * must send an ersp for any condition where the cqe won't match | |
1786 | * this. | |
1787 | * | |
1788 | * Here are the FC-NVME mandated cases where we must send an ersp: | |
1789 | * every N responses, where N=ersp_ratio | |
1790 | * force fabric commands to send ersp's (not in FC-NVME but good | |
1791 | * practice) | |
1792 | * normal cmds: any time status is non-zero, or status is zero | |
1793 | * but words 0 or 1 are non-zero. | |
1794 | * the SQ is 90% or more full | |
1795 | * the cmd is a fused command | |
1796 | * transferred data length not equal to cmd iu length | |
1797 | */ | |
1798 | rspcnt = atomic_inc_return(&fod->queue->zrspcnt); | |
1799 | if (!(rspcnt % fod->queue->ersp_ratio) || | |
1800 | sqe->opcode == nvme_fabrics_command || | |
5e62d5c9 | 1801 | xfr_length != fod->req.transfer_len || |
c5343203 JS |
1802 | (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || |
1803 | (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || | |
8ad76cf1 | 1804 | queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) |
c5343203 JS |
1805 | send_ersp = true; |
1806 | ||
1807 | /* re-set the fields */ | |
1808 | fod->fcpreq->rspaddr = ersp; | |
1809 | fod->fcpreq->rspdma = fod->rspdma; | |
1810 | ||
1811 | if (!send_ersp) { | |
1812 | memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP); | |
1813 | fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; | |
1814 | } else { | |
1815 | ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); | |
1816 | rsn = atomic_inc_return(&fod->queue->rsn); | |
1817 | ersp->rsn = cpu_to_be32(rsn); | |
1818 | ersp->xfrd_len = cpu_to_be32(xfr_length); | |
1819 | fod->fcpreq->rsplen = sizeof(*ersp); | |
1820 | } | |
1821 | ||
1822 | fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, | |
1823 | sizeof(fod->rspiubuf), DMA_TO_DEVICE); | |
1824 | } | |
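/*
 * Example of the ersp decision above (illustrative): with an
 * ersp_ratio of 8, every 8th response on the queue (rspcnt % 8 == 0)
 * is sent as a full ersp even when the CQE is otherwise clean. In
 * between, a clean CQE (good status, words 0/1 zero, transferred
 * length matching the cmd iu, not fabrics/fused, queue under 90%
 * full) goes out as the short NVME_FC_SIZEOF_ZEROS_RSP response.
 */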
1825 | ||
1826 | static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq); | |
1827 | ||
a97ec51b JS |
1828 | static void |
1829 | nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport, | |
1830 | struct nvmet_fc_fcp_iod *fod) | |
1831 | { | |
1832 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; | |
1833 | ||
1834 | /* data no longer needed */ | |
1835 | nvmet_fc_free_tgt_pgs(fod); | |
1836 | ||
1837 | /* | |
1838 | * if an ABTS was received or we issued the fcp_abort early,
1839 | * don't call abort routine again. | |
1840 | */ | |
1841 | /* no need to take lock - lock was taken earlier to get here */ | |
1842 | if (!fod->aborted) | |
1843 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); | |
1844 | ||
1845 | nvmet_fc_free_fcp_iod(fod->queue, fod); | |
1846 | } | |
1847 | ||
c5343203 JS |
1848 | static void |
1849 | nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport, | |
1850 | struct nvmet_fc_fcp_iod *fod) | |
1851 | { | |
1852 | int ret; | |
1853 | ||
1854 | fod->fcpreq->op = NVMET_FCOP_RSP; | |
1855 | fod->fcpreq->timeout = 0; | |
1856 | ||
1857 | nvmet_fc_prep_fcp_rsp(tgtport, fod); | |
1858 | ||
1859 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); | |
1860 | if (ret) | |
a97ec51b | 1861 | nvmet_fc_abort_op(tgtport, fod); |
c5343203 JS |
1862 | } |
1863 | ||
1864 | static void | |
1865 | nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, | |
1866 | struct nvmet_fc_fcp_iod *fod, u8 op) | |
1867 | { | |
1868 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; | |
a97ec51b | 1869 | unsigned long flags; |
48fa362b | 1870 | u32 tlen; |
c5343203 JS |
1871 | int ret; |
1872 | ||
1873 | fcpreq->op = op; | |
1874 | fcpreq->offset = fod->offset; | |
1875 | fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; | |
48fa362b JS |
1876 | |
1877 | tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE, | |
5e62d5c9 | 1878 | (fod->req.transfer_len - fod->offset)); |
c5343203 JS |
1879 | fcpreq->transfer_length = tlen; |
1880 | fcpreq->transferred_length = 0; | |
1881 | fcpreq->fcp_error = 0; | |
1882 | fcpreq->rsplen = 0; | |
1883 | ||
48fa362b JS |
1884 | fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE]; |
1885 | fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE); | |
c5343203 JS |
1886 | |
1887 | /* | |
1888 | * If the last READDATA request: check if LLDD supports | |
1889 | * combined xfr with response. | |
1890 | */ | |
1891 | if ((op == NVMET_FCOP_READDATA) && | |
5e62d5c9 | 1892 | ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && |
c5343203 JS |
1893 | (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { |
1894 | fcpreq->op = NVMET_FCOP_READDATA_RSP; | |
1895 | nvmet_fc_prep_fcp_rsp(tgtport, fod); | |
1896 | } | |
1897 | ||
1898 | ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); | |
1899 | if (ret) { | |
1900 | /* | |
1901 | * should be ok to set w/o lock as it's in the thread of
1902 | * execution (not an async timer routine) and doesn't | |
1903 | * contend with any clearing action | |
1904 | */ | |
1905 | fod->abort = true; | |
1906 | ||
a97ec51b JS |
1907 | if (op == NVMET_FCOP_WRITEDATA) { |
1908 | spin_lock_irqsave(&fod->flock, flags); | |
1909 | fod->writedataactive = false; | |
1910 | spin_unlock_irqrestore(&fod->flock, flags); | |
29b3d26e | 1911 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); |
a97ec51b | 1912 | } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { |
c5343203 JS |
1913 | fcpreq->fcp_error = ret; |
1914 | fcpreq->transferred_length = 0; | |
1915 | nvmet_fc_xmt_fcp_op_done(fod->fcpreq); | |
1916 | } | |
1917 | } | |
1918 | } | |
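/*
 * Worked example of the chunking above (illustrative, assuming 4K
 * pages and an LLDD that does not limit max_sg_cnt further): the
 * NVMET_FC_MAX_SEQ_LENGTH of 256K caps tlen at 256K per fcp_op, so
 * a 1M read moves as four 256K chunks, fod->offset advancing by
 * transferred_length on each completion, with the final chunk
 * possibly folded into a NVMET_FCOP_READDATA_RSP.
 */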
1919 | ||
a97ec51b JS |
1920 | static inline bool |
1921 | __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) | |
1922 | { | |
1923 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; | |
1924 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; | |
1925 | ||
1926 | /* if in the middle of an io and we need to tear down */ | |
1927 | if (abort) { | |
1928 | if (fcpreq->op == NVMET_FCOP_WRITEDATA) { | |
29b3d26e | 1929 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); |
a97ec51b JS |
1930 | return true; |
1931 | } | |
1932 | ||
1933 | nvmet_fc_abort_op(tgtport, fod); | |
1934 | return true; | |
1935 | } | |
1936 | ||
1937 | return false; | |
1938 | } | |
1939 | ||
39498fae JS |
1940 | /* |
1941 | * actual done handler for FCP operations when completed by the lldd | |
1942 | */ | |
c5343203 | 1943 | static void |
39498fae | 1944 | nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) |
c5343203 | 1945 | { |
39498fae | 1946 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
c5343203 JS |
1947 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
1948 | unsigned long flags; | |
1949 | bool abort; | |
1950 | ||
1951 | spin_lock_irqsave(&fod->flock, flags); | |
1952 | abort = fod->abort; | |
a97ec51b | 1953 | fod->writedataactive = false; |
c5343203 JS |
1954 | spin_unlock_irqrestore(&fod->flock, flags); |
1955 | ||
c5343203 JS |
1956 | switch (fcpreq->op) { |
1957 | ||
1958 | case NVMET_FCOP_WRITEDATA: | |
a97ec51b JS |
1959 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
1960 | return; | |
f64935ab | 1961 | if (fcpreq->fcp_error || |
c5343203 | 1962 | fcpreq->transferred_length != fcpreq->transfer_length) { |
a97ec51b JS |
1963 | spin_lock(&fod->flock); |
1964 | fod->abort = true; | |
1965 | spin_unlock(&fod->flock); | |
1966 | ||
29b3d26e | 1967 | nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); |
c5343203 JS |
1968 | return; |
1969 | } | |
1970 | ||
1971 | fod->offset += fcpreq->transferred_length; | |
5e62d5c9 | 1972 | if (fod->offset != fod->req.transfer_len) { |
a97ec51b JS |
1973 | spin_lock_irqsave(&fod->flock, flags); |
1974 | fod->writedataactive = true; | |
1975 | spin_unlock_irqrestore(&fod->flock, flags); | |
1976 | ||
c5343203 JS |
1977 | /* transfer the next chunk */ |
1978 | nvmet_fc_transfer_fcp_data(tgtport, fod, | |
1979 | NVMET_FCOP_WRITEDATA); | |
1980 | return; | |
1981 | } | |
1982 | ||
1983 | /* data transfer complete, resume with nvmet layer */ | |
5e62d5c9 | 1984 | nvmet_req_execute(&fod->req); |
c5343203 JS |
1985 | break; |
1986 | ||
1987 | case NVMET_FCOP_READDATA: | |
1988 | case NVMET_FCOP_READDATA_RSP: | |
a97ec51b JS |
1989 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
1990 | return; | |
f64935ab | 1991 | if (fcpreq->fcp_error || |
c5343203 | 1992 | fcpreq->transferred_length != fcpreq->transfer_length) { |
a97ec51b | 1993 | nvmet_fc_abort_op(tgtport, fod); |
c5343203 JS |
1994 | return; |
1995 | } | |
1996 | ||
1997 | /* success */ | |
1998 | ||
1999 | if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { | |
2000 | /* data no longer needed */ | |
2001 | nvmet_fc_free_tgt_pgs(fod); | |
c5343203 JS |
2002 | nvmet_fc_free_fcp_iod(fod->queue, fod); |
2003 | return; | |
2004 | } | |
2005 | ||
2006 | fod->offset += fcpreq->transferred_length; | |
5e62d5c9 | 2007 | if (fod->offset != fod->req.transfer_len) { |
c5343203 JS |
2008 | /* transfer the next chunk */ |
2009 | nvmet_fc_transfer_fcp_data(tgtport, fod, | |
2010 | NVMET_FCOP_READDATA); | |
2011 | return; | |
2012 | } | |
2013 | ||
2014 | /* data transfer complete, send response */ | |
2015 | ||
2016 | /* data no longer needed */ | |
2017 | nvmet_fc_free_tgt_pgs(fod); | |
2018 | ||
2019 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); | |
2020 | ||
2021 | break; | |
2022 | ||
2023 | case NVMET_FCOP_RSP: | |
a97ec51b JS |
2024 | if (__nvmet_fc_fod_op_abort(fod, abort)) |
2025 | return; | |
c5343203 JS |
2026 | nvmet_fc_free_fcp_iod(fod->queue, fod); |
2027 | break; | |
2028 | ||
2029 | default: | |
c5343203 JS |
2030 | break; |
2031 | } | |
2032 | } | |
2033 | ||
39498fae JS |
2034 | static void |
2035 | nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work) | |
2036 | { | |
2037 | struct nvmet_fc_fcp_iod *fod = | |
2038 | container_of(work, struct nvmet_fc_fcp_iod, done_work); | |
2039 | ||
2040 | nvmet_fc_fod_op_done(fod); | |
2041 | } | |
2042 | ||
2043 | static void | |
2044 | nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) | |
2045 | { | |
2046 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; | |
2047 | struct nvmet_fc_tgt_queue *queue = fod->queue; | |
2048 | ||
2049 | if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR) | |
2050 | /* context switch so completion is not in ISR context */ | |
2051 | queue_work_on(queue->cpu, queue->work_q, &fod->done_work); | |
2052 | else | |
2053 | nvmet_fc_fod_op_done(fod); | |
2054 | } | |
2055 | ||
c5343203 JS |
2056 | /* |
2057 | * actual completion handler after execution by the nvmet layer | |
2058 | */ | |
2059 | static void | |
2060 | __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport, | |
2061 | struct nvmet_fc_fcp_iod *fod, int status) | |
2062 | { | |
2063 | struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; | |
2064 | struct nvme_completion *cqe = &fod->rspiubuf.cqe; | |
2065 | unsigned long flags; | |
2066 | bool abort; | |
2067 | ||
2068 | spin_lock_irqsave(&fod->flock, flags); | |
2069 | abort = fod->abort; | |
2070 | spin_unlock_irqrestore(&fod->flock, flags); | |
2071 | ||
2072 | /* if we have a CQE, snoop the last sq_head value */ | |
2073 | if (!status) | |
2074 | fod->queue->sqhd = cqe->sq_head; | |
2075 | ||
2076 | if (abort) { | |
a97ec51b | 2077 | nvmet_fc_abort_op(tgtport, fod); |
c5343203 JS |
2078 | return; |
2079 | } | |
2080 | ||
2081 | /* if an error occurred handling the cmd after initial parsing */
2082 | if (status) { | |
2083 | /* fudge up a failed CQE status for our transport error */ | |
2084 | memset(cqe, 0, sizeof(*cqe)); | |
2085 | cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ | |
2086 | cqe->sq_id = cpu_to_le16(fod->queue->qid); | |
2087 | cqe->command_id = sqe->command_id; | |
2088 | cqe->status = cpu_to_le16(status); | |
2089 | } else { | |
2090 | ||
2091 | /* | |
2092 | * try to push the data even if the SQE status is non-zero. | |
2093 | * There may be a status where data still was intended to | |
2094 | * be moved | |
2095 | */ | |
2096 | if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { | |
2097 | /* push the data over before sending rsp */ | |
2098 | nvmet_fc_transfer_fcp_data(tgtport, fod, | |
2099 | NVMET_FCOP_READDATA); | |
2100 | return; | |
2101 | } | |
2102 | ||
2103 | /* writes & no data - fall thru */ | |
2104 | } | |
2105 | ||
2106 | /* data no longer needed */ | |
2107 | nvmet_fc_free_tgt_pgs(fod); | |
2108 | ||
2109 | nvmet_fc_xmt_fcp_rsp(tgtport, fod); | |
2110 | } | |
2111 | ||
2112 | ||
2113 | static void | |
2114 | nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req) | |
2115 | { | |
2116 | struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); | |
2117 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; | |
2118 | ||
2119 | __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); | |
2120 | } | |
2121 | ||
2122 | ||
2123 | /*
2124 | * Actual processing routine for received FC-NVME FCP commands from the LLDD
2125 | */
edba98dd | 2126 | static void |
c5343203 JS |
2127 | nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, |
2128 | struct nvmet_fc_fcp_iod *fod) | |
2129 | { | |
2130 | struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; | |
cce75291 | 2131 | u32 xfrlen = be32_to_cpu(cmdiu->data_len); |
c5343203 JS |
2132 | int ret; |
2133 | ||
2134 | /* | |
2135 | * Fused commands are currently not supported in the Linux
2136 | * implementation.
2137 | *
2138 | * As such, the implementation of the FC transport does not
2139 | * look for fused commands, nor does it hold and order their
2140 | * delivery to the upper layer by csn until both have arrived.
2141 | */ | |
2142 | ||
2143 | fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; | |
2144 | ||
c5343203 JS |
2145 | if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { |
2146 | fod->io_dir = NVMET_FCP_WRITE; | |
2147 | if (!nvme_is_write(&cmdiu->sqe)) | |
2148 | goto transport_error; | |
2149 | } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { | |
2150 | fod->io_dir = NVMET_FCP_READ; | |
2151 | if (nvme_is_write(&cmdiu->sqe)) | |
2152 | goto transport_error; | |
2153 | } else { | |
2154 | fod->io_dir = NVMET_FCP_NODATA; | |
cce75291 | 2155 | if (xfrlen) |
c5343203 JS |
2156 | goto transport_error; |
2157 | } | |
2158 | ||
2159 | fod->req.cmd = &fod->cmdiubuf.sqe; | |
2160 | fod->req.rsp = &fod->rspiubuf.cqe; | |
2161 | fod->req.port = fod->queue->port; | |
2162 | ||
c5343203 JS |
2163 | /* clear any response payload */ |
2164 | memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); | |
2165 | ||
188f7e8a JS |
2166 | fod->data_sg = NULL; |
2167 | fod->data_sg_cnt = 0; | |
2168 | ||
c5343203 JS |
2169 | ret = nvmet_req_init(&fod->req, |
2170 | &fod->queue->nvme_cq, | |
2171 | &fod->queue->nvme_sq, | |
2172 | &nvmet_fc_tgt_fcp_ops); | |
188f7e8a JS |
2173 | if (!ret) { |
2174 | /* bad SQE content or invalid ctrl state */ | |
2175 | /* nvmet layer has already called op done to send rsp. */ | |
c5343203 JS |
2176 | return; |
2177 | } | |
2178 | ||
cce75291 JS |
2179 | fod->req.transfer_len = xfrlen; |
2180 | ||
c5343203 JS |
2181 | /* keep a running counter of tail position */ |
2182 | atomic_inc(&fod->queue->sqtail); | |
2183 | ||
5e62d5c9 | 2184 | if (fod->req.transfer_len) { |
c5343203 JS |
2185 | ret = nvmet_fc_alloc_tgt_pgs(fod); |
2186 | if (ret) { | |
2187 | nvmet_req_complete(&fod->req, ret); | |
2188 | return; | |
2189 | } | |
2190 | } | |
2191 | fod->req.sg = fod->data_sg; | |
2192 | fod->req.sg_cnt = fod->data_sg_cnt; | |
2193 | fod->offset = 0; | |
c5343203 JS |
2194 | |
2195 | if (fod->io_dir == NVMET_FCP_WRITE) { | |
2196 | /* pull the data over before invoking nvmet layer */ | |
2197 | nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); | |
2198 | return; | |
2199 | } | |
2200 | ||
2201 | /* | |
2202 | * Reads or no data: | |
2203 | * | |
2204 | * can invoke the nvmet_layer now. If read data, cmd completion will | |
2205 | * push the data | |
2206 | */ | |
5e62d5c9 | 2207 | nvmet_req_execute(&fod->req); |
c5343203 JS |
2208 | return; |
2209 | ||
2210 | transport_error: | |
a97ec51b | 2211 | nvmet_fc_abort_op(tgtport, fod); |
c5343203 JS |
2212 | } |
2213 | ||
2214 | /*
2215 | * Work-queue wrapper for nvmet_fc_handle_fcp_rqst()
2216 | */
2217 | static void | |
2218 | nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) | |
2219 | { | |
2220 | struct nvmet_fc_fcp_iod *fod = | |
2221 | container_of(work, struct nvmet_fc_fcp_iod, work); | |
2222 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; | |
2223 | ||
2224 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | |
2225 | } | |
2226 | ||
2227 | /** | |
2228 | * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD | |
2229 | * upon the reception of an NVME FCP CMD IU.
2230 | * | |
2231 | * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2232 | * layer for processing. | |
2233 | * | |
0fb228d3 JS |
2234 | * The nvmet_fc layer allocates a local job structure (struct |
2235 | * nvmet_fc_fcp_iod) from the queue for the io and copies the | |
2236 | * CMD IU buffer to the job structure. As such, on a successful | |
2237 | * completion (returns 0), the LLDD may immediately free/reuse | |
2238 | * the CMD IU buffer passed in the call. | |
2239 | * | |
2240 | * However, due to the packetized nature of FC and the API of the FC
2241 | * LLDD (which may issue a hw command to send the response, yet not
2242 | * see the hw completion for that command before a new command is
2243 | * asynchronously received), it's possible for a command to arrive
2244 | * before the LLDD and nvmet_fc have recycled the job structure. This
2245 | * gives the appearance of more commands received than fit in the sq.
2246 | * To alleviate this scenario, a temporary queue is maintained in the
2247 | * transport for pending LLDD requests waiting for a queue job
2248 | * structure. In these "overrun" cases, a temporary queue element is
2249 | * allocated, the LLDD request and CMD IU buffer information are
2250 | * remembered, and the routine returns a -EOVERFLOW status.
2251 | * Subsequently, when a queue job
2252 | * structure is freed, it is immediately reallocated for anything on the | |
2253 | * pending request list. The LLDD's defer_rcv() callback is called,
2254 | * informing the LLDD that it may reuse the CMD IU buffer, and the io | |
2255 | * is then started normally with the transport. | |
c5343203 | 2256 | * |
0fb228d3 JS |
2257 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat |
2258 | * the completion as successful but must not reuse the CMD IU buffer | |
2259 | * until the LLDD's defer_rcv() callback has been called for the | |
2260 | * corresponding struct nvmefc_tgt_fcp_req pointer. | |
2261 | * | |
2262 | * If there is any other condition in which an error occurs, the | |
2263 | * transport will return a non-zero status indicating the error. | |
2264 | * In all cases other than -EOVERFLOW, the transport has not accepted the | |
2265 | * request and the LLDD should abort the exchange. | |
c5343203 JS |
2266 | * |
2267 | * @target_port: pointer to the (registered) target port the FCP CMD IU | |
19b58d94 | 2268 | * was received on. |
c5343203 JS |
2269 | * @fcpreq: pointer to a fcpreq request structure to be used to reference |
2270 | * the exchange corresponding to the FCP Exchange. | |
2271 | * @cmdiubuf: pointer to the buffer containing the FCP CMD IU | |
2272 | * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU | |
2273 | */ | |
2274 | int | |
2275 | nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, | |
2276 | struct nvmefc_tgt_fcp_req *fcpreq, | |
2277 | void *cmdiubuf, u32 cmdiubuf_len) | |
2278 | { | |
2279 | struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); | |
2280 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; | |
2281 | struct nvmet_fc_tgt_queue *queue; | |
2282 | struct nvmet_fc_fcp_iod *fod; | |
0fb228d3 JS |
2283 | struct nvmet_fc_defer_fcp_req *deferfcp; |
2284 | unsigned long flags; | |
c5343203 JS |
2285 | |
2286 | /* validate iu, so the connection id can be used to find the queue */ | |
2287 | if ((cmdiubuf_len != sizeof(*cmdiu)) || | |
2288 | (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || | |
2289 | (cmdiu->fc_id != NVME_CMD_FC_ID) || | |
2290 | (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) | |
2291 | return -EIO; | |
2292 | ||
c5343203 JS |
2293 | queue = nvmet_fc_find_target_queue(tgtport, |
2294 | be64_to_cpu(cmdiu->connection_id)); | |
2295 | if (!queue) | |
2296 | return -ENOTCONN; | |
2297 | ||
2298 | /* | |
2299 | * note: reference taken by find_target_queue | |
2300 | * After successful fod allocation, the fod will inherit the | |
2301 | * ownership of that reference and will remove the reference | |
2302 | * when the fod is freed. | |
2303 | */ | |
2304 | ||
0fb228d3 JS |
2305 | spin_lock_irqsave(&queue->qlock, flags); |
2306 | ||
c5343203 | 2307 | fod = nvmet_fc_alloc_fcp_iod(queue); |
0fb228d3 JS |
2308 | if (fod) { |
2309 | spin_unlock_irqrestore(&queue->qlock, flags); | |
2310 | ||
2311 | fcpreq->nvmet_fc_private = fod; | |
2312 | fod->fcpreq = fcpreq; | |
2313 | ||
2314 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | |
2315 | ||
2316 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | |
2317 | ||
2318 | return 0; | |
2319 | } | |
2320 | ||
2321 | if (!tgtport->ops->defer_rcv) { | |
2322 | spin_unlock_irqrestore(&queue->qlock, flags); | |
c5343203 JS |
2323 | /* release the queue lookup reference */ |
2324 | nvmet_fc_tgt_q_put(queue); | |
2325 | return -ENOENT; | |
2326 | } | |
2327 | ||
0fb228d3 JS |
2328 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
2329 | struct nvmet_fc_defer_fcp_req, req_list); | |
2330 | if (deferfcp) { | |
2331 | /* Just re-use one that was previously allocated */ | |
2332 | list_del(&deferfcp->req_list); | |
2333 | } else { | |
2334 | spin_unlock_irqrestore(&queue->qlock, flags); | |
c5343203 | 2335 | |
0fb228d3 JS |
2336 | /* Now we need to dynamically allocate one */ |
2337 | deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); | |
2338 | if (!deferfcp) { | |
2339 | /* release the queue lookup reference */ | |
2340 | nvmet_fc_tgt_q_put(queue); | |
2341 | return -ENOMEM; | |
2342 | } | |
2343 | spin_lock_irqsave(&queue->qlock, flags); | |
2344 | } | |
c5343203 | 2345 | |
0fb228d3 JS |
2346 | /* For now, use rspaddr / rsplen to save payload information */ |
2347 | fcpreq->rspaddr = cmdiubuf; | |
2348 | fcpreq->rsplen = cmdiubuf_len; | |
2349 | deferfcp->fcp_req = fcpreq; | |
2350 | ||
2351 | /* defer processing till a fod becomes available */ | |
2352 | list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); | |
2353 | ||
2354 | /* NOTE: the queue lookup reference is still valid */ | |
2355 | ||
2356 | spin_unlock_irqrestore(&queue->qlock, flags); | |
2357 | ||
2358 | return -EOVERFLOW; | |
c5343203 JS |
2359 | } |
2360 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); | |
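/*
 * Illustrative only (not part of this file): a minimal sketch of an
 * LLDD CMD IU receive path honoring the -EOVERFLOW contract described
 * above. The exch structure and example_lldd_abort_exchange() are
 * hypothetical; nvmet_fc_rcv_fcp_req() is the real entry point.
 *
 *	ret = nvmet_fc_rcv_fcp_req(lport->targetport, &exch->fcpreq,
 *				cmdiubuf, cmdiubuf_len);
 *	switch (ret) {
 *	case 0:			// accepted; cmdiubuf may be reused now
 *		break;
 *	case -EOVERFLOW:	// accepted; hold cmdiubuf untouched
 *		break;		//   until defer_rcv() is called
 *	default:		// not accepted; abort the exchange
 *		example_lldd_abort_exchange(exch);
 *	}
 */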
2361 | ||
a97ec51b JS |
2362 | /** |
2363 | * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD | |
2364 | * upon the reception of an ABTS for an FCP command
2365 | *
2366 | * Notify the transport that an ABTS has been received for an FCP command
2367 | * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The | |
2368 | * LLDD believes the command is still being worked on | |
2369 | * (template_ops->fcp_req_release() has not been called). | |
2370 | * | |
2371 | * The transport will wait for any outstanding work (an op to the LLDD, | |
2372 | * which the lldd should complete with error due to the ABTS; or the | |
2373 | * completion from the nvmet layer of the nvme command), then will | |
2374 | * stop processing and call the LLDD's fcp_req_release() callback to
2375 | * return the i/o context to the LLDD. The LLDD may send the BA_ACC | |
2376 | * to the ABTS either after return from this function (assuming any | |
2377 | * outstanding op work has been terminated) or upon the callback being | |
2378 | * called. | |
2379 | * | |
2380 | * @target_port: pointer to the (registered) target port the FCP CMD IU | |
2381 | * was received on. | |
2382 | * @fcpreq: pointer to the fcpreq request structure that corresponds | |
2383 | * to the exchange that received the ABTS. | |
2384 | */ | |
2385 | void | |
2386 | nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, | |
2387 | struct nvmefc_tgt_fcp_req *fcpreq) | |
2388 | { | |
2389 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; | |
2390 | struct nvmet_fc_tgt_queue *queue; | |
2391 | unsigned long flags; | |
2392 | ||
2393 | if (!fod || fod->fcpreq != fcpreq) | |
2394 | /* job appears to have already completed, ignore abort */ | |
2395 | return; | |
2396 | ||
2397 | queue = fod->queue; | |
2398 | ||
2399 | spin_lock_irqsave(&queue->qlock, flags); | |
2400 | if (fod->active) { | |
2401 | /* | |
2402 | * mark as abort. The abort handler, invoked upon completion | |
2403 | * of any work, will detect the aborted status and do the | |
2404 | * callback. | |
2405 | */ | |
2406 | spin_lock(&fod->flock); | |
2407 | fod->abort = true; | |
2408 | fod->aborted = true; | |
2409 | spin_unlock(&fod->flock); | |
2410 | } | |
2411 | spin_unlock_irqrestore(&queue->qlock, flags); | |
2412 | } | |
2413 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); | |
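/*
 * Illustrative only (not part of this file): per the contract above, a
 * hypothetical LLDD, on receipt of an ABTS, simply notifies the
 * transport and defers the BA_ACC until the io context comes back
 * through its fcp_req_release() callback:
 *
 *	nvmet_fc_rcv_fcp_abort(lport->targetport, &exch->fcpreq);
 *	exch->send_ba_acc_on_release = true;	// hypothetical flag
 */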
2414 | ||
c5343203 JS |
2415 | |
2416 | struct nvmet_fc_traddr { | |
2417 | u64 nn; | |
2418 | u64 pn; | |
2419 | }; | |
2420 | ||
c5343203 | 2421 | static int |
9c5358e1 | 2422 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
c5343203 | 2423 | { |
c5343203 JS |
2424 | u64 token64; |
2425 | ||
9c5358e1 JS |
2426 | if (match_u64(sstr, &token64)) |
2427 | return -EINVAL; | |
2428 | *val = token64; | |
c5343203 | 2429 | |
9c5358e1 JS |
2430 | return 0; |
2431 | } | |
c5343203 | 2432 | |
9c5358e1 JS |
2433 | /* |
2434 | * This routine validates and extracts the WWN's from the TRADDR string. | |
2435 | * As the kernel parsers need the 0x prefix to determine the number
2436 | * base, universally build the string to parse with a 0x prefix.
2437 | */ | |
2438 | static int | |
2439 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) | |
2440 | { | |
2441 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; | |
2442 | substring_t wwn = { name, &name[sizeof(name)-1] }; | |
2443 | int nnoffset, pnoffset; | |
2444 | ||
2445 | /* validate the string is in one of the 2 allowed formats */
2446 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && | |
2447 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && | |
2448 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], | |
2449 | "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { | |
2450 | nnoffset = NVME_FC_TRADDR_OXNNLEN; | |
2451 | pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + | |
2452 | NVME_FC_TRADDR_OXNNLEN; | |
2453 | } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && | |
2454 | !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && | |
2455 | !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], | |
2456 | "pn-", NVME_FC_TRADDR_NNLEN))) { | |
2457 | nnoffset = NVME_FC_TRADDR_NNLEN; | |
2458 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; | |
2459 | } else | |
2460 | goto out_einval; | |
2461 | ||
2462 | name[0] = '0'; | |
2463 | name[1] = 'x'; | |
2464 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; | |
2465 | ||
2466 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); | |
2467 | if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) | |
2468 | goto out_einval; | |
2469 | ||
2470 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); | |
2471 | if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) | |
2472 | goto out_einval; | |
c5343203 | 2473 | |
9c5358e1 JS |
2474 | return 0; |
2475 | ||
2476 | out_einval: | |
2477 | pr_warn("%s: bad traddr string\n", __func__); | |
2478 | return -EINVAL; | |
c5343203 JS |
2479 | } |
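/*
 * Example traddr strings accepted above (hypothetical WWNs):
 *	"nn-0x20000090fa942779:pn-0x10000090fa942779"	(long form)
 *	"nn-20000090fa942779:pn-10000090fa942779"	(min form)
 * Both parse to traddr->nn == 0x20000090fa942779 and
 * traddr->pn == 0x10000090fa942779.
 */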
2480 | ||
2481 | static int | |
2482 | nvmet_fc_add_port(struct nvmet_port *port) | |
2483 | { | |
2484 | struct nvmet_fc_tgtport *tgtport; | |
2485 | struct nvmet_fc_traddr traddr = { 0L, 0L }; | |
2486 | unsigned long flags; | |
2487 | int ret; | |
2488 | ||
2489 | /* validate the address info */ | |
2490 | if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || | |
2491 | (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) | |
2492 | return -EINVAL; | |
2493 | ||
2494 | /* map the traddr address info to a target port */ | |
2495 | ||
9c5358e1 JS |
2496 | ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, |
2497 | sizeof(port->disc_addr.traddr)); | |
c5343203 JS |
2498 | if (ret) |
2499 | return ret; | |
2500 | ||
2501 | ret = -ENXIO; | |
2502 | spin_lock_irqsave(&nvmet_fc_tgtlock, flags); | |
2503 | list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { | |
2504 | if ((tgtport->fc_target_port.node_name == traddr.nn) && | |
2505 | (tgtport->fc_target_port.port_name == traddr.pn)) { | |
9ce1f2e1 JS |
2506 | tgtport->port = port; |
2507 | ret = 0; | |
c5343203 JS |
2508 | break; |
2509 | } | |
2510 | } | |
2511 | spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); | |
2512 | return ret; | |
2513 | } | |
2514 | ||
2515 | static void | |
2516 | nvmet_fc_remove_port(struct nvmet_port *port) | |
2517 | { | |
9ce1f2e1 | 2518 | /* nothing to do */ |
c5343203 JS |
2519 | } |
2520 | ||
e929f06d | 2521 | static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { |
c5343203 JS |
2522 | .owner = THIS_MODULE, |
2523 | .type = NVMF_TRTYPE_FC, | |
2524 | .msdbd = 1, | |
2525 | .add_port = nvmet_fc_add_port, | |
2526 | .remove_port = nvmet_fc_remove_port, | |
2527 | .queue_response = nvmet_fc_fcp_nvme_cmd_done, | |
2528 | .delete_ctrl = nvmet_fc_delete_ctrl, | |
2529 | }; | |
2530 | ||
2531 | static int __init nvmet_fc_init_module(void) | |
2532 | { | |
2533 | return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops); | |
2534 | } | |
2535 | ||
2536 | static void __exit nvmet_fc_exit_module(void) | |
2537 | { | |
2538 | /* sanity check - all lports should be removed */ | |
2539 | if (!list_empty(&nvmet_fc_target_list)) | |
2540 | pr_warn("%s: targetport list not empty\n", __func__); | |
2541 | ||
2542 | nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops); | |
2543 | ||
2544 | ida_destroy(&nvmet_fc_tgtport_cnt); | |
2545 | } | |
2546 | ||
2547 | module_init(nvmet_fc_init_module); | |
2548 | module_exit(nvmet_fc_exit_module); | |
2549 | ||
2550 | MODULE_LICENSE("GPL v2"); |