1 /*******************************************************************************
2  * Filename: target_core_xcopy.c
3  *
4  * This file contains support for SPC-4 Extended-Copy offload with generic
5  * TCM backends.
6  *
7  * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
8  *
9  * Author:
10  * Nicholas A. Bellinger <nab@daterainc.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ******************************************************************************/
23
24 #include <linux/slab.h>
25 #include <linux/spinlock.h>
26 #include <linux/list.h>
27 #include <linux/configfs.h>
28 #include <linux/ratelimit.h>
29 #include <scsi/scsi_proto.h>
30 #include <asm/unaligned.h>
31
32 #include <target/target_core_base.h>
33 #include <target/target_core_backend.h>
34 #include <target/target_core_fabric.h>
35
36 #include "target_core_internal.h"
37 #include "target_core_pr.h"
38 #include "target_core_ua.h"
39 #include "target_core_xcopy.h"
40
41 static struct workqueue_struct *xcopy_wq = NULL;
42
43 static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
44
45 static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
46 {
47         int off = 0;
48
49         buf[off++] = (0x6 << 4);
50         buf[off++] = 0x01;
51         buf[off++] = 0x40;
52         buf[off] = (0x5 << 4);
53
54         spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
55         return 0;
56 }
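/*
 * For reference: the 16-byte buffer built above mirrors the NAA IEEE
 * Registered Extended designator the device exposes in INQUIRY VPD page 0x83:
 *
 *   byte 0       0x60 - NAA type 0x6 in the high nibble
 *   bytes 0..3   the 24-bit IEEE company ID 0x001405 hard-coded above
 *   bytes 3..15  vendor specific identifier filled in by
 *                spc_parse_naa_6h_vendor_specific(), typically derived from
 *                the backend device's T10 unit serial
 *
 * The CSCD descriptors in an EXTENDED_COPY parameter list carry this same
 * 16-byte value, which is how the lookup code below matches a descriptor to
 * a local se_device.
 */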
57
58 struct xcopy_dev_search_info {
59         const unsigned char *dev_wwn;
60         struct se_device *found_dev;
61 };
62
63 static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
64                                               void *data)
65 {
66         struct xcopy_dev_search_info *info = data;
67         unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
68         int rc;
69
70         if (!se_dev->dev_attrib.emulate_3pc)
71                 return 0;
72
73         memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
74         target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
75
76         rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
77         if (rc != 0)
78                 return 0;
79
80         info->found_dev = se_dev;
81         pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
82
83         rc = target_depend_item(&se_dev->dev_group.cg_item);
84         if (rc != 0) {
85                 pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
86                        rc, se_dev);
87                 return rc;
88         }
89
90         pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
91                  se_dev, &se_dev->dev_group);
92         return 1;
93 }
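/*
 * Return convention for the iterator above: target_for_each_device() stops
 * walking the device list as soon as the callback returns non-zero, so
 * returning 1 both ends the search and signals that a configfs dependency has
 * been taken on the matched se_device (released again via
 * xcopy_pt_undepend_remotedev() once the copy completes or fails).
 */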
94
95 static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
96                                         struct se_device **found_dev)
97 {
98         struct xcopy_dev_search_info info;
99         int ret;
100
101         memset(&info, 0, sizeof(info));
102         info.dev_wwn = dev_wwn;
103
104         ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
105         if (ret == 1) {
106                 *found_dev = info.found_dev;
107                 return 0;
108         } else {
109                 pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
110                 return -EINVAL;
111         }
112 }
113
114 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
115                                 unsigned char *p, unsigned short cscd_index)
116 {
117         unsigned char *desc = p;
118         unsigned short ript;
119         u8 desig_len;
120         /*
121          * Extract RELATIVE INITIATOR PORT IDENTIFIER
122          */
123         ript = get_unaligned_be16(&desc[2]);
124         pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
125         /*
126          * Check for supported code set, association, and designator type
127          */
128         if ((desc[4] & 0x0f) != 0x1) {
129                 pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
130                 return -EINVAL;
131         }
132         if ((desc[5] & 0x30) != 0x00) {
133                 pr_err("XCOPY 0xe4: association other than LUN not supported\n");
134                 return -EINVAL;
135         }
136         if ((desc[5] & 0x0f) != 0x3) {
137                 pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
138                                 (desc[5] & 0x0f));
139                 return -EINVAL;
140         }
141         /*
142          * Check for matching 16 byte length for NAA IEEE Registered Extended
143          * Assigned designator
144          */
145         desig_len = desc[7];
146         if (desig_len != 16) {
147                 pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
148                 return -EINVAL;
149         }
150         pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
151         /*
152          * Check for NAA IEEE Registered Extended Assigned header..
153          */
154         if ((desc[8] & 0xf0) != 0x60) {
155                 pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
156                                         (desc[8] & 0xf0));
157                 return -EINVAL;
158         }
159
160         if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
161                 pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
162                          "dest\n", cscd_index);
163                 return 0;
164         }
165
166         if (cscd_index == xop->stdi) {
167                 memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
168                 /*
169                  * Determine if the source designator matches the local device
170                  */
171                 if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
172                                 XCOPY_NAA_IEEE_REGEX_LEN)) {
173                         xop->op_origin = XCOL_SOURCE_RECV_OP;
174                         xop->src_dev = se_cmd->se_dev;
175                         pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
176                                         " received xop\n", xop->src_dev);
177                 }
178         }
179
180         if (cscd_index == xop->dtdi) {
181                 memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
182                 /*
183                  * Determine if the destination designator matches the local
184                  * device. If @cscd_index corresponds to both source (stdi) and
185                  * destination (dtdi), or dtdi comes after stdi, then
186                  * XCOL_DEST_RECV_OP wins.
187                  */
188                 if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
189                                 XCOPY_NAA_IEEE_REGEX_LEN)) {
190                         xop->op_origin = XCOL_DEST_RECV_OP;
191                         xop->dst_dev = se_cmd->se_dev;
192                         pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
193                                 " received xop\n", xop->dst_dev);
194                 }
195         }
196
197         return 0;
198 }
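/*
 * Byte offsets checked above within a single 0xe4 (identification descriptor)
 * CSCD entry of XCOPY_TARGET_DESC_LEN bytes:
 *
 *   desc[0]      descriptor type code, 0xe4
 *   desc[2..3]   RELATIVE INITIATOR PORT IDENTIFIER, big-endian
 *   desc[4]      bits 3..0: code set, only 0x1 (binary) accepted
 *   desc[5]      bits 5..4: association, only 0x0 (LUN) accepted;
 *                bits 3..0: designator type, only 0x3 (NAA) accepted
 *   desc[7]      designator length, must be 16
 *   desc[8..23]  NAA IEEE Registered Extended designator compared against the
 *                local WWN and recorded as src/dst WWN per stdi/dtdi
 */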
199
200 static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
201                                 struct xcopy_op *xop, unsigned char *p,
202                                 unsigned short tdll, sense_reason_t *sense_ret)
203 {
204         struct se_device *local_dev = se_cmd->se_dev;
205         unsigned char *desc = p;
206         int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
207         unsigned short cscd_index = 0;
208         unsigned short start = 0;
209
210         *sense_ret = TCM_INVALID_PARAMETER_LIST;
211
212         if (offset != 0) {
213                 pr_err("XCOPY target descriptor list length is not"
214                         " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
215                 *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
216                 return -EINVAL;
217         }
218         if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
219                 pr_err("XCOPY target descriptor supports a maximum"
220                         " two src/dest descriptors, tdll: %hu too large..\n", tdll);
221                 /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
222                 *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
223                 return -EINVAL;
224         }
225         /*
226          * Generate an IEEE Registered Extended designator based upon the
227          * se_device the XCOPY was received upon..
228          */
229         memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
230         target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
231
232         while (start < tdll) {
233                 /*
234                  * Check target descriptor identification with 0xE4 type, and
235                  * compare the current index with the CSCD descriptor IDs in
236                  * the segment descriptor. Use VPD 0x83 WWPN matching ..
237                  */
238                 switch (desc[0]) {
239                 case 0xe4:
240                         rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
241                                                         &desc[0], cscd_index);
242                         if (rc != 0)
243                                 goto out;
244                         start += XCOPY_TARGET_DESC_LEN;
245                         desc += XCOPY_TARGET_DESC_LEN;
246                         cscd_index++;
247                         break;
248                 default:
249                         pr_err("XCOPY unsupported descriptor type code:"
250                                         " 0x%02x\n", desc[0]);
251                         *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
252                         goto out;
253                 }
254         }
255
256         switch (xop->op_origin) {
257         case XCOL_SOURCE_RECV_OP:
258                 rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
259                                                 &xop->dst_dev);
260                 break;
261         case XCOL_DEST_RECV_OP:
262                 rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
263                                                 &xop->src_dev);
264                 break;
265         default:
266                 pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
267                         "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
268                 rc = -EINVAL;
269                 break;
270         }
271         /*
272          * If a matching IEEE NAA 0x83 descriptor for the requested device
273          * is not located on this node, return COPY_ABORTED with ASQ/ASQC
274          * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
275          * initiator to fall back to normal copy method.
276          */
277         if (rc < 0) {
278                 *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
279                 goto out;
280         }
281
282         pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
283                  xop->src_dev, &xop->src_tid_wwn[0]);
284         pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
285                  xop->dst_dev, &xop->dst_tid_wwn[0]);
286
287         return cscd_index;
288
289 out:
290         return -EINVAL;
291 }
292
293 static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
294                                         unsigned char *p)
295 {
296         unsigned char *desc = p;
297         int dc = (desc[1] & 0x02);
298         unsigned short desc_len;
299
300         desc_len = get_unaligned_be16(&desc[2]);
301         if (desc_len != 0x18) {
302                 pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
303                                 " %hu\n", desc_len);
304                 return -EINVAL;
305         }
306
307         xop->stdi = get_unaligned_be16(&desc[4]);
308         xop->dtdi = get_unaligned_be16(&desc[6]);
309
310         if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
311             xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
312                 pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
313                         XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
314                 return -EINVAL;
315         }
316
317         pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
318                 desc_len, xop->stdi, xop->dtdi, dc);
319
320         xop->nolb = get_unaligned_be16(&desc[10]);
321         xop->src_lba = get_unaligned_be64(&desc[12]);
322         xop->dst_lba = get_unaligned_be64(&desc[20]);
323         pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
324                 xop->nolb, (unsigned long long)xop->src_lba,
325                 (unsigned long long)xop->dst_lba);
326
327         if (dc != 0) {
328                 xop->dbl = get_unaligned_be24(&desc[29]);
329
330                 pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
331         }
332         return 0;
333 }
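/*
 * Byte offsets consumed above within a 0x02 (block -> block) segment
 * descriptor:
 *
 *   desc[0]       descriptor type code, 0x02
 *   desc[1]       bit 1: DC flag; when set, dbl is read below
 *   desc[2..3]    descriptor length, must be 0x18
 *   desc[4..5]    source CSCD descriptor ID (stdi)
 *   desc[6..7]    destination CSCD descriptor ID (dtdi)
 *   desc[10..11]  number of logical blocks to copy (nolb)
 *   desc[12..19]  source LBA, big-endian
 *   desc[20..27]  destination LBA, big-endian
 *   desc[29..31]  dbl, parsed only when the DC flag is set
 */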
334
335 static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
336                                 struct xcopy_op *xop, unsigned char *p,
337                                 unsigned int sdll, sense_reason_t *sense_ret)
338 {
339         unsigned char *desc = p;
340         unsigned int start = 0;
341         int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
342
343         *sense_ret = TCM_INVALID_PARAMETER_LIST;
344
345         if (offset != 0) {
346                 pr_err("XCOPY segment descriptor list length is not"
347                         " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
348                 *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
349                 return -EINVAL;
350         }
351         if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
352                 pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
353                         " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
354                 /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
355                 *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
356                 return -EINVAL;
357         }
358
359         while (start < sdll) {
360                 /*
361                  * Check segment descriptor type code for block -> block
362                  */
363                 switch (desc[0]) {
364                 case 0x02:
365                         rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
366                         if (rc < 0)
367                                 goto out;
368
369                         ret++;
370                         start += XCOPY_SEGMENT_DESC_LEN;
371                         desc += XCOPY_SEGMENT_DESC_LEN;
372                         break;
373                 default:
374                         pr_err("XCOPY unsupported segment descriptor"
375                                 "type: 0x%02x\n", desc[0]);
376                         *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
377                         goto out;
378                 }
379         }
380
381         return ret;
382
383 out:
384         return -EINVAL;
385 }
386
387 /*
388  * Start xcopy_pt ops
389  */
390
391 struct xcopy_pt_cmd {
392         struct se_cmd se_cmd;
393         struct completion xpt_passthrough_sem;
394         unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
395 };
396
397 struct se_portal_group xcopy_pt_tpg;
398 static struct se_session xcopy_pt_sess;
399 static struct se_node_acl xcopy_pt_nacl;
400
401 static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
402 {
403         return 0;
404 }
405
406 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
407 {
408         struct se_device *remote_dev;
409
410         if (xop->op_origin == XCOL_SOURCE_RECV_OP)
411                 remote_dev = xop->dst_dev;
412         else
413                 remote_dev = xop->src_dev;
414
415         pr_debug("Calling configfs_undepend_item for"
416                   " remote_dev: %p remote_dev->dev_group: %p\n",
417                   remote_dev, &remote_dev->dev_group.cg_item);
418
419         target_undepend_item(&remote_dev->dev_group.cg_item);
420 }
421
422 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
423 {
424         struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
425                                 struct xcopy_pt_cmd, se_cmd);
426
427         kfree(xpt_cmd);
428 }
429
430 static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
431 {
432         struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
433                                 struct xcopy_pt_cmd, se_cmd);
434
435         complete(&xpt_cmd->xpt_passthrough_sem);
436         return 0;
437 }
438
439 static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
440 {
441         return 0;
442 }
443
444 static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
445 {
446         return 0;
447 }
448
449 static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
450 {
451         return 0;
452 }
453
454 static const struct target_core_fabric_ops xcopy_pt_tfo = {
455         .fabric_name            = "xcopy-pt",
456         .get_cmd_state          = xcopy_pt_get_cmd_state,
457         .release_cmd            = xcopy_pt_release_cmd,
458         .check_stop_free        = xcopy_pt_check_stop_free,
459         .write_pending          = xcopy_pt_write_pending,
460         .queue_data_in          = xcopy_pt_queue_data_in,
461         .queue_status           = xcopy_pt_queue_status,
462 };
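/*
 * The stub fabric above lets EXTENDED_COPY drive backend READ/WRITE
 * pass-through commands without a real initiator session: all callbacks are
 * no-ops except xcopy_pt_check_stop_free(), which completes
 * xpt_passthrough_sem so target_xcopy_issue_pt_cmd() can wait synchronously
 * for I/O completion, and xcopy_pt_release_cmd(), which frees the embedding
 * struct xcopy_pt_cmd.
 */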
463
464 /*
465  * End xcopy_pt_ops
466  */
467
468 int target_xcopy_setup_pt(void)
469 {
470         int ret;
471
472         xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
473         if (!xcopy_wq) {
474                 pr_err("Unable to allocate xcopy_wq\n");
475                 return -ENOMEM;
476         }
477
478         memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
479         INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
480         INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
481         INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
482
483         xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
484
485         memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
486         INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
487         INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
488         memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
489         ret = transport_init_session(&xcopy_pt_sess);
490         if (ret < 0)
491                 return ret;
492
493         xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
494         xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
495
496         xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
497         xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
498
499         return 0;
500 }
501
502 void target_xcopy_release_pt(void)
503 {
504         if (xcopy_wq)
505                 destroy_workqueue(xcopy_wq);
506 }
507
508 /*
509  * target_xcopy_setup_pt_cmd - set up a pass-through command
510  * @xpt_cmd:     Data structure to initialize.
511  * @xop:         Describes the XCOPY operation received from an initiator.
512  * @se_dev:      Backend device to associate with @xpt_cmd if
513  *               @remote_port == true.
514  * @cdb:         SCSI CDB to be copied into @xpt_cmd.
515  * @remote_port: If false, use the LUN through which the XCOPY command has
516  *               been received. If true, use @se_dev->xcopy_lun.
517  * @alloc_mem:   Whether or not to allocate an SGL for the command.
518  *
519  * Set up a SCSI command (READ or WRITE) that will be used to execute an
520  * XCOPY command.
521  */
522 static int target_xcopy_setup_pt_cmd(
523         struct xcopy_pt_cmd *xpt_cmd,
524         struct xcopy_op *xop,
525         struct se_device *se_dev,
526         unsigned char *cdb,
527         bool remote_port,
528         bool alloc_mem)
529 {
530         struct se_cmd *cmd = &xpt_cmd->se_cmd;
531         sense_reason_t sense_rc;
532         int ret = 0, rc;
533
534         /*
535          * Setup LUN+port to honor reservations based upon xop->op_origin for
536          * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
537          */
538         if (remote_port) {
539                 cmd->se_lun = &se_dev->xcopy_lun;
540                 cmd->se_dev = se_dev;
541         } else {
542                 cmd->se_lun = xop->xop_se_cmd->se_lun;
543                 cmd->se_dev = xop->xop_se_cmd->se_dev;
544         }
545         cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
546
547         cmd->tag = 0;
548         sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
549         if (sense_rc) {
550                 ret = -EINVAL;
551                 goto out;
552         }
553
554         if (alloc_mem) {
555                 rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
556                                       cmd->data_length, false, false);
557                 if (rc < 0) {
558                         ret = rc;
559                         goto out;
560                 }
561                 /*
562                  * Set this bit so that transport_free_pages() allows the
563                  * caller to release SGLs + physical memory allocated by
564                  * target_alloc_sgl() above..
565                  */
566                 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
567         } else {
568                 /*
569                  * Here the previously allocated SGLs for the internal READ
570                  * are mapped zero-copy to the internal WRITE.
571                  */
572                 sense_rc = transport_generic_map_mem_to_cmd(cmd,
573                                         xop->xop_data_sg, xop->xop_data_nents,
574                                         NULL, 0);
575                 if (sense_rc) {
576                         ret = -EINVAL;
577                         goto out;
578                 }
579
580                 pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
581                          " %u\n", cmd->t_data_sg, cmd->t_data_nents);
582         }
583
584         return 0;
585
586 out:
587         return ret;
588 }
589
590 static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
591 {
592         struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
593         sense_reason_t sense_rc;
594
595         sense_rc = transport_generic_new_cmd(se_cmd);
596         if (sense_rc)
597                 return -EINVAL;
598
599         if (se_cmd->data_direction == DMA_TO_DEVICE)
600                 target_execute_cmd(se_cmd);
601
602         wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
603
604         pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
605                         se_cmd->scsi_status);
606
607         return (se_cmd->scsi_status) ? -EINVAL : 0;
608 }
609
610 static int target_xcopy_read_source(
611         struct se_cmd *ec_cmd,
612         struct xcopy_op *xop,
613         struct se_device *src_dev,
614         sector_t src_lba,
615         u32 src_sectors)
616 {
617         struct xcopy_pt_cmd *xpt_cmd;
618         struct se_cmd *se_cmd;
619         u32 length = (src_sectors * src_dev->dev_attrib.block_size);
620         int rc;
621         unsigned char cdb[16];
622         bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
623
624         xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
625         if (!xpt_cmd) {
626                 pr_err("Unable to allocate xcopy_pt_cmd\n");
627                 return -ENOMEM;
628         }
629         init_completion(&xpt_cmd->xpt_passthrough_sem);
630         se_cmd = &xpt_cmd->se_cmd;
631
632         memset(&cdb[0], 0, 16);
633         cdb[0] = READ_16;
634         put_unaligned_be64(src_lba, &cdb[2]);
635         put_unaligned_be32(src_sectors, &cdb[10]);
636         pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
637                 (unsigned long long)src_lba, src_sectors, length);
638
639         transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
640                               DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
641         xop->src_pt_cmd = xpt_cmd;
642
643         rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
644                                 remote_port, true);
645         if (rc < 0) {
646                 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
647                 transport_generic_free_cmd(se_cmd, 0);
648                 return rc;
649         }
650
651         xop->xop_data_sg = se_cmd->t_data_sg;
652         xop->xop_data_nents = se_cmd->t_data_nents;
653         pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
654                 " memory\n", xop->xop_data_sg, xop->xop_data_nents);
655
656         rc = target_xcopy_issue_pt_cmd(xpt_cmd);
657         if (rc < 0) {
658                 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
659                 transport_generic_free_cmd(se_cmd, 0);
660                 return rc;
661         }
662         /*
663          * Clear off the allocated t_data_sg, that has been saved for
664          * zero-copy WRITE submission reuse in struct xcopy_op..
665          */
666         se_cmd->t_data_sg = NULL;
667         se_cmd->t_data_nents = 0;
668
669         return 0;
670 }
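/*
 * For reference, the READ_16 CDB assembled above (the WRITE_16 CDB built in
 * target_xcopy_write_destination() below is laid out identically):
 *
 *   cdb[0]       opcode - READ_16 (0x88) or WRITE_16 (0x8a)
 *   cdb[2..9]    64-bit starting LBA, big-endian
 *   cdb[10..13]  32-bit transfer length in logical blocks, big-endian
 *   other bytes  left zero (no RDPROTECT/WRPROTECT, DPO/FUA or group number)
 */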
671
672 static int target_xcopy_write_destination(
673         struct se_cmd *ec_cmd,
674         struct xcopy_op *xop,
675         struct se_device *dst_dev,
676         sector_t dst_lba,
677         u32 dst_sectors)
678 {
679         struct xcopy_pt_cmd *xpt_cmd;
680         struct se_cmd *se_cmd;
681         u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
682         int rc;
683         unsigned char cdb[16];
684         bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
685
686         xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
687         if (!xpt_cmd) {
688                 pr_err("Unable to allocate xcopy_pt_cmd\n");
689                 return -ENOMEM;
690         }
691         init_completion(&xpt_cmd->xpt_passthrough_sem);
692         se_cmd = &xpt_cmd->se_cmd;
693
694         memset(&cdb[0], 0, 16);
695         cdb[0] = WRITE_16;
696         put_unaligned_be64(dst_lba, &cdb[2]);
697         put_unaligned_be32(dst_sectors, &cdb[10]);
698         pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
699                 (unsigned long long)dst_lba, dst_sectors, length);
700
701         transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
702                               DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
703         xop->dst_pt_cmd = xpt_cmd;
704
705         rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
706                                 remote_port, false);
707         if (rc < 0) {
708                 struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
709                 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
710                 /*
711                  * If the failure happened before the t_data_sg hand-off in
712                  * target_xcopy_setup_pt_cmd(), reset the memory pointers + clear the
713                  * flag so that core releases this memory on error during X-COPY WRITE I/O.
714                  */
715                 src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
716                 src_cmd->t_data_sg = xop->xop_data_sg;
717                 src_cmd->t_data_nents = xop->xop_data_nents;
718
719                 transport_generic_free_cmd(se_cmd, 0);
720                 return rc;
721         }
722
723         rc = target_xcopy_issue_pt_cmd(xpt_cmd);
724         if (rc < 0) {
725                 ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
726                 se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
727                 transport_generic_free_cmd(se_cmd, 0);
728                 return rc;
729         }
730
731         return 0;
732 }
733
734 static void target_xcopy_do_work(struct work_struct *work)
735 {
736         struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
737         struct se_cmd *ec_cmd = xop->xop_se_cmd;
738         struct se_device *src_dev, *dst_dev;
739         sector_t src_lba, dst_lba, end_lba;
740         unsigned int max_sectors;
741         int rc = 0;
742         unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
743
744         if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
745                 goto err_free;
746
747         if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
748                 goto err_free;
749
750         src_dev = xop->src_dev;
751         dst_dev = xop->dst_dev;
752         src_lba = xop->src_lba;
753         dst_lba = xop->dst_lba;
754         nolb = xop->nolb;
755         end_lba = src_lba + nolb;
756         /*
757          * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
758          * smallest max_sectors between src_dev + dev_dev, or
759          */
760         max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
761                           dst_dev->dev_attrib.hw_max_sectors);
762         max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);
763
764         max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));
765
766         pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
767                         nolb, max_nolb, (unsigned long long)end_lba);
768         pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
769                         (unsigned long long)src_lba, (unsigned long long)dst_lba);
770
771         while (src_lba < end_lba) {
772                 cur_nolb = min(nolb, max_nolb);
773
774                 pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
775                         " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
776
777                 rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
778                 if (rc < 0)
779                         goto out;
780
781                 src_lba += cur_nolb;
782                 pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
783                                 (unsigned long long)src_lba);
784
785                 pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
786                         " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);
787
788                 rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
789                                                 dst_lba, cur_nolb);
790                 if (rc < 0) {
791                         transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
792                         goto out;
793                 }
794
795                 dst_lba += cur_nolb;
796                 pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
797                                 (unsigned long long)dst_lba);
798
799                 copied_nolb += cur_nolb;
800                 nolb -= cur_nolb;
801
802                 transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
803                 xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
804
805                 transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
806         }
807
808         xcopy_pt_undepend_remotedev(xop);
809         kfree(xop);
810
811         pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
812                 (unsigned long long)src_lba, (unsigned long long)dst_lba);
813         pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
814                 copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);
815
816         pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
817         target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
818         return;
819
820 out:
821         xcopy_pt_undepend_remotedev(xop);
822
823 err_free:
824         kfree(xop);
825         /*
826          * Don't override an error scsi status if it has already been set
827          */
828         if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
829                 pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
830                         " CHECK_CONDITION -> sending response\n", rc);
831                 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
832         }
833         target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
834 }
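/*
 * Worked example of the chunking loop above, with illustrative numbers only:
 * assuming a 512-byte block size and an effective per-I/O limit of 2048
 * sectors, a segment descriptor requesting nolb = 5000 blocks is serviced as
 * three READ/WRITE pairs of 2048, 2048 and 904 blocks. src_lba and dst_lba
 * advance by cur_nolb after each pair, and the SGL allocated for the READ is
 * handed to the matching WRITE zero-copy before both commands are freed.
 */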
835
836 /*
837  * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
838  * fails.
839  */
840 static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
841 {
842         struct se_cmd *se_cmd = xop->xop_se_cmd;
843         unsigned char *p = NULL, *seg_desc;
844         unsigned int list_id, list_id_usage, sdll, inline_dl;
845         sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
846         int rc;
847         unsigned short tdll;
848
849         p = transport_kmap_data_sg(se_cmd);
850         if (!p) {
851                 pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
852                 return TCM_OUT_OF_RESOURCES;
853         }
854
855         list_id = p[0];
856         list_id_usage = (p[1] & 0x18) >> 3;
857
858         /*
859          * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
860          */
861         tdll = get_unaligned_be16(&p[2]);
862         sdll = get_unaligned_be32(&p[8]);
863         if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
864                 pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
865                        tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
866                 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
867                 goto out;
868         }
869
870         inline_dl = get_unaligned_be32(&p[12]);
871         if (inline_dl != 0) {
872                 pr_err("XCOPY with non zero inline data length\n");
873                 goto out;
874         }
875
876         if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
877                 pr_err("XCOPY parameter truncation: data length %u too small "
878                         "for tdll: %hu sdll: %u inline_dl: %u\n",
879                         se_cmd->data_length, tdll, sdll, inline_dl);
880                 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
881                 goto out;
882         }
883
884         pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
885                 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
886                 tdll, sdll, inline_dl);
887
888         /*
889          * skip over the target descriptors until segment descriptors
890          * have been passed - CSCD ids are needed to determine src and dest.
891          */
892         seg_desc = &p[16] + tdll;
893
894         rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
895                                                     sdll, &ret);
896         if (rc <= 0)
897                 goto out;
898
899         pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
900                                 rc * XCOPY_SEGMENT_DESC_LEN);
901
902         rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
903         if (rc <= 0)
904                 goto out;
905
906         if (xop->src_dev->dev_attrib.block_size !=
907             xop->dst_dev->dev_attrib.block_size) {
908                 pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
909                        " block_size: %u currently unsupported\n",
910                         xop->src_dev->dev_attrib.block_size,
911                         xop->dst_dev->dev_attrib.block_size);
912                 xcopy_pt_undepend_remotedev(xop);
913                 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
914                 goto out;
915         }
916
917         pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
918                                 rc * XCOPY_TARGET_DESC_LEN);
919         transport_kunmap_data_sg(se_cmd);
920         return TCM_NO_SENSE;
921
922 out:
923         if (p)
924                 transport_kunmap_data_sg(se_cmd);
925         return ret;
926 }
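/*
 * Offsets dereferenced above in the EXTENDED_COPY (LID1) parameter list,
 * following a XCOPY_HDR_LEN byte header:
 *
 *   p[0]        LIST IDENTIFIER
 *   p[1]        bits 4..3: LIST ID USAGE
 *   p[2..3]     target (CSCD) descriptor list length, tdll
 *   p[8..11]    segment descriptor list length, sdll
 *   p[12..15]   inline data length, must be zero here
 *   p[16..]     tdll bytes of 0xe4 target descriptors, immediately followed
 *               by sdll bytes of 0x02 segment descriptors
 */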
927
928 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
929 {
930         struct se_device *dev = se_cmd->se_dev;
931         struct xcopy_op *xop;
932         unsigned int sa;
933
934         if (!dev->dev_attrib.emulate_3pc) {
935                 pr_err("EXTENDED_COPY operation explicitly disabled\n");
936                 return TCM_UNSUPPORTED_SCSI_OPCODE;
937         }
938
939         sa = se_cmd->t_task_cdb[1] & 0x1f;
940         if (sa != 0x00) {
941                 pr_err("EXTENDED_COPY(LID4) not supported\n");
942                 return TCM_UNSUPPORTED_SCSI_OPCODE;
943         }
944
945         if (se_cmd->data_length == 0) {
946                 target_complete_cmd(se_cmd, SAM_STAT_GOOD);
947                 return TCM_NO_SENSE;
948         }
949         if (se_cmd->data_length < XCOPY_HDR_LEN) {
950                 pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
951                                 se_cmd->data_length, XCOPY_HDR_LEN);
952                 return TCM_PARAMETER_LIST_LENGTH_ERROR;
953         }
954
955         xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
956         if (!xop)
957                 goto err;
958         xop->xop_se_cmd = se_cmd;
959         INIT_WORK(&xop->xop_work, target_xcopy_do_work);
960         if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
961                 goto free;
962         return TCM_NO_SENSE;
963
964 free:
965         kfree(xop);
966
967 err:
968         return TCM_OUT_OF_RESOURCES;
969 }
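/*
 * Note that target_do_xcopy() only validates the CDB and minimum parameter
 * list length before queueing the operation: the actual descriptor parsing
 * and data movement run from target_xcopy_do_work() on xcopy_wq, so a long
 * running copy never blocks the submitting fabric thread, and status is
 * reported asynchronously via target_complete_cmd().
 */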
970
971 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
972 {
973         unsigned char *p;
974
975         p = transport_kmap_data_sg(se_cmd);
976         if (!p) {
977                 pr_err("transport_kmap_data_sg failed in"
978                        " target_rcr_operating_parameters\n");
979                 return TCM_OUT_OF_RESOURCES;
980         }
981
982         if (se_cmd->data_length < 54) {
983                 pr_err("Receive Copy Results Op Parameters length"
984                        " too small: %u\n", se_cmd->data_length);
985                 transport_kunmap_data_sg(se_cmd);
986                 return TCM_INVALID_CDB_FIELD;
987         }
988         /*
989          * Set SNLID=1 (Supports no List ID)
990          */
991         p[4] = 0x1;
992         /*
993          * MAXIMUM TARGET DESCRIPTOR COUNT
994          */
995         put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
996         /*
997          * MAXIMUM SEGMENT DESCRIPTOR COUNT
998          */
999         put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
1000         /*
1001          * MAXIMUM DESCRIPTOR LIST LENGTH
1002          */
1003         put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
1004         /*
1005          * MAXIMUM SEGMENT LENGTH
1006          */
1007         put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
1008         /*
1009          * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
1010          */
1011         put_unaligned_be32(0x0, &p[20]);
1012         /*
1013          * HELD DATA LIMIT
1014          */
1015         put_unaligned_be32(0x0, &p[24]);
1016         /*
1017          * MAXIMUM STREAM DEVICE TRANSFER SIZE
1018          */
1019         put_unaligned_be32(0x0, &p[28]);
1020         /*
1021          * TOTAL CONCURRENT COPIES
1022          */
1023         put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
1024         /*
1025          * MAXIMUM CONCURRENT COPIES
1026          */
1027         p[36] = RCR_OP_MAX_CONCURR_COPIES;
1028         /*
1029          * DATA SEGMENT GRANULARITY (log 2)
1030          */
1031         p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
1032         /*
1033          * INLINE DATA GRANULARITY (log 2)
1034          */
1035         p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
1036         /*
1037          * HELD DATA GRANULARITY
1038          */
1039         p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
1040         /*
1041          * IMPLEMENTED DESCRIPTOR LIST LENGTH
1042          */
1043         p[43] = 0x2;
1044         /*
1045          * List of implemented descriptor type codes (ordered)
1046          */
1047         p[44] = 0x02; /* Copy Block to Block device */
1048         p[45] = 0xe4; /* Identification descriptor target descriptor */
1049
1050         /*
1051          * AVAILABLE DATA (n-3)
1052          */
1053         put_unaligned_be32(42, &p[0]);
1054
1055         transport_kunmap_data_sg(se_cmd);
1056         target_complete_cmd(se_cmd, GOOD);
1057
1058         return TCM_NO_SENSE;
1059 }
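/*
 * The AVAILABLE DATA value of 42 written above is n - 3, where n = 45 is the
 * offset of the last byte filled in (the 0xe4 entry in the implemented
 * descriptor type code list), matching the two-byte implemented descriptor
 * list length declared at p[43].
 */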
1060
1061 sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
1062 {
1063         unsigned char *cdb = &se_cmd->t_task_cdb[0];
1064         int sa = (cdb[1] & 0x1f), list_id = cdb[2];
1065         sense_reason_t rc = TCM_NO_SENSE;
1066
1067         pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
1068                 " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
1069
1070         if (list_id != 0) {
1071                 pr_err("Receive Copy Results with non zero list identifier"
1072                        " not supported\n");
1073                 return TCM_INVALID_CDB_FIELD;
1074         }
1075
1076         switch (sa) {
1077         case RCR_SA_OPERATING_PARAMETERS:
1078                 rc = target_rcr_operating_parameters(se_cmd);
1079                 break;
1080         case RCR_SA_COPY_STATUS:
1081         case RCR_SA_RECEIVE_DATA:
1082         case RCR_SA_FAILED_SEGMENT_DETAILS:
1083         default:
1084                 pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
1085                 return TCM_INVALID_CDB_FIELD;
1086         }
1087
1088         return rc;
1089 }