/*******************************************************************************
 * Filename: target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq = NULL;
/*
 * From target_core_spc.c
 */
extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
/*
 * From target_core_device.c
 */
extern struct mutex g_device_mutex;
extern struct list_head g_device_list;
/*
 * From target_core_configfs.c
 */
extern struct configfs_subsystem *target_core_subsystem[];

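/*
 * Generate the 16-byte NAA IEEE Registered Extended designator for @dev,
 * matching the format reported in the EVPD 0x83 Device Identification page,
 * so it can be compared against XCOPY 0xe4 target descriptors.
 */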
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}

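/*
 * Walk the global device list looking for a backend whose generated NAA
 * designator matches the non-local side of the 0xe4 target descriptors,
 * and take a configfs dependency on it so it cannot be released while the
 * copy is in flight.
 */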
static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
				bool src)
{
	struct se_device *se_dev;
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src == true)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src == true) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = configfs_depend_item(subsys,
				&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
			" se_dev->se_dev_group: %p\n", subsys, se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}

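/*
 * Parse a single 0xe4 identification target descriptor, validating the
 * code set, association, designator type and length, and save the NAA WWN
 * as the source or destination of the copy based upon @src.
 */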
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
				(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for matching 16 byte length for NAA IEEE Registered Extended
	 * Assigned designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for NAA IEEE Registered Extended Assigned header..
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
				(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src == true) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
					" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
					" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}

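/*
 * Walk the target descriptor list from the EXTENDED_COPY parameter data,
 * accepting only 0xe4 identification descriptors, then locate the remote
 * se_device for the non-local side.  Returns the number of descriptors
 * processed, or negative on error.
 */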
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not a"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY target descriptor supports a maximum of"
			" two src/dest descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for the 0xe4 identification target descriptor type,
		 * which is matched using the VPD 0x83 WWN..
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			if (src == true)
				src = false;
			else
				src = true;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
					" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

	if (rc < 0)
		goto out;

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}

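/*
 * Parse a single 0x02 (block -> block) segment descriptor, extracting the
 * source/destination target descriptor indexes, number of blocks, LBAs and
 * the optional device type specific block length when the DC bit is set.
 */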
static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
				" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}

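/*
 * Walk the segment descriptor list, accepting only 0x02 block -> block
 * descriptors.  Returns the number of descriptors processed, or negative
 * on error.
 */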
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not a"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */

struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};

static struct se_port xcopy_pt_port;
static struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for subsys: %p"
		  " remote_dev: %p remote_dev->dev_group: %p\n",
		  subsys, remote_dev, &remote_dev->dev_group.cg_item);

	configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	if (xpt_cmd->remote_port)
		kfree(se_cmd->se_lun);

	kfree(xpt_cmd);
}

static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_task_tag		= xcopy_pt_get_tag,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt_ops
 */

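/*
 * Allocate the xcopy workqueue and initialize the statically allocated
 * passthrough port, portal group, node ACL and session used to submit the
 * internal READ/WRITE commands generated for EXTENDED_COPY.
 */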
int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}

static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation,
		 * where the CDB is received on the local source port, and blocks
		 * are READ locally then WRITTEN to the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
					" %p\n", pt_cmd->se_dev);
			pt_cmd->se_lun = ec_cmd->se_lun;
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
					pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation,
		 * where the CDB is received on the local destination port, and
		 * blocks are READ from the remote source port then WRITTEN to
		 * the local destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
					" %p\n", pt_cmd->se_dev);
			pt_cmd->se_lun = ec_cmd->se_lun;
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
					pt_cmd->se_lun);
		}
	}
}

static int target_xcopy_init_pt_lun(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	struct se_cmd *pt_cmd,
	bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring the local port for
	 * reservations.  The pt_cmd->se_lun pointer will be setup from within
	 * target_xcopy_setup_pt_port()
	 */
	if (remote_port == false) {
		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
		return 0;
	}

	pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
	if (!pt_cmd->se_lun) {
		pr_err("Unable to allocate pt_cmd->se_lun\n");
		return -ENOMEM;
	}
	init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
	INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
	spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
	spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
	spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);

	pt_cmd->se_dev = se_dev;

	pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
	pt_cmd->se_lun->lun_se_dev = se_dev;
	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;

	pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
		pt_cmd->se_lun->lun_se_dev);

	return 0;
}

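/*
 * Prepare an internal passthrough command: setup the LUN/port for
 * reservation checking, build the command from @cdb, and either allocate a
 * fresh SGL (READ) or map the existing xop->xop_data_sg zero-copy (WRITE).
 */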
static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin for
	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
	 */
	rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
	if (rc < 0) {
		ret = rc;
		goto out;
	}
	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release SGLs + physical memory allocated by
		 * transport_generic_get_mem()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	if (remote_port == true)
		kfree(cmd->se_lun);
	return ret;
}

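/*
 * Dispatch a prepared passthrough command and wait for
 * xcopy_pt_check_stop_free() to signal completion, returning -EINVAL on a
 * non-GOOD SCSI status.
 */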
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}

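/*
 * Build and issue an internal READ_16 to the source device for @src_sectors
 * starting at @src_lba, saving the allocated SGL in the xcopy_op so it can
 * be reused zero-copy by the following WRITE.
 */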
static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg, that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}

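/*
 * Build and issue an internal WRITE_16 to the destination device, mapping
 * the data previously read from the source device zero-copy into the new
 * command.
 */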
static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		/*
		 * If the failure happened before the t_mem_list hand-off in
		 * target_xcopy_setup_pt_cmd(), reset memory + clear the flag so
		 * that core releases this memory on error during X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}

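/*
 * Process one queued EXTENDED_COPY operation from the xcopy workqueue,
 * looping over READ/WRITE pairs of at most hw_max_sectors (capped at
 * XCOPY_MAX_SECTORS) blocks until all NOLB blocks have been copied, then
 * complete the originating command with GOOD or CHECK_CONDITION status.
 */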
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
	 * smaller hw_max_sectors between src_dev + dst_dev, capped at
	 * XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}

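/*
 * Entry point for EXTENDED_COPY (LID1) CDB emulation: validate the parameter
 * list header, parse the target + segment descriptors, and queue the actual
 * copy to the xcopy workqueue so the receive path does not block.
 */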
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
		       xop->src_dev->dev_attrib.block_size,
		       xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}

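/*
 * Fill in the RECEIVE COPY RESULTS / OPERATING PARAMETERS payload describing
 * the copy limits supported by this implementation.
 */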
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}

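/*
 * Entry point for RECEIVE COPY RESULTS CDB emulation; only the OPERATING
 * PARAMETERS service action with a zero list identifier is supported.
 */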
sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
	unsigned char *cdb = &se_cmd->t_task_cdb[0];
	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
	sense_reason_t rc = TCM_NO_SENSE;

	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

	if (list_id != 0) {
		pr_err("Receive Copy Results with non zero list identifier"
		       " not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	switch (sa) {
	case RCR_SA_OPERATING_PARAMETERS:
		rc = target_rcr_operating_parameters(se_cmd);
		break;
	case RCR_SA_COPY_STATUS:
	case RCR_SA_RECEIVE_DATA:
	case RCR_SA_FAILED_SEGMENT_DETAILS:
	default:
		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
		return TCM_INVALID_CDB_FIELD;
	}

	return rc;
}