/*******************************************************************************
 * Filename: target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq = NULL;

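/*
 * Generate an NAA IEEE Registered Extended (NAA 6h) designator for @dev,
 * filling the vendor-specific bytes via spc_parse_naa_6h_vendor_specific()
 * so it can be memcmp()'d against the WWNs carried in 0xe4 target
 * descriptors.
 */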
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}

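/*
 * Walk the global TCM device list looking for the backend whose generated
 * NAA WWN matches the non-local side of the 0xe4 descriptor pair (the
 * destination when the command arrived on the source, and vice versa).
 * On a match, take a configfs dependency so the device cannot be removed
 * while the copy is in flight.
 */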
static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					bool src)
{
	struct se_device *se_dev;
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = target_depend_item(&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for se_dev: %p"
			" se_dev->se_dev_group: %p\n", se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}

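/*
 * Parse a single 0xe4 (Identification descriptor) target descriptor,
 * validating code set, association, designator type and length, and
 * record the 16-byte NAA WWN as the source or destination of the copy.
 * When the WWN matches the receiving device, note the op origin so the
 * other side can be located via the global device list.
 */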
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
			(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for matching 16 byte length for NAA IEEE Registered Extended
	 * Assigned designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for NAA IEEE Registered Extended Assigned header..
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
			(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
				" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
				" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}

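/*
 * Walk the target descriptor list of an EXTENDED COPY parameter list.
 * Only 0xe4 descriptors are accepted, assumed to appear in
 * source -> destination order, and the side that is not the receiving
 * device is resolved via target_xcopy_locate_se_dev_e4().  Returns the
 * number of descriptors parsed, or -EINVAL on failure.
 */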
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not a"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY target descriptor supports a maximum of two"
			" src/dest descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for a target descriptor identification of type 0xe4,
		 * which is matched using VPD 0x83 WWPN comparison..
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			src = !src;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
				" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

	if (rc < 0)
		goto out;

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}

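/*
 * Parse a single 0x02 (block -> block) segment descriptor: source and
 * destination target descriptor indexes, the number of blocks to copy,
 * and the source and destination LBAs.  When the DC bit is set, the
 * 24-bit big-endian value in bytes 29-31 is saved into xop->dbl.
 */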
static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
			" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}

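/*
 * Walk the segment descriptor list; only the 0x02 block -> block type is
 * supported.  Returns the number of descriptors parsed, or -EINVAL.
 */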
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not a"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */

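/*
 * Each internal READ/WRITE is driven through a private xcopy_pt_cmd that
 * embeds the se_cmd plus a completion the submitter waits on; the statics
 * below provide a minimal in-kernel "xcopy-pt" fabric (port, tpg, nacl
 * and session) for those passthrough commands to run under.
 */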
struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};

static struct se_port xcopy_pt_port;
static struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for"
		" remote_dev: %p remote_dev->dev_group: %p\n",
		remote_dev, &remote_dev->dev_group.cg_item);

	target_undepend_item(&remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	kfree(xpt_cmd);
}

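/*
 * Called when the passthrough command completes; signal the waiter in
 * target_xcopy_issue_pt_cmd() rather than releasing the command here.
 */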
static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static const struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt_ops
 */

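/*
 * Allocate the xcopy workqueue and initialize the statically allocated
 * port, portal group, node ACL and session objects that back the
 * xcopy-pt fabric used for internal passthrough commands.
 */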
int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}

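/*
 * Point the passthrough command at either the emulated xcopy_pt_port (for
 * the remote side of a PUSH/PULL) or at the LUN + device the EXTENDED COPY
 * itself arrived on, so reservations are honored on the local port only.
 */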
static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation,
		 * when the CDB is received on the local source port, and READs
		 * blocks to WRITE on the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation,
		 * when the CDB is received on the local destination port, and
		 * READs blocks from the remote source port to WRITE on the
		 * local destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	}
}

static void target_xcopy_init_pt_lun(struct se_device *se_dev,
		struct se_cmd *pt_cmd, bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring the local port
	 * for reservations. The pt_cmd->se_lun pointer will be setup from
	 * within target_xcopy_setup_pt_port()
	 */
	if (remote_port) {
		pr_debug("Setup emulated se_dev: %p from se_dev\n",
			pt_cmd->se_dev);
		pt_cmd->se_lun = &se_dev->xcopy_lun;
		pt_cmd->se_dev = se_dev;
	}

	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}

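/*
 * Initialize a passthrough command from the given CDB: wire up LUN/port
 * state for reservations, parse the CDB, and either allocate a fresh SGL
 * (for the READ) or, when alloc_mem is false, map the already-filled SGL
 * from the READ zero-copy into the WRITE.
 */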
static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin for
	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
	 */
	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);

	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	cmd->tag = 0;
	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release SGLs + physical memory allocated by
		 * transport_generic_get_mem()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			" %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}

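/*
 * Dispatch the passthrough command and wait for xcopy_pt_check_stop_free()
 * to signal completion; any non-GOOD SCSI status is reported as -EINVAL.
 */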
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}

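/*
 * Build and issue an internal READ_16 against the copy source.  The SGL
 * allocated for the data-in payload is saved in xop->xop_data_sg so the
 * subsequent WRITE can reuse it zero-copy.
 */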
static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg, that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}

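/*
 * Build and issue an internal WRITE_16 against the copy destination,
 * reusing the SGL saved by target_xcopy_read_source().  On setup failure,
 * ownership of the SGL is handed back to the READ command so the normal
 * free path releases it.
 */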
static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		/*
		 * If the failure happened before the t_mem_list hand-off in
		 * target_xcopy_setup_pt_cmd(), reset the memory + clear the
		 * flag so that the core releases this memory on error during
		 * the X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}

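/*
 * Workqueue worker that performs the actual copy: loop READ_16 from the
 * source and WRITE_16 to the destination in chunks bounded by both
 * devices' hw_max_sectors (and XCOPY_MAX_SECTORS), then complete the
 * originating EXTENDED COPY command with GOOD or CHECK_CONDITION.
 */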
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
	 * smaller of the src_dev + dst_dev hw_max_sectors, capped at
	 * XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}

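/*
 * Entry point for the EXTENDED COPY (LID1) CDB: validate the parameter
 * list header, parse the target + segment descriptors, and queue the
 * actual data movement to xcopy_wq.  The command is completed
 * asynchronously from target_xcopy_do_work().
 */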
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
			xop->src_dev->dev_attrib.block_size,
			xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}

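/*
 * Fill out the RECEIVE COPY RESULTS / OPERATING PARAMETERS payload,
 * advertising SNLID=1 plus the RCR_OP_* implementation limits and the
 * two implemented descriptor type codes (0x02 and 0xe4).
 */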
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}

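/*
 * Entry point for RECEIVE COPY RESULTS: only the OPERATING PARAMETERS
 * service action with a zero list identifier is supported.
 */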
sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
	unsigned char *cdb = &se_cmd->t_task_cdb[0];
	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
	sense_reason_t rc = TCM_NO_SENSE;

	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

	if (list_id != 0) {
		pr_err("Receive Copy Results with non zero list identifier"
			" not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	switch (sa) {
	case RCR_SA_OPERATING_PARAMETERS:
		rc = target_rcr_operating_parameters(se_cmd);
		break;
	case RCR_SA_COPY_STATUS:
	case RCR_SA_RECEIVE_DATA:
	case RCR_SA_FAILED_SEGMENT_DETAILS:
	default:
		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
		return TCM_INVALID_CDB_FIELD;
	}

	return rc;
}