/*-
 * Copyright (c) 2014 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_tpc.h>
#include <cam/ctl/ctl_error.h>

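/*
 * Implementation limits for EXTENDED COPY and ROD token operations,
 * advertised to initiators via the Third-party Copy VPD page and
 * RECEIVE COPY OPERATING PARAMETERS.
 */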
#define	TPC_MAX_CSCDS	64
#define	TPC_MAX_SEGS	64
#define	TPC_MAX_SEG	0
#define	TPC_MAX_LIST	8192
#define	TPC_MAX_INLINE	0
#define	TPC_MAX_LISTS	255
#define	TPC_MAX_IO_SIZE	(1024 * 1024)
#define	TPC_MAX_IOCHUNK_SIZE	(TPC_MAX_IO_SIZE * 16)
#define	TPC_MIN_TOKEN_TIMEOUT	1
#define	TPC_DFL_TOKEN_TIMEOUT	60
#define	TPC_MAX_TOKEN_TIMEOUT	600

MALLOC_DEFINE(M_CTL_TPC, "ctltpc", "CTL TPC");

typedef enum {
	TPC_ERR_RETRY		= 0x000,
	TPC_ERR_FAIL		= 0x001,
	TPC_ERR_MASK		= 0x0ff,
	TPC_ERR_NO_DECREMENT	= 0x100
} tpc_error_action;

struct tpc_list;
TAILQ_HEAD(runl, tpc_io);
struct tpc_io {
	union ctl_io		*io;
	uint64_t		 lun;
	struct tpc_list		*list;
	struct runl		 run;
	TAILQ_ENTRY(tpc_io)	 rlinks;
	TAILQ_ENTRY(tpc_io)	 links;
};

struct tpc_token {
	uint8_t			 token[512];
	uint64_t		 lun;
	uint32_t		 blocksize;
	uint8_t			*params;
	struct scsi_range_desc	*range;
	int			 nrange;
	int			 active;
	time_t			 last_active;
	uint32_t		 timeout;
	TAILQ_ENTRY(tpc_token)	 links;
};

struct tpc_list {
	uint8_t			 service_action;
	int			 init_port;
	uint32_t		 init_idx;
	uint32_t		 list_id;
	uint8_t			 flags;
	uint8_t			*params;
	struct scsi_ec_cscd	*cscd;
	struct scsi_ec_segment	*seg[TPC_MAX_SEGS];
	uint8_t			*inl;
	int			 ncscd;
	int			 nseg;
	int			 leninl;
	struct tpc_token	*token;
	struct scsi_range_desc	*range;
	int			 nrange;
	off_t			 offset_into_rod;

	int			 curseg;
	off_t			 cursectors;
	off_t			 curbytes;
	int			 curops;
	int			 stage;
	uint8_t			*buf;
	off_t			 segsectors;
	off_t			 segbytes;
	int			 tbdio;
	int			 error;
	int			 abort;
	int			 completed;
	time_t			 last_active;
	TAILQ_HEAD(, tpc_io)	 allio;
	struct scsi_sense_data	 sense_data;
	uint8_t			 sense_len;
	uint8_t			 scsi_status;
	struct ctl_scsiio	*ctsio;
	struct ctl_lun		*lun;
	int			 res_token_valid;
	uint8_t			 res_token[512];
	TAILQ_ENTRY(tpc_list)	 links;
};

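/*
 * Once-per-second housekeeping callout: reap completed copy lists and
 * inactive ROD tokens once their inactivity timeouts have expired.
 */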
static void
tpc_timeout(void *arg)
{
	struct ctl_softc *softc = arg;
	struct ctl_lun *lun;
	struct tpc_token *token, *ttoken;
	struct tpc_list *list, *tlist;

	/* Free completed lists with expired timeout. */
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		TAILQ_FOREACH_SAFE(list, &lun->tpc_lists, links, tlist) {
			if (!list->completed || time_uptime < list->last_active +
			    TPC_DFL_TOKEN_TIMEOUT)
				continue;
			TAILQ_REMOVE(&lun->tpc_lists, list, links);
			free(list, M_CTL);
		}
		mtx_unlock(&lun->lun_lock);
	}

	/* Free inactive ROD tokens with expired timeout. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->active ||
		    time_uptime < token->last_active + token->timeout + 1)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	callout_schedule(&softc->tpc_timeout, hz);
}

void
ctl_tpc_init(struct ctl_softc *softc)
{

	mtx_init(&softc->tpc_lock, "CTL TPC mutex", NULL, MTX_DEF);
	TAILQ_INIT(&softc->tpc_tokens);
	callout_init_mtx(&softc->tpc_timeout, &softc->ctl_lock, 0);
	callout_reset(&softc->tpc_timeout, hz, tpc_timeout, softc);
}

void
ctl_tpc_shutdown(struct ctl_softc *softc)
{
	struct tpc_token *token;

	callout_drain(&softc->tpc_timeout);

	/* Free ROD tokens. */
	mtx_lock(&softc->tpc_lock);
	while ((token = TAILQ_FIRST(&softc->tpc_tokens)) != NULL) {
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
	mtx_destroy(&softc->tpc_lock);
}

void
ctl_tpc_lun_init(struct ctl_lun *lun)
{

	TAILQ_INIT(&lun->tpc_lists);
}

void
ctl_tpc_lun_shutdown(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct tpc_list *list;
	struct tpc_token *token, *ttoken;

	/* Free lists for this LUN. */
	while ((list = TAILQ_FIRST(&lun->tpc_lists)) != NULL) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		KASSERT(list->completed,
		    ("Not completed TPC (%p) on shutdown", list));
		free(list, M_CTL);
	}

	/* Free ROD tokens for this LUN. */
	mtx_lock(&softc->tpc_lock);
	TAILQ_FOREACH_SAFE(token, &softc->tpc_tokens, links, ttoken) {
		if (token->lun != lun->lun || token->active)
			continue;
		TAILQ_REMOVE(&softc->tpc_tokens, token, links);
		free(token->params, M_CTL);
		free(token, M_CTL);
	}
	mtx_unlock(&softc->tpc_lock);
}

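/*
 * Build the Third-party Copy VPD page (0x8F), advertising the supported
 * commands, descriptor types, ROD token formats and the copy limits
 * defined above.
 */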
int
ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_tpc *tpc_ptr;
	struct scsi_vpd_tpc_descriptor *d_ptr;
	struct scsi_vpd_tpc_descriptor_bdrl *bdrl_ptr;
	struct scsi_vpd_tpc_descriptor_sc *sc_ptr;
	struct scsi_vpd_tpc_descriptor_sc_descr *scd_ptr;
	struct scsi_vpd_tpc_descriptor_pd *pd_ptr;
	struct scsi_vpd_tpc_descriptor_sd *sd_ptr;
	struct scsi_vpd_tpc_descriptor_sdid *sdid_ptr;
	struct scsi_vpd_tpc_descriptor_rtf *rtf_ptr;
	struct scsi_vpd_tpc_descriptor_rtf_block *rtfb_ptr;
	struct scsi_vpd_tpc_descriptor_srt *srt_ptr;
	struct scsi_vpd_tpc_descriptor_srtd *srtd_ptr;
	struct scsi_vpd_tpc_descriptor_gco *gco_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_tpc) +
	    sizeof(struct scsi_vpd_tpc_descriptor_bdrl) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sc) +
	     2 * sizeof(struct scsi_vpd_tpc_descriptor_sc_descr) + 11, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_pd) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sd) + 4, 4) +
	    roundup2(sizeof(struct scsi_vpd_tpc_descriptor_sdid) + 2, 4) +
	    sizeof(struct scsi_vpd_tpc_descriptor_rtf) +
	     sizeof(struct scsi_vpd_tpc_descriptor_rtf_block) +
	    sizeof(struct scsi_vpd_tpc_descriptor_srt) +
	     2*sizeof(struct scsi_vpd_tpc_descriptor_srtd) +
	    sizeof(struct scsi_vpd_tpc_descriptor_gco);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	tpc_ptr = (struct scsi_vpd_tpc *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		tpc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		tpc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	tpc_ptr->page_code = SVPD_SCSI_TPC;
	scsi_ulto2b(data_len - 4, tpc_ptr->page_length);

	/* Block Device ROD Limits */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)&tpc_ptr->descr[0];
	bdrl_ptr = (struct scsi_vpd_tpc_descriptor_bdrl *)d_ptr;
	scsi_ulto2b(SVPD_TPC_BDRL, bdrl_ptr->desc_type);
	scsi_ulto2b(sizeof(*bdrl_ptr) - 4, bdrl_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_SEGS, bdrl_ptr->maximum_ranges);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    bdrl_ptr->maximum_inactivity_timeout);
	scsi_ulto4b(TPC_DFL_TOKEN_TIMEOUT,
	    bdrl_ptr->default_inactivity_timeout);
	scsi_u64to8b(0, bdrl_ptr->maximum_token_transfer_size);
	scsi_u64to8b(0, bdrl_ptr->optimal_transfer_count);

	/* Supported commands */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sc_ptr = (struct scsi_vpd_tpc_descriptor_sc *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SC, sc_ptr->desc_type);
	sc_ptr->list_length = 2 * sizeof(*scd_ptr) + 11;
	scsi_ulto2b(roundup2(1 + sc_ptr->list_length, 4), sc_ptr->desc_length);
	scd_ptr = &sc_ptr->descr[0];
	scd_ptr->opcode = EXTENDED_COPY;
	scd_ptr->sa_length = 5;
	scd_ptr->supported_service_actions[0] = EC_EC_LID1;
	scd_ptr->supported_service_actions[1] = EC_EC_LID4;
	scd_ptr->supported_service_actions[2] = EC_PT;
	scd_ptr->supported_service_actions[3] = EC_WUT;
	scd_ptr->supported_service_actions[4] = EC_COA;
	scd_ptr = (struct scsi_vpd_tpc_descriptor_sc_descr *)
	    &scd_ptr->supported_service_actions[scd_ptr->sa_length];
	scd_ptr->opcode = RECEIVE_COPY_STATUS;
	scd_ptr->sa_length = 6;
	scd_ptr->supported_service_actions[0] = RCS_RCS_LID1;
	scd_ptr->supported_service_actions[1] = RCS_RCFD;
	scd_ptr->supported_service_actions[2] = RCS_RCS_LID4;
	scd_ptr->supported_service_actions[3] = RCS_RCOP;
	scd_ptr->supported_service_actions[4] = RCS_RRTI;
	scd_ptr->supported_service_actions[5] = RCS_RART;

	/* Parameter data. */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	pd_ptr = (struct scsi_vpd_tpc_descriptor_pd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_PD, pd_ptr->desc_type);
	scsi_ulto2b(sizeof(*pd_ptr) - 4, pd_ptr->desc_length);
	scsi_ulto2b(TPC_MAX_CSCDS, pd_ptr->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, pd_ptr->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, pd_ptr->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_INLINE, pd_ptr->maximum_inline_data_length);

	/* Supported Descriptors */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sd_ptr = (struct scsi_vpd_tpc_descriptor_sd *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SD, sd_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sd_ptr) - 4 + 4, 4), sd_ptr->desc_length);
	sd_ptr->list_length = 4;
	sd_ptr->supported_descriptor_codes[0] = EC_SEG_B2B;
	sd_ptr->supported_descriptor_codes[1] = EC_SEG_VERIFY;
	sd_ptr->supported_descriptor_codes[2] = EC_SEG_REGISTER_KEY;
	sd_ptr->supported_descriptor_codes[3] = EC_CSCD_ID;

	/* Supported CSCD Descriptor IDs */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	sdid_ptr = (struct scsi_vpd_tpc_descriptor_sdid *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SDID, sdid_ptr->desc_type);
	scsi_ulto2b(roundup2(sizeof(*sdid_ptr) - 4 + 2, 4), sdid_ptr->desc_length);
	scsi_ulto2b(2, sdid_ptr->list_length);
	scsi_ulto2b(0xffff, &sdid_ptr->supported_descriptor_ids[0]);

	/* ROD Token Features */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	rtf_ptr = (struct scsi_vpd_tpc_descriptor_rtf *)d_ptr;
	scsi_ulto2b(SVPD_TPC_RTF, rtf_ptr->desc_type);
	scsi_ulto2b(sizeof(*rtf_ptr) - 4 + sizeof(*rtfb_ptr), rtf_ptr->desc_length);
	rtf_ptr->remote_tokens = 0;
	scsi_ulto4b(TPC_MIN_TOKEN_TIMEOUT, rtf_ptr->minimum_token_lifetime);
	scsi_ulto4b(UINT32_MAX, rtf_ptr->maximum_token_lifetime);
	scsi_ulto4b(TPC_MAX_TOKEN_TIMEOUT,
	    rtf_ptr->maximum_token_inactivity_timeout);
	scsi_ulto2b(sizeof(*rtfb_ptr), rtf_ptr->type_specific_features_length);
	rtfb_ptr = (struct scsi_vpd_tpc_descriptor_rtf_block *)
	    &rtf_ptr->type_specific_features;
	rtfb_ptr->type_format = SVPD_TPC_RTF_BLOCK;
	scsi_ulto2b(sizeof(*rtfb_ptr) - 4, rtfb_ptr->desc_length);
	scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
	scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
	scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_to_token_per_segment);
	scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
	    rtfb_ptr->optimal_bytes_from_token_per_segment);

	/* Supported ROD Tokens */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	srt_ptr = (struct scsi_vpd_tpc_descriptor_srt *)d_ptr;
	scsi_ulto2b(SVPD_TPC_SRT, srt_ptr->desc_type);
	scsi_ulto2b(sizeof(*srt_ptr) - 4 + 2*sizeof(*srtd_ptr), srt_ptr->desc_length);
	scsi_ulto2b(2*sizeof(*srtd_ptr), srt_ptr->rod_type_descriptors_length);
	srtd_ptr = (struct scsi_vpd_tpc_descriptor_srtd *)
	    &srt_ptr->rod_type_descriptors;
	scsi_ulto4b(ROD_TYPE_AUR, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN | SVPD_TPC_SRTD_TOUT;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);
	srtd_ptr++;
	scsi_ulto4b(ROD_TYPE_BLOCK_ZERO, srtd_ptr->rod_type);
	srtd_ptr->flags = SVPD_TPC_SRTD_TIN;
	scsi_ulto2b(0, srtd_ptr->preference_indicator);

	/* General Copy Operations */
	d_ptr = (struct scsi_vpd_tpc_descriptor *)
	    (&d_ptr->parameters[0] + scsi_2btoul(d_ptr->desc_length));
	gco_ptr = (struct scsi_vpd_tpc_descriptor_gco *)d_ptr;
	scsi_ulto2b(SVPD_TPC_GCO, gco_ptr->desc_type);
	scsi_ulto2b(sizeof(*gco_ptr) - 4, gco_ptr->desc_length);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->total_concurrent_copies);
	scsi_ulto4b(TPC_MAX_LISTS, gco_ptr->maximum_identified_concurrent_copies);
	scsi_ulto4b(TPC_MAX_SEG, gco_ptr->maximum_segment_length);
	gco_ptr->data_segment_granularity = 0;
	gco_ptr->inline_data_granularity = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}

int
ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio)
{
	struct scsi_receive_copy_operating_parameters *cdb;
	struct scsi_receive_copy_operating_parameters_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_receive_copy_operating_parameters\n"));

	cdb = (struct scsi_receive_copy_operating_parameters *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(*data) + 4;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_operating_parameters_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + 4, data->length);
	data->snlid = RCOP_SNLID;
	scsi_ulto2b(TPC_MAX_CSCDS, data->maximum_cscd_descriptor_count);
	scsi_ulto2b(TPC_MAX_SEGS, data->maximum_segment_descriptor_count);
	scsi_ulto4b(TPC_MAX_LIST, data->maximum_descriptor_list_length);
	scsi_ulto4b(TPC_MAX_SEG, data->maximum_segment_length);
	scsi_ulto4b(TPC_MAX_INLINE, data->maximum_inline_data_length);
	scsi_ulto4b(0, data->held_data_limit);
	scsi_ulto4b(0, data->maximum_stream_device_transfer_size);
	scsi_ulto2b(TPC_MAX_LISTS, data->total_concurrent_copies);
	data->maximum_concurrent_copies = TPC_MAX_LISTS;
	data->data_segment_granularity = 0;
	data->inline_data_granularity = 0;
	data->held_data_granularity = 0;
	data->implemented_descriptor_list_length = 4;
	data->list_of_implemented_descriptor_type_codes[0] = EC_SEG_B2B;
	data->list_of_implemented_descriptor_type_codes[1] = EC_SEG_VERIFY;
	data->list_of_implemented_descriptor_type_codes[2] = EC_SEG_REGISTER_KEY;
	data->list_of_implemented_descriptor_type_codes[3] = EC_CSCD_ID;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

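/*
 * Look up a copy list by list ID and initiator index.  Lists created
 * with list ID usage NONE are intentionally skipped, as their IDs are
 * not meaningful.
 */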
static struct tpc_list *
tpc_find_list(struct ctl_lun *lun, uint32_t list_id, uint32_t init_idx)
{
	struct tpc_list *list;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	TAILQ_FOREACH(list, &lun->tpc_lists, links) {
		if ((list->flags & EC_LIST_ID_USAGE_MASK) !=
		     EC_LIST_ID_USAGE_NONE && list->list_id == list_id &&
		    list->init_idx == init_idx)
			break;
	}
	return (list);
}

int
ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid1 *cdb;
	struct scsi_receive_copy_status_lid1_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid1\n"));

	cdb = (struct scsi_receive_copy_status_lid1 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid1_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4, data->available_data);
	if (list_copy.completed) {
		if (list_copy.error || list_copy.abort)
			data->copy_command_status = RCS_CCS_ERROR;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG;
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	if (list_copy.curbytes <= UINT32_MAX) {
		data->transfer_count_units = RCS_TC_BYTES;
		scsi_ulto4b(list_copy.curbytes, data->transfer_count);
	} else {
		data->transfer_count_units = RCS_TC_MBYTES;
		scsi_ulto4b(list_copy.curbytes >> 20, data->transfer_count);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_failure_details *cdb;
	struct scsi_receive_copy_failure_details_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_failure_details\n"));

	cdb = (struct scsi_receive_copy_failure_details *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = cdb->list_identifier;
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL || !list->completed) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	TAILQ_REMOVE(&lun->tpc_lists, list, links);
	free(list, M_CTL);
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_failure_details_data *)ctsio->kern_data_ptr;
	if (list_copy.completed && (list_copy.error || list_copy.abort)) {
		scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
		    data->available_data);
		data->copy_command_status = RCS_CCS_ERROR;
	} else
		scsi_ulto4b(0, data->available_data);
	scsi_ulto2b(list_copy.sense_len, data->sense_data_length);
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_receive_copy_status_lid4 *cdb;
	struct scsi_receive_copy_status_lid4_data *data;
	struct tpc_list *list;
	struct tpc_list list_copy;
	int retval;
	int alloc_len, total_len;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_receive_copy_status_lid4\n"));

	cdb = (struct scsi_receive_copy_status_lid4 *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list_copy = *list;
	if (list->completed) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	}
	mtx_unlock(&lun->lun_lock);

	total_len = sizeof(*data) + list_copy.sense_len;
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len,
	    data->available_data);
	data->response_to_service_action = list_copy.service_action;
	if (list_copy.completed) {
		if (list_copy.error)
			data->copy_command_status = RCS_CCS_ERROR;
		else if (list_copy.abort)
			data->copy_command_status = RCS_CCS_ABORTED;
		else
			data->copy_command_status = RCS_CCS_COMPLETED;
	} else
		data->copy_command_status = RCS_CCS_INPROG_FG;
	scsi_ulto2b(list_copy.curops, data->operation_counter);
	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
	data->transfer_count_units = RCS_TC_BYTES;
	scsi_u64to8b(list_copy.curbytes, data->transfer_count);
	scsi_ulto2b(list_copy.curseg, data->segments_processed);
	data->length_of_the_sense_data_field = list_copy.sense_len;
	data->sense_data_length = list_copy.sense_len;
	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_copy_operation_abort(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_copy_operation_abort *cdb;
	struct tpc_list *list;
	int retval;
	uint32_t list_id;

	CTL_DEBUG_PRINT(("ctl_copy_operation_abort\n"));

	cdb = (struct scsi_copy_operation_abort *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	list_id = scsi_4btoul(cdb->list_identifier);
	mtx_lock(&lun->lun_lock);
	list = tpc_find_list(lun, list_id,
	    ctl_get_resindex(&ctsio->io_hdr.nexus));
	if (list == NULL) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}
	list->abort = 1;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (retval);
}

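/*
 * Resolve a CSCD descriptor index to a LUN number, optionally reporting
 * the logical block size, physical block size and physical block offset.
 * Index 0xffff refers to the LUN the copy command was received on.
 */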
static uint64_t
tpc_resolve(struct tpc_list *list, uint16_t idx, uint32_t *ss,
    uint32_t *pb, uint32_t *pbo)
{

	if (idx == 0xffff) {
		if (ss && list->lun->be_lun)
			*ss = list->lun->be_lun->blocksize;
		if (pb && list->lun->be_lun)
			*pb = list->lun->be_lun->blocksize <<
			    list->lun->be_lun->pblockexp;
		if (pbo && list->lun->be_lun)
			*pbo = list->lun->be_lun->blocksize *
			    list->lun->be_lun->pblockoff;
		return (list->lun->lun);
	}
	if (idx >= list->ncscd)
		return (UINT64_MAX);
	return (tpcl_resolve(list->lun->ctl_softc,
	    list->init_port, &list->cscd[idx], ss, pb, pbo));
}

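/*
 * Process a block-to-block copy segment.  On the first call (stage 0)
 * the segment is split into chunks of at most TPC_MAX_IO_SIZE bytes,
 * aligned to physical block boundaries where possible, and a chained
 * read/write pair is queued per chunk.  On reentry (stage 1) the I/Os
 * are released and the result is reported.
 */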
static int
tpc_process_b2b(struct tpc_list *list)
{
	struct scsi_ec_segment_b2b *seg;
	struct scsi_ec_cscd_dtsp *sdstp, *ddstp;
	struct tpc_io *tior, *tiow;
	struct runl run;
	uint64_t sl, dl;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	int numlba;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage == 1) {
		while ((tior = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tior, links);
			ctl_free_io(tior->io);
			free(tior, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_b2b *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), &srcblock, NULL, NULL);
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), &dstblock, &pb, &pbo);
	if (sl >= CTL_MAX_LUNS || dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}
	if (pbo > 0)
		pbo = pb - pbo;
	sdstp = &list->cscd[scsi_2btoul(seg->src_cscd)].dtsp;
	if (scsi_3btoul(sdstp->block_length) != 0)
		srcblock = scsi_3btoul(sdstp->block_length);
	ddstp = &list->cscd[scsi_2btoul(seg->dst_cscd)].dtsp;
	if (scsi_3btoul(ddstp->block_length) != 0)
		dstblock = scsi_3btoul(ddstp->block_length);
	numlba = scsi_2btoul(seg->number_of_blocks);
	if (seg->flags & EC_SEG_DC)
		numbytes = (off_t)numlba * dstblock;
	else
		numbytes = (off_t)numlba * srcblock;
	srclba = scsi_8btou64(seg->src_lba);
	dstlba = scsi_8btou64(seg->dst_lba);

//	printf("Copy %ju bytes from %ju @ %ju to %ju @ %ju\n",
//	    (uintmax_t)numbytes, sl, scsi_8btou64(seg->src_lba),
//	    dl, scsi_8btou64(seg->dst_lba));

	if (numbytes == 0)
		return (CTL_RETVAL_COMPLETE);

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK);
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
	donebytes = 0;
	TAILQ_INIT(&run);
	list->tbdio = 0;
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = sl;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = dl;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(&run, tior, rlinks);
		list->tbdio++;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_verify(struct tpc_list *list)
{
	struct scsi_ec_segment_verify *seg;
	struct tpc_io *tio;
	uint64_t sl;

	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_verify *)list->seg[list->curseg];
	sl = tpc_resolve(list, scsi_2btoul(seg->src_cscd), NULL, NULL, NULL);
	if (sl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Verify %ju\n", sl);

	if ((seg->tur & 0x01) == 0)
		return (CTL_RETVAL_COMPLETE);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	ctl_scsi_tur(tio->io, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = sl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

static int
tpc_process_register_key(struct tpc_list *list)
{
	struct scsi_ec_segment_register_key *seg;
	struct tpc_io *tio;
	uint64_t dl;
	int datalen;

	if (list->stage == 1) {
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		} else
			return (CTL_RETVAL_COMPLETE);
	}

	TAILQ_INIT(&list->allio);
	seg = (struct scsi_ec_segment_register_key *)list->seg[list->curseg];
	dl = tpc_resolve(list, scsi_2btoul(seg->dst_cscd), NULL, NULL, NULL);
	if (dl >= CTL_MAX_LUNS) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x08, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

//	printf("Register Key %ju\n", dl);

	list->tbdio = 1;
	tio = malloc(sizeof(*tio), M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&tio->run);
	tio->list = list;
	TAILQ_INSERT_TAIL(&list->allio, tio, links);
	tio->io = tpcl_alloc_io();
	datalen = sizeof(struct scsi_per_res_out_parms);
	list->buf = malloc(datalen, M_CTL, M_WAITOK);
	ctl_scsi_persistent_res_out(tio->io,
	    list->buf, datalen, SPRO_REGISTER, -1,
	    scsi_8btou64(seg->res_key), scsi_8btou64(seg->sa_res_key),
	    /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0);
	tio->io->io_hdr.retries = 3;
	tio->lun = dl;
	tio->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tio;
	list->stage++;
	if (tpcl_queue(tio->io, tio->lun) != CTL_RETVAL_COMPLETE)
		panic("tpcl_queue() error");
	return (CTL_RETVAL_QUEUED);
}

static off_t
tpc_ranges_length(struct scsi_range_desc *range, int nrange)
{
	off_t length = 0;
	int r;

	for (r = 0; r < nrange; r++)
		length += scsi_4btoul(range[r].length);
	return (length);
}

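/*
 * Find the range descriptor and the offset within it that correspond to
 * the given number of sectors from the start of the range list.  Returns
 * -1 if the list is shorter than that.
 */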
static int
tpc_skip_ranges(struct scsi_range_desc *range, int nrange, off_t skip,
    int *srange, off_t *soffset)
{
	off_t off;
	int r;

	r = 0;
	off = 0;
	while (r < nrange) {
		if (skip - off < scsi_4btoul(range[r].length)) {
			*srange = r;
			*soffset = skip - off;
			return (0);
		}
		off += scsi_4btoul(range[r].length);
		r++;
	}
	return (-1);
}

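/*
 * Process one round of a WRITE USING TOKEN operation, copying up to
 * TPC_MAX_IOCHUNK_SIZE bytes from the ranges saved in the ROD token to
 * the destination ranges.  Called repeatedly (via tpc_process()) until
 * all destination ranges have been written.
 */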
static int
tpc_process_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tior, *tiow;
	struct runl run, *prun;
	int drange, srange;
	off_t doffset, soffset;
	off_t srclba, dstlba, numbytes, donebytes, roundbytes;
	uint32_t srcblock, dstblock, pb, pbo, adj;

	if (list->stage > 0) {
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
	}

	/* Check where we are on destination ranges list. */
	if (tpc_skip_ranges(list->range, list->nrange, list->cursectors,
	    &drange, &doffset) != 0)
		return (CTL_RETVAL_COMPLETE);
	dstblock = list->lun->be_lun->blocksize;
	pb = dstblock << list->lun->be_lun->pblockexp;
	if (list->lun->be_lun->pblockoff > 0)
		pbo = pb - dstblock * list->lun->be_lun->pblockoff;
	else
		pbo = 0;

	/* Check where we are on source ranges list. */
	srcblock = list->token->blocksize;
	if (tpc_skip_ranges(list->token->range, list->token->nrange,
	    list->offset_into_rod + list->cursectors * dstblock / srcblock,
	    &srange, &soffset) != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x0d, /*ascq*/ 0x04, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	srclba = scsi_8btou64(list->token->range[srange].lba) + soffset;
	dstlba = scsi_8btou64(list->range[drange].lba) + doffset;
	numbytes = srcblock *
	    (scsi_4btoul(list->token->range[srange].length) - soffset);
	numbytes = omin(numbytes, dstblock *
	    (scsi_4btoul(list->range[drange].length) - doffset));
	if (numbytes > TPC_MAX_IOCHUNK_SIZE) {
		numbytes = TPC_MAX_IOCHUNK_SIZE;
		numbytes -= numbytes % dstblock;
		if (pb > dstblock) {
			adj = (dstlba * dstblock + numbytes - pbo) % pb;
			if (numbytes > adj)
				numbytes -= adj;
		}
	}

	if (numbytes % srcblock != 0 || numbytes % dstblock != 0) {
		ctl_set_sense(list->ctsio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_COPY_ABORTED,
		    /*asc*/ 0x26, /*ascq*/ 0x0A, SSD_ELEM_NONE);
		return (CTL_RETVAL_ERROR);
	}

	list->buf = malloc(numbytes, M_CTL, M_WAITOK |
	    (list->token == NULL ? M_ZERO : 0));
	list->segbytes = numbytes;
	list->segsectors = numbytes / dstblock;
//printf("Copy chunk of %ju sectors from %ju to %ju\n", list->segsectors,
//    srclba, dstlba);
	donebytes = 0;
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	while (donebytes < numbytes) {
		roundbytes = numbytes - donebytes;
		if (roundbytes > TPC_MAX_IO_SIZE) {
			roundbytes = TPC_MAX_IO_SIZE;
			roundbytes -= roundbytes % dstblock;
			if (pb > dstblock) {
				adj = (dstlba * dstblock + roundbytes - pbo) % pb;
				if (roundbytes > adj)
					roundbytes -= adj;
			}
		}

		tior = malloc(sizeof(*tior), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tior->run);
		tior->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tior, links);
		tior->io = tpcl_alloc_io();
		ctl_scsi_read_write(tior->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 1,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ srclba,
				    /*num_blocks*/ roundbytes / srcblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tior->io->io_hdr.retries = 3;
		tior->lun = list->token->lun;
		tior->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tior;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_read_write(tiow->io,
				    /*data_ptr*/ &list->buf[donebytes],
				    /*data_len*/ roundbytes,
				    /*read_op*/ 0,
				    /*byte2*/ 0,
				    /*minimum_cdb_size*/ 0,
				    /*lba*/ dstlba,
				    /*num_blocks*/ roundbytes / dstblock,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(&tior->run, tiow, rlinks);
		TAILQ_INSERT_TAIL(prun, tior, rlinks);
		prun = &tior->run;
		donebytes += roundbytes;
		srclba += roundbytes / srcblock;
		dstlba += roundbytes / dstblock;
	}

	while ((tior = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tior, rlinks);
		if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

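/*
 * Process WRITE USING TOKEN with the block device zero ROD token by
 * issuing a WRITE SAME of a zeroed block to each destination range.
 */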
static int
tpc_process_zero_wut(struct tpc_list *list)
{
	struct tpc_io *tio, *tiow;
	struct runl run, *prun;
	int r;
	uint32_t dstblock, len;

	if (list->stage > 0) {
complete:
		/* Cleanup after previous rounds. */
		while ((tio = TAILQ_FIRST(&list->allio)) != NULL) {
			TAILQ_REMOVE(&list->allio, tio, links);
			ctl_free_io(tio->io);
			free(tio, M_CTL);
		}
		free(list->buf, M_CTL);
		if (list->abort) {
			ctl_set_task_aborted(list->ctsio);
			return (CTL_RETVAL_ERROR);
		} else if (list->error) {
			ctl_set_sense(list->ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_COPY_ABORTED,
			    /*asc*/ 0x0d, /*ascq*/ 0x01, SSD_ELEM_NONE);
			return (CTL_RETVAL_ERROR);
		}
		list->cursectors += list->segsectors;
		list->curbytes += list->segbytes;
		return (CTL_RETVAL_COMPLETE);
	}

	dstblock = list->lun->be_lun->blocksize;
	list->buf = malloc(dstblock, M_CTL, M_WAITOK | M_ZERO);
	TAILQ_INIT(&run);
	prun = &run;
	list->tbdio = 1;
	TAILQ_INIT(&list->allio);
	list->segsectors = 0;
	for (r = 0; r < list->nrange; r++) {
		len = scsi_4btoul(list->range[r].length);
		if (len == 0)
			continue;

		tiow = malloc(sizeof(*tiow), M_CTL, M_WAITOK | M_ZERO);
		TAILQ_INIT(&tiow->run);
		tiow->list = list;
		TAILQ_INSERT_TAIL(&list->allio, tiow, links);
		tiow->io = tpcl_alloc_io();
		ctl_scsi_write_same(tiow->io,
				    /*data_ptr*/ list->buf,
				    /*data_len*/ dstblock,
				    /*byte2*/ 0,
				    /*lba*/ scsi_8btou64(list->range[r].lba),
				    /*num_blocks*/ len,
				    /*tag_type*/ CTL_TAG_SIMPLE,
				    /*control*/ 0);
		tiow->io->io_hdr.retries = 3;
		tiow->lun = list->lun->lun;
		tiow->io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = tiow;

		TAILQ_INSERT_TAIL(prun, tiow, rlinks);
		prun = &tiow->run;
		list->segsectors += len;
	}
	list->segbytes = list->segsectors * dstblock;

	if (TAILQ_EMPTY(&run))
		goto complete;

	while ((tiow = TAILQ_FIRST(&run)) != NULL) {
		TAILQ_REMOVE(&run, tiow, rlinks);
		if (tpcl_queue(tiow->io, tiow->lun) != CTL_RETVAL_COMPLETE)
			panic("tpcl_queue() error");
	}

	list->stage++;
	return (CTL_RETVAL_QUEUED);
}

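/*
 * State machine driving a copy list: run WUT rounds or EXTENDED COPY
 * segments until one queues I/O (to be resumed from tpc_done()), errors
 * out, or the list completes; then record the final status and complete
 * the initiating command.
 */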
static void
tpc_process(struct tpc_list *list)
{
	struct ctl_lun *lun = list->lun;
	struct ctl_softc *softc = lun->ctl_softc;
	struct scsi_ec_segment *seg;
	struct ctl_scsiio *ctsio = list->ctsio;
	int retval = CTL_RETVAL_COMPLETE;

	if (list->service_action == EC_WUT) {
		if (list->token != NULL)
			retval = tpc_process_wut(list);
		else
			retval = tpc_process_zero_wut(list);
		if (retval == CTL_RETVAL_QUEUED)
			return;
		if (retval == CTL_RETVAL_ERROR) {
			list->error = 1;
			goto done;
		}
	} else {
//printf("ZZZ %d cscd, %d segs\n", list->ncscd, list->nseg);
		while (list->curseg < list->nseg) {
			seg = list->seg[list->curseg];
			switch (seg->type_code) {
			case EC_SEG_B2B:
				retval = tpc_process_b2b(list);
				break;
			case EC_SEG_VERIFY:
				retval = tpc_process_verify(list);
				break;
			case EC_SEG_REGISTER_KEY:
				retval = tpc_process_register_key(list);
				break;
			default:
				ctl_set_sense(ctsio, /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_COPY_ABORTED,
				    /*asc*/ 0x26, /*ascq*/ 0x09, SSD_ELEM_NONE);
				goto done;
			}
			if (retval == CTL_RETVAL_QUEUED)
				return;
			if (retval == CTL_RETVAL_ERROR) {
				list->error = 1;
				goto done;
			}
			list->curseg++;
			list->stage = 0;
		}
	}

	ctl_set_success(ctsio);

done:
//printf("ZZZ done\n");
	free(list->params, M_CTL);
	list->params = NULL;
	if (list->token) {
		mtx_lock(&softc->tpc_lock);
		if (--list->token->active == 0)
			list->token->last_active = time_uptime;
		mtx_unlock(&softc->tpc_lock);
		list->token = NULL;
	}
	mtx_lock(&lun->lun_lock);
	if ((list->flags & EC_LIST_ID_USAGE_MASK) == EC_LIST_ID_USAGE_NONE) {
		TAILQ_REMOVE(&lun->tpc_lists, list, links);
		free(list, M_CTL);
	} else {
		list->completed = 1;
		list->last_active = time_uptime;
		list->sense_data = ctsio->sense_data;
		list->sense_len = ctsio->sense_len;
		list->scsi_status = ctsio->scsi_status;
	}
	mtx_unlock(&lun->lun_lock);

	ctl_done((union ctl_io *)ctsio);
}

/*
 * For any sort of check condition, busy, etc., we just retry.  We do not
 * decrement the retry count for unit attention type errors.  These are
 * normal, and we want to save the retry count for "real" errors.  Otherwise,
 * we could end up with situations where a command will succeed in some
 * situations and fail in others, depending on whether a unit attention is
 * pending.  Also, some of our error recovery actions, most notably the
 * LUN reset action, will cause a unit attention.
 *
 * We can add more detail here later if necessary.
 */
static tpc_error_action
tpc_checkcond_parse(union ctl_io *io)
{
	tpc_error_action error_action;
	int error_code, sense_key, asc, ascq;

	/*
	 * Default to retrying the command.
	 */
	error_action = TPC_ERR_RETRY;

	scsi_extract_sense_len(&io->scsiio.sense_data,
			       io->scsiio.sense_len,
			       &error_code,
			       &sense_key,
			       &asc,
			       &ascq,
			       /*show_errors*/ 1);

	switch (error_code) {
	case SSD_DEFERRED_ERROR:
	case SSD_DESC_DEFERRED_ERROR:
		error_action |= TPC_ERR_NO_DECREMENT;
		break;
	case SSD_CURRENT_ERROR:
	case SSD_DESC_CURRENT_ERROR:
	default:
		switch (sense_key) {
		case SSD_KEY_UNIT_ATTENTION:
			error_action |= TPC_ERR_NO_DECREMENT;
			break;
		case SSD_KEY_HARDWARE_ERROR:
			/*
			 * This is our generic "something bad happened"
			 * error code.  It often isn't recoverable.
			 */
			if ((asc == 0x44) && (ascq == 0x00))
				error_action = TPC_ERR_FAIL;
			break;
		case SSD_KEY_NOT_READY:
			/*
			 * If the LUN is powered down, there likely isn't
			 * much point in retrying right now.
			 */
			if ((asc == 0x04) && (ascq == 0x02))
				error_action = TPC_ERR_FAIL;
			/*
			 * If the LUN is offline, there probably isn't much
			 * point in retrying, either.
			 */
			if ((asc == 0x04) && (ascq == 0x03))
				error_action = TPC_ERR_FAIL;
			break;
		}
	}
	return (error_action);
}

static tpc_error_action
tpc_error_parse(union ctl_io *io)
{
	tpc_error_action error_action = TPC_ERR_RETRY;

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		switch (io->io_hdr.status & CTL_STATUS_MASK) {
		case CTL_SCSI_ERROR:
			switch (io->scsiio.scsi_status) {
			case SCSI_STATUS_CHECK_COND:
				error_action = tpc_checkcond_parse(io);
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		break;
	case CTL_IO_TASK:
		break;
	default:
		panic("%s: invalid ctl_io type %d\n", __func__,
		      io->io_hdr.io_type);
		break;
	}
	return (error_action);
}

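/*
 * Completion callback for all I/O issued on behalf of a copy list.
 * Retries transient errors, queues any I/Os chained after this one, and
 * resumes tpc_process() when the last outstanding I/O of the round
 * completes.
 */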
void
tpc_done(union ctl_io *io)
{
	struct tpc_io *tio, *tior;

	/*
	 * Very minimal retry logic.  We basically retry if we got an error
	 * back, and the retry count is greater than 0.  If we ever want
	 * more sophisticated initiator type behavior, the CAM error
	 * recovery code in ../common might be helpful.
	 */
	tio = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
	if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
	 && (io->io_hdr.retries > 0)) {
		ctl_io_status old_status;
		tpc_error_action error_action;

		error_action = tpc_error_parse(io);
		switch (error_action & TPC_ERR_MASK) {
		case TPC_ERR_FAIL:
			break;
		case TPC_ERR_RETRY:
		default:
			if ((error_action & TPC_ERR_NO_DECREMENT) == 0)
				io->io_hdr.retries--;
			old_status = io->io_hdr.status;
			io->io_hdr.status = CTL_STATUS_NONE;
			io->io_hdr.flags &= ~CTL_FLAG_ABORT;
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if (tpcl_queue(io, tio->lun) != CTL_RETVAL_COMPLETE) {
				printf("%s: error returned from ctl_queue()!\n",
				       __func__);
				io->io_hdr.status = old_status;
			} else
				return;
		}
	}

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		tio->list->error = 1;
	else
		atomic_add_int(&tio->list->curops, 1);
	if (!tio->list->error && !tio->list->abort) {
		while ((tior = TAILQ_FIRST(&tio->run)) != NULL) {
			TAILQ_REMOVE(&tio->run, tior, rlinks);
			atomic_add_int(&tio->list->tbdio, 1);
			if (tpcl_queue(tior->io, tior->lun) != CTL_RETVAL_COMPLETE)
				panic("tpcl_queue() error");
		}
	}
	if (atomic_fetchadd_int(&tio->list->tbdio, -1) == 1)
		tpc_process(tio->list);
}

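/*
 * EXTENDED COPY (LID1): fetch the parameter list, validate the CSCD,
 * segment and inline data lengths, build a tpc_list and start processing.
 */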
1577 int
1578 ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
1579 {
1580 	struct scsi_extended_copy *cdb;
1581 	struct scsi_extended_copy_lid1_data *data;
1582 	struct ctl_lun *lun;
1583 	struct tpc_list *list, *tlist;
1584 	uint8_t *ptr;
1585 	char *value;
1586 	int len, off, lencscd, lenseg, leninl, nseg;
1587 
1588 	CTL_DEBUG_PRINT(("ctl_extended_copy_lid1\n"));
1589 
1590 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1591 	cdb = (struct scsi_extended_copy *)ctsio->cdb;
1592 	len = scsi_4btoul(cdb->length);
1593 
1594 	if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
1595 	    len > sizeof(struct scsi_extended_copy_lid1_data) +
1596 	    TPC_MAX_LIST + TPC_MAX_INLINE) {
1597 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1598 		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1599 		goto done;
1600 	}
1601 
1602 	/*
1603 	 * If we've got a kernel request that hasn't been malloced yet,
1604 	 * malloc it and tell the caller the data buffer is here.
1605 	 */
1606 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1607 		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1608 		ctsio->kern_data_len = len;
1609 		ctsio->kern_total_len = len;
1610 		ctsio->kern_data_resid = 0;
1611 		ctsio->kern_rel_offset = 0;
1612 		ctsio->kern_sg_entries = 0;
1613 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1614 		ctsio->be_move_done = ctl_config_move_done;
1615 		ctl_datamove((union ctl_io *)ctsio);
1616 
1617 		return (CTL_RETVAL_COMPLETE);
1618 	}
1619 
1620 	data = (struct scsi_extended_copy_lid1_data *)ctsio->kern_data_ptr;
1621 	lencscd = scsi_2btoul(data->cscd_list_length);
1622 	lenseg = scsi_4btoul(data->segment_list_length);
1623 	leninl = scsi_4btoul(data->inline_data_length);
1624 	if (len < sizeof(struct scsi_extended_copy_lid1_data) +
1625 	    lencscd + lenseg + leninl ||
1626 	    leninl > TPC_MAX_INLINE) {
1627 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1628 		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1629 		goto done;
1630 	}
1631 	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1632 		ctl_set_sense(ctsio, /*current_error*/ 1,
1633 		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1634 		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1635 		goto done;
1636 	}
1637 	if (lencscd + lenseg > TPC_MAX_LIST) {
1638 		ctl_set_param_len_error(ctsio);
1639 		goto done;
1640 	}
1641 
1642 	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1643 	list->service_action = cdb->service_action;
1644 	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1645 	if (value != NULL && strcmp(value, "on") == 0)
1646 		list->init_port = -1;
1647 	else
1648 		list->init_port = ctsio->io_hdr.nexus.targ_port;
1649 	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1650 	list->list_id = data->list_identifier;
1651 	list->flags = data->flags;
1652 	list->params = ctsio->kern_data_ptr;
1653 	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1654 	ptr = &data->data[lencscd];
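	/*
	 * Walk the variable-length segment descriptors that follow the
	 * CSCD list, recording a pointer to each one.
	 */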
1655 	for (nseg = 0, off = 0; off < lenseg; nseg++) {
1656 		if (nseg >= TPC_MAX_SEGS) {
1657 			free(list, M_CTL);
1658 			ctl_set_sense(ctsio, /*current_error*/ 1,
1659 			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1660 			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1661 			goto done;
1662 		}
1663 		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
1664 		off += sizeof(struct scsi_ec_segment) +
1665 		    scsi_2btoul(list->seg[nseg]->descr_length);
1666 	}
1667 	list->inl = &data->data[lencscd + lenseg];
1668 	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1669 	list->nseg = nseg;
1670 	list->leninl = leninl;
1671 	list->ctsio = ctsio;
1672 	list->lun = lun;
1673 	mtx_lock(&lun->lun_lock);
1674 	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1675 		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1676 		if (tlist != NULL && !tlist->completed) {
1677 			mtx_unlock(&lun->lun_lock);
1678 			free(list, M_CTL);
1679 			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1680 			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1681 			    /*bit*/ 0);
1682 			goto done;
1683 		}
1684 		if (tlist != NULL) {
1685 			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1686 			free(tlist, M_CTL);
1687 		}
1688 	}
1689 	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1690 	mtx_unlock(&lun->lun_lock);
1691 
1692 	tpc_process(list);
1693 	return (CTL_RETVAL_COMPLETE);
1694 
1695 done:
1696 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1697 		free(ctsio->kern_data_ptr, M_CTL);
1698 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1699 	}
1700 	ctl_done((union ctl_io *)ctsio);
1701 	return (CTL_RETVAL_COMPLETE);
1702 }
1703 
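/*
 * Handle the EXTENDED COPY(LID4) command.  Identical in structure to
 * the LID1 variant above, but with the LID4 parameter list header and
 * a four-byte list identifier.
 */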
1704 int
1705 ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
1706 {
1707 	struct scsi_extended_copy *cdb;
1708 	struct scsi_extended_copy_lid4_data *data;
1709 	struct ctl_lun *lun;
1710 	struct tpc_list *list, *tlist;
1711 	uint8_t *ptr;
1712 	char *value;
1713 	int len, off, lencscd, lenseg, leninl, nseg;
1714 
1715 	CTL_DEBUG_PRINT(("ctl_extended_copy_lid4\n"));
1716 
1717 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1718 	cdb = (struct scsi_extended_copy *)ctsio->cdb;
1719 	len = scsi_4btoul(cdb->length);
1720 
1721 	if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
1722 	    len > sizeof(struct scsi_extended_copy_lid4_data) +
1723 	    TPC_MAX_LIST + TPC_MAX_INLINE) {
1724 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1725 		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1726 		goto done;
1727 	}
1728 
1729 	/*
1730 	 * If we've got a kernel request that hasn't been malloced yet,
1731 	 * malloc it and tell the caller the data buffer is here.
1732 	 */
1733 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1734 		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1735 		ctsio->kern_data_len = len;
1736 		ctsio->kern_total_len = len;
1737 		ctsio->kern_data_resid = 0;
1738 		ctsio->kern_rel_offset = 0;
1739 		ctsio->kern_sg_entries = 0;
1740 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1741 		ctsio->be_move_done = ctl_config_move_done;
1742 		ctl_datamove((union ctl_io *)ctsio);
1743 
1744 		return (CTL_RETVAL_COMPLETE);
1745 	}
1746 
1747 	data = (struct scsi_extended_copy_lid4_data *)ctsio->kern_data_ptr;
1748 	lencscd = scsi_2btoul(data->cscd_list_length);
1749 	lenseg = scsi_2btoul(data->segment_list_length);
1750 	leninl = scsi_2btoul(data->inline_data_length);
1751 	if (len < sizeof(struct scsi_extended_copy_lid4_data) +
1752 	    lencscd + lenseg + leninl ||
1753 	    leninl > TPC_MAX_INLINE) {
1754 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1755 		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1756 		goto done;
1757 	}
1758 	if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
1759 		ctl_set_sense(ctsio, /*current_error*/ 1,
1760 		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1761 		    /*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
1762 		goto done;
1763 	}
1764 	if (lencscd + lenseg > TPC_MAX_LIST) {
1765 		ctl_set_param_len_error(ctsio);
1766 		goto done;
1767 	}
1768 
1769 	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1770 	list->service_action = cdb->service_action;
1771 	value = ctl_get_opt(&lun->be_lun->options, "insecure_tpc");
1772 	if (value != NULL && strcmp(value, "on") == 0)
1773 		list->init_port = -1;
1774 	else
1775 		list->init_port = ctsio->io_hdr.nexus.targ_port;
1776 	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1777 	list->list_id = scsi_4btoul(data->list_identifier);
1778 	list->flags = data->flags;
1779 	list->params = ctsio->kern_data_ptr;
1780 	list->cscd = (struct scsi_ec_cscd *)&data->data[0];
1781 	ptr = &data->data[lencscd];
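	/* As above: walk the segment descriptors following the CSCD list. */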
1782 	for (nseg = 0, off = 0; off < lenseg; nseg++) {
1783 		if (nseg >= TPC_MAX_SEGS) {
1784 			free(list, M_CTL);
1785 			ctl_set_sense(ctsio, /*current_error*/ 1,
1786 			    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
1787 			    /*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
1788 			goto done;
1789 		}
1790 		list->seg[nseg] = (struct scsi_ec_segment *)(ptr + off);
1791 		off += sizeof(struct scsi_ec_segment) +
1792 		    scsi_2btoul(list->seg[nseg]->descr_length);
1793 	}
1794 	list->inl = &data->data[lencscd + lenseg];
1795 	list->ncscd = lencscd / sizeof(struct scsi_ec_cscd);
1796 	list->nseg = nseg;
1797 	list->leninl = leninl;
1798 	list->ctsio = ctsio;
1799 	list->lun = lun;
1800 	mtx_lock(&lun->lun_lock);
1801 	if ((list->flags & EC_LIST_ID_USAGE_MASK) != EC_LIST_ID_USAGE_NONE) {
1802 		tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1803 		if (tlist != NULL && !tlist->completed) {
1804 			mtx_unlock(&lun->lun_lock);
1805 			free(list, M_CTL);
1806 			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1807 			    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1808 			    /*bit*/ 0);
1809 			goto done;
1810 		}
1811 		if (tlist != NULL) {
1812 			TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1813 			free(tlist, M_CTL);
1814 		}
1815 	}
1816 	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1817 	mtx_unlock(&lun->lun_lock);
1818 
1819 	tpc_process(list);
1820 	return (CTL_RETVAL_COMPLETE);
1821 
1822 done:
1823 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
1824 		free(ctsio->kern_data_ptr, M_CTL);
1825 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
1826 	}
1827 	ctl_done((union ctl_io *)ctsio);
1828 	return (CTL_RETVAL_COMPLETE);
1829 }
1830 
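/*
 * Build a 512-byte ROD token for POPULATE TOKEN.  Layout, following the
 * body[] indices used below (body[] starts at byte 8 of the token):
 *   bytes   0-  7: ROD type (AUR) and token length (0x01f8);
 *   bytes   8- 15: locally unique token identifier;
 *   bytes  16-   : CSCD ID descriptor naming the source LUN;
 *   bytes  48- 63: number of bytes represented (big-endian, with the
 *                  64-bit value stored in the low half);
 *   bytes  88-   : device type specific data (READ CAPACITY(16) payload
 *                  without its first field);
 *   bytes 128-   : target device identifier, then random fill.
 */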
1831 static void
1832 tpc_create_token(struct ctl_lun *lun, struct ctl_port *port, off_t len,
1833     struct scsi_token *token)
1834 {
1835 	static int id = 0;
1836 	struct scsi_vpd_id_descriptor *idd = NULL;
1837 	struct scsi_ec_cscd_id *cscd;
1838 	struct scsi_read_capacity_data_long *dtsd;
1839 	int targid_len;
1840 
1841 	scsi_ulto4b(ROD_TYPE_AUR, token->type);
1842 	scsi_ulto2b(0x01f8, token->length);
1843 	scsi_u64to8b(atomic_fetchadd_int(&id, 1), &token->body[0]);
1844 	if (lun->lun_devid)
1845 		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1846 		    lun->lun_devid->data, lun->lun_devid->len,
1847 		    scsi_devid_is_lun_naa);
1848 	if (idd == NULL && lun->lun_devid)
1849 		idd = scsi_get_devid_desc((struct scsi_vpd_id_descriptor *)
1850 		    lun->lun_devid->data, lun->lun_devid->len,
1851 		    scsi_devid_is_lun_eui64);
1852 	if (idd != NULL) {
1853 		cscd = (struct scsi_ec_cscd_id *)&token->body[8];
1854 		cscd->type_code = EC_CSCD_ID;
1855 		cscd->luidt_pdt = T_DIRECT;
1856 		memcpy(&cscd->codeset, idd, 4 + idd->length);
1857 		scsi_ulto3b(lun->be_lun->blocksize, cscd->dtsp.block_length);
1858 	}
1859 	scsi_u64to8b(0, &token->body[40]); /* XXX: Should be 128bit value. */
1860 	scsi_u64to8b(len, &token->body[48]);
1861 
1862 	/* ROD token device type specific data (RC16 without first field) */
1863 	dtsd = (struct scsi_read_capacity_data_long *)&token->body[88 - 8];
1864 	scsi_ulto4b(lun->be_lun->blocksize, dtsd->length);
1865 	dtsd->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
1866 	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, dtsd->lalba_lbp);
1867 	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
1868 		dtsd->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
1869 
1870 	if (port->target_devid) {
1871 		targid_len = port->target_devid->len;
1872 		memcpy(&token->body[120], port->target_devid->data, targid_len);
1873 	} else
1874 		targid_len = 32;
1875 	arc4rand(&token->body[120 + targid_len], 384 - targid_len, 0);
1876 }
1877 
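/*
 * Handle the POPULATE TOKEN command: validate the parameter list,
 * register an (already completed) tpc_list so RECEIVE ROD TOKEN
 * INFORMATION can report on it, and create a ROD token representing
 * the given ranges on the global token list.
 */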
1878 int
1879 ctl_populate_token(struct ctl_scsiio *ctsio)
1880 {
1881 	struct scsi_populate_token *cdb;
1882 	struct scsi_populate_token_data *data;
1883 	struct ctl_softc *softc;
1884 	struct ctl_lun *lun;
1885 	struct ctl_port *port;
1886 	struct tpc_list *list, *tlist;
1887 	struct tpc_token *token;
1888 	int len, lendesc;
1889 
1890 	CTL_DEBUG_PRINT(("ctl_populate_token\n"));
1891 
1892 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
1893 	softc = lun->ctl_softc;
1894 	port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
1895 	cdb = (struct scsi_populate_token *)ctsio->cdb;
1896 	len = scsi_4btoul(cdb->length);
1897 
1898 	if (len < sizeof(struct scsi_populate_token_data) ||
1899 	    len > sizeof(struct scsi_populate_token_data) +
1900 	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
1901 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
1902 		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
1903 		goto done;
1904 	}
1905 
1906 	/*
1907 	 * If we've got a kernel request that hasn't been malloced yet,
1908 	 * malloc it and tell the caller the data buffer is here.
1909 	 */
1910 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
1911 		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
1912 		ctsio->kern_data_len = len;
1913 		ctsio->kern_total_len = len;
1914 		ctsio->kern_data_resid = 0;
1915 		ctsio->kern_rel_offset = 0;
1916 		ctsio->kern_sg_entries = 0;
1917 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1918 		ctsio->be_move_done = ctl_config_move_done;
1919 		ctl_datamove((union ctl_io *)ctsio);
1920 
1921 		return (CTL_RETVAL_COMPLETE);
1922 	}
1923 
1924 	data = (struct scsi_populate_token_data *)ctsio->kern_data_ptr;
1925 	lendesc = scsi_2btoul(data->range_descriptor_length);
1926 	if (len < sizeof(struct scsi_populate_token_data) + lendesc) {
1927 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1928 		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
1929 		goto done;
1930 	}
1931 /*
1932 	printf("PT(list=%u) flags=%x to=%d rt=%x len=%x\n",
1933 	    scsi_4btoul(cdb->list_identifier),
1934 	    data->flags, scsi_4btoul(data->inactivity_timeout),
1935 	    scsi_4btoul(data->rod_type),
1936 	    scsi_2btoul(data->range_descriptor_length));
1937 */
1938 	if ((data->flags & EC_PT_RTV) &&
1939 	    scsi_4btoul(data->rod_type) != ROD_TYPE_AUR) {
1940 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
1941 		    /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0);
1942 		goto done;
1943 	}
	/* Reject inactivity timeouts we cannot honor before allocating. */
	if (scsi_4btoul(data->inactivity_timeout) > TPC_MAX_TOKEN_TIMEOUT) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
		    /*field*/ 4, /*bit_valid*/ 0, /*bit*/ 0);
		goto done;
	}
1944 
1945 	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
1946 	list->service_action = cdb->service_action;
1947 	list->init_port = ctsio->io_hdr.nexus.targ_port;
1948 	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
1949 	list->list_id = scsi_4btoul(cdb->list_identifier);
1950 	list->flags = data->flags;
1951 	list->ctsio = ctsio;
1952 	list->lun = lun;
1953 	mtx_lock(&lun->lun_lock);
1954 	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
1955 	if (tlist != NULL && !tlist->completed) {
1956 		mtx_unlock(&lun->lun_lock);
1957 		free(list, M_CTL);
1958 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
1959 		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
1960 		    /*bit*/ 0);
1961 		goto done;
1962 	}
1963 	if (tlist != NULL) {
1964 		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
1965 		free(tlist, M_CTL);
1966 	}
1967 	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
1968 	mtx_unlock(&lun->lun_lock);
1969 
1970 	token = malloc(sizeof(*token), M_CTL, M_WAITOK | M_ZERO);
1971 	token->lun = lun->lun;
1972 	token->blocksize = lun->be_lun->blocksize;
1973 	token->params = ctsio->kern_data_ptr;
1974 	token->range = &data->desc[0];
1975 	token->nrange = scsi_2btoul(data->range_descriptor_length) /
1976 	    sizeof(struct scsi_range_desc);
1977 	list->cursectors = tpc_ranges_length(token->range, token->nrange);
1978 	list->curbytes = (off_t)list->cursectors * lun->be_lun->blocksize;
1979 	tpc_create_token(lun, port, list->curbytes,
1980 	    (struct scsi_token *)token->token);
1981 	token->active = 0;
1982 	token->last_active = time_uptime;
1983 	token->timeout = scsi_4btoul(data->inactivity_timeout);
1984 	if (token->timeout == 0)
1985 		token->timeout = TPC_DFL_TOKEN_TIMEOUT;
1986 	else if (token->timeout < TPC_MIN_TOKEN_TIMEOUT)
1987 		token->timeout = TPC_MIN_TOKEN_TIMEOUT;
1993 	memcpy(list->res_token, token->token, sizeof(list->res_token));
1994 	list->res_token_valid = 1;
1995 	list->curseg = 0;
1996 	list->completed = 1;
1997 	list->last_active = time_uptime;
1998 	mtx_lock(&softc->tpc_lock);
1999 	TAILQ_INSERT_TAIL(&softc->tpc_tokens, token, links);
2000 	mtx_unlock(&softc->tpc_lock);
2001 	ctl_set_success(ctsio);
2002 	ctl_done((union ctl_io *)ctsio);
2003 	return (CTL_RETVAL_COMPLETE);
2004 
2005 done:
2006 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2007 		free(ctsio->kern_data_ptr, M_CTL);
2008 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2009 	}
2010 	ctl_done((union ctl_io *)ctsio);
2011 	return (CTL_RETVAL_COMPLETE);
2012 }
2013 
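/*
 * Handle the WRITE USING TOKEN command: validate the parameter list
 * and build a tpc_list for the transfer.  A block-zero ROD needs no
 * token lookup; otherwise the named ROD token must already exist, or
 * the command fails with an ILLEGAL REQUEST (invalid token) sense.
 */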
2014 int
2015 ctl_write_using_token(struct ctl_scsiio *ctsio)
2016 {
2017 	struct scsi_write_using_token *cdb;
2018 	struct scsi_write_using_token_data *data;
2019 	struct ctl_softc *softc;
2020 	struct ctl_lun *lun;
2021 	struct tpc_list *list, *tlist;
2022 	struct tpc_token *token;
2023 	int len, lendesc;
2024 
2025 	CTL_DEBUG_PRINT(("ctl_write_using_token\n"));
2026 
2027 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
2028 	softc = lun->ctl_softc;
2029 	cdb = (struct scsi_write_using_token *)ctsio->cdb;
2030 	len = scsi_4btoul(cdb->length);
2031 
2032 	if (len < sizeof(struct scsi_write_using_token_data) ||
2033 	    len > sizeof(struct scsi_write_using_token_data) +
2034 	     TPC_MAX_SEGS * sizeof(struct scsi_range_desc)) {
2035 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
2036 		    /*field*/ 9, /*bit_valid*/ 0, /*bit*/ 0);
2037 		goto done;
2038 	}
2039 
2040 	/*
2041 	 * If we've got a kernel request that hasn't been malloced yet,
2042 	 * malloc it and tell the caller the data buffer is here.
2043 	 */
2044 	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
2045 		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
2046 		ctsio->kern_data_len = len;
2047 		ctsio->kern_total_len = len;
2048 		ctsio->kern_data_resid = 0;
2049 		ctsio->kern_rel_offset = 0;
2050 		ctsio->kern_sg_entries = 0;
2051 		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2052 		ctsio->be_move_done = ctl_config_move_done;
2053 		ctl_datamove((union ctl_io *)ctsio);
2054 
2055 		return (CTL_RETVAL_COMPLETE);
2056 	}
2057 
2058 	data = (struct scsi_write_using_token_data *)ctsio->kern_data_ptr;
2059 	lendesc = scsi_2btoul(data->range_descriptor_length);
2060 	if (len < sizeof(struct scsi_write_using_token_data) + lendesc) {
2061 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
2062 		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
2063 		goto done;
2064 	}
2065 /*
2066 	printf("WUT(list=%u) flags=%x off=%ju len=%x\n",
2067 	    scsi_4btoul(cdb->list_identifier),
2068 	    data->flags, scsi_8btou64(data->offset_into_rod),
2069 	    scsi_2btoul(data->range_descriptor_length));
2070 */
2071 	list = malloc(sizeof(struct tpc_list), M_CTL, M_WAITOK | M_ZERO);
2072 	list->service_action = cdb->service_action;
2073 	list->init_port = ctsio->io_hdr.nexus.targ_port;
2074 	list->init_idx = ctl_get_resindex(&ctsio->io_hdr.nexus);
2075 	list->list_id = scsi_4btoul(cdb->list_identifier);
2076 	list->flags = data->flags;
2077 	list->params = ctsio->kern_data_ptr;
2078 	list->range = &data->desc[0];
2079 	list->nrange = scsi_2btoul(data->range_descriptor_length) /
2080 	    sizeof(struct scsi_range_desc);
2081 	list->offset_into_rod = scsi_8btou64(data->offset_into_rod);
2082 	list->ctsio = ctsio;
2083 	list->lun = lun;
2084 	mtx_lock(&lun->lun_lock);
2085 	tlist = tpc_find_list(lun, list->list_id, list->init_idx);
2086 	if (tlist != NULL && !tlist->completed) {
2087 		mtx_unlock(&lun->lun_lock);
2088 		free(list, M_CTL);
2089 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2090 		    /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0,
2091 		    /*bit*/ 0);
2092 		goto done;
2093 	}
2094 	if (tlist != NULL) {
2095 		TAILQ_REMOVE(&lun->tpc_lists, tlist, links);
2096 		free(tlist, M_CTL);
2097 	}
2098 	TAILQ_INSERT_TAIL(&lun->tpc_lists, list, links);
2099 	mtx_unlock(&lun->lun_lock);
2100 
2101 	/* Block device zero ROD token -> no token. */
2102 	if (scsi_4btoul(data->rod_token) == ROD_TYPE_BLOCK_ZERO) {
2103 		tpc_process(list);
2104 		return (CTL_RETVAL_COMPLETE);
2105 	}
2106 
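	/*
	 * Find the referenced ROD token by value and take a reference on
	 * it; EC_WUT_DEL_TKN zeroes its timeout so that it expires as
	 * soon as this transfer is done with it.
	 */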
2107 	mtx_lock(&softc->tpc_lock);
2108 	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
2109 		if (memcmp(token->token, data->rod_token,
2110 		    sizeof(data->rod_token)) == 0)
2111 			break;
2112 	}
2113 	if (token != NULL) {
2114 		token->active++;
2115 		list->token = token;
2116 		if (data->flags & EC_WUT_DEL_TKN)
2117 			token->timeout = 0;
2118 	}
2119 	mtx_unlock(&softc->tpc_lock);
2120 	if (token == NULL) {
2121 		mtx_lock(&lun->lun_lock);
2122 		TAILQ_REMOVE(&lun->tpc_lists, list, links);
2123 		mtx_unlock(&lun->lun_lock);
2124 		free(list, M_CTL);
2125 		ctl_set_sense(ctsio, /*current_error*/ 1,
2126 		    /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
2127 		    /*asc*/ 0x23, /*ascq*/ 0x04, SSD_ELEM_NONE);
2128 		goto done;
2129 	}
2130 
2131 	tpc_process(list);
2132 	return (CTL_RETVAL_COMPLETE);
2133 
2134 done:
2135 	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
2136 		free(ctsio->kern_data_ptr, M_CTL);
2137 		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
2138 	}
2139 	ctl_done((union ctl_io *)ctsio);
2140 	return (CTL_RETVAL_COMPLETE);
2141 }
2142 
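/*
 * Handle the RECEIVE ROD TOKEN INFORMATION command: report the status
 * of the identified copy operation, freeing the list once it has been
 * reported as completed, and append the created ROD token, if any.
 */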
2143 int
2144 ctl_receive_rod_token_information(struct ctl_scsiio *ctsio)
2145 {
2146 	struct ctl_lun *lun;
2147 	struct scsi_receive_rod_token_information *cdb;
2148 	struct scsi_receive_copy_status_lid4_data *data;
2149 	struct tpc_list *list;
2150 	struct tpc_list list_copy;
2151 	uint8_t *ptr;
2152 	int retval;
2153 	int alloc_len, total_len, token_len;
2154 	uint32_t list_id;
2155 
2156 	CTL_DEBUG_PRINT(("ctl_receive_rod_token_information\n"));
2157 
2158 	cdb = (struct scsi_receive_rod_token_information *)ctsio->cdb;
2159 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
2160 
2161 	retval = CTL_RETVAL_COMPLETE;
2162 
2163 	list_id = scsi_4btoul(cdb->list_identifier);
2164 	mtx_lock(&lun->lun_lock);
2165 	list = tpc_find_list(lun, list_id,
2166 	    ctl_get_resindex(&ctsio->io_hdr.nexus));
2167 	if (list == NULL) {
2168 		mtx_unlock(&lun->lun_lock);
2169 		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
2170 		    /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0,
2171 		    /*bit*/ 0);
2172 		ctl_done((union ctl_io *)ctsio);
2173 		return (retval);
2174 	}
2175 	list_copy = *list;
2176 	if (list->completed) {
2177 		TAILQ_REMOVE(&lun->tpc_lists, list, links);
2178 		free(list, M_CTL);
2179 	}
2180 	mtx_unlock(&lun->lun_lock);
2181 
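	/*
	 * Size the response: the LID4 status data, the saved sense data,
	 * and a ROD token descriptor (a 4-byte length, 2 reserved bytes
	 * and the 512-byte token) when a token was created.
	 */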
2182 	token_len = list_copy.res_token_valid ? 2 + sizeof(list_copy.res_token) : 0;
2183 	total_len = sizeof(*data) + list_copy.sense_len + 4 + token_len;
2184 	alloc_len = scsi_4btoul(cdb->length);
2185 
2186 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
2187 
2188 	ctsio->kern_sg_entries = 0;
2189 
2190 	if (total_len < alloc_len) {
2191 		ctsio->residual = alloc_len - total_len;
2192 		ctsio->kern_data_len = total_len;
2193 		ctsio->kern_total_len = total_len;
2194 	} else {
2195 		ctsio->residual = 0;
2196 		ctsio->kern_data_len = alloc_len;
2197 		ctsio->kern_total_len = alloc_len;
2198 	}
2199 	ctsio->kern_data_resid = 0;
2200 	ctsio->kern_rel_offset = 0;
2201 
2202 	data = (struct scsi_receive_copy_status_lid4_data *)ctsio->kern_data_ptr;
2203 	scsi_ulto4b(sizeof(*data) - 4 + list_copy.sense_len +
2204 	    4 + token_len, data->available_data);
2205 	data->response_to_service_action = list_copy.service_action;
2206 	if (list_copy.completed) {
2207 		if (list_copy.error)
2208 			data->copy_command_status = RCS_CCS_ERROR;
2209 		else if (list_copy.abort)
2210 			data->copy_command_status = RCS_CCS_ABORTED;
2211 		else
2212 			data->copy_command_status = RCS_CCS_COMPLETED;
2213 	} else
2214 		data->copy_command_status = RCS_CCS_INPROG_FG;
2215 	scsi_ulto2b(list_copy.curops, data->operation_counter);
2216 	scsi_ulto4b(UINT32_MAX, data->estimated_status_update_delay);
2217 	data->transfer_count_units = RCS_TC_LBAS;
2218 	scsi_u64to8b(list_copy.cursectors, data->transfer_count);
2219 	scsi_ulto2b(list_copy.curseg, data->segments_processed);
2220 	data->length_of_the_sense_data_field = list_copy.sense_len;
2221 	data->sense_data_length = list_copy.sense_len;
2222 	memcpy(data->sense_data, &list_copy.sense_data, list_copy.sense_len);
2223 
2224 	ptr = &data->sense_data[data->length_of_the_sense_data_field];
2225 	scsi_ulto4b(token_len, &ptr[0]);
2226 	if (list_copy.res_token_valid) {
2227 		scsi_ulto2b(0, &ptr[4]);
2228 		memcpy(&ptr[6], list_copy.res_token, sizeof(list_copy.res_token));
2229 	}
2230 /*
2231 	printf("RRTI(list=%u) valid=%d\n",
2232 	    scsi_4btoul(cdb->list_identifier), list_copy.res_token_valid);
2233 */
2234 	ctl_set_success(ctsio);
2235 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2236 	ctsio->be_move_done = ctl_config_move_done;
2237 	ctl_datamove((union ctl_io *)ctsio);
2238 	return (retval);
2239 }
2240 
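/*
 * Handle the REPORT ALL ROD TOKENS command: return the identifying
 * first 96 bytes of every ROD token known to this copy manager.
 */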
2241 int
2242 ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio)
2243 {
2244 	struct ctl_softc *softc;
2245 	struct ctl_lun *lun;
2246 	struct scsi_report_all_rod_tokens *cdb;
2247 	struct scsi_report_all_rod_tokens_data *data;
2248 	struct tpc_token *token;
2249 	int retval;
2250 	int alloc_len, total_len, tokens, i;
2251 
2252 	CTL_DEBUG_PRINT(("ctl_report_all_rod_tokens\n"));
2253 
2254 	cdb = (struct scsi_report_all_rod_tokens *)ctsio->cdb;
2255 	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
2256 	softc = lun->ctl_softc;
2257 
2258 	retval = CTL_RETVAL_COMPLETE;
2259 
2260 	tokens = 0;
2261 	mtx_lock(&softc->tpc_lock);
2262 	TAILQ_FOREACH(token, &softc->tpc_tokens, links)
2263 		tokens++;
2264 	mtx_unlock(&softc->tpc_lock);
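	/* Cap the reply at 512 tokens to bound the allocation. */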
2265 	if (tokens > 512)
2266 		tokens = 512;
2267 
2268 	total_len = sizeof(*data) + tokens * 96;
2269 	alloc_len = scsi_4btoul(cdb->length);
2270 
2271 	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
2272 
2273 	ctsio->kern_sg_entries = 0;
2274 
2275 	if (total_len < alloc_len) {
2276 		ctsio->residual = alloc_len - total_len;
2277 		ctsio->kern_data_len = total_len;
2278 		ctsio->kern_total_len = total_len;
2279 	} else {
2280 		ctsio->residual = 0;
2281 		ctsio->kern_data_len = alloc_len;
2282 		ctsio->kern_total_len = alloc_len;
2283 	}
2284 	ctsio->kern_data_resid = 0;
2285 	ctsio->kern_rel_offset = 0;
2286 
2287 	data = (struct scsi_report_all_rod_tokens_data *)ctsio->kern_data_ptr;
2288 	i = 0;
2289 	mtx_lock(&softc->tpc_lock);
2290 	TAILQ_FOREACH(token, &softc->tpc_tokens, links) {
2291 		if (i >= tokens)
2292 			break;
2293 		memcpy(&data->rod_management_token_list[i * 96],
2294 		    token->token, 96);
2295 		i++;
2296 	}
2297 	mtx_unlock(&softc->tpc_lock);
2298 	scsi_ulto4b(sizeof(*data) - 4 + i * 96, data->available_data);
2299 /*
2300 	printf("RART tokens=%d\n", i);
2301 */
2302 	ctl_set_success(ctsio);
2303 	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
2304 	ctsio->be_move_done = ctl_config_move_done;
2305 	ctl_datamove((union ctl_io *)ctsio);
2306 	return (retval);
2307 }
2308 
2309