1 /*	$OpenBSD: atascsi.c,v 1.119 2014/07/12 18:48:17 tedu Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
6  * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/proc.h>
28 #include <sys/queue.h>
29 #include <sys/pool.h>
30 
31 #include <scsi/scsi_all.h>
32 #include <scsi/scsi_disk.h>
33 #include <scsi/scsiconf.h>
34 
35 #include <dev/ata/atascsi.h>
36 #include <dev/ata/pmreg.h>
37 
38 #include <sys/ataio.h>
39 
40 struct atascsi_port;
41 
42 struct atascsi {
43 	struct device		*as_dev;
44 	void			*as_cookie;
45 
46 	struct atascsi_host_port **as_host_ports;
47 
48 	struct atascsi_methods	*as_methods;
49 	struct scsi_adapter	as_switch;
50 	struct scsi_link	as_link;
51 	struct scsibus_softc	*as_scsibus;
52 
53 	int			as_capability;
54 	int			as_ncqdepth;
55 };
56 
57 /*
58  * atascsi_host_port is a port attached to the host controller, and
59  * only holds the details relevant to the host controller.
60  * atascsi_port is any port, including ports on port multipliers, and
61  * it holds details of the device attached to the port.
62  *
63  * When there is a port multiplier attached to a port, the ahp_ports
64  * array in the atascsi_host_port struct contains one atascsi_port for
65  * each port, and one for the control port (port 15).  The index into
66  * the array is the LUN used to address the port.  For the control port,
67  * the LUN is 0, and for the port multiplier ports, the LUN is the
68  * port number plus one.
69  *
70  * When there is no port multiplier attached to a port, the ahp_ports
71  * array contains a single entry for the device.  The LUN and port number
72  * for this entry are both 0.
73  */
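/*
 * For example, with a port multiplier attached, LUN 0 addresses the
 * PMP control port (port 15) and LUN 4 addresses the device on PMP
 * port 3; without a port multiplier, the device is simply LUN 0.
 */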
74 
75 struct atascsi_host_port {
76 	struct scsi_iopool	ahp_iopool;
77 	struct atascsi		*ahp_as;
78 	int			ahp_port;
79 	int			ahp_nports;
80 
81 	struct atascsi_port	**ahp_ports;
82 };
83 
84 struct atascsi_port {
85 	struct ata_identify	ap_identify;
86 	struct atascsi_host_port *ap_host_port;
87 	struct atascsi		*ap_as;
88 	int			ap_pmp_port;
89 	int			ap_type;
90 	int			ap_ncqdepth;
91 	int			ap_features;
92 #define ATA_PORT_F_NCQ			0x1
93 #define ATA_PORT_F_TRIM			0x2
94 };
95 
96 void		atascsi_cmd(struct scsi_xfer *);
97 int		atascsi_probe(struct scsi_link *);
98 void		atascsi_free(struct scsi_link *);
99 
100 /* template */
101 struct scsi_adapter atascsi_switch = {
102 	atascsi_cmd,		/* scsi_cmd */
103 	scsi_minphys,		/* scsi_minphys */
104 	atascsi_probe,		/* dev_probe */
105 	atascsi_free,		/* dev_free */
106 	NULL,			/* ioctl */
107 };
108 
109 void		ata_swapcopy(void *, void *, size_t);
110 
111 void		atascsi_disk_cmd(struct scsi_xfer *);
112 void		atascsi_disk_cmd_done(struct ata_xfer *);
113 void		atascsi_disk_inq(struct scsi_xfer *);
114 void		atascsi_disk_inquiry(struct scsi_xfer *);
115 void		atascsi_disk_vpd_supported(struct scsi_xfer *);
116 void		atascsi_disk_vpd_serial(struct scsi_xfer *);
117 void		atascsi_disk_vpd_ident(struct scsi_xfer *);
118 void		atascsi_disk_vpd_ata(struct scsi_xfer *);
119 void		atascsi_disk_vpd_limits(struct scsi_xfer *);
120 void		atascsi_disk_vpd_info(struct scsi_xfer *);
121 void		atascsi_disk_vpd_thin(struct scsi_xfer *);
122 void		atascsi_disk_write_same_16(struct scsi_xfer *);
123 void		atascsi_disk_write_same_16_done(struct ata_xfer *);
124 void		atascsi_disk_unmap(struct scsi_xfer *);
125 void		atascsi_disk_unmap_task(void *, void *);
126 void		atascsi_disk_unmap_done(struct ata_xfer *);
127 void		atascsi_disk_capacity(struct scsi_xfer *);
128 void		atascsi_disk_capacity16(struct scsi_xfer *);
129 void		atascsi_disk_sync(struct scsi_xfer *);
130 void		atascsi_disk_sync_done(struct ata_xfer *);
131 void		atascsi_disk_sense(struct scsi_xfer *);
132 void		atascsi_disk_start_stop(struct scsi_xfer *);
133 void		atascsi_disk_start_stop_done(struct ata_xfer *);
134 
135 void		atascsi_atapi_cmd(struct scsi_xfer *);
136 void		atascsi_atapi_cmd_done(struct ata_xfer *);
137 
138 void		atascsi_pmp_cmd(struct scsi_xfer *);
139 void		atascsi_pmp_cmd_done(struct ata_xfer *);
140 void		atascsi_pmp_sense(struct scsi_xfer *xs);
141 void		atascsi_pmp_inq(struct scsi_xfer *xs);
142 
143 
144 void		atascsi_passthru_12(struct scsi_xfer *);
145 void		atascsi_passthru_16(struct scsi_xfer *);
146 int		atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
147 void		atascsi_passthru_done(struct ata_xfer *);
148 
149 void		atascsi_done(struct scsi_xfer *, int);
150 
151 void		ata_exec(struct atascsi *, struct ata_xfer *);
152 
153 void		ata_polled_complete(struct ata_xfer *);
154 int		ata_polled(struct ata_xfer *);
155 
156 u_int64_t	ata_identify_blocks(struct ata_identify *);
157 u_int		ata_identify_blocksize(struct ata_identify *);
158 u_int		ata_identify_block_l2p_exp(struct ata_identify *);
159 u_int		ata_identify_block_logical_align(struct ata_identify *);
160 
161 void		*atascsi_io_get(void *);
162 void		atascsi_io_put(void *, void *);
163 struct atascsi_port * atascsi_lookup_port(struct scsi_link *);
164 
165 int		atascsi_port_identify(struct atascsi_port *,
166 		    struct ata_identify *);
167 int		atascsi_port_set_features(struct atascsi_port *, int, int);
168 
169 
170 struct atascsi *
171 atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
172 {
173 	struct scsibus_attach_args	saa;
174 	struct atascsi			*as;
175 
176 	as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);
177 
178 	as->as_dev = self;
179 	as->as_cookie = aaa->aaa_cookie;
180 	as->as_methods = aaa->aaa_methods;
181 	as->as_capability = aaa->aaa_capability;
182 	as->as_ncqdepth = aaa->aaa_ncmds;
183 
184 	/* copy from template and modify for ourselves */
185 	as->as_switch = atascsi_switch;
186 	if (aaa->aaa_minphys != NULL)
187 		as->as_switch.scsi_minphys = aaa->aaa_minphys;
188 
189 	/* fill in our scsi_link */
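	/*
	 * Each SCSI target is a host port and each LUN addresses a
	 * device behind a port multiplier (see the comment above
	 * struct atascsi_host_port); adapter_target is set past the
	 * end of the bus so the adapter itself never shows up as a
	 * target.
	 */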
190 	as->as_link.adapter = &as->as_switch;
191 	as->as_link.adapter_softc = as;
192 	as->as_link.adapter_buswidth = aaa->aaa_nports;
193 	as->as_link.luns = SATA_PMP_MAX_PORTS;
194 	as->as_link.adapter_target = aaa->aaa_nports;
195 	as->as_link.openings = 1;
196 
197 	as->as_host_ports = malloc(sizeof(struct atascsi_host_port *) *
198 	    aaa->aaa_nports, M_DEVBUF, M_WAITOK | M_ZERO);
199 
200 	bzero(&saa, sizeof(saa));
201 	saa.saa_sc_link = &as->as_link;
202 
203 	/* stash the scsibus so we can do hotplug on it */
204 	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
205 	    scsiprint);
206 
207 	return (as);
208 }
209 
210 int
211 atascsi_detach(struct atascsi *as, int flags)
212 {
213 	int				rv;
214 
215 	rv = config_detach((struct device *)as->as_scsibus, flags);
216 	if (rv != 0)
217 		return (rv);
218 
219 	free(as->as_host_ports, M_DEVBUF, 0);
220 	free(as, M_DEVBUF, 0);
221 
222 	return (0);
223 }
224 
225 int
226 atascsi_probe_dev(struct atascsi *as, int port, int lun)
227 {
228 	if (lun == 0) {
229 		return (scsi_probe_target(as->as_scsibus, port));
230 	} else {
231 		return (scsi_probe_lun(as->as_scsibus, port, lun));
232 	}
233 }
234 
235 int
236 atascsi_detach_dev(struct atascsi *as, int port, int lun, int flags)
237 {
238 	if (lun == 0) {
239 		return (scsi_detach_target(as->as_scsibus, port, flags));
240 	} else {
241 		return (scsi_detach_lun(as->as_scsibus, port, lun, flags));
242 	}
243 }
244 
245 struct atascsi_port *
246 atascsi_lookup_port(struct scsi_link *link)
247 {
248 	struct atascsi 			*as = link->adapter_softc;
249 	struct atascsi_host_port 	*ahp;
250 
251 	if (link->target >= as->as_link.adapter_buswidth)
252 		return (NULL);
253 
254 	ahp = as->as_host_ports[link->target];
255 	if (link->lun >= ahp->ahp_nports)
256 		return (NULL);
257 
258 	return (ahp->ahp_ports[link->lun]);
259 }
260 
261 int
262 atascsi_probe(struct scsi_link *link)
263 {
264 	struct atascsi			*as = link->adapter_softc;
265 	struct atascsi_host_port 	*ahp;
266 	struct atascsi_port		*ap;
267 	struct ata_xfer			*xa;
268 	struct ata_identify		*identify;
269 	int				port, type, qdepth;
270 	int				rv;
271 	u_int16_t			cmdset;
272 
273 	port = link->target;
274 	if (port >= as->as_link.adapter_buswidth)
275 		return (ENXIO);
276 
277 	/* if this is a PMP port, check it's valid */
278 	if (link->lun > 0) {
279 		if (link->lun >= as->as_host_ports[port]->ahp_nports)
280 			return (ENXIO);
281 	}
282 
283 	type = as->as_methods->probe(as->as_cookie, port, link->lun);
284 	switch (type) {
285 	case ATA_PORT_T_DISK:
286 		break;
287 	case ATA_PORT_T_ATAPI:
288 		link->flags |= SDEV_ATAPI;
289 		link->quirks |= SDEV_ONLYBIG;
290 		break;
291 	case ATA_PORT_T_PM:
292 		if (link->lun != 0) {
293 			printf("%s.%d.%d: Port multipliers cannot be nested\n",
294 			    as->as_dev->dv_xname, port, link->lun);
295 			rv = ENODEV;
296 			goto unsupported;
297 		}
298 		break;
299 	default:
300 		rv = ENODEV;
301 		goto unsupported;
302 	}
303 
304 	ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
305 	ap->ap_as = as;
306 
307 	if (link->lun == 0) {
308 		ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
309 		ahp->ahp_as = as;
310 		ahp->ahp_port = port;
311 
312 		scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
313 		    atascsi_io_put);
314 
315 		as->as_host_ports[port] = ahp;
316 
317 		if (type == ATA_PORT_T_PM) {
318 			ahp->ahp_nports = SATA_PMP_MAX_PORTS;
319 			ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
320 		} else {
321 			ahp->ahp_nports = 1;
322 			ap->ap_pmp_port = 0;
323 		}
324 		ahp->ahp_ports = malloc(sizeof(struct atascsi_port *) *
325 		    ahp->ahp_nports, M_DEVBUF, M_WAITOK | M_ZERO);
326 	} else {
327 		ahp = as->as_host_ports[port];
328 		ap->ap_pmp_port = link->lun - 1;
329 	}
330 
331 	ap->ap_host_port = ahp;
332 	ap->ap_type = type;
333 
334 	link->pool = &ahp->ahp_iopool;
335 
336 	/* fetch the device info, except for port multipliers */
337 	if (type != ATA_PORT_T_PM) {
338 
339 		/* devices attached to port multipliers tend not to be
340 		 * spun up at this point, and sometimes this prevents
341 		 * identification from working, so we retry a few times
342 		 * with a fairly long delay.
343 		 */
344 
345 		identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);
346 
347 		int count = (link->lun > 0) ? 6 : 2;
348 		while (count--) {
349 			rv = atascsi_port_identify(ap, identify);
350 			if (rv == 0) {
351 				ap->ap_identify = *identify;
352 				break;
353 			}
354 			if (count > 0)
355 				delay(5000000);
356 		}
357 
358 		dma_free(identify, sizeof(*identify));
359 
360 		if (rv != 0) {
361 			goto error;
362 		}
363 	}
364 
365 	ahp->ahp_ports[link->lun] = ap;
366 
367 	if (type != ATA_PORT_T_DISK)
368 		return (0);
369 
370 	if (as->as_capability & ASAA_CAP_NCQ &&
371 	    ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
372 	    (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
373 		ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
374 		qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
375 		if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
376 			qdepth--;
377 
378 		if (qdepth > 1) {
379 			SET(ap->ap_features, ATA_PORT_F_NCQ);
380 
381 			/* Raise the number of openings */
382 			link->openings = qdepth;
383 
384 			/*
385 			 * XXX for directly attached devices, throw away any xfers
386 			 * that have tag numbers higher than what the device supports.
387 			 */
388 			if (link->lun == 0) {
389 				while (qdepth--) {
390 					xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
391 					if (xa->tag < link->openings) {
392 						xa->state = ATA_S_COMPLETE;
393 						scsi_io_put(&ahp->ahp_iopool, xa);
394 					}
395 				}
396 			}
397 		}
398 	}
399 
400 	if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
401 	    ATA_ID_DATA_SET_MGMT_TRIM))
402 		SET(ap->ap_features, ATA_PORT_F_TRIM);
403 
404 	cmdset = letoh16(ap->ap_identify.cmdset82);
405 
406 	/* Enable write cache if supported */
407 	if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
408 		/* We don't care if it fails. */
409 		(void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
410 	}
411 
412 	/* Enable read lookahead if supported */
413 	if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
414 		/* We don't care if it fails. */
415 		(void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
416 	}
417 
418 	/*
419 	 * FREEZE LOCK the device so malicious users can't lock it on us.
420 	 * As there is no harm in issuing this to devices that don't
421 	 * support the security feature set, we just send it and don't
422 	 * bother checking whether the device sends a command abort to
423 	 * tell us it doesn't support it.
424 	 */
425 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
426 	if (xa == NULL)
427 		panic("no free xfers on a new port");
428 	xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
429 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
430 	xa->flags = ATA_F_POLL;
431 	xa->timeout = 1000;
432 	xa->complete = ata_polled_complete;
433 	xa->pmp_port = ap->ap_pmp_port;
434 	xa->atascsi_private = &ahp->ahp_iopool;
435 	ata_exec(as, xa);
436 	ata_polled(xa); /* we don't care if it doesn't work */
437 
438 	return (0);
439 error:
440 	free(ap, M_DEVBUF, 0);
441 unsupported:
442 
443 	as->as_methods->free(as->as_cookie, port, link->lun);
444 	return (rv);
445 }
446 
447 void
448 atascsi_free(struct scsi_link *link)
449 {
450 	struct atascsi			*as = link->adapter_softc;
451 	struct atascsi_host_port	*ahp;
452 	struct atascsi_port		*ap;
453 	int				port;
454 
455 	port = link->target;
456 	if (port >= as->as_link.adapter_buswidth)
457 		return;
458 
459 	ahp = as->as_host_ports[port];
460 	if (ahp == NULL)
461 		return;
462 
463 	if (link->lun >= ahp->ahp_nports)
464 		return;
465 
466 	ap = ahp->ahp_ports[link->lun];
467 	free(ap, M_DEVBUF, 0);
468 	ahp->ahp_ports[link->lun] = NULL;
469 
470 	as->as_methods->free(as->as_cookie, port, link->lun);
471 
472 	if (link->lun == ahp->ahp_nports - 1) {
473 		/* we've already freed all of ahp->ahp_ports, now
474 		 * free ahp itself.  this relies on the order luns are
475 		 * detached in scsi_detach_target().
476 		 */
477 		free(ahp, M_DEVBUF, 0);
478 		as->as_host_ports[port] = NULL;
479 	}
480 }
481 
482 void
483 atascsi_cmd(struct scsi_xfer *xs)
484 {
485 	struct scsi_link	*link = xs->sc_link;
486 	struct atascsi_port	*ap;
487 
488 	ap = atascsi_lookup_port(link);
489 	if (ap == NULL) {
490 		atascsi_done(xs, XS_DRIVER_STUFFUP);
491 		return;
492 	}
493 
494 	switch (ap->ap_type) {
495 	case ATA_PORT_T_DISK:
496 		atascsi_disk_cmd(xs);
497 		break;
498 	case ATA_PORT_T_ATAPI:
499 		atascsi_atapi_cmd(xs);
500 		break;
501 	case ATA_PORT_T_PM:
502 		atascsi_pmp_cmd(xs);
503 		break;
504 
505 	case ATA_PORT_T_NONE:
506 	default:
507 		atascsi_done(xs, XS_DRIVER_STUFFUP);
508 		break;
509 	}
510 }
511 
512 void
513 atascsi_disk_cmd(struct scsi_xfer *xs)
514 {
515 	struct scsi_link	*link = xs->sc_link;
516 	struct atascsi		*as = link->adapter_softc;
517 	struct atascsi_port	*ap;
518 	struct ata_xfer		*xa = xs->io;
519 	int			flags = 0;
520 	struct ata_fis_h2d	*fis;
521 	u_int64_t		lba;
522 	u_int32_t		sector_count;
523 
524 	ap = atascsi_lookup_port(link);
525 
526 	switch (xs->cmd->opcode) {
527 	case READ_COMMAND:
528 	case READ_BIG:
529 	case READ_12:
530 	case READ_16:
531 		flags = ATA_F_READ;
532 		break;
533 	case WRITE_COMMAND:
534 	case WRITE_BIG:
535 	case WRITE_12:
536 	case WRITE_16:
537 		flags = ATA_F_WRITE;
538 		/* deal with io outside the switch */
539 		break;
540 
541 	case WRITE_SAME_16:
542 		atascsi_disk_write_same_16(xs);
543 		return;
544 	case UNMAP:
545 		atascsi_disk_unmap(xs);
546 		return;
547 
548 	case SYNCHRONIZE_CACHE:
549 		atascsi_disk_sync(xs);
550 		return;
551 	case REQUEST_SENSE:
552 		atascsi_disk_sense(xs);
553 		return;
554 	case INQUIRY:
555 		atascsi_disk_inq(xs);
556 		return;
557 	case READ_CAPACITY:
558 		atascsi_disk_capacity(xs);
559 		return;
560 	case READ_CAPACITY_16:
561 		atascsi_disk_capacity16(xs);
562 		return;
563 
564 	case ATA_PASSTHRU_12:
565 		atascsi_passthru_12(xs);
566 		return;
567 	case ATA_PASSTHRU_16:
568 		atascsi_passthru_16(xs);
569 		return;
570 
571 	case START_STOP:
572 		atascsi_disk_start_stop(xs);
573 		return;
574 
575 	case TEST_UNIT_READY:
576 	case PREVENT_ALLOW:
577 		atascsi_done(xs, XS_NOERROR);
578 		return;
579 
580 	default:
581 		atascsi_done(xs, XS_DRIVER_STUFFUP);
582 		return;
583 	}
584 
585 	xa->flags = flags;
586 	scsi_cmd_rw_decode(xs->cmd, &lba, &sector_count);
587 	if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
588 		atascsi_done(xs, XS_DRIVER_STUFFUP);
589 		return;
590 	}
591 
592 	fis = xa->fis;
593 
594 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
595 	fis->lba_low = lba & 0xff;
596 	fis->lba_mid = (lba >> 8) & 0xff;
597 	fis->lba_high = (lba >> 16) & 0xff;
598 
599 	if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
600 	    (xa->tag < ap->ap_ncqdepth) &&
601 	    !(xs->flags & SCSI_POLL)) {
602 		/* Use NCQ */
603 		xa->flags |= ATA_F_NCQ;
604 		fis->command = (xa->flags & ATA_F_WRITE) ?
605 		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
606 		fis->device = ATA_H2D_DEVICE_LBA;
607 		fis->lba_low_exp = (lba >> 24) & 0xff;
608 		fis->lba_mid_exp = (lba >> 32) & 0xff;
609 		fis->lba_high_exp = (lba >> 40) & 0xff;
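		/*
		 * For FPDMA (NCQ) commands the tag lives in bits 7:3 of
		 * the count field, and the sector count moves to the
		 * features fields.
		 */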
610 		fis->sector_count = xa->tag << 3;
611 		fis->features = sector_count & 0xff;
612 		fis->features_exp = (sector_count >> 8) & 0xff;
613 	} else if (sector_count > 0x100 || lba > 0xfffffff) {
614 		/* Use LBA48 */
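		/*
		 * 28-bit commands address at most LBA 0xfffffff and
		 * transfer at most 256 sectors (a count of 0 means 256),
		 * so anything larger uses the EXT opcodes.
		 */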
615 		fis->command = (xa->flags & ATA_F_WRITE) ?
616 		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
617 		fis->device = ATA_H2D_DEVICE_LBA;
618 		fis->lba_low_exp = (lba >> 24) & 0xff;
619 		fis->lba_mid_exp = (lba >> 32) & 0xff;
620 		fis->lba_high_exp = (lba >> 40) & 0xff;
621 		fis->sector_count = sector_count & 0xff;
622 		fis->sector_count_exp = (sector_count >> 8) & 0xff;
623 	} else {
624 		/* Use LBA */
625 		fis->command = (xa->flags & ATA_F_WRITE) ?
626 		    ATA_C_WRITEDMA : ATA_C_READDMA;
627 		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
628 		fis->sector_count = sector_count & 0xff;
629 	}
630 
631 	xa->data = xs->data;
632 	xa->datalen = xs->datalen;
633 	xa->complete = atascsi_disk_cmd_done;
634 	xa->timeout = xs->timeout;
635 	xa->pmp_port = ap->ap_pmp_port;
636 	xa->atascsi_private = xs;
637 	if (xs->flags & SCSI_POLL)
638 		xa->flags |= ATA_F_POLL;
639 
640 	ata_exec(as, xa);
641 }
642 
643 void
644 atascsi_disk_cmd_done(struct ata_xfer *xa)
645 {
646 	struct scsi_xfer	*xs = xa->atascsi_private;
647 
648 	switch (xa->state) {
649 	case ATA_S_COMPLETE:
650 		xs->error = XS_NOERROR;
651 		break;
652 	case ATA_S_ERROR:
653 		/* fake sense? */
654 		xs->error = XS_DRIVER_STUFFUP;
655 		break;
656 	case ATA_S_TIMEOUT:
657 		xs->error = XS_TIMEOUT;
658 		break;
659 	default:
660 		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
661 		    xa->state);
662 	}
663 
664 	xs->resid = xa->resid;
665 
666 	scsi_done(xs);
667 }
668 
669 void
670 atascsi_disk_inq(struct scsi_xfer *xs)
671 {
672 	struct scsi_inquiry	*inq = (struct scsi_inquiry *)xs->cmd;
673 
674 	if (xs->cmdlen != sizeof(*inq)) {
675 		atascsi_done(xs, XS_DRIVER_STUFFUP);
676 		return;
677 	}
678 
679 	if (ISSET(inq->flags, SI_EVPD)) {
680 		switch (inq->pagecode) {
681 		case SI_PG_SUPPORTED:
682 			atascsi_disk_vpd_supported(xs);
683 			break;
684 		case SI_PG_SERIAL:
685 			atascsi_disk_vpd_serial(xs);
686 			break;
687 		case SI_PG_DEVID:
688 			atascsi_disk_vpd_ident(xs);
689 			break;
690 		case SI_PG_ATA:
691 			atascsi_disk_vpd_ata(xs);
692 			break;
693 		case SI_PG_DISK_LIMITS:
694 			atascsi_disk_vpd_limits(xs);
695 			break;
696 		case SI_PG_DISK_INFO:
697 			atascsi_disk_vpd_info(xs);
698 			break;
699 		case SI_PG_DISK_THIN:
700 			atascsi_disk_vpd_thin(xs);
701 			break;
702 		default:
703 			atascsi_done(xs, XS_DRIVER_STUFFUP);
704 			break;
705 		}
706 	} else
707 		atascsi_disk_inquiry(xs);
708 }
709 
710 void
711 atascsi_disk_inquiry(struct scsi_xfer *xs)
712 {
713 	struct scsi_inquiry_data inq;
714 	struct scsi_link        *link = xs->sc_link;
715 	struct atascsi_port	*ap;
716 
717 	ap = atascsi_lookup_port(link);
718 
719 	bzero(&inq, sizeof(inq));
720 
721 	inq.device = T_DIRECT;
722 	inq.version = 0x05; /* SPC-3 */
723 	inq.response_format = 2;
724 	inq.additional_length = 32;
725 	inq.flags |= SID_CmdQue;
726 	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
727 	ata_swapcopy(ap->ap_identify.model, inq.product,
728 	    sizeof(inq.product));
729 	ata_swapcopy(ap->ap_identify.firmware, inq.revision,
730 	    sizeof(inq.revision));
731 
732 	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
733 
734 	atascsi_done(xs, XS_NOERROR);
735 }
736 
737 void
738 atascsi_disk_vpd_supported(struct scsi_xfer *xs)
739 {
740 	struct {
741 		struct scsi_vpd_hdr	hdr;
742 		u_int8_t		list[7];
743 	}			pg;
744 	struct scsi_link        *link = xs->sc_link;
745 	struct atascsi_port	*ap;
746 	int			fat;
747 
748 	ap = atascsi_lookup_port(link);
749 	fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;
750 
751 	bzero(&pg, sizeof(pg));
752 
753 	pg.hdr.device = T_DIRECT;
754 	pg.hdr.page_code = SI_PG_SUPPORTED;
755 	_lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
756 	pg.list[0] = SI_PG_SUPPORTED;
757 	pg.list[1] = SI_PG_SERIAL;
758 	pg.list[2] = SI_PG_DEVID;
759 	pg.list[3] = SI_PG_ATA;
760 	pg.list[4] = SI_PG_DISK_LIMITS;
761 	pg.list[5] = SI_PG_DISK_INFO;
762 	pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat. get it? tehe. */
763 
764 	bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));
765 
766 	atascsi_done(xs, XS_NOERROR);
767 }
768 
769 void
770 atascsi_disk_vpd_serial(struct scsi_xfer *xs)
771 {
772 	struct scsi_link        *link = xs->sc_link;
773 	struct atascsi_port	*ap;
774 	struct scsi_vpd_serial	pg;
775 
776 	ap = atascsi_lookup_port(link);
777 	bzero(&pg, sizeof(pg));
778 
779 	pg.hdr.device = T_DIRECT;
780 	pg.hdr.page_code = SI_PG_SERIAL;
781 	_lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
782 	ata_swapcopy(ap->ap_identify.serial, pg.serial,
783 	    sizeof(ap->ap_identify.serial));
784 
785 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
786 
787 	atascsi_done(xs, XS_NOERROR);
788 }
789 
790 void
791 atascsi_disk_vpd_ident(struct scsi_xfer *xs)
792 {
793 	struct scsi_link        *link = xs->sc_link;
794 	struct atascsi_port	*ap;
795 	struct {
796 		struct scsi_vpd_hdr	hdr;
797 		struct scsi_vpd_devid_hdr devid_hdr;
798 		u_int8_t		devid[68];
799 	}			pg;
800 	u_int8_t		*p;
801 	size_t			pg_len;
802 
803 	ap = atascsi_lookup_port(link);
804 	bzero(&pg, sizeof(pg));
805 	if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
806 		pg_len = 8;
807 
808 		pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
809 		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;
810 
811 		ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
812 	} else {
813 		pg_len = 68;
814 
815 		pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
816 		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;
817 
818 		p = pg.devid;
819 		bcopy("ATA     ", p, 8);
820 		p += 8;
821 		ata_swapcopy(ap->ap_identify.model, p,
822 		    sizeof(ap->ap_identify.model));
823 		p += sizeof(ap->ap_identify.model);
824 		ata_swapcopy(ap->ap_identify.serial, p,
825 		    sizeof(ap->ap_identify.serial));
826 	}
827 
828 	pg.devid_hdr.len = pg_len;
829 	pg_len += sizeof(pg.devid_hdr);
830 
831 	pg.hdr.device = T_DIRECT;
832 	pg.hdr.page_code = SI_PG_DEVID;
833 	_lto2b(pg_len, pg.hdr.page_length);
834 	pg_len += sizeof(pg.hdr);
835 
836 	bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));
837 
838 	atascsi_done(xs, XS_NOERROR);
839 }
840 
841 void
842 atascsi_disk_vpd_ata(struct scsi_xfer *xs)
843 {
844 	struct scsi_link        *link = xs->sc_link;
845 	struct atascsi_port	*ap;
846 	struct scsi_vpd_ata	pg;
847 
848 	ap = atascsi_lookup_port(link);
849 	bzero(&pg, sizeof(pg));
850 
851 	pg.hdr.device = T_DIRECT;
852 	pg.hdr.page_code = SI_PG_ATA;
853 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
854 
855 	memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
856 	memcpy(pg.sat_vendor, "OpenBSD",
857 	    MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
858 	memset(pg.sat_product, ' ', sizeof(pg.sat_product));
859 	memcpy(pg.sat_product, "atascsi",
860 	    MIN(strlen("atascsi"), sizeof(pg.sat_product)));
861 	memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
862 	memcpy(pg.sat_revision, osrelease,
863 	    MIN(strlen(osrelease), sizeof(pg.sat_revision)));
864 
865 	/* XXX device signature */
866 
867 	switch (ap->ap_type) {
868 	case ATA_PORT_T_DISK:
869 		pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
870 		break;
871 	case ATA_PORT_T_ATAPI:
872 		pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
873 		break;
874 	}
875 
876 	memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));
877 
878 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
879 
880 	atascsi_done(xs, XS_NOERROR);
881 }
882 
883 void
884 atascsi_disk_vpd_limits(struct scsi_xfer *xs)
885 {
886 	struct scsi_link        *link = xs->sc_link;
887 	struct atascsi_port	*ap;
888 	struct scsi_vpd_disk_limits pg;
889 
890 	ap = atascsi_lookup_port(link);
891 	bzero(&pg, sizeof(pg));
892 	pg.hdr.device = T_DIRECT;
893 	pg.hdr.page_code = SI_PG_DISK_LIMITS;
894 	_lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);
895 
896 	_lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
897 	    pg.optimal_xfer_granularity);
898 
899 	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
900 		/*
901 		 * ATA only supports 65535 blocks per TRIM descriptor, so use
902 		 * that as the max to avoid having to split UNMAP descriptors
903 		 * and overflow the page limit.
904 		 */
905 		_lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
906 		_lto4b(512 / 8, pg.max_unmap_desc_count);
907 	}
908 
909 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
910 
911 	atascsi_done(xs, XS_NOERROR);
912 }
913 
914 void
915 atascsi_disk_vpd_info(struct scsi_xfer *xs)
916 {
917 	struct scsi_link        *link = xs->sc_link;
918 	struct atascsi_port	*ap;
919 	struct scsi_vpd_disk_info pg;
920 
921 	ap = atascsi_lookup_port(link);
922 	bzero(&pg, sizeof(pg));
923 	pg.hdr.device = T_DIRECT;
924 	pg.hdr.page_code = SI_PG_DISK_INFO;
925 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
926 
927 	_lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
928 	pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;
929 
930 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
931 
932 	atascsi_done(xs, XS_NOERROR);
933 }
934 
935 void
936 atascsi_disk_vpd_thin(struct scsi_xfer *xs)
937 {
938 	struct scsi_link        *link = xs->sc_link;
939 	struct atascsi_port	*ap;
940 	struct scsi_vpd_disk_thin pg;
941 
942 	ap = atascsi_lookup_port(link);
943 	if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
944 		atascsi_done(xs, XS_DRIVER_STUFFUP);
945 		return;
946 	}
947 
948 	bzero(&pg, sizeof(pg));
949 	pg.hdr.device = T_DIRECT;
950 	pg.hdr.page_code = SI_PG_DISK_THIN;
951 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
952 
953 	pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;
954 
955 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
956 
957 	atascsi_done(xs, XS_NOERROR);
958 }
959 
960 void
961 atascsi_disk_write_same_16(struct scsi_xfer *xs)
962 {
963 	struct scsi_link	*link = xs->sc_link;
964 	struct atascsi		*as = link->adapter_softc;
965 	struct atascsi_port	*ap;
966 	struct scsi_write_same_16 *cdb;
967 	struct ata_xfer		*xa = xs->io;
968 	struct ata_fis_h2d	*fis;
969 	u_int64_t		lba;
970 	u_int32_t		length;
971 	u_int64_t		desc;
972 
973 	if (xs->cmdlen != sizeof(*cdb)) {
974 		atascsi_done(xs, XS_DRIVER_STUFFUP);
975 		return;
976 	}
977 
978 	ap = atascsi_lookup_port(link);
979 	cdb = (struct scsi_write_same_16 *)xs->cmd;
980 
981 	if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
982 	   !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
983 		/* generate sense data */
984 		atascsi_done(xs, XS_DRIVER_STUFFUP);
985 		return;
986 	}
987 
988 	if (xs->datalen < 512) {
989 		/* generate sense data */
990 		atascsi_done(xs, XS_DRIVER_STUFFUP);
991 		return;
992 	}
993 
994 	lba = _8btol(cdb->lba);
995 	length = _4btol(cdb->length);
996 
997 	if (length > ATA_DSM_TRIM_MAX_LEN) {
998 		/* XXX we don't support requests over 65535 blocks */
999 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1000 		return;
1001 	}
1002 
1003 	xa->data = xs->data;
1004 	xa->datalen = 512;
1005 	xa->flags = ATA_F_WRITE;
1006 	xa->pmp_port = ap->ap_pmp_port;
1007 	if (xs->flags & SCSI_POLL)
1008 		xa->flags |= ATA_F_POLL;
1009 	xa->complete = atascsi_disk_write_same_16_done;
1010 	xa->atascsi_private = xs;
1011 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1012 
1013 	/* TRIM sends a list of blocks to discard in the databuf. */
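	/*
	 * Each 8-byte DSM range entry packs a 48-bit starting LBA in
	 * the low bits and a 16-bit sector count in the high bits;
	 * ATA_DSM_TRIM_DESC() does the packing.
	 */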
1014 	memset(xa->data, 0, xa->datalen);
1015 	desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
1016 	memcpy(xa->data, &desc, sizeof(desc));
1017 
1018 	fis = xa->fis;
1019 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1020 	fis->command = ATA_C_DSM;
1021 	fis->features = ATA_DSM_TRIM;
1022 	fis->sector_count = 1;
1023 
1024 	ata_exec(as, xa);
1025 }
1026 
1027 void
1028 atascsi_disk_write_same_16_done(struct ata_xfer *xa)
1029 {
1030 	struct scsi_xfer	*xs = xa->atascsi_private;
1031 
1032 	switch (xa->state) {
1033 	case ATA_S_COMPLETE:
1034 		xs->error = XS_NOERROR;
1035 		break;
1036 	case ATA_S_ERROR:
1037 		xs->error = XS_DRIVER_STUFFUP;
1038 		break;
1039 	case ATA_S_TIMEOUT:
1040 		xs->error = XS_TIMEOUT;
1041 		break;
1042 
1043 	default:
1044 		panic("atascsi_disk_write_same_16_done: "
1045 		    "unexpected ata_xfer state (%d)", xa->state);
1046 	}
1047 
1048 	scsi_done(xs);
1049 }
1050 
1051 void
1052 atascsi_disk_unmap(struct scsi_xfer *xs)
1053 {
1054 	struct ata_xfer		*xa = xs->io;
1055 	struct scsi_unmap	*cdb;
1056 	struct scsi_unmap_data	*unmap;
1057 	u_int			len;
1058 
1059 	if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb)) {
1060 		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}
1061 
1062 	cdb = (struct scsi_unmap *)xs->cmd;
1063 	len = _2btol(cdb->list_len);
1064 	if (xs->datalen != len || len < sizeof(*unmap)) {
1065 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1066 		return;
1067 	}
1068 
1069 	unmap = (struct scsi_unmap_data *)xs->data;
1070 	if (_2btol(unmap->data_length) != len) {
1071 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1072 		return;
1073 	}
1074 
1075 	len = _2btol(unmap->desc_length);
1076 	if (len != xs->datalen - sizeof(*unmap)) {
1077 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1078 		return;
1079 	}
1080 
1081 	if (len < sizeof(struct scsi_unmap_desc)) {
1082 		/* no work, no error according to SBC-3 */
1083 		atascsi_done(xs, XS_NOERROR);
		return;
1084 	}
1085 
1086 	if (len > sizeof(struct scsi_unmap_desc) * 64) {
1087 		/* more work than we advertised */
1088 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1089 		return;
1090 	}
1091 
1092 	/* let's go */
1093 	if (ISSET(xs->flags, SCSI_NOSLEEP)) {
1094 		task_set(&xa->task, atascsi_disk_unmap_task, xs, NULL);
1095 		task_add(systq, &xa->task);
1096 	} else {
1097 		/* we can already sleep for memory */
1098 		atascsi_disk_unmap_task(xs, NULL);
1099 	}
1100 }
1101 
1102 void
1103 atascsi_disk_unmap_task(void *xxs, void *a)
1104 {
1105 	struct scsi_xfer	*xs = xxs;
1106 	struct scsi_link	*link = xs->sc_link;
1107 	struct atascsi		*as = link->adapter_softc;
1108 	struct atascsi_port	*ap;
1109 	struct ata_xfer		*xa = xs->io;
1110 	struct ata_fis_h2d	*fis;
1111 	struct scsi_unmap_data	*unmap;
1112 	struct scsi_unmap_desc	*descs, *d;
1113 	u_int64_t		*trims;
1114 	u_int			len, i;
1115 
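	/*
	 * One 512-byte DSM block holds 64 8-byte TRIM range entries,
	 * matching the 64-descriptor limit advertised in the block
	 * limits VPD page above.
	 */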
1116 	trims = dma_alloc(512, PR_WAITOK | PR_ZERO);
1117 
1118 	ap = atascsi_lookup_port(link);
1119 	unmap = (struct scsi_unmap_data *)xs->data;
1120 	descs = (struct scsi_unmap_desc *)(unmap + 1);
1121 
1122 	len = _2btol(unmap->desc_length) / sizeof(*d);
1123 	for (i = 0; i < len; i++) {
1124 		d = &descs[i];
1125 		if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
1126 			goto fail;
1127 
1128 		trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
1129 		    _4btol(d->logical_blocks)));
1130 	}
1131 
1132 	xa->data = trims;
1133 	xa->datalen = 512;
1134 	xa->flags = ATA_F_WRITE;
1135 	xa->pmp_port = ap->ap_pmp_port;
1136 	xa->complete = atascsi_disk_unmap_done;
1137 	xa->atascsi_private = xs;
1138 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1139 
1140 	fis = xa->fis;
1141 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1142 	fis->command = ATA_C_DSM;
1143 	fis->features = ATA_DSM_TRIM;
1144 	fis->sector_count = 1;
1145 
1146 	ata_exec(as, xa);
1147 	return;
1148 
1149  fail:
1150 	dma_free(trims, 512);	/* xa->data is not set up on this path */
1151 	atascsi_done(xs, XS_DRIVER_STUFFUP);
1152 }
1153 
1154 void
1155 atascsi_disk_unmap_done(struct ata_xfer *xa)
1156 {
1157 	struct scsi_xfer	*xs = xa->atascsi_private;
1158 
1159 	dma_free(xa->data, 512);
1160 
1161 	switch (xa->state) {
1162 	case ATA_S_COMPLETE:
1163 		xs->error = XS_NOERROR;
1164 		break;
1165 	case ATA_S_ERROR:
1166 		xs->error = XS_DRIVER_STUFFUP;
1167 		break;
1168 	case ATA_S_TIMEOUT:
1169 		xs->error = XS_TIMEOUT;
1170 		break;
1171 
1172 	default:
1173 		panic("atascsi_disk_unmap_done: "
1174 		    "unexpected ata_xfer state (%d)", xa->state);
1175 	}
1176 
1177 	scsi_done(xs);
1178 }
1179 
1180 void
1181 atascsi_disk_sync(struct scsi_xfer *xs)
1182 {
1183 	struct scsi_link	*link = xs->sc_link;
1184 	struct atascsi		*as = link->adapter_softc;
1185 	struct atascsi_port	*ap;
1186 	struct ata_xfer		*xa = xs->io;
1187 
1188 	if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
1189 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1190 		return;
1191 	}
1192 
1193 	ap = atascsi_lookup_port(link);
1194 	xa->datalen = 0;
1195 	xa->flags = ATA_F_READ;
1196 	xa->complete = atascsi_disk_sync_done;
1197 	/* Spec says flush cache can take >30 sec, so give it at least 45. */
1198 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1199 	xa->atascsi_private = xs;
1200 	xa->pmp_port = ap->ap_pmp_port;
1201 	if (xs->flags & SCSI_POLL)
1202 		xa->flags |= ATA_F_POLL;
1203 
1204 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1205 	xa->fis->command = ATA_C_FLUSH_CACHE;
1206 	xa->fis->device = 0;
1207 
1208 	ata_exec(as, xa);
1209 }
1210 
1211 void
1212 atascsi_disk_sync_done(struct ata_xfer *xa)
1213 {
1214 	struct scsi_xfer	*xs = xa->atascsi_private;
1215 
1216 	switch (xa->state) {
1217 	case ATA_S_COMPLETE:
1218 		xs->error = XS_NOERROR;
1219 		break;
1220 
1221 	case ATA_S_ERROR:
1222 	case ATA_S_TIMEOUT:
1223 		printf("atascsi_disk_sync_done: %s\n",
1224 		    xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
1225 		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1226 		    XS_DRIVER_STUFFUP);
1227 		break;
1228 
1229 	default:
1230 		panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
1231 		    xa->state);
1232 	}
1233 
1234 	scsi_done(xs);
1235 }
1236 
1237 u_int64_t
1238 ata_identify_blocks(struct ata_identify *id)
1239 {
1240 	u_int64_t		blocks = 0;
1241 	int			i;
1242 
1243 	if (letoh16(id->cmdset83) & 0x0400) {
1244 		/* LBA48 feature set supported */
1245 		for (i = 3; i >= 0; --i) {
1246 			blocks <<= 16;
1247 			blocks += letoh16(id->addrsecxt[i]);
1248 		}
1249 	} else {
1250 		blocks = letoh16(id->addrsec[1]);
1251 		blocks <<= 16;
1252 		blocks += letoh16(id->addrsec[0]);
1253 	}
1254 
1255 	return (blocks - 1);
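	/*
	 * The identify data reports the total number of sectors, but
	 * the READ CAPACITY callers want the address of the last block.
	 */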
1256 }
1257 
1258 u_int
1259 ata_identify_blocksize(struct ata_identify *id)
1260 {
1261 	u_int			blocksize = 512;
1262 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1263 
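	/*
	 * The identify data gives the logical sector size in 16-bit
	 * words (words_lsec), hence the final shift left by one to
	 * convert to bytes.
	 */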
1264 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1265 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
1266 		blocksize = letoh16(id->words_lsec[1]);
1267 		blocksize <<= 16;
1268 		blocksize += letoh16(id->words_lsec[0]);
1269 		blocksize <<= 1;
1270 	}
1271 
1272 	return (blocksize);
1273 }
1274 
1275 u_int
1276 ata_identify_block_l2p_exp(struct ata_identify *id)
1277 {
1278 	u_int			exponent = 0;
1279 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1280 
1281 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1282 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
1283 		exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
1284 	}
1285 
1286 	return (exponent);
1287 }
1288 
1289 u_int
1290 ata_identify_block_logical_align(struct ata_identify *id)
1291 {
1292 	u_int			align = 0;
1293 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1294 	u_int16_t		logical_align = letoh16(id->logical_align);
1295 
1296 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1297 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
1298 	    (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
1299 		align = logical_align & ATA_ID_LALIGN;
1300 
1301 	return (align);
1302 }
1303 
1304 void
1305 atascsi_disk_capacity(struct scsi_xfer *xs)
1306 {
1307 	struct scsi_link	*link = xs->sc_link;
1308 	struct atascsi_port	*ap;
1309 	struct scsi_read_cap_data rcd;
1310 	u_int64_t		capacity;
1311 
1312 	ap = atascsi_lookup_port(link);
1313 	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
1314 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1315 		return;
1316 	}
1317 
1318 	bzero(&rcd, sizeof(rcd));
1319 	capacity = ata_identify_blocks(&ap->ap_identify);
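	/*
	 * READ CAPACITY (10) can only return a 32-bit block address;
	 * 0xffffffff tells the initiator to use READ CAPACITY (16)
	 * instead.
	 */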
1320 	if (capacity > 0xffffffff)
1321 		capacity = 0xffffffff;
1322 
1323 	_lto4b(capacity, rcd.addr);
1324 	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1325 
1326 	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1327 
1328 	atascsi_done(xs, XS_NOERROR);
1329 }
1330 
1331 void
1332 atascsi_disk_capacity16(struct scsi_xfer *xs)
1333 {
1334 	struct scsi_link	*link = xs->sc_link;
1335 	struct atascsi_port	*ap;
1336 	struct scsi_read_cap_data_16 rcd;
1337 	u_int			align;
1338 	u_int16_t		lowest_aligned = 0;
1339 
1340 	ap = atascsi_lookup_port(link);
1341 	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
1342 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1343 		return;
1344 	}
1345 
1346 	bzero(&rcd, sizeof(rcd));
1347 
1348 	_lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
1349 	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1350 	rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
1351 	align = ata_identify_block_logical_align(&ap->ap_identify);
1352 	if (align > 0)
1353 		lowest_aligned = (1 << rcd.logical_per_phys) - align;
1354 
1355 	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
1356 		SET(lowest_aligned, READ_CAP_16_TPE);
1357 
1358 		if (ISSET(letoh16(ap->ap_identify.add_support),
1359 		    ATA_ID_ADD_SUPPORT_DRT))
1360 			SET(lowest_aligned, READ_CAP_16_TPRZ);
1361 	}
1362 	_lto2b(lowest_aligned, rcd.lowest_aligned);
1363 
1364 	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1365 
1366 	atascsi_done(xs, XS_NOERROR);
1367 }
1368 
1369 int
1370 atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
1371 {
1372 	struct ata_xfer		*xa = xs->io;
1373 
1374 	xa->data = xs->data;
1375 	xa->datalen = xs->datalen;
1376 	xa->timeout = xs->timeout;
1377 	xa->flags = 0;
1378 	if (xs->flags & SCSI_DATA_IN)
1379 		xa->flags |= ATA_F_READ;
1380 	if (xs->flags & SCSI_DATA_OUT)
1381 		xa->flags |= ATA_F_WRITE;
1382 	if (xs->flags & SCSI_POLL)
1383 		xa->flags |= ATA_F_POLL;
1384 
1385 	switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
1386 	case ATA_PASSTHRU_PROTO_NON_DATA:
1387 	case ATA_PASSTHRU_PROTO_PIO_DATAIN:
1388 	case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
1389 		xa->flags |= ATA_F_PIO;
1390 		break;
1391 	default:
1392 		/* we don't support this yet */
1393 		return (1);
1394 	}
1395 
1396 	xa->atascsi_private = xs;
1397 	xa->complete = atascsi_passthru_done;
1398 
1399 	return (0);
1400 }
1401 
1402 void
1403 atascsi_passthru_12(struct scsi_xfer *xs)
1404 {
1405 	struct scsi_link	*link = xs->sc_link;
1406 	struct atascsi		*as = link->adapter_softc;
1407 	struct atascsi_port	*ap;
1408 	struct ata_xfer		*xa = xs->io;
1409 	struct scsi_ata_passthru_12 *cdb;
1410 	struct ata_fis_h2d	*fis;
1411 
1412 	if (xs->cmdlen != sizeof(*cdb)) {
1413 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1414 		return;
1415 	}
1416 
1417 	cdb = (struct scsi_ata_passthru_12 *)xs->cmd;
1418 	/* validate cdb */
1419 
1420 	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1421 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1422 		return;
1423 	}
1424 
1425 	ap = atascsi_lookup_port(link);
1426 	fis = xa->fis;
1427 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1428 	fis->command = cdb->command;
1429 	fis->features = cdb->features;
1430 	fis->lba_low = cdb->lba_low;
1431 	fis->lba_mid = cdb->lba_mid;
1432 	fis->lba_high = cdb->lba_high;
1433 	fis->device = cdb->device;
1434 	fis->sector_count = cdb->sector_count;
1435 	xa->pmp_port = ap->ap_pmp_port;
1436 
1437 	ata_exec(as, xa);
1438 }
1439 
1440 void
1441 atascsi_passthru_16(struct scsi_xfer *xs)
1442 {
1443 	struct scsi_link	*link = xs->sc_link;
1444 	struct atascsi		*as = link->adapter_softc;
1445 	struct atascsi_port	*ap;
1446 	struct ata_xfer		*xa = xs->io;
1447 	struct scsi_ata_passthru_16 *cdb;
1448 	struct ata_fis_h2d	*fis;
1449 
1450 	if (xs->cmdlen != sizeof(*cdb)) {
1451 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1452 		return;
1453 	}
1454 
1455 	cdb = (struct scsi_ata_passthru_16 *)xs->cmd;
1456 	/* validate cdb */
1457 
1458 	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1459 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1460 		return;
1461 	}
1462 
1463 	ap = atascsi_lookup_port(link);
1464 	fis = xa->fis;
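	/*
	 * Byte 0 of each two-byte CDB field holds the extended (LBA48)
	 * value and byte 1 the current low byte, hence the mapping to
	 * the _exp FIS registers below.
	 */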
1465 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1466 	fis->command = cdb->command;
1467 	fis->features = cdb->features[1];
1468 	fis->lba_low = cdb->lba_low[1];
1469 	fis->lba_mid = cdb->lba_mid[1];
1470 	fis->lba_high = cdb->lba_high[1];
1471 	fis->device = cdb->device;
1472 	fis->lba_low_exp = cdb->lba_low[0];
1473 	fis->lba_mid_exp = cdb->lba_mid[0];
1474 	fis->lba_high_exp = cdb->lba_high[0];
1475 	fis->features_exp = cdb->features[0];
1476 	fis->sector_count = cdb->sector_count[1];
1477 	fis->sector_count_exp = cdb->sector_count[0];
1478 	xa->pmp_port = ap->ap_pmp_port;
1479 
1480 	ata_exec(as, xa);
1481 }
1482 
1483 void
1484 atascsi_passthru_done(struct ata_xfer *xa)
1485 {
1486 	struct scsi_xfer	*xs = xa->atascsi_private;
1487 
1488 	/*
1489 	 * XXX need to generate sense if cdb wants it
1490 	 */
1491 
1492 	switch (xa->state) {
1493 	case ATA_S_COMPLETE:
1494 		xs->error = XS_NOERROR;
1495 		break;
1496 	case ATA_S_ERROR:
1497 		xs->error = XS_DRIVER_STUFFUP;
1498 		break;
1499 	case ATA_S_TIMEOUT:
1500 		printf("atascsi_passthru_done, timeout\n");
1501 		xs->error = XS_TIMEOUT;
1502 		break;
1503 	default:
1504 		panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
1505 		    xa->state);
1506 	}
1507 
1508 	xs->resid = xa->resid;
1509 
1510 	scsi_done(xs);
1511 }
1512 
1513 void
1514 atascsi_disk_sense(struct scsi_xfer *xs)
1515 {
1516 	struct scsi_sense_data	*sd = (struct scsi_sense_data *)xs->data;
1517 
1518 	bzero(xs->data, xs->datalen);
1519 	/* check datalen > sizeof(struct scsi_sense_data)? */
1520 	sd->error_code = SSD_ERRCODE_CURRENT;
1521 	sd->flags = SKEY_NO_SENSE;
1522 
1523 	atascsi_done(xs, XS_NOERROR);
1524 }
1525 
1526 void
1527 atascsi_disk_start_stop(struct scsi_xfer *xs)
1528 {
1529 	struct scsi_link	*link = xs->sc_link;
1530 	struct atascsi		*as = link->adapter_softc;
1531 	struct atascsi_port	*ap;
1532 	struct ata_xfer		*xa = xs->io;
1533 	struct scsi_start_stop	*ss = (struct scsi_start_stop *)xs->cmd;
1534 
1535 	if (xs->cmdlen != sizeof(*ss)) {
1536 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1537 		return;
1538 	}
1539 
1540 	if (ss->how != SSS_STOP) {
1541 		atascsi_done(xs, XS_NOERROR);
1542 		return;
1543 	}
1544 
1545 	/*
1546 	 * A SCSI START STOP UNIT command with the START bit set to
1547 	 * zero gets translated into an ATA FLUSH CACHE command
1548 	 * followed by an ATA STANDBY IMMEDIATE command.
1549 	 */
1550 	ap = atascsi_lookup_port(link);
1551 	xa->datalen = 0;
1552 	xa->flags = ATA_F_READ;
1553 	xa->complete = atascsi_disk_start_stop_done;
1554 	/* Spec says flush cache can take >30 sec, so give it at least 45. */
1555 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1556 	xa->pmp_port = ap->ap_pmp_port;
1557 	xa->atascsi_private = xs;
1558 	if (xs->flags & SCSI_POLL)
1559 		xa->flags |= ATA_F_POLL;
1560 
1561 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1562 	xa->fis->command = ATA_C_FLUSH_CACHE;
1563 	xa->fis->device = 0;
1564 
1565 	ata_exec(as, xa);
1566 }
1567 
1568 void
1569 atascsi_disk_start_stop_done(struct ata_xfer *xa)
1570 {
1571 	struct scsi_xfer	*xs = xa->atascsi_private;
1572 	struct scsi_link	*link = xs->sc_link;
1573 	struct atascsi		*as = link->adapter_softc;
1574 	struct atascsi_port	*ap;
1575 
1576 	switch (xa->state) {
1577 	case ATA_S_COMPLETE:
1578 		break;
1579 
1580 	case ATA_S_ERROR:
1581 	case ATA_S_TIMEOUT:
1582 		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1583 		    XS_DRIVER_STUFFUP);
1584 		xs->resid = xa->resid;
1585 		scsi_done(xs);
1586 		return;
1587 
1588 	default:
1589 		panic("atascsi_disk_start_stop_done: unexpected ata_xfer state (%d)",
1590 		    xa->state);
1591 	}
1592 
1593 	/*
1594 	 * The FLUSH CACHE command completed successfully; now issue
1595 	 * the STANDBY IMMEDIATE command.
1596 	 */
1597 	ap = atascsi_lookup_port(link);
1598 	xa->datalen = 0;
1599 	xa->flags = ATA_F_READ;
1600 	xa->state = ATA_S_SETUP;
1601 	xa->complete = atascsi_disk_cmd_done;
1602 	/* Give STANDBY IMMEDIATE the same generous timeout as the flush. */
1603 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1604 	xa->pmp_port = ap->ap_pmp_port;
1605 	xa->atascsi_private = xs;
1606 	if (xs->flags & SCSI_POLL)
1607 		xa->flags |= ATA_F_POLL;
1608 
1609 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1610 	xa->fis->command = ATA_C_STANDBY_IMMED;
1611 	xa->fis->device = 0;
1612 
1613 	ata_exec(as, xa);
1614 }
1615 
1616 void
1617 atascsi_atapi_cmd(struct scsi_xfer *xs)
1618 {
1619 	struct scsi_link	*link = xs->sc_link;
1620 	struct atascsi		*as = link->adapter_softc;
1621 	struct atascsi_port	*ap;
1622 	struct ata_xfer		*xa = xs->io;
1623 	struct ata_fis_h2d	*fis;
1624 
1625 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1626 	case SCSI_DATA_IN:
1627 		xa->flags = ATA_F_PACKET | ATA_F_READ;
1628 		break;
1629 	case SCSI_DATA_OUT:
1630 		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
1631 		break;
1632 	default:
1633 		xa->flags = ATA_F_PACKET;
1634 	}
1635 	xa->flags |= ATA_F_GET_RFIS;
1636 
1637 	ap = atascsi_lookup_port(link);
1638 	xa->data = xs->data;
1639 	xa->datalen = xs->datalen;
1640 	xa->complete = atascsi_atapi_cmd_done;
1641 	xa->timeout = xs->timeout;
1642 	xa->pmp_port = ap->ap_pmp_port;
1643 	xa->atascsi_private = xs;
1644 	if (xs->flags & SCSI_POLL)
1645 		xa->flags |= ATA_F_POLL;
1646 
1647 	fis = xa->fis;
1648 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1649 	fis->command = ATA_C_PACKET;
1650 	fis->device = 0;
1651 	fis->sector_count = xa->tag << 3;
1652 	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
1653 	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
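	/*
	 * The ATAPI byte count limit is carried in the LBA mid/high
	 * (cylinder) registers; 0x2000 allows up to 8192 bytes per
	 * data transfer.
	 */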
1654 	fis->lba_mid = 0x00;
1655 	fis->lba_high = 0x20;
1656 
1657 	/* Copy SCSI command into ATAPI packet. */
1658 	memcpy(xa->packetcmd, xs->cmd, xs->cmdlen);
1659 
1660 	ata_exec(as, xa);
1661 }
1662 
1663 void
1664 atascsi_atapi_cmd_done(struct ata_xfer *xa)
1665 {
1666 	struct scsi_xfer	*xs = xa->atascsi_private;
1667 	struct scsi_sense_data  *sd = &xs->sense;
1668 
1669 	switch (xa->state) {
1670 	case ATA_S_COMPLETE:
1671 		xs->error = XS_NOERROR;
1672 		break;
1673 	case ATA_S_ERROR:
1674 		/* Return PACKET sense data */
1675 		sd->error_code = SSD_ERRCODE_CURRENT;
1676 		sd->flags = (xa->rfis.error & 0xf0) >> 4;
1677 		if (xa->rfis.error & 0x04)
1678 			sd->flags = SKEY_ILLEGAL_REQUEST;
1679 		if (xa->rfis.error & 0x02)
1680 			sd->flags |= SSD_EOM;
1681 		if (xa->rfis.error & 0x01)
1682 			sd->flags |= SSD_ILI;
1683 		xs->error = XS_SENSE;
1684 		break;
1685 	case ATA_S_TIMEOUT:
1686 		printf("atascsi_atapi_cmd_done, timeout\n");
1687 		xs->error = XS_TIMEOUT;
1688 		break;
1689 	default:
1690 		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
1691 		    xa->state);
1692 	}
1693 
1694 	xs->resid = xa->resid;
1695 
1696 	scsi_done(xs);
1697 }
1698 
1699 void
1700 atascsi_pmp_cmd(struct scsi_xfer *xs)
1701 {
1702 	switch (xs->cmd->opcode) {
1703 	case REQUEST_SENSE:
1704 		atascsi_pmp_sense(xs);
1705 		return;
1706 	case INQUIRY:
1707 		atascsi_pmp_inq(xs);
1708 		return;
1709 
1710 	case TEST_UNIT_READY:
1711 	case PREVENT_ALLOW:
1712 		atascsi_done(xs, XS_NOERROR);
1713 		return;
1714 
1715 	default:
1716 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1717 		return;
1718 	}
1719 }
1720 
1721 void
1722 atascsi_pmp_sense(struct scsi_xfer *xs)
1723 {
1724 	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;
1725 
1726 	bzero(xs->data, xs->datalen);
1727 	sd->error_code = SSD_ERRCODE_CURRENT;
1728 	sd->flags = SKEY_NO_SENSE;
1729 
1730 	atascsi_done(xs, XS_NOERROR);
1731 }
1732 
1733 void
1734 atascsi_pmp_inq(struct scsi_xfer *xs)
1735 {
1736 	struct scsi_inquiry_data inq;
1737 	struct scsi_inquiry *in_inq = (struct scsi_inquiry *)xs->cmd;
1738 
1739 	if (ISSET(in_inq->flags, SI_EVPD)) {
1740 		/* any evpd pages we need to support here? */
1741 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1742 		return;
1743 	}
1744 
1745 	bzero(&inq, sizeof(inq));
1746 	inq.device = 0x1E;	/* "well known logical unit" seems reasonable */
1747 	inq.version = 0x05;	/* SPC-3? */
1748 	inq.response_format = 2;
1749 	inq.additional_length = 32;
1750 	inq.flags |= SID_CmdQue;
1751 	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
1752 
1753 	/* should use the data from atascsi_pmp_identify here?
1754 	 * not sure how useful the chip id is, but maybe it'd be
1755 	 * nice to include the number of ports.
1756 	 */
1757 	bcopy("Port Multiplier", inq.product, sizeof(inq.product));
1758 	bcopy("    ", inq.revision, sizeof(inq.revision));
1759 
1760 	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
1761 	atascsi_done(xs, XS_NOERROR);
1762 }
1763 
1764 void
1765 atascsi_done(struct scsi_xfer *xs, int error)
1766 {
1767 	xs->error = error;
1768 	scsi_done(xs);
1769 }
1770 
1771 void
1772 ata_exec(struct atascsi *as, struct ata_xfer *xa)
1773 {
1774 	as->as_methods->ata_cmd(xa);
1775 }
1776 
1777 void *
1778 atascsi_io_get(void *cookie)
1779 {
1780 	struct atascsi_host_port	*ahp = cookie;
1781 	struct atascsi			*as = ahp->ahp_as;
1782 	struct ata_xfer			*xa;
1783 
1784 	xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
1785 	if (xa != NULL)
1786 		xa->fis->type = ATA_FIS_TYPE_H2D;
1787 
1788 	return (xa);
1789 }
1790 
1791 void
1792 atascsi_io_put(void *cookie, void *io)
1793 {
1794 	struct atascsi_host_port	*ahp = cookie;
1795 	struct atascsi			*as = ahp->ahp_as;
1796 	struct ata_xfer			*xa = io;
1797 
1798 	xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
1799 	as->as_methods->ata_put_xfer(xa);
1800 }
1801 
1802 void
1803 ata_polled_complete(struct ata_xfer *xa)
1804 {
1805 	/* do nothing */
1806 }
1807 
1808 int
1809 ata_polled(struct ata_xfer *xa)
1810 {
1811 	int			rv;
1812 
1813 	if (!ISSET(xa->flags, ATA_F_DONE))
1814 		panic("ata_polled: xa isn't complete");
1815 
1816 	switch (xa->state) {
1817 	case ATA_S_COMPLETE:
1818 		rv = 0;
1819 		break;
1820 	case ATA_S_ERROR:
1821 	case ATA_S_TIMEOUT:
1822 		rv = EIO;
1823 		break;
1824 	default:
1825 		panic("ata_polled: xa state (%d)",
1826 		    xa->state);
1827 	}
1828 
1829 	scsi_io_put(xa->atascsi_private, xa);
1830 
1831 	return (rv);
1832 }
1833 
1834 void
1835 ata_complete(struct ata_xfer *xa)
1836 {
1837 	SET(xa->flags, ATA_F_DONE);
1838 	xa->complete(xa);
1839 }
1840 
1841 void
1842 ata_swapcopy(void *src, void *dst, size_t len)
1843 {
1844 	u_int16_t *s = src, *d = dst;
1845 	int i;
1846 
1847 	len /= 2;
1848 
1849 	for (i = 0; i < len; i++)
1850 		d[i] = swap16(s[i]);
1851 }
1852 
1853 int
1854 atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
1855 {
1856 	struct atascsi			*as = ap->ap_as;
1857 	struct atascsi_host_port	*ahp = ap->ap_host_port;
1858 	struct ata_xfer			*xa;
1859 
1860 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1861 	if (xa == NULL)
1862 		panic("no free xfers on a new port");
1863 	xa->pmp_port = ap->ap_pmp_port;
1864 	xa->data = identify;
1865 	xa->datalen = sizeof(*identify);
1866 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1867 	xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
1868 	    ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
1869 	xa->fis->device = 0;
1870 	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
1871 	xa->timeout = 1000;
1872 	xa->complete = ata_polled_complete;
1873 	xa->atascsi_private = &ahp->ahp_iopool;
1874 	ata_exec(as, xa);
1875 	return (ata_polled(xa));
1876 }
1877 
1878 int
1879 atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
1880 {
1881 	struct atascsi			*as = ap->ap_as;
1882 	struct atascsi_host_port	*ahp = ap->ap_host_port;
1883 	struct ata_xfer			*xa;
1884 
1885 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1886 	if (xa == NULL)
1887 		panic("no free xfers on a new port");
1888 	xa->fis->command = ATA_C_SET_FEATURES;
1889 	xa->fis->features = subcommand;
1890 	xa->fis->sector_count = arg;
1891 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1892 	xa->flags = ATA_F_POLL;
1893 	xa->timeout = 1000;
1894 	xa->complete = ata_polled_complete;
1895 	xa->pmp_port = ap->ap_pmp_port;
1896 	xa->atascsi_private = &ahp->ahp_iopool;
1897 	ata_exec(as, xa);
1898 	return (ata_polled(xa));
1899 }
1900