1 /*	$OpenBSD: atascsi.c,v 1.133 2020/02/05 16:29:29 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
6  * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/pool.h>
29 
30 #include <scsi/scsi_all.h>
31 #include <scsi/scsi_disk.h>
32 #include <scsi/scsiconf.h>
33 
34 #include <dev/ata/atascsi.h>
35 #include <dev/ata/pmreg.h>
36 
37 struct atascsi_port;
38 
39 struct atascsi {
40 	struct device		*as_dev;
41 	void			*as_cookie;
42 
43 	struct atascsi_host_port **as_host_ports;
44 
45 	struct atascsi_methods	*as_methods;
46 	struct scsi_adapter	as_switch;
47 	struct scsi_link	as_link;
48 	struct scsibus_softc	*as_scsibus;
49 
50 	int			as_capability;
51 	int			as_ncqdepth;
52 };
53 
54 /*
55  * atascsi_host_port is a port attached to the host controller, and
56  * only holds the details relevant to the host controller.
57  * atascsi_port is any port, including ports on port multipliers, and
58  * it holds details of the device attached to the port.
59  *
60  * When there is a port multiplier attached to a port, the ahp_ports
61  * array in the atascsi_host_port struct contains one atascsi_port for
62  * each port, and one for the control port (port 15).  The index into
63  * the array is the LUN used to address the port.  For the control port,
64  * the LUN is 0, and for the port multiplier ports, the LUN is the
65  * port number plus one.
66  *
67  * When there is no port multiplier attached to a port, the ahp_ports
68  * array contains a single entry for the device.  The LUN and port number
69  * for this entry are both 0.
70  */
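/*
 * Illustrative example of the mapping above: with a port multiplier on
 * host port 2, a drive on PMP port 0 is addressed as target 2 LUN 1, a
 * drive on PMP port 3 as target 2 LUN 4, and the multiplier's control
 * port (15) as target 2 LUN 0.  With no multiplier attached, the drive
 * is simply target 2 LUN 0.
 */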
71 
72 struct atascsi_host_port {
73 	struct scsi_iopool	ahp_iopool;
74 	struct atascsi		*ahp_as;
75 	int			ahp_port;
76 	int			ahp_nports;
77 
78 	struct atascsi_port	**ahp_ports;
79 };
80 
81 struct atascsi_port {
82 	struct ata_identify	ap_identify;
83 	struct atascsi_host_port *ap_host_port;
84 	struct atascsi		*ap_as;
85 	int			ap_pmp_port;
86 	int			ap_type;
87 	int			ap_ncqdepth;
88 	int			ap_features;
89 #define ATA_PORT_F_NCQ			0x1
90 #define ATA_PORT_F_TRIM			0x2
91 };
92 
93 void		atascsi_cmd(struct scsi_xfer *);
94 int		atascsi_probe(struct scsi_link *);
95 void		atascsi_free(struct scsi_link *);
96 
97 /* template */
98 struct scsi_adapter atascsi_switch = {
99 	atascsi_cmd, NULL, atascsi_probe, atascsi_free, NULL
100 };
101 
102 void		ata_swapcopy(void *, void *, size_t);
103 
104 void		atascsi_disk_cmd(struct scsi_xfer *);
105 void		atascsi_disk_cmd_done(struct ata_xfer *);
106 void		atascsi_disk_inq(struct scsi_xfer *);
107 void		atascsi_disk_inquiry(struct scsi_xfer *);
108 void		atascsi_disk_vpd_supported(struct scsi_xfer *);
109 void		atascsi_disk_vpd_serial(struct scsi_xfer *);
110 void		atascsi_disk_vpd_ident(struct scsi_xfer *);
111 void		atascsi_disk_vpd_ata(struct scsi_xfer *);
112 void		atascsi_disk_vpd_limits(struct scsi_xfer *);
113 void		atascsi_disk_vpd_info(struct scsi_xfer *);
114 void		atascsi_disk_vpd_thin(struct scsi_xfer *);
115 void		atascsi_disk_write_same_16(struct scsi_xfer *);
116 void		atascsi_disk_write_same_16_done(struct ata_xfer *);
117 void		atascsi_disk_unmap(struct scsi_xfer *);
118 void		atascsi_disk_unmap_task(void *);
119 void		atascsi_disk_unmap_done(struct ata_xfer *);
120 void		atascsi_disk_capacity(struct scsi_xfer *);
121 void		atascsi_disk_capacity16(struct scsi_xfer *);
122 void		atascsi_disk_sync(struct scsi_xfer *);
123 void		atascsi_disk_sync_done(struct ata_xfer *);
124 void		atascsi_disk_sense(struct scsi_xfer *);
125 void		atascsi_disk_start_stop(struct scsi_xfer *);
126 void		atascsi_disk_start_stop_done(struct ata_xfer *);
127 
128 void		atascsi_atapi_cmd(struct scsi_xfer *);
129 void		atascsi_atapi_cmd_done(struct ata_xfer *);
130 
131 void		atascsi_pmp_cmd(struct scsi_xfer *);
132 void		atascsi_pmp_cmd_done(struct ata_xfer *);
133 void		atascsi_pmp_sense(struct scsi_xfer *xs);
134 void		atascsi_pmp_inq(struct scsi_xfer *xs);
135 
136 
137 void		atascsi_passthru_12(struct scsi_xfer *);
138 void		atascsi_passthru_16(struct scsi_xfer *);
139 int		atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
140 void		atascsi_passthru_done(struct ata_xfer *);
141 
142 void		atascsi_done(struct scsi_xfer *, int);
143 
144 void		ata_exec(struct atascsi *, struct ata_xfer *);
145 
146 void		ata_polled_complete(struct ata_xfer *);
147 int		ata_polled(struct ata_xfer *);
148 
149 u_int64_t	ata_identify_blocks(struct ata_identify *);
150 u_int		ata_identify_blocksize(struct ata_identify *);
151 u_int		ata_identify_block_l2p_exp(struct ata_identify *);
152 u_int		ata_identify_block_logical_align(struct ata_identify *);
153 
154 void		*atascsi_io_get(void *);
155 void		atascsi_io_put(void *, void *);
156 struct atascsi_port * atascsi_lookup_port(struct scsi_link *);
157 
158 int		atascsi_port_identify(struct atascsi_port *,
159 		    struct ata_identify *);
160 int		atascsi_port_set_features(struct atascsi_port *, int, int);
161 
162 
163 struct atascsi *
164 atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
165 {
166 	struct scsibus_attach_args	saa;
167 	struct atascsi			*as;
168 
169 	as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);
170 
171 	as->as_dev = self;
172 	as->as_cookie = aaa->aaa_cookie;
173 	as->as_methods = aaa->aaa_methods;
174 	as->as_capability = aaa->aaa_capability;
175 	as->as_ncqdepth = aaa->aaa_ncmds;
176 
177 	/* copy from template and modify for ourselves */
178 	as->as_switch = atascsi_switch;
179 	if (aaa->aaa_minphys != NULL)
180 		as->as_switch.dev_minphys = aaa->aaa_minphys;
181 
182 	/* fill in our scsi_link */
183 	as->as_link.adapter = &as->as_switch;
184 	as->as_link.adapter_softc = as;
185 	as->as_link.adapter_buswidth = aaa->aaa_nports;
186 	as->as_link.luns = SATA_PMP_MAX_PORTS;
187 	as->as_link.adapter_target = aaa->aaa_nports;
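	/* one opening per port by default; atascsi_probe() raises it for NCQ */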
188 	as->as_link.openings = 1;
189 
190 	as->as_host_ports = mallocarray(aaa->aaa_nports,
191 	    sizeof(struct atascsi_host_port *),	M_DEVBUF, M_WAITOK | M_ZERO);
192 
193 	bzero(&saa, sizeof(saa));
194 	saa.saa_sc_link = &as->as_link;
195 
196 	/* stash the scsibus so we can do hotplug on it */
197 	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
198 	    scsiprint);
199 
200 	return (as);
201 }
202 
203 int
204 atascsi_detach(struct atascsi *as, int flags)
205 {
206 	int				rv;
207 
208 	rv = config_detach((struct device *)as->as_scsibus, flags);
209 	if (rv != 0)
210 		return (rv);
211 
212 	free(as->as_host_ports, M_DEVBUF, 0);
213 	free(as, M_DEVBUF, sizeof(*as));
214 
215 	return (0);
216 }
217 
218 struct atascsi_port *
219 atascsi_lookup_port(struct scsi_link *link)
220 {
221 	struct atascsi 			*as = link->adapter_softc;
222 	struct atascsi_host_port 	*ahp;
223 
224 	if (link->target >= as->as_link.adapter_buswidth)
225 		return (NULL);
226 
227 	ahp = as->as_host_ports[link->target];
228 	if (link->lun >= ahp->ahp_nports)
229 		return (NULL);
230 
231 	return (ahp->ahp_ports[link->lun]);
232 }
233 
234 int
235 atascsi_probe(struct scsi_link *link)
236 {
237 	struct atascsi			*as = link->adapter_softc;
238 	struct atascsi_host_port 	*ahp;
239 	struct atascsi_port		*ap;
240 	struct ata_xfer			*xa;
241 	struct ata_identify		*identify;
242 	int				port, type, qdepth;
243 	int				rv;
244 	u_int16_t			cmdset;
245 	u_int16_t			validinfo, ultradma;
246 	int				i, xfermode = -1;
247 
248 	port = link->target;
249 	if (port >= as->as_link.adapter_buswidth)
250 		return (ENXIO);
251 
252 	/* if this is a PMP port, check it's valid */
253 	if (link->lun > 0) {
254 		if (link->lun >= as->as_host_ports[port]->ahp_nports)
255 			return (ENXIO);
256 	}
257 
258 	type = as->as_methods->ata_probe(as->as_cookie, port, link->lun);
259 	switch (type) {
260 	case ATA_PORT_T_DISK:
261 		break;
262 	case ATA_PORT_T_ATAPI:
263 		link->flags |= SDEV_ATAPI;
264 		link->quirks |= SDEV_ONLYBIG;
265 		break;
266 	case ATA_PORT_T_PM:
267 		if (link->lun != 0) {
268 			printf("%s.%d.%d: Port multipliers cannot be nested\n",
269 			    as->as_dev->dv_xname, port, link->lun);
270 			rv = ENODEV;
271 			goto unsupported;
272 		}
273 		break;
274 	default:
275 		rv = ENODEV;
276 		goto unsupported;
277 	}
278 
279 	ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
280 	ap->ap_as = as;
281 
282 	if (link->lun == 0) {
283 		ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
284 		ahp->ahp_as = as;
285 		ahp->ahp_port = port;
286 
287 		scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
288 		    atascsi_io_put);
289 
290 		as->as_host_ports[port] = ahp;
291 
292 		if (type == ATA_PORT_T_PM) {
293 			ahp->ahp_nports = SATA_PMP_MAX_PORTS;
294 			ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
295 		} else {
296 			ahp->ahp_nports = 1;
297 			ap->ap_pmp_port = 0;
298 		}
299 		ahp->ahp_ports = mallocarray(ahp->ahp_nports,
300 		    sizeof(struct atascsi_port *), M_DEVBUF, M_WAITOK | M_ZERO);
301 	} else {
302 		ahp = as->as_host_ports[port];
303 		ap->ap_pmp_port = link->lun - 1;
304 	}
305 
306 	ap->ap_host_port = ahp;
307 	ap->ap_type = type;
308 
309 	link->pool = &ahp->ahp_iopool;
310 
311 	/* fetch the device info, except for port multipliers */
312 	if (type != ATA_PORT_T_PM) {
313 
314 		/* devices attached to port multipliers tend not to be
315 		 * spun up at this point, and sometimes this prevents
316 		 * identification from working, so we retry a few times
317 		 * with a fairly long delay.
318 		 */
319 
320 		identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);
321 
322 		int count = (link->lun > 0) ? 6 : 2;
323 		while (count--) {
324 			rv = atascsi_port_identify(ap, identify);
325 			if (rv == 0) {
326 				ap->ap_identify = *identify;
327 				break;
328 			}
329 			if (count > 0)
330 				delay(5000000);
331 		}
332 
333 		dma_free(identify, sizeof(*identify));
334 
335 		if (rv != 0) {
336 			goto error;
337 		}
338 	}
339 
340 	ahp->ahp_ports[link->lun] = ap;
341 
342 	if (type != ATA_PORT_T_DISK)
343 		return (0);
344 
345 	/*
346 	 * Early SATA drives (as well as PATA drives) need to have
347 	 * their transfer mode set properly, otherwise commands that
348 	 * use DMA will time out.
349 	 */
350 	validinfo = letoh16(ap->ap_identify.validinfo);
351 	if (ISSET(validinfo, ATA_ID_VALIDINFO_ULTRADMA)) {
352 		ultradma = letoh16(ap->ap_identify.ultradma);
353 		for (i = 7; i >= 0; i--) {
354 			if (ultradma & (1 << i)) {
355 				xfermode = ATA_SF_XFERMODE_UDMA | i;
356 				break;
357 			}
358 		}
359 	}
360 	if (xfermode != -1)
361 		(void)atascsi_port_set_features(ap, ATA_SF_XFERMODE, xfermode);
362 
363 	if (as->as_capability & ASAA_CAP_NCQ &&
364 	    ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
365 	    (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
366 		ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
367 		qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
368 		if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
369 			qdepth--;
370 
371 		if (qdepth > 1) {
372 			SET(ap->ap_features, ATA_PORT_F_NCQ);
373 
374 			/* Raise the number of openings */
375 			link->openings = qdepth;
376 
377 			/*
378 			 * XXX for directly attached devices, throw away any xfers
379 			 * that have tag numbers higher than what the device supports.
380 			 */
381 			if (link->lun == 0) {
382 				while (qdepth--) {
383 					xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
384 					if (xa->tag < link->openings) {
385 						xa->state = ATA_S_COMPLETE;
386 						scsi_io_put(&ahp->ahp_iopool, xa);
387 					}
388 				}
389 			}
390 		}
391 	}
392 
393 	if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
394 	    ATA_ID_DATA_SET_MGMT_TRIM))
395 		SET(ap->ap_features, ATA_PORT_F_TRIM);
396 
397 	cmdset = letoh16(ap->ap_identify.cmdset82);
398 
399 	/* Enable write cache if supported */
400 	if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
401 		/* We don't care if it fails. */
402 		(void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
403 	}
404 
405 	/* Enable read lookahead if supported */
406 	if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
407 		/* We don't care if it fails. */
408 		(void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
409 	}
410 
411 	/*
412 	 * FREEZE LOCK the device so malicious users can't lock it on us.
413 	 * As there is no harm in issuing this to devices that don't
414 	 * support the security feature set, we just send it and don't bother
415 	 * checking whether the device returns a command abort to tell us it
416 	 * doesn't support it.
417 	 */
418 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
419 	if (xa == NULL)
420 		panic("no free xfers on a new port");
421 	xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
422 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
423 	xa->flags = ATA_F_POLL;
424 	xa->timeout = 1000;
425 	xa->complete = ata_polled_complete;
426 	xa->pmp_port = ap->ap_pmp_port;
427 	xa->atascsi_private = &ahp->ahp_iopool;
428 	ata_exec(as, xa);
429 	ata_polled(xa); /* we don't care if it doesn't work */
430 
431 	return (0);
432 error:
433 	free(ap, M_DEVBUF, sizeof(*ap));
434 unsupported:
435 
436 	as->as_methods->ata_free(as->as_cookie, port, link->lun);
437 	return (rv);
438 }
439 
440 void
441 atascsi_free(struct scsi_link *link)
442 {
443 	struct atascsi			*as = link->adapter_softc;
444 	struct atascsi_host_port	*ahp;
445 	struct atascsi_port		*ap;
446 	int				port;
447 
448 	port = link->target;
449 	if (port >= as->as_link.adapter_buswidth)
450 		return;
451 
452 	ahp = as->as_host_ports[port];
453 	if (ahp == NULL)
454 		return;
455 
456 	if (link->lun >= ahp->ahp_nports)
457 		return;
458 
459 	ap = ahp->ahp_ports[link->lun];
460 	free(ap, M_DEVBUF, sizeof(*ap));
461 	ahp->ahp_ports[link->lun] = NULL;
462 
463 	as->as_methods->ata_free(as->as_cookie, port, link->lun);
464 
465 	if (link->lun == ahp->ahp_nports - 1) {
466 		/* we've already freed all of ahp->ahp_ports, now
467 		 * free ahp itself.  this relies on the order luns are
468 		 * detached in scsi_detach_target().
469 		 */
470 		free(ahp, M_DEVBUF, sizeof(*ahp));
471 		as->as_host_ports[port] = NULL;
472 	}
473 }
474 
475 void
476 atascsi_cmd(struct scsi_xfer *xs)
477 {
478 	struct scsi_link	*link = xs->sc_link;
479 	struct atascsi_port	*ap;
480 
481 	ap = atascsi_lookup_port(link);
482 	if (ap == NULL) {
483 		atascsi_done(xs, XS_DRIVER_STUFFUP);
484 		return;
485 	}
486 
487 	switch (ap->ap_type) {
488 	case ATA_PORT_T_DISK:
489 		atascsi_disk_cmd(xs);
490 		break;
491 	case ATA_PORT_T_ATAPI:
492 		atascsi_atapi_cmd(xs);
493 		break;
494 	case ATA_PORT_T_PM:
495 		atascsi_pmp_cmd(xs);
496 		break;
497 
498 	case ATA_PORT_T_NONE:
499 	default:
500 		atascsi_done(xs, XS_DRIVER_STUFFUP);
501 		break;
502 	}
503 }
504 
505 void
506 atascsi_disk_cmd(struct scsi_xfer *xs)
507 {
508 	struct scsi_link	*link = xs->sc_link;
509 	struct atascsi		*as = link->adapter_softc;
510 	struct atascsi_port	*ap;
511 	struct ata_xfer		*xa = xs->io;
512 	int			flags = 0;
513 	struct ata_fis_h2d	*fis;
514 	u_int64_t		lba;
515 	u_int32_t		sector_count;
516 
517 	ap = atascsi_lookup_port(link);
518 
519 	switch (xs->cmd->opcode) {
520 	case READ_COMMAND:
521 	case READ_BIG:
522 	case READ_12:
523 	case READ_16:
524 		flags = ATA_F_READ;
525 		break;
526 	case WRITE_COMMAND:
527 	case WRITE_BIG:
528 	case WRITE_12:
529 	case WRITE_16:
530 		flags = ATA_F_WRITE;
531 		/* deal with io outside the switch */
532 		break;
533 
534 	case WRITE_SAME_16:
535 		atascsi_disk_write_same_16(xs);
536 		return;
537 	case UNMAP:
538 		atascsi_disk_unmap(xs);
539 		return;
540 
541 	case SYNCHRONIZE_CACHE:
542 		atascsi_disk_sync(xs);
543 		return;
544 	case REQUEST_SENSE:
545 		atascsi_disk_sense(xs);
546 		return;
547 	case INQUIRY:
548 		atascsi_disk_inq(xs);
549 		return;
550 	case READ_CAPACITY:
551 		atascsi_disk_capacity(xs);
552 		return;
553 	case READ_CAPACITY_16:
554 		atascsi_disk_capacity16(xs);
555 		return;
556 
557 	case ATA_PASSTHRU_12:
558 		atascsi_passthru_12(xs);
559 		return;
560 	case ATA_PASSTHRU_16:
561 		atascsi_passthru_16(xs);
562 		return;
563 
564 	case START_STOP:
565 		atascsi_disk_start_stop(xs);
566 		return;
567 
568 	case TEST_UNIT_READY:
569 	case PREVENT_ALLOW:
570 		atascsi_done(xs, XS_NOERROR);
571 		return;
572 
573 	default:
574 		atascsi_done(xs, XS_DRIVER_STUFFUP);
575 		return;
576 	}
577 
578 	xa->flags = flags;
579 	scsi_cmd_rw_decode(xs->cmd, &lba, &sector_count);
580 	if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
581 		atascsi_done(xs, XS_DRIVER_STUFFUP);
582 		return;
583 	}
584 
585 	fis = xa->fis;
586 
587 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
588 	fis->lba_low = lba & 0xff;
589 	fis->lba_mid = (lba >> 8) & 0xff;
590 	fis->lba_high = (lba >> 16) & 0xff;
591 
592 	if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
593 	    (xa->tag < ap->ap_ncqdepth) &&
594 	    !(xs->flags & SCSI_POLL)) {
595 		/* Use NCQ */
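		/*
		 * For READ/WRITE FPDMA QUEUED the register layout differs
		 * from plain DMA commands: the transfer length goes in the
		 * FEATURES fields and the NCQ tag is carried in bits 7:3 of
		 * the sector count field, hence sector_count = xa->tag << 3
		 * and the length in features/features_exp below.
		 */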
596 		xa->flags |= ATA_F_NCQ;
597 		fis->command = (xa->flags & ATA_F_WRITE) ?
598 		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
599 		fis->device = ATA_H2D_DEVICE_LBA;
600 		fis->lba_low_exp = (lba >> 24) & 0xff;
601 		fis->lba_mid_exp = (lba >> 32) & 0xff;
602 		fis->lba_high_exp = (lba >> 40) & 0xff;
603 		fis->sector_count = xa->tag << 3;
604 		fis->features = sector_count & 0xff;
605 		fis->features_exp = (sector_count >> 8) & 0xff;
606 	} else if (sector_count > 0x100 || lba > 0xfffffff) {
607 		/* Use LBA48 */
608 		fis->command = (xa->flags & ATA_F_WRITE) ?
609 		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
610 		fis->device = ATA_H2D_DEVICE_LBA;
611 		fis->lba_low_exp = (lba >> 24) & 0xff;
612 		fis->lba_mid_exp = (lba >> 32) & 0xff;
613 		fis->lba_high_exp = (lba >> 40) & 0xff;
614 		fis->sector_count = sector_count & 0xff;
615 		fis->sector_count_exp = (sector_count >> 8) & 0xff;
616 	} else {
617 		/* Use LBA */
618 		fis->command = (xa->flags & ATA_F_WRITE) ?
619 		    ATA_C_WRITEDMA : ATA_C_READDMA;
620 		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
621 		fis->sector_count = sector_count & 0xff;
622 	}
623 
624 	xa->data = xs->data;
625 	xa->datalen = xs->datalen;
626 	xa->complete = atascsi_disk_cmd_done;
627 	xa->timeout = xs->timeout;
628 	xa->pmp_port = ap->ap_pmp_port;
629 	xa->atascsi_private = xs;
630 	if (xs->flags & SCSI_POLL)
631 		xa->flags |= ATA_F_POLL;
632 
633 	ata_exec(as, xa);
634 }
635 
636 void
637 atascsi_disk_cmd_done(struct ata_xfer *xa)
638 {
639 	struct scsi_xfer	*xs = xa->atascsi_private;
640 
641 	switch (xa->state) {
642 	case ATA_S_COMPLETE:
643 		xs->error = XS_NOERROR;
644 		break;
645 	case ATA_S_ERROR:
646 		/* fake sense? */
647 		xs->error = XS_DRIVER_STUFFUP;
648 		break;
649 	case ATA_S_TIMEOUT:
650 		xs->error = XS_TIMEOUT;
651 		break;
652 	default:
653 		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
654 		    xa->state);
655 	}
656 
657 	xs->resid = xa->resid;
658 
659 	scsi_done(xs);
660 }
661 
662 void
663 atascsi_disk_inq(struct scsi_xfer *xs)
664 {
665 	struct scsi_inquiry	*inq = (struct scsi_inquiry *)xs->cmd;
666 
667 	if (xs->cmdlen != sizeof(*inq)) {
668 		atascsi_done(xs, XS_DRIVER_STUFFUP);
669 		return;
670 	}
671 
672 	if (ISSET(inq->flags, SI_EVPD)) {
673 		switch (inq->pagecode) {
674 		case SI_PG_SUPPORTED:
675 			atascsi_disk_vpd_supported(xs);
676 			break;
677 		case SI_PG_SERIAL:
678 			atascsi_disk_vpd_serial(xs);
679 			break;
680 		case SI_PG_DEVID:
681 			atascsi_disk_vpd_ident(xs);
682 			break;
683 		case SI_PG_ATA:
684 			atascsi_disk_vpd_ata(xs);
685 			break;
686 		case SI_PG_DISK_LIMITS:
687 			atascsi_disk_vpd_limits(xs);
688 			break;
689 		case SI_PG_DISK_INFO:
690 			atascsi_disk_vpd_info(xs);
691 			break;
692 		case SI_PG_DISK_THIN:
693 			atascsi_disk_vpd_thin(xs);
694 			break;
695 		default:
696 			atascsi_done(xs, XS_DRIVER_STUFFUP);
697 			break;
698 		}
699 	} else
700 		atascsi_disk_inquiry(xs);
701 }
702 
703 void
704 atascsi_disk_inquiry(struct scsi_xfer *xs)
705 {
706 	struct scsi_inquiry_data inq;
707 	struct scsi_link        *link = xs->sc_link;
708 	struct atascsi_port	*ap;
709 
710 	ap = atascsi_lookup_port(link);
711 
712 	bzero(&inq, sizeof(inq));
713 
714 	inq.device = T_DIRECT;
715 	inq.version = 0x05; /* SPC-3 */
716 	inq.response_format = 2;
717 	inq.additional_length = 32;
718 	inq.flags |= SID_CmdQue;
719 	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
720 	ata_swapcopy(ap->ap_identify.model, inq.product,
721 	    sizeof(inq.product));
722 	ata_swapcopy(ap->ap_identify.firmware, inq.revision,
723 	    sizeof(inq.revision));
724 
725 	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
726 
727 	atascsi_done(xs, XS_NOERROR);
728 }
729 
730 void
731 atascsi_disk_vpd_supported(struct scsi_xfer *xs)
732 {
733 	struct {
734 		struct scsi_vpd_hdr	hdr;
735 		u_int8_t		list[7];
736 	}			pg;
737 	struct scsi_link        *link = xs->sc_link;
738 	struct atascsi_port	*ap;
739 	int			fat;
740 
741 	ap = atascsi_lookup_port(link);
742 	fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;
743 
744 	bzero(&pg, sizeof(pg));
745 
746 	pg.hdr.device = T_DIRECT;
747 	pg.hdr.page_code = SI_PG_SUPPORTED;
748 	_lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
749 	pg.list[0] = SI_PG_SUPPORTED;
750 	pg.list[1] = SI_PG_SERIAL;
751 	pg.list[2] = SI_PG_DEVID;
752 	pg.list[3] = SI_PG_ATA;
753 	pg.list[4] = SI_PG_DISK_LIMITS;
754 	pg.list[5] = SI_PG_DISK_INFO;
755 	pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat. get it? tehe. */
756 
757 	bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));
758 
759 	atascsi_done(xs, XS_NOERROR);
760 }
761 
762 void
763 atascsi_disk_vpd_serial(struct scsi_xfer *xs)
764 {
765 	struct scsi_link        *link = xs->sc_link;
766 	struct atascsi_port	*ap;
767 	struct scsi_vpd_serial	pg;
768 
769 	ap = atascsi_lookup_port(link);
770 	bzero(&pg, sizeof(pg));
771 
772 	pg.hdr.device = T_DIRECT;
773 	pg.hdr.page_code = SI_PG_SERIAL;
774 	_lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
775 	ata_swapcopy(ap->ap_identify.serial, pg.serial,
776 	    sizeof(ap->ap_identify.serial));
777 
778 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
779 
780 	atascsi_done(xs, XS_NOERROR);
781 }
782 
783 void
784 atascsi_disk_vpd_ident(struct scsi_xfer *xs)
785 {
786 	struct scsi_link        *link = xs->sc_link;
787 	struct atascsi_port	*ap;
788 	struct {
789 		struct scsi_vpd_hdr	hdr;
790 		struct scsi_vpd_devid_hdr devid_hdr;
791 		u_int8_t		devid[68];
792 	}			pg;
793 	u_int8_t		*p;
794 	size_t			pg_len;
795 
796 	ap = atascsi_lookup_port(link);
797 	bzero(&pg, sizeof(pg));
798 	if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
799 		pg_len = 8;
800 
801 		pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
802 		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;
803 
804 		ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
805 	} else {
806 		pg_len = 68;
807 
808 		pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
809 		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;
810 
811 		p = pg.devid;
812 		bcopy("ATA     ", p, 8);
813 		p += 8;
814 		ata_swapcopy(ap->ap_identify.model, p,
815 		    sizeof(ap->ap_identify.model));
816 		p += sizeof(ap->ap_identify.model);
817 		ata_swapcopy(ap->ap_identify.serial, p,
818 		    sizeof(ap->ap_identify.serial));
819 	}
820 
821 	pg.devid_hdr.len = pg_len;
822 	pg_len += sizeof(pg.devid_hdr);
823 
824 	pg.hdr.device = T_DIRECT;
825 	pg.hdr.page_code = SI_PG_DEVID;
826 	_lto2b(pg_len, pg.hdr.page_length);
827 	pg_len += sizeof(pg.hdr);
828 
829 	bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));
830 
831 	atascsi_done(xs, XS_NOERROR);
832 }
833 
834 void
835 atascsi_disk_vpd_ata(struct scsi_xfer *xs)
836 {
837 	struct scsi_link        *link = xs->sc_link;
838 	struct atascsi_port	*ap;
839 	struct scsi_vpd_ata	pg;
840 
841 	ap = atascsi_lookup_port(link);
842 	bzero(&pg, sizeof(pg));
843 
844 	pg.hdr.device = T_DIRECT;
845 	pg.hdr.page_code = SI_PG_ATA;
846 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
847 
848 	memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
849 	memcpy(pg.sat_vendor, "OpenBSD",
850 	    MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
851 	memset(pg.sat_product, ' ', sizeof(pg.sat_product));
852 	memcpy(pg.sat_product, "atascsi",
853 	    MIN(strlen("atascsi"), sizeof(pg.sat_product)));
854 	memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
855 	memcpy(pg.sat_revision, osrelease,
856 	    MIN(strlen(osrelease), sizeof(pg.sat_revision)));
857 
858 	/* XXX device signature */
859 
860 	switch (ap->ap_type) {
861 	case ATA_PORT_T_DISK:
862 		pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
863 		break;
864 	case ATA_PORT_T_ATAPI:
865 		pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
866 		break;
867 	}
868 
869 	memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));
870 
871 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
872 
873 	atascsi_done(xs, XS_NOERROR);
874 }
875 
876 void
877 atascsi_disk_vpd_limits(struct scsi_xfer *xs)
878 {
879 	struct scsi_link        *link = xs->sc_link;
880 	struct atascsi_port	*ap;
881 	struct scsi_vpd_disk_limits pg;
882 
883 	ap = atascsi_lookup_port(link);
884 	bzero(&pg, sizeof(pg));
885 	pg.hdr.device = T_DIRECT;
886 	pg.hdr.page_code = SI_PG_DISK_LIMITS;
887 	_lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);
888 
889 	_lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
890 	    pg.optimal_xfer_granularity);
891 
892 	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
893 		/*
894 		 * ATA only supports 65535 blocks per TRIM descriptor, so use
895 		 * that as the maximum to avoid having to split UNMAP
896 		 * descriptors or overflow the page limit.
897 		 */
898 		_lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
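		/*
		 * The TRIM payload is built in a single 512-byte sector,
		 * which holds 64 eight-byte descriptors, hence 512 / 8
		 * below; atascsi_disk_unmap() rejects anything larger.
		 */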
899 		_lto4b(512 / 8, pg.max_unmap_desc_count);
900 	}
901 
902 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
903 
904 	atascsi_done(xs, XS_NOERROR);
905 }
906 
907 void
908 atascsi_disk_vpd_info(struct scsi_xfer *xs)
909 {
910 	struct scsi_link        *link = xs->sc_link;
911 	struct atascsi_port	*ap;
912 	struct scsi_vpd_disk_info pg;
913 
914 	ap = atascsi_lookup_port(link);
915 	bzero(&pg, sizeof(pg));
916 	pg.hdr.device = T_DIRECT;
917 	pg.hdr.page_code = SI_PG_DISK_INFO;
918 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
919 
920 	_lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
921 	pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;
922 
923 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
924 
925 	atascsi_done(xs, XS_NOERROR);
926 }
927 
928 void
929 atascsi_disk_vpd_thin(struct scsi_xfer *xs)
930 {
931 	struct scsi_link        *link = xs->sc_link;
932 	struct atascsi_port	*ap;
933 	struct scsi_vpd_disk_thin pg;
934 
935 	ap = atascsi_lookup_port(link);
936 	if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
937 		atascsi_done(xs, XS_DRIVER_STUFFUP);
938 		return;
939 	}
940 
941 	bzero(&pg, sizeof(pg));
942 	pg.hdr.device = T_DIRECT;
943 	pg.hdr.page_code = SI_PG_DISK_THIN;
944 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
945 
946 	pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;
947 
948 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
949 
950 	atascsi_done(xs, XS_NOERROR);
951 }
952 
953 void
954 atascsi_disk_write_same_16(struct scsi_xfer *xs)
955 {
956 	struct scsi_link	*link = xs->sc_link;
957 	struct atascsi		*as = link->adapter_softc;
958 	struct atascsi_port	*ap;
959 	struct scsi_write_same_16 *cdb;
960 	struct ata_xfer		*xa = xs->io;
961 	struct ata_fis_h2d	*fis;
962 	u_int64_t		lba;
963 	u_int32_t		length;
964 	u_int64_t		desc;
965 
966 	if (xs->cmdlen != sizeof(*cdb)) {
967 		atascsi_done(xs, XS_DRIVER_STUFFUP);
968 		return;
969 	}
970 
971 	ap = atascsi_lookup_port(link);
972 	cdb = (struct scsi_write_same_16 *)xs->cmd;
973 
974 	if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
975 	   !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
976 		/* generate sense data */
977 		atascsi_done(xs, XS_DRIVER_STUFFUP);
978 		return;
979 	}
980 
981 	if (xs->datalen < 512) {
982 		/* generate sense data */
983 		atascsi_done(xs, XS_DRIVER_STUFFUP);
984 		return;
985 	}
986 
987 	lba = _8btol(cdb->lba);
988 	length = _4btol(cdb->length);
989 
990 	if (length > ATA_DSM_TRIM_MAX_LEN) {
991 		/* XXX we don't support requests over 65535 blocks */
992 		atascsi_done(xs, XS_DRIVER_STUFFUP);
993 		return;
994 	}
995 
996 	xa->data = xs->data;
997 	xa->datalen = 512;
998 	xa->flags = ATA_F_WRITE;
999 	xa->pmp_port = ap->ap_pmp_port;
1000 	if (xs->flags & SCSI_POLL)
1001 		xa->flags |= ATA_F_POLL;
1002 	xa->complete = atascsi_disk_write_same_16_done;
1003 	xa->atascsi_private = xs;
1004 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1005 
1006 	/* TRIM sends a list of blocks to discard in the databuf. */
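	/*
	 * Each DSM TRIM range entry is a little-endian 64-bit word with the
	 * 48-bit starting LBA in the low bits and a 16-bit block count in
	 * the top bits; ATA_DSM_TRIM_DESC() packs it that way, which is why
	 * the length was capped at ATA_DSM_TRIM_MAX_LEN (65535) above.
	 */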
1007 	memset(xa->data, 0, xa->datalen);
1008 	desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
1009 	memcpy(xa->data, &desc, sizeof(desc));
1010 
1011 	fis = xa->fis;
1012 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1013 	fis->command = ATA_C_DSM;
1014 	fis->features = ATA_DSM_TRIM;
1015 	fis->sector_count = 1;
1016 
1017 	ata_exec(as, xa);
1018 }
1019 
1020 void
1021 atascsi_disk_write_same_16_done(struct ata_xfer *xa)
1022 {
1023 	struct scsi_xfer	*xs = xa->atascsi_private;
1024 
1025 	switch (xa->state) {
1026 	case ATA_S_COMPLETE:
1027 		xs->error = XS_NOERROR;
1028 		break;
1029 	case ATA_S_ERROR:
1030 		xs->error = XS_DRIVER_STUFFUP;
1031 		break;
1032 	case ATA_S_TIMEOUT:
1033 		xs->error = XS_TIMEOUT;
1034 		break;
1035 
1036 	default:
1037 		panic("atascsi_disk_write_same_16_done: "
1038 		    "unexpected ata_xfer state (%d)", xa->state);
1039 	}
1040 
1041 	scsi_done(xs);
1042 }
1043 
1044 void
1045 atascsi_disk_unmap(struct scsi_xfer *xs)
1046 {
1047 	struct ata_xfer		*xa = xs->io;
1048 	struct scsi_unmap	*cdb;
1049 	struct scsi_unmap_data	*unmap;
1050 	u_int			len;
1051 
1052 	if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb)) {
1053 		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}
1054 
1055 	cdb = (struct scsi_unmap *)xs->cmd;
1056 	len = _2btol(cdb->list_len);
1057 	if (xs->datalen != len || len < sizeof(*unmap)) {
1058 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1059 		return;
1060 	}
1061 
1062 	unmap = (struct scsi_unmap_data *)xs->data;
1063 	if (_2btol(unmap->data_length) != len) {
1064 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1065 		return;
1066 	}
1067 
1068 	len = _2btol(unmap->desc_length);
1069 	if (len != xs->datalen - sizeof(*unmap)) {
1070 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1071 		return;
1072 	}
1073 
1074 	if (len < sizeof(struct scsi_unmap_desc)) {
1075 		/* no work, no error according to sbc3 */
1076 		atascsi_done(xs, XS_NOERROR);
		return;
1077 	}
1078 
1079 	if (len > sizeof(struct scsi_unmap_desc) * 64) {
1080 		/* more work than we advertised */
1081 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1082 		return;
1083 	}
1084 
1085 	/* let's go */
1086 	if (ISSET(xs->flags, SCSI_NOSLEEP)) {
1087 		task_set(&xa->task, atascsi_disk_unmap_task, xs);
1088 		task_add(systq, &xa->task);
1089 	} else {
1090 		/* we can already sleep for memory */
1091 		atascsi_disk_unmap_task(xs);
1092 	}
1093 }
1094 
1095 void
1096 atascsi_disk_unmap_task(void *xxs)
1097 {
1098 	struct scsi_xfer	*xs = xxs;
1099 	struct scsi_link	*link = xs->sc_link;
1100 	struct atascsi		*as = link->adapter_softc;
1101 	struct atascsi_port	*ap;
1102 	struct ata_xfer		*xa = xs->io;
1103 	struct ata_fis_h2d	*fis;
1104 	struct scsi_unmap_data	*unmap;
1105 	struct scsi_unmap_desc	*descs, *d;
1106 	u_int64_t		*trims;
1107 	u_int			len, i;
1108 
1109 	trims = dma_alloc(512, PR_WAITOK | PR_ZERO);
1110 
1111 	ap = atascsi_lookup_port(link);
1112 	unmap = (struct scsi_unmap_data *)xs->data;
1113 	descs = (struct scsi_unmap_desc *)(unmap + 1);
1114 
1115 	len = _2btol(unmap->desc_length) / sizeof(*d);
1116 	for (i = 0; i < len; i++) {
1117 		d = &descs[i];
1118 		if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
1119 			goto fail;
1120 
1121 		trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
1122 		    _4btol(d->logical_blocks)));
1123 	}
1124 
1125 	xa->data = trims;
1126 	xa->datalen = 512;
1127 	xa->flags = ATA_F_WRITE;
1128 	xa->pmp_port = ap->ap_pmp_port;
1129 	xa->complete = atascsi_disk_unmap_done;
1130 	xa->atascsi_private = xs;
1131 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1132 
1133 	fis = xa->fis;
1134 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1135 	fis->command = ATA_C_DSM;
1136 	fis->features = ATA_DSM_TRIM;
1137 	fis->sector_count = 1;
1138 
1139 	ata_exec(as, xa);
1140 	return;
1141 
1142  fail:
1143 	dma_free(trims, 512);
1144 	atascsi_done(xs, XS_DRIVER_STUFFUP);
1145 }
1146 
1147 void
1148 atascsi_disk_unmap_done(struct ata_xfer *xa)
1149 {
1150 	struct scsi_xfer	*xs = xa->atascsi_private;
1151 
1152 	dma_free(xa->data, 512);
1153 
1154 	switch (xa->state) {
1155 	case ATA_S_COMPLETE:
1156 		xs->error = XS_NOERROR;
1157 		break;
1158 	case ATA_S_ERROR:
1159 		xs->error = XS_DRIVER_STUFFUP;
1160 		break;
1161 	case ATA_S_TIMEOUT:
1162 		xs->error = XS_TIMEOUT;
1163 		break;
1164 
1165 	default:
1166 		panic("atascsi_disk_unmap_done: "
1167 		    "unexpected ata_xfer state (%d)", xa->state);
1168 	}
1169 
1170 	scsi_done(xs);
1171 }
1172 
1173 void
1174 atascsi_disk_sync(struct scsi_xfer *xs)
1175 {
1176 	struct scsi_link	*link = xs->sc_link;
1177 	struct atascsi		*as = link->adapter_softc;
1178 	struct atascsi_port	*ap;
1179 	struct ata_xfer		*xa = xs->io;
1180 
1181 	if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
1182 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1183 		return;
1184 	}
1185 
1186 	ap = atascsi_lookup_port(link);
1187 	xa->datalen = 0;
1188 	xa->flags = ATA_F_READ;
1189 	xa->complete = atascsi_disk_sync_done;
1190 	/* Spec says flush cache can take >30 sec, so give it at least 45. */
1191 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1192 	xa->atascsi_private = xs;
1193 	xa->pmp_port = ap->ap_pmp_port;
1194 	if (xs->flags & SCSI_POLL)
1195 		xa->flags |= ATA_F_POLL;
1196 
1197 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1198 	xa->fis->command = ATA_C_FLUSH_CACHE;
1199 	xa->fis->device = 0;
1200 
1201 	ata_exec(as, xa);
1202 }
1203 
1204 void
1205 atascsi_disk_sync_done(struct ata_xfer *xa)
1206 {
1207 	struct scsi_xfer	*xs = xa->atascsi_private;
1208 
1209 	switch (xa->state) {
1210 	case ATA_S_COMPLETE:
1211 		xs->error = XS_NOERROR;
1212 		break;
1213 
1214 	case ATA_S_ERROR:
1215 	case ATA_S_TIMEOUT:
1216 		printf("atascsi_disk_sync_done: %s\n",
1217 		    xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
1218 		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1219 		    XS_DRIVER_STUFFUP);
1220 		break;
1221 
1222 	default:
1223 		panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
1224 		    xa->state);
1225 	}
1226 
1227 	scsi_done(xs);
1228 }
1229 
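/*
 * Extract the device capacity from the IDENTIFY data: words 100-103 hold
 * the 48-bit sector count when the LBA48 feature set (word 83 bit 10) is
 * supported, otherwise words 60-61 hold the 28-bit count.  Note that this
 * returns the count minus one, i.e. the address of the last LBA, which is
 * what the SCSI READ CAPACITY commands report.
 */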
1230 u_int64_t
1231 ata_identify_blocks(struct ata_identify *id)
1232 {
1233 	u_int64_t		blocks = 0;
1234 	int			i;
1235 
1236 	if (letoh16(id->cmdset83) & 0x0400) {
1237 		/* LBA48 feature set supported */
1238 		for (i = 3; i >= 0; --i) {
1239 			blocks <<= 16;
1240 			blocks += letoh16(id->addrsecxt[i]);
1241 		}
1242 	} else {
1243 		blocks = letoh16(id->addrsec[1]);
1244 		blocks <<= 16;
1245 		blocks += letoh16(id->addrsec[0]);
1246 	}
1247 
1248 	return (blocks - 1);
1249 }
1250 
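/*
 * Logical sector size: if IDENTIFY word 106 is valid and flags a logical
 * sector longer than 256 words, words 117-118 give the sector size in
 * 16-bit words, hence the shift left by one to convert to bytes;
 * otherwise assume the traditional 512 bytes.
 */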
1251 u_int
1252 ata_identify_blocksize(struct ata_identify *id)
1253 {
1254 	u_int			blocksize = 512;
1255 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1256 
1257 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1258 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
1259 		blocksize = letoh16(id->words_lsec[1]);
1260 		blocksize <<= 16;
1261 		blocksize += letoh16(id->words_lsec[0]);
1262 		blocksize <<= 1;
1263 	}
1264 
1265 	return (blocksize);
1266 }
1267 
1268 u_int
1269 ata_identify_block_l2p_exp(struct ata_identify *id)
1270 {
1271 	u_int			exponent = 0;
1272 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1273 
1274 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1275 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
1276 		exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
1277 	}
1278 
1279 	return (exponent);
1280 }
1281 
1282 u_int
1283 ata_identify_block_logical_align(struct ata_identify *id)
1284 {
1285 	u_int			align = 0;
1286 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1287 	u_int16_t		logical_align = letoh16(id->logical_align);
1288 
1289 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1290 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
1291 	    (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
1292 		align = logical_align & ATA_ID_LALIGN;
1293 
1294 	return (align);
1295 }
1296 
1297 void
1298 atascsi_disk_capacity(struct scsi_xfer *xs)
1299 {
1300 	struct scsi_link	*link = xs->sc_link;
1301 	struct atascsi_port	*ap;
1302 	struct scsi_read_cap_data rcd;
1303 	u_int64_t		capacity;
1304 
1305 	ap = atascsi_lookup_port(link);
1306 	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
1307 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1308 		return;
1309 	}
1310 
1311 	bzero(&rcd, sizeof(rcd));
1312 	capacity = ata_identify_blocks(&ap->ap_identify);
1313 	if (capacity > 0xffffffff)
1314 		capacity = 0xffffffff;
1315 
1316 	_lto4b(capacity, rcd.addr);
1317 	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1318 
1319 	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1320 
1321 	atascsi_done(xs, XS_NOERROR);
1322 }
1323 
1324 void
1325 atascsi_disk_capacity16(struct scsi_xfer *xs)
1326 {
1327 	struct scsi_link	*link = xs->sc_link;
1328 	struct atascsi_port	*ap;
1329 	struct scsi_read_cap_data_16 rcd;
1330 	u_int			align;
1331 	u_int16_t		lowest_aligned = 0;
1332 
1333 	ap = atascsi_lookup_port(link);
1334 	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
1335 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1336 		return;
1337 	}
1338 
1339 	bzero(&rcd, sizeof(rcd));
1340 
1341 	_lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
1342 	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1343 	rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
1344 	align = ata_identify_block_logical_align(&ap->ap_identify);
1345 	if (align > 0)
1346 		lowest_aligned = (1 << rcd.logical_per_phys) - align;
1347 
1348 	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
1349 		SET(lowest_aligned, READ_CAP_16_TPE);
1350 
1351 		if (ISSET(letoh16(ap->ap_identify.add_support),
1352 		    ATA_ID_ADD_SUPPORT_DRT))
1353 			SET(lowest_aligned, READ_CAP_16_TPRZ);
1354 	}
1355 	_lto2b(lowest_aligned, rcd.lowest_aligned);
1356 
1357 	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1358 
1359 	atascsi_done(xs, XS_NOERROR);
1360 }
1361 
1362 int
1363 atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
1364 {
1365 	struct ata_xfer		*xa = xs->io;
1366 
1367 	xa->data = xs->data;
1368 	xa->datalen = xs->datalen;
1369 	xa->timeout = xs->timeout;
1370 	xa->flags = 0;
1371 	if (xs->flags & SCSI_DATA_IN)
1372 		xa->flags |= ATA_F_READ;
1373 	if (xs->flags & SCSI_DATA_OUT)
1374 		xa->flags |= ATA_F_WRITE;
1375 	if (xs->flags & SCSI_POLL)
1376 		xa->flags |= ATA_F_POLL;
1377 
1378 	switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
1379 	case ATA_PASSTHRU_PROTO_NON_DATA:
1380 	case ATA_PASSTHRU_PROTO_PIO_DATAIN:
1381 	case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
1382 		xa->flags |= ATA_F_PIO;
1383 		break;
1384 	default:
1385 		/* we dont support this yet */
1386 		return (1);
1387 	}
1388 
1389 	xa->atascsi_private = xs;
1390 	xa->complete = atascsi_passthru_done;
1391 
1392 	return (0);
1393 }
1394 
1395 void
1396 atascsi_passthru_12(struct scsi_xfer *xs)
1397 {
1398 	struct scsi_link	*link = xs->sc_link;
1399 	struct atascsi		*as = link->adapter_softc;
1400 	struct atascsi_port	*ap;
1401 	struct ata_xfer		*xa = xs->io;
1402 	struct scsi_ata_passthru_12 *cdb;
1403 	struct ata_fis_h2d	*fis;
1404 
1405 	if (xs->cmdlen != sizeof(*cdb)) {
1406 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1407 		return;
1408 	}
1409 
1410 	cdb = (struct scsi_ata_passthru_12 *)xs->cmd;
1411 	/* validate cdb */
1412 
1413 	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1414 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1415 		return;
1416 	}
1417 
1418 	ap = atascsi_lookup_port(link);
1419 	fis = xa->fis;
1420 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1421 	fis->command = cdb->command;
1422 	fis->features = cdb->features;
1423 	fis->lba_low = cdb->lba_low;
1424 	fis->lba_mid = cdb->lba_mid;
1425 	fis->lba_high = cdb->lba_high;
1426 	fis->device = cdb->device;
1427 	fis->sector_count = cdb->sector_count;
1428 	xa->pmp_port = ap->ap_pmp_port;
1429 
1430 	ata_exec(as, xa);
1431 }
1432 
1433 void
1434 atascsi_passthru_16(struct scsi_xfer *xs)
1435 {
1436 	struct scsi_link	*link = xs->sc_link;
1437 	struct atascsi		*as = link->adapter_softc;
1438 	struct atascsi_port	*ap;
1439 	struct ata_xfer		*xa = xs->io;
1440 	struct scsi_ata_passthru_16 *cdb;
1441 	struct ata_fis_h2d	*fis;
1442 
1443 	if (xs->cmdlen != sizeof(*cdb)) {
1444 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1445 		return;
1446 	}
1447 
1448 	cdb = (struct scsi_ata_passthru_16 *)xs->cmd;
1449 	/* validate cdb */
1450 
1451 	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1452 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1453 		return;
1454 	}
1455 
1456 	ap = atascsi_lookup_port(link);
1457 	fis = xa->fis;
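	/*
	 * In the 16-byte pass-through CDB each taskfile register is a
	 * two-byte array: index 1 holds the current (low order) byte and
	 * index 0 the previous (high order) byte used by 48-bit commands,
	 * so they map onto the FIS registers and their _exp counterparts
	 * respectively.
	 */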
1458 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1459 	fis->command = cdb->command;
1460 	fis->features = cdb->features[1];
1461 	fis->lba_low = cdb->lba_low[1];
1462 	fis->lba_mid = cdb->lba_mid[1];
1463 	fis->lba_high = cdb->lba_high[1];
1464 	fis->device = cdb->device;
1465 	fis->lba_low_exp = cdb->lba_low[0];
1466 	fis->lba_mid_exp = cdb->lba_mid[0];
1467 	fis->lba_high_exp = cdb->lba_high[0];
1468 	fis->features_exp = cdb->features[0];
1469 	fis->sector_count = cdb->sector_count[1];
1470 	fis->sector_count_exp = cdb->sector_count[0];
1471 	xa->pmp_port = ap->ap_pmp_port;
1472 
1473 	ata_exec(as, xa);
1474 }
1475 
1476 void
1477 atascsi_passthru_done(struct ata_xfer *xa)
1478 {
1479 	struct scsi_xfer	*xs = xa->atascsi_private;
1480 
1481 	/*
1482 	 * XXX need to generate sense if cdb wants it
1483 	 */
1484 
1485 	switch (xa->state) {
1486 	case ATA_S_COMPLETE:
1487 		xs->error = XS_NOERROR;
1488 		break;
1489 	case ATA_S_ERROR:
1490 		xs->error = XS_DRIVER_STUFFUP;
1491 		break;
1492 	case ATA_S_TIMEOUT:
1493 		printf("atascsi_passthru_done, timeout\n");
1494 		xs->error = XS_TIMEOUT;
1495 		break;
1496 	default:
1497 		panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
1498 		    xa->state);
1499 	}
1500 
1501 	xs->resid = xa->resid;
1502 
1503 	scsi_done(xs);
1504 }
1505 
1506 void
1507 atascsi_disk_sense(struct scsi_xfer *xs)
1508 {
1509 	struct scsi_sense_data	*sd = (struct scsi_sense_data *)xs->data;
1510 
1511 	bzero(xs->data, xs->datalen);
1512 	/* check datalen > sizeof(struct scsi_sense_data)? */
1513 	sd->error_code = SSD_ERRCODE_CURRENT;
1514 	sd->flags = SKEY_NO_SENSE;
1515 
1516 	atascsi_done(xs, XS_NOERROR);
1517 }
1518 
1519 void
1520 atascsi_disk_start_stop(struct scsi_xfer *xs)
1521 {
1522 	struct scsi_link	*link = xs->sc_link;
1523 	struct atascsi		*as = link->adapter_softc;
1524 	struct atascsi_port	*ap;
1525 	struct ata_xfer		*xa = xs->io;
1526 	struct scsi_start_stop	*ss = (struct scsi_start_stop *)xs->cmd;
1527 
1528 	if (xs->cmdlen != sizeof(*ss)) {
1529 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1530 		return;
1531 	}
1532 
1533 	if (ss->how != SSS_STOP) {
1534 		atascsi_done(xs, XS_NOERROR);
1535 		return;
1536 	}
1537 
1538 	/*
1539 	 * A SCSI START STOP UNIT command with the START bit set to
1540 	 * zero gets translated into an ATA FLUSH CACHE command
1541 	 * followed by an ATA STANDBY IMMEDIATE command.
1542 	 */
1543 	ap = atascsi_lookup_port(link);
1544 	xa->datalen = 0;
1545 	xa->flags = ATA_F_READ;
1546 	xa->complete = atascsi_disk_start_stop_done;
1547 	/* Spec says flush cache can take >30 sec, so give it at least 45. */
1548 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1549 	xa->pmp_port = ap->ap_pmp_port;
1550 	xa->atascsi_private = xs;
1551 	if (xs->flags & SCSI_POLL)
1552 		xa->flags |= ATA_F_POLL;
1553 
1554 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1555 	xa->fis->command = ATA_C_FLUSH_CACHE;
1556 	xa->fis->device = 0;
1557 
1558 	ata_exec(as, xa);
1559 }
1560 
1561 void
1562 atascsi_disk_start_stop_done(struct ata_xfer *xa)
1563 {
1564 	struct scsi_xfer	*xs = xa->atascsi_private;
1565 	struct scsi_link	*link = xs->sc_link;
1566 	struct atascsi		*as = link->adapter_softc;
1567 	struct atascsi_port	*ap;
1568 
1569 	switch (xa->state) {
1570 	case ATA_S_COMPLETE:
1571 		break;
1572 
1573 	case ATA_S_ERROR:
1574 	case ATA_S_TIMEOUT:
1575 		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1576 		    XS_DRIVER_STUFFUP);
1577 		xs->resid = xa->resid;
1578 		scsi_done(xs);
1579 		return;
1580 
1581 	default:
1582 		panic("atascsi_disk_start_stop_done: unexpected ata_xfer state (%d)",
1583 		    xa->state);
1584 	}
1585 
1586 	/*
1587 	 * The FLUSH CACHE command completed successfully; now issue
1588 	 * the STANDBY IMMEDIATE command.
1589 	 */
1590 	ap = atascsi_lookup_port(link);
1591 	xa->datalen = 0;
1592 	xa->flags = ATA_F_READ;
1593 	xa->state = ATA_S_SETUP;
1594 	xa->complete = atascsi_disk_cmd_done;
1595 	/* As with the flush cache command, give it at least 45 sec. */
1596 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1597 	xa->pmp_port = ap->ap_pmp_port;
1598 	xa->atascsi_private = xs;
1599 	if (xs->flags & SCSI_POLL)
1600 		xa->flags |= ATA_F_POLL;
1601 
1602 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1603 	xa->fis->command = ATA_C_STANDBY_IMMED;
1604 	xa->fis->device = 0;
1605 
1606 	ata_exec(as, xa);
1607 }
1608 
1609 void
1610 atascsi_atapi_cmd(struct scsi_xfer *xs)
1611 {
1612 	struct scsi_link	*link = xs->sc_link;
1613 	struct atascsi		*as = link->adapter_softc;
1614 	struct atascsi_port	*ap;
1615 	struct ata_xfer		*xa = xs->io;
1616 	struct ata_fis_h2d	*fis;
1617 
1618 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1619 	case SCSI_DATA_IN:
1620 		xa->flags = ATA_F_PACKET | ATA_F_READ;
1621 		break;
1622 	case SCSI_DATA_OUT:
1623 		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
1624 		break;
1625 	default:
1626 		xa->flags = ATA_F_PACKET;
1627 	}
1628 	xa->flags |= ATA_F_GET_RFIS;
1629 
1630 	ap = atascsi_lookup_port(link);
1631 	xa->data = xs->data;
1632 	xa->datalen = xs->datalen;
1633 	xa->complete = atascsi_atapi_cmd_done;
1634 	xa->timeout = xs->timeout;
1635 	xa->pmp_port = ap->ap_pmp_port;
1636 	xa->atascsi_private = xs;
1637 	if (xs->flags & SCSI_POLL)
1638 		xa->flags |= ATA_F_POLL;
1639 
1640 	fis = xa->fis;
1641 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1642 	fis->command = ATA_C_PACKET;
1643 	fis->device = 0;
1644 	fis->sector_count = xa->tag << 3;
1645 	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
1646 	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
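	/*
	 * lba_mid/lba_high carry the ATAPI byte count limit for the PACKET
	 * command; 0x2000 (8192 bytes) is used here.
	 */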
1647 	fis->lba_mid = 0x00;
1648 	fis->lba_high = 0x20;
1649 
1650 	/* Copy SCSI command into ATAPI packet. */
1651 	memcpy(xa->packetcmd, xs->cmd, xs->cmdlen);
1652 
1653 	ata_exec(as, xa);
1654 }
1655 
1656 void
1657 atascsi_atapi_cmd_done(struct ata_xfer *xa)
1658 {
1659 	struct scsi_xfer	*xs = xa->atascsi_private;
1660 	struct scsi_sense_data  *sd = &xs->sense;
1661 
1662 	switch (xa->state) {
1663 	case ATA_S_COMPLETE:
1664 		xs->error = XS_NOERROR;
1665 		break;
1666 	case ATA_S_ERROR:
1667 		/* Return PACKET sense data */
1668 		sd->error_code = SSD_ERRCODE_CURRENT;
1669 		sd->flags = (xa->rfis.error & 0xf0) >> 4;
1670 		if (xa->rfis.error & 0x04)
1671 			sd->flags = SKEY_ILLEGAL_REQUEST;
1672 		if (xa->rfis.error & 0x02)
1673 			sd->flags |= SSD_EOM;
1674 		if (xa->rfis.error & 0x01)
1675 			sd->flags |= SSD_ILI;
1676 		xs->error = XS_SENSE;
1677 		break;
1678 	case ATA_S_TIMEOUT:
1679 		printf("atascsi_atapi_cmd_done, timeout\n");
1680 		xs->error = XS_TIMEOUT;
1681 		break;
1682 	default:
1683 		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
1684 		    xa->state);
1685 	}
1686 
1687 	xs->resid = xa->resid;
1688 
1689 	scsi_done(xs);
1690 }
1691 
1692 void
1693 atascsi_pmp_cmd(struct scsi_xfer *xs)
1694 {
1695 	switch (xs->cmd->opcode) {
1696 	case REQUEST_SENSE:
1697 		atascsi_pmp_sense(xs);
1698 		return;
1699 	case INQUIRY:
1700 		atascsi_pmp_inq(xs);
1701 		return;
1702 
1703 	case TEST_UNIT_READY:
1704 	case PREVENT_ALLOW:
1705 		atascsi_done(xs, XS_NOERROR);
1706 		return;
1707 
1708 	default:
1709 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1710 		return;
1711 	}
1712 }
1713 
1714 void
1715 atascsi_pmp_sense(struct scsi_xfer *xs)
1716 {
1717 	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;
1718 
1719 	bzero(xs->data, xs->datalen);
1720 	sd->error_code = SSD_ERRCODE_CURRENT;
1721 	sd->flags = SKEY_NO_SENSE;
1722 
1723 	atascsi_done(xs, XS_NOERROR);
1724 }
1725 
1726 void
1727 atascsi_pmp_inq(struct scsi_xfer *xs)
1728 {
1729 	struct scsi_inquiry_data inq;
1730 	struct scsi_inquiry *in_inq = (struct scsi_inquiry *)xs->cmd;
1731 
1732 	if (ISSET(in_inq->flags, SI_EVPD)) {
1733 		/* any evpd pages we need to support here? */
1734 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1735 		return;
1736 	}
1737 
1738 	bzero(&inq, sizeof(inq));
1739 	inq.device = 0x1E;	/* "well known logical unit" seems reasonable */
1740 	inq.version = 0x05;	/* SPC-3? */
1741 	inq.response_format = 2;
1742 	inq.additional_length = 32;
1743 	inq.flags |= SID_CmdQue;
1744 	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
1745 
1746 	/* should use the data from atascsi_pmp_identify here?
1747 	 * not sure how useful the chip id is, but maybe it'd be
1748 	 * nice to include the number of ports.
1749 	 */
1750 	bcopy("Port Multiplier", inq.product, sizeof(inq.product));
1751 	bcopy("    ", inq.revision, sizeof(inq.revision));
1752 
1753 	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
1754 	atascsi_done(xs, XS_NOERROR);
1755 }
1756 
1757 void
1758 atascsi_done(struct scsi_xfer *xs, int error)
1759 {
1760 	xs->error = error;
1761 	scsi_done(xs);
1762 }
1763 
1764 void
1765 ata_exec(struct atascsi *as, struct ata_xfer *xa)
1766 {
1767 	as->as_methods->ata_cmd(xa);
1768 }
1769 
1770 void *
1771 atascsi_io_get(void *cookie)
1772 {
1773 	struct atascsi_host_port	*ahp = cookie;
1774 	struct atascsi			*as = ahp->ahp_as;
1775 	struct ata_xfer			*xa;
1776 
1777 	xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
1778 	if (xa != NULL)
1779 		xa->fis->type = ATA_FIS_TYPE_H2D;
1780 
1781 	return (xa);
1782 }
1783 
1784 void
1785 atascsi_io_put(void *cookie, void *io)
1786 {
1787 	struct atascsi_host_port	*ahp = cookie;
1788 	struct atascsi			*as = ahp->ahp_as;
1789 	struct ata_xfer			*xa = io;
1790 
1791 	xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
1792 	as->as_methods->ata_put_xfer(xa);
1793 }
1794 
1795 void
1796 ata_polled_complete(struct ata_xfer *xa)
1797 {
1798 	/* do nothing */
1799 }
1800 
1801 int
1802 ata_polled(struct ata_xfer *xa)
1803 {
1804 	int			rv;
1805 
1806 	if (!ISSET(xa->flags, ATA_F_DONE))
1807 		panic("ata_polled: xa isn't complete");
1808 
1809 	switch (xa->state) {
1810 	case ATA_S_COMPLETE:
1811 		rv = 0;
1812 		break;
1813 	case ATA_S_ERROR:
1814 	case ATA_S_TIMEOUT:
1815 		rv = EIO;
1816 		break;
1817 	default:
1818 		panic("ata_polled: xa state (%d)",
1819 		    xa->state);
1820 	}
1821 
1822 	scsi_io_put(xa->atascsi_private, xa);
1823 
1824 	return (rv);
1825 }
1826 
1827 void
1828 ata_complete(struct ata_xfer *xa)
1829 {
1830 	SET(xa->flags, ATA_F_DONE);
1831 	xa->complete(xa);
1832 }
1833 
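/*
 * ATA IDENTIFY strings (model, firmware, serial) store two ASCII
 * characters per 16-bit word with the first character in the high byte,
 * so swap each word while copying them out for SCSI consumers.
 */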
1834 void
1835 ata_swapcopy(void *src, void *dst, size_t len)
1836 {
1837 	u_int16_t *s = src, *d = dst;
1838 	int i;
1839 
1840 	len /= 2;
1841 
1842 	for (i = 0; i < len; i++)
1843 		d[i] = swap16(s[i]);
1844 }
1845 
1846 int
1847 atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
1848 {
1849 	struct atascsi			*as = ap->ap_as;
1850 	struct atascsi_host_port	*ahp = ap->ap_host_port;
1851 	struct ata_xfer			*xa;
1852 
1853 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1854 	if (xa == NULL)
1855 		panic("no free xfers on a new port");
1856 	xa->pmp_port = ap->ap_pmp_port;
1857 	xa->data = identify;
1858 	xa->datalen = sizeof(*identify);
1859 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1860 	xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
1861 	    ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
1862 	xa->fis->device = 0;
1863 	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
1864 	xa->timeout = 1000;
1865 	xa->complete = ata_polled_complete;
1866 	xa->atascsi_private = &ahp->ahp_iopool;
1867 	ata_exec(as, xa);
1868 	return (ata_polled(xa));
1869 }
1870 
1871 int
1872 atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
1873 {
1874 	struct atascsi			*as = ap->ap_as;
1875 	struct atascsi_host_port	*ahp = ap->ap_host_port;
1876 	struct ata_xfer			*xa;
1877 
1878 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1879 	if (xa == NULL)
1880 		panic("no free xfers on a new port");
1881 	xa->fis->command = ATA_C_SET_FEATURES;
1882 	xa->fis->features = subcommand;
1883 	xa->fis->sector_count = arg;
1884 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1885 	xa->flags = ATA_F_POLL;
1886 	xa->timeout = 1000;
1887 	xa->complete = ata_polled_complete;
1888 	xa->pmp_port = ap->ap_pmp_port;
1889 	xa->atascsi_private = &ahp->ahp_iopool;
1890 	ata_exec(as, xa);
1891 	return (ata_polled(xa));
1892 }
1893