1 /*	$NetBSD: mly.c,v 1.44 2010/11/13 13:52:07 uebayasi Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*-
33  * Copyright (c) 2000, 2001 Michael Smith
34  * Copyright (c) 2000 BSDi
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
47  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56  * SUCH DAMAGE.
57  *
58  * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
59  */
60 
61 /*
62  * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
63  *
64  * TODO:
65  *
66  * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
67  * o Handle FC and multiple LUNs.
68  * o Fix mmbox usage.
69  * o Fix transfer speed fudge.
70  */
71 
72 #include <sys/cdefs.h>
73 __KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.44 2010/11/13 13:52:07 uebayasi Exp $");
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/device.h>
78 #include <sys/kernel.h>
79 #include <sys/queue.h>
80 #include <sys/buf.h>
81 #include <sys/endian.h>
82 #include <sys/conf.h>
83 #include <sys/malloc.h>
84 #include <sys/ioctl.h>
85 #include <sys/scsiio.h>
86 #include <sys/kthread.h>
87 #include <sys/kauth.h>
88 
89 #include <sys/bus.h>
90 
91 #include <dev/scsipi/scsi_all.h>
92 #include <dev/scsipi/scsipi_all.h>
93 #include <dev/scsipi/scsiconf.h>
94 
95 #include <dev/pci/pcireg.h>
96 #include <dev/pci/pcivar.h>
97 #include <dev/pci/pcidevs.h>
98 
99 #include <dev/pci/mlyreg.h>
100 #include <dev/pci/mlyio.h>
101 #include <dev/pci/mlyvar.h>
102 #include <dev/pci/mly_tables.h>
103 
104 static void	mly_attach(device_t, device_t, void *);
105 static int	mly_match(device_t, cfdata_t, void *);
106 static const	struct mly_ident *mly_find_ident(struct pci_attach_args *);
107 static int	mly_fwhandshake(struct mly_softc *);
108 static int	mly_flush(struct mly_softc *);
109 static int	mly_intr(void *);
110 static void	mly_shutdown(void *);
111 
112 static int	mly_alloc_ccbs(struct mly_softc *);
113 static void	mly_check_event(struct mly_softc *);
114 static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
115 static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
116 static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
117 				 void **, bus_addr_t *, bus_dma_segment_t *);
118 static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
119 				void *, bus_dma_segment_t *);
120 static int	mly_enable_mmbox(struct mly_softc *);
121 static void	mly_fetch_event(struct mly_softc *);
122 static int	mly_get_controllerinfo(struct mly_softc *);
123 static int	mly_get_eventstatus(struct mly_softc *);
124 static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
125 			  void **, size_t, void *, size_t *);
126 static void	mly_padstr(char *, const char *, int);
127 static void	mly_process_event(struct mly_softc *, struct mly_event *);
128 static void	mly_release_ccbs(struct mly_softc *);
129 static int	mly_scan_btl(struct mly_softc *, int, int);
130 static void	mly_scan_channel(struct mly_softc *, int);
131 static void	mly_thread(void *);
132 
133 static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
134 static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
135 static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
136 static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
137 static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
138 static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
139 static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
140 static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
141 static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);
142 
143 static void	mly_get_xfer_mode(struct mly_softc *, int,
144 				  struct scsipi_xfer_mode *);
145 static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
146 static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
147 				 int, struct proc *);
148 static void	mly_scsipi_minphys(struct buf *);
149 static void	mly_scsipi_request(struct scsipi_channel *,
150 				   scsipi_adapter_req_t, void *);
151 
152 static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
153 static int	mly_user_health(struct mly_softc *, struct mly_user_health *);
154 
155 extern struct	cfdriver mly_cd;
156 
157 CFATTACH_DECL(mly, sizeof(struct mly_softc),
158     mly_match, mly_attach, NULL, NULL);
159 
160 dev_type_open(mlyopen);
161 dev_type_close(mlyclose);
162 dev_type_ioctl(mlyioctl);
163 
164 const struct cdevsw mly_cdevsw = {
165 	mlyopen, mlyclose, noread, nowrite, mlyioctl,
166 	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
167 };
168 
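/*
 * Table of supported boards, matched on PCI vendor/product and, when the
 * subvendor field is non-zero, on the PCI subsystem ID as well.
 */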
169 static struct mly_ident {
170 	u_short	vendor;
171 	u_short	product;
172 	u_short	subvendor;
173 	u_short	subproduct;
174 	int	hwif;
175 	const char	*desc;
176 } const mly_ident[] = {
177 	{
178 		PCI_VENDOR_MYLEX,
179 		PCI_PRODUCT_MYLEX_EXTREMERAID,
180 		PCI_VENDOR_MYLEX,
181 		0x0040,
182 		MLY_HWIF_STRONGARM,
183 		"eXtremeRAID 2000"
184 	},
185 	{
186 		PCI_VENDOR_MYLEX,
187 		PCI_PRODUCT_MYLEX_EXTREMERAID,
188 		PCI_VENDOR_MYLEX,
189 		0x0030,
190 		MLY_HWIF_STRONGARM,
191 		"eXtremeRAID 3000"
192 	},
193 	{
194 		PCI_VENDOR_MYLEX,
195 		PCI_PRODUCT_MYLEX_ACCELERAID,
196 		PCI_VENDOR_MYLEX,
197 		0x0050,
198 		MLY_HWIF_I960RX,
199 		"AcceleRAID 352"
200 	},
201 	{
202 		PCI_VENDOR_MYLEX,
203 		PCI_PRODUCT_MYLEX_ACCELERAID,
204 		PCI_VENDOR_MYLEX,
205 		0x0052,
206 		MLY_HWIF_I960RX,
207 		"AcceleRAID 170"
208 	},
209 	{
210 		PCI_VENDOR_MYLEX,
211 		PCI_PRODUCT_MYLEX_ACCELERAID,
212 		PCI_VENDOR_MYLEX,
213 		0x0054,
214 		MLY_HWIF_I960RX,
215 		"AcceleRAID 160"
216 	},
217 };
218 
219 static void	*mly_sdh;
220 
221 /*
222  * Try to find a `mly_ident' entry corresponding to this board.
223  */
224 static const struct mly_ident *
225 mly_find_ident(struct pci_attach_args *pa)
226 {
227 	const struct mly_ident *mpi, *maxmpi;
228 	pcireg_t reg;
229 
230 	mpi = mly_ident;
231 	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);
232 
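	/* Boards operating in I2O emulation mode are not supported here. */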
233 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
234 		return (NULL);
235 
236 	for (; mpi < maxmpi; mpi++) {
237 		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
238 		    PCI_PRODUCT(pa->pa_id) != mpi->product)
239 			continue;
240 
241 		if (mpi->subvendor == 0x0000)
242 			return (mpi);
243 
244 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
245 
246 		if (PCI_VENDOR(reg) == mpi->subvendor &&
247 		    PCI_PRODUCT(reg) == mpi->subproduct)
248 			return (mpi);
249 	}
250 
251 	return (NULL);
252 }
253 
254 /*
255  * Match a supported board.
256  */
257 static int
258 mly_match(device_t parent, cfdata_t cfdata, void *aux)
259 {
260 
261 	return (mly_find_ident(aux) != NULL);
262 }
263 
264 /*
265  * Attach a supported board.
266  */
267 static void
268 mly_attach(device_t parent, device_t self, void *aux)
269 {
270 	struct pci_attach_args *pa;
271 	struct mly_softc *mly;
272 	struct mly_ioctl_getcontrollerinfo *mi;
273 	const struct mly_ident *ident;
274 	pci_chipset_tag_t pc;
275 	pci_intr_handle_t ih;
276 	bus_space_handle_t memh, ioh;
277 	bus_space_tag_t memt, iot;
278 	pcireg_t reg;
279 	const char *intrstr;
280 	int ior, memr, i, rv, state;
281 	struct scsipi_adapter *adapt;
282 	struct scsipi_channel *chan;
283 
284 	mly = device_private(self);
285 	pa = aux;
286 	pc = pa->pa_pc;
287 	ident = mly_find_ident(pa);
288 	state = 0;
289 
290 	mly->mly_dmat = pa->pa_dmat;
291 	mly->mly_hwif = ident->hwif;
292 
293 	printf(": Mylex %s\n", ident->desc);
294 
295 	/*
296 	 * Map the PCI register window.
297 	 */
298 	memr = -1;
299 	ior = -1;
300 
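	/*
	 * Examine BARs 0x10 and 0x14, remembering the first I/O and the
	 * first memory window found.
	 */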
301 	for (i = 0x10; i <= 0x14; i += 4) {
302 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
303 
304 		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
305 			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
306 				ior = i;
307 		} else {
308 			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
309 				memr = i;
310 		}
311 	}
312 
313 	if (memr != -1)
314 		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
315 		    &memt, &memh, NULL, NULL))
316 			memr = -1;
317 	if (ior != -1)
318 		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
319 		    &iot, &ioh, NULL, NULL))
320 			ior = -1;
321 
322 	if (memr != -1) {
323 		mly->mly_iot = memt;
324 		mly->mly_ioh = memh;
325 	} else if (ior != -1) {
326 		mly->mly_iot = iot;
327 		mly->mly_ioh = ioh;
328 	} else {
329 		aprint_error_dev(self, "can't map i/o or memory space\n");
330 		return;
331 	}
332 
333 	/*
334 	 * Enable bus mastering.
335 	 */
336 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
337 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
338 	    reg | PCI_COMMAND_MASTER_ENABLE);
339 
340 	/*
341 	 * Map and establish the interrupt.
342 	 */
343 	if (pci_intr_map(pa, &ih)) {
344 		aprint_error_dev(self, "can't map interrupt\n");
345 		return;
346 	}
347 	intrstr = pci_intr_string(pc, ih);
348 	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
349 	if (mly->mly_ih == NULL) {
350 		aprint_error_dev(self, "can't establish interrupt");
351 		if (intrstr != NULL)
352 			aprint_error(" at %s", intrstr);
353 		aprint_error("\n");
354 		return;
355 	}
356 
357 	if (intrstr != NULL)
358 		aprint_normal_dev(&mly->mly_dv, "interrupting at %s\n",
359 		    intrstr);
360 
361 	/*
362 	 * Take care of interface-specific tasks.
363 	 */
364 	switch (mly->mly_hwif) {
365 	case MLY_HWIF_I960RX:
366 		mly->mly_doorbell_true = 0x00;
367 		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
368 		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
369 		mly->mly_idbr = MLY_I960RX_IDBR;
370 		mly->mly_odbr = MLY_I960RX_ODBR;
371 		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
372 		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
373 		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
374 		break;
375 
376 	case MLY_HWIF_STRONGARM:
377 		mly->mly_doorbell_true = 0xff;
378 		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
379 		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
380 		mly->mly_idbr = MLY_STRONGARM_IDBR;
381 		mly->mly_odbr = MLY_STRONGARM_ODBR;
382 		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
383 		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
384 		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
385 		break;
386 	}
387 
388 	/*
389 	 * Allocate and map the scatter/gather lists.
390 	 */
391 	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
392 	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
393 	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
394 	if (rv) {
395 		printf("%s: unable to allocate S/G maps\n",
396 		    device_xname(&mly->mly_dv));
397 		goto bad;
398 	}
399 	state++;
400 
401 	/*
402 	 * Allocate and map the memory mailbox.
403 	 */
404 	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
405 	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
406 	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
407 	if (rv) {
408 		aprint_error_dev(&mly->mly_dv, "unable to allocate mailboxes\n");
409 		goto bad;
410 	}
411 	state++;
412 
413 	/*
414 	 * Initialise per-controller queues.
415 	 */
416 	SLIST_INIT(&mly->mly_ccb_free);
417 	SIMPLEQ_INIT(&mly->mly_ccb_queue);
418 
419 	/*
420 	 * Disable interrupts before we start talking to the controller.
421 	 */
422 	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);
423 
424 	/*
425 	 * Wait for the controller to come ready, handshaking with the
426 	 * firmware if required.  This is typically only necessary on
427 	 * platforms where the controller BIOS does not run.
428 	 */
429 	if (mly_fwhandshake(mly)) {
430 		aprint_error_dev(&mly->mly_dv, "unable to bring controller online\n");
431 		goto bad;
432 	}
433 
434 	/*
435 	 * Allocate initial command buffers, obtain controller feature
436 	 * information, and then reallocate the command buffers, since by then
437 	 * we know how many the controller supports.
438 	 */
439 	if (mly_alloc_ccbs(mly)) {
440 		aprint_error_dev(&mly->mly_dv, "unable to allocate CCBs\n");
441 		goto bad;
442 	}
443 	state++;
444 	if (mly_get_controllerinfo(mly)) {
445 		aprint_error_dev(&mly->mly_dv, "unable to retrieve controller info\n");
446 		goto bad;
447 	}
448 	mly_release_ccbs(mly);
449 	if (mly_alloc_ccbs(mly)) {
450 		aprint_error_dev(&mly->mly_dv, "unable to allocate CCBs\n");
451 		state--;
452 		goto bad;
453 	}
454 
455 	/*
456 	 * Get the current event counter for health purposes, populate the
457 	 * initial health status buffer.
458 	 */
459 	if (mly_get_eventstatus(mly)) {
460 		aprint_error_dev(&mly->mly_dv, "unable to retrieve event status\n");
461 		goto bad;
462 	}
463 
464 	/*
465 	 * Enable memory-mailbox mode.
466 	 */
467 	if (mly_enable_mmbox(mly)) {
468 		aprint_error_dev(&mly->mly_dv, "unable to enable memory mailbox\n");
469 		goto bad;
470 	}
471 
472 	/*
473 	 * Print a little information about the controller.
474 	 */
475 	mi = mly->mly_controllerinfo;
476 
477 	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
478 	    "(%02d%02d%02d%02d), %dMB RAM\n", device_xname(&mly->mly_dv),
479 	    mi->physical_channels_present,
480 	    (mi->physical_channels_present) > 1 ? "s" : "",
481 	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
482 	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
483 	    le16toh(mi->memory_size));
484 
485 	/*
486 	 * Register our `shutdownhook'.
487 	 */
488 	if (mly_sdh == NULL)
489 		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);
490 
491 	/*
492 	 * Clear any previous BTL information.  For each bus that scsipi
493 	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
494 	 * all BTL info at that point.
495 	 */
496 	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));
497 
498 	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
499 	    mly->mly_controllerinfo->virtual_channels_present;
500 
501 	/*
502 	 * Attach to scsipi.
503 	 */
504 	adapt = &mly->mly_adapt;
505 	memset(adapt, 0, sizeof(*adapt));
506 	adapt->adapt_dev = &mly->mly_dv;
507 	adapt->adapt_nchannels = mly->mly_nchans;
508 	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
509 	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
510 	adapt->adapt_request = mly_scsipi_request;
511 	adapt->adapt_minphys = mly_scsipi_minphys;
512 	adapt->adapt_ioctl = mly_scsipi_ioctl;
513 
514 	for (i = 0; i < mly->mly_nchans; i++) {
515 		chan = &mly->mly_chans[i];
516 		memset(chan, 0, sizeof(*chan));
517 		chan->chan_adapter = adapt;
518 		chan->chan_bustype = &scsi_bustype;
519 		chan->chan_channel = i;
520 		chan->chan_ntargets = MLY_MAX_TARGETS;
521 		chan->chan_nluns = MLY_MAX_LUNS;
522 		chan->chan_id = mly->mly_controllerparam->initiator_id;
523 		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
524 		config_found(&mly->mly_dv, chan, scsiprint);
525 	}
526 
527 	/*
528 	 * Now enable interrupts...
529 	 */
530 	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);
531 
532 	/*
533 	 * Finally, create our monitoring thread.
534 	 */
535 	mly->mly_state |= MLY_STATE_INITOK;
536 	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
537 	    &mly->mly_thread, "%s", device_xname(&mly->mly_dv));
538 	if (rv != 0)
539 		aprint_error_dev(&mly->mly_dv, "unable to create thread (%d)\n",
540 		    rv);
541 	return;
542 
543  bad:
544 	if (state > 2)
545 		mly_release_ccbs(mly);
546 	if (state > 1)
547 		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
548 		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
549 		    &mly->mly_mmbox_seg);
550 	if (state > 0)
551 		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
552 		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
553 		    &mly->mly_sg_seg);
554 }
555 
556 /*
557  * Scan all possible devices on the specified channel.
558  */
559 static void
560 mly_scan_channel(struct mly_softc *mly, int bus)
561 {
562 	int s, target;
563 
564 	for (target = 0; target < MLY_MAX_TARGETS; target++) {
565 		s = splbio();
566 		if (!mly_scan_btl(mly, bus, target)) {
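			/*
			 * A scan was started; sleep until mly_complete_rescan()
			 * wakes us on this BTL entry.
			 */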
567 			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
568 			    0);
569 		}
570 		splx(s);
571 	}
572 }
573 
574 /*
575  * Shut down all configured `mly' devices.
576  */
577 static void
578 mly_shutdown(void *cookie)
579 {
580 	struct mly_softc *mly;
581 	int i;
582 
583 	for (i = 0; i < mly_cd.cd_ndevs; i++) {
584 		if ((mly = device_lookup_private(&mly_cd, i)) == NULL)
585 			continue;
586 
587 		if (mly_flush(mly))
588 			aprint_error_dev(&mly->mly_dv, "unable to flush cache\n");
589 	}
590 }
591 
592 /*
593  * Fill in the mly_controllerinfo and mly_controllerparam fields in the
594  * softc.
595  */
596 static int
597 mly_get_controllerinfo(struct mly_softc *mly)
598 {
599 	struct mly_cmd_ioctl mci;
600 	int rv;
601 
602 	/*
603 	 * Build the getcontrollerinfo ioctl and send it.
604 	 */
605 	memset(&mci, 0, sizeof(mci));
606 	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
607 	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
608 	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
609 	if (rv != 0)
610 		return (rv);
611 
612 	/*
613 	 * Build the getcontrollerparameter ioctl and send it.
614 	 */
615 	memset(&mci, 0, sizeof(mci));
616 	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
617 	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
618 	    sizeof(*mly->mly_controllerparam), NULL, NULL);
619 
620 	return (rv);
621 }
622 
623 /*
624  * Rescan a device, possibly as a consequence of getting an event which
625  * suggests that it may have changed.  Must be called with interrupts
626  * blocked.
627  */
628 static int
629 mly_scan_btl(struct mly_softc *mly, int bus, int target)
630 {
631 	struct mly_ccb *mc;
632 	struct mly_cmd_ioctl *mci;
633 	int rv;
634 
635 	if (target == mly->mly_controllerparam->initiator_id) {
636 		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
637 		return (EIO);
638 	}
639 
640 	/* Don't re-scan if a scan is already in progress. */
641 	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
642 		return (EBUSY);
643 
644 	/* Get a command. */
645 	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
646 		return (rv);
647 
648 	/* Set up the data buffer. */
649 	mc->mc_data = malloc(sizeof(union mly_devinfo),
650 	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return (ENOMEM);
	}
651 
652 	mc->mc_flags |= MLY_CCB_DATAIN;
653 	mc->mc_complete = mly_complete_rescan;
654 
655 	/*
656 	 * Build the ioctl.
657 	 */
658 	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
659 	mci->opcode = MDACMD_IOCTL;
660 	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
661 	memset(&mci->param, 0, sizeof(mci->param));
662 
663 	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
664 		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
665 		mci->data_size = htole32(mc->mc_length);
666 		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
667 		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
668 		    mci->addr);
669 	} else {
670 		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
671 		mci->data_size = htole32(mc->mc_length);
672 		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
673 		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
674 	}
675 
676 	/*
677 	 * Dispatch the command.
678 	 */
679 	if ((rv = mly_ccb_map(mly, mc)) != 0) {
680 		free(mc->mc_data, M_DEVBUF);
681 		mly_ccb_free(mly, mc);
682 		return(rv);
683 	}
684 
685 	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
686 	mly_ccb_enqueue(mly, mc);
687 	return (0);
688 }
689 
690 /*
691  * Handle the completion of a rescan operation.
692  */
693 static void
694 mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
695 {
696 	struct mly_ioctl_getlogdevinfovalid *ldi;
697 	struct mly_ioctl_getphysdevinfovalid *pdi;
698 	struct mly_cmd_ioctl *mci;
699 	struct mly_btl btl, *btlp;
700 	struct scsipi_xfer_mode xm;
701 	int bus, target, rescan;
702 	u_int tmp;
703 
704 	mly_ccb_unmap(mly, mc);
705 
706 	/*
707 	 * Recover the bus and target from the command.  We need these even
708 	 * in the case where we don't have a useful response.
709 	 */
710 	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
711 	tmp = _3ltol(mci->addr);
712 	rescan = 0;
713 
714 	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
715 		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
716 		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
717 	} else {
718 		bus = MLY_PHYADDR_CHANNEL(tmp);
719 		target = MLY_PHYADDR_TARGET(tmp);
720 	}
721 
722 	btlp = &mly->mly_btl[bus][target];
723 
724 	/* The default result is 'no device'. */
725 	memset(&btl, 0, sizeof(btl));
726 	btl.mb_flags = MLY_BTL_PROTECTED;
727 
728 	/* If the rescan completed OK, we have possibly-new BTL data. */
729 	if (mc->mc_status != 0)
730 		goto out;
731 
732 	if (mc->mc_length == sizeof(*ldi)) {
733 		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
734 		tmp = le32toh(ldi->logical_device_number);
735 
736 		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
737 		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
738 #ifdef MLYDEBUG
739 			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
740 			    "returned data for %d:%d instead\n",
741 			   device_xname(&mly->mly_dv), bus, target,
742 			   MLY_LOGDEV_BUS(mly, tmp),
743 			   MLY_LOGDEV_TARGET(mly, tmp));
744 #endif
745 			goto out;
746 		}
747 
748 		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
749 		btl.mb_type = ldi->raid_level;
750 		btl.mb_state = ldi->state;
751 	} else if (mc->mc_length == sizeof(*pdi)) {
752 		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
753 
754 		if (pdi->channel != bus || pdi->target != target) {
755 #ifdef MLYDEBUG
756 			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
757 			    "returned data for %d:%d instead\n",
758 			   device_xname(&mly->mly_dv),
759 			   bus, target, pdi->channel, pdi->target);
760 #endif
761 			goto out;
762 		}
763 
764 		btl.mb_flags = MLY_BTL_PHYSICAL;
765 		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
766 		btl.mb_state = pdi->state;
767 		btl.mb_speed = pdi->speed;
768 		btl.mb_width = pdi->width;
769 
770 		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
771 			btl.mb_flags |= MLY_BTL_PROTECTED;
772 		if (pdi->command_tags != 0)
773 			btl.mb_flags |= MLY_BTL_TQING;
774 	} else {
775 		printf("%s: BTL rescan result invalid\n", device_xname(&mly->mly_dv));
776 		goto out;
777 	}
778 
779 	/* Decide whether we need to rescan the device. */
780 	if (btl.mb_flags != btlp->mb_flags ||
781 	    btl.mb_speed != btlp->mb_speed ||
782 	    btl.mb_width != btlp->mb_width)
783 		rescan = 1;
784 
785  out:
786 	*btlp = btl;
787 
788 	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
789 		xm.xm_target = target;
790 		mly_get_xfer_mode(mly, bus, &xm);
791 		/* XXX SCSI mid-layer rescan goes here. */
792 	}
793 
794 	/* Wake anybody waiting on the device to be rescanned. */
795 	wakeup(btlp);
796 
797 	free(mc->mc_data, M_DEVBUF);
798 	mly_ccb_free(mly, mc);
799 }
800 
801 /*
802  * Get the current health status and set the 'next event' counter to suit.
803  */
804 static int
805 mly_get_eventstatus(struct mly_softc *mly)
806 {
807 	struct mly_cmd_ioctl mci;
808 	struct mly_health_status *mh;
809 	int rv;
810 
811 	/* Build the gethealthstatus ioctl and send it. */
812 	memset(&mci, 0, sizeof(mci));
813 	mh = NULL;
814 	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
815 
816 	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
817 	if (rv)
818 		return (rv);
819 
820 	/* Get the event counter. */
821 	mly->mly_event_change = le32toh(mh->change_counter);
822 	mly->mly_event_waiting = le32toh(mh->next_event);
823 	mly->mly_event_counter = le32toh(mh->next_event);
824 
825 	/* Save the health status into the memory mailbox */
826 	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));
827 
828 	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
829 	    offsetof(struct mly_mmbox, mmm_health),
830 	    sizeof(mly->mly_mmbox->mmm_health),
831 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
832 
833 	free(mh, M_DEVBUF);
834 	return (0);
835 }
836 
837 /*
838  * Enable memory mailbox mode.
839  */
840 static int
841 mly_enable_mmbox(struct mly_softc *mly)
842 {
843 	struct mly_cmd_ioctl mci;
844 	u_int8_t *sp;
845 	u_int64_t tmp;
846 	int rv;
847 
848 	/* Build the ioctl and send it. */
849 	memset(&mci, 0, sizeof(mci));
850 	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
851 
852 	/* Set buffer addresses. */
853 	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
854 	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);
855 
856 	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
857 	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);
858 
859 	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
860 	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);
861 
862 	/* Set buffer sizes - abuse of data_size field is revolting. */
863 	sp = (u_int8_t *)&mci.data_size;
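	/* The buffer sizes are passed in units of 1kB, hence the shifts. */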
864 	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
865 	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
866 	mci.param.setmemorymailbox.health_buffer_size =
867 	    sizeof(union mly_health_region) >> 10;
868 
869 	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
870 	if (rv)
871 		return (rv);
872 
873 	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
874 	return (0);
875 }
876 
877 /*
878  * Flush all pending I/O from the controller.
879  */
880 static int
881 mly_flush(struct mly_softc *mly)
882 {
883 	struct mly_cmd_ioctl mci;
884 
885 	/* Build the ioctl */
886 	memset(&mci, 0, sizeof(mci));
887 	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
888 	mci.param.deviceoperation.operation_device =
889 	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;
890 
891 	/* Pass it off to the controller */
892 	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
893 }
894 
895 /*
896  * Perform an ioctl command.
897  *
898  * If (data) is not NULL, the command requires data transfer to the
899  * controller.  If (*data) is NULL the command requires data transfer from
900  * the controller, and we will allocate a buffer for it.
901  */
902 static int
903 mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
904 	  size_t datasize, void *sense_buffer,
905 	  size_t *sense_length)
906 {
907 	struct mly_ccb *mc;
908 	struct mly_cmd_ioctl *mci;
909 	u_int8_t status;
910 	int rv;
911 
912 	mc = NULL;
913 	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
914 		goto bad;
915 
916 	/*
917 	 * Copy the ioctl structure into the command packet, preserving the
918 	 * packet's preset sense buffer fields across the copy.
919 	 */
920 	mci = &mc->mc_packet->ioctl;
921 	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
922 	ioctl->maximum_sense_size = mci->maximum_sense_size;
923 	*mci = *ioctl;
924 	mci->opcode = MDACMD_IOCTL;
925 	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
926 
927 	/* Handle the data buffer. */
928 	if (data != NULL) {
929 		if (*data == NULL) {
930 			/* Allocate data buffer */
931 			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			if (mc->mc_data == NULL) {
				rv = ENOMEM;
				goto bad;
			}
932 			mc->mc_flags |= MLY_CCB_DATAIN;
933 		} else {
934 			mc->mc_data = *data;
935 			mc->mc_flags |= MLY_CCB_DATAOUT;
936 		}
937 		mc->mc_length = datasize;
938 		mc->mc_packet->generic.data_size = htole32(datasize);
939 	}
940 
941 	/* Run the command. */
942 	if (datasize > 0)
943 		if ((rv = mly_ccb_map(mly, mc)) != 0)
944 			goto bad;
945 	rv = mly_ccb_poll(mly, mc, 30000);
946 	if (datasize > 0)
947 		mly_ccb_unmap(mly, mc);
948 	if (rv != 0)
949 		goto bad;
950 
951 	/* Clean up and return any data. */
952 	status = mc->mc_status;
953 
954 	if (status != 0)
955 		printf("mly_ioctl: command status %d\n", status);
956 
957 	if (mc->mc_sense > 0 && sense_buffer != NULL) {
958 		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
959 		*sense_length = mc->mc_sense;
960 		goto bad;
961 	}
962 
963 	/* Should we return a data pointer? */
964 	if (data != NULL && *data == NULL)
965 		*data = mc->mc_data;
966 
967 	/* Command completed OK. */
968 	rv = (status != 0 ? EIO : 0);
969 
970  bad:
971 	if (mc != NULL) {
972 		/* Do we need to free a data buffer we allocated? */
973 		if (rv != 0 && mc->mc_data != NULL &&
974 		    (data == NULL || *data == NULL))
975 			free(mc->mc_data, M_DEVBUF);
976 		mly_ccb_free(mly, mc);
977 	}
978 
979 	return (rv);
980 }
981 
982 /*
983  * Check for event(s) outstanding in the controller.
984  */
985 static void
986 mly_check_event(struct mly_softc *mly)
987 {
988 
989 	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
990 	    offsetof(struct mly_mmbox, mmm_health),
991 	    sizeof(mly->mly_mmbox->mmm_health),
992 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
993 
994 	/*
995 	 * The controller may have updated the health status information, so
996 	 * check for it here.  Note that the counters are all in host
997 	 * memory, so this check is very cheap.  Also note that we depend on
998 	 * this check being made as each command completes.
999 	 */
1000 	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
1001 	    mly->mly_event_change) {
1002 		mly->mly_event_change =
1003 		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
1004 		mly->mly_event_waiting =
1005 		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);
1006 
1007 		/* Wake up anyone that might be interested in this. */
1008 		wakeup(&mly->mly_event_change);
1009 	}
1010 
1011 	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
1012 	    offsetof(struct mly_mmbox, mmm_health),
1013 	    sizeof(mly->mly_mmbox->mmm_health),
1014 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1015 
1016 	if (mly->mly_event_counter != mly->mly_event_waiting)
1017 		mly_fetch_event(mly);
1018 }
1019 
1020 /*
1021  * Fetch one event from the controller.  If we fail due to resource
1022  * starvation, we'll be retried the next time a command completes.
1023  */
1024 static void
1025 mly_fetch_event(struct mly_softc *mly)
1026 {
1027 	struct mly_ccb *mc;
1028 	struct mly_cmd_ioctl *mci;
1029 	int s;
1030 	u_int32_t event;
1031 
1032 	/* Get a command. */
1033 	if (mly_ccb_alloc(mly, &mc))
1034 		return;
1035 
1036 	/* Set up the data buffer. */
1037 	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
1038 	    M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return;
	}
1039 
1040 	mc->mc_length = sizeof(struct mly_event);
1041 	mc->mc_flags |= MLY_CCB_DATAIN;
1042 	mc->mc_complete = mly_complete_event;
1043 
1044 	/*
1045 	 * Get an event number to fetch.  It's possible that we've raced
1046 	 * with another context for the last event, in which case there will
1047 	 * be no more events.
1048 	 */
1049 	s = splbio();
1050 	if (mly->mly_event_counter == mly->mly_event_waiting) {
1051 		splx(s);
1052 		free(mc->mc_data, M_DEVBUF);
1053 		mly_ccb_free(mly, mc);
1054 		return;
1055 	}
1056 	event = mly->mly_event_counter++;
1057 	splx(s);
1058 
1059 	/*
1060 	 * Build the ioctl.
1061 	 *
1062 	 * At this point we are committed to sending this request, as it
1063 	 * will be the only one constructed for this particular event
1064 	 * number.
1065 	 */
1066 	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
1067 	mci->opcode = MDACMD_IOCTL;
1068 	mci->data_size = htole32(sizeof(struct mly_event));
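	/*
	 * The low 16 bits of the event number are passed in the getevent
	 * parameter below; the high 16 bits are packed into the address.
	 */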
1069 	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
1070 	    mci->addr);
1071 	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
1072 	mci->sub_ioctl = MDACIOCTL_GETEVENT;
1073 	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);
1074 
1075 	/*
1076 	 * Submit the command.
1077 	 */
1078 	if (mly_ccb_map(mly, mc) != 0)
1079 		goto bad;
1080 	mly_ccb_enqueue(mly, mc);
1081 	return;
1082 
1083  bad:
1084 	printf("%s: couldn't fetch event %u\n", device_xname(&mly->mly_dv), event);
1085 	free(mc->mc_data, M_DEVBUF);
1086 	mly_ccb_free(mly, mc);
1087 }
1088 
1089 /*
1090  * Handle the completion of an event poll.
1091  */
1092 static void
1093 mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
1094 {
1095 	struct mly_event *me;
1096 
1097 	me = (struct mly_event *)mc->mc_data;
1098 	mly_ccb_unmap(mly, mc);
1099 
1100 	/* If the event was successfully fetched, process it. */
1101 	if (mc->mc_status == SCSI_OK)
1102 		mly_process_event(mly, me);
1103 	else
1104 		aprint_error_dev(&mly->mly_dv, "unable to fetch event; status = 0x%x\n",
1105 		    mc->mc_status);
1106 
1107 	/* Done with the CCB's status; release it and the event buffer. */
1108 	mly_ccb_free(mly, mc);
	free(me, M_DEVBUF);
1109 
1110 	/* Check for another event. */
1111 	mly_check_event(mly);
1112 }
1113 
1114 /*
1115  * Process a controller event.  Called with interrupts blocked (i.e., at
1116  * interrupt time).
1117  */
1118 static void
1119 mly_process_event(struct mly_softc *mly, struct mly_event *me)
1120 {
1121 	struct scsi_sense_data *ssd;
1122 	int bus, target, event, class, action;
1123 	const char *fp, *tp;
1124 
1125 	ssd = (struct scsi_sense_data *)&me->sense[0];
1126 
1127 	/*
1128 	 * Errors can be reported using vendor-unique sense data.  In this
1129 	 * case, the event code will be 0x1c (Request sense data present),
1130 	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
1131 	 * will be set, and the actual event code will be a 16-bit value
1132 	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
1133 	 * (low seven bits of the high byte).
1134 	 */
1135 	if (le32toh(me->code) == 0x1c &&
1136 	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
1137 	    (ssd->asc & 0x80) != 0) {
1138 		event = ((int)(ssd->asc & ~0x80) << 8) +
1139 		    ssd->ascq;
1140 	} else
1141 		event = le32toh(me->code);
1142 
1143 	/* Look up event, get codes. */
1144 	fp = mly_describe_code(mly_table_event, event);
1145 
1146 	/* Quiet event? */
1147 	class = fp[0];
1148 #ifdef notyet
1149 	if (isupper(class) && bootverbose)
1150 		class = tolower(class);
1151 #endif
1152 
1153 	/* Get action code, text string. */
1154 	action = fp[1];
1155 	tp = fp + 3;
1156 
1157 	/*
1158 	 * Print some information about the event.
1159 	 *
1160 	 * This code uses a table derived from the corresponding portion of
1161 	 * the Linux driver, and thus the parser is very similar.
1162 	 */
1163 	switch (class) {
1164 	case 'p':
1165 		/*
1166 		 * Error on physical drive.
1167 		 */
1168 		printf("%s: physical device %d:%d %s\n", device_xname(&mly->mly_dv),
1169 		    me->channel, me->target, tp);
1170 		if (action == 'r')
1171 			mly->mly_btl[me->channel][me->target].mb_flags |=
1172 			    MLY_BTL_RESCAN;
1173 		break;
1174 
1175 	case 'l':
1176 	case 'm':
1177 		/*
1178 		 * Error on logical unit, or message about logical unit.
1179 		 */
1180 		bus = MLY_LOGDEV_BUS(mly, me->lun);
1181 		target = MLY_LOGDEV_TARGET(mly, me->lun);
1182 		printf("%s: logical device %d:%d %s\n", device_xname(&mly->mly_dv),
1183 		    bus, target, tp);
1184 		if (action == 'r')
1185 			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
1186 		break;
1187 
1188 	case 's':
1189 		/*
1190 		 * Report of sense data.
1191 		 */
1192 		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
1193 		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
1194 		    ssd->asc == 0x04 &&
1195 		    (ssd->ascq == 0x01 ||
1196 		     ssd->ascq == 0x02)) {
1197 			/* Ignore "becoming ready"/"init required" reports */
1198 			break;
1199 		}
1200 
1201 		/*
1202 		 * XXX Should translate this if SCSIVERBOSE.
1203 		 */
1204 		printf("%s: physical device %d:%d %s\n", device_xname(&mly->mly_dv),
1205 		    me->channel, me->target, tp);
1206 		printf("%s:  sense key %d  asc %02x  ascq %02x\n",
1207 		    device_xname(&mly->mly_dv), SSD_SENSE_KEY(ssd->flags),
1208 		    ssd->asc, ssd->ascq);
1209 		printf("%s:  info %x%x%x%x  csi %x%x%x%x\n",
1210 		    device_xname(&mly->mly_dv), ssd->info[0], ssd->info[1],
1211 		    ssd->info[2], ssd->info[3], ssd->csi[0],
1212 		    ssd->csi[1], ssd->csi[2],
1213 		    ssd->csi[3]);
1214 		if (action == 'r')
1215 			mly->mly_btl[me->channel][me->target].mb_flags |=
1216 			    MLY_BTL_RESCAN;
1217 		break;
1218 
1219 	case 'e':
1220 		printf("%s: ", device_xname(&mly->mly_dv));
1221 		printf(tp, me->target, me->lun);
1222 		break;
1223 
1224 	case 'c':
1225 		printf("%s: controller %s\n", device_xname(&mly->mly_dv), tp);
1226 		break;
1227 
1228 	case '?':
1229 		printf("%s: %s - %d\n", device_xname(&mly->mly_dv), tp, event);
1230 		break;
1231 
1232 	default:
1233 		/* Probably a 'noisy' event being ignored. */
1234 		break;
1235 	}
1236 }
1237 
1238 /*
1239  * Perform periodic activities.
1240  */
1241 static void
1242 mly_thread(void *cookie)
1243 {
1244 	struct mly_softc *mly;
1245 	struct mly_btl *btl;
1246 	int s, bus, target, done;
1247 
1248 	mly = (struct mly_softc *)cookie;
1249 
1250 	for (;;) {
1251 		/* Check for new events. */
1252 		mly_check_event(mly);
1253 
1254 		/* Re-scan up to 1 device. */
1255 		s = splbio();
1256 		done = 0;
1257 		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
1258 			for (target = 0; target < MLY_MAX_TARGETS; target++) {
1259 				/* Perform device rescan? */
1260 				btl = &mly->mly_btl[bus][target];
1261 				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
1262 					btl->mb_flags ^= MLY_BTL_RESCAN;
1263 					mly_scan_btl(mly, bus, target);
1264 					done = 1;
1265 					break;
1266 				}
1267 			}
1268 		}
1269 		splx(s);
1270 
1271 		/* Sleep for MLY_PERIODIC_INTERVAL seconds. */
1272 		tsleep(mly_thread, PWAIT, "mlyzzz",
1273 		    hz * MLY_PERIODIC_INTERVAL);
1274 	}
1275 }
1276 
1277 /*
1278  * Submit a command to the controller and poll on completion.  Return
1279  * non-zero on timeout.
1280  */
1281 static int
1282 mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
1283 {
1284 	int rv;
1285 
1286 	if ((rv = mly_ccb_submit(mly, mc)) != 0)
1287 		return (rv);
1288 
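	/* `timo' is given in milliseconds; poll in 100us steps. */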
1289 	for (timo *= 10; timo != 0; timo--) {
1290 		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
1291 			break;
1292 		mly_intr(mly);
1293 		DELAY(100);
1294 	}
1295 
1296 	return (timo == 0);
1297 }
1298 
1299 /*
1300  * Submit a command to the controller and sleep on completion.  Return
1301  * non-zero on timeout.
1302  */
1303 static int
1304 mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
1305 {
1306 	int rv, s;
1307 
1308 	mly_ccb_enqueue(mly, mc);
1309 
1310 	s = splbio();
1311 	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
1312 		splx(s);
1313 		return (0);
1314 	}
1315 	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
1316 	splx(s);
1317 
1318 	return (rv);
1319 }
1320 
1321 /*
1322  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
1323  * the order that they were enqueued and try to submit their command blocks
1324  * to the controller for execution.
1325  */
1326 void
1327 mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
1328 {
1329 	int s;
1330 
1331 	s = splbio();
1332 
1333 	if (mc != NULL)
1334 		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);
1335 
1336 	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
1337 		if (mly_ccb_submit(mly, mc))
1338 			break;
1339 		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
1340 	}
1341 
1342 	splx(s);
1343 }
1344 
1345 /*
1346  * Deliver a command to the controller.
1347  */
1348 static int
1349 mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
1350 {
1351 	union mly_cmd_packet *pkt;
1352 	int s, off;
1353 
1354 	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);
1355 
1356 	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
1357 	    mc->mc_packetphys - mly->mly_pkt_busaddr,
1358 	    sizeof(union mly_cmd_packet),
1359 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1360 
1361 	s = splbio();
1362 
1363 	/*
1364 	 * Do we have to use the hardware mailbox?
1365 	 */
1366 	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
1367 		/*
1368 		 * Check to see if the controller is ready for us.
1369 		 */
1370 		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
1371 			splx(s);
1372 			return (EBUSY);
1373 		}
1374 
1375 		/*
1376 		 * It's ready, send the command.
1377 		 */
1378 		mly_outl(mly, mly->mly_cmd_mailbox,
1379 		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
1380 		mly_outl(mly, mly->mly_cmd_mailbox + 4,
1381 		    (u_int64_t)mc->mc_packetphys >> 32);
1382 		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
1383 	} else {
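		/*
		 * Use the memory mailbox: place the packet in the next slot
		 * of the command ring.
		 */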
1384 		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
1385 		off = (char *)pkt - (char *)mly->mly_mmbox;
1386 
1387 		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
1388 		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
1389 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1390 
1391 		/* Check to see if the next index is free yet. */
1392 		if (pkt->mmbox.flag != 0) {
1393 			splx(s);
1394 			return (EBUSY);
1395 		}
1396 
1397 		/* Copy in new command */
1398 		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
1399 		    sizeof(pkt->mmbox.data));
1400 
1401 		/* Copy flag last. */
1402 		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
1403 
1404 		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
1405 		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
1406 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1407 
1408 		/* Signal controller and update index. */
1409 		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
1410 		mly->mly_mmbox_cmd_idx =
1411 		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
1412 	}
1413 
1414 	splx(s);
1415 	return (0);
1416 }
1417 
1418 /*
1419  * Pick up completed commands from the controller and handle accordingly.
1420  */
1421 int
1422 mly_intr(void *cookie)
1423 {
1424 	struct mly_ccb *mc;
1425 	union mly_status_packet	*sp;
1426 	u_int16_t slot;
1427 	int forus, off;
1428 	struct mly_softc *mly;
1429 
1430 	mly = cookie;
1431 	forus = 0;
1432 
1433 	/*
1434 	 * Pick up hardware-mailbox commands.
1435 	 */
1436 	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
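		/*
		 * The status mailbox holds the command ID (16 bits), followed
		 * by the status byte, sense length byte and 32-bit residue.
		 */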
1437 		slot = mly_inw(mly, mly->mly_status_mailbox);
1438 
1439 		if (slot < MLY_SLOT_MAX) {
1440 			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
1441 			mc->mc_status =
1442 			    mly_inb(mly, mly->mly_status_mailbox + 2);
1443 			mc->mc_sense =
1444 			    mly_inb(mly, mly->mly_status_mailbox + 3);
1445 			mc->mc_resid =
1446 			    mly_inl(mly, mly->mly_status_mailbox + 4);
1447 
1448 			mly_ccb_complete(mly, mc);
1449 		} else {
1450 			/* Slot 0xffff may mean "extremely bogus command". */
1451 			printf("%s: got HM completion for illegal slot %u\n",
1452 			    device_xname(&mly->mly_dv), slot);
1453 		}
1454 
1455 		/* Unconditionally acknowledge status. */
1456 		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
1457 		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
1458 		forus = 1;
1459 	}
1460 
1461 	/*
1462 	 * Pick up memory-mailbox commands.
1463 	 */
1464 	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
1465 		for (;;) {
1466 			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
1467 			off = (char *)sp - (char *)mly->mly_mmbox;
1468 
1469 			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
1470 			    off, sizeof(mly->mly_mmbox->mmm_status[0]),
1471 			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1472 
1473 			/* Check for more status. */
1474 			if (sp->mmbox.flag == 0)
1475 				break;
1476 
1477 			/* Get slot number. */
1478 			slot = le16toh(sp->status.command_id);
1479 			if (slot < MLY_SLOT_MAX) {
1480 				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
1481 				mc->mc_status = sp->status.status;
1482 				mc->mc_sense = sp->status.sense_length;
1483 				mc->mc_resid = le32toh(sp->status.residue);
1484 				mly_ccb_complete(mly, mc);
1485 			} else {
1486 				/*
1487 				 * Slot 0xffff may mean "extremely bogus
1488 				 * command".
1489 				 */
1490 				printf("%s: got AM completion for illegal "
1491 				    "slot %u at %d\n", device_xname(&mly->mly_dv),
1492 				    slot, mly->mly_mmbox_sts_idx);
1493 			}
1494 
1495 			/* Clear and move to next index. */
1496 			sp->mmbox.flag = 0;
1497 			mly->mly_mmbox_sts_idx =
1498 			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
1499 		}
1500 
1501 		/* Acknowledge that we have collected status value(s). */
1502 		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
1503 		forus = 1;
1504 	}
1505 
1506 	/*
1507 	 * Run the queue.
1508 	 */
1509 	if (forus && ! SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
1510 		mly_ccb_enqueue(mly, NULL);
1511 
1512 	return (forus);
1513 }
1514 
1515 /*
1516  * Process completed commands
1517  */
1518 static void
1519 mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
1520 {
1521 	void (*complete)(struct mly_softc *, struct mly_ccb *);
1522 
1523 	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
1524 	    mc->mc_packetphys - mly->mly_pkt_busaddr,
1525 	    sizeof(union mly_cmd_packet),
1526 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1527 
1528 	complete = mc->mc_complete;
1529 	mc->mc_flags |= MLY_CCB_COMPLETE;
1530 
1531 	/*
1532 	 * Call completion handler or wake up sleeping consumer.
1533 	 */
1534 	if (complete != NULL)
1535 		(*complete)(mly, mc);
1536 	else
1537 		wakeup(mc);
1538 }
1539 
1540 /*
1541  * Allocate a command.
1542  */
1543 int
1544 mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
1545 {
1546 	struct mly_ccb *mc;
1547 	int s;
1548 
1549 	s = splbio();
1550 	mc = SLIST_FIRST(&mly->mly_ccb_free);
1551 	if (mc != NULL)
1552 		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
1553 	splx(s);
1554 
1555 	*mcp = mc;
1556 	return (mc == NULL ? EAGAIN : 0);
1557 }
1558 
1559 /*
1560  * Release a command back to the freelist.
1561  */
1562 void
1563 mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
1564 {
1565 	int s;
1566 
1567 	/*
1568 	 * Reset parts of the command that could cause confusion if the next
1569 	 * consumer doesn't fill them in once the CCB is allocated again.
1570 	 */
1571 	mc->mc_data = NULL;
1572 	mc->mc_flags = 0;
1573 	mc->mc_complete = NULL;
1574 	mc->mc_private = NULL;
1575 	mc->mc_packet->generic.command_control = 0;
1576 
1577 	/*
1578 	 * By default, we set up to overwrite the command packet with sense
1579 	 * information.
1580 	 */
1581 	mc->mc_packet->generic.sense_buffer_address =
1582 	    htole64(mc->mc_packetphys);
1583 	mc->mc_packet->generic.maximum_sense_size =
1584 	    sizeof(union mly_cmd_packet);
1585 
1586 	s = splbio();
1587 	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
1588 	splx(s);
1589 }
1590 
1591 /*
1592  * Allocate and initialize command and packet structures.
1593  *
1594  * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
1595  * allocation to that number.  If we don't yet know how many commands the
1596  * controller supports, allocate a very small set (suitable for initialization
1597  * purposes only).
1598  */
1599 static int
1600 mly_alloc_ccbs(struct mly_softc *mly)
1601 {
1602 	struct mly_ccb *mc;
1603 	int i, rv;
1604 
1605 	if (mly->mly_controllerinfo == NULL)
1606 		mly->mly_ncmds = MLY_CCBS_RESV;
1607 	else {
1608 		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
1609 		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
1610 	}
1611 
1612 	/*
1613 	 * Allocate enough space for all the command packets in one chunk
1614 	 * and map them permanently into controller-visible space.
1615 	 */
1616 	rv = mly_dmamem_alloc(mly,
1617 	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
1618 	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
1619 	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
1620 	if (rv)
1621 		return (rv);
1622 
1623 	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
1624 	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mly->mly_ccbs == NULL) {
		mly_dmamem_free(mly, mly->mly_ncmds *
		    sizeof(union mly_cmd_packet), mly->mly_pkt_dmamap,
		    (void *)mly->mly_pkt, &mly->mly_pkt_seg);
		return (ENOMEM);
	}
1625 
1626 	for (i = 0; i < mly->mly_ncmds; i++) {
1627 		mc = mly->mly_ccbs + i;
1628 		mc->mc_slot = MLY_SLOT_START + i;
1629 		mc->mc_packet = mly->mly_pkt + i;
1630 		mc->mc_packetphys = mly->mly_pkt_busaddr +
1631 		    (i * sizeof(union mly_cmd_packet));
1632 
1633 		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
1634 		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
1635 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1636 		    &mc->mc_datamap);
1637 		if (rv) {
1638 			mly_release_ccbs(mly);
1639 			return (rv);
1640 		}
1641 
1642 		mly_ccb_free(mly, mc);
1643 	}
1644 
1645 	return (0);
1646 }
1647 
1648 /*
1649  * Free all the storage held by commands.
1650  *
1651  * Must be called with all commands on the free list.
1652  */
1653 static void
1654 mly_release_ccbs(struct mly_softc *mly)
1655 {
1656 	struct mly_ccb *mc;
1657 
1658 	/* Throw away command buffer DMA maps. */
1659 	while (mly_ccb_alloc(mly, &mc) == 0)
1660 		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);
1661 
1662 	/* Release CCB storage. */
1663 	free(mly->mly_ccbs, M_DEVBUF);
1664 
1665 	/* Release the packet storage. */
1666 	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
1667 	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
1668 }
1669 
1670 /*
1671  * Map a command into controller-visible space.
1672  */
1673 static int
1674 mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
1675 {
1676 	struct mly_cmd_generic *gen;
1677 	struct mly_sg_entry *sg;
1678 	bus_dma_segment_t *ds;
1679 	int flg, nseg, rv;
1680 
1681 #ifdef DIAGNOSTIC
1682 	/* Don't map more than once. */
1683 	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
1684 		panic("mly_ccb_map: already mapped");
1685 	mc->mc_flags |= MLY_CCB_MAPPED;
1686 
1687 	/* Does the command have a data buffer? */
1688 	if (mc->mc_data == NULL)
1689 		panic("mly_ccb_map: no data buffer");
1690 #endif
1691 
1692 	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
1693 	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1694 	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
1695 	    BUS_DMA_READ : BUS_DMA_WRITE));
1696 	if (rv != 0)
1697 		return (rv);
1698 
1699 	gen = &mc->mc_packet->generic;
1700 
1701 	/*
1702 	 * Can we use the transfer structure directly?
1703 	 */
1704 	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
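		/* Up to two segments fit in the packet's embedded S/G list. */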
1705 		mc->mc_sgoff = -1;
1706 		sg = &gen->transfer.direct.sg[0];
1707 	} else {
1708 		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
1709 		    MLY_MAX_SEGS;
1710 		sg = mly->mly_sg + mc->mc_sgoff;
1711 		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
1712 		gen->transfer.indirect.entries[0] = htole16(nseg);
1713 		gen->transfer.indirect.table_physaddr[0] =
1714 		    htole64(mly->mly_sg_busaddr +
1715 		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
1716 	}
1717 
1718 	/*
1719 	 * Fill the S/G table.
1720 	 */
1721 	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
1722 		sg->physaddr = htole64(ds->ds_addr);
1723 		sg->length = htole64(ds->ds_len);
1724 	}
1725 
1726 	/*
1727 	 * Sync up the data map.
1728 	 */
1729 	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
1730 		flg = BUS_DMASYNC_PREREAD;
1731 	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
1732 		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
1733 		flg = BUS_DMASYNC_PREWRITE;
1734 	}
1735 
1736 	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
1737 
1738 	/*
1739 	 * Sync up the chained S/G table, if we're using one.
1740 	 */
1741 	if (mc->mc_sgoff == -1)
1742 		return (0);
1743 
1744 	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap,
1745 	    mc->mc_sgoff * sizeof(struct mly_sg_entry), MLY_SGL_SIZE,
	    BUS_DMASYNC_PREWRITE);
1746 
1747 	return (0);
1748 }
1749 
1750 /*
1751  * Unmap a command from controller-visible space.
1752  */
1753 static void
1754 mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
1755 {
1756 	int flg;
1757 
1758 #ifdef DIAGNOSTIC
1759 	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
1760 		panic("mly_ccb_unmap: not mapped");
1761 	mc->mc_flags &= ~MLY_CCB_MAPPED;
1762 #endif
1763 
1764 	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
1765 		flg = BUS_DMASYNC_POSTREAD;
1766 	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
1767 		flg = BUS_DMASYNC_POSTWRITE;
1768 
1769 	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
1770 	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);
1771 
1772 	if (mc->mc_sgoff == -1)
1773 		return;
1774 
1775 	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap,
1776 	    mc->mc_sgoff * sizeof(struct mly_sg_entry), MLY_SGL_SIZE,
	    BUS_DMASYNC_POSTWRITE);
1777 }
1778 
1779 /*
1780  * Adjust the size of each I/O before it passes to the SCSI layer.
1781  */
1782 static void
1783 mly_scsipi_minphys(struct buf *bp)
1784 {
1785 
1786 	if (bp->b_bcount > MLY_MAX_XFER)
1787 		bp->b_bcount = MLY_MAX_XFER;
1788 	minphys(bp);
1789 }
1790 
1791 /*
1792  * Start a SCSI command.
1793  */
1794 static void
1795 mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1796 		   void *arg)
1797 {
1798 	struct mly_ccb *mc;
1799 	struct mly_cmd_scsi_small *ss;
1800 	struct scsipi_xfer *xs;
1801 	struct scsipi_periph *periph;
1802 	struct mly_softc *mly;
1803 	struct mly_btl *btl;
1804 	int s, tmp;
1805 
1806 	mly = device_private(chan->chan_adapter->adapt_dev);
1807 
1808 	switch (req) {
1809 	case ADAPTER_REQ_RUN_XFER:
1810 		xs = arg;
1811 		periph = xs->xs_periph;
1812 		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
1813 		s = splbio();
1814 		tmp = btl->mb_flags;
1815 		splx(s);
1816 
1817 		/*
1818 		 * Check for an I/O attempt to a protected or non-existent
1819 		 * device.
1820 		 */
		if ((tmp & MLY_BTL_PROTECTED) != 0) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			break;
		}

#ifdef DIAGNOSTIC
		/* XXX Increase if/when we support large SCSI commands. */
		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
			printf("%s: cmd too large\n",
			    device_xname(&mly->mly_dv));
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif

		if (mly_ccb_alloc(mly, &mc)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			break;
		}

		/* Build the command. */
		mc->mc_data = xs->data;
		mc->mc_length = xs->datalen;
		mc->mc_complete = mly_scsipi_complete;
		mc->mc_private = xs;

		/* Build the packet for the controller. */
		ss = &mc->mc_packet->scsi_small;
		ss->opcode = MDACMD_SCSI;
#ifdef notdef
		/*
		 * XXX FreeBSD does this, but it doesn't fix anything,
		 * XXX and appears potentially harmful.
		 */
		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
#endif

		ss->data_size = htole32(xs->datalen);
		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
		    periph->periph_target, periph->periph_lun), ss->addr);

		if (xs->timeout < 60 * 1000)
			ss->timeout = xs->timeout / 1000 |
			    MLY_TIMEOUT_SECONDS;
		else if (xs->timeout < 60 * 60 * 1000)
			ss->timeout = xs->timeout / (60 * 1000) |
			    MLY_TIMEOUT_MINUTES;
		else
			ss->timeout = xs->timeout / (60 * 60 * 1000) |
			    MLY_TIMEOUT_HOURS;
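
		/*
		 * (Worked example: xs->timeout == 30000ms encodes as
		 * (30 | MLY_TIMEOUT_SECONDS), and 120000ms as
		 * (2 | MLY_TIMEOUT_MINUTES); the count sits in the low
		 * bits and the flag selects the scale.  Division binds
		 * tighter than `|', so the expressions above need no
		 * extra parentheses.)
		 */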

		ss->maximum_sense_size = sizeof(xs->sense);
		ss->cdb_length = xs->cmdlen;
		memcpy(ss->cdb, xs->cmd, xs->cmdlen);

		if (mc->mc_length != 0) {
			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
				mc->mc_flags |= MLY_CCB_DATAOUT;
			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
				mc->mc_flags |= MLY_CCB_DATAIN;

			if (mly_ccb_map(mly, mc) != 0) {
				xs->error = XS_DRIVER_STUFFUP;
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
				break;
			}
		}

		/*
		 * Give the command to the controller.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0) {
			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
				xs->error = XS_REQUEUE;
				if (mc->mc_length != 0)
					mly_ccb_unmap(mly, mc);
				mly_ccb_free(mly, mc);
				scsipi_done(xs);
			}
		} else
			mly_ccb_enqueue(mly, mc);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		mly_get_xfer_mode(mly, chan->chan_channel, arg);
		break;
	}
}

/*
 * Handle completion of a SCSI command.
 */
static void
mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct scsipi_xfer *xs;
	struct scsipi_channel *chan;
	struct scsipi_inquiry_data *inq;
	struct mly_btl *btl;
	int target, sl, s;
	const char *p;

	xs = mc->mc_private;
	xs->status = mc->mc_status;

	/*
	 * XXX The `resid' value as returned by the controller appears to be
	 * bogus, so we always set it to zero.  Is it perhaps the transfer
	 * count?
	 */
	xs->resid = 0; /* mc->mc_resid; */

	if (mc->mc_length != 0)
		mly_ccb_unmap(mly, mc);

	switch (mc->mc_status) {
	case SCSI_OK:
		/*
		 * In order to report logical device type and status, we
		 * overwrite the result of INQUIRY commands addressed to
		 * logical devices.
		 */
		if (xs->cmd->opcode == INQUIRY) {
			chan = xs->xs_periph->periph_channel;
			target = xs->xs_periph->periph_target;
			btl = &mly->mly_btl[chan->chan_channel][target];

			s = splbio();
			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
				inq = (struct scsipi_inquiry_data *)xs->data;
				mly_padstr(inq->vendor, "MYLEX", 8);
				p = mly_describe_code(mly_table_device_type,
				    btl->mb_type);
				mly_padstr(inq->product, p, 16);
				p = mly_describe_code(mly_table_device_state,
				    btl->mb_state);
				mly_padstr(inq->revision, p, 4);
			}
			splx(s);
		}
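
		/*
		 * (E.g. an online logical drive might now identify with
		 * vendor "MYLEX   ", the device-type string from
		 * mly_table_device_type as its product, and the state
		 * string from mly_table_device_state as its revision.)
		 */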

		xs->error = XS_NOERROR;
		break;

	case SCSI_CHECK:
		sl = mc->mc_sense;
		if (sl > sizeof(xs->sense.scsi_sense))
			sl = sizeof(xs->sense.scsi_sense);
		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
		xs->error = XS_SENSE;
		break;

	case SCSI_BUSY:
	case SCSI_QUEUE_FULL:
		xs->error = XS_BUSY;
		break;

	default:
		printf("%s: unknown SCSI status 0x%x\n",
		    device_xname(&mly->mly_dv), xs->status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	mly_ccb_free(mly, mc);
	scsipi_done(xs);
}

/*
 * Notify scsipi about a target's transfer mode.
 */
static void
mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
{
	struct mly_btl *btl;
	int s;

	btl = &mly->mly_btl[bus][xm->xm_target];
	xm->xm_mode = 0;

	s = splbio();

	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
		if (btl->mb_speed == 0) {
			xm->xm_period = 0;
			xm->xm_offset = 0;
		} else {
			xm->xm_period = 12;			/* XXX */
			xm->xm_offset = 8;			/* XXX */
			xm->xm_mode |= PERIPH_CAP_SYNC;		/* XXX */
		}

		switch (btl->mb_width) {
		case 32:
			xm->xm_mode |= PERIPH_CAP_WIDE32;
			break;
		case 16:
			xm->xm_mode |= PERIPH_CAP_WIDE16;
			break;
		default:
			/* Narrow bus; leave any sync capability intact. */
			break;
		}
	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
		xm->xm_period = 12;
		xm->xm_offset = 8;
	}
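
	/*
	 * (Logical devices are virtual, so fixed values are advertised
	 * rather than anything negotiated on a real bus; period factor 12
	 * with offset 8 corresponds to nominally 50ns, Fast-20-style sync
	 * timing.)
	 */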

	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
		xm->xm_mode |= PERIPH_CAP_TQING;

	splx(s);

	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
}

/*
 * ioctl hook; used here only to initiate low-level rescans.
 */
static int
mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
    int flag, struct proc *p)
{
	struct mly_softc *mly;
	int rv;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIOLLSCAN:
		mly_scan_channel(mly, chan->chan_channel);
		rv = 0;
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Handshake with the firmware while the card is being initialized.
 */
static int
mly_fwhandshake(struct mly_softc *mly)
{
	u_int8_t error, param0, param1;
	int spinup;

	spinup = 0;

	/* Set HM_STSACK and let the firmware initialize. */
	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
	DELAY(1000);	/* too short? */

	/* If HM_STSACK is still true, the controller is initializing. */
	if (!mly_idbr_true(mly, MLY_HM_STSACK))
		return (0);

	printf("%s: controller initialization started\n",
	    device_xname(&mly->mly_dv));

	/*
	 * Spin waiting for initialization to finish, or for a message to be
	 * delivered.
	 */
	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
		/* Check for a message */
		if (!mly_error_valid(mly))
			continue;

		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
		param0 = mly_inb(mly, mly->mly_cmd_mailbox);
		param1 = mly_inb(mly, mly->mly_cmd_mailbox + 1);
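
		/*
		 * (The message code is the error status register contents
		 * with MLY_MSG_EMPTY masked off; param0/param1 carry two
		 * message-specific mailbox bytes, read here but not
		 * currently interpreted.)
		 */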

		switch (error) {
		case MLY_MSG_SPINUP:
			if (!spinup) {
				printf("%s: drive spinup in progress\n",
				    device_xname(&mly->mly_dv));
				spinup = 1;
			}
			break;

		case MLY_MSG_RACE_RECOVERY_FAIL:
			printf("%s: mirror race recovery failed -\n",
			    device_xname(&mly->mly_dv));
			printf("%s: one or more drives offline\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_RACE_IN_PROGRESS:
			printf("%s: mirror race recovery in progress\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_RACE_ON_CRITICAL:
			printf("%s: mirror race recovery on critical drive\n",
			    device_xname(&mly->mly_dv));
			break;

		case MLY_MSG_PARITY_ERROR:
			printf("%s: FATAL MEMORY PARITY ERROR\n",
			    device_xname(&mly->mly_dv));
			return (ENXIO);

		default:
			printf("%s: unknown initialization code 0x%x\n",
			    device_xname(&mly->mly_dv), error);
			break;
		}
	}

	return (0);
}

/*
 * Space-fill a character string.
 */
static void
mly_padstr(char *dst, const char *src, int len)
{

	while (len-- > 0) {
		if (*src != '\0')
			*dst++ = *src++;
		else
			*dst++ = ' ';
	}
}
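
/*
 * (E.g. mly_padstr(inq->vendor, "MYLEX", 8) yields "MYLEX   ": space
 * padded to the full width with no terminating NUL, as fixed-width SCSI
 * INQUIRY string fields expect.)
 */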

/*
 * Allocate DMA safe memory.
 */
static int
mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
		 void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
{
	int rseg, rv, state;

	state = 0;

	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamem_alloc = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamem_map = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_create(mly->mly_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamap_create = %d\n", rv);
		goto bad;
	}

	state++;

	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&mly->mly_dv, "dmamap_load = %d\n", rv);
		goto bad;
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	memset(*kva, 0, size);
	return (0);

 bad:
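	/*
	 * Unwind in the reverse order of setup.  `state' counts the steps
	 * that completed: 1 = memory allocated, 2 = mapped into KVA,
	 * 3 = DMA map created.  The step that failed needs no cleanup of
	 * its own.
	 */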
	if (state > 2)
		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
	if (state > 1)
		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
	if (state > 0)
		bus_dmamem_free(mly->mly_dmat, seg, 1);

	return (rv);
}

/*
 * Free DMA safe memory.
 */
static void
mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
		void *kva, bus_dma_segment_t *seg)
{

	bus_dmamap_unload(mly->mly_dmat, dmamap);
	bus_dmamap_destroy(mly->mly_dmat, dmamap);
	bus_dmamem_unmap(mly->mly_dmat, kva, size);
	bus_dmamem_free(mly->mly_dmat, seg, 1);
}
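
/*
 * Sketch of the intended pairing (hypothetical caller; error handling
 * elided):
 *
 *	bus_dma_segment_t seg;
 *	bus_dmamap_t dmamap;
 *	void *kva;
 *	bus_addr_t paddr;
 *
 *	if (mly_dmamem_alloc(mly, size, &dmamap, &kva, &paddr, &seg) != 0)
 *		return (ENOMEM);
 *	... use kva for CPU access, paddr for the controller ...
 *	mly_dmamem_free(mly, size, dmamap, kva, &seg);
 *
 * The same `size' must be passed to both routines.
 */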


/*
 * Accept an open operation on the control device.
 */
int
mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mly_softc *mly;

	if ((mly = device_lookup_private(&mly_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
		return (ENXIO);
	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
		return (EBUSY);

	mly->mly_state |= MLY_STATE_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
int
mlyclose(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	struct mly_softc *mly;

	mly = device_lookup_private(&mly_cd, minor(dev));
	mly->mly_state &= ~MLY_STATE_OPEN;
	return (0);
}

/*
 * Handle control operations.
 */
int
mlyioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	struct mly_softc *mly;
	int rv;

	mly = device_lookup_private(&mly_cd, minor(dev));

	switch (cmd) {
	case MLYIO_COMMAND:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			break;

		rv = mly_user_command(mly, (void *)data);
		break;
	case MLYIO_HEALTH:
		rv = mly_user_health(mly, (void *)data);
		break;
	default:
		rv = ENOTTY;
		break;
	}

	return (rv);
}

/*
 * Execute a command passed in from userspace.
 *
 * The control structure contains the actual command for the controller, as
 * well as the user-space data pointer and data size, and an optional sense
 * buffer size/pointer.  On completion, the data size is adjusted to the
 * command residual, and the sense buffer size to the size of the returned
 * sense data.
 */
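
/*
 * Illustrative userland usage (a sketch, not part of the driver; it
 * assumes the mly_user_command layout used below and elides error
 * handling):
 *
 *	struct mly_user_command uc;
 *
 *	memset(&uc, 0, sizeof(uc));
 *	(fill in uc.CommandMailbox with the controller command)
 *	uc.DataTransferLength = 512;	(positive: data in; negative: out)
 *	uc.DataTransferBuffer = buf;
 *	uc.RequestSenseLength = sizeof(sense);
 *	uc.RequestSenseBuffer = sense;
 *	if (ioctl(fd, MLYIO_COMMAND, &uc) == -1)
 *		err(1, "MLYIO_COMMAND");
 *	(uc.CommandStatus, uc.DataTransferLength and uc.RequestSenseLength
 *	 then reflect the result)
 */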
static int
mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
{
	struct mly_ccb	*mc;
	int rv, mapped;

	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	mapped = 0;
	mc->mc_data = NULL;

	/*
	 * Handle data size/direction.
	 */
	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
		if (mc->mc_length > MAXPHYS) {
			rv = EINVAL;
			goto out;
		}

		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
		if (mc->mc_data == NULL) {
			rv = ENOMEM;
			goto out;
		}

		if (uc->DataTransferLength > 0) {
			mc->mc_flags |= MLY_CCB_DATAIN;
			memset(mc->mc_data, 0, mc->mc_length);
		}

		if (uc->DataTransferLength < 0) {
			mc->mc_flags |= MLY_CCB_DATAOUT;
			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
			    mc->mc_length);
			if (rv != 0)
				goto out;
		}

		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto out;
		mapped = 1;
	}

	/* Copy in the command and execute it. */
	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));

	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
		goto out;

	/* Return the data to userspace. */
	if (uc->DataTransferLength > 0) {
		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
		    mc->mc_length);
		if (rv != 0)
			goto out;
	}

	/* Return the sense buffer to userspace. */
	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
		    min(uc->RequestSenseLength, mc->mc_sense));
		if (rv != 0)
			goto out;
	}

	/* Return command results to userspace (caller will copy out). */
	uc->DataTransferLength = mc->mc_resid;
	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
	uc->CommandStatus = mc->mc_status;
	rv = 0;

 out:
	if (mapped)
		mly_ccb_unmap(mly, mc);
	if (mc->mc_data != NULL)
		free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);

	return (rv);
}

/*
 * Return health status to userspace.  If the health change index in the
 * user structure does not match that currently exported by the controller,
 * we return the current status immediately.  Otherwise, we block until
 * either interrupted or new status is delivered.
 */
static int
mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
{
	struct mly_health_status mh;
	int rv, s;

	/* Fetch the current health status from userspace. */
	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
	if (rv != 0)
		return (rv);

	/* Sleep waiting for a status update. */
	s = splbio();
	if (mly->mly_event_change == mh.change_counter)
		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
		    "mlyhealth", 0);
	splx(s);

	if (rv == 0) {
		/*
		 * Copy the controller's health status buffer out (there is
		 * a race here if it changes again).
		 */
		rv = copyout(&mly->mly_mmbox->mmm_health.status,
		    uh->HealthStatusBuffer, sizeof(mh));
	}

	return (rv);
}
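
/*
 * Illustrative userland polling loop (a sketch; it assumes the
 * mly_user_health/mly_health_status layouts used above and elides error
 * handling):
 *
 *	struct mly_health_status hs;
 *	struct mly_user_health uh;
 *
 *	memset(&hs, 0, sizeof(hs));
 *	uh.HealthStatusBuffer = &hs;
 *	for (;;) {
 *		if (ioctl(fd, MLYIO_HEALTH, &uh) == -1)
 *			break;
 *		(hs now holds the latest status; the next call blocks
 *		 until hs.change_counter is superseded)
 *	}
 */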