1 /*	$NetBSD: amr.c,v 1.27 2005/06/28 00:28:41 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*-
40  * Copyright (c) 1999,2000 Michael Smith
41  * Copyright (c) 2000 BSDi
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
66  * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
67  */
68 
69 /*
70  * Driver for AMI RAID controllers.
71  */
72 
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.27 2005/06/28 00:28:41 thorpej Exp $");
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/device.h>
80 #include <sys/queue.h>
81 #include <sys/proc.h>
82 #include <sys/buf.h>
83 #include <sys/malloc.h>
84 #include <sys/kthread.h>
85 
86 #include <uvm/uvm_extern.h>
87 
88 #include <machine/endian.h>
89 #include <machine/bus.h>
90 
91 #include <dev/pci/pcidevs.h>
92 #include <dev/pci/pcivar.h>
93 #include <dev/pci/amrreg.h>
94 #include <dev/pci/amrvar.h>
95 
96 #include "locators.h"
97 
98 static void	amr_attach(struct device *, struct device *, void *);
99 static void	amr_ccb_dump(struct amr_softc *, struct amr_ccb *);
100 static void	*amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t,
101 			     void *);
102 static int	amr_init(struct amr_softc *, const char *,
103 			 struct pci_attach_args *pa);
104 static int	amr_intr(void *);
105 static int	amr_match(struct device *, struct cfdata *, void *);
106 static int	amr_print(void *, const char *);
107 static void	amr_shutdown(void *);
108 static int	amr_submatch(struct device *, struct cfdata *,
109 			     const locdesc_t *, void *);
110 static void	amr_teardown(struct amr_softc *);
111 static void	amr_thread(void *);
112 static void	amr_thread_create(void *);
113 
114 static int	amr_quartz_get_work(struct amr_softc *,
115 				    struct amr_mailbox_resp *);
116 static int	amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
117 static int	amr_std_get_work(struct amr_softc *, struct amr_mailbox_resp *);
118 static int	amr_std_submit(struct amr_softc *, struct amr_ccb *);
119 
120 CFATTACH_DECL(amr, sizeof(struct amr_softc),
121     amr_match, amr_attach, NULL, NULL);
122 
123 #define AT_QUARTZ	0x01	/* `Quartz' chipset */
124 #define	AT_SIG		0x02	/* Check for signature */
125 
126 struct amr_pci_type {
127 	u_short	apt_vendor;
128 	u_short	apt_product;
129 	u_short	apt_flags;
130 } static const amr_pci_type[] = {
131 	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID,  0 },
132 	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID2, 0 },
133 	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
134 	{ PCI_VENDOR_SYMBIOS, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
135 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG },
136 	{ PCI_VENDOR_DELL,  PCI_PRODUCT_DELL_PERC_4DI, AT_QUARTZ },
137 	{ PCI_VENDOR_DELL,  PCI_PRODUCT_DELL_PERC_4DI_2, AT_QUARTZ },
138 	{ PCI_VENDOR_DELL,  PCI_PRODUCT_DELL_PERC_4ESI, AT_QUARTZ },
139 	{ PCI_VENDOR_SYMBIOS,  PCI_PRODUCT_SYMBIOS_PERC_4SC, AT_QUARTZ },
140 };
141 
142 struct amr_typestr {
143 	const char	*at_str;
144 	int		at_sig;
145 } static const amr_typestr[] = {
146 	{ "Series 431",			AMR_SIG_431 },
147 	{ "Series 438",			AMR_SIG_438 },
148 	{ "Series 466",			AMR_SIG_466 },
149 	{ "Series 467",			AMR_SIG_467 },
150 	{ "Series 490",			AMR_SIG_490 },
151 	{ "Series 762",			AMR_SIG_762 },
152 	{ "HP NetRAID (T5)",		AMR_SIG_T5 },
153 	{ "HP NetRAID (T7)",		AMR_SIG_T7 },
154 };
155 
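/*
 * Logical drive states, indexed by AMR_DRV_CURSTATE(state); see
 * amr_drive_state() below.
 */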
156 struct {
157 	const char	*ds_descr;
158 	int	ds_happy;
159 } static const amr_dstate[] = {
160 	{ "offline",	0 },
161 	{ "degraded",	1 },
162 	{ "optimal",	1 },
163 	{ "online",	1 },
164 	{ "failed",	0 },
165 	{ "rebuilding",	1 },
166 	{ "hotspare",	0 },
167 };
168 
169 static void	*amr_sdh;
170 
171 static int	amr_max_segs;
172 int		amr_max_xfer;
173 
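/*
 * Register access helpers.  Each read is preceded by a full barrier so
 * that it is ordered after any earlier bus space writes; each write is
 * followed by a write barrier so that it is pushed out before subsequent
 * accesses.
 */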
174 static inline u_int8_t
175 amr_inb(struct amr_softc *amr, int off)
176 {
177 
178 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
179 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
180 	return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
181 }
182 
183 static inline u_int32_t
184 amr_inl(struct amr_softc *amr, int off)
185 {
186 
187 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
188 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
189 	return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
190 }
191 
192 static inline void
193 amr_outb(struct amr_softc *amr, int off, u_int8_t val)
194 {
195 
196 	bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
197 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
198 	    BUS_SPACE_BARRIER_WRITE);
199 }
200 
201 static inline void
202 amr_outl(struct amr_softc *amr, int off, u_int32_t val)
203 {
204 
205 	bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
206 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
207 	    BUS_SPACE_BARRIER_WRITE);
208 }
209 
210 /*
211  * Match a supported device.
212  */
213 static int
214 amr_match(struct device *parent, struct cfdata *match, void *aux)
215 {
216 	struct pci_attach_args *pa;
217 	pcireg_t s;
218 	int i;
219 
220 	pa = (struct pci_attach_args *)aux;
221 
222 	/*
223 	 * Don't match the device if it's operating in I2O mode.  In this
224 	 * case it should be handled by the `iop' driver.
225 	 */
226 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
227 		return (0);
228 
229 	for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
230 		if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
231 		    PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
232 		    	break;
233 
234 	if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
235 		return (0);
236 
237 	if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
238 		return (1);
239 
240 	s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
241 	return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
242 }
243 
244 /*
245  * Attach a supported device.
246  */
247 static void
248 amr_attach(struct device *parent, struct device *self, void *aux)
249 {
250 	struct pci_attach_args *pa;
251 	struct amr_attach_args amra;
252 	const struct amr_pci_type *apt;
253 	struct amr_softc *amr;
254 	pci_chipset_tag_t pc;
255 	pci_intr_handle_t ih;
256 	const char *intrstr;
257 	pcireg_t reg;
258 	int rseg, i, j, size, rv, memreg, ioreg;
259 	struct amr_ccb *ac;
260 	int help[2];
261 	locdesc_t *ldesc = (void *)help; /* XXX */
262 
263 	aprint_naive(": RAID controller\n");
264 
265 	amr = (struct amr_softc *)self;
266 	pa = (struct pci_attach_args *)aux;
267 	pc = pa->pa_pc;
268 
269 	for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
270 		if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
271 		    PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
272 			break;
273 	apt = amr_pci_type + i;
274 
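	/*
	 * Scan the first two BARs.  Both memory and I/O space mappings may
	 * be implemented; the memory space mapping is preferred below when
	 * both are present.
	 */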
275 	memreg = ioreg = 0;
276 	for (i = 0x10; i <= 0x14; i += 4) {
277 		reg = pci_conf_read(pc, pa->pa_tag, i);
278 		switch (PCI_MAPREG_TYPE(reg)) {
279 		case PCI_MAPREG_TYPE_MEM:
280 			if (PCI_MAPREG_MEM_SIZE(reg) != 0)
281 				memreg = i;
282 			break;
283 		case PCI_MAPREG_TYPE_IO:
284 			if (PCI_MAPREG_IO_SIZE(reg) != 0)
285 				ioreg = i;
286 			break;
287 
288 		}
289 	}
290 
291 	if (memreg && pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0,
292 	    &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
293 		;
294 	else if (ioreg && pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0,
295 	    &amr->amr_iot, &amr->amr_ioh, NULL, &amr->amr_ios) == 0)
296 		;
297 	else {
298 		aprint_error("can't map control registers\n");
299 		amr_teardown(amr);
300 		return;
301 	}
302 
303 	amr->amr_flags |= AMRF_PCI_REGS;
304 	amr->amr_dmat = pa->pa_dmat;
305 	amr->amr_pc = pa->pa_pc;
306 
307 	/* Enable the device. */
308 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
309 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
310 	    reg | PCI_COMMAND_MASTER_ENABLE);
311 
312 	/* Map and establish the interrupt. */
313 	if (pci_intr_map(pa, &ih)) {
314 		aprint_error("can't map interrupt\n");
315 		amr_teardown(amr);
316 		return;
317 	}
318 	intrstr = pci_intr_string(pc, ih);
319 	amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr);
320 	if (amr->amr_ih == NULL) {
321 		aprint_error("can't establish interrupt");
322 		if (intrstr != NULL)
323 			aprint_normal(" at %s", intrstr);
324 		aprint_normal("\n");
325 		amr_teardown(amr);
326 		return;
327 	}
328 	amr->amr_flags |= AMRF_PCI_INTR;
329 
330 	/*
331 	 * Allocate space for the mailbox and S/G lists.  Some controllers
332 	 * don't like S/G lists to be located below 0x2000, so we allocate
333 	 * enough slop to enable us to compensate.
334 	 *
335 	 * The standard mailbox structure needs to be aligned on a 16-byte
336 	 * boundary.  The 64-bit mailbox has one extra field, 4 bytes in
337  * size, which precedes the standard mailbox.
338 	 */
339 	size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000;
340 	amr->amr_dmasize = size;
341 
342 	if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, 0,
343 	    &amr->amr_dmaseg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
344 		aprint_error("%s: unable to allocate buffer, rv = %d\n",
345 		    amr->amr_dv.dv_xname, rv);
346 		amr_teardown(amr);
347 		return;
348 	}
349 	amr->amr_flags |= AMRF_DMA_ALLOC;
350 
351 	if ((rv = bus_dmamem_map(amr->amr_dmat, &amr->amr_dmaseg, rseg, size,
352 	    (caddr_t *)&amr->amr_mbox,
353 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
354 		aprint_error("%s: unable to map buffer, rv = %d\n",
355 		    amr->amr_dv.dv_xname, rv);
356 		amr_teardown(amr);
357 		return;
358 	}
359 	amr->amr_flags |= AMRF_DMA_MAP;
360 
361 	if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0,
362 	    BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) {
363 		aprint_error("%s: unable to create buffer DMA map, rv = %d\n",
364 		    amr->amr_dv.dv_xname, rv);
365 		amr_teardown(amr);
366 		return;
367 	}
368 	amr->amr_flags |= AMRF_DMA_CREATE;
369 
370 	if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap,
371 	    amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) {
372 		aprint_error("%s: unable to load buffer DMA map, rv = %d\n",
373 		    amr->amr_dv.dv_xname, rv);
374 		amr_teardown(amr);
375 		return;
376 	}
377 	amr->amr_flags |= AMRF_DMA_LOAD;
378 
379 	memset(amr->amr_mbox, 0, size);
380 
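	/*
	 * The S/G pool starts at the first 0x2000-aligned physical address
	 * within the allocation; the same offset is applied to the mapped
	 * virtual address.  For example, a buffer at physical 0x1234100
	 * yields an S/G base of 0x1236000, leaving the space below it for
	 * the mailbox.
	 */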
381 	amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr;
382 	amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff;
383 	amr->amr_sgls = (struct amr_sgentry *)((caddr_t)amr->amr_mbox +
384 	    amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr);
385 
386 	/*
387  * Allocate and initialise the command control blocks.
388 	 */
389 	ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ac == NULL) {
		aprint_error("%s: memory exhausted\n", amr->amr_dv.dv_xname);
		amr_teardown(amr);
		return;
	}
390 	amr->amr_ccbs = ac;
391 	SLIST_INIT(&amr->amr_ccb_freelist);
392 	TAILQ_INIT(&amr->amr_ccb_active);
393 	amr->amr_flags |= AMRF_CCBS;
394 
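	/*
	 * Compute the global transfer limits once.  The largest transfer is
	 * limited to one page per S/G slot, less one slot (and capped at
	 * MAXPHYS); the segment count then allows one extra page, presumably
	 * for buffers that do not start on a page boundary.
	 */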
395 	if (amr_max_xfer == 0) {
396 		amr_max_xfer = min(((AMR_MAX_SEGS - 1) * PAGE_SIZE), MAXPHYS);
397 		amr_max_segs = (amr_max_xfer + (PAGE_SIZE * 2) - 1) / PAGE_SIZE;
398 	}
399 
400 	for (i = 0; i < AMR_MAX_CMDS; i++, ac++) {
401 		rv = bus_dmamap_create(amr->amr_dmat, amr_max_xfer,
402 		    amr_max_segs, amr_max_xfer, 0,
403 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ac->ac_xfer_map);
404 		if (rv != 0)
405 			break;
406 
407 		ac->ac_ident = i;
408 		amr_ccb_free(amr, ac);
409 	}
410 	if (i != AMR_MAX_CMDS) {
411 		aprint_error("%s: memory exhausted\n", amr->amr_dv.dv_xname);
412 		amr_teardown(amr);
413 		return;
414 	}
415 
416 	/*
417 	 * Take care of model-specific tasks.
418 	 */
419 	if ((apt->apt_flags & AT_QUARTZ) != 0) {
420 		amr->amr_submit = amr_quartz_submit;
421 		amr->amr_get_work = amr_quartz_get_work;
422 	} else {
423 		amr->amr_submit = amr_std_submit;
424 		amr->amr_get_work = amr_std_get_work;
425 
426 		/* Notify the controller of the mailbox location. */
427 		amr_outl(amr, AMR_SREG_MBOX, (u_int32_t)amr->amr_mbox_paddr + 16);
428 		amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR);
429 
430 		/* Clear outstanding interrupts and enable interrupts. */
431 		amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);
432 		amr_outb(amr, AMR_SREG_TOGL,
433 		    amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE);
434 	}
435 
436 	/*
437 	 * Retrieve parameters, and tell the world about us.
438 	 */
439 	amr->amr_enqbuf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT);
440 	amr->amr_flags |= AMRF_ENQBUF;
441 	amr->amr_maxqueuecnt = i;
442 	aprint_normal(": AMI RAID ");
443 	if (amr_init(amr, intrstr, pa) != 0) {
444 		amr_teardown(amr);
445 		return;
446 	}
447 
448 	/*
449 	 * Cap the maximum number of outstanding commands.  AMI's Linux
450 	 * driver doesn't trust the controller's reported value, and lockups
451 	 * have been seen when we do.
452 	 */
453 	amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS);
454 	if (amr->amr_maxqueuecnt > i)
455 		amr->amr_maxqueuecnt = i;
456 
457 	/* Set our `shutdownhook' before we start any device activity. */
458 	if (amr_sdh == NULL)
459 		amr_sdh = shutdownhook_establish(amr_shutdown, NULL);
460 
461 	/* Attach sub-devices. */
462 	for (j = 0; j < amr->amr_numdrives; j++) {
463 		if (amr->amr_drive[j].al_size == 0)
464 			continue;
465 		amra.amra_unit = j;
466 
467 		ldesc->len = 1;
468 		ldesc->locs[AMRCF_UNIT] = j;
469 
470 		amr->amr_drive[j].al_dv = config_found_sm_loc(&amr->amr_dv,
471 			"amr", ldesc, &amra, amr_print, amr_submatch);
472 	}
473 
474 	SIMPLEQ_INIT(&amr->amr_ccb_queue);
475 
476 	/* XXX This doesn't work for newer boards yet. */
477 	if ((apt->apt_flags & AT_QUARTZ) == 0)
478 		kthread_create(amr_thread_create, amr);
479 }
480 
481 /*
482  * Free up resources.
483  */
484 static void
485 amr_teardown(struct amr_softc *amr)
486 {
487 	struct amr_ccb *ac;
488 	int fl;
489 
490 	fl = amr->amr_flags;
491 
492 	if ((fl & AMRF_THREAD) != 0) {
493 		amr->amr_flags |= AMRF_THREAD_EXIT;
494 		wakeup(amr_thread);
495 		while ((amr->amr_flags & AMRF_THREAD_EXIT) != 0)
496 			tsleep(&amr->amr_flags, PWAIT, "amrexit", 0);
497 	}
498 	if ((fl & AMRF_CCBS) != 0) {
499 		SLIST_FOREACH(ac, &amr->amr_ccb_freelist, ac_chain.slist) {
500 			bus_dmamap_destroy(amr->amr_dmat, ac->ac_xfer_map);
501 		}
502 		free(amr->amr_ccbs, M_DEVBUF);
503 	}
504 	if ((fl & AMRF_ENQBUF) != 0)
505 		free(amr->amr_enqbuf, M_DEVBUF);
506 	if ((fl & AMRF_DMA_LOAD) != 0)
507 		bus_dmamap_unload(amr->amr_dmat, amr->amr_dmamap);
508 	if ((fl & AMRF_DMA_MAP) != 0)
509 		bus_dmamem_unmap(amr->amr_dmat, (caddr_t)amr->amr_mbox,
510 		    amr->amr_dmasize);
511 	if ((fl & AMRF_DMA_ALLOC) != 0)
512 		bus_dmamem_free(amr->amr_dmat, &amr->amr_dmaseg, 1);
513 	if ((fl & AMRF_DMA_CREATE) != 0)
514 		bus_dmamap_destroy(amr->amr_dmat, amr->amr_dmamap);
515 	if ((fl & AMRF_PCI_INTR) != 0)
516 		pci_intr_disestablish(amr->amr_pc, amr->amr_ih);
517 	if ((fl & AMRF_PCI_REGS) != 0)
518 		bus_space_unmap(amr->amr_iot, amr->amr_ioh, amr->amr_ios);
519 }
520 
521 /*
522  * Print autoconfiguration message for a sub-device.
523  */
524 static int
525 amr_print(void *aux, const char *pnp)
526 {
527 	struct amr_attach_args *amra;
528 
529 	amra = (struct amr_attach_args *)aux;
530 
531 	if (pnp != NULL)
532 		aprint_normal("block device at %s", pnp);
533 	aprint_normal(" unit %d", amra->amra_unit);
534 	return (UNCONF);
535 }
536 
537 /*
538  * Match a sub-device.
539  */
540 static int
541 amr_submatch(struct device *parent, struct cfdata *cf,
542 	     const locdesc_t *ldesc, void *aux)
543 {
544 	struct amr_attach_args *amra;
545 
546 	amra = (struct amr_attach_args *)aux;
547 
548 	if (cf->cf_loc[AMRCF_UNIT] != AMRCF_UNIT_DEFAULT &&
549 	    cf->cf_loc[AMRCF_UNIT] != ldesc->locs[AMRCF_UNIT])
550 		return (0);
551 
552 	return (config_match(parent, cf, aux));
553 }
554 
555 /*
556  * Retrieve operational parameters and describe the controller.
557  */
558 static int
559 amr_init(struct amr_softc *amr, const char *intrstr,
560 	 struct pci_attach_args *pa)
561 {
562 	struct amr_adapter_info *aa;
563 	struct amr_prodinfo *ap;
564 	struct amr_enquiry *ae;
565 	struct amr_enquiry3 *aex;
566 	const char *prodstr;
567 	u_int i, sig, ishp;
568 	char sbuf[64];
569 
570 	/*
571 	 * Try to get 40LD product info, which tells us what the card is
572 	 * labelled as.
573 	 */
574 	ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0,
575 	    amr->amr_enqbuf);
576 	if (ap != NULL) {
577 		aprint_normal("<%.80s>\n", ap->ap_product);
578 		if (intrstr != NULL)
579 			aprint_normal("%s: interrupting at %s\n",
580 			    amr->amr_dv.dv_xname, intrstr);
581 		aprint_normal("%s: firmware %.16s, BIOS %.16s, %dMB RAM\n",
582 		    amr->amr_dv.dv_xname, ap->ap_firmware, ap->ap_bios,
583 		    le16toh(ap->ap_memsize));
584 
585 		amr->amr_maxqueuecnt = ap->ap_maxio;
586 
587 		/*
588 		 * Fetch and record state of logical drives.
589 		 */
590 		aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
591 		    AMR_CONFIG_ENQ3_SOLICITED_FULL, amr->amr_enqbuf);
592 		if (aex == NULL) {
593 			aprint_error("%s: ENQUIRY3 failed\n",
594 			    amr->amr_dv.dv_xname);
595 			return (-1);
596 		}
597 
598 		if (aex->ae_numldrives > AMR_MAX_UNITS) {
599 			aprint_error(
600 			    "%s: adjust AMR_MAX_UNITS to %d (currently %d)\n",
601 			    amr->amr_dv.dv_xname, aex->ae_numldrives,
602 			    AMR_MAX_UNITS);
603 			amr->amr_numdrives = AMR_MAX_UNITS;
604 		} else
605 			amr->amr_numdrives = aex->ae_numldrives;
606 
607 		for (i = 0; i < amr->amr_numdrives; i++) {
608 			amr->amr_drive[i].al_size =
609 			    le32toh(aex->ae_drivesize[i]);
610 			amr->amr_drive[i].al_state = aex->ae_drivestate[i];
611 			amr->amr_drive[i].al_properties = aex->ae_driveprop[i];
612 		}
613 
614 		return (0);
615 	}
616 
617 	/*
618 	 * Try 8LD extended ENQUIRY to get the controller signature.  Once
619 	 * found, search for a product description.
620 	 */
621 	ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0, amr->amr_enqbuf);
622 	if (ae != NULL) {
623 		i = 0;
624 		sig = le32toh(ae->ae_signature);
625 
626 		while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
627 			if (amr_typestr[i].at_sig == sig)
628 				break;
629 			i++;
630 		}
631 		if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
632 			snprintf(sbuf, sizeof(sbuf),
633 			    "unknown ENQUIRY2 sig (0x%08x)", sig);
634 			prodstr = sbuf;
635 		} else
636 			prodstr = amr_typestr[i].at_str;
637 	} else {
638 		ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0, amr->amr_enqbuf);
639 		if (ae == NULL) {
640 			aprint_error("%s: unsupported controller\n",
641 			    amr->amr_dv.dv_xname);
642 			return (-1);
643 		}
644 
645 		switch (PCI_PRODUCT(pa->pa_id)) {
646 		case PCI_PRODUCT_AMI_MEGARAID:
647 			prodstr = "Series 428";
648 			break;
649 		case PCI_PRODUCT_AMI_MEGARAID2:
650 			prodstr = "Series 434";
651 			break;
652 		default:
653 			snprintf(sbuf, sizeof(sbuf), "unknown PCI dev (0x%04x)",
654 			    PCI_PRODUCT(pa->pa_id));
655 			prodstr = sbuf;
656 			break;
657 		}
658 	}
659 
660 	/*
661 	 * HP NetRaid controllers use a special encoding of the firmware
662 	 * and BIOS versions.  AMI firmware reports them as printable
663 	 * strings, whereas HP firmware uses a leading uppercase letter
664 	 * followed by two binary numbers.
665 	 */
666 	aa = &ae->ae_adapter;
667 
668 	if (aa->aa_firmware[2] >= 'A' && aa->aa_firmware[2] <= 'Z' &&
669 	    aa->aa_firmware[1] <  ' ' && aa->aa_firmware[0] <  ' ' &&
670 	    aa->aa_bios[2] >= 'A' && aa->aa_bios[2] <= 'Z' &&
671 	    aa->aa_bios[1] <  ' ' && aa->aa_bios[0] <  ' ') {
672 		if (le32toh(ae->ae_signature) == AMR_SIG_438) {
673 			/* The AMI 438 is a NetRaid 3si in HP-land. */
674 			prodstr = "HP NetRaid 3si";
675 		}
676 		ishp = 1;
677 	} else
678 		ishp = 0;
679 
680 	aprint_normal("<%s>\n", prodstr);
681 	if (intrstr != NULL)
682 		aprint_normal("%s: interrupting at %s\n", amr->amr_dv.dv_xname,
683 		    intrstr);
684 
685 	if (ishp)
686 		aprint_normal("%s: firmware <%c.%02d.%02d>, BIOS <%c.%02d.%02d>"
687 		    ", %dMB RAM\n", amr->amr_dv.dv_xname, aa->aa_firmware[2],
688 		     aa->aa_firmware[1], aa->aa_firmware[0], aa->aa_bios[2],
689 		     aa->aa_bios[1], aa->aa_bios[0], aa->aa_memorysize);
690 	else
691 		aprint_normal("%s: firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n",
692 		    amr->amr_dv.dv_xname, aa->aa_firmware, aa->aa_bios,
693 		    aa->aa_memorysize);
694 
695 	amr->amr_maxqueuecnt = aa->aa_maxio;
696 
697 	/*
698 	 * Record state of logical drives.
699 	 */
700 	if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) {
701 		aprint_error("%s: adjust AMR_MAX_UNITS to %d (currently %d)\n",
702 		    amr->amr_dv.dv_xname, ae->ae_ldrv.al_numdrives,
703 		    AMR_MAX_UNITS);
704 		amr->amr_numdrives = AMR_MAX_UNITS;
705 	} else
706 		amr->amr_numdrives = ae->ae_ldrv.al_numdrives;
707 
708 	for (i = 0; i < AMR_MAX_UNITS; i++) {
709 		amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]);
710 		amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i];
711 		amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i];
712 	}
713 
714 	return (0);
715 }
716 
717 /*
718  * Flush the internal cache on each configured controller.  Called at
719  * shutdown time.
720  */
721 static void
722 amr_shutdown(void *cookie)
723 {
724 	extern struct cfdriver amr_cd;
725 	struct amr_softc *amr;
726 	struct amr_ccb *ac;
727 	int i, rv, s;
728 
729 	for (i = 0; i < amr_cd.cd_ndevs; i++) {
730 		if ((amr = device_lookup(&amr_cd, i)) == NULL)
731 			continue;
732 
733 		if ((rv = amr_ccb_alloc(amr, &ac)) == 0) {
734 			ac->ac_cmd.mb_command = AMR_CMD_FLUSH;
735 			s = splbio();
736 			rv = amr_ccb_poll(amr, ac, 30000);
737 			splx(s);
738 			amr_ccb_free(amr, ac);
739 		}
740 		if (rv != 0)
741 			printf("%s: unable to flush cache (%d)\n",
742 			    amr->amr_dv.dv_xname, rv);
743 	}
744 }
745 
746 /*
747  * Interrupt service routine.
748  */
749 static int
750 amr_intr(void *cookie)
751 {
752 	struct amr_softc *amr;
753 	struct amr_ccb *ac;
754 	struct amr_mailbox_resp mbox;
755 	u_int i, forus, idx;
756 
757 	amr = cookie;
758 	forus = 0;
759 
760 	while ((*amr->amr_get_work)(amr, &mbox) == 0) {
761 		/* Iterate over completed commands in this result. */
762 		for (i = 0; i < mbox.mb_nstatus; i++) {
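			/*
			 * Completed-command identifiers are 1-based (they
			 * were set to ac_ident + 1 in amr_ccb_free()), so
			 * bias the index back down.
			 */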
763 			idx = mbox.mb_completed[i] - 1;
764 
765 			if (idx >= amr->amr_maxqueuecnt) {
766 				printf("%s: bad status (bogus ID: %u=%u)\n",
767 				    amr->amr_dv.dv_xname, i, idx);
768 				continue;
769 			}
770 			ac = amr->amr_ccbs + idx;
771 
772 			if ((ac->ac_flags & AC_ACTIVE) == 0) {
773 				printf("%s: bad status (not active; 0x%04x)\n",
774 				    amr->amr_dv.dv_xname, ac->ac_flags);
775 				continue;
776 			}
777 
778 			ac->ac_status = mbox.mb_status;
779 			ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) |
780 			    AC_COMPLETE;
781 			TAILQ_REMOVE(&amr->amr_ccb_active, ac, ac_chain.tailq);
782 
783 			if ((ac->ac_flags & AC_MOAN) != 0)
784 				printf("%s: ccb %d completed\n",
785 				    amr->amr_dv.dv_xname, ac->ac_ident);
786 
787 			/* Pass notification to upper layers. */
788 			if (ac->ac_handler != NULL)
789 				(*ac->ac_handler)(ac);
790 			else
791 				wakeup(ac);
792 		}
793 		forus = 1;
794 	}
795 
796 	if (forus)
797 		amr_ccb_enqueue(amr, NULL);
798 
799 	return (forus);
800 }
801 
802 /*
803  * Create the watchdog thread.
804  */
805 static void
806 amr_thread_create(void *cookie)
807 {
808 	struct amr_softc *amr;
809 	int rv;
810 
811 	amr = cookie;
812 
813 	if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
814 		amr->amr_flags ^= AMRF_THREAD_EXIT;
815 		wakeup(&amr->amr_flags);
816 		return;
817 	}
818 
819 	rv = kthread_create1(amr_thread, amr, &amr->amr_thread, "%s",
820 	    amr->amr_dv.dv_xname);
821 	if (rv != 0)
822 		aprint_error("%s: unable to create thread (%d)\n",
823 		    amr->amr_dv.dv_xname, rv);
824 	else
825 		amr->amr_flags |= AMRF_THREAD;
826 }
827 
828 /*
829  * Watchdog thread.
830  */
831 static void
832 amr_thread(void *cookie)
833 {
834 	struct amr_softc *amr;
835 	struct amr_ccb *ac;
836 	struct amr_logdrive *al;
837 	struct amr_enquiry *ae;
838 	time_t curtime;
839 	int rv, i, s;
840 
841 	amr = cookie;
842 	ae = amr->amr_enqbuf;
843 
844 	for (;;) {
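	/*
	 * Each pass: run the interrupt handler in case a completion was
	 * missed, complain about CCBs outstanding longer than AMR_TIMEOUT,
	 * then issue an ENQUIRY and report any logical drive state changes.
	 */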
845 		tsleep(amr_thread, PWAIT, "amrwdog", AMR_WDOG_TICKS);
846 
847 		if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
848 			amr->amr_flags ^= AMRF_THREAD_EXIT;
849 			wakeup(&amr->amr_flags);
850 			kthread_exit(0);
851 		}
852 
853 		s = splbio();
854 		amr_intr(cookie);
855 		curtime = (time_t)mono_time.tv_sec;
856 		ac = TAILQ_FIRST(&amr->amr_ccb_active);
857 		while (ac != NULL) {
858 			if (ac->ac_start_time + AMR_TIMEOUT > curtime)
859 				break;
860 			if ((ac->ac_flags & AC_MOAN) == 0) {
861 				printf("%s: ccb %d timed out; mailbox:\n",
862 				    amr->amr_dv.dv_xname, ac->ac_ident);
863 				amr_ccb_dump(amr, ac);
864 				ac->ac_flags |= AC_MOAN;
865 			}
866 			ac = TAILQ_NEXT(ac, ac_chain.tailq);
867 		}
868 		splx(s);
869 
870 		if ((rv = amr_ccb_alloc(amr, &ac)) != 0) {
871 			printf("%s: ccb_alloc failed (%d)\n",
872  			    amr->amr_dv.dv_xname, rv);
873 			continue;
874 		}
875 
876 		ac->ac_cmd.mb_command = AMR_CMD_ENQUIRY;
877 
878 		rv = amr_ccb_map(amr, ac, amr->amr_enqbuf,
879 		    AMR_ENQUIRY_BUFSIZE, 0);
880 		if (rv != 0) {
881 			printf("%s: ccb_map failed (%d)\n",
882  			    amr->amr_dv.dv_xname, rv);
883 			amr_ccb_free(amr, ac);
884 			continue;
885 		}
886 
887 		rv = amr_ccb_wait(amr, ac);
888 		amr_ccb_unmap(amr, ac);
889 		amr_ccb_free(amr, ac);
890 		if (rv != 0) {
891 			printf("%s: enquiry failed (st=%d)\n",
892 			    amr->amr_dv.dv_xname, ac->ac_status);
893 			continue;
894 		}
895 
896 		al = amr->amr_drive;
897 		for (i = 0; i < AMR_MAX_UNITS; i++, al++) {
898 			if (al->al_dv == NULL)
899 				continue;
900 			if (al->al_state == ae->ae_ldrv.al_state[i])
901 				continue;
902 
903 			printf("%s: state changed: %s -> %s\n",
904 			    al->al_dv->dv_xname,
905 			    amr_drive_state(al->al_state, NULL),
906 			    amr_drive_state(ae->ae_ldrv.al_state[i], NULL));
907 
908 			al->al_state = ae->ae_ldrv.al_state[i];
909 		}
910 	}
911 }
912 
913 /*
914  * Return a text description of a logical drive's current state.
915  */
916 const char *
917 amr_drive_state(int state, int *happy)
918 {
919 	const char *str;
920 
921 	state = AMR_DRV_CURSTATE(state);
922 	if (state >= sizeof(amr_dstate) / sizeof(amr_dstate[0])) {
923 		if (happy)
924 			*happy = 1;
925 		str = "status unknown";
926 	} else {
927 		if (happy)
928 			*happy = amr_dstate[state].ds_happy;
929 		str = amr_dstate[state].ds_descr;
930 	}
931 
932 	return (str);
933 }
934 
935 /*
936  * Run a generic enquiry-style command.
937  */
938 static void *
939 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub,
940 	    u_int8_t cmdqual, void *sbuf)
941 {
942 	struct amr_ccb *ac;
943 	u_int8_t *mb;
944 	int rv;
945 
946 	if (amr_ccb_alloc(amr, &ac) != 0)
947 		return (NULL);
948 
949 	/* Build the command proper. */
950 	mb = (u_int8_t *)&ac->ac_cmd;
951 	mb[0] = cmd;
952 	mb[2] = cmdsub;
953 	mb[3] = cmdqual;
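	/*
	 * The identifier and busy flag were already preset when the CCB was
	 * last freed (see amr_ccb_free()).
	 */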
954 
955 	rv = amr_ccb_map(amr, ac, sbuf, AMR_ENQUIRY_BUFSIZE, 0);
956 	if (rv == 0) {
957 		rv = amr_ccb_poll(amr, ac, 2000);
958 		amr_ccb_unmap(amr, ac);
959 	}
960 	amr_ccb_free(amr, ac);
961 
962 	return (rv ? NULL : sbuf);
963 }
964 
965 /*
966  * Allocate and initialise a CCB.
967  */
968 int
969 amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp)
970 {
971 	int s;
972 
973 	s = splbio();
974 	if ((*acp = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) {
975 		splx(s);
976 		return (EAGAIN);
977 	}
978 	SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist);
979 	splx(s);
980 
981 	return (0);
982 }
983 
984 /*
985  * Free a CCB.
986  */
987 void
988 amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac)
989 {
990 	int s;
991 
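	/*
	 * Reset the command block for its next user.  The mailbox identifier
	 * is biased by one so it is never zero, and mb_busy is preset to 1;
	 * the submit routines use the copy of this flag in the shared
	 * mailbox to tell whether the previous command has been accepted.
	 */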
992 	memset(&ac->ac_cmd, 0, sizeof(ac->ac_cmd));
993 	ac->ac_cmd.mb_ident = ac->ac_ident + 1;
994 	ac->ac_cmd.mb_busy = 1;
995 	ac->ac_handler = NULL;
996 	ac->ac_flags = 0;
997 
998 	s = splbio();
999 	SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist);
1000 	splx(s);
1001 }
1002 
1003 /*
1004  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
1005  * the order that they were enqueued and try to submit their command blocks
1006  * to the controller for execution.
1007  */
1008 void
1009 amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac)
1010 {
1011 	int s;
1012 
1013 	s = splbio();
1014 
1015 	if (ac != NULL)
1016 		SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq);
1017 
1018 	while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) {
1019 		if ((*amr->amr_submit)(amr, ac) != 0)
1020 			break;
1021 		SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac_chain.simpleq);
1022 		TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1023 	}
1024 
1025 	splx(s);
1026 }
1027 
1028 /*
1029  * Map the specified CCB's data buffer onto the bus, and fill the
1030  * scatter-gather list.
1031  */
1032 int
1033 amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size,
1034 	    int out)
1035 {
1036 	struct amr_sgentry *sge;
1037 	struct amr_mailbox_cmd *mb;
1038 	int nsegs, i, rv, sgloff;
1039 	bus_dmamap_t xfer;
1040 
1041 	xfer = ac->ac_xfer_map;
1042 
1043 	rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL,
1044 	    BUS_DMA_NOWAIT);
1045 	if (rv != 0)
1046 		return (rv);
1047 
1048 	mb = &ac->ac_cmd;
1049 	ac->ac_xfer_size = size;
1050 	ac->ac_flags |= (out ? AC_XFER_OUT : AC_XFER_IN);
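	/*
	 * Each CCB owns a fixed AMR_SGL_SIZE-byte slot in the shared S/G
	 * area, indexed by its identifier.
	 */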
1051 	sgloff = AMR_SGL_SIZE * ac->ac_ident;
1052 
1053 	/* We don't need to use a scatter/gather list for just 1 segment. */
1054 	nsegs = xfer->dm_nsegs;
1055 	if (nsegs == 1) {
1056 		mb->mb_nsgelem = 0;
1057 		mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr);
1058 		ac->ac_flags |= AC_NOSGL;
1059 	} else {
1060 		mb->mb_nsgelem = nsegs;
1061 		mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff);
1062 
1063 		sge = (struct amr_sgentry *)((caddr_t)amr->amr_sgls + sgloff);
1064 		for (i = 0; i < nsegs; i++, sge++) {
1065 			sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
1066 			sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
1067 		}
1068 	}
1069 
1070 	bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size,
1071 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1072 
1073 	if ((ac->ac_flags & AC_NOSGL) == 0)
1074 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff,
1075 		    AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE);
1076 
1077 	return (0);
1078 }
1079 
1080 /*
1081  * Unmap the specified CCB's data buffer.
1082  */
1083 void
1084 amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac)
1085 {
1086 
1087 	if ((ac->ac_flags & AC_NOSGL) == 0)
1088 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap,
1089 		    AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE,
1090 		    BUS_DMASYNC_POSTWRITE);
1091 	bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size,
1092 	    (ac->ac_flags & AC_XFER_IN) != 0 ?
1093 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1094 	bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map);
1095 }
1096 
1097 /*
1098  * Submit a command to the controller and poll on completion.  Return
1099  * non-zero on timeout or error.  Must be called with interrupts blocked.
1100  */
1101 int
1102 amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo)
1103 {
1104 	int rv;
1105 
1106 	if ((rv = (*amr->amr_submit)(amr, ac)) != 0)
1107 		return (rv);
1108 	TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1109 
1110 	for (timo *= 10; timo != 0; timo--) {
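	/* `timo' is in milliseconds; poll in 100us steps. */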
1111 		amr_intr(amr);
1112 		if ((ac->ac_flags & AC_COMPLETE) != 0)
1113 			break;
1114 		DELAY(100);
1115 	}
1116 
1117 	return (timo == 0 || ac->ac_status != 0 ? EIO : 0);
1118 }
1119 
1120 /*
1121  * Submit a command to the controller and sleep on completion.  Return
1122  * non-zero on error.
1123  */
1124 int
1125 amr_ccb_wait(struct amr_softc *amr, struct amr_ccb *ac)
1126 {
1127 	int s;
1128 
1129 	s = splbio();
1130 	amr_ccb_enqueue(amr, ac);
1131 	tsleep(ac, PRIBIO, "amrcmd", 0);
1132 	splx(s);
1133 
1134 	return (ac->ac_status != 0 ? EIO : 0);
1135 }
1136 
1137 #if 0
1138 /*
1139  * Wait for the mailbox to become available.
1140  */
1141 static int
1142 amr_mbox_wait(struct amr_softc *amr)
1143 {
1144 	int timo;
1145 
1146 	for (timo = 10000; timo != 0; timo--) {
1147 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1148 		    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1149 		if (amr->amr_mbox->mb_cmd.mb_busy == 0)
1150 			break;
1151 		DELAY(100);
1152 	}
1153 
1154 	if (timo == 0)
1155 		printf("%s: controller wedged\n", amr->amr_dv.dv_xname);
1156 
1157 	return (timo != 0 ? 0 : EAGAIN);
1158 }
1159 #endif
1160 
1161 /*
1162  * Tell the controller that the mailbox contains a valid command.  Must be
1163  * called with interrupts blocked.
1164  */
1165 static int
1166 amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
1167 {
1168 	u_int32_t v;
1169 
1170 	amr->amr_mbox->mb_poll = 0;
1171 	amr->amr_mbox->mb_ack = 0;
1172 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1173 	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1174 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1175 	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1176 	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
1177 		return (EAGAIN);
1178 
1179 	v = amr_inl(amr, AMR_QREG_IDB);
1180 	if ((v & AMR_QIDB_SUBMIT) != 0) {
1181 		amr->amr_mbox->mb_cmd.mb_busy = 0;
1182 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1183 		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1184 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1185 		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
1186 		return (EAGAIN);
1187 	}
1188 
1189 	amr->amr_mbox->mb_segment = 0;
1190 	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
1191 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1192 	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1193 
1194 	ac->ac_start_time = (time_t)mono_time.tv_sec;
1195 	ac->ac_flags |= AC_ACTIVE;
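	/*
	 * The address handed to the controller skips the 16 bytes reserved
	 * ahead of the standard mailbox (see the allocation comment in
	 * amr_attach()).
	 */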
1196 	amr_outl(amr, AMR_QREG_IDB,
1197 	    (amr->amr_mbox_paddr + 16) | AMR_QIDB_SUBMIT);
1198 	return (0);
1199 }
1200 
1201 static int
1202 amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
1203 {
1204 
1205 	amr->amr_mbox->mb_poll = 0;
1206 	amr->amr_mbox->mb_ack = 0;
1207 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1208 	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1209 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1210 	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1211 	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
1212 		return (EAGAIN);
1213 
1214 	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0) {
1215 		amr->amr_mbox->mb_cmd.mb_busy = 0;
1216 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1217 		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1218 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1219 		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
1220 		return (EAGAIN);
1221 	}
1222 
1223 	amr->amr_mbox->mb_segment = 0;
1224 	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
1225 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1226 	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
1227 
1228 	ac->ac_start_time = (time_t)mono_time.tv_sec;
1229 	ac->ac_flags |= AC_ACTIVE;
1230 	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
1231 	return (0);
1232 }
1233 
1234 /*
1235  * Claim any work that the controller has completed; acknowledge completion,
1236  * save details of the completion in (mbsave).  Must be called with
1237  * interrupts blocked.
1238  */
1239 static int
1240 amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
1241 {
1242 
1243 	/* Work waiting for us? */
1244 	if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
1245 		return (-1);
1246 
1247 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1248 	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1249 
1250 	/* Save the mailbox, which contains a list of completed commands. */
1251 	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));
1252 
1253 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1254 	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
1255 
1256 	/* Ack the interrupt and mailbox transfer. */
1257 	amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
1258 	amr_outl(amr, AMR_QREG_IDB, (amr->amr_mbox_paddr+16) | AMR_QIDB_ACK);
1259 
1260 	/*
1261 	 * This waits for the controller to notice that we've taken the
1262 	 * command from it.  It's very inefficient, and we shouldn't do it,
1263 	 * but if we remove this code, we stop completing commands under
1264 	 * load.
1265 	 *
1266 	 * Peter J says we shouldn't do this.  The documentation says we
1267 	 * should.  Who is right?
1268 	 */
1269 	while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
1270 		DELAY(10);
1271 
1272 	return (0);
1273 }
1274 
1275 static int
1276 amr_std_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
1277 {
1278 	u_int8_t istat;
1279 
1280 	/* Check for valid interrupt status. */
1281 	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
1282 		return (-1);
1283 
1284 	/* Ack the interrupt. */
1285 	amr_outb(amr, AMR_SREG_INTR, istat);
1286 
1287 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1288 	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1289 
1290 	/* Save mailbox, which contains a list of completed commands. */
1291 	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));
1292 
1293 	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1294 	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
1295 
1296 	/* Ack mailbox transfer. */
1297 	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);
1298 
1299 	return (0);
1300 }
1301 
1302 static void
1303 amr_ccb_dump(struct amr_softc *amr, struct amr_ccb *ac)
1304 {
1305 	int i;
1306 
1307 	printf("%s: ", amr->amr_dv.dv_xname);
1308 	for (i = 0; i < 4; i++)
1309 		printf("%08x ", ((u_int32_t *)&ac->ac_cmd)[i]);
1310 	printf("\n");
1311 }
1312