xref: /netbsd-src/sys/dev/pci/amr.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: amr.c,v 1.11 2003/05/15 18:04:08 fvdl Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*-
40  * Copyright (c) 1999,2000 Michael Smith
41  * Copyright (c) 2000 BSDi
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
66  * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
67  */
68 
69 /*
70  * Driver for AMI RAID controllers.
71  */
72 
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.11 2003/05/15 18:04:08 fvdl Exp $");
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/device.h>
80 #include <sys/queue.h>
81 #include <sys/proc.h>
82 #include <sys/buf.h>
83 #include <sys/malloc.h>
84 #include <sys/kthread.h>
85 
86 #include <uvm/uvm_extern.h>
87 
88 #include <machine/endian.h>
89 #include <machine/bus.h>
90 
91 #include <dev/pci/pcidevs.h>
92 #include <dev/pci/pcivar.h>
93 #include <dev/pci/amrreg.h>
94 #include <dev/pci/amrvar.h>
95 
96 void	amr_attach(struct device *, struct device *, void *);
97 void	amr_ccb_dump(struct amr_softc *, struct amr_ccb *);
98 void	*amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t, void *);
99 int	amr_init(struct amr_softc *, const char *,
100 			 struct pci_attach_args *pa);
101 int	amr_intr(void *);
102 int	amr_match(struct device *, struct cfdata *, void *);
103 int	amr_print(void *, const char *);
104 void	amr_shutdown(void *);
105 int	amr_submatch(struct device *, struct cfdata *, void *);
106 void	amr_teardown(struct amr_softc *);
107 void	amr_thread(void *);
108 void	amr_thread_create(void *);
109 
110 int	amr_mbox_wait(struct amr_softc *);
111 int	amr_quartz_get_work(struct amr_softc *, struct amr_mailbox_resp *);
112 int	amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
113 int	amr_std_get_work(struct amr_softc *, struct amr_mailbox_resp *);
114 int	amr_std_submit(struct amr_softc *, struct amr_ccb *);
115 
116 static inline u_int8_t	amr_inb(struct amr_softc *, int);
117 static inline u_int32_t	amr_inl(struct amr_softc *, int);
118 static inline void	amr_outb(struct amr_softc *, int, u_int8_t);
119 static inline void	amr_outl(struct amr_softc *, int, u_int32_t);
120 
/* Autoconfiguration glue: softc size plus match/attach entry points. */
CFATTACH_DECL(amr, sizeof(struct amr_softc),
    amr_match, amr_attach, NULL, NULL);

#define AT_QUARTZ	0x01	/* `Quartz' chipset */
#define	AT_SIG		0x02	/* Check for signature */

/*
 * Table of PCI vendor/product IDs handled by this driver, and the AT_*
 * flags that steer model-specific handling in amr_match()/amr_attach().
 */
struct amr_pci_type {
	u_short	apt_vendor;	/* PCI vendor ID */
	u_short	apt_product;	/* PCI product ID */
	u_short	apt_flags;	/* AT_* flags above */
} const amr_pci_type[] = {
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID,  0 },
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID2, 0 },
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG }
};

/*
 * Map of ENQUIRY2 controller signatures to human-readable model names,
 * used by amr_init() when describing the controller.
 */
struct amr_typestr {
	const char	*at_str;	/* model description */
	int		at_sig;		/* AMR_SIG_* signature value */
} const amr_typestr[] = {
	{ "Series 431",			AMR_SIG_431 },
	{ "Series 438",			AMR_SIG_438 },
	{ "Series 466",			AMR_SIG_466 },
	{ "Series 467",			AMR_SIG_467 },
	{ "Series 490",			AMR_SIG_490 },
	{ "Series 762",			AMR_SIG_762 },
	{ "HP NetRAID (T5)",		AMR_SIG_T5 },
	{ "HP NetRAID (T7)",		AMR_SIG_T7 },
};

/*
 * Logical drive state descriptions, indexed by AMR_DRV_CURSTATE();
 * consumed by amr_drive_state().
 */
struct {
	const char	*ds_descr;	/* printable state name */
	int	ds_happy;	/* non-zero if the state is not a failure */
} const amr_dstate[] = {
	{ "offline",	0 },
	{ "degraded",	1 },
	{ "optimal",	1 },
	{ "online",	1 },
	{ "failed",	0 },
	{ "rebuilding",	1 },
	{ "hotspare",	0 },
};

void	*amr_sdh;	/* shutdown hook, shared by all controller instances */
int	amr_max_segs;	/* max DMA segments per transfer (computed once) */
int	amr_max_xfer;	/* max transfer size in bytes (computed once) */
168 
169 static inline u_int8_t
170 amr_inb(struct amr_softc *amr, int off)
171 {
172 
173 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
174 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
175 	return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
176 }
177 
178 static inline u_int32_t
179 amr_inl(struct amr_softc *amr, int off)
180 {
181 
182 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
183 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
184 	return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
185 }
186 
187 static inline void
188 amr_outb(struct amr_softc *amr, int off, u_int8_t val)
189 {
190 
191 	bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
192 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
193 	    BUS_SPACE_BARRIER_WRITE);
194 }
195 
196 static inline void
197 amr_outl(struct amr_softc *amr, int off, u_int32_t val)
198 {
199 
200 	bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
201 	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
202 	    BUS_SPACE_BARRIER_WRITE);
203 }
204 
205 /*
206  * Match a supported device.
207  */
208 int
209 amr_match(struct device *parent, struct cfdata *match, void *aux)
210 {
211 	struct pci_attach_args *pa;
212 	pcireg_t s;
213 	int i;
214 
215 	pa = (struct pci_attach_args *)aux;
216 
217 	/*
218 	 * Don't match the device if it's operating in I2O mode.  In this
219 	 * case it should be handled by the `iop' driver.
220 	 */
221 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
222 		return (0);
223 
224 	for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
225 		if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
226 		    PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
227 		    	break;
228 
229 	if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
230 		return (0);
231 
232 	if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
233 		return (1);
234 
235 	s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
236 	return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
237 }
238 
239 /*
240  * Attach a supported device.
241  */
242 void
243 amr_attach(struct device *parent, struct device *self, void *aux)
244 {
245 	bus_space_tag_t memt, iot;
246 	bus_space_handle_t memh, ioh;
247 	struct pci_attach_args *pa;
248 	struct amr_attach_args amra;
249 	const struct amr_pci_type *apt;
250 	struct amr_softc *amr;
251 	pci_chipset_tag_t pc;
252 	pci_intr_handle_t ih;
253 	const char *intrstr;
254 	pcireg_t reg;
255 	int rseg, i, j, size, rv, memreg, ioreg;
256 	bus_size_t memsize, iosize;
257         struct amr_ccb *ac;
258 
259 	aprint_naive(": RAID controller\n");
260 
261 	amr = (struct amr_softc *)self;
262 	pa = (struct pci_attach_args *)aux;
263 	pc = pa->pa_pc;
264 
265 	for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
266 		if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
267 		    PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
268 			break;
269 	apt = amr_pci_type + i;
270 
271 	memreg = ioreg = 0;
272 	for (i = 0x10; i <= 0x14; i += 4) {
273 		reg = pci_conf_read(pc, pa->pa_tag, i);
274 		switch (PCI_MAPREG_TYPE(reg)) {
275 		case PCI_MAPREG_TYPE_MEM:
276 			if ((memsize = PCI_MAPREG_MEM_SIZE(reg)) != 0)
277 				memreg = i;
278 			break;
279 		case PCI_MAPREG_TYPE_IO:
280 			if ((iosize = PCI_MAPREG_IO_SIZE(reg)) != 0)
281 				ioreg = i;
282 			break;
283 		}
284 	}
285 
286 	if (memreg != 0)
287 		if (pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0,
288 		    &memt, &memh, NULL, NULL))
289 			memreg = 0;
290 	if (ioreg != 0)
291 		if (pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0,
292 		    &iot, &ioh, NULL, NULL))
293 			ioreg = 0;
294 
295 	if (memreg) {
296 		amr->amr_iot = memt;
297 		amr->amr_ioh = memh;
298 		amr->amr_ios = memsize;
299 	} else if (ioreg) {
300 		amr->amr_iot = iot;
301 		amr->amr_ioh = ioh;
302 		amr->amr_ios = iosize;
303 	} else {
304 		aprint_error("can't map control registers\n");
305 		amr_teardown(amr);
306 		return;
307 	}
308 
309 	amr->amr_flags |= AMRF_PCI_REGS;
310 	amr->amr_dmat = pa->pa_dmat;
311 	amr->amr_pc = pa->pa_pc;
312 
313 	/* Enable the device. */
314 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
315 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
316 	    reg | PCI_COMMAND_MASTER_ENABLE);
317 
318 	/* Map and establish the interrupt. */
319 	if (pci_intr_map(pa, &ih)) {
320 		aprint_error("can't map interrupt\n");
321 		amr_teardown(amr);
322 		return;
323 	}
324 	intrstr = pci_intr_string(pc, ih);
325 	amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr);
326 	if (amr->amr_ih == NULL) {
327 		aprint_error("can't establish interrupt");
328 		if (intrstr != NULL)
329 			aprint_normal(" at %s", intrstr);
330 		aprint_normal("\n");
331 		amr_teardown(amr);
332 		return;
333 	}
334 	amr->amr_flags |= AMRF_PCI_INTR;
335 
336 	/*
337 	 * Allocate space for the mailbox and S/G lists.  Some controllers
338 	 * don't like S/G lists to be located below 0x2000, so we allocate
339 	 * enough slop to enable us to compensate.
340 	 *
341 	 * The standard mailbox structure needs to be aligned on a 16-byte
342 	 * boundary.  The 64-bit mailbox has one extra field, 4 bytes in
343 	 * size, which preceeds the standard mailbox.
344 	 */
345 	size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000;
346 	amr->amr_dmasize = size;
347 
348 	if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, NULL,
349 	    &amr->amr_dmaseg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
350 		aprint_error("%s: unable to allocate buffer, rv = %d\n",
351 		    amr->amr_dv.dv_xname, rv);
352 		amr_teardown(amr);
353 		return;
354 	}
355 	amr->amr_flags |= AMRF_DMA_ALLOC;
356 
357 	if ((rv = bus_dmamem_map(amr->amr_dmat, &amr->amr_dmaseg, rseg, size,
358 	    (caddr_t *)&amr->amr_mbox,
359 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
360 		aprint_error("%s: unable to map buffer, rv = %d\n",
361 		    amr->amr_dv.dv_xname, rv);
362 		amr_teardown(amr);
363 		return;
364 	}
365 	amr->amr_flags |= AMRF_DMA_MAP;
366 
367 	if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0,
368 	    BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) {
369 		aprint_error("%s: unable to create buffer DMA map, rv = %d\n",
370 		    amr->amr_dv.dv_xname, rv);
371 		amr_teardown(amr);
372 		return;
373 	}
374 	amr->amr_flags |= AMRF_DMA_CREATE;
375 
376 	if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap,
377 	    amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) {
378 		aprint_error("%s: unable to load buffer DMA map, rv = %d\n",
379 		    amr->amr_dv.dv_xname, rv);
380 		amr_teardown(amr);
381 		return;
382 	}
383 	amr->amr_flags |= AMRF_DMA_LOAD;
384 
385 	memset(amr->amr_mbox, 0, size);
386 
387 	amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr;
388 	amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff;
389 	amr->amr_sgls = (struct amr_sgentry *)((caddr_t)amr->amr_mbox +
390 	    amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr);
391 
392 	/*
393 	 * Allocate and initalise the command control blocks.
394 	 */
395 	ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO);
396 	amr->amr_ccbs = ac;
397 	SLIST_INIT(&amr->amr_ccb_freelist);
398 	TAILQ_INIT(&amr->amr_ccb_active);
399 	amr->amr_flags |= AMRF_CCBS;
400 
401 	if (amr_max_xfer == 0) {
402 		amr_max_xfer = min(((AMR_MAX_SEGS - 1) * PAGE_SIZE), MAXPHYS);
403 		amr_max_segs = (amr_max_xfer + (PAGE_SIZE * 2) - 1) / PAGE_SIZE;
404 	}
405 
406 	for (i = 0; i < AMR_MAX_CMDS; i++, ac++) {
407 		rv = bus_dmamap_create(amr->amr_dmat, amr_max_xfer,
408 		    amr_max_segs, amr_max_xfer, 0,
409 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ac->ac_xfer_map);
410 		if (rv != 0)
411 			break;
412 
413 		ac->ac_ident = i;
414 		amr_ccb_free(amr, ac);
415 	}
416 	if (i != AMR_MAX_CMDS) {
417 		aprint_error("%s: memory exhausted\n", amr->amr_dv.dv_xname);
418 		amr_teardown(amr);
419 		return;
420 	}
421 
422 	/*
423 	 * Take care of model-specific tasks.
424 	 */
425 	if ((apt->apt_flags & AT_QUARTZ) != 0) {
426 		amr->amr_submit = amr_quartz_submit;
427 		amr->amr_get_work = amr_quartz_get_work;
428 	} else {
429 		amr->amr_submit = amr_std_submit;
430 		amr->amr_get_work = amr_std_get_work;
431 
432 		/* Notify the controller of the mailbox location. */
433 		amr_outl(amr, AMR_SREG_MBOX, (u_int32_t)amr->amr_mbox_paddr + 16);
434 		amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR);
435 
436 		/* Clear outstanding interrupts and enable interrupts. */
437 		amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);
438 		amr_outb(amr, AMR_SREG_TOGL,
439 		    amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE);
440 	}
441 
442 	/*
443 	 * Retrieve parameters, and tell the world about us.
444 	 */
445 	amr->amr_enqbuf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT);
446 	amr->amr_flags |= AMRF_ENQBUF;
447 	amr->amr_maxqueuecnt = i;
448 	aprint_normal(": AMI RAID ");
449 	if (amr_init(amr, intrstr, pa) != 0) {
450 		amr_teardown(amr);
451 		return;
452 	}
453 
454 	/*
455 	 * Cap the maximum number of outstanding commands.  AMI's Linux
456 	 * driver doesn't trust the controller's reported value, and lockups
457 	 * have been seen when we do.
458 	 */
459 	amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS);
460 	if (amr->amr_maxqueuecnt > i)
461 		amr->amr_maxqueuecnt = i;
462 
463 	/* Set our `shutdownhook' before we start any device activity. */
464 	if (amr_sdh == NULL)
465 		amr_sdh = shutdownhook_establish(amr_shutdown, NULL);
466 
467 	/* Attach sub-devices. */
468 	for (j = 0; j < amr->amr_numdrives; j++) {
469 		if (amr->amr_drive[j].al_size == 0)
470 			continue;
471 		amra.amra_unit = j;
472 		amr->amr_drive[j].al_dv = config_found_sm(&amr->amr_dv, &amra,
473 		    amr_print, amr_submatch);
474 	}
475 
476 	SIMPLEQ_INIT(&amr->amr_ccb_queue);
477 	kthread_create(amr_thread_create, amr);
478 }
479 
/*
 * Free up resources.
 *
 * Used both for failed attaches and full teardown; amr_flags records
 * exactly which resources were acquired, so only those are released.
 */
void
amr_teardown(struct amr_softc *amr)
{
	struct amr_ccb *ac;
	int fl;

	/* Snapshot the flags before the thread-exit handshake mutates them. */
	fl = amr->amr_flags;

	/* Ask the watchdog thread to exit, and wait until it does. */
	if ((fl & AMRF_THREAD) != 0) {
		amr->amr_flags |= AMRF_THREAD_EXIT;
		wakeup(amr_thread);
		while ((amr->amr_flags & AMRF_THREAD_EXIT) != 0)
			tsleep(&amr->amr_flags, PWAIT, "amrexit", 0);
	}
	/*
	 * Destroy the per-CCB transfer maps and release the CCB array.
	 * NOTE(review): only CCBs on the free list are visited; a CCB not
	 * on the list at this point would leak its map — verify callers
	 * only tear down with no commands outstanding.
	 */
	if ((fl & AMRF_CCBS) != 0) {
		SLIST_FOREACH(ac, &amr->amr_ccb_freelist, ac_chain.slist) {
			bus_dmamap_destroy(amr->amr_dmat, ac->ac_xfer_map);
		}
		free(amr->amr_ccbs, M_DEVBUF);
	}
	if ((fl & AMRF_ENQBUF) != 0)
		free(amr->amr_enqbuf, M_DEVBUF);
	/* Release the mailbox/SGL DMA resources in reverse order of setup. */
	if ((fl & AMRF_DMA_LOAD) != 0)
		bus_dmamap_unload(amr->amr_dmat, amr->amr_dmamap);
	if ((fl & AMRF_DMA_MAP) != 0)
		bus_dmamem_unmap(amr->amr_dmat, (caddr_t)amr->amr_mbox,
		    amr->amr_dmasize);
	if ((fl & AMRF_DMA_ALLOC) != 0)
		bus_dmamem_free(amr->amr_dmat, &amr->amr_dmaseg, 1);
	if ((fl & AMRF_DMA_CREATE) != 0)
		bus_dmamap_destroy(amr->amr_dmat, amr->amr_dmamap);
	/* Finally, detach from the interrupt and unmap the registers. */
	if ((fl & AMRF_PCI_INTR) != 0)
		pci_intr_disestablish(amr->amr_pc, amr->amr_ih);
	if ((fl & AMRF_PCI_REGS) != 0)
		bus_space_unmap(amr->amr_iot, amr->amr_ioh, amr->amr_ios);
}
519 
520 /*
521  * Print autoconfiguration message for a sub-device.
522  */
523 int
524 amr_print(void *aux, const char *pnp)
525 {
526 	struct amr_attach_args *amra;
527 
528 	amra = (struct amr_attach_args *)aux;
529 
530 	if (pnp != NULL)
531 		aprint_normal("block device at %s", pnp);
532 	aprint_normal(" unit %d", amra->amra_unit);
533 	return (UNCONF);
534 }
535 
536 /*
537  * Match a sub-device.
538  */
539 int
540 amr_submatch(struct device *parent, struct cfdata *cf, void *aux)
541 {
542 	struct amr_attach_args *amra;
543 
544 	amra = (struct amr_attach_args *)aux;
545 
546 	if (cf->amracf_unit != AMRCF_UNIT_DEFAULT &&
547 	    cf->amracf_unit != amra->amra_unit)
548 		return (0);
549 
550 	return (config_match(parent, cf, aux));
551 }
552 
553 /*
554  * Retrieve operational parameters and describe the controller.
555  */
556 int
557 amr_init(struct amr_softc *amr, const char *intrstr,
558 	 struct pci_attach_args *pa)
559 {
560 	struct amr_adapter_info *aa;
561 	struct amr_prodinfo *ap;
562 	struct amr_enquiry *ae;
563 	struct amr_enquiry3 *aex;
564 	const char *prodstr;
565 	u_int i, sig, ishp;
566 	char buf[64];
567 
568 	/*
569 	 * Try to get 40LD product info, which tells us what the card is
570 	 * labelled as.
571 	 */
572 	ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0,
573 	    amr->amr_enqbuf);
574 	if (ap != NULL) {
575 		aprint_normal("<%.80s>\n", ap->ap_product);
576 		if (intrstr != NULL)
577 			aprint_normal("%s: interrupting at %s\n",
578 			    amr->amr_dv.dv_xname, intrstr);
579 		aprint_normal("%s: firmware %.16s, BIOS %.16s, %dMB RAM\n",
580 		    amr->amr_dv.dv_xname, ap->ap_firmware, ap->ap_bios,
581 		    le16toh(ap->ap_memsize));
582 
583 		amr->amr_maxqueuecnt = ap->ap_maxio;
584 
585 		/*
586 		 * Fetch and record state of logical drives.
587 		 */
588 		aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
589 		    AMR_CONFIG_ENQ3_SOLICITED_FULL, amr->amr_enqbuf);
590 		if (aex == NULL) {
591 			aprint_error("%s ENQUIRY3 failed\n",
592 			    amr->amr_dv.dv_xname);
593 			return (-1);
594 		}
595 
596 		if (aex->ae_numldrives > AMR_MAX_UNITS) {
597 			aprint_error(
598 			    "%s: adjust AMR_MAX_UNITS to %d (currently %d)"
599 			    "\n", amr->amr_dv.dv_xname,
600 			    ae->ae_ldrv.al_numdrives, AMR_MAX_UNITS);
601 			amr->amr_numdrives = AMR_MAX_UNITS;
602 		} else
603 			amr->amr_numdrives = aex->ae_numldrives;
604 
605 		for (i = 0; i < amr->amr_numdrives; i++) {
606 			amr->amr_drive[i].al_size =
607 			    le32toh(aex->ae_drivesize[i]);
608 			amr->amr_drive[i].al_state = aex->ae_drivestate[i];
609 			amr->amr_drive[i].al_properties = aex->ae_driveprop[i];
610 		}
611 
612 		return (0);
613 	}
614 
615 	/*
616 	 * Try 8LD extended ENQUIRY to get the controller signature.  Once
617 	 * found, search for a product description.
618 	 */
619 	ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0, amr->amr_enqbuf);
620 	if (ae != NULL) {
621 		i = 0;
622 		sig = le32toh(ae->ae_signature);
623 
624 		while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
625 			if (amr_typestr[i].at_sig == sig)
626 				break;
627 			i++;
628 		}
629 		if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) {
630 			sprintf(buf, "unknown ENQUIRY2 sig (0x%08x)", sig);
631 			prodstr = buf;
632 		} else
633 			prodstr = amr_typestr[i].at_str;
634 	} else {
635 		ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0, amr->amr_enqbuf);
636 		if (ae == NULL) {
637 			aprint_error("%s: unsupported controller\n",
638 			    amr->amr_dv.dv_xname);
639 			return (-1);
640 		}
641 
642 		switch (PCI_PRODUCT(pa->pa_id)) {
643 		case PCI_PRODUCT_AMI_MEGARAID:
644 			prodstr = "Series 428";
645 			break;
646 		case PCI_PRODUCT_AMI_MEGARAID2:
647 			prodstr = "Series 434";
648 			break;
649 		default:
650 			sprintf(buf, "unknown PCI dev (0x%04x)",
651 			    PCI_PRODUCT(pa->pa_id));
652 			prodstr = buf;
653 			break;
654 		}
655 	}
656 
657 	/*
658 	 * HP NetRaid controllers have a special encoding of the firmware
659 	 * and BIOS versions.  The AMI version seems to have it as strings
660 	 * whereas the HP version does it with a leading uppercase character
661 	 * and two binary numbers.
662 	*/
663 	aa = &ae->ae_adapter;
664 
665 	if (aa->aa_firmware[2] >= 'A' && aa->aa_firmware[2] <= 'Z' &&
666 	    aa->aa_firmware[1] <  ' ' && aa->aa_firmware[0] <  ' ' &&
667 	    aa->aa_bios[2] >= 'A' && aa->aa_bios[2] <= 'Z' &&
668 	    aa->aa_bios[1] <  ' ' && aa->aa_bios[0] <  ' ') {
669 		if (le32toh(ae->ae_signature) == AMR_SIG_438) {
670 			/* The AMI 438 is a NetRaid 3si in HP-land. */
671 			prodstr = "HP NetRaid 3si";
672 		}
673 		ishp = 1;
674 	} else
675 		ishp = 0;
676 
677 	aprint_normal("<%s>\n", prodstr);
678 	if (intrstr != NULL)
679 		aprint_normal("%s: interrupting at %s\n", amr->amr_dv.dv_xname,
680 		    intrstr);
681 
682 	if (ishp)
683 		aprint_normal("%s: firmware <%c.%02d.%02d>, BIOS <%c.%02d.%02d>"
684 		    ", %dMB RAM\n", amr->amr_dv.dv_xname, aa->aa_firmware[2],
685 		     aa->aa_firmware[1], aa->aa_firmware[0], aa->aa_bios[2],
686 		     aa->aa_bios[1], aa->aa_bios[0], aa->aa_memorysize);
687 	else
688 		aprint_normal("%s: firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n",
689 		    amr->amr_dv.dv_xname, aa->aa_firmware, aa->aa_bios,
690 		    aa->aa_memorysize);
691 
692 	amr->amr_maxqueuecnt = aa->aa_maxio;
693 
694 	/*
695 	 * Record state of logical drives.
696 	 */
697 	if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) {
698 		aprint_error("%s: adjust AMR_MAX_UNITS to %d (currently %d)\n",
699 		    amr->amr_dv.dv_xname, ae->ae_ldrv.al_numdrives,
700 		    AMR_MAX_UNITS);
701 		amr->amr_numdrives = AMR_MAX_UNITS;
702 	} else
703 		amr->amr_numdrives = ae->ae_ldrv.al_numdrives;
704 
705 	for (i = 0; i < AMR_MAX_UNITS; i++) {
706 		amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]);
707 		amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i];
708 		amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i];
709 	}
710 
711 	return (0);
712 }
713 
714 /*
715  * Flush the internal cache on each configured controller.  Called at
716  * shutdown time.
717  */
718 void
719 amr_shutdown(void *cookie)
720 {
721         extern struct cfdriver amr_cd;
722 	struct amr_softc *amr;
723 	struct amr_ccb *ac;
724 	int i, rv, s;
725 
726 	for (i = 0; i < amr_cd.cd_ndevs; i++) {
727 		if ((amr = device_lookup(&amr_cd, i)) == NULL)
728 			continue;
729 
730 		if ((rv = amr_ccb_alloc(amr, &ac)) == 0) {
731 			ac->ac_cmd.mb_command = AMR_CMD_FLUSH;
732 			s = splbio();
733 			rv = amr_ccb_poll(amr, ac, 30000);
734 			splx(s);
735 			amr_ccb_free(amr, ac);
736 		}
737 		if (rv != 0)
738 			printf("%s: unable to flush cache (%d)\n",
739 			    amr->amr_dv.dv_xname, rv);
740 	}
741 }
742 
743 /*
744  * Interrupt service routine.
745  */
746 int
747 amr_intr(void *cookie)
748 {
749 	struct amr_softc *amr;
750 	struct amr_ccb *ac;
751 	struct amr_mailbox_resp mbox;
752 	u_int i, forus, idx;
753 
754 	amr = cookie;
755 	forus = 0;
756 
757 	while ((*amr->amr_get_work)(amr, &mbox) == 0) {
758 		/* Iterate over completed commands in this result. */
759 		for (i = 0; i < mbox.mb_nstatus; i++) {
760 			idx = mbox.mb_completed[i] - 1;
761 			ac = amr->amr_ccbs + idx;
762 
763 			if (idx >= amr->amr_maxqueuecnt) {
764 				printf("%s: bad status (bogus ID: %u=%u)\n",
765 				    amr->amr_dv.dv_xname, i, idx);
766 				continue;
767 			}
768 
769 			if ((ac->ac_flags & AC_ACTIVE) == 0) {
770 				printf("%s: bad status (not active; 0x04%x)\n",
771 				    amr->amr_dv.dv_xname, ac->ac_flags);
772 				continue;
773 			}
774 
775 			ac->ac_status = mbox.mb_status;
776 			ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) |
777 			    AC_COMPLETE;
778 			TAILQ_REMOVE(&amr->amr_ccb_active, ac, ac_chain.tailq);
779 
780 			if ((ac->ac_flags & AC_MOAN) != 0)
781 				printf("%s: ccb %d completed\n",
782 				    amr->amr_dv.dv_xname, ac->ac_ident);
783 
784 			/* Pass notification to upper layers. */
785 			if (ac->ac_handler != NULL)
786 				(*ac->ac_handler)(ac);
787 			else
788 				wakeup(ac);
789 		}
790 		forus = 1;
791 	}
792 
793 	if (forus)
794 		amr_ccb_enqueue(amr, NULL);
795 
796 	return (forus);
797 }
798 
799 /*
800  * Create the watchdog thread.
801  */
802 void
803 amr_thread_create(void *cookie)
804 {
805 	struct amr_softc *amr;
806 	int rv;
807 
808 	amr = cookie;
809 
810 	if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
811 		amr->amr_flags ^= AMRF_THREAD_EXIT;
812 		wakeup(&amr->amr_flags);
813 		return;
814 	}
815 
816 	rv = kthread_create1(amr_thread, amr, &amr->amr_thread, "%s",
817 	    amr->amr_dv.dv_xname);
818  	if (rv != 0)
819 		aprint_error("%s: unable to create thread (%d)",
820  		    amr->amr_dv.dv_xname, rv);
821  	else
822  		amr->amr_flags |= AMRF_THREAD;
823 }
824 
825 /*
826  * Watchdog thread.
827  */
828 void
829 amr_thread(void *cookie)
830 {
831 	struct amr_softc *amr;
832 	struct amr_ccb *ac;
833 	struct amr_logdrive *al;
834 	struct amr_enquiry *ae;
835 	time_t curtime;
836 	int rv, i, s;
837 
838 	amr = cookie;
839 	ae = amr->amr_enqbuf;
840 
841 	for (;;) {
842 		tsleep(amr_thread, PWAIT, "amrwdog", AMR_WDOG_TICKS);
843 
844 		if ((amr->amr_flags & AMRF_THREAD_EXIT) != 0) {
845 			amr->amr_flags ^= AMRF_THREAD_EXIT;
846 			wakeup(&amr->amr_flags);
847 			kthread_exit(0);
848 		}
849 
850 		s = splbio();
851 		amr_intr(cookie);
852 		curtime = (time_t)mono_time.tv_sec;
853 		if ((ac = TAILQ_FIRST(&amr->amr_ccb_active)) != NULL) {
854 			if (ac->ac_start_time + AMR_TIMEOUT > curtime)
855 				break;
856 			if ((ac->ac_flags & AC_MOAN) == 0) {
857 				printf("%s: ccb %d timed out; mailbox:\n",
858 				    amr->amr_dv.dv_xname, ac->ac_ident);
859 				amr_ccb_dump(amr, ac);
860 				ac->ac_flags |= AC_MOAN;
861 			}
862 		}
863 		splx(s);
864 
865 		if ((rv = amr_ccb_alloc(amr, &ac)) != 0) {
866 			printf("%s: ccb_alloc failed (%d)\n",
867  			    amr->amr_dv.dv_xname, rv);
868 			continue;
869 		}
870 
871 		ac->ac_cmd.mb_command = AMR_CMD_ENQUIRY;
872 
873 		rv = amr_ccb_map(amr, ac, amr->amr_enqbuf,
874 		    AMR_ENQUIRY_BUFSIZE, 0);
875 		if (rv != 0) {
876 			printf("%s: ccb_map failed (%d)\n",
877  			    amr->amr_dv.dv_xname, rv);
878 			amr_ccb_free(amr, ac);
879 			continue;
880 		}
881 
882 		rv = amr_ccb_wait(amr, ac);
883 		amr_ccb_unmap(amr, ac);
884 		if (rv != 0) {
885 			printf("%s: enquiry failed (st=%d)\n",
886  			    amr->amr_dv.dv_xname, ac->ac_status);
887 			continue;
888 		}
889 		amr_ccb_free(amr, ac);
890 
891 		al = amr->amr_drive;
892 		for (i = 0; i < AMR_MAX_UNITS; i++, al++) {
893 			if (al->al_dv == NULL)
894 				continue;
895 			if (al->al_state == ae->ae_ldrv.al_state[i])
896 				continue;
897 
898 			printf("%s: state changed: %s -> %s\n",
899 			    al->al_dv->dv_xname,
900 			    amr_drive_state(al->al_state, NULL),
901 			    amr_drive_state(ae->ae_ldrv.al_state[i], NULL));
902 
903 			al->al_state = ae->ae_ldrv.al_state[i];
904 		}
905 	}
906 }
907 
908 /*
909  * Return a text description of a logical drive's current state.
910  */
911 const char *
912 amr_drive_state(int state, int *happy)
913 {
914 	const char *str;
915 
916 	state = AMR_DRV_CURSTATE(state);
917 	if (state >= sizeof(amr_dstate) / sizeof(amr_dstate[0])) {
918 		if (happy)
919 			*happy = 1;
920 		str = "status unknown";
921 	} else {
922 		if (happy)
923 			*happy = amr_dstate[state].ds_happy;
924 		str = amr_dstate[state].ds_descr;
925 	}
926 
927 	return (str);
928 }
929 
930 /*
931  * Run a generic enquiry-style command.
932  */
933 void *
934 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub,
935 	    u_int8_t cmdqual, void *buf)
936 {
937 	struct amr_ccb *ac;
938 	u_int8_t *mb;
939 	int rv;
940 
941 	if (amr_ccb_alloc(amr, &ac) != 0)
942 		return (NULL);
943 
944 	/* Build the command proper. */
945 	mb = (u_int8_t *)&ac->ac_cmd;
946 	mb[0] = cmd;
947 	mb[2] = cmdsub;
948 	mb[3] = cmdqual;
949 
950 	rv = amr_ccb_map(amr, ac, buf, AMR_ENQUIRY_BUFSIZE, 0);
951 	if (rv == 0) {
952 		rv = amr_ccb_poll(amr, ac, 2000);
953 		amr_ccb_unmap(amr, ac);
954 	}
955 	amr_ccb_free(amr, ac);
956 
957 	return (rv ? NULL : buf);
958 }
959 
960 /*
961  * Allocate and initialise a CCB.
962  */
963 int
964 amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp)
965 {
966 	int s;
967 
968 	s = splbio();
969 	if ((*acp = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) {
970 		splx(s);
971 		return (EAGAIN);
972 	}
973 	SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist);
974 	splx(s);
975 
976 	return (0);
977 }
978 
979 /*
980  * Free a CCB.
981  */
982 void
983 amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac)
984 {
985 	int s;
986 
987 	memset(&ac->ac_cmd, 0, sizeof(ac->ac_cmd));
988 	ac->ac_cmd.mb_ident = ac->ac_ident + 1;
989 	ac->ac_cmd.mb_busy = 1;
990 	ac->ac_handler = NULL;
991 	ac->ac_flags = 0;
992 
993 	s = splbio();
994 	SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist);
995 	splx(s);
996 }
997 
998 /*
999  * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
1000  * the order that they were enqueued and try to submit their command blocks
1001  * to the controller for execution.
1002  */
1003 void
1004 amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac)
1005 {
1006 	int s;
1007 
1008 	s = splbio();
1009 
1010 	if (ac != NULL)
1011 		SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq);
1012 
1013 	while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) {
1014 		if ((*amr->amr_submit)(amr, ac) != 0)
1015 			break;
1016 		SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac_chain.simpleq);
1017 		TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);
1018 	}
1019 
1020 	splx(s);
1021 }
1022 
/*
 * Map the specified CCB's data buffer onto the bus, and fill the
 * scatter-gather list.
 *
 * `out' is non-zero for a host-to-controller (write) transfer.  Returns
 * zero on success, or the bus_dmamap_load() error code on failure.
 */
int
amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size,
	    int out)
{
	struct amr_sgentry *sge;
	struct amr_mailbox_cmd *mb;
	int nsegs, i, rv, sgloff;
	bus_dmamap_t xfer;

	xfer = ac->ac_xfer_map;

	rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT);
	if (rv != 0)
		return (rv);

	mb = &ac->ac_cmd;
	ac->ac_xfer_size = size;
	ac->ac_flags |= (out ? AC_XFER_OUT : AC_XFER_IN);
	/* Each CCB owns a fixed slot in the shared S/G list area. */
	sgloff = AMR_SGL_SIZE * ac->ac_ident;

	/* We don't need to use a scatter/gather list for just 1 segment. */
	nsegs = xfer->dm_nsegs;
	if (nsegs == 1) {
		mb->mb_nsgelem = 0;
		mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr);
		ac->ac_flags |= AC_NOSGL;
	} else {
		mb->mb_nsgelem = nsegs;
		mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff);

		/* Copy the segment list, little-endian, into our S/G slot. */
		sge = (struct amr_sgentry *)((caddr_t)amr->amr_sgls + sgloff);
		for (i = 0; i < nsegs; i++, sge++) {
			sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr);
			sge->sge_count = htole32(xfer->dm_segs[i].ds_len);
		}
	}

	/* Synchronise the data buffer for the upcoming DMA. */
	bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* ... and the S/G list itself, which the controller will read. */
	if ((ac->ac_flags & AC_NOSGL) == 0)
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff,
		    AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}
1074 
/*
 * Unmap the specified CCB's data buffer.
 *
 * Undoes amr_ccb_map(): completes the DMA syncs in the direction the
 * transfer ran, then unloads the transfer map.
 */
void
amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac)
{

	/* If a scatter/gather list was used, finish its DMA write. */
	if ((ac->ac_flags & AC_NOSGL) == 0)
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap,
		    AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE,
		    BUS_DMASYNC_POSTWRITE);
	/* Sync the data buffer according to the recorded direction flag. */
	bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size,
	    (ac->ac_flags & AC_XFER_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map);
}
1091 
/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout or error.  Must be called with interrupts blocked.
 *
 * `timo' is the timeout in milliseconds (polled in 100us steps).
 */
int
amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo)
{
	int rv;

	if ((rv = (*amr->amr_submit)(amr, ac)) != 0)
		return (rv);
	TAILQ_INSERT_TAIL(&amr->amr_ccb_active, ac, ac_chain.tailq);

	/* amr_intr() processes completions and sets AC_COMPLETE. */
	for (timo *= 10; timo != 0; timo--) {
		amr_intr(amr);
		if ((ac->ac_flags & AC_COMPLETE) != 0)
			break;
		DELAY(100);
	}

	/* EIO on timeout, or if the controller reported a bad status. */
	return (timo == 0 || ac->ac_status != 0 ? EIO : 0);
}
1114 
/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on error.
 */
int
amr_ccb_wait(struct amr_softc *amr, struct amr_ccb *ac)
{
	int s;

	/*
	 * Hold splbio across both the enqueue and the tsleep() so the
	 * completion interrupt (which presumably wakeup()s on `ac' --
	 * handler lives elsewhere in this file) cannot fire before we
	 * are asleep; a lost wakeup would hang here forever.
	 */
	s = splbio();
	amr_ccb_enqueue(amr, ac);
	tsleep(ac, PRIBIO, "amrcmd", 0);
	splx(s);

	return (ac->ac_status != 0 ? EIO : 0);
}
1131 
1132 /*
1133  * Wait for the mailbox to become available.
1134  */
1135 int
1136 amr_mbox_wait(struct amr_softc *amr)
1137 {
1138 	int timo;
1139 
1140 	for (timo = 10000; timo != 0; timo--) {
1141 		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
1142 		    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
1143 		if (amr->amr_mbox->mb_cmd.mb_busy == 0)
1144 			break;
1145 		DELAY(100);
1146 	}
1147 
1148 	if (timo == 0)
1149 		printf("%s: controller wedged\n", amr->amr_dv.dv_xname);
1150 
1151 	return (timo != 0 ? 0 : EAGAIN);
1152 }
1153 
/*
 * Tell the controller that the mailbox contains a valid command.  Must be
 * called with interrupts blocked.
 *
 * Quartz (memory-mapped doorbell) variant.  Returns EAGAIN if the
 * mailbox or inbound doorbell is still busy; 0 once posted.
 */
int
amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
{
	u_int32_t v;

	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	/* Push our writes out, then pull in the controller's busy flag. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* The inbound doorbell must be idle before we can post. */
	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & (AMR_QIDB_SUBMIT | AMR_QIDB_ACK)) != 0) {
		/* Back out: clear the busy flag and re-sync the mailbox. */
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the CCB's command block into the shared mailbox. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	ac->ac_start_time = (time_t)mono_time.tv_sec;
	ac->ac_flags |= AC_ACTIVE;
	/* Ring the doorbell with the mailbox physical address. */
	amr_outl(amr, AMR_QREG_IDB, amr->amr_mbox_paddr | AMR_QIDB_SUBMIT);
	return (0);
}
1192 
/*
 * Tell the controller that the mailbox contains a valid command.  Must be
 * called with interrupts blocked.  Standard (I/O-port) variant of
 * amr_quartz_submit(); same protocol, different doorbell registers.
 */
int
amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
{

	amr->amr_mbox->mb_poll = 0;
	amr->amr_mbox->mb_ack = 0;
	/* Push our writes out, then pull in the controller's busy flag. */
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);
	if (amr->amr_mbox->mb_cmd.mb_busy != 0)
		return (EAGAIN);

	/* The controller's mailbox-busy latch must be clear. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0) {
		/* Back out: clear the busy flag and re-sync the mailbox. */
		amr->amr_mbox->mb_cmd.mb_busy = 0;
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
		    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);
		return (EAGAIN);
	}

	/* Copy the CCB's command block into the shared mailbox. */
	amr->amr_mbox->mb_segment = 0;
	memcpy(&amr->amr_mbox->mb_cmd, &ac->ac_cmd, sizeof(ac->ac_cmd));
	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREWRITE);

	ac->ac_start_time = (time_t)mono_time.tv_sec;
	ac->ac_flags |= AC_ACTIVE;
	/* Tell the controller to fetch the posted command. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
	return (0);
}
1225 
/*
 * Claim any work that the controller has completed; acknowledge completion,
 * save details of the completion in (mbsave).  Must be called with
 * interrupts blocked.
 *
 * Returns -1 when there is nothing to claim, 0 otherwise.
 */
int
amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{

	/* Work waiting for us? */
	if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
		return (-1);

	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save the mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/*
	 * Ack the interrupt and mailbox transfer.  The +16 offset
	 * presumably addresses the response area within the mailbox --
	 * verify against the struct amr_mailbox layout.
	 */
	amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
	amr_outl(amr, AMR_QREG_IDB, (amr->amr_mbox_paddr+16) | AMR_QIDB_ACK);

	/*
	 * This waits for the controller to notice that we've taken the
	 * command from it.  It's very inefficient, and we shouldn't do it,
	 * but if we remove this code, we stop completing commands under
	 * load.
	 *
	 * Peter J says we shouldn't do this.  The documentation says we
	 * should.  Who is right?
	 */
	while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
		;

	return (0);
}
1266 
/*
 * Standard (I/O-port) variant of amr_quartz_get_work(): claim completed
 * work, copy the response mailbox into (mbsave) and acknowledge it.
 * Must be called with interrupts blocked.  Returns -1 when there is
 * nothing to claim, 0 otherwise.
 */
int
amr_std_get_work(struct amr_softc *amr, struct amr_mailbox_resp *mbsave)
{
	u_int8_t istat;

	/* Check for valid interrupt status. */
	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
		return (-1);

	/* Ack the interrupt. */
	amr_outb(amr, AMR_SREG_INTR, istat);

	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_POSTREAD);

	/* Save mailbox, which contains a list of completed commands. */
	memcpy(mbsave, &amr->amr_mbox->mb_resp, sizeof(*mbsave));

	bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 0,
	    sizeof(struct amr_mailbox), BUS_DMASYNC_PREREAD);

	/* Ack mailbox transfer. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);

	return (0);
}
1293 
1294 void
1295 amr_ccb_dump(struct amr_softc *amr, struct amr_ccb *ac)
1296 {
1297 	int i;
1298 
1299 	printf("%s: ", amr->amr_dv.dv_xname);
1300 	for (i = 0; i < 4; i++)
1301 		printf("%08x ", ((u_int32_t *)&ac->ac_cmd)[i]);
1302 	printf("\n");
1303 }
1304