1 /*	$NetBSD: ixp425_qmgr.c,v 1.7 2011/07/01 20:32:51 dyoung Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Sam Leffler, Errno Consulting
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
15  *    redistribution must be conditioned upon including a substantially
16  *    similar Disclaimer requirement for further binary redistribution.
17  *
18  * NO WARRANTY
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
22  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
23  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
24  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
27  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
29  * THE POSSIBILITY OF SUCH DAMAGES.
30  */
31 
32 /*-
33  * Copyright (c) 2001-2005, Intel Corporation.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the Intel Corporation nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
50  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60 */
61 #include <sys/cdefs.h>
62 /*__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/ixp425_qmgr.c,v 1.1 2006/11/19 23:55:23 sam Exp $");*/
63 __KERNEL_RCSID(0, "$NetBSD: ixp425_qmgr.c,v 1.7 2011/07/01 20:32:51 dyoung Exp $");
64 
65 /*
66  * Intel XScale Queue Manager support.
67  *
68  * Each IXP4XXX device has a hardware block that implements a priority
69  * queue manager that is shared between the XScale cpu and the backend
70  * devices (such as the NPE).  Queues are accessed by reading/writing
71  * special memory locations.  The queue contents are mapped into a shared
72  * SRAM region with entries managed in a circular buffer.  The XScale
73  * processor can receive interrupts based on queue contents (a condition
74  * code determines when interrupts should be delivered).
75  *
76  * The code here basically replaces the qmgr class in the Intel Access
77  * Library (IAL).
78  */
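/*
 * A minimal usage sketch (illustrative only; the queue id, depth,
 * watermarks, callback and softc argument are hypothetical, not taken
 * from a real NPE driver):
 *
 *	static void rxcb(int qid, void *arg) { ... drain via ixpqmgr_qread ... }
 *
 *	ixpqmgr_qconfig(6, 128, 0, 2, IX_QMGR_Q_SOURCE_ID_NOT_E, rxcb, sc);
 *	ixpqmgr_qwrite(6, paddr);	(hand one entry to the other side)
 */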
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/time.h>
83 #include <sys/malloc.h>
84 #include <sys/resource.h>
85 
86 #include <sys/bus.h>
87 #include <machine/cpu.h>
88 #include <machine/intr.h>
89 
90 #include <arm/xscale/ixp425reg.h>
91 #include <arm/xscale/ixp425var.h>
92 
93 #include <arm/xscale/ixp425_qmgr.h>
94 
95 /*
96  * State per AQM hw queue.
97  * This structure holds q configuration and dispatch state.
98  */
99 struct qmgrInfo {
100 	int		qSizeInWords;		/* queue size in words */
101 
102 	uint32_t	qOflowStatBitMask;	/* overflow status mask */
103 	int		qWriteCount;		/* queue write count */
104 
105 	bus_size_t	qAccRegAddr;		/* access register */
106 	bus_size_t	qUOStatRegAddr;		/* status register */
107 	bus_size_t	qConfigRegAddr;		/* config register */
108 	int		qSizeInEntries;		/* queue size in entries */
109 
110 	uint32_t	qUflowStatBitMask;	/* underflow status mask */
111 	int		qReadCount;		/* queue read count */
112 
113 	/* XXX union */
114 	uint32_t	qStatRegAddr;
115 	uint32_t	qStatBitsOffset;
116 	uint32_t	qStat0BitMask;
117 	uint32_t	qStat1BitMask;
118 
119 	uint32_t	intRegCheckMask;	/* interrupt reg check mask */
120 	void		(*cb)(int, void *);	/* callback function */
121 	void		*cbarg;			/* callback argument */
122 	int 		priority;		/* dispatch priority */
123 #if 0
124 	/* NB: needed only for A0 parts */
125 	u_int		statusWordOffset;	/* status word offset */
126 	uint32_t	statusMask;             /* status mask */
127 	uint32_t	statusCheckValue;	/* status check value */
128 #endif
129 };
130 
131 struct ixpqmgr_softc {
132 #ifdef __FreeBSD__
133 	device_t		sc_dev;
134 	bus_space_tag_t		sc_iot;
135 	bus_space_handle_t	sc_ioh;
136 	struct resource		*sc_irq;	/* IRQ resource */
137 	int			sc_rid;		/* resource id for irq */
138 	void			*sc_ih;		/* interrupt handler */
139 #else
140 	bus_space_tag_t		sc_iot;
141 	bus_space_handle_t	sc_ioh;
142 	void			*sc_ih[2];	/* interrupt handler */
143 #endif
144 
145 	struct qmgrInfo		qinfo[IX_QMGR_MAX_NUM_QUEUES];
146 	/*
147 	 * This array contains a list of queue identifiers ordered by
148 	 * priority. The table is split logically between queue
149 	 * identifiers 0-31 and 32-63.  To optimize lookups bit masks
150 	 * are kept for the first-32 and last-32 q's.  When the
151 	 * table needs to be rebuilt, set rebuildTable and the rebuild
152 	 * will happen after the next interrupt.
153 	 */
154 	int			priorityTable[IX_QMGR_MAX_NUM_QUEUES];
155 	uint32_t		lowPriorityTableFirstHalfMask;
156 	uint32_t		uppPriorityTableFirstHalfMask;
157 	int			rebuildTable;	/* rebuild priorityTable */
158 
159 	uint32_t		aqmFreeSramAddress;	/* SRAM free space */
160 };
161 
162 static int qmgr_debug = 0;
163 #define	DPRINTF(dev, fmt, ...) do {					\
164 	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
165 } while (0)
166 #define	DPRINTFn(n, dev, fmt, ...) do {					\
167 	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
168 } while (0)
169 
170 static struct ixpqmgr_softc *ixpqmgr_sc = NULL;
171 
172 static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
173 static int ixpqmgr_intr(void *);
174 
175 static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
176 static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
177 static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
178 static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
179 static void aqm_reset(struct ixpqmgr_softc *sc);
180 
181 static void
182 dummyCallback(int qId, void *arg)
183 {
184 	/* XXX complain */
185 }
186 
187 static uint32_t
188 aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
189 {
190 	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
191 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
192 }
193 
194 static void
195 aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
196 {
197 	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
198 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
199 }
200 
201 #ifdef __FreeBSD__
202 static int
203 ixpqmgr_probe(device_t dev)
204 {
205 	device_set_desc(dev, "IXP425 Q-Manager");
206 	return 0;
207 }
208 #endif
209 
210 #ifdef __FreeBSD__
211 static void
212 ixpqmgr_attach(device_t dev)
213 #else
214 void *
215 ixpqmgr_init(bus_space_tag_t iot)
216 #endif
217 {
218 #ifdef __FreeBSD__
219 	struct ixpqmgr_softc *sc = device_get_softc(dev);
220 	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
221 #else
222 	struct ixpqmgr_softc *sc;
223 #endif
224 	int i;
225 
226 #ifdef __FreeBSD__
227 	ixpqmgr_sc = sc;
228 
229 	sc->sc_dev = dev;
230 	sc->sc_iot = sa->sc_iot;
231 #else
232 	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO);
233 	if (sc == NULL)
234 		return (NULL);
235 
236 	sc->sc_iot = iot;
237 #endif
238 
239 	if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
240 	    0, &sc->sc_ioh))
241 		panic("%s: Cannot map registers", __func__);
242 
243 #ifdef __FreeBSD__
244 	/* NB: we only use the lower 32 q's */
245 	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid,
246 	    IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE);
247 	if (!sc->sc_irq)
248 		panic("Unable to allocate the qmgr irqs.\n");
249 	/* XXX could be a source of entropy */
250 	bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
251 		ixpqmgr_intr, NULL, &sc->sc_ih);
252 #else
253 	ixpqmgr_sc = sc;
254 	sc->sc_ih[0] = ixp425_intr_establish(IXP425_INT_QUE1_32, IPL_NET,
255 	    ixpqmgr_intr, sc);
256 	if (sc->sc_ih[0] == NULL) {
257 		ixpqmgr_sc = NULL;
258 		free(sc, M_DEVBUF);
259 		return (NULL);
260 	}
261 	sc->sc_ih[1] = ixp425_intr_establish(IXP425_INT_QUE33_64, IPL_NET,
262 	    ixpqmgr_intr, sc);
263 	if (sc->sc_ih[1] == NULL) {
264 		ixp425_intr_disestablish(sc->sc_ih[0]);
265 		ixpqmgr_sc = NULL;
266 		free(sc, M_DEVBUF);
267 		return (NULL);
268 	}
269 #endif
270 
271 	/* NB: softc is pre-zero'd */
272 	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
273 	    struct qmgrInfo *qi = &sc->qinfo[i];
274 
275 	    qi->cb = dummyCallback;
276 	    qi->priority = IX_QMGR_Q_PRIORITY_0;	/* default priority */
277 	    /*
278 	     * There are two interrupt registers, 32 bits each. One
279 	     * for the lower queues (0-31) and one for the upper
280 	     * queues (32-63).  Therefore we need to mod by 32, i.e.
281 	     * the minimum upper queue identifier.
282 	     */
283 	    qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
284 
285 	    /*
286 	     * Register addresses and bit masks are calculated and
287 	     * stored here to optimize QRead, QWrite and QStatusGet
288 	     * functions.
289 	     */
290 
291 	    /* AQM Queue access reg addresses, per queue */
292 	    qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
294 	    qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
295 
296 	    /* AQM Queue lower-group (0-31), only */
297 	    if (i < IX_QMGR_MIN_QUEUPP_QID) {
298 		/* AQM Q underflow/overflow status reg address, per queue */
299 		qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
300 		    ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
301 		     sizeof(uint32_t));
302 
303 		/* AQM Q underflow status bit masks for status reg per queue */
304 		qi->qUflowStatBitMask =
305 		    (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
306 		    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
307 		     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
308 
309 		/* AQM Q overflow status bit masks for status reg, per queue */
310 		qi->qOflowStatBitMask =
311 		    (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
312 		    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
313 		     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
314 
315 		/* AQM Q lower-group (0-31) status reg addresses, per queue */
316 		qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
317 		    ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
318 		     sizeof(uint32_t));
319 
320 		/* AQM Q lower-group (0-31) status register bit offset */
321 		qi->qStatBitsOffset =
322 		    (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
323 		    (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
324 	    } else { /* AQM Q upper-group (32-63), only */
325 		qi->qUOStatRegAddr = 0;		/* XXX */
326 
327 		/* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
328 		qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
329 
330 		/* AQM Q upper-group (32-63) Full status register bitmasks */
331 		qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
332 	    }
333 	}
334 
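	/*
	 * NB: aqmFreeSramAddress is a byte offset from the start of AQM
	 * SRAM (0x2000 within the register block); the first 0x100 bytes
	 * hold the 64 queue configuration words, which is why usable
	 * buffer space begins at 0x2100.
	 */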
335 	sc->aqmFreeSramAddress = 0x100;	/* Q buffer space starts at 0x2100 */
336 
337 	ixpqmgr_rebuild(sc);		/* build initial priority table */
338 	aqm_reset(sc);			/* reset h/w */
339 
340 	return (sc);
341 }
342 
343 #ifdef __FreeBSD__
344 static void
345 ixpqmgr_detach(device_t dev)
346 {
347 	struct ixpqmgr_softc *sc = device_get_softc(dev);
348 
349 	aqm_reset(sc);		/* disable interrupts */
350 	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
351 	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq);
352 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
353 }
354 #endif
355 
356 int
357 ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
358     void (*cb)(int, void *), void *cbarg)
359 {
360 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
361 	struct qmgrInfo *qi = &sc->qinfo[qId];
362 
363 	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
364 	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);
365 
366 	/* NB: entry size is always 1 */
367 	qi->qSizeInWords = qEntries;
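	/*
	 * NB: the AQM only supports buffer sizes of 16, 32, 64 or 128
	 * words, so qEntries is expected to be one of those values (see
	 * toAqmBufferSize() below).
	 */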
368 
369 	qi->qReadCount = 0;
370 	qi->qWriteCount = 0;
371 	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */
372 
373 	if (cb == NULL) {
374 	    /* Reset to dummy callback */
375 	    qi->cb = dummyCallback;
376 	    qi->cbarg = 0;
377 	} else {
378 	    qi->cb = cb;
379 	    qi->cbarg = cbarg;
380 	}
381 
382 	/* Write the config register; NB must be AFTER qinfo setup */
383 	aqm_qcfg(sc, qId, ne, nf);
384 	/*
385 	 * Account for space just allocated to queue.
386 	 */
387 	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));
388 
389 	/* Set the interrupt source if this queue is in the range 0-31 */
390 	if (qId < IX_QMGR_MIN_QUEUPP_QID)
391 	    aqm_srcsel_write(sc, qId, srcSel);
392 
393 	if (cb != NULL)				/* Enable the interrupt */
394 	    aqm_int_enable(sc, qId);
395 
396 	sc->rebuildTable = true;
397 
398 	return 0;		/* XXX */
399 }
400 
401 int
402 ixpqmgr_qwrite(int qId, uint32_t entry)
403 {
404 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
405 	struct qmgrInfo *qi = &sc->qinfo[qId];
406 
407 	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
408 	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);
409 
410 	/* write the entry */
411 	aqm_reg_write(sc, qi->qAccRegAddr, entry);
412 
413 	/* NB: overflow is available for lower queues only */
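	/*
	 * qWriteCount is a software shadow of the number of occupied
	 * entries; the hardware overflow status is consulted only when
	 * the shadow counter suggests the queue may have just filled,
	 * which avoids a status register read on every write.
	 */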
414 	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
415 	    int qSize = qi->qSizeInEntries;
416 	    /*
417 	     * Increment the current number of entries in the queue
418 	     * and check for overflow.
419 	     */
420 	    if (qi->qWriteCount++ == qSize) {	/* check for overflow */
421 		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
422 		int qPtrs;
423 
424 		/*
425 		 * Read the status twice because the status may
426 		 * not be immediately ready after the write operation
427 		 */
428 		if ((status & qi->qOflowStatBitMask) ||
429 		    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
430 		    /*
431 		     * The queue is full, clear the overflow status bit if set.
432 		     */
433 		    aqm_reg_write(sc, qi->qUOStatRegAddr,
434 			status & ~qi->qOflowStatBitMask);
435 		    qi->qWriteCount = qSize;
436 		    DPRINTFn(5, sc->sc_dev,
437 			"%s(%u, 0x%x) Q full, overflow status cleared\n",
438 			__func__, qId, entry);
439 		    return ENOSPC;
440 		}
441 		/*
442 		 * No overflow occurred: someone is draining the queue,
443 		 * so resynchronize the write counter from the current
444 		 * number of entries in the queue.
445 		 */
446 
447 		/* calculate number of words in q */
448 		qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
449 		DPRINTFn(2, sc->sc_dev,
450 		    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
451 		    __func__, qId, entry, qPtrs);
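		/*
		 * The low bits of QUECONFIG hold the hardware queue
		 * pointers: write pointer in bits [6:0], read pointer in
		 * bits [13:7].  (write - read) mod 128 is the number of
		 * words currently in the queue.
		 */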
452 		qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;
453 
454 		if (qPtrs == 0) {
455 		    /*
456 		     * The queue may be full at the time of the
457 		     * snapshot. Next access will check
458 		     * the overflow status again.
459 		     */
460 		    qi->qWriteCount = qSize;
461 		} else {
462 		    /* convert the number of words to a number of entries */
463 		    qi->qWriteCount = qPtrs & (qSize - 1);
464 		}
465 	    }
466 	}
467 	return 0;
468 }
469 
470 int
471 ixpqmgr_qread(int qId, uint32_t *entry)
472 {
473 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
474 	struct qmgrInfo *qi = &sc->qinfo[qId];
475 	bus_size_t off = qi->qAccRegAddr;
476 
477 	*entry = aqm_reg_read(sc, off);
478 
479 	/*
480 	 * Reset the current read count: the next call to the read function
481 	 * will force an underflow status check.
482 	 */
483 	qi->qReadCount = 0;
484 
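	/*
	 * NB: the AQM returns 0 when an empty queue is read, so a zero
	 * entry is what triggers the underflow status check below.
	 */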
485 	/* Check if underflow occurred on the read */
486 	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
487 	    /* get the queue status */
488 	    uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
489 
490 	    if (status & qi->qUflowStatBitMask) { /* clear underflow status */
491 		aqm_reg_write(sc, qi->qUOStatRegAddr,
492 		    status &~ qi->qUflowStatBitMask);
493 		return ENOSPC;
494 	    }
495 	}
496 	return 0;
497 }
498 
499 int
500 ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
501 {
502 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
503 	struct qmgrInfo *qi = &sc->qinfo[qId];
504 	uint32_t entry;
505 	bus_size_t off = qi->qAccRegAddr;
506 
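	/*
	 * Burst-read up to n entries; each read of the access register
	 * pops one entry.  A zero entry means the queue went empty, so
	 * stop early and let the underflow check below sort it out.
	 */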
507 	entry = aqm_reg_read(sc, off);
508 	while (--n) {
509 	    if (entry == 0) {
510 		/* if we read a NULL entry, stop. We have underflowed */
511 		break;
512 	    }
513 	    *p++ = entry;	/* store */
514 	    entry = aqm_reg_read(sc, off);
515 	}
516 	*p = entry;
517 
518 	/*
519 	 * Reset the current read count: the next call to the read function
520 	 * will force an underflow status check.
521 	 */
522 	qi->qReadCount = 0;
523 
524 	/* Check if underflow occurred on the read */
525 	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
526 	    /* get the queue status */
527 	    uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
528 
529 	    if (status & qi->qUflowStatBitMask) { /* clear underflow status */
530 		aqm_reg_write(sc, qi->qUOStatRegAddr,
531 		    status &~ qi->qUflowStatBitMask);
532 		return ENOSPC;
533 	    }
534 	}
535 	return 0;
536 }
537 
538 uint32_t
539 ixpqmgr_getqstatus(int qId)
540 {
541 #define	QLOWSTATMASK \
542     ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
543 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
544 	const struct qmgrInfo *qi = &sc->qinfo[qId];
545 	uint32_t status;
546 
547 	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
548 	    /* read the status of a queue in the range 0-31 */
549 	    status = aqm_reg_read(sc, qi->qStatRegAddr);
550 
551 	    /* mask out the status bits relevant only to this queue */
552 	    status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
553 	} else { /* read status of a queue in the range 32-63 */
554 	    status = 0;
555 	    if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
556 		status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
557 	    if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
558 		status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
559 	}
560 	return status;
561 #undef QLOWSTATMASK
562 }
563 
564 uint32_t
565 ixpqmgr_getqconfig(int qId)
566 {
567 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
568 
569 	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
570 }
571 
572 void
573 ixpqmgr_dump(void)
574 {
575 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
576 	int i, a;
577 
578 	/* status registers */
579 	printf("0x%04x: %08x %08x %08x %08x\n"
580 		, 0x400
581 		, aqm_reg_read(sc, 0x400)
582 		, aqm_reg_read(sc, 0x400+4)
583 		, aqm_reg_read(sc, 0x400+8)
584 		, aqm_reg_read(sc, 0x400+12)
585 	);
586 	printf("0x%04x: %08x %08x %08x %08x\n"
587 		, 0x410
588 		, aqm_reg_read(sc, 0x410)
589 		, aqm_reg_read(sc, 0x410+4)
590 		, aqm_reg_read(sc, 0x410+8)
591 		, aqm_reg_read(sc, 0x410+12)
592 	);
593 	printf("0x%04x: %08x %08x %08x %08x\n"
594 		, 0x420
595 		, aqm_reg_read(sc, 0x420)
596 		, aqm_reg_read(sc, 0x420+4)
597 		, aqm_reg_read(sc, 0x420+8)
598 		, aqm_reg_read(sc, 0x420+12)
599 	);
600 	printf("0x%04x: %08x %08x %08x %08x\n"
601 		, 0x430
602 		, aqm_reg_read(sc, 0x430)
603 		, aqm_reg_read(sc, 0x430+4)
604 		, aqm_reg_read(sc, 0x430+8)
605 		, aqm_reg_read(sc, 0x430+12)
606 	);
607 	/* q configuration registers */
608 	for (a = 0x2000; a < 0x20ff; a += 32)
609 		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
610 			, a
611 			, aqm_reg_read(sc, a)
612 			, aqm_reg_read(sc, a+4)
613 			, aqm_reg_read(sc, a+8)
614 			, aqm_reg_read(sc, a+12)
615 			, aqm_reg_read(sc, a+16)
616 			, aqm_reg_read(sc, a+20)
617 			, aqm_reg_read(sc, a+24)
618 			, aqm_reg_read(sc, a+28)
619 		);
620 	/* allocated SRAM */
621 	for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
622 		a = 0x2000 + i;
623 		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
624 			, a
625 			, aqm_reg_read(sc, a)
626 			, aqm_reg_read(sc, a+4)
627 			, aqm_reg_read(sc, a+8)
628 			, aqm_reg_read(sc, a+12)
629 			, aqm_reg_read(sc, a+16)
630 			, aqm_reg_read(sc, a+20)
631 			, aqm_reg_read(sc, a+24)
632 			, aqm_reg_read(sc, a+28)
633 		);
634 	}
635 	for (i = 0; i < 16; i++) {
636 		printf("Q[%2d] config 0x%08x status 0x%02x  "
637 		       "Q[%2d] config 0x%08x status 0x%02x\n"
638 		    , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
639 		    , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
640 		);
641 	}
642 }
643 
644 void
645 ixpqmgr_notify_enable(int qId, int srcSel)
646 {
647 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
648 #if 0
649 	/* Calculate the checkMask and checkValue for this q */
650 	aqm_calc_statuscheck(sc, qId, srcSel);
651 #endif
652 	/* Set the interrupt source if this queue is in the range 0-31 */
653 	if (qId < IX_QMGR_MIN_QUEUPP_QID)
654 	    aqm_srcsel_write(sc, qId, srcSel);
655 
656 	/* Enable the interrupt */
657 	aqm_int_enable(sc, qId);
658 }
659 
660 void
661 ixpqmgr_notify_disable(int qId)
662 {
663 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
664 
665 	aqm_int_disable(sc, qId);
666 }
667 
668 /*
669  * Rebuild the priority table used by the dispatcher.
670  */
671 static void
672 ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
673 {
674 	int q, pri;
675 	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
676 	struct qmgrInfo *qi;
677 
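	/*
	 * The rebuilt table keeps the two queue groups separate: slots
	 * 0-31 list the lower queues (0-31) and slots 32-63 the upper
	 * queues (32-63), each group ordered by ascending priority level.
	 */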
678 	sc->lowPriorityTableFirstHalfMask = 0;
679 	sc->uppPriorityTableFirstHalfMask = 0;
680 
681 	lowQuePriorityTableIndex = 0;
682 	uppQuePriorityTableIndex = 32;
683 	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
684 	    /* low priority q's */
685 	    for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
686 		qi = &sc->qinfo[q];
687 		if (qi->priority == pri) {
688 		    /*
689 		     * Build the bitmask that matches the queues placed in
690 		     * the first half (slots 0-15) of the priority table.
691 		     */
692 		    if (lowQuePriorityTableIndex < 16) {
693 			sc->lowPriorityTableFirstHalfMask |=
694 			    qi->intRegCheckMask;
695 		    }
696 		    sc->priorityTable[lowQuePriorityTableIndex++] = q;
697 		}
698 	    }
699 	    /* high priority q's */
700 	    for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
701 		qi = &sc->qinfo[q];
702 		if (qi->priority == pri) {
703 		    /*
704 		     * Build the bitmask that matches the queues placed in
705 		     * the upper table's first half (slots 32-47).
706 		     */
707 		    if (uppQuePriorityTableIndex < 48) {
708 			sc->uppPriorityTableFirstHalfMask |=
709 			    qi->intRegCheckMask;
710 		    }
711 		    sc->priorityTable[uppQuePriorityTableIndex++] = q;
712 		}
713 	    }
714 	}
715 	sc->rebuildTable = false;
716 }
717 
718 /*
719  * Count the number of leading zero bits in a word,
720  * and return the same value as the CLZ instruction.
721  * Note this is similar to the standard ffs function but
722  * it counts zeros from the MSB instead of the LSB.
723  *
724  * word (in)    return value (out)
725  * 0x80000000   0
726  * 0x40000000   1
727  * ,,,          ,,,
728  * 0x00000002   30
729  * 0x00000001   31
730  * 0x00000000   32
731  *
732  * The C version of this function is used as a replacement
733  * for systems not providing the equivalent of the CLZ
734  * assembly language instruction.
735  *
736  * Note that this version is big-endian
737  */
738 static unsigned int
739 _lzcount(uint32_t word)
740 {
741 	unsigned int lzcount = 0;
742 
743 	if (word == 0)
744 	    return 32;
745 	while ((word & 0x80000000) == 0) {
746 	    word <<= 1;
747 	    lzcount++;
748 	}
749 	return lzcount;
750 }
751 
752 static int
753 ixpqmgr_intr(void *arg)
754 {
755 	struct ixpqmgr_softc *sc = ixpqmgr_sc;
756 	uint32_t intRegVal;                /* Interrupt reg val */
757 	struct qmgrInfo *qi;
758 	int priorityTableIndex;		/* Priority table index */
759 	int qIndex;			/* Current queue being processed */
760 
761 	/* Read the interrupt register */
762 	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
763 	/* Write back to clear interrupt */
764 	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);
765 
766 	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
767 	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));
768 
769 	/* Only do work if at least one queue raised an interrupt */
770 	if (intRegVal != 0) {
771 		/* get the first queue Id from the interrupt register value */
772 		qIndex = (32 - 1) - _lzcount(intRegVal);
773 
774 		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
775 		    __func__, intRegVal, qIndex);
776 
777 		/*
778 		 * Optimize for single callback case.
779 		 */
780 		 qi = &sc->qinfo[qIndex];
781 		 if (intRegVal == qi->intRegCheckMask) {
782 		    /*
783 		     * Only 1 queue event triggered a notification.
784 		     * Call the callback function for this queue
785 		     */
786 		    qi->cb(qIndex, qi->cbarg);
787 		 } else {
788 		     /*
789 		      * The interrupt was triggered by more than one queue;
790 		      * the queue search will start from the beginning
791 		      * or the middle of the priority table.
792 		      *
793 		      * The search will end when all the bits of the interrupt
794 		      * register are cleared. There is no need to maintain
795 		      * a separate value and test it at each iteration.
796 		      */
797 		     if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
798 			 priorityTableIndex = 0;
799 		     } else {
800 			 priorityTableIndex = 16;
801 		     }
802 		     /*
803 		      * Iterate over the priority table until all the bits
804 		      * of the interrupt register are cleared.
805 		      */
806 		     do {
807 			 qIndex = sc->priorityTable[priorityTableIndex++];
808 			 if (qIndex >= IX_QMGR_MAX_NUM_QUEUES)
809 			     break;
810 			 qi = &sc->qinfo[qIndex];
811 
812 			 /* If this queue caused this interrupt to be raised */
813 			 if (intRegVal & qi->intRegCheckMask) {
814 			     /* Call the callback function for this queue */
815 			     qi->cb(qIndex, qi->cbarg);
816 			     /* Clear the interrupt register bit */
817 			     intRegVal &= ~qi->intRegCheckMask;
818 			 }
819 		      } while (intRegVal &&
820 		          priorityTableIndex < IX_QMGR_MAX_NUM_QUEUES);
821 		 }
822 	 }
823 
824 	/* Rebuild the priority table if needed */
825 	if (sc->rebuildTable)
826 	    ixpqmgr_rebuild(sc);
827 
828 	return (1);
829 }
830 
831 #if 0
832 /*
833  * Generate the parameters used to check if a Q's status matches
834  * the specified source select.  We calculate which status word
835  * to check (statusWordOffset), the value to check the status
836  * against (statusCheckValue) and the mask (statusMask) to mask
837  * out all but the bits to check in the status word.
838  */
839 static void
840 aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
841 {
842 	struct qmgrInfo *qi = &qinfo[qId];
843 	uint32_t shiftVal;
844 
845 	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
846 	    switch (srcSel) {
847 	    case IX_QMGR_Q_SOURCE_ID_E:
848 		qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
849 		qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
850 		break;
851 	    case IX_QMGR_Q_SOURCE_ID_NE:
852 		qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
853 		qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
854 		break;
855 	    case IX_QMGR_Q_SOURCE_ID_NF:
856 		qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
857 		qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
858 		break;
859 	    case IX_QMGR_Q_SOURCE_ID_F:
860 		qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
861 		qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
862 		break;
863 	    case IX_QMGR_Q_SOURCE_ID_NOT_E:
864 		qi->statusCheckValue = 0;
865 		qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
866 		break;
867 	    case IX_QMGR_Q_SOURCE_ID_NOT_NE:
868 		qi->statusCheckValue = 0;
869 		qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
870 		break;
871 	    case IX_QMGR_Q_SOURCE_ID_NOT_NF:
872 		qi->statusCheckValue = 0;
873 		qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
874 		break;
875 	    case IX_QMGR_Q_SOURCE_ID_NOT_F:
876 		qi->statusCheckValue = 0;
877 		qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
878 		break;
879 	    default:
880 		/* Should never hit */
881 		IX_OSAL_ASSERT(0);
882 		break;
883 	    }
884 
885 	    /* One nibble of status per queue so need to shift the
886 	     * check value and mask out to the correct position.
887 	     */
888 	    shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
889 		IX_QMGR_QUELOWSTAT_BITS_PER_Q;
890 
891 	    /* Calculate which status word to check from the qId,
892 	     * 8 Qs status per word
893 	     */
894 	    qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;
895 
896 	    qi->statusCheckValue <<= shiftVal;
897 	    qi->statusMask <<= shiftVal;
898 	} else {
899 	    /* One status word */
900 	    qi->statusWordOffset = 0;
901 	    /* Single bit per queue and the interrupt source is hardwired
902 	     * to NE; Qs start at 32.
903 	     */
904 	    qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
905 	    qi->statusCheckValue = qi->statusMask;
906 	}
907 }
908 #endif
909 
910 static void
911 aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
912 {
913 	bus_size_t reg;
914 	uint32_t v;
915 
916 	if (qId < IX_QMGR_MIN_QUEUPP_QID)
917 	    reg = IX_QMGR_QUEIEREG0_OFFSET;
918 	else
919 	    reg = IX_QMGR_QUEIEREG1_OFFSET;
920 	v = aqm_reg_read(sc, reg);
921 	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));
922 
923 	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
924 	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
925 }
926 
927 static void
928 aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
929 {
930 	bus_size_t reg;
931 	uint32_t v;
932 
933 	if (qId < IX_QMGR_MIN_QUEUPP_QID)
934 	    reg = IX_QMGR_QUEIEREG0_OFFSET;
935 	else
936 	    reg = IX_QMGR_QUEIEREG1_OFFSET;
937 	v = aqm_reg_read(sc, reg);
938 	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));
939 
940 	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
941 	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
942 }
943 
944 static unsigned
945 log2(unsigned n)
946 {
947 	unsigned count;
948 	/*
949 	 * N.B. this function will return 0 if supplied 0.
950 	 */
951 	for (count = 0; n/2; count++)
952 	    n /= 2;
953 	return count;
954 }
955 
956 static __inline unsigned
957 toAqmEntrySize(int entrySize)
958 {
959 	/* entrySize  1("00"),2("01"),4("10") */
960 	return log2(entrySize);
961 }
962 
963 static __inline unsigned
964 toAqmBufferSize(unsigned bufferSizeInWords)
965 {
966 	/* bufferSize 16("00"),32("01"),64("10"),128("11") */
967 	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
968 }
969 
970 static __inline unsigned
971 toAqmWatermark(int watermark)
972 {
973 	/*
974 	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
975 	 * 8("100"),16("101"),32("110"),64("111")
976 	 */
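	/*
	 * Doubling the watermark before taking log2 maps the valid values
	 * 0,1,2,4,...,64 onto the codes 0..7 (log2() above returns 0 for
	 * an argument of 0, so a watermark of 0 encodes as "000").
	 */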
977 	return log2(2 * watermark);
978 }
979 
980 static void
981 aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
982 {
983 	const struct qmgrInfo *qi = &sc->qinfo[qId];
984 	uint32_t qCfg;
985 	uint32_t baseAddress;
986 
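	/*
	 * The per-queue QUECONFIG word packs, from low to high bits, the
	 * hardware write/read pointers, the buffer base address (in units
	 * of 16 words), the entry size code, the buffer size code and the
	 * nearly-empty/nearly-full watermark codes assembled below.
	 */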
987 	/* Build config register */
988 	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
989 		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
990 	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
991 		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);
992 
993 	/* baseAddress, calculated relative to start address */
994 	baseAddress = sc->aqmFreeSramAddress;
995 
996 	/* base address must be word-aligned */
997 	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0);
998 
999 	/* Now convert to a 16-word pointer as required by the QUECONFIG register */
1000 	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
1001 	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;
1002 
1003 	/* set watermarks */
1004 	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
1005 	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);
1006 
1007 	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
1008 	    __func__, qId, ne, nf,
1009 	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
1010 	    qCfg, (u_int)IX_QMGR_Q_CONFIG_ADDR_GET(qId));
1011 
1012 	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
1013 }
1014 
1015 static void
1016 aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
1017 {
1018 	bus_size_t off;
1019 	uint32_t v;
1020 
1021 	/*
1022 	 * Calculate the register offset; several queues share each source select register
1023 	 */
1024 	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
1025 	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));
1026 
1027 	v = aqm_reg_read(sc, off);
1028 	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
1029 	    /* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3  */
1030 	    v |= 0x7;
1031 	} else {
1032 	  const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
1033 	  uint32_t mask;
1034 	  int qshift;
1035 
1036 	  qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
1037 	  mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */
1038 
1039 	  /* merge sourceId */
1040 	  v = (v &~ mask) | ((sourceId << qshift) & mask);
1041 	}
1042 
1043 	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
1044 	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
1045 	aqm_reg_write(sc, off, v);
1046 }
1047 
1048 /*
1049  * Reset AQM registers to default values.
1050  */
1051 static void
1052 aqm_reset(struct ixpqmgr_softc *sc)
1053 {
1054 	int i;
1055 
1056 	/* Reset queues 0..31 status registers 0..3 */
1057 	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
1058 		IX_QMGR_QUELOWSTAT_RESET_VALUE);
1059 	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
1060 		IX_QMGR_QUELOWSTAT_RESET_VALUE);
1061 	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
1062 		IX_QMGR_QUELOWSTAT_RESET_VALUE);
1063 	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
1064 		IX_QMGR_QUELOWSTAT_RESET_VALUE);
1065 
1066 	/* Reset underflow/overflow status registers 0..1 */
1067 	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
1068 		IX_QMGR_QUEUOSTAT_RESET_VALUE);
1069 	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
1070 		IX_QMGR_QUEUOSTAT_RESET_VALUE);
1071 
1072 	/* Reset queues 32..63 nearly empty status registers */
1073 	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
1074 		IX_QMGR_QUEUPPSTAT0_RESET_VALUE);
1075 
1076 	/* Reset queues 32..63 full status registers */
1077 	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
1078 		IX_QMGR_QUEUPPSTAT1_RESET_VALUE);
1079 
1080 	/* Reset int0 status flag source select registers 0..3 */
1081 	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
1082 			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1083 	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
1084 			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1085 	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
1086 			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1087 	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
1088 			     IX_QMGR_INT0SRCSELREG_RESET_VALUE);
1089 
1090 	/* Reset queue interrupt enable register 0..1 */
1091 	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
1092 		IX_QMGR_QUEIEREG_RESET_VALUE);
1093 	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
1094 		IX_QMGR_QUEIEREG_RESET_VALUE);
1095 
1096 	/* Reset queue interrupt register 0..1 */
1097 	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
1098 	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
1099 
1100 	/* Reset queue configuration words 0..63 */
1101 	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
1102 	    aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
1103 		IX_QMGR_QUECONFIG_RESET_VALUE);
1104 
1105 	/* XXX zero SRAM to simplify debugging */
1106 	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
1107 	     i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
1108 	    aqm_reg_write(sc, i, 0);
1109 }
1110 
1111 #ifdef __FreeBSD__
1112 static device_method_t ixpqmgr_methods[] = {
1113 	DEVMETHOD(device_probe,		ixpqmgr_probe),
1114 	DEVMETHOD(device_attach,	ixpqmgr_attach),
1115 	DEVMETHOD(device_detach,	ixpqmgr_detach),
1116 
1117 	{ 0, 0 }
1118 };
1119 
1120 static driver_t ixpqmgr_driver = {
1121 	"ixpqmgr",
1122 	ixpqmgr_methods,
1123 	sizeof(struct ixpqmgr_softc),
1124 };
1125 static devclass_t ixpqmgr_devclass;
1126 
1127 DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
1128 #endif
1129