/*	$NetBSD: ixp425_qmgr.c,v 1.12 2022/09/27 06:12:58 skrll Exp $ */

/*-
 * Copyright (c) 2006 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

/*-
 * Copyright (c) 2001-2005, Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
/*__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/ixp425_qmgr.c,v 1.1 2006/11/19 23:55:23 sam Exp $");*/
__KERNEL_RCSID(0, "$NetBSD: ixp425_qmgr.c,v 1.12 2022/09/27 06:12:58 skrll Exp $");

/*
 * Intel XScale Queue Manager support.
 *
 * Each IXP4XXX device has a hardware block that implements a priority
 * queue manager that is shared between the XScale cpu and the backend
 * devices (such as the NPE). Queues are accessed by reading/writing
 * special memory locations. The queue contents are mapped into a shared
 * SRAM region with entries managed in a circular buffer. The XScale
 * processor can receive interrupts based on queue contents (a condition
 * code determines when interrupts should be delivered).
 *
 * The code here basically replaces the qmgr class in the Intel Access
 * Library (IAL). A usage sketch for client drivers follows the includes
 * below.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/resource.h>

#include <sys/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>

#include <arm/xscale/ixp425_qmgr.h>

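/*
 * Usage sketch for client drivers (illustrative only, kept inside #if 0
 * so it never affects the build).  The queue id, size and watermarks
 * below are made up; real clients (e.g. the NPE ethernet driver) use
 * values dictated by their microcode.  The calls themselves
 * (ixpqmgr_qconfig/ixpqmgr_qread/ixpqmgr_qwrite) are the public API
 * declared in ixp425_qmgr.h and implemented in this file.
 */
#if 0
static void
example_rxdone(int qId, void *arg)
{
	uint32_t entry;

	/* Drain the queue; a zero entry means it has gone empty. */
	while (ixpqmgr_qread(qId, &entry) == 0 && entry != 0) {
		/* ... hand "entry" (typically a buffer descriptor
		 *     address) back to the owning driver ... */
	}
}

static void
example_setup(void)
{
	/*
	 * Hypothetical 128-entry queue 6, interrupt while not empty;
	 * the callback runs from ixpqmgr_intr() at IPL_NET.
	 */
	ixpqmgr_qconfig(6 /* hypothetical qId */, 128, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, example_rxdone, NULL);

	/* Producers simply push 32-bit entries. */
	(void)ixpqmgr_qwrite(6, 0x12345678);
}
#endif
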
/*
 * State per AQM hw queue.
 * This structure holds q configuration and dispatch state.
 */
struct qmgrInfo {
	int		qSizeInWords;		/* queue size in words */

	uint32_t	qOflowStatBitMask;	/* overflow status mask */
	int		qWriteCount;		/* queue write count */

	bus_size_t	qAccRegAddr;		/* access register */
	bus_size_t	qUOStatRegAddr;		/* status register */
	bus_size_t	qConfigRegAddr;		/* config register */
	int		qSizeInEntries;		/* queue size in entries */

	uint32_t	qUflowStatBitMask;	/* underflow status mask */
	int		qReadCount;		/* queue read count */

	/* XXX union */
	uint32_t	qStatRegAddr;
	uint32_t	qStatBitsOffset;
	uint32_t	qStat0BitMask;
	uint32_t	qStat1BitMask;

	uint32_t	intRegCheckMask;	/* interrupt reg check mask */
	void		(*cb)(int, void *);	/* callback function */
	void		*cbarg;			/* callback argument */
	int		priority;		/* dispatch priority */
#if 0
	/* NB: needed only for A0 parts */
	u_int		statusWordOffset;	/* status word offset */
	uint32_t	statusMask;		/* status mask */
	uint32_t	statusCheckValue;	/* status check value */
#endif
};

struct ixpqmgr_softc {
#ifdef __FreeBSD__
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct resource		*sc_irq;	/* IRQ resource */
	int			sc_rid;		/* resource id for irq */
	void			*sc_ih;		/* interrupt handler */
#else
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih[2];	/* interrupt handler */
#endif

	struct qmgrInfo		qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority. The table is split logically between queue
	 * identifiers 0-31 and 32-63. To optimize lookups bit masks
	 * are kept for the first-32 and last-32 q's. When the
	 * table needs to be rebuilt mark rebuildTable and it'll
	 * happen after the next interrupt.
	 */
	int			priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t		lowPriorityTableFirstHalfMask;
	uint32_t		uppPriorityTableFirstHalfMask;
	int			rebuildTable;	/* rebuild priorityTable */

	uint32_t		aqmFreeSramAddress;	/* SRAM free space */
};

static int qmgr_debug = 0;
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
} while (0)
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
} while (0)

static struct ixpqmgr_softc *ixpqmgr_sc = NULL;

static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
static int ixpqmgr_intr(void *);

static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
static void aqm_reset(struct ixpqmgr_softc *sc);

static void
dummyCallback(int qId, void *arg)
{
	/* XXX complain */
}

static uint32_t
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static void
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
{
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#ifdef __FreeBSD__
static int
ixpqmgr_probe(device_t dev)
{
	device_set_desc(dev, "IXP425 Q-Manager");
	return 0;
}
#endif

#ifdef __FreeBSD__
static void
ixpqmgr_attach(device_t dev)
#else
void *
ixpqmgr_init(bus_space_tag_t iot)
#endif
{
#ifdef __FreeBSD__
	struct ixpqmgr_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
#else
	struct ixpqmgr_softc *sc;
#endif
	int i;

#ifdef __FreeBSD__
	ixpqmgr_sc = sc;

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
#else
	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
	sc->sc_iot = iot;
#endif

	if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
	    0, &sc->sc_ioh))
		panic("%s: Cannot map registers", __func__);

#ifdef __FreeBSD__
	/* NB: we only use the lower 32 q's */
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid,
	    IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE);
	if (!sc->sc_irq)
		panic("Unable to allocate the qmgr irqs.\n");
	/* XXX could be a source of entropy */
	bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    ixpqmgr_intr, NULL, &sc->sc_ih);
#else
	ixpqmgr_sc = sc;
	sc->sc_ih[0] = ixp425_intr_establish(IXP425_INT_QUE1_32, IPL_NET,
	    ixpqmgr_intr, sc);
	if (sc->sc_ih[0] == NULL) {
		ixpqmgr_sc = NULL;
		kmem_free(sc, sizeof(*sc));
		return (NULL);
	}
	sc->sc_ih[1] = ixp425_intr_establish(IXP425_INT_QUE33_64, IPL_NET,
	    ixpqmgr_intr, sc);
	if (sc->sc_ih[1] == NULL) {
		ixp425_intr_disestablish(sc->sc_ih[0]);
		ixpqmgr_sc = NULL;
		kmem_free(sc, sizeof(*sc));
		return (NULL);
	}
#endif

	/* NB: softc is pre-zero'd */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
		struct qmgrInfo *qi = &sc->qinfo[i];

		qi->cb = dummyCallback;
		qi->priority = IX_QMGR_Q_PRIORITY_0;	/* default priority */
		/*
		 * There are two interrupt registers, 32 bits each. One
		 * for the lower queues (0-31) and one for the upper
		 * queues (32-63). Therefore we need to mod by 32, i.e.
		 * the min upper queue identifier.
		 */
		qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));

		/*
		 * Register addresses and bit masks are calculated and
		 * stored here to optimize QRead, QWrite and QStatusGet
		 * functions.
		 */

		/* AQM Queue access reg addresses, per queue */
		qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
		qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);

		/* AQM Queue lower-group (0-31), only */
		if (i < IX_QMGR_MIN_QUEUPP_QID) {
			/* AQM Q underflow/overflow status reg address, per queue */
			qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
			    ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
			     sizeof(uint32_t));

			/* AQM Q underflow status bit masks for status reg per queue */
			qi->qUflowStatBitMask =
			    (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
			    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
			     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));

			/* AQM Q overflow status bit masks for status reg, per queue */
			qi->qOflowStatBitMask =
			    (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
			    ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
			     (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));

			/* AQM Q lower-group (0-31) status reg addresses, per queue */
			qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
			    ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
			     sizeof(uint32_t));

			/* AQM Q lower-group (0-31) status register bit offset */
			qi->qStatBitsOffset =
			    (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
			    (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
		} else { /* AQM Q upper-group (32-63), only */
			qi->qUOStatRegAddr = 0;		/* XXX */

			/* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
			qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));

			/* AQM Q upper-group (32-63) Full status register bitmasks */
			qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
		}
	}

	sc->aqmFreeSramAddress = 0x100;	/* Q buffer space starts at 0x2100 */

	ixpqmgr_rebuild(sc);		/* build initial priority table */
	aqm_reset(sc);			/* reset h/w */

	return (sc);
}

#ifdef __FreeBSD__
static void
ixpqmgr_detach(device_t dev)
{
	struct ixpqmgr_softc *sc = device_get_softc(dev);

	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
}
#endif

int
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    void (*cb)(int, void *), void *cbarg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);

	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;

	qi->qReadCount = 0;
	qi->qWriteCount = 0;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */

	if (cb == NULL) {
		/* Reset to dummy callback */
		qi->cb = dummyCallback;
		qi->cbarg = 0;
	} else {
		qi->cb = cb;
		qi->cbarg = cbarg;
	}

	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue
	 * (see the accounting sketch after this function).
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)				/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	sc->rebuildTable = true;

	return 0;		/* XXX */
}
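
/*
 * Illustration of the SRAM accounting above (sketch, not built).  Queue
 * buffer space begins at byte offset 0x100 into the AQM window (0x2100
 * absolute) and each queue consumes qSizeInWords * 4 bytes.  The queue
 * id and size below are made up; sc is assumed to be ixpqmgr_sc.
 */
#if 0
static void
example_sram_accounting(struct ixpqmgr_softc *sc)
{
	uint32_t before = sc->aqmFreeSramAddress;	/* e.g. 0x100 */

	ixpqmgr_qconfig(20 /* hypothetical qId */, 128, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, NULL, NULL);

	/* 128 entries * 4 bytes: the free pointer advances by 0x200. */
	KASSERT(sc->aqmFreeSramAddress == before + 128 * sizeof(uint32_t));
}
#endif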

int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow.
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation.
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full; clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occurred: someone is draining the queue,
			 * so resynchronize the write counter from the number
			 * of entries currently in the queue.
			 */

			/* calculate number of words in q (see the worked example after this function) */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}
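
/*
 * Worked example of the occupancy calculation above (sketch, not built).
 * It assumes, as the arithmetic above implies, that the low 14 bits of
 * the QUECONFIG word hold the two 7-bit ring pointers; which 7-bit field
 * is the write side and which is the read side is a naming assumption
 * here, but the subtraction order matches the code above.
 */
#if 0
static u_int
example_q_occupancy(uint32_t qConfig)
{
	u_int wrPtr = qConfig & 0x7f;		/* bits [6:0] */
	u_int rdPtr = (qConfig >> 7) & 0x7f;	/* bits [13:7] */

	/* e.g. wrPtr 0x05, rdPtr 0x7e => (0x05 - 0x7e) & 0x7f = 7 words */
	return (wrPtr - rdPtr) & 0x7f;
}
#endif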

int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count: the next access to the read
	 * function will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}

int
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t entry;
	bus_size_t off = qi->qAccRegAddr;

	entry = aqm_reg_read(sc, off);
	while (--n) {
		if (entry == 0) {
			/* if we read a NULL entry, stop. We have underflowed */
			break;
		}
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	}
	*p = entry;

	/*
	 * Reset the current read count: the next access to the read
	 * function will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}

uint32_t
ixpqmgr_getqstatus(int qId)
{
#define	QLOWSTATMASK \
	((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t status;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);

		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else { /* read status of a queue in the range 32-63 */
		status = 0;
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
	}
	return status;
#undef QLOWSTATMASK
}
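
/*
 * Example of polling with ixpqmgr_getqstatus() (sketch, not built).
 * For queues 0-31 the returned value is the queue's status nibble (the
 * E/NE/NF/F flags); for queues 32-63 only nearly-empty and full are
 * reported, as the function above shows.
 */
#if 0
static bool
example_q_is_empty(int qId)
{
	uint32_t status = ixpqmgr_getqstatus(qId);

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		return (status & IX_QMGR_Q_STATUS_E_BIT_MASK) != 0;
	/* Upper queues only report nearly-empty; use it as a proxy. */
	return (status & IX_QMGR_Q_STATUS_NE_BIT_MASK) != 0;
}
#endif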

uint32_t
ixpqmgr_getqconfig(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
}

void
ixpqmgr_dump(void)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	int i, a;

	/* status registers */
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x400
	    , aqm_reg_read(sc, 0x400)
	    , aqm_reg_read(sc, 0x400+4)
	    , aqm_reg_read(sc, 0x400+8)
	    , aqm_reg_read(sc, 0x400+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x410
	    , aqm_reg_read(sc, 0x410)
	    , aqm_reg_read(sc, 0x410+4)
	    , aqm_reg_read(sc, 0x410+8)
	    , aqm_reg_read(sc, 0x410+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x420
	    , aqm_reg_read(sc, 0x420)
	    , aqm_reg_read(sc, 0x420+4)
	    , aqm_reg_read(sc, 0x420+8)
	    , aqm_reg_read(sc, 0x420+12)
	);
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , 0x430
	    , aqm_reg_read(sc, 0x430)
	    , aqm_reg_read(sc, 0x430+4)
	    , aqm_reg_read(sc, 0x430+8)
	    , aqm_reg_read(sc, 0x430+12)
	);
	/* q configuration registers */
	for (a = 0x2000; a < 0x20ff; a += 32)
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , a
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
		);
	/* allocated SRAM */
	for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
		a = 0x2000 + i;
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , a
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
		);
	}
	for (i = 0; i < 16; i++) {
		printf("Q[%2d] config 0x%08x status 0x%02x "
		       "Q[%2d] config 0x%08x status 0x%02x\n"
		    , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
		    , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
		);
	}
}

void
ixpqmgr_notify_enable(int qId, int srcSel)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
#if 0
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
#endif
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
}

void
ixpqmgr_notify_disable(int qId)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;

	aqm_int_disable(sc, qId);
}

/*
 * Rebuild the priority table used by the dispatcher.
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* lower q's (0-31) */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which matches
				 * the queues of the first half of the priority
				 * table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* upper q's (32-63) */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which matches
				 * the queues of the first half of the upper
				 * region of the priority table.
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = false;
}

/*
 * Count the number of leading zero bits in a word,
 * and return the same value as the CLZ instruction.
 * Note this is similar to the standard ffs function but
 * it counts zeros from the MSB instead of the LSB.
 *
 * word (in)	return value (out)
 * 0x80000000	0
 * 0x40000000	1
 * ,,,		,,,
 * 0x00000002	30
 * 0x00000001	31
 * 0x00000000	32
 *
 * The C version of this function is used as a replacement
 * for systems that do not provide the equivalent of the CLZ
 * assembly language instruction; a builtin-based alternative
 * is sketched after this function.
 *
 * Note that this version is big-endian
 */
static unsigned int
_lzcount(uint32_t word)
{
	unsigned int lzcount = 0;

	if (word == 0)
		return 32;
	while ((word & 0x80000000) == 0) {
		word <<= 1;
		lzcount++;
	}
	return lzcount;
}
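
/*
 * Alternative sketch (not built): toolchains that provide
 * __builtin_clz() can replace the loop above.  The builtin is
 * undefined for 0, so that case must still be handled explicitly.
 */
#if 0
static unsigned int
_lzcount_builtin(uint32_t word)
{
	return word == 0 ? 32 : (unsigned int)__builtin_clz(word);
}
#endif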

static int
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* If no queue has its interrupt bit set there is nothing to do */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue.
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than one queue;
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared. There is no need to maintain
			 * a separate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
			do {
				qIndex = sc->priorityTable[priorityTableIndex++];
				if (qIndex >= IX_QMGR_MAX_NUM_QUEUES)
					break;
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
				}
			} while (intRegVal &&
			    priorityTableIndex < IX_QMGR_MAX_NUM_QUEUES);
		}
	}

	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
		ixpqmgr_rebuild(sc);

	return (1);
}

#if 0
/*
 * Generate the parameters used to check if a Q's status matches
 * the specified source select. We calculate which status word
 * to check (statusWordOffset), the value to check the status
 * against (statusCheckValue) and the mask (statusMask) to mask
 * out all but the bits to check in the status word.
 */
static void
aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
{
	struct qmgrInfo *qi = &qinfo[qId];
	uint32_t shiftVal;

	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		switch (srcSel) {
		case IX_QMGR_Q_SOURCE_ID_E:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NE:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NF:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_F:
			qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_E:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NE:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_NF:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
			break;
		case IX_QMGR_Q_SOURCE_ID_NOT_F:
			qi->statusCheckValue = 0;
			qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
			break;
		default:
			/* Should never hit */
			IX_OSAL_ASSERT(0);
			break;
		}

		/* One nibble of status per queue so need to shift the
		 * check value and mask out to the correct position.
		 */
		shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
		    IX_QMGR_QUELOWSTAT_BITS_PER_Q;

		/* Calculate which status word to check from the qId,
		 * 8 Qs' status per word
		 */
		qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;

		qi->statusCheckValue <<= shiftVal;
		qi->statusMask <<= shiftVal;
	} else {
		/* One status word */
		qi->statusWordOffset = 0;
		/* Single bits per queue and int source bit hardwired NE,
		 * Qs start at 32.
		 */
		qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
		qi->statusCheckValue = qi->statusMask;
	}
}
#endif

static void
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}

static void
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
{
	bus_size_t reg;
	uint32_t v;

	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
	else
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
}

static unsigned
log2(unsigned n)
{
	unsigned count;
	/*
	 * N.B. this function will return 0 if supplied 0.
	 */
	for (count = 0; n/2; count++)
		n /= 2;
	return count;
}

static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize 1("00"),2("01"),4("10") */
	return log2(entrySize);
}

static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	/* bufferSize 16("00"),32("01"),64("10"),128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}

static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	return log2(2 * watermark);
}

static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
	    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	    | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
	    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be 16-word aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0);

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	    | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, (u_int)IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}
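
/*
 * Worked example of the encodings used above (sketch, not built).
 * Per the comments on the helpers: a 128-entry queue of 1-word entries
 * encodes entry size 0 ("00") and buffer size log2(128/16) = 3 ("11"),
 * while watermarks 0 and 2 encode as 0 ("000") and log2(2*2) = 2 ("010").
 * This assumes IX_QMGR_MIN_BUFFER_SIZE is 16 words, as the buffer-size
 * comment above indicates.
 */
#if 0
static void
example_qcfg_encoding(void)
{
	KASSERT(toAqmEntrySize(1) == 0);
	KASSERT(toAqmBufferSize(128) == 3);
	KASSERT(toAqmWatermark(0) == 0);
	KASSERT(toAqmWatermark(2) == 2);
}
#endif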

static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG must not clobber the bit-3 value */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}

/*
 * Reset AQM registers to default values.
 */
static void
aqm_reset(struct ixpqmgr_softc *sc)
{
	int i;

	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
	    IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
	    IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	    i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
}

#ifdef __FreeBSD__
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),

	{ 0, 0 }
};

static driver_t ixpqmgr_driver = {
	"ixpqmgr",
	ixpqmgr_methods,
	sizeof(struct ixpqmgr_softc),
};
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);
#endif
