1 /* $NetBSD: if_ni.c,v 1.50 2022/09/18 16:51:28 thorpej Exp $ */
2 /*
3 * Copyright (c) 2000 Ludd, University of Lule}, Sweden. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed at Ludd, University of
16 * Lule}, Sweden and its contributors.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
/*
 * Driver for DEBNA/DEBNT/DEBNK ethernet cards.
 * Things still to do:
 *	Collect statistics.
 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: if_ni.c,v 1.50 2022/09/18 16:51:28 thorpej Exp $");
40
41 #include "opt_inet.h"
42
43 #include <sys/param.h>
44 #include <sys/mbuf.h>
45 #include <sys/socket.h>
46 #include <sys/device.h>
47 #include <sys/systm.h>
48 #include <sys/sockio.h>
49 #include <sys/sched.h>
50
51 #include <net/if.h>
52 #include <net/if_ether.h>
53 #include <net/if_dl.h>
54 #include <net/bpf.h>
55
56 #include <netinet/in.h>
57 #include <netinet/if_inarp.h>
58
59 #include <sys/bus.h>
60 #ifdef __vax__
61 #include <machine/mtpr.h>
62 #include <machine/pte.h>
63 #endif
64
65 #include <dev/bi/bireg.h>
66 #include <dev/bi/bivar.h>
67
68 #include "ioconf.h"
69 #include "locators.h"
70
71 /*
72 * Tunable buffer parameters. Good idea to have them as power of 8; then
73 * they will fit into a logical VAX page.
74 */
75 #define NMSGBUF 8 /* Message queue entries */
76 #define NTXBUF 16 /* Transmit queue entries */
77 #define NTXFRAGS 8 /* Number of transmit buffer fragments */
78 #define NRXBUF 24 /* Receive queue entries */
79 #define NBDESCS (NTXBUF * NTXFRAGS + NRXBUF)
80 #define NQUEUES 3 /* RX + TX + MSG */
81 #define PKTHDR 18 /* Length of (control) packet header */
82 #define RXADD 18 /* Additional length of receive datagram */
83 #define TXADD (10+NTXFRAGS*8) /* "" transmit "" */
84 #define MSGADD 134 /* "" message "" */
85
86 #include <dev/bi/if_nireg.h> /* XXX include earlier */
87
88 /*
89 * Macros for (most cases of) insqti/remqhi.
90 * Retry NRETRIES times to do the operation, if it still fails assume
91 * a lost lock and panic.
92 */
93 #define NRETRIES 100
94 #define INSQTI(e, h) ({ \
95 int ret = 0, __i; \
96 for (__i = 0; __i < NRETRIES; __i++) { \
97 if ((ret = insqti(e, h)) != ILCK_FAILED) \
98 break; \
99 } \
100 if (__i == NRETRIES) \
101 panic("ni: insqti failed at %d", __LINE__); \
102 ret; \
103 })
104 #define REMQHI(h) ({ \
105 int __i; void *ret = NULL; \
106 for (__i = 0; __i < NRETRIES; __i++) { \
107 if ((ret = remqhi(h)) != (void *)ILCK_FAILED) \
108 break; \
109 } \
110 if (__i == NRETRIES) \
111 panic("ni: remqhi failed at %d", __LINE__); \
112 ret; \
113 })
114
115
116 #define nipqb (&sc->sc_gvppqb->nc_pqb)
117 #define gvp sc->sc_gvppqb
118 #define fqb sc->sc_fqb
119 #define bbd sc->sc_bbd
120
121 struct ni_softc {
122 device_t sc_dev; /* Configuration common part */
123 struct evcnt sc_intrcnt; /* Interrupt coounting */
124 struct ethercom sc_ec; /* Ethernet common part */
125 #define sc_if sc_ec.ec_if /* network-visible interface */
126 bus_space_tag_t sc_iot;
127 bus_addr_t sc_ioh;
128 bus_dma_tag_t sc_dmat;
129 struct ni_gvppqb *sc_gvppqb; /* Port queue block */
130 struct ni_gvppqb *sc_pgvppqb; /* Phys address of PQB */
131 struct ni_fqb *sc_fqb; /* Free Queue block */
132 struct ni_bbd *sc_bbd; /* Buffer descriptors */
133 uint8_t sc_enaddr[ETHER_ADDR_LEN];
134 };
135
136 static int nimatch(device_t, cfdata_t, void *);
137 static void niattach(device_t, device_t, void *);
138 static void niinit(struct ni_softc *);
139 static void nistart(struct ifnet *);
140 static void niintr(void *);
141 static int niioctl(struct ifnet *, u_long, void *);
142 static int ni_add_rxbuf(struct ni_softc *, struct ni_dg *, int);
143 static void ni_setup(struct ni_softc *);
144 static void nitimeout(struct ifnet *);
145 static void ni_shutdown(void *);
146 static void ni_getpgs(struct ni_softc *, int, void **, paddr_t *);
147 static int failtest(struct ni_softc *, int, int, int, const char *);
148
149 volatile int endwait, retry; /* Used during autoconfig */
150
151 CFATTACH_DECL_NEW(ni, sizeof(struct ni_softc),
152 nimatch, niattach, NULL, NULL);
153
154 #define NI_WREG(csr, val) \
155 bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
156 #define NI_RREG(csr) \
157 bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
158
159 #define WAITREG(csr,val) while (NI_RREG(csr) & val);
160 /*
161 * Check for present device.
162 */
163 static int
nimatch(device_t parent,cfdata_t cf,void * aux)164 nimatch(device_t parent, cfdata_t cf, void *aux)
165 {
166 struct bi_attach_args *ba = aux;
167 u_short type;
168
169 type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
170 if (type != BIDT_DEBNA && type != BIDT_DEBNT && type != BIDT_DEBNK)
171 return 0;
172
173 if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
174 cf->cf_loc[BICF_NODE] != ba->ba_nodenr)
175 return 0;
176
177 return 1;
178 }
179
180 /*
181 * Allocate a bunch of descriptor-safe memory.
182 * We need to get the structures from the beginning of its own pages.
183 */
184 static void
ni_getpgs(struct ni_softc * sc,int size,void ** v,paddr_t * p)185 ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p)
186 {
187 bus_dma_segment_t seg;
188 int nsegs, error;
189
190 if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
191 &nsegs, BUS_DMA_NOWAIT)) != 0)
192 panic(" unable to allocate memory: error %d", error);
193
194 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size, v,
195 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0)
196 panic(" unable to map memory: error %d", error);
197
198 if (p)
199 *p = seg.ds_addr;
200 memset(*v, 0, size);
201 }
202
203 static int
failtest(struct ni_softc * sc,int reg,int mask,int test,const char * str)204 failtest(struct ni_softc *sc, int reg, int mask, int test, const char *str)
205 {
206 int i = 100;
207
208 do {
209 DELAY(100000);
210 } while (((NI_RREG(reg) & mask) != test) && --i);
211
212 if (i == 0) {
213 printf("%s: %s\n", device_xname(sc->sc_dev), str);
214 return 1;
215 }
216 return 0;
217 }
218
219
220 /*
221 * Interface exists: make available by filling in network interface
222 * record. System will initialize the interface when it is ready
223 * to accept packets.
224 */
225 static void
niattach(device_t parent,device_t self,void * aux)226 niattach(device_t parent, device_t self, void *aux)
227 {
228 struct bi_attach_args *ba = aux;
229 struct ni_softc *sc = device_private(self);
230 struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
231 struct ni_msg *msg;
232 struct ni_ptdb *ptdb;
233 void *va;
234 int i, j, s;
235 u_short type;
236
237 sc->sc_dev = self;
238
239 type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
240 aprint_normal(": DEBN%c\n", type == BIDT_DEBNA ? 'A'
241 : type == BIDT_DEBNT ? 'T' : 'K');
242 sc->sc_iot = ba->ba_iot;
243 sc->sc_ioh = ba->ba_ioh;
244 sc->sc_dmat = ba->ba_dmat;
245
246 bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
247 niintr, sc, &sc->sc_intrcnt);
248 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
249 device_xname(self), "intr");
250
251 ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
252 (paddr_t *)&sc->sc_pgvppqb);
253 ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
254 ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
255 (void **)&sc->sc_bbd, 0);
256 /*
257 * Zero the newly allocated memory.
258 */
259
260 nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
261 nipqb->np_node = ba->ba_intcpu;
262 nipqb->np_vpqb = (uint32_t)gvp;
263 #ifdef __vax__
264 nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
265 nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
266 #else
267 #error Must fix support for non-vax.
268 #endif
269 nipqb->np_bvplvl = 1;
270 nipqb->np_vfqb = (uint32_t)fqb;
271 nipqb->np_vbdt = (uint32_t)bbd;
272 nipqb->np_nbdr = NBDESCS;
273
274 /* Free queue block */
275 nipqb->np_freeq = NQUEUES;
276 fqb->nf_mlen = PKTHDR+MSGADD;
277 fqb->nf_dlen = PKTHDR+TXADD;
278 fqb->nf_rlen = PKTHDR+RXADD;
279
280 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
281 ifp->if_softc = sc;
282 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
283 ifp->if_start = nistart;
284 ifp->if_ioctl = niioctl;
285 ifp->if_watchdog = nitimeout;
286 IFQ_SET_READY(&ifp->if_snd);
287
288 /*
289 * Start init sequence.
290 */
291
292 /* Reset the node */
293 NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
294 DELAY(500000);
295 i = 20;
296 while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
297 DELAY(500000);
298 if (i == 0) {
299 aprint_error_dev(self, "BROKE bit set after reset\n");
300 return;
301 }
302
303 /* Check state */
304 if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
305 return;
306
307 /* Clear owner bits */
308 NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
309 NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);
310
311 /* kick off init */
312 NI_WREG(NI_PCR, (uint32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
313 while (NI_RREG(NI_PCR) & PCR_OWN)
314 DELAY(100000);
315
316 /* Check state */
317 if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
318 return;
319
320 NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
321
322 WAITREG(NI_PCR, PCR_OWN);
323 NI_WREG(NI_PCR, PCR_OWN | PCR_ENABLE);
324 WAITREG(NI_PCR, PCR_OWN);
325 WAITREG(NI_PSR, PSR_OWN);
326
327 /* Check state */
328 if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
329 return;
330
331 NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
332
333 /*
334 * The message queue packets must be located on the beginning
335 * of a page. A VAX page is 512 bytes, but it clusters 8 pages.
336 * This knowledge is used here when allocating pages.
337 * !!! How should this be done on MIPS and Alpha??? !!!
338 */
339 #if NBPG < 4096
340 #error pagesize too small
341 #endif
342 s = splvm();
343 /* Set up message free queue */
344 ni_getpgs(sc, NMSGBUF * 512, &va, 0);
345 for (i = 0; i < NMSGBUF; i++) {
346 msg = (void *)((char *)va + i * 512);
347 INSQTI(msg, &fqb->nf_mforw);
348 }
349 WAITREG(NI_PCR, PCR_OWN);
350 NI_WREG(NI_PCR, PCR_FREEQNE | PCR_MFREEQ | PCR_OWN);
351 WAITREG(NI_PCR, PCR_OWN);
352
353 /* Set up xmit queue */
354 ni_getpgs(sc, NTXBUF * 512, &va, 0);
355 for (i = 0; i < NTXBUF; i++) {
356 struct ni_dg *data;
357
358 data = (void *)((char *)va + i * 512);
359 data->nd_status = 0;
360 data->nd_len = TXADD;
361 data->nd_ptdbidx = 1;
362 data->nd_opcode = BVP_DGRAM;
363 for (j = 0; j < NTXFRAGS; j++) {
364 data->bufs[j]._offset = 0;
365 data->bufs[j]._key = 1;
366 bbd[i * NTXFRAGS + j].nb_key = 1;
367 bbd[i * NTXFRAGS + j].nb_status = 0;
368 data->bufs[j]._index = i * NTXFRAGS + j;
369 }
370 INSQTI(data, &fqb->nf_dforw);
371 }
372 WAITREG(NI_PCR, PCR_OWN);
373 NI_WREG(NI_PCR, PCR_FREEQNE | PCR_DFREEQ | PCR_OWN);
374 WAITREG(NI_PCR, PCR_OWN);
375
376 /* recv buffers */
377 ni_getpgs(sc, NRXBUF * 512, &va, 0);
378 for (i = 0; i < NRXBUF; i++) {
379 struct ni_dg *data;
380 int idx;
381
382 data = (void *)((char *)va + i * 512);
383 data->nd_len = RXADD;
384 data->nd_opcode = BVP_DGRAMRX;
385 data->nd_ptdbidx = 2;
386 data->bufs[0]._key = 1;
387
388 idx = NTXBUF * NTXFRAGS + i;
389 if (ni_add_rxbuf(sc, data, idx))
390 panic("niattach: ni_add_rxbuf: out of mbufs");
391
392 INSQTI(data, &fqb->nf_rforw);
393 }
394 WAITREG(NI_PCR, PCR_OWN);
395 NI_WREG(NI_PCR, PCR_FREEQNE | PCR_RFREEQ | PCR_OWN);
396 WAITREG(NI_PCR, PCR_OWN);
397
398 splx(s);
399
400 /* Set initial parameters */
401 msg = REMQHI(&fqb->nf_mforw);
402
403 msg->nm_opcode = BVP_MSG;
404 msg->nm_status = 0;
405 msg->nm_len = sizeof(struct ni_param) + 6;
406 msg->nm_opcode2 = NI_WPARAM;
407 ((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;
408
409 endwait = retry = 0;
410 INSQTI(msg, &gvp->nc_forw0);
411
412 retry: WAITREG(NI_PCR, PCR_OWN);
413 NI_WREG(NI_PCR, PCR_CMDQNE | PCR_CMDQ0 | PCR_OWN);
414 WAITREG(NI_PCR, PCR_OWN);
415 i = 1000;
416 while (endwait == 0 && --i)
417 DELAY(10000);
418
419 if (endwait == 0) {
420 if (++retry < 3)
421 goto retry;
422 aprint_error_dev(self, "no response to set params\n");
423 return;
424 }
425
426 /* Clear counters */
427 msg = REMQHI(&fqb->nf_mforw);
428 msg->nm_opcode = BVP_MSG;
429 msg->nm_status = 0;
430 msg->nm_len = sizeof(struct ni_param) + 6;
431 msg->nm_opcode2 = NI_RCCNTR;
432
433 INSQTI(msg, &gvp->nc_forw0);
434
435 WAITREG(NI_PCR, PCR_OWN);
436 NI_WREG(NI_PCR, PCR_CMDQNE | PCR_CMDQ0 | PCR_OWN);
437 WAITREG(NI_PCR, PCR_OWN);
438
439 /* Enable transmit logic */
440 msg = REMQHI(&fqb->nf_mforw);
441
442 msg->nm_opcode = BVP_MSG;
443 msg->nm_status = 0;
444 msg->nm_len = 18;
445 msg->nm_opcode2 = NI_STPTDB;
446 ptdb = (struct ni_ptdb *)&msg->nm_text[0];
447 memset(ptdb, 0, sizeof(struct ni_ptdb));
448 ptdb->np_index = 1;
449 ptdb->np_fque = 1;
450
451 INSQTI(msg, &gvp->nc_forw0);
452
453 WAITREG(NI_PCR, PCR_OWN);
454 NI_WREG(NI_PCR, PCR_CMDQNE | PCR_CMDQ0 | PCR_OWN);
455 WAITREG(NI_PCR, PCR_OWN);
456
457 /* Wait for everything to finish */
458 WAITREG(NI_PSR, PSR_OWN);
459
460 aprint_normal_dev(self, "hardware address %s\n",
461 ether_sprintf(sc->sc_enaddr));
462
463 /*
464 * Attach the interface.
465 */
466 if_attach(ifp);
467 ether_ifattach(ifp, sc->sc_enaddr);
468 if (shutdownhook_establish(ni_shutdown, sc) == 0)
469 aprint_error_dev(self,
470 "WARNING: unable to establish shutdown hook\n");
471 }
472
473 /*
474 * Initialization of interface.
475 */
476 void
niinit(struct ni_softc * sc)477 niinit(struct ni_softc *sc)
478 {
479 struct ifnet *ifp = &sc->sc_if;
480
481 /*
482 * Set flags (so ni_setup() do the right thing).
483 */
484 ifp->if_flags |= IFF_RUNNING;
485
486 /*
487 * Send setup messages so that the rx/tx locic starts.
488 */
489 ni_setup(sc);
490
491 }
492
493 /*
494 * Start output on interface.
495 */
496 void
nistart(struct ifnet * ifp)497 nistart(struct ifnet *ifp)
498 {
499 struct ni_softc *sc = ifp->if_softc;
500 struct ni_dg *data;
501 struct ni_bbd *bdp;
502 struct mbuf *m, *m0;
503 int i, cnt, res, mlen;
504
505 #ifdef DEBUG
506 if (ifp->if_flags & IFF_DEBUG)
507 printf("%s: nistart\n", device_xname(sc->sc_dev));
508 #endif
509
510 while (fqb->nf_dforw) {
511 IFQ_POLL(&ifp->if_snd, m);
512 if (m == 0)
513 break;
514
515 data = REMQHI(&fqb->nf_dforw);
516 if ((int)data == Q_EMPTY) {
517 break;
518 }
519
520 IFQ_DEQUEUE(&ifp->if_snd, m);
521
522 /*
523 * Count number of mbufs in chain.
524 * Always do DMA directly from mbufs, therefore the transmit
525 * ring is really big.
526 */
527 for (m0 = m, cnt = 0; m0; m0 = m0->m_next)
528 if (m0->m_len)
529 cnt++;
530 if (cnt > NTXFRAGS)
531 panic("nistart"); /* XXX */
532
533 bpf_mtap(ifp, m, BPF_D_OUT);
534 bdp = &bbd[(data->bufs[0]._index & 0x7fff)];
535 for (m0 = m, i = 0, mlen = 0; m0; m0 = m0->m_next) {
536 if (m0->m_len == 0)
537 continue;
538 bdp->nb_status = (mtod(m0, uint32_t) & NIBD_OFFSET) |
539 NIBD_VALID;
540 bdp->nb_pte = (uint32_t)kvtopte(mtod(m0, void *));
541 bdp->nb_len = m0->m_len;
542 data->bufs[i]._offset = 0;
543 data->bufs[i]._len = bdp->nb_len;
544 data->bufs[i]._index |= NIDG_CHAIN;
545 mlen += bdp->nb_len;
546 bdp++;
547 i++;
548 }
549 data->nd_opcode = BVP_DGRAM;
550 data->nd_pad3 = 1;
551 data->nd_ptdbidx = 1;
552 data->nd_len = 10 + i * 8;
553 data->bufs[i - 1]._index &= ~NIDG_CHAIN;
554 data->nd_cmdref = (uint32_t)m;
555 #ifdef DEBUG
556 if (ifp->if_flags & IFF_DEBUG)
557 printf("%s: sending %d bytes (%d segments)\n",
558 device_xname(sc->sc_dev), mlen, i);
559 #endif
560
561 res = INSQTI(data, &gvp->nc_forw0);
562 if (res == Q_EMPTY) {
563 WAITREG(NI_PCR, PCR_OWN);
564 NI_WREG(NI_PCR, PCR_CMDQNE | PCR_CMDQ0 | PCR_OWN);
565 }
566 }
567 }
568
569 void
niintr(void * arg)570 niintr(void *arg)
571 {
572 struct ni_softc *sc = arg;
573 struct ni_dg *data;
574 struct ni_msg *msg;
575 struct ifnet *ifp = &sc->sc_if;
576 struct ni_bbd *bd;
577 struct mbuf *m;
578 int idx, res;
579
580 if ((NI_RREG(NI_PSR) & PSR_STATE) != PSR_ENABLED)
581 return;
582
583 if ((NI_RREG(NI_PSR) & PSR_ERR))
584 printf("%s: PSR %x\n", device_xname(sc->sc_dev),
585 NI_RREG(NI_PSR));
586
587 KERNEL_LOCK(1, NULL);
588 /* Got any response packets? */
589 while ((NI_RREG(NI_PSR) & PSR_RSQ) && (data = REMQHI(&gvp->nc_forwr))) {
590
591 switch (data->nd_opcode) {
592 case BVP_DGRAMRX: /* Receive datagram */
593 idx = data->bufs[0]._index;
594 bd = &bbd[idx];
595 m = (void *)data->nd_cmdref;
596 m->m_pkthdr.len = m->m_len =
597 data->bufs[0]._len - ETHER_CRC_LEN;
598 m_set_rcvif(m, ifp);
599 if (ni_add_rxbuf(sc, data, idx)) {
600 bd->nb_len = (m->m_ext.ext_size - 2);
601 bd->nb_pte =
602 (long)kvtopte(m->m_ext.ext_buf);
603 bd->nb_status = 2 | NIBD_VALID;
604 bd->nb_key = 1;
605 }
606 data->nd_len = RXADD;
607 data->nd_status = 0;
608 res = INSQTI(data, &fqb->nf_rforw);
609 if (res == Q_EMPTY) {
610 WAITREG(NI_PCR, PCR_OWN);
611 NI_WREG(NI_PCR,
612 PCR_FREEQNE | PCR_RFREEQ | PCR_OWN);
613 }
614 if (m == (void *)data->nd_cmdref)
615 break; /* Out of mbufs */
616
617 if_percpuq_enqueue(ifp->if_percpuq, m);
618 break;
619
620 case BVP_DGRAM:
621 m = (struct mbuf *)data->nd_cmdref;
622 m_freem(m);
623 res = INSQTI(data, &fqb->nf_dforw);
624 if (res == Q_EMPTY) {
625 WAITREG(NI_PCR, PCR_OWN);
626 NI_WREG(NI_PCR,
627 PCR_FREEQNE | PCR_DFREEQ | PCR_OWN);
628 }
629 break;
630
631 case BVP_MSGRX:
632 msg = (struct ni_msg *)data;
633 switch (msg->nm_opcode2) {
634 case NI_WPARAM:
635 memcpy(sc->sc_enaddr, ((struct ni_param *)&msg->nm_text[0])->np_dpa, ETHER_ADDR_LEN);
636 endwait = 1;
637 break;
638
639 case NI_RCCNTR:
640 case NI_CLPTDB:
641 case NI_STPTDB:
642 break;
643
644 default:
645 printf("Unkn resp %d\n",
646 msg->nm_opcode2);
647 break;
648 }
649 res = INSQTI(data, &fqb->nf_mforw);
650 if (res == Q_EMPTY) {
651 WAITREG(NI_PCR, PCR_OWN);
652 NI_WREG(NI_PCR,
653 PCR_FREEQNE | PCR_MFREEQ | PCR_OWN);
654 }
655 break;
656
657 default:
658 printf("Unknown opcode %d\n", data->nd_opcode);
659 res = INSQTI(data, &fqb->nf_mforw);
660 if (res == Q_EMPTY) {
661 WAITREG(NI_PCR, PCR_OWN);
662 NI_WREG(NI_PCR,
663 PCR_FREEQNE | PCR_MFREEQ | PCR_OWN);
664 }
665 }
666 }
667
668 /* Try to kick on the start routine again */
669 nistart(ifp);
670
671 NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~(PSR_OWN | PSR_RSQ));
672 KERNEL_UNLOCK_ONE(NULL);
673 }
674
675 /*
676 * Process an ioctl request.
677 */
678 int
niioctl(struct ifnet * ifp,u_long cmd,void * data)679 niioctl(struct ifnet *ifp, u_long cmd, void *data)
680 {
681 struct ni_softc *sc = ifp->if_softc;
682 struct ifaddr *ifa = (struct ifaddr *)data;
683 int s = splnet(), error = 0;
684
685 switch (cmd) {
686
687 case SIOCINITIFADDR:
688 ifp->if_flags |= IFF_UP;
689 switch (ifa->ifa_addr->sa_family) {
690 #ifdef INET
691 case AF_INET:
692 niinit(sc);
693 arp_ifinit(ifp, ifa);
694 break;
695 #endif
696 }
697 break;
698
699 case SIOCSIFFLAGS:
700 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
701 break;
702 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
703 case IFF_RUNNING:
704 /*
705 * If interface is marked down and it is running,
706 * stop it.
707 */
708 ifp->if_flags &= ~IFF_RUNNING;
709 ni_setup(sc);
710 break;
711 case IFF_UP:
712 /*
713 * If interface it marked up and it is stopped, then
714 * start it.
715 */
716 niinit(sc);
717 break;
718 case IFF_UP | IFF_RUNNING:
719 /*
720 * Send a new setup packet to match any new changes.
721 * (Like IFF_PROMISC etc)
722 */
723 ni_setup(sc);
724 break;
725 default:
726 break;
727 }
728 break;
729
730 case SIOCADDMULTI:
731 case SIOCDELMULTI:
732 /*
733 * Update our multicast list.
734 */
735 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
736 /*
737 * Multicast list has changed; set the hardware filter
738 * accordingly.
739 */
740 if (ifp->if_flags & IFF_RUNNING)
741 ni_setup(sc);
742 error = 0;
743 }
744 break;
745
746 default:
747 error = ether_ioctl(ifp, cmd, data);
748 break;
749 }
750 splx(s);
751 return error;
752 }
753
754 /*
755 * Add a receive buffer to the indicated descriptor.
756 */
757 int
ni_add_rxbuf(struct ni_softc * sc,struct ni_dg * data,int idx)758 ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx)
759 {
760 struct ni_bbd *bd = &bbd[idx];
761 struct mbuf *m;
762
763 MGETHDR(m, M_DONTWAIT, MT_DATA);
764 if (m == NULL)
765 return ENOBUFS;
766
767 MCLGET(m, M_DONTWAIT);
768 if ((m->m_flags & M_EXT) == 0) {
769 m_freem(m);
770 return ENOBUFS;
771 }
772
773 m->m_data += 2;
774 bd->nb_len = (m->m_ext.ext_size - 2);
775 bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf);
776 bd->nb_status = 2 | NIBD_VALID;
777 bd->nb_key = 1;
778
779 data->bufs[0]._offset = 0;
780 data->bufs[0]._len = bd->nb_len;
781 data->bufs[0]._index = idx;
782 data->nd_cmdref = (long)m;
783
784 return 0;
785 }
786
787 /*
788 * Create setup packet and put in queue for sending.
789 */
790 void
ni_setup(struct ni_softc * sc)791 ni_setup(struct ni_softc *sc)
792 {
793 struct ethercom *ec = &sc->sc_ec;
794 struct ifnet *ifp = &sc->sc_if;
795 struct ni_msg *msg;
796 struct ni_ptdb *ptdb;
797 struct ether_multi *enm;
798 struct ether_multistep step;
799 int i, res;
800
801 msg = REMQHI(&fqb->nf_mforw);
802 if ((int)msg == Q_EMPTY)
803 return; /* What to do? */
804
805 ptdb = (struct ni_ptdb *)&msg->nm_text[0];
806 memset(ptdb, 0, sizeof(struct ni_ptdb));
807
808 msg->nm_opcode = BVP_MSG;
809 msg->nm_len = 18;
810 ptdb->np_index = 2; /* definition type index */
811 ptdb->np_fque = 2; /* Free queue */
812 if (ifp->if_flags & IFF_RUNNING) {
813 msg->nm_opcode2 = NI_STPTDB;
814 ptdb->np_type = ETHERTYPE_IP;
815 ptdb->np_flags = PTDB_UNKN | PTDB_BDC;
816 if (ifp->if_flags & IFF_PROMISC)
817 ptdb->np_flags |= PTDB_PROMISC;
818 memset(ptdb->np_mcast[0], 0xff, ETHER_ADDR_LEN); /* Broadcast */
819 ptdb->np_adrlen = 1;
820 msg->nm_len += 8;
821 ifp->if_flags &= ~IFF_ALLMULTI;
822 if ((ifp->if_flags & IFF_PROMISC) == 0) {
823 ETHER_LOCK(ec);
824 ETHER_FIRST_MULTI(step, ec, enm);
825 i = 1;
826 while (enm != NULL) {
827 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
828 ifp->if_flags |= IFF_ALLMULTI;
829 ptdb->np_flags |= PTDB_AMC;
830 break;
831 }
832 msg->nm_len += 8;
833 ptdb->np_adrlen++;
834 memcpy(ptdb->np_mcast[i++], enm->enm_addrlo,
835 ETHER_ADDR_LEN);
836 ETHER_NEXT_MULTI(step, enm);
837 }
838 ETHER_UNLOCK(ec);
839 }
840 } else
841 msg->nm_opcode2 = NI_CLPTDB;
842
843 res = INSQTI(msg, &gvp->nc_forw0);
844 if (res == Q_EMPTY) {
845 WAITREG(NI_PCR, PCR_OWN);
846 NI_WREG(NI_PCR, PCR_CMDQNE | PCR_CMDQ0 | PCR_OWN);
847 }
848 }
849
850 /*
851 * Check for dead transmit logic. Not uncommon.
852 */
853 void
nitimeout(struct ifnet * ifp)854 nitimeout(struct ifnet *ifp)
855 {
856 #if 0
857 struct ni_softc *sc = ifp->if_softc;
858
859 if (sc->sc_inq == 0)
860 return;
861
862 printf("%s: xmit logic died, resetting...\n", device_xname(sc->sc_dev));
863 /*
864 * Do a reset of interface, to get it going again.
865 * Will it work by just restart the transmit logic?
866 */
867 niinit(sc);
868 #endif
869 }
870
871 /*
872 * Shutdown hook. Make sure the interface is stopped at reboot.
873 */
874 void
ni_shutdown(void * arg)875 ni_shutdown(void *arg)
876 {
877 struct ni_softc *sc = arg;
878
879 WAITREG(NI_PCR, PCR_OWN);
880 NI_WREG(NI_PCR, PCR_OWN | PCR_SHUTDOWN);
881 WAITREG(NI_PCR, PCR_OWN);
882 WAITREG(NI_PSR, PSR_OWN);
883 }
884