/*	$NetBSD: if_ni.c,v 1.37 2008/11/07 00:20:02 dyoung Exp $ */
/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEBNA/DEBNT/DEBNK ethernet cards.
 * Things that are still to do:
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ni.c,v 1.37 2008/11/07 00:20:02 dyoung Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>
#ifdef __vax__
#include <machine/mtpr.h>
#include <machine/pte.h>
#endif

#include <dev/bi/bireg.h>
#include <dev/bi/bivar.h>

#include "ioconf.h"
#include "locators.h"

/*
 * Tunable buffer parameters. Good idea to have them as a power of 8; then
 * they will fit into a logical VAX page.
 */
#define NMSGBUF		8	/* Message queue entries */
#define NTXBUF		16	/* Transmit queue entries */
#define NTXFRAGS	8	/* Number of transmit buffer fragments */
#define NRXBUF		24	/* Receive queue entries */
#define NBDESCS		(NTXBUF * NTXFRAGS + NRXBUF)
#define NQUEUES		3	/* RX + TX + MSG */
#define PKTHDR		18	/* Length of (control) packet header */
#define RXADD		18	/* Additional length of receive datagram */
#define TXADD		(10+NTXFRAGS*8)	/* "" transmit "" */
#define MSGADD		134	/* "" message "" */
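
/*
 * With the defaults above this works out to NTXBUF * NTXFRAGS + NRXBUF =
 * 16 * 8 + 24 = 152 buffer descriptors.  Each free-queue entry is carved
 * out of a 512-byte chunk and advertises a length of PKTHDR plus the
 * per-queue additional data (MSGADD, TXADD or RXADD).
 */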

#include <dev/bi/if_nireg.h>	/* XXX include earlier */

/*
 * Macros for (most cases of) insqti/remqhi.
 * Retry NRETRIES times to do the operation; if it still fails, assume
 * a lost lock and panic.
 */
#define	NRETRIES	100
#define	INSQTI(e, h) ({						\
	int ret = 0, __i;					\
	for (__i = 0; __i < NRETRIES; __i++) {			\
		if ((ret = insqti(e, h)) != ILCK_FAILED)	\
			break;					\
	}							\
	if (__i == NRETRIES)					\
		panic("ni: insqti failed at %d", __LINE__);	\
	ret;							\
})
#define	REMQHI(h) ({						\
	int __i; void *ret = NULL;				\
	for (__i = 0; __i < NRETRIES; __i++) {			\
		if ((ret = remqhi(h)) != (void *)ILCK_FAILED)	\
			break;					\
	}							\
	if (__i == NRETRIES)					\
		panic("ni: remqhi failed at %d", __LINE__);	\
	ret;							\
})
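
/*
 * insqti()/remqhi() are the interlocked queue primitives (the VAX
 * INSQTI/REMQHI instructions); ILCK_FAILED means the queue's secondary
 * interlock could not be acquired, hence the bounded retry loop above.
 * The Q_EMPTY result (queue was empty before the insert, or nothing to
 * remove) is what the rest of the driver uses to decide when the port
 * must be prodded through the PCR.
 */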

#define nipqb	(&sc->sc_gvppqb->nc_pqb)
#define gvp	sc->sc_gvppqb
#define fqb	sc->sc_fqb
#define bbd	sc->sc_bbd

struct ni_softc {
	device_t sc_dev;		/* Configuration common part */
	struct evcnt sc_intrcnt;	/* Interrupt counting */
	struct ethercom sc_ec;		/* Ethernet common part */
#define sc_if	sc_ec.ec_if		/* network-visible interface */
	bus_space_tag_t sc_iot;
	bus_addr_t sc_ioh;
	bus_dma_tag_t sc_dmat;
	struct ni_gvppqb *sc_gvppqb;	/* Port queue block */
	struct ni_gvppqb *sc_pgvppqb;	/* Phys address of PQB */
	struct ni_fqb *sc_fqb;		/* Free Queue block */
	struct ni_bbd *sc_bbd;		/* Buffer descriptors */
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];
};

static int	nimatch(device_t, cfdata_t, void *);
static void	niattach(device_t, device_t, void *);
static void	niinit(struct ni_softc *);
static void	nistart(struct ifnet *);
static void	niintr(void *);
static int	niioctl(struct ifnet *, u_long, void *);
static int	ni_add_rxbuf(struct ni_softc *, struct ni_dg *, int);
static void	ni_setup(struct ni_softc *);
static void	nitimeout(struct ifnet *);
static void	ni_shutdown(void *);
static void	ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p);
static int	failtest(struct ni_softc *, int, int, int, const char *);

volatile int endwait, retry;	/* Used during autoconfig */

CFATTACH_DECL_NEW(ni, sizeof(struct ni_softc),
    nimatch, niattach, NULL, NULL);

#define NI_WREG(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define NI_RREG(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

#define WAITREG(csr,val) while (NI_RREG(csr) & val);
/*
 * Check for present device.
 */
static int
nimatch(device_t parent, cfdata_t cf, void *aux)
{
	struct bi_attach_args *ba = aux;
	u_short type;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	if (type != BIDT_DEBNA && type != BIDT_DEBNT && type != BIDT_DEBNK)
		return 0;

	if (cf->cf_loc[BICF_NODE] != BICF_NODE_DEFAULT &&
	    cf->cf_loc[BICF_NODE] != ba->ba_nodenr)
		return 0;

	return 1;
}

/*
 * Allocate a bunch of descriptor-safe memory.
 * We need to get the structures from the beginning of their own pages.
 */
static void
ni_getpgs(struct ni_softc *sc, int size, void **v, paddr_t *p)
{
	bus_dma_segment_t seg;
	int nsegs, error;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &nsegs, BUS_DMA_NOWAIT)) != 0)
		panic(" unable to allocate memory: error %d", error);

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nsegs, size, v,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0)
		panic(" unable to map memory: error %d", error);

	if (p)
		*p = seg.ds_addr;
	memset(*v, 0, size);
}
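
/*
 * Wait for a register condition: poll until (reg & mask) == test, for at
 * most 100 * 100ms (about ten seconds).  Returns 0 on success; on timeout
 * the message in "str" is printed and 1 is returned.
 */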
static int
failtest(struct ni_softc *sc, int reg, int mask, int test, const char *str)
{
	int i = 100;

	do {
		DELAY(100000);
	} while (((NI_RREG(reg) & mask) != test) && --i);

	if (i == 0) {
		printf("%s: %s\n", device_xname(sc->sc_dev), str);
		return 1;
	}
	return 0;
}


/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 */
static void
niattach(device_t parent, device_t self, void *aux)
{
	struct bi_attach_args *ba = aux;
	struct ni_softc *sc = device_private(self);
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	void *va;
	int i, j, s, res;
	u_short type;

	sc->sc_dev = self;

	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	printf(": DEBN%c\n", type == BIDT_DEBNA ? 'A' : type == BIDT_DEBNT ?
	    'T' : 'K');
	sc->sc_iot = ba->ba_iot;
	sc->sc_ioh = ba->ba_ioh;
	sc->sc_dmat = ba->ba_dmat;

	bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
	    niintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
	    (paddr_t *)&sc->sc_pgvppqb);
	ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
	ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
	    (void **)&sc->sc_bbd, 0);
	/*
	 * Zero the newly allocated memory.
	 */

	nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
	nipqb->np_node = ba->ba_intcpu;
	nipqb->np_vpqb = (u_int32_t)gvp;
#ifdef __vax__
	nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
	nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
#else
#error Must fix support for non-vax.
#endif
	nipqb->np_bvplvl = 1;
	nipqb->np_vfqb = (u_int32_t)fqb;
	nipqb->np_vbdt = (u_int32_t)bbd;
	nipqb->np_nbdr = NBDESCS;

	/* Free queue block */
	nipqb->np_freeq = NQUEUES;
	fqb->nf_mlen = PKTHDR+MSGADD;
	fqb->nf_dlen = PKTHDR+TXADD;
	fqb->nf_rlen = PKTHDR+RXADD;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = nistart;
	ifp->if_ioctl = niioctl;
	ifp->if_watchdog = nitimeout;
	IFQ_SET_READY(&ifp->if_snd);
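
	/*
	 * Init handshake used below: the driver hands each command to the
	 * port by setting the OWN bit in the PCR together with the command
	 * bits, the port returns ownership by clearing OWN, and the
	 * resulting state is then checked in the PSR.
	 */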
	/*
	 * Start init sequence.
	 */

	/* Reset the node */
	NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
	DELAY(500000);
	i = 20;
	while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
		DELAY(500000);
	if (i == 0) {
		printf("%s: BROKE bit set after reset\n", device_xname(self));
		return;
	}

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
		return;

	/* Clear owner bits */
	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
	NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);

	/* kick off init */
	NI_WREG(NI_PCR, (u_int32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
	while (NI_RREG(NI_PCR) & PCR_OWN)
		DELAY(100000);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_ENABLE);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	/*
	 * The message queue packets must be located at the beginning
	 * of a page. A VAX page is 512 bytes, but it clusters 8 pages.
	 * This knowledge is used here when allocating pages.
	 * !!! How should this be done on MIPS and Alpha??? !!!
	 */
#if NBPG < 4096
#error pagesize too small
#endif
	s = splvm();
	/* Set up message free queue */
	ni_getpgs(sc, NMSGBUF * 512, &va, 0);
	for (i = 0; i < NMSGBUF; i++) {
		msg = (void *)((char *)va + i * 512);
		res = INSQTI(msg, &fqb->nf_mforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Set up xmit queue */
	ni_getpgs(sc, NTXBUF * 512, &va, 0);
	for (i = 0; i < NTXBUF; i++) {
		struct ni_dg *data;

		data = (void *)((char *)va + i * 512);
		data->nd_status = 0;
		data->nd_len = TXADD;
		data->nd_ptdbidx = 1;
		data->nd_opcode = BVP_DGRAM;
		for (j = 0; j < NTXFRAGS; j++) {
			data->bufs[j]._offset = 0;
			data->bufs[j]._key = 1;
			bbd[i * NTXFRAGS + j].nb_key = 1;
			bbd[i * NTXFRAGS + j].nb_status = 0;
			data->bufs[j]._index = i * NTXFRAGS + j;
		}
		res = INSQTI(data, &fqb->nf_dforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* recv buffers */
	ni_getpgs(sc, NRXBUF * 512, &va, 0);
	for (i = 0; i < NRXBUF; i++) {
		struct ni_dg *data;
		int idx;

		data = (void *)((char *)va + i * 512);
		data->nd_len = RXADD;
		data->nd_opcode = BVP_DGRAMRX;
		data->nd_ptdbidx = 2;
		data->bufs[0]._key = 1;

		idx = NTXBUF * NTXFRAGS + i;
		if (ni_add_rxbuf(sc, data, idx))
			panic("niattach: ni_add_rxbuf: out of mbufs");

		res = INSQTI(data, &fqb->nf_rforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	splx(s);

	/* Set initial parameters */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_WPARAM;
	((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;

	endwait = retry = 0;
	res = INSQTI(msg, &gvp->nc_forw0);

retry:	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);
	i = 1000;
	while (endwait == 0 && --i)
		DELAY(10000);

	if (endwait == 0) {
		if (++retry < 3)
			goto retry;
		printf("%s: no response to set params\n", device_xname(self));
		return;
	}

	/* Clear counters */
	msg = REMQHI(&fqb->nf_mforw);
	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_RCCNTR;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Enable transmit logic */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = 18;
	msg->nm_opcode2 = NI_STPTDB;
	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));
	ptdb->np_index = 1;
	ptdb->np_fque = 1;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Wait for everything to finish */
	WAITREG(NI_PSR, PSR_OWN);

	printf("%s: hardware address %s\n", device_xname(self),
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	if (shutdownhook_establish(ni_shutdown, sc) == 0)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");
}

/*
 * Initialization of interface.
 */
void
niinit(struct ni_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	/*
	 * Set flags (so ni_setup() does the right thing).
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send setup messages so that the rx/tx logic starts.
	 */
	ni_setup(sc);

}

/*
 * Start output on interface.
 */
void
nistart(struct ifnet *ifp)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ni_dg *data;
	struct ni_bbd *bdp;
	struct mbuf *m, *m0;
	int i, cnt, res, mlen;

	if (ifp->if_flags & IFF_OACTIVE)
		return;
#ifdef DEBUG
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: nistart\n", device_xname(sc->sc_dev));
#endif

	while (fqb->nf_dforw) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			break;

		data = REMQHI(&fqb->nf_dforw);
		if ((int)data == Q_EMPTY) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, cnt = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				cnt++;
		if (cnt > NTXFRAGS)
			panic("nistart");	/* XXX */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
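		/*
		 * Hand the chain to the port: each fragment is described
		 * by the VAX PTE of its buffer plus the byte offset within
		 * the page, so the mbuf data is DMA'd in place without
		 * copying.
		 */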
		bdp = &bbd[(data->bufs[0]._index & 0x7fff)];
		for (m0 = m, i = 0, mlen = 0; m0; m0 = m0->m_next) {
			if (m0->m_len == 0)
				continue;
			bdp->nb_status = (mtod(m0, u_int32_t) & NIBD_OFFSET) |
			    NIBD_VALID;
			bdp->nb_pte = (u_int32_t)kvtopte(mtod(m0, void *));
			bdp->nb_len = m0->m_len;
			data->bufs[i]._offset = 0;
			data->bufs[i]._len = bdp->nb_len;
			data->bufs[i]._index |= NIDG_CHAIN;
			mlen += bdp->nb_len;
			bdp++;
			i++;
		}
		data->nd_opcode = BVP_DGRAM;
		data->nd_pad3 = 1;
		data->nd_ptdbidx = 1;
		data->nd_len = 10 + i * 8;
		data->bufs[i - 1]._index &= ~NIDG_CHAIN;
		data->nd_cmdref = (u_int32_t)m;
#ifdef DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: sending %d bytes (%d segments)\n",
			    device_xname(sc->sc_dev), mlen, i);
#endif

		res = INSQTI(data, &gvp->nc_forw0);
		if (res == Q_EMPTY) {
			WAITREG(NI_PCR, PCR_OWN);
			NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
		}
	}
}
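
/*
 * Interrupt handler.  Drain the port's response queue: finished receives
 * are handed up the stack (after a fresh cluster has been loaded into the
 * descriptor), finished transmits free their mbuf and return the datagram
 * to the transmit free queue, and control-message replies are matched on
 * nm_opcode2 (the NI_WPARAM reply carries the station address).
 */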
void
niintr(void *arg)
{
	struct ni_softc *sc = arg;
	struct ni_dg *data;
	struct ni_msg *msg;
	struct ifnet *ifp = &sc->sc_if;
	struct ni_bbd *bd;
	struct mbuf *m;
	int idx, res;

	if ((NI_RREG(NI_PSR) & PSR_STATE) != PSR_ENABLED)
		return;

	if ((NI_RREG(NI_PSR) & PSR_ERR))
		printf("%s: PSR %x\n", device_xname(sc->sc_dev),
		    NI_RREG(NI_PSR));

	KERNEL_LOCK(1, NULL);
	/* Got any response packets? */
	while ((NI_RREG(NI_PSR) & PSR_RSQ) && (data = REMQHI(&gvp->nc_forwr))) {

		switch (data->nd_opcode) {
		case BVP_DGRAMRX: /* Receive datagram */
			idx = data->bufs[0]._index;
			bd = &bbd[idx];
			m = (void *)data->nd_cmdref;
			m->m_pkthdr.len = m->m_len =
			    data->bufs[0]._len - ETHER_CRC_LEN;
			m->m_pkthdr.rcvif = ifp;
			if (ni_add_rxbuf(sc, data, idx)) {
				bd->nb_len = (m->m_ext.ext_size - 2);
				bd->nb_pte =
				    (long)kvtopte(m->m_ext.ext_buf);
				bd->nb_status = 2 | NIBD_VALID;
				bd->nb_key = 1;
			}
			data->nd_len = RXADD;
			data->nd_status = 0;
			res = INSQTI(data, &fqb->nf_rforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
			}
			if (m == (void *)data->nd_cmdref)
				break; /* Out of mbufs */

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			(*ifp->if_input)(ifp, m);
			break;

		case BVP_DGRAM:
			m = (struct mbuf *)data->nd_cmdref;
			ifp->if_flags &= ~IFF_OACTIVE;
			m_freem(m);
			res = INSQTI(data, &fqb->nf_dforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
			}
			break;

		case BVP_MSGRX:
			msg = (struct ni_msg *)data;
			switch (msg->nm_opcode2) {
			case NI_WPARAM:
				memcpy(sc->sc_enaddr,
				    ((struct ni_param *)&msg->nm_text[0])->np_dpa,
				    ETHER_ADDR_LEN);
				endwait = 1;
				break;

			case NI_RCCNTR:
			case NI_CLPTDB:
			case NI_STPTDB:
				break;

			default:
				printf("Unkn resp %d\n",
				    msg->nm_opcode2);
				break;
			}
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
			break;

		default:
			printf("Unknown opcode %d\n", data->nd_opcode);
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
		}
	}

	/* Try to kick on the start routine again */
	nistart(ifp);

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~(PSR_OWN|PSR_RSQ));
	KERNEL_UNLOCK_ONE(NULL);
}

/*
 * Process an ioctl request.
 */
int
niioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ni_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			niinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * stop it.
			 */
			ifp->if_flags &= ~IFF_RUNNING;
			ni_setup(sc);
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			niinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc)
			 */
			ni_setup(sc);
			break;
		default:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ni_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx)
{
	struct ni_bbd *bd = &bbd[idx];
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
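
	/*
	 * Offset the data area by two bytes so that the IP header behind
	 * the 14-byte Ethernet header ends up longword aligned; the
	 * descriptor length and status carry the same 2-byte offset.
	 */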
	m->m_data += 2;
	bd->nb_len = (m->m_ext.ext_size - 2);
	bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf);
	bd->nb_status = 2 | NIBD_VALID;
	bd->nb_key = 1;

	data->bufs[0]._offset = 0;
	data->bufs[0]._len = bd->nb_len;
	data->bufs[0]._index = idx;
	data->nd_cmdref = (long)m;

	return (0);
}

/*
 * Create setup packet and put in queue for sending.
 */
void
ni_setup(struct ni_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, res;

	msg = REMQHI(&fqb->nf_mforw);
	if ((int)msg == Q_EMPTY)
		return; /* What to do? */

	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));

	msg->nm_opcode = BVP_MSG;
	msg->nm_len = 18;
	ptdb->np_index = 2;	/* definition type index */
	ptdb->np_fque = 2;	/* Free queue */
	if (ifp->if_flags & IFF_RUNNING) {
		msg->nm_opcode2 = NI_STPTDB;
		ptdb->np_type = ETHERTYPE_IP;
		ptdb->np_flags = PTDB_UNKN|PTDB_BDC;
		if (ifp->if_flags & IFF_PROMISC)
			ptdb->np_flags |= PTDB_PROMISC;
		memset(ptdb->np_mcast[0], 0xff, ETHER_ADDR_LEN); /* Broadcast */
		ptdb->np_adrlen = 1;
		msg->nm_len += 8;
		ifp->if_flags &= ~IFF_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) == 0) {
			ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
			i = 1;
			while (enm != NULL) {
				if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
					ifp->if_flags |= IFF_ALLMULTI;
					ptdb->np_flags |= PTDB_AMC;
					break;
				}
				msg->nm_len += 8;
				ptdb->np_adrlen++;
				memcpy(ptdb->np_mcast[i++], enm->enm_addrlo,
				    ETHER_ADDR_LEN);
				ETHER_NEXT_MULTI(step, enm);
			}
		}
	} else
		msg->nm_opcode2 = NI_CLPTDB;

	res = INSQTI(msg, &gvp->nc_forw0);
	if (res == Q_EMPTY) {
		WAITREG(NI_PCR, PCR_OWN);
		NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	}
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
nitimeout(struct ifnet *ifp)
{
#if 0
	struct ni_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n",
	    device_xname(sc->sc_dev));
	/*
	 * Do a reset of the interface, to get it going again.
	 * Will it work by just restarting the transmit logic?
	 */
	niinit(sc);
#endif
}

/*
 * Shutdown hook. Make sure the interface is stopped at reboot.
 */
void
ni_shutdown(void *arg)
{
	struct ni_softc *sc = arg;

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_SHUTDOWN);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);
}