/*	$NetBSD: if_ie.c,v 1.2 1994/12/15 21:08:06 gwr Exp $ */

/*-
 * Copyright (c) 1993, 1994 Charles Hannum.
 * Copyright (c) 1992, 1993, University of Vermont and State
 *  Agricultural College.
 * Copyright (c) 1992, 1993, Garrett A. Wollman.
 *
 * Portions:
 * Copyright (c) 1990, 1991, William F. Jolitz
 * Copyright (c) 1990, The Regents of the University of California
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum, by the
 *	University of Vermont and State Agricultural College and Garrett A.
 *	Wollman, by William F. Jolitz, and by the University of California,
 *	Berkeley, Lawrence Berkeley Laboratory, and its contributors.
 * 4. Neither the names of the Universities nor the names of the authors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel 82586 Ethernet chip
 * Register, bit, and structure definitions.
 *
 * Original StarLAN driver written by Garrett Wollman with reference to the
 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
 *
 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
 *
 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
 *
 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
 *
 * Converted to SUN ie driver by Charles D. Cranor, October 1994.
 */

/*
 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
 * sun-4/200's, and VME based suns.  The byte order is all wrong for a
 * SUN, making life difficult.  Programming this chip is mostly the same,
 * but certain details differ from system to system.  This driver is
 * written so that different "ie" interfaces can be controlled by the same
 * driver.
 */
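
/*
 * A minimal illustration of the byte-order problem (not used by the
 * driver): the m68k CPUs in these machines are big-endian, while the
 * 82586 expects the 16-bit fields of its shared data structures in
 * little-endian order, so every such value must be byte-swapped on its
 * way to or from chip memory.  The SWAP()/XSWAP() macros below do
 * exactly this.
 */
#if 0	/* illustrative sketch only, never compiled */
static unsigned short
example_swap16(unsigned short v)
{
	/* e.g. a frame length of 0x0040 is stored in chip memory as 0x4000 */
	return (unsigned short)((v >> 8) | (v << 8));
}
#endif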

/*
Mode of operation:

We run the 82586 in a standard Ethernet mode.  We keep NFRAMES
received frame descriptors around for the receiver to use, and
NRXBUF associated receive buffer descriptors, both in a circular
list.  Whenever a frame is received, we rotate both lists as
necessary.  (The 586 treats both lists as a simple queue.)  We also
keep a transmit command around so that packets can be sent off
quickly.

We configure the adapter in AL-LOC = 1 mode, which means that the
Ethernet/802.3 MAC header is placed at the beginning of the receive
buffer rather than being split off into various fields in the RFD.
This also means that we must include this header in the transmit
buffer as well.

By convention, all transmit commands, and only transmit commands,
shall have the I (IE_CMD_INTR) bit set in the command.  This way,
when an interrupt arrives at ie_intr(), it is immediately possible
to tell what precisely caused it.  ANY OTHER command-sending
routines should run at splimp(), and should post an acknowledgement
to every interrupt they generate.
*/

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/netisr.h>
#include <net/route.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include <vm/vm.h>

/*
 * ugly byte-order hack for SUNs
 */

#define SWAP(x) ((u_short)(XSWAP((u_short)(x))))
#define XSWAP(y) ( ((y) >> 8) | ((y) << 8) )

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/pmap.h>

#include "i82586.h"
#include "if_ie.h"
#include "if_ie_subr.h"

static struct mbuf *last_not_for_us;

/*
 * IED: ie debug flags
 */

#define IED_RINT	0x01
#define IED_TINT	0x02
#define IED_RNR		0x04
#define IED_CNA		0x08
#define IED_READFRAME	0x10
#define IED_ALL		0x1f

#define ETHER_MIN_LEN	64
#define ETHER_MAX_LEN	1518
#define ETHER_ADDR_LEN	6

int iewatchdog __P(( /* short */ ));
int ieinit __P((struct ie_softc *sc));
int ieioctl __P((struct ifnet *ifp, u_long command, caddr_t data));
int iestart __P((struct ifnet *ifp));
void iereset __P((struct ie_softc *));
static void ie_readframe __P((struct ie_softc *sc, int bufno));
static void ie_drop_packet_buffer __P((struct ie_softc *sc));
static int command_and_wait __P((struct ie_softc *sc, int command,
    void volatile *pcmd, int));
static void ierint __P((struct ie_softc *sc));
static void ietint __P((struct ie_softc *sc));
static void iernr __P((struct ie_softc *sc));
static void start_receiver __P((struct ie_softc *sc));
static int ieget __P((struct ie_softc *, struct mbuf **,
    struct ether_header *, int *));
static void setup_bufs __P((struct ie_softc *sc));
static int mc_setup __P((struct ie_softc *, caddr_t));
static void mc_reset __P((struct ie_softc *sc));

#ifdef IEDEBUG
void print_rbd __P((volatile struct ie_recv_buf_desc *rbd));
int in_ierint = 0;
int in_ietint = 0;
#endif

void ie_attach();

struct cfdriver iecd = {
	NULL, "ie", ie_md_match, ie_attach,
	DV_IFNET, sizeof(struct ie_softc),
};

/*
 * address generation macros
 *  MK_24 = KVA -> 24 bit address in SUN byte order
 *  MK_16 = KVA -> 16 bit address in INTEL byte order
 *  ST_24 = store a 24 bit address in SUN byte order to INTEL byte order
 */
#define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
#define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
#define ST_24(to, from) { \
	u_long fval = (u_long)(from); \
	u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
	t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0];*/ \
	}
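
/*
 * A worked example of the macros above (illustrative only): on these
 * big-endian machines a u_long holding the offset 0x00012345 sits in
 * memory as the bytes 00 01 23 45.  ST_24() copies those bytes in
 * reverse, so the chip sees 45 23 01, i.e. the low 24 bits in the
 * little-endian order the 82586 expects.  Likewise
 * MK_16(base, base + 0x1234) yields SWAP(0x1234) == 0x3412, which a
 * big-endian store lays out as the bytes 34 12.
 */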

/*
 * zero/copy functions: OBIO can use the normal functions, but VME
 * must do only byte or half-word (16 bit) accesses...
 */

/*
 * Here are a few useful functions.  We could have done these as macros,
 * but since we have the inline facility, it makes sense to use that
 * instead.
 */
static inline void
ie_setup_config(cmd, promiscuous)
	volatile struct ie_config_cmd *cmd;
	int promiscuous;
{

	/*
	 * these are all char's so don't swap them!
	 */
	cmd->ie_config_count = 0x0c;
	cmd->ie_fifo = 8;
	cmd->ie_save_bad = 0x40;
	cmd->ie_addr_len = 0x2e;
	cmd->ie_priority = 0;
	cmd->ie_ifs = 0x60;
	cmd->ie_slot_low = 0;
	cmd->ie_slot_high = 0xf2;
	cmd->ie_promisc = !!promiscuous;
	cmd->ie_crs_cdt = 0;
	cmd->ie_min_len = 64;
	cmd->ie_junk = 0xff;
}

static inline caddr_t
Align(ptr)
	caddr_t ptr;
{
	u_long l = (u_long)ptr;

	l = (l + 3) & ~3L;
	return (caddr_t)l;
}

static inline void
ie_ack(sc, mask)
	struct ie_softc *sc;
	u_int mask;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;

	command_and_wait(sc, scb->ie_status & mask, 0, 0);
}


/*
 * Taken almost exactly from Bill's if_is.c,
 * then modified beyond recognition...
 */
void
ie_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ie_softc *sc = (void *) self;
	struct ifnet *ifp = &sc->sc_if;

	/*
	 * Do machine-dependent parts of attach.
	 */
	ie_md_attach(parent, self, aux);
	printf(" hwaddr %s\n", ether_sprintf(sc->sc_addr));

	/*
	 * Setup for transmit/receive
	 */
	if (ie_setupram(sc) == 0) {
		printf(": RAM CONFIG FAILED!\n");
		/* XXX should reclaim resources? */
		return;
	}

	/*
	 * Initialize and attach S/W interface
	 */
	ifp->if_unit = sc->sc_dev.dv_unit;
	ifp->if_name = iecd.cd_name;
	ifp->if_output = ether_output;
	ifp->if_start = iestart;
	ifp->if_ioctl = ieioctl;
	ifp->if_watchdog = iewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IFF_NOTRAILERS
	/* XXX still compile when the blasted things are gone... */
	ifp->if_flags |= IFF_NOTRAILERS;
#endif

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif
}
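
/*
 * A note on the split between this file and the machine-dependent front
 * end: judging from their use below, ie_md_match()/ie_md_attach() are
 * expected to have filled in the bus-specific parts of the softc before
 * ie_setupram() runs -- sc_addr, sc_reg, hard_type, the scp/iscp/scb
 * pointers, sc_maddr/sc_iobase, buf_area/buf_area_sz, and the function
 * hooks (memcopy, memzero, chan_attn, reset_586, run_586).
 */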

/*
 * Device timeout/watchdog routine.  Entered if the device neglects to
 * generate an interrupt after a transmit has been started on it.
 */
int
iewatchdog(unit)
	short unit;
{
	struct ie_softc *sc = iecd.cd_devs[unit];

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_arpcom.ac_if.if_oerrors;

	iereset(sc);
}

/*
 * What to do upon receipt of an interrupt.
 */
int
ie_intr(v)
	void *v;
{
	struct ie_softc *sc = v;
	register u_short status;

	status = sc->scb->ie_status;

	/*
	 * check for parity error
	 */
	if (sc->hard_type == IE_VME) {
		volatile struct ievme *iev = (volatile struct ievme *)sc->sc_reg;
		if (iev->status & IEVME_PERR) {
			printf("%s: parity error (ctrl %x @ %02x%04x)\n",
			    sc->sc_dev.dv_xname, iev->pectrl,
			    iev->pectrl & IEVME_HADDR, iev->peaddr);
			iev->pectrl = iev->pectrl | IEVME_PARACK;
		}
	}
loop:
	if (status & (IE_ST_RECV | IE_ST_RNR)) {
#ifdef IEDEBUG
		in_ierint++;
		if (sc->sc_debug & IED_RINT)
			printf("%s: rint\n", sc->sc_dev.dv_xname);
#endif
		ierint(sc);
#ifdef IEDEBUG
		in_ierint--;
#endif
	}
	if (status & IE_ST_DONE) {
#ifdef IEDEBUG
		in_ietint++;
		if (sc->sc_debug & IED_TINT)
			printf("%s: tint\n", sc->sc_dev.dv_xname);
#endif
		ietint(sc);
#ifdef IEDEBUG
		in_ietint--;
#endif
	}
	if (status & IE_ST_RNR) {
#ifdef IEDEBUG
		if (sc->sc_debug & IED_RNR)
			printf("%s: rnr\n", sc->sc_dev.dv_xname);
#endif
		iernr(sc);
	}
#ifdef IEDEBUG
	if ((status & IE_ST_ALLDONE) && (sc->sc_debug & IED_CNA))
		printf("%s: cna\n", sc->sc_dev.dv_xname);
#endif

	/* Don't ack interrupts which we didn't receive */
	ie_ack(sc, IE_ST_WHENCE & status);

	if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
		goto loop;
	return 1;
}

/*
 * Process a received-frame interrupt.
 */
void
ierint(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i, status;
	static int timesthru = 1024;

	i = sc->rfhead;
	for (;;) {
		status = sc->rframes[i]->ie_fd_status;

		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
			sc->sc_arpcom.ac_if.if_ipackets++;
			if (!--timesthru) {
				sc->sc_arpcom.ac_if.if_ierrors +=
				    SWAP(scb->ie_err_crc) +
				    SWAP(scb->ie_err_align) +
				    SWAP(scb->ie_err_resource) +
				    SWAP(scb->ie_err_overrun);
				scb->ie_err_crc = scb->ie_err_align = 0;
				scb->ie_err_resource = 0;
				scb->ie_err_overrun = 0;
				timesthru = 1024;
			}
			ie_readframe(sc, i);
		} else {
			if ((status & IE_FD_RNR) != 0 &&
			    (scb->ie_status & IE_RU_READY) == 0) {
				sc->rframes[0]->ie_fd_next =
				    MK_16(sc->sc_maddr, sc->rbuffs[0]);
				scb->ie_recv_list =
				    MK_16(sc->sc_maddr, sc->rframes[0]);
				command_and_wait(sc, IE_RU_START, 0, 0);
			}
			break;
		}
		i = (i + 1) % sc->nframes;
	}
}

/*
 * Process a command-complete interrupt.  These are only generated by
 * the transmission of frames.  This routine is deceptively simple, since
 * most of the real work is done by iestart().
 */
void
ietint(sc)
	struct ie_softc *sc;
{
	int status;
	int i;

	sc->sc_arpcom.ac_if.if_timer = 0;
	sc->sc_arpcom.ac_if.if_flags &= ~IFF_OACTIVE;

	for (i = 0; i < sc->xmit_count; i++) {
		status = sc->xmit_cmds[i]->ie_xmit_status;

		if (status & IE_XS_LATECOLL) {
			printf("%s: late collision\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_collisions++;
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_NOCARRIER) {
			printf("%s: no carrier\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_LOSTCTS) {
			printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_UNDERRUN) {
			printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_EXCMAX) {
			printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_collisions += 16;
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else {
			sc->sc_arpcom.ac_if.if_opackets++;
			sc->sc_arpcom.ac_if.if_collisions +=
			    SWAP(status & IE_XS_MAXCOLL);
		}
	}
	sc->xmit_count = 0;

	/*
	 * If multicast addresses were added or deleted while we
	 * were transmitting, mc_reset() set the want_mcsetup flag
	 * indicating that we should do it.
	 */
	if (sc->want_mcsetup) {
		mc_setup(sc, (caddr_t)sc->xmit_cbuffs[0]);
		sc->want_mcsetup = 0;
	}
	/* Wish I knew why this seems to be necessary... */
	sc->xmit_cmds[0]->ie_xmit_status |= IE_STAT_COMPL;

	iestart(&sc->sc_arpcom.ac_if);
}

/*
 * Process a receiver-not-ready interrupt.  I believe that we get these
 * when there aren't enough buffers to go around.  For now (FIXME), we
 * just restart the receiver, and hope everything's ok.
 */
void
iernr(sc)
	struct ie_softc *sc;
{

	command_and_wait(sc, IE_RU_DISABLE, 0, 0);	/* just in case */
	setup_bufs(sc);

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);	/* was ENABLE */

	ie_ack(sc, IE_ST_WHENCE);

	sc->sc_arpcom.ac_if.if_ierrors++;
}

/*
 * Compare two Ether/802 addresses for equality, inlined and
 * unrolled for speed.  I'd love to have an inline assembler
 * version of this...
 */
static inline int
ether_equal(one, two)
	u_char *one, *two;
{

	if (one[0] != two[0] || one[1] != two[1] || one[2] != two[2] ||
	    one[3] != two[3] || one[4] != two[4] || one[5] != two[5])
		return 0;
	return 1;
}

/*
 * Check for a valid address.  to_bpf is filled in with one of the following:
 *	0 -> BPF doesn't get this packet
 *	1 -> BPF does get this packet
 *	2 -> BPF does get this packet, but we don't
 * Return value is true if the packet is for us, and false otherwise.
 *
 * This routine is a mess, but it's also critical that it be as fast
 * as possible.  It could be made cleaner if we can assume that the
 * only client which will fiddle with IFF_PROMISC is BPF.  This is
 * probably a good assumption, but we do not make it here.  (Yet.)
 */
static inline int
check_eh(sc, eh, to_bpf)
	struct ie_softc *sc;
	struct ether_header *eh;
	int *to_bpf;
{
	int i;

	switch (sc->promisc) {
	case IFF_ALLMULTI:
		/*
		 * Receiving all multicasts, but no unicasts except those
		 * destined for us.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		if (eh->ether_dhost[0] & 1)
			return 1;
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;
		return 0;

	case IFF_PROMISC:
		/*
		 * Receiving all packets.  These need to be passed on to BPF.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* If for us, accept and hand up to BPF */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;	/* we don't need to see it */
#endif

		/*
		 * Not a multicast, so BPF wants to see it but we don't.
		 */
		if (!(eh->ether_dhost[0] & 1))
			return 1;

		/*
		 * If it's one of our multicast groups, accept it and pass it
		 * up.
		 */
		for (i = 0; i < sc->mcast_count; i++) {
			if (ether_equal(eh->ether_dhost,
			    (u_char *)&sc->mcast_addrs[i])) {
#if NBPFILTER > 0
				if (*to_bpf)
					*to_bpf = 1;
#endif
				return 1;
			}
		}
		return 1;

	case IFF_ALLMULTI | IFF_PROMISC:
		/*
		 * Acting as a multicast router, and BPF running at the same
		 * time.  Whew!  (Hope this is a fast machine...)
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* We want to see multicasts. */
		if (eh->ether_dhost[0] & 1)
			return 1;

		/* We want to see our own packets */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

		/* Anything else goes to BPF but nothing else. */
#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;
#endif
		return 1;

	default:
		/*
		 * Only accept unicast packets destined for us, or multicasts
		 * for groups that we belong to.  For now, we assume that the
		 * '586 will only return packets that we asked it for.  This
		 * isn't strictly true (it uses hashing for the multicast filter),
		 * but it will do in this case, and we want to get out of here
		 * as quickly as possible.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		return 1;
	}
	return 0;
}

/*
 * We want to isolate the bits that have meaning...  This assumes that
 * IE_RBUF_SIZE is an even power of two.  If somehow the act_len exceeds
 * the size of the buffer, then we are screwed anyway.
 */
static inline int
ie_buflen(sc, head)
	struct ie_softc *sc;
	int head;
{

	return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
	    & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
}

static inline int
ie_packet_len(sc)
	struct ie_softc *sc;
{
	int i;
	int head = sc->rbhead;
	int acc = 0;

	do {
		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return -1;
		}
		i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;

		acc += ie_buflen(sc, head);
		head = (head + 1) % sc->nrxbuf;
	} while (!i);

	return acc;
}
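
/*
 * Worked example for ie_buflen() above, with an assumed buffer size: if
 * IE_RBUF_SIZE were 512 (0x200), the mask would be 0x200 | 0x1ff == 0x3ff,
 * which keeps any actual count from 0 through 512 inclusive while
 * stripping the flag bits (IE_RBD_USED, IE_RBD_LAST) that the chip keeps
 * in the top of ie_rbd_actual.
 */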

/*
 * Read data off the interface, and turn it into an mbuf chain.
 *
 * This code is DRAMATICALLY different from the previous version; this
 * version tries to allocate the entire mbuf chain up front, given the
 * length of the data available.  This enables us to allocate mbuf
 * clusters in many situations where before we would have had a long
 * chain of partially-full mbufs.  This should help to speed up the
 * operation considerably.  (Provided that it works, of course.)
 */
static inline int
ieget(sc, mp, ehp, to_bpf)
	struct ie_softc *sc;
	struct mbuf **mp;
	struct ether_header *ehp;
	int *to_bpf;
{
	struct mbuf *m, *top, **mymp;
	int i;
	int offset;
	int totlen, resid;
	int thismboff;
	int head;

	totlen = ie_packet_len(sc);
	if (totlen <= 0)
		return -1;

	i = sc->rbhead;

	/*
	 * Snarf the Ethernet header.
	 */
	(sc->memcopy)((caddr_t)sc->cbuffs[i], (caddr_t)ehp, sizeof *ehp);

	/*
	 * As quickly as possible, check if this packet is for us.
	 * If not, don't waste a single cycle copying the rest of the
	 * packet in.
	 * This is only a consideration when FILTER is defined; i.e., when
	 * we are either running BPF or doing multicasting.
	 */
	if (!check_eh(sc, ehp, to_bpf)) {
		ie_drop_packet_buffer(sc);
		sc->sc_arpcom.ac_if.if_ierrors--;	/* just this case, it's
							 * not an error */
		return -1;
	}
	totlen -= (offset = sizeof *ehp);

	MGETHDR(*mp, M_DONTWAIT, MT_DATA);
	if (!*mp) {
		ie_drop_packet_buffer(sc);
		return -1;
	}
	m = *mp;
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
	m->m_len = MHLEN;
	resid = m->m_pkthdr.len = totlen;
	top = 0;
	mymp = &top;

	/*
	 * This loop goes through and allocates mbufs for all the data we will
	 * be copying in.  It does not actually do the copying yet.
	 */
	do {			/* while (resid > 0) */
		/*
		 * Try to allocate an mbuf to hold the data that we have.  If
		 * we already allocated one, just get another one and stick it
		 * on the end (eventually).  If we don't already have one, try
		 * to allocate an mbuf cluster big enough to hold the whole
		 * packet, if we think it's reasonable, or a single mbuf which
		 * may or may not be big enough.  Got that?
		 */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				m_freem(top);
				ie_drop_packet_buffer(sc);
				return -1;
			}
			m->m_len = MLEN;
		}
		if (resid >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = min(resid, MCLBYTES);
		} else {
			if (resid < m->m_len) {
				if (!top && resid + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = resid;
			}
		}
		resid -= m->m_len;
		*mymp = m;
		mymp = &m->m_next;
	} while (resid > 0);

	resid = totlen;
	m = top;
	thismboff = 0;
	head = sc->rbhead;

	/*
	 * Now we take the mbuf chain (hopefully only one mbuf most of the
	 * time) and stuff the data into it.  There are no possible failures
	 * at or after this point.
	 */
	while (resid > 0) {		/* while there's stuff left */
		int thislen = ie_buflen(sc, head) - offset;

		/*
		 * If too much data for the current mbuf, then fill the current one
		 * up, go to the next one, and try again.
		 */
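		/*
		 * (Illustration, with made-up numbers: if this receive buffer
		 * holds 200 bytes past `offset' but only 120 bytes of room
		 * remain in the current mbuf, we copy 120 here, move to the
		 * next mbuf, and come around again with offset advanced by
		 * 120; once the remainder fits, one of the two cases below
		 * finishes it and we advance to the next receive buffer.)
		 */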
		if (thislen > m->m_len - thismboff) {
			int newlen = m->m_len - thismboff;

			(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)newlen);
			m = m->m_next;
			thismboff = 0;		/* new mbuf, so no offset */
			offset += newlen;	/* we are now this far into
						 * the packet */
			resid -= newlen;	/* so there is this much left
						 * to get */
			continue;
		}
		/*
		 * If there is more than enough space in the mbuf to hold the
		 * contents of this buffer, copy everything in, advance pointers,
		 * and so on.
		 */
		if (thislen < m->m_len - thismboff) {
			(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)thislen);
			thismboff += thislen;	/* we are this far into the
						 * mbuf */
			resid -= thislen;	/* and this much is left */
			goto nextbuf;
		}
		/*
		 * Otherwise, there is exactly enough space to put this buffer's
		 * contents into the current mbuf.  Do the combination of the above
		 * actions.
		 */
		(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
		    mtod(m, caddr_t) + thismboff, (u_int)thislen);
		m = m->m_next;
		thismboff = 0;		/* new mbuf, start at the beginning */
		resid -= thislen;	/* and we are this far through */

		/*
		 * Advance all the pointers.  We can get here from either of the
		 * last two cases, but never the first.
		 */
	nextbuf:
		offset = 0;
		sc->rbuffs[head]->ie_rbd_actual = SWAP(0);
		sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbhead = head = (head + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	}

	/*
	 * Unless something changed strangely while we were doing the copy,
	 * we have now copied everything in from the shared memory.
	 * This means that we are done.
	 */
	return 0;
}

/*
 * Read frame NUM from unit UNIT (pre-cached as IE).
 *
 * This routine reads the RFD at NUM, and copies in the buffers from
 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
 * doesn't start complaining.  Trailers are DROPPED---there's no point
 * in wasting time on confusing code to deal with them.  Hopefully,
 * this machine will never ARP for trailers anyway.
 */
static void
ie_readframe(sc, num)
	struct ie_softc *sc;
	int num;			/* frame number to read */
{
	struct ie_recv_frame_desc rfd;
	struct mbuf *m = 0;
	struct ether_header eh;
#if NBPFILTER > 0
	int bpf_gets_it = 0;
#endif

	(sc->memcopy)((caddr_t)(sc->rframes[num]), &rfd,
	    sizeof(struct ie_recv_frame_desc));

	/*
	 * Immediately advance the RFD list, since we have copied ours now.
	 */
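	/*
	 * The convention here (and for the receive-buffer ring) is that
	 * exactly one descriptor carries the end-of-list mark at any time:
	 * the entry just consumed becomes the new tail and gets the mark,
	 * and the mark is cleared from the previous tail, so the chip
	 * always has the rest of the ring available.
	 */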
	sc->rframes[num]->ie_fd_status = SWAP(0);
	sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
	sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
	sc->rftail = (sc->rftail + 1) % sc->nframes;
	sc->rfhead = (sc->rfhead + 1) % sc->nframes;

	if (rfd.ie_fd_status & IE_FD_OK) {
#if NBPFILTER > 0
		if (ieget(sc, &m, &eh, &bpf_gets_it)) {
#else
		if (ieget(sc, &m, &eh, 0)) {
#endif
			sc->sc_arpcom.ac_if.if_ierrors++;
			return;
		}
	}
#ifdef IEDEBUG
	if (sc->sc_debug & IED_READFRAME)
		printf("%s: frame from ether %s type %x\n", sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
#endif

	if (!m)
		return;

	if (last_not_for_us) {
		m_freem(last_not_for_us);
		last_not_for_us = 0;
	}
#if NBPFILTER > 0
	/*
	 * Check for a BPF filter; if so, hand it up.
	 * Note that we have to stick an extra mbuf up front, because
	 * bpf_mtap expects to have the ether header at the front.
	 * It doesn't matter that this results in an ill-formatted mbuf chain,
	 * since BPF just looks at the data.  (It doesn't try to free the mbuf,
	 * tho' it will make a copy for tcpdump.)
	 */
	if (bpf_gets_it) {
		struct mbuf m0;

		m0.m_len = sizeof eh;
		m0.m_data = (caddr_t)&eh;
		m0.m_next = m;

		/* Pass it up */
		bpf_mtap(sc->sc_arpcom.ac_if.if_bpf, &m0);
	}
	/*
	 * A signal passed up from the filtering code indicating that the
	 * packet is intended for BPF but not for the protocol machinery.
	 * We can save a few cycles by not handing it off to them.
	 */
	if (bpf_gets_it == 2) {
		last_not_for_us = m;
		return;
	}
#endif /* NBPFILTER > 0 */

	/*
	 * In here there used to be code to check destination addresses upon
	 * receipt of a packet.  We have deleted that code, and replaced it
	 * with code to check the address much earlier in the cycle, before
	 * copying the data in; this saves us valuable cycles when operating
	 * as a multicast router or when using BPF.
	 */

	/*
	 * Finally pass this packet up to higher layers.
	 */
	ether_input(&sc->sc_arpcom.ac_if, &eh, m);
}

static void
ie_drop_packet_buffer(sc)
	struct ie_softc *sc;
{
	int i;

	do {
		/*
		 * This means we are somehow out of sync.  So, we reset the
		 * adapter.
		 */
		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return;
		}
		i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;

		sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
		sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	} while (!i);
}

/*
 * Start transmission on an interface.
 */
int
iestart(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = iecd.cd_devs[ifp->if_unit];
	struct mbuf *m0, *m;
	u_char *buffer;
	u_short len;
	/*
	 * This is not really volatile, in this routine, but it makes gcc
	 * happy.
	 */
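	/*
	 * bptr walks the command list: it points first at the SCB's
	 * command-list word and thereafter at the previous transmit
	 * command's link field, so each queued XMIT command gets chained
	 * onto the one before it.
	 */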
	volatile u_short *bptr = &sc->scb->ie_command_list;

	if ((ifp->if_flags ^ IFF_RUNNING) & (IFF_RUNNING | IFF_OACTIVE))
		return 0;

	do {
		IF_DEQUEUE(&sc->sc_arpcom.ac_if.if_snd, m);
		if (!m)
			break;

		buffer = sc->xmit_cbuffs[sc->xmit_count];
		len = 0;

		for (m0 = m; m && len < IE_TBUF_SIZE; m = m->m_next) {
			(sc->memcopy)(mtod(m, caddr_t), buffer, m->m_len);
			buffer += m->m_len;
			len += m->m_len;
		}

		m_freem(m0);
		len = max(len, ETHER_MIN_LEN);

#if NBPFILTER > 0
		/*
		 * See if bpf is listening on this interface, let it see the packet
		 * before we commit it to the wire.
		 */
		if (sc->sc_arpcom.ac_if.if_bpf)
			bpf_tap(sc->sc_arpcom.ac_if.if_bpf,
			    sc->xmit_cbuffs[sc->xmit_count],
			    len);
#endif

		sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags =
		    IE_XMIT_LAST | SWAP(len);
		sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = SWAP(0xffff);
		ST_24(sc->xmit_buffs[sc->xmit_count]->ie_xmit_buf,
		    MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xmit_count]));

		sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_cmd = IE_CMD_XMIT;
		sc->xmit_cmds[sc->xmit_count]->ie_xmit_status = SWAP(0);
		sc->xmit_cmds[sc->xmit_count]->ie_xmit_desc =
		    MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xmit_count]);

		*bptr = MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xmit_count]);
		bptr = &sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_link;
	} while (++sc->xmit_count < NTXBUF);

	/*
	 * If we queued up anything for transmission, send it.
	 */
	if (sc->xmit_count) {
		sc->xmit_cmds[sc->xmit_count - 1]->com.ie_cmd_cmd |=
		    IE_CMD_LAST | IE_CMD_INTR;

		/*
		 * By passing the command pointer as a null, we tell
		 * command_and_wait() to pretend that this isn't an action
		 * command.  I wish I understood what was happening here.
		 */
		command_and_wait(sc, IE_CU_START, 0, 0);
		ifp->if_flags |= IFF_OACTIVE;
	}
}

/*
 * set up IE's ram space
 */
int
ie_setupram(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int s;

	s = splimp();

	scp = sc->scp;
	(sc->memzero)((char *) scp, sizeof *scp);

	iscp = sc->iscp;
	(sc->memzero)((char *) iscp, sizeof *iscp);

	scb = sc->scb;
	(sc->memzero)((char *) scb, sizeof *scb);

	scp->ie_bus_use = 0;	/* 16-bit */
	ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));

	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
	ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));

	(sc->reset_586) (sc);
	(sc->chan_attn) (sc);

	delay(100);		/* wait a while... */

	if (iscp->ie_busy) {
		splx(s);
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);
	splx(s);

	return 1;
}
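
/*
 * A note on the handshake above: the SCP (wherever the front end has
 * placed it for the chip to find) points at the ISCP, the ISCP carries
 * both the base of shared memory and the offset of the SCB, and the chip
 * signals that it has read them by clearing ie_busy after a hardware
 * reset plus a channel attention.  If ie_busy is still set after the
 * delay, the shared RAM is not set up correctly, ie_setupram() fails,
 * and ie_attach() gives up.
 */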

void
iereset(sc)
	struct ie_softc *sc;
{
	int s = splimp();

	printf("%s: reset\n", sc->sc_dev.dv_xname);
	sc->sc_arpcom.ac_if.if_flags &= ~IFF_UP;
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	/*
	 * Stop i82586 dead in its tracks.
	 */
	if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
		printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);

	if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
		printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);

	sc->sc_arpcom.ac_if.if_flags |= IFF_UP;
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	splx(s);
}

/*
 * This is called if we time out.
 */
static void
chan_attn_timeout(rock)
	caddr_t rock;
{

	*(int *) rock = 1;
}

/*
 * Send a command to the controller and wait for it to either
 * complete or be accepted, depending on the command.  If the
 * command pointer is null, then pretend that the command is
 * not an action command.  If the command pointer is not null,
 * and the command is an action command, wait for
 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
 * to become true.
 */
static int
command_and_wait(sc, cmd, pcmd, mask)
	struct ie_softc *sc;
	int cmd;
	volatile void *pcmd;
	int mask;
{
	volatile struct ie_cmd_common *cc = pcmd;
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	volatile int timedout = 0;
	extern int hz;

	scb->ie_command = (u_short)cmd;

	if (IE_ACTION_COMMAND(cmd) && pcmd) {
		(sc->chan_attn) (sc);

		/*
		 * XXX
		 * I don't think this timeout works on suns.
		 * we are at splimp() in the loop, and the timeout
		 * stuff runs at software spl (so it is masked off?).
		 */

		/*
		 * According to the packet driver, the minimum timeout should be
		 * .369 seconds, which we round up to .4.
		 */
		timeout(chan_attn_timeout, (caddr_t)&timedout, 2 * hz / 5);

		/*
		 * Now spin-lock waiting for status.  This is not a very nice
		 * thing to do, but I haven't figured out how, or indeed if, we
		 * can put the process waiting for action to sleep.  (We may
		 * be getting called through some other timeout running in the
		 * kernel.)
		 */
		for (;;)
			if ((cc->ie_cmd_status & mask) || timedout)
				break;

		untimeout(chan_attn_timeout, (caddr_t)&timedout);

		return timedout;
	} else {

		/*
		 * Otherwise, just wait for the command to be accepted.
		 */
		(sc->chan_attn) (sc);

		while (scb->ie_command);	/* spin lock */

		return 0;
	}
}
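
/*
 * Callers use command_and_wait() in two ways: bare SCB control commands
 * (ie_ack(), IE_RU_START and friends) pass a null pcmd and just wait for
 * the chip to accept the command word, while action commands (CONFIGURE,
 * IA-SETUP, MC-SETUP, TDR) pass their command block and IE_STAT_COMPL so
 * the caller spins until the chip completes the command or the 0.4 second
 * timeout fires.
 */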

/*
 * Run the time-domain reflectometer...
 */
static void
run_tdr(sc, cmd)
	struct ie_softc *sc;
	struct ie_tdr_cmd *cmd;
{
	int result;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	cmd->ie_tdr_time = SWAP(0);

	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK))
		result = 0x10000;	/* XXX */
	else
		result = cmd->ie_tdr_time;

	ie_ack(sc, IE_ST_WHENCE);

	if (result & IE_TDR_SUCCESS)
		return;

	if (result & 0x10000) {
		printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
	} else if (result & IE_TDR_XCVR) {
		printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
	} else if (result & IE_TDR_OPEN) {
		printf("%s: TDR detected an open %d clocks away\n",
		    sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
	} else if (result & IE_TDR_SHORT) {
		printf("%s: TDR detected a short %d clocks away\n",
		    sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
	} else {
		printf("%s: TDR returned unknown status %x\n",
		    sc->sc_dev.dv_xname, result);
	}
}

static void
start_receiver(sc)
	struct ie_softc *sc;
{
	int s = splimp();

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	splx(s);
}

/*
 * setup_bufs: set up the buffers
 *
 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers.  the chip indexes its control data
 * structures with 16 bit offsets, and it indexes actual buffers with
 * 24 bit addresses.  so we should allocate control buffers first so that
 * we don't overflow the 16 bit offset field.  The number of transmit
 * buffers is fixed at compile time.
 *
 * note: this function was written to be easy to understand, rather than
 * highly efficient (it isn't in the critical path).
 */
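/*
 * The arithmetic, in outline: each transmit slot costs
 *	Align(sizeof(struct ie_xmit_cmd)) + Align(sizeof(struct ie_xmit_buf))
 *	    + IE_TBUF_SIZE
 * bytes and there are NTXBUF of them; whatever is left over is divided by
 * the cost of one receive frame,
 *	Align(sizeof(struct ie_recv_frame_desc)) +
 *	    B_PER_F * (Align(sizeof(struct ie_recv_buf_desc)) + IE_RBUF_SIZE),
 * to give nframes (clamped to MXFRAMES), with nrxbuf = nframes * B_PER_F.
 */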
static void
setup_bufs(sc)
	struct ie_softc *sc;
{
	caddr_t ptr = sc->buf_area;	/* memory pool */
	volatile struct ie_recv_frame_desc *rfd = (void *) ptr;
	volatile struct ie_recv_buf_desc *rbd;
	int n, r;

	/*
	 * step 0: zero memory and figure out how many recv buffers and
	 * frames we can have.  XXX CURRENTLY HARDWIRED AT MAX
	 */
	(sc->memzero)(ptr, sc->buf_area_sz);
	ptr = Align(ptr);		/* set alignment and stick with it */

	n = (int)Align(sizeof(struct ie_xmit_cmd)) +
	    (int)Align(sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
	n *= NTXBUF;			/* n = total size of xmit area */

	n = sc->buf_area_sz - n;	/* n = free space for recv stuff */

	r = (int)Align(sizeof(struct ie_recv_frame_desc)) +
	    (((int)Align(sizeof(struct ie_recv_buf_desc)) + IE_RBUF_SIZE) * B_PER_F);

	/* r = size of one R frame */

	sc->nframes = n / r;
	if (sc->nframes <= 0)
		panic("ie: bogus buffer calc\n");
	if (sc->nframes > MXFRAMES)
		sc->nframes = MXFRAMES;

	sc->nrxbuf = sc->nframes * B_PER_F;

#ifdef IEDEBUG
	printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
#endif

	/*
	 * step 1a: lay out and zero frame data structures for transmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_cmd));
	}

	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_frame_desc));
	}

	/*
	 * step 1b: link together the recv frames and set EOL on last one
	 */
	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n]->ie_fd_next =
		    MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
	}
	sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;

	/*
	 * step 2a: lay out and zero frame buffer structures for xmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_buf));
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_buf_desc));
	}

	/*
	 * step 2b: link together recv bufs and set EOL on last one
	 */
	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n]->ie_rbd_next =
		    MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
	}
	sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= IE_RBD_LAST;

	/*
	 * step 3: allocate the actual data buffers for xmit and recv
	 * recv buffer gets linked into recv_buf_desc list here
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cbuffs[n] = (u_char *) ptr;
		ptr = Align(ptr + IE_TBUF_SIZE);
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->cbuffs[n] = (char *) ptr;	/* XXX why char vs uchar? */
		sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
		ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
		ptr = Align(ptr + IE_RBUF_SIZE);
	}

	/*
	 * step 4: set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used.  link in recv frames
	 * and buffer into the scb.
	 */

	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);

#ifdef IEDEBUG
	printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
#endif
}
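
/*
 * How the multicast machinery fits together: ieioctl() calls mc_reset()
 * when the address list changes; mc_reset() either copies the addresses
 * into sc->mcast_addrs or, if the list is too long or contains a range,
 * falls back to IFF_ALLMULTI, and in either case sets want_mcsetup.  The
 * actual MC-SETUP command is then issued from ietint(), using transmit
 * buffer 0 as scratch space, after the pending transmit commands have
 * completed.
 */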

/*
 * Run the multicast setup command.
 * Called at splimp().
 */
static int
mc_setup(sc, ptr)
	struct ie_softc *sc;
	caddr_t ptr;
{
	volatile struct ie_mcast_cmd *cmd = (void *) ptr;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	(sc->memcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
	    sc->mcast_count * sizeof *sc->mcast_addrs);

	cmd->ie_mcast_bytes =
	    SWAP(sc->mcast_count * ETHER_ADDR_LEN);	/* grrr... */

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
		printf("%s: multicast address setup command failed\n",
		    sc->sc_dev.dv_xname);
		return 0;
	}
	return 1;
}

/*
 * This routine inits the ie.
 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
 * starting the receiver unit, and clearing interrupts.
 *
 * THIS ROUTINE MUST BE CALLED AT splimp() OR HIGHER.
 */
int
ieinit(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	caddr_t ptr;
	int n;

	ptr = sc->buf_area;

	/*
	 * Send the configure command first.
	 */
	{
		volatile struct ie_config_cmd *cmd = (void *) ptr;

		ie_setup_config(cmd, sc->promisc);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: configure command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}
	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		volatile struct ie_iasetup_cmd *cmd = (void *) ptr;

		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		(sc->memcopy)(sc->sc_arpcom.ac_enaddr,
		    (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: individual address setup command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	run_tdr(sc, (void *) ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	setup_bufs(sc);

	/*
	 * This must be coordinated with iestart() and ietint().
	 */
	sc->xmit_cmds[0]->ie_xmit_status = IE_STAT_COMPL;

	sc->sc_arpcom.ac_if.if_flags |= IFF_RUNNING;
	/* tell higher levels that we are here */

	start_receiver(sc);
	if (sc->run_586)
		(sc->run_586) (sc);

	return 0;
}

static void
iestop(sc)
	struct ie_softc *sc;
{

	command_and_wait(sc, IE_RU_DISABLE, 0, 0);
}

int
ieioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ie_softc *sc = iecd.cd_devs[ifp->if_unit];
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			/*
			 * See if another station has *our* IP address.
			 * i.e.: There is an address conflict!  If a
			 * conflict exists, a message is sent to the
			 * console.
			 */
			sc->sc_arpcom.ac_ipaddr = IA_SIN(ifa)->sin_addr;
			arpwhohas(&sc->sc_arpcom, &IA_SIN(ifa)->sin_addr);
			break;
#endif
#ifdef NS
		/* XXX - This code is probably wrong. */
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *) (sc->sc_arpcom.ac_enaddr);
			else
				bcopy(ina->x_host.c_host,
				    sc->sc_arpcom.ac_enaddr,
				    sizeof(sc->sc_arpcom.ac_enaddr));
			/* Set new address. */
			ieinit(sc);
			break;
		    }
#endif /* NS */
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);

		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return error;
}

static void
mc_reset(sc)
	struct ie_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Step through the list of addresses.
	 */
	sc->mcast_count = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm) {
		if (sc->mcast_count >= MAXMCAST ||
		    bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			sc->sc_arpcom.ac_if.if_flags |= IFF_ALLMULTI;
			ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, (void *)0);
			goto setflag;
		}
		bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
		sc->mcast_count++;
		ETHER_NEXT_MULTI(step, enm);
	}
setflag:
	sc->want_mcsetup = 1;
}

#ifdef IEDEBUG
void
print_rbd(rbd)
	volatile struct ie_recv_buf_desc *rbd;
{

	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
	    "length %04x, mbz %04x\n", (u_long)rbd, rbd->ie_rbd_actual,
	    rbd->ie_rbd_next, rbd->ie_rbd_buffer, rbd->ie_rbd_length,
	    rbd->mbz);
}
#endif