/* $NetBSD: if_ie.c,v 1.1 1994/12/12 18:59:07 gwr Exp $ */

/*-
 * Copyright (c) 1993, 1994 Charles Hannum.
 * Copyright (c) 1992, 1993, University of Vermont and State
 *  Agricultural College.
 * Copyright (c) 1992, 1993, Garrett A. Wollman.
 *
 * Portions:
 * Copyright (c) 1990, 1991, William F. Jolitz
 * Copyright (c) 1990, The Regents of the University of California
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum, by the
 *	University of Vermont and State Agricultural College and Garrett A.
 *	Wollman, by William F. Jolitz, and by the University of California,
 *	Berkeley, Lawrence Berkeley Laboratory, and its contributors.
 * 4. Neither the names of the Universities nor the names of the authors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel 82586 Ethernet chip
 * Register, bit, and structure definitions.
 *
 * Original StarLAN driver written by Garrett Wollman with reference to the
 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
 *
 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
 *
 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
 *
 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
 *
 * Converted to SUN ie driver by Charles D. Cranor, October 1994.
 */

/*
 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
 * sun-4/200's, and VME based suns.  The byte order is all wrong for a
 * SUN, making life difficult.  Programming this chip is mostly the same,
 * but certain details differ from system to system.  This driver is
 * written so that different "ie" interfaces can be controlled by the same
 * driver.
 */

/*
Mode of operation:

We run the 82586 in a standard Ethernet mode.  We keep NFRAMES
received frame descriptors around for the receiver to use, and
NRXBUF associated receive buffer descriptors, both in a circular
list.  Whenever a frame is received, we rotate both lists as
necessary.  (The 586 treats both lists as a simple queue.)  We also
keep a transmit command around so that packets can be sent off
quickly.

We configure the adapter in AL-LOC = 1 mode, which means that the
Ethernet/802.3 MAC header is placed at the beginning of the receive
buffer rather than being split off into various fields in the RFD.
This also means that we must include this header in the transmit
buffer as well.

By convention, all transmit commands, and only transmit commands,
shall have the I (IE_CMD_INTR) bit set in the command.  This way,
when an interrupt arrives at ie_intr(), it is immediately possible
to tell what precisely caused it.  ANY OTHER command-sending
routines should run at splimp(), and should post an acknowledgement
to every interrupt they generate.
*/
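
/*
 * Illustrative sketch (not compiled): how the circular RFD list described
 * above is rotated once a frame has been consumed.  The real work is done
 * in ie_readframe() below; "rframes", "head", "tail" and "nframes" stand
 * for sc->rframes, sc->rfhead, sc->rftail and sc->nframes.
 */
#if 0
	/* recycle the frame descriptor we just emptied ... */
	rframes[head]->ie_fd_status = SWAP(0);
	rframes[head]->ie_fd_last |= IE_FD_LAST;	/* new end of list */
	rframes[tail]->ie_fd_last &= ~IE_FD_LAST;	/* old end rejoins it */
	tail = (tail + 1) % nframes;
	head = (head + 1) % nframes;
#endif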

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/netisr.h>
#include <net/route.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include <vm/vm.h>

/*
 * ugly byte-order hack for SUNs
 */

#define SWAP(x)  ((u_short)(XSWAP((u_short)(x))))
#define XSWAP(y) ( ((y) >> 8) | ((y) << 8) )

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/pmap.h>

#include "i82586.h"
#include "if_ie.h"
#include "if_ie_subr.h"

static struct mbuf *last_not_for_us;

/*
 * IED: ie debug flags
 */

#define IED_RINT	0x01
#define IED_TINT	0x02
#define IED_RNR		0x04
#define IED_CNA		0x08
#define IED_READFRAME	0x10
#define IED_ALL		0x1f

#define ETHER_MIN_LEN	64
#define ETHER_MAX_LEN	1518
#define ETHER_ADDR_LEN	6

int iewatchdog __P(( /* short */ ));
int ieinit __P((struct ie_softc *sc));
int ieioctl __P((struct ifnet *ifp, u_long command, caddr_t data));
int iestart __P((struct ifnet *ifp));
void iereset __P((struct ie_softc *));
static void ie_readframe __P((struct ie_softc *sc, int bufno));
static void ie_drop_packet_buffer __P((struct ie_softc *sc));
static int command_and_wait __P((struct ie_softc *sc, int command,
    void volatile *pcmd, int));
static void ierint __P((struct ie_softc *sc));
static void ietint __P((struct ie_softc *sc));
static void iernr __P((struct ie_softc *sc));
static void start_receiver __P((struct ie_softc *sc));
static int ieget __P((struct ie_softc *, struct mbuf **,
    struct ether_header *, int *));
static void setup_bufs __P((struct ie_softc *sc));
static int mc_setup __P((struct ie_softc *, caddr_t));
static void mc_reset __P((struct ie_softc *sc));

#ifdef IEDEBUG
void print_rbd __P((volatile struct ie_recv_buf_desc *rbd));
int in_ierint = 0;
int in_ietint = 0;
#endif

void ie_attach();

struct cfdriver iecd = {
	NULL, "ie", ie_md_match, ie_attach,
	DV_IFNET, sizeof(struct ie_softc),
};

/*
 * address generation macros
 *  MK_24 = KVA -> 24 bit address in SUN byte order
 *  MK_16 = KVA -> 16 bit address in INTEL byte order
 *  ST_24 = store a 24 bit address in SUN byte order to INTEL byte order
 */
#define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
#define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
#define ST_24(to, from) { \
	u_long fval = (u_long)(from); \
	u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
	t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0];*/ \
}
#define MEM sc->sc_maddr
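
/*
 * Example of the byte-order and address macros above (illustrative only,
 * not compiled).  SWAP() exchanges the two bytes of a 16-bit value, so it
 * is its own inverse; MK_16() turns a kernel virtual address into a
 * chip-relative 16-bit offset in Intel byte order; ST_24() stores the
 * three significant bytes of a 24-bit chip-relative address individually,
 * reversing the byte order on the way.
 */
#if 0
	u_short x = SWAP(0x1234);	/* x == 0x3412 */
	u_short y = SWAP(x);		/* y == 0x1234 again */

	/* offset of a receive frame descriptor, as the chip wants it */
	sc->scb->ie_recv_list = MK_16(MEM, sc->rframes[0]);

	/* 24-bit buffer address, stored byte-wise into the descriptor */
	ST_24(sc->rbuffs[0]->ie_rbd_buffer, MK_24(MEM, sc->cbuffs[0]));
#endif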

/*
 * zero/copy functions: OBIO can use the normal functions, but VME
 * must do only byte or half-word (16 bit) accesses...
 */

/*
 * Here are a few useful functions.  We could have done these as macros,
 * but since we have the inline facility, it makes sense to use that
 * instead.
 */
static inline void
ie_setup_config(cmd, promiscuous)
	volatile struct ie_config_cmd *cmd;
	int promiscuous;
{

	/*
	 * these are all char's so don't swap them!
	 */
	cmd->ie_config_count = 0x0c;
	cmd->ie_fifo = 8;
	cmd->ie_save_bad = 0x40;
	cmd->ie_addr_len = 0x2e;
	cmd->ie_priority = 0;
	cmd->ie_ifs = 0x60;
	cmd->ie_slot_low = 0;
	cmd->ie_slot_high = 0xf2;
	cmd->ie_promisc = !!promiscuous;
	cmd->ie_crs_cdt = 0;
	cmd->ie_min_len = 64;
	cmd->ie_junk = 0xff;
}

static inline caddr_t
Align(ptr)
	caddr_t ptr;
{
	u_long l = (u_long)ptr;

	l = (l + 3) & ~3L;
	return (caddr_t)l;
}
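
/*
 * Example of Align() (illustrative only): it rounds a kernel virtual
 * address up to the next 4-byte boundary and leaves aligned addresses
 * alone.  All of the shared-memory layout code in setup_bufs() below
 * relies on this.
 */
#if 0
	caddr_t p;

	p = Align((caddr_t)0x1001);	/* p == (caddr_t)0x1004 */
	p = Align((caddr_t)0x1004);	/* p == (caddr_t)0x1004 */
#endif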

static inline void
ie_ack(sc, mask)
	struct ie_softc *sc;
	u_int mask;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;

	command_and_wait(sc, scb->ie_status & mask, 0, 0);
}


/*
 * Taken almost exactly from Bill's if_is.c,
 * then modified beyond recognition...
 */
void
ie_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ie_softc *sc = (void *) self;
	struct ifnet *ifp = &sc->sc_if;

	/*
	 * Do machine-dependent parts of attach.
	 */
	ie_md_attach(parent, self, aux);
	printf(" hwaddr %s\n", ether_sprintf(sc->sc_addr));

	/*
	 * Setup for transmit/receive
	 */
	if (ie_setupram(sc) == 0) {
		printf(": RAM CONFIG FAILED!\n");
		/* XXX should reclaim resources? */
		return;
	}

	/*
	 * Initialize and attach S/W interface
	 */
	ifp->if_unit = sc->sc_dev.dv_unit;
	ifp->if_name = iecd.cd_name;
	ifp->if_output = ether_output;
	ifp->if_start = iestart;
	ifp->if_ioctl = ieioctl;
	ifp->if_watchdog = iewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IFF_NOTRAILERS
	/* XXX still compile when the blasted things are gone... */
	ifp->if_flags |= IFF_NOTRAILERS;
#endif

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif
}

/*
 * Device timeout/watchdog routine.  Entered if the device neglects to
 * generate an interrupt after a transmit has been started on it.
 */
int
iewatchdog(unit)
	short unit;
{
	struct ie_softc *sc = iecd.cd_devs[unit];

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_arpcom.ac_if.if_oerrors;

	iereset(sc);
}

/*
 * What to do upon receipt of an interrupt.
 */
int
ie_intr(v)
	void *v;
{
	struct ie_softc *sc = v;
	register u_short status;

	status = sc->scb->ie_status;

	/*
	 * check for parity error
	 */
	if (sc->hard_type == IE_VME) {
		volatile struct ievme *iev = (volatile struct ievme *)sc->sc_reg;
		if (iev->status & IEVME_PERR) {
			printf("%s: parity error (ctrl %x @ %02x%04x)\n",
			    sc->sc_dev.dv_xname, iev->pectrl,
			    iev->pectrl & IEVME_HADDR, iev->peaddr);
			iev->pectrl = iev->pectrl | IEVME_PARACK;
		}
	}
loop:
	if (status & (IE_ST_RECV | IE_ST_RNR)) {
#ifdef IEDEBUG
		in_ierint++;
		if (sc->sc_debug & IED_RINT)
			printf("%s: rint\n", sc->sc_dev.dv_xname);
#endif
		ierint(sc);
#ifdef IEDEBUG
		in_ierint--;
#endif
	}
	if (status & IE_ST_DONE) {
#ifdef IEDEBUG
		in_ietint++;
		if (sc->sc_debug & IED_TINT)
			printf("%s: tint\n", sc->sc_dev.dv_xname);
#endif
		ietint(sc);
#ifdef IEDEBUG
		in_ietint--;
#endif
	}
	if (status & IE_ST_RNR) {
#ifdef IEDEBUG
		if (sc->sc_debug & IED_RNR)
			printf("%s: rnr\n", sc->sc_dev.dv_xname);
#endif
		iernr(sc);
	}
#ifdef IEDEBUG
	if ((status & IE_ST_ALLDONE) && (sc->sc_debug & IED_CNA))
		printf("%s: cna\n", sc->sc_dev.dv_xname);
#endif

	/* Don't ack interrupts which we didn't receive */
	ie_ack(sc, IE_ST_WHENCE & status);

	if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
		goto loop;
	return 1;
}

/*
 * Process a received-frame interrupt.
 */
void
ierint(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i, status;
	static int timesthru = 1024;

	i = sc->rfhead;
	for (;;) {
		status = sc->rframes[i]->ie_fd_status;

		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
			sc->sc_arpcom.ac_if.if_ipackets++;
			if (!--timesthru) {
				sc->sc_arpcom.ac_if.if_ierrors +=
				    SWAP(scb->ie_err_crc) +
				    SWAP(scb->ie_err_align) +
				    SWAP(scb->ie_err_resource) +
				    SWAP(scb->ie_err_overrun);
				scb->ie_err_crc = scb->ie_err_align = 0;
				scb->ie_err_resource = 0;
				scb->ie_err_overrun = 0;
				timesthru = 1024;
			}
			ie_readframe(sc, i);
		} else {
			if ((status & IE_FD_RNR) != 0 &&
			    (scb->ie_status & IE_RU_READY) == 0) {
				sc->rframes[0]->ie_fd_buf_desc = MK_16(MEM,
				    sc->rbuffs[0]);
				scb->ie_recv_list = MK_16(MEM, sc->rframes[0]);
				command_and_wait(sc, IE_RU_START, 0, 0);
			}
			break;
		}
		i = (i + 1) % sc->nframes;
	}
}

/*
 * Process a command-complete interrupt.  These are only generated by
 * the transmission of frames.  This routine is deceptively simple, since
 * most of the real work is done by iestart().
 */
void
ietint(sc)
	struct ie_softc *sc;
{
	int status;
	int i;

	sc->sc_arpcom.ac_if.if_timer = 0;
	sc->sc_arpcom.ac_if.if_flags &= ~IFF_OACTIVE;

	for (i = 0; i < sc->xmit_count; i++) {
		status = sc->xmit_cmds[i]->ie_xmit_status;

		if (status & IE_XS_LATECOLL) {
			printf("%s: late collision\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_collisions++;
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_NOCARRIER) {
			printf("%s: no carrier\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_LOSTCTS) {
			printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_UNDERRUN) {
			printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else if (status & IE_XS_EXCMAX) {
			printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_collisions += 16;
			sc->sc_arpcom.ac_if.if_oerrors++;
		} else {
			sc->sc_arpcom.ac_if.if_opackets++;
			sc->sc_arpcom.ac_if.if_collisions +=
			    SWAP(status & IE_XS_MAXCOLL);
		}
	}
	sc->xmit_count = 0;

	/*
	 * If multicast addresses were added or deleted while we
	 * were transmitting, mc_reset() set the want_mcsetup flag
	 * indicating that we should do it.
	 */
	if (sc->want_mcsetup) {
		mc_setup(sc, (caddr_t)sc->xmit_cbuffs[0]);
		sc->want_mcsetup = 0;
	}
	/* Wish I knew why this seems to be necessary... */
	sc->xmit_cmds[0]->ie_xmit_status |= IE_STAT_COMPL;

	iestart(&sc->sc_arpcom.ac_if);
}

/*
 * Process a receiver-not-ready interrupt.  I believe that we get these
 * when there aren't enough buffers to go around.  For now (FIXME), we
 * just restart the receiver, and hope everything's ok.
 */
void
iernr(sc)
	struct ie_softc *sc;
{

	command_and_wait(sc, IE_RU_DISABLE, 0, 0);	/* just in case */
	setup_bufs(sc);

	sc->scb->ie_recv_list = MK_16(MEM, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);	/* was ENABLE */

	ie_ack(sc, IE_ST_WHENCE);

	sc->sc_arpcom.ac_if.if_ierrors++;
}

/*
 * Compare two Ether/802 addresses for equality, inlined and
 * unrolled for speed.  I'd love to have an inline assembler
 * version of this...
 */
static inline int
ether_equal(one, two)
	u_char *one, *two;
{

	if (one[0] != two[0] || one[1] != two[1] || one[2] != two[2] ||
	    one[3] != two[3] || one[4] != two[4] || one[5] != two[5])
		return 0;
	return 1;
}

/*
 * Check for a valid address.  to_bpf is filled in with one of the following:
 *	0 -> BPF doesn't get this packet
 *	1 -> BPF does get this packet
 *	2 -> BPF does get this packet, but we don't
 * Return value is true if the packet is for us, and false otherwise.
 *
 * This routine is a mess, but it's also critical that it be as fast
 * as possible.  It could be made cleaner if we can assume that the
 * only client which will fiddle with IFF_PROMISC is BPF.  This is
 * probably a good assumption, but we do not make it here.  (Yet.)
 */
static inline int
check_eh(sc, eh, to_bpf)
	struct ie_softc *sc;
	struct ether_header *eh;
	int *to_bpf;
{
	int i;

	switch (sc->promisc) {
	case IFF_ALLMULTI:
		/*
		 * Receiving all multicasts, but no unicasts except those
		 * destined for us.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		if (eh->ether_dhost[0] & 1)
			return 1;
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;
		return 0;

	case IFF_PROMISC:
		/*
		 * Receiving all packets.  These need to be passed on to BPF.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* If for us, accept and hand up to BPF */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;	/* we don't need to see it */
#endif

		/*
		 * Not a multicast, so BPF wants to see it but we don't.
		 */
		if (!(eh->ether_dhost[0] & 1))
			return 1;

		/*
		 * If it's one of our multicast groups, accept it and pass it
		 * up.
		 */
		for (i = 0; i < sc->mcast_count; i++) {
			if (ether_equal(eh->ether_dhost,
			    (u_char *)&sc->mcast_addrs[i])) {
#if NBPFILTER > 0
				if (*to_bpf)
					*to_bpf = 1;
#endif
				return 1;
			}
		}
		return 1;

	case IFF_ALLMULTI | IFF_PROMISC:
		/*
		 * Acting as a multicast router, and BPF running at the same
		 * time.  Whew!  (Hope this is a fast machine...)
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* We want to see multicasts. */
		if (eh->ether_dhost[0] & 1)
			return 1;

		/* We want to see our own packets */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

		/* Anything else goes to BPF but nothing else. */
#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;
#endif
		return 1;

	default:
		/*
		 * Only accept unicast packets destined for us, or multicasts
		 * for groups that we belong to.  For now, we assume that the
		 * '586 will only return packets that we asked it for.  This
		 * isn't strictly true (it uses hashing for the multicast
		 * filter), but it will do in this case, and we want to get
		 * out of here as quickly as possible.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		return 1;
	}
	return 0;
}

/*
 * We want to isolate the bits that have meaning...  This assumes that
 * IE_RBUF_SIZE is an even power of two.  If somehow the act_len exceeds
 * the size of the buffer, then we are screwed anyway.
 */
static inline int
ie_buflen(sc, head)
	struct ie_softc *sc;
	int head;
{

	return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
	    & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
}
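
/*
 * Worked example of the masking above (illustrative only; the real value
 * of IE_RBUF_SIZE comes from if_ie.h and is assumed here to be 256 just
 * for the arithmetic).  With IE_RBUF_SIZE == 256 the mask is
 * 0x100 | 0x0ff == 0x1ff, so any flag bits living above bit 8 of the
 * (swapped) ie_rbd_actual word are stripped and only a byte count in the
 * range 0..IE_RBUF_SIZE survives, e.g. a swapped value of 0xc02a masks
 * down to 0x02a.
 */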

static inline int
ie_packet_len(sc)
	struct ie_softc *sc;
{
	int i;
	int head = sc->rbhead;
	int acc = 0;

	do {
		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return -1;
		}
		i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;

		acc += ie_buflen(sc, head);
		head = (head + 1) % sc->nrxbuf;
	} while (!i);

	return acc;
}

/*
 * Read data off the interface, and turn it into an mbuf chain.
 *
 * This code is DRAMATICALLY different from the previous version; this
 * version tries to allocate the entire mbuf chain up front, given the
 * length of the data available.  This enables us to allocate mbuf
 * clusters in many situations where before we would have had a long
 * chain of partially-full mbufs.  This should help to speed up the
 * operation considerably.  (Provided that it works, of course.)
 */
static inline int
ieget(sc, mp, ehp, to_bpf)
	struct ie_softc *sc;
	struct mbuf **mp;
	struct ether_header *ehp;
	int *to_bpf;
{
	struct mbuf *m, *top, **mymp;
	int i;
	int offset;
	int totlen, resid;
	int thismboff;
	int head;

	totlen = ie_packet_len(sc);
	if (totlen <= 0)
		return -1;

	i = sc->rbhead;

	/*
	 * Snarf the Ethernet header.
	 */
	(sc->memcopy)((caddr_t)sc->cbuffs[i], (caddr_t)ehp, sizeof *ehp);

	/*
	 * As quickly as possible, check if this packet is for us.
	 * If not, don't waste a single cycle copying the rest of the
	 * packet in.
	 * This is only a consideration when FILTER is defined; i.e., when
	 * we are either running BPF or doing multicasting.
	 */
	if (!check_eh(sc, ehp, to_bpf)) {
		ie_drop_packet_buffer(sc);
		sc->sc_arpcom.ac_if.if_ierrors--;	/* just this case, it's
							 * not an error */
		return -1;
	}
	totlen -= (offset = sizeof *ehp);

	MGETHDR(*mp, M_DONTWAIT, MT_DATA);
	if (!*mp) {
		ie_drop_packet_buffer(sc);
		return -1;
	}
	m = *mp;
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
	m->m_len = MHLEN;
	resid = m->m_pkthdr.len = totlen;
	top = 0;
	mymp = &top;

	/*
	 * This loop goes through and allocates mbufs for all the data we will
	 * be copying in.  It does not actually do the copying yet.
	 */
	do {			/* while (resid > 0) */
		/*
		 * Try to allocate an mbuf to hold the data that we have.  If
		 * we already allocated one, just get another one and stick it
		 * on the end (eventually).  If we don't already have one, try
		 * to allocate an mbuf cluster big enough to hold the whole
		 * packet, if we think it's reasonable, or a single mbuf which
		 * may or may not be big enough.  Got that?
		 */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				m_freem(top);
				ie_drop_packet_buffer(sc);
				return -1;
			}
			m->m_len = MLEN;
		}
		if (resid >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = min(resid, MCLBYTES);
		} else {
			if (resid < m->m_len) {
				if (!top && resid + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = resid;
			}
		}
		resid -= m->m_len;
		*mymp = m;
		mymp = &m->m_next;
	} while (resid > 0);

	resid = totlen;
	m = top;
	thismboff = 0;
	head = sc->rbhead;

	/*
	 * Now we take the mbuf chain (hopefully only one mbuf most of the
	 * time) and stuff the data into it.  There are no possible failures
	 * at or after this point.
	 */
	while (resid > 0) {	/* while there's stuff left */
		int thislen = ie_buflen(sc, head) - offset;

		/*
		 * If too much data for the current mbuf, then fill the
		 * current one up, go to the next one, and try again.
		 */
		if (thislen > m->m_len - thismboff) {
			int newlen = m->m_len - thismboff;
			(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)newlen);
			m = m->m_next;
			thismboff = 0;		/* new mbuf, so no offset */
			offset += newlen;	/* we are now this far into
						 * the packet */
			resid -= newlen;	/* so there is this much left
						 * to get */
			continue;
		}
		/*
		 * If there is more than enough space in the mbuf to hold the
		 * contents of this buffer, copy everything in, advance
		 * pointers, and so on.
		 */
		if (thislen < m->m_len - thismboff) {
			(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)thislen);
			thismboff += thislen;	/* we are this far into the
						 * mbuf */
			resid -= thislen;	/* and this much is left */
			goto nextbuf;
		}
		/*
		 * Otherwise, there is exactly enough space to put this
		 * buffer's contents into the current mbuf.  Do the
		 * combination of the above actions.
		 */
		(sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
		    mtod(m, caddr_t) + thismboff, (u_int)thislen);
		m = m->m_next;
		thismboff = 0;		/* new mbuf, start at the beginning */
		resid -= thislen;	/* and we are this far through */

		/*
		 * Advance all the pointers.  We can get here from either of
		 * the last two cases, but never the first.
		 */
	nextbuf:
		offset = 0;
		sc->rbuffs[head]->ie_rbd_actual = SWAP(0);
		sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbhead = head = (head + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	}

	/*
	 * Unless something changed strangely while we were doing the copy,
	 * we have now copied everything in from the shared memory.
	 * This means that we are done.
	 */
	return 0;
}

/*
 * Read frame NUM from unit UNIT (pre-cached as IE).
 *
 * This routine reads the RFD at NUM, and copies in the buffers from
 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
 * doesn't start complaining.  Trailers are DROPPED---there's no point
 * in wasting time on confusing code to deal with them.  Hopefully,
 * this machine will never ARP for trailers anyway.
 */
static void
ie_readframe(sc, num)
	struct ie_softc *sc;
	int num;		/* frame number to read */
{
	struct ie_recv_frame_desc rfd;
	struct mbuf *m = 0;
	struct ether_header eh;
#if NBPFILTER > 0
	int bpf_gets_it = 0;
#endif

	(sc->memcopy)((caddr_t)(sc->rframes[num]), &rfd,
	    sizeof(struct ie_recv_frame_desc));

	/* Immediately advance the RFD list, since we have copied ours now. */
	sc->rframes[num]->ie_fd_status = SWAP(0);
	sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
	sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
	sc->rftail = (sc->rftail + 1) % sc->nframes;
	sc->rfhead = (sc->rfhead + 1) % sc->nframes;

	if (rfd.ie_fd_status & IE_FD_OK) {
#if NBPFILTER > 0
		if (ieget(sc, &m, &eh, &bpf_gets_it)) {
#else
		if (ieget(sc, &m, &eh, 0)) {
#endif
			sc->sc_arpcom.ac_if.if_ierrors++;
			return;
		}
	}
#ifdef IEDEBUG
	if (sc->sc_debug & IED_READFRAME)
		printf("%s: frame from ether %s type %x\n", sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
#endif

	if (!m)
		return;

	if (last_not_for_us) {
		m_freem(last_not_for_us);
		last_not_for_us = 0;
	}
#if NBPFILTER > 0
	/*
	 * Check for a BPF filter; if so, hand it up.
	 * Note that we have to stick an extra mbuf up front, because
	 * bpf_mtap expects to have the ether header at the front.
	 * It doesn't matter that this results in an ill-formatted mbuf chain,
	 * since BPF just looks at the data.  (It doesn't try to free the
	 * mbuf, tho' it will make a copy for tcpdump.)
	 */
	if (bpf_gets_it) {
		struct mbuf m0;
		m0.m_len = sizeof eh;
		m0.m_data = (caddr_t)&eh;
		m0.m_next = m;

		/* Pass it up */
		bpf_mtap(sc->sc_arpcom.ac_if.if_bpf, &m0);
	}
	/*
	 * A signal passed up from the filtering code indicating that the
	 * packet is intended for BPF but not for the protocol machinery.
	 * We can save a few cycles by not handing it off to them.
	 */
	if (bpf_gets_it == 2) {
		last_not_for_us = m;
		return;
	}
#endif /* NBPFILTER > 0 */

	/*
	 * In here there used to be code to check destination addresses upon
	 * receipt of a packet.  We have deleted that code, and replaced it
	 * with code to check the address much earlier in the cycle, before
	 * copying the data in; this saves us valuable cycles when operating
	 * as a multicast router or when using BPF.
	 */

	/*
	 * Finally pass this packet up to higher layers.
	 */
	ether_input(&sc->sc_arpcom.ac_if, &eh, m);
}

static void
ie_drop_packet_buffer(sc)
	struct ie_softc *sc;
{
	int i;

	do {
		/*
		 * This means we are somehow out of sync.  So, we reset the
		 * adapter.
		 */
		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return;
		}
		i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;

		sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
		sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	} while (!i);
}

/*
 * Start transmission on an interface.
 */
int
iestart(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = iecd.cd_devs[ifp->if_unit];
	struct mbuf *m0, *m;
	u_char *buffer;
	u_short len;
	/* This is not really volatile, in this routine, but it makes gcc
	 * happy. */
	volatile u_short *bptr = &sc->scb->ie_command_list;

	if ((ifp->if_flags ^ IFF_RUNNING) & (IFF_RUNNING | IFF_OACTIVE))
		return 0;

	do {
		IF_DEQUEUE(&sc->sc_arpcom.ac_if.if_snd, m);
		if (!m)
			break;

		buffer = sc->xmit_cbuffs[sc->xmit_count];
		len = 0;

		for (m0 = m; m && len < IE_TBUF_SIZE; m = m->m_next) {
			(sc->memcopy)(mtod(m, caddr_t), buffer, m->m_len);
			buffer += m->m_len;
			len += m->m_len;
		}

		m_freem(m0);
		len = max(len, ETHER_MIN_LEN);

#if NBPFILTER > 0
		/*
		 * See if bpf is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (sc->sc_arpcom.ac_if.if_bpf)
			bpf_tap(sc->sc_arpcom.ac_if.if_bpf,
			    sc->xmit_cbuffs[sc->xmit_count],
			    len);
#endif

		sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags =
		    IE_XMIT_LAST | SWAP(len);
		sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = SWAP(0xffff);
		ST_24(sc->xmit_buffs[sc->xmit_count]->ie_xmit_buf,
		    MK_24(MEM, sc->xmit_cbuffs[sc->xmit_count]));

		sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_cmd = IE_CMD_XMIT;
		sc->xmit_cmds[sc->xmit_count]->ie_xmit_status = SWAP(0);
		sc->xmit_cmds[sc->xmit_count]->ie_xmit_desc =
		    MK_16(MEM, sc->xmit_buffs[sc->xmit_count]);

		*bptr = MK_16(MEM, sc->xmit_cmds[sc->xmit_count]);
		bptr = &sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_link;
	} while (++sc->xmit_count < NTXBUF);

	/*
	 * If we queued up anything for transmission, send it.
	 */
	if (sc->xmit_count) {
		sc->xmit_cmds[sc->xmit_count - 1]->com.ie_cmd_cmd |=
		    IE_CMD_LAST | IE_CMD_INTR;

		/*
		 * By passing the command pointer as a null, we tell
		 * command_and_wait() to pretend that this isn't an action
		 * command.  I wish I understood what was happening here.
		 */
		command_and_wait(sc, IE_CU_START, 0, 0);
		ifp->if_flags |= IFF_OACTIVE;
	}
}

/*
 * set up IE's ram space
 */
int
ie_setupram(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int s;

	s = splimp();

	scp = sc->scp;
	(sc->memzero)((char *) scp, sizeof *scp);

	iscp = sc->iscp;
	(sc->memzero)((char *) iscp, sizeof *iscp);

	scb = sc->scb;
	(sc->memzero)((char *) scb, sizeof *scb);

	scp->ie_bus_use = 0;	/* 16-bit */
	ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));

	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
	ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));

	(sc->reset_586) (sc);
	(sc->chan_attn) (sc);

	delay(100);		/* wait a while... */

	if (iscp->ie_busy) {
		splx(s);
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);
	splx(s);

	return 1;
}

void
iereset(sc)
	struct ie_softc *sc;
{
	int s = splimp();

	printf("%s: reset\n", sc->sc_dev.dv_xname);
	sc->sc_arpcom.ac_if.if_flags &= ~IFF_UP;
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	/*
	 * Stop i82586 dead in its tracks.
	 */
	if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
		printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);

	if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
		printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);

	sc->sc_arpcom.ac_if.if_flags |= IFF_UP;
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	splx(s);
}

/*
 * This is called if we time out.
 */
static void
chan_attn_timeout(rock)
	caddr_t rock;
{
	*(int *) rock = 1;
}

/*
 * Send a command to the controller and wait for it to either
 * complete or be accepted, depending on the command.  If the
 * command pointer is null, then pretend that the command is
 * not an action command.  If the command pointer is not null,
 * and the command is an action command, wait for
 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
 * to become true.
 */
static int
command_and_wait(sc, cmd, pcmd, mask)
	struct ie_softc *sc;
	int cmd;
	volatile void *pcmd;
	int mask;
{
	volatile struct ie_cmd_common *cc = pcmd;
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	volatile int timedout = 0;
	extern int hz;

	scb->ie_command = (u_short)cmd;

	if (IE_ACTION_COMMAND(cmd) && pcmd) {
		(sc->chan_attn) (sc);

		/*
		 * XXX
		 * I don't think this timeout works on suns.
		 * we are at splimp() in the loop, and the timeout
		 * stuff runs at software spl (so it is masked off?).
		 */

		/*
		 * According to the packet driver, the minimum timeout should
		 * be .369 seconds, which we round up to .4.
		 */

		timeout(chan_attn_timeout, (caddr_t)&timedout, 2 * hz / 5);

		/*
		 * Now spin-lock waiting for status.  This is not a very nice
		 * thing to do, but I haven't figured out how, or indeed if,
		 * we can put the process waiting for action to sleep.  (We
		 * may be getting called through some other timeout running
		 * in the kernel.)
		 */
		for (;;)
			if ((cc->ie_cmd_status & mask) || timedout)
				break;

		untimeout(chan_attn_timeout, (caddr_t)&timedout);

		return timedout;
	} else {

		/*
		 * Otherwise, just wait for the command to be accepted.
		 */
		(sc->chan_attn) (sc);

		while (scb->ie_command);	/* spin lock */

		return 0;
	}
}
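
/*
 * Illustrative summary (not compiled) of the two ways command_and_wait()
 * is used by the callers in this file; the fragments are lifted from
 * run_tdr()/ieinit() and start_receiver() below.
 */
#if 0
	/*
	 * 1. Action command: point the SCB command list at it, then
	 *    busy-wait (with the 0.4 second timeout) until the chip sets
	 *    the requested status bit in the command itself.
	 */
	sc->scb->ie_command_list = MK_16(MEM, cmd);
	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK))
		printf("%s: command failed\n", sc->sc_dev.dv_xname);

	/*
	 * 2. Control command (pcmd == 0): just wait for the SCB command
	 *    word to be accepted (cleared) by the chip.
	 */
	command_and_wait(sc, IE_RU_START, 0, 0);
#endif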

/*
 * Run the time-domain reflectometer...
 */
static void
run_tdr(sc, cmd)
	struct ie_softc *sc;
	struct ie_tdr_cmd *cmd;
{
	int result;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	sc->scb->ie_command_list = MK_16(MEM, cmd);
	cmd->ie_tdr_time = SWAP(0);

	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK))
		result = 0x10000;	/* XXX */
	else
		result = cmd->ie_tdr_time;

	ie_ack(sc, IE_ST_WHENCE);

	if (result & IE_TDR_SUCCESS)
		return;

	if (result & 0x10000) {
		printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
	} else if (result & IE_TDR_XCVR) {
		printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
	} else if (result & IE_TDR_OPEN) {
		printf("%s: TDR detected an open %d clocks away\n",
		    sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
	} else if (result & IE_TDR_SHORT) {
		printf("%s: TDR detected a short %d clocks away\n",
		    sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
	} else {
		printf("%s: TDR returned unknown status %x\n",
		    sc->sc_dev.dv_xname, result);
	}
}

static void
start_receiver(sc)
	struct ie_softc *sc;
{
	int s = splimp();

	sc->scb->ie_recv_list = MK_16(MEM, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	splx(s);
}

/*
 * setup_bufs: set up the buffers
 *
 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers.  the chip indexes its control data
 * structures with 16 bit offsets, and it indexes actual buffers with
 * 24 bit addresses.  so we should allocate control buffers first so that
 * we don't overflow the 16 bit offset field.  The number of transmit
 * buffers is fixed at compile time.
 *
 * note: this function was written to be easy to understand, rather than
 * highly efficient (it isn't in the critical path).
 */
static void
setup_bufs(sc)
	struct ie_softc *sc;
{
	caddr_t ptr = sc->buf_area;	/* memory pool */
	volatile struct ie_recv_frame_desc *rfd = (void *) ptr;
	volatile struct ie_recv_buf_desc *rbd;
	int n, r;

	/*
	 * step 0: zero memory and figure out how many recv buffers and
	 * frames we can have.  XXX CURRENTLY HARDWIRED AT MAX
	 */
	(sc->memzero)(ptr, sc->buf_area_sz);
	ptr = Align(ptr);	/* set alignment and stick with it */

	n = (int)Align(sizeof(struct ie_xmit_cmd)) +
	    (int)Align(sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
	n *= NTXBUF;		/* n = total size of xmit area */

	n = sc->buf_area_sz - n;/* n = free space for recv stuff */

	r = (int)Align(sizeof(struct ie_recv_frame_desc)) +
	    (((int)Align(sizeof(struct ie_recv_buf_desc)) + IE_RBUF_SIZE) * B_PER_F);

	/* r = size of one R frame */

	sc->nframes = n / r;
	if (sc->nframes <= 0)
		panic("ie: bogus buffer calc\n");
	if (sc->nframes > MXFRAMES)
		sc->nframes = MXFRAMES;

	sc->nrxbuf = sc->nframes * B_PER_F;

#ifdef IEDEBUG
	printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
#endif

	/*
	 * step 1a: lay out and zero frame data structures for transmit and
	 * recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_cmd));
	}

	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_frame_desc));
	}

	/*
	 * step 1b: link together the recv frames and set EOL on last one
	 */
	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n]->ie_fd_next =
		    MK_16(MEM, sc->rframes[(n + 1) % sc->nframes]);
	}
	sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;

	/*
	 * step 2a: lay out and zero frame buffer structures for xmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_buf));
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_buf_desc));
	}

	/*
	 * step 2b: link together recv bufs and set EOL on last one
	 */
	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n]->ie_rbd_next =
		    MK_16(MEM, sc->rbuffs[(n + 1) % sc->nrxbuf]);
	}
	sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= IE_RBD_LAST;

	/*
	 * step 3: allocate the actual data buffers for xmit and recv
	 * recv buffer gets linked into recv_buf_desc list here
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cbuffs[n] = (u_char *) ptr;
		ptr = Align(ptr + IE_TBUF_SIZE);
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->cbuffs[n] = (char *) ptr;	/* XXX why char vs uchar? */
		sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
		ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(MEM, ptr));
		ptr = Align(ptr + IE_RBUF_SIZE);
	}

	/*
	 * step 4: set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used.  link in recv frames
	 * and buffer into the scb.
	 */

	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list = MK_16(MEM, sc->rframes[0]);
	sc->rframes[0]->ie_fd_buf_desc = MK_16(MEM, sc->rbuffs[0]);

#ifdef IEDEBUG
	printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
#endif
}
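
/*
 * Worked example of the budget computed in setup_bufs() (all numbers here
 * are made up for illustration; the real NTXBUF, B_PER_F, IE_TBUF_SIZE,
 * IE_RBUF_SIZE, MXFRAMES and descriptor sizes come from if_ie.h and
 * i82586.h, and buf_area_sz from the MD front end):
 *
 *	xmit area = NTXBUF * (Align(sizeof xmit cmd)
 *		    + Align(sizeof xmit buf) + IE_TBUF_SIZE)
 *	r	  = Align(sizeof recv frame desc)
 *		    + B_PER_F * (Align(sizeof recv buf desc) + IE_RBUF_SIZE)
 *	nframes	  = (buf_area_sz - xmit area) / r, then capped at MXFRAMES
 *	nrxbuf	  = nframes * B_PER_F
 *
 * e.g. with a (hypothetical) 32K buf_area_sz, a 3K xmit area and r == 580,
 * nframes == (32768 - 3072) / 580 == 51 before the MXFRAMES cap.
 */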

/*
 * Run the multicast setup command.
 * Called at splimp().
 */
static int
mc_setup(sc, ptr)
	struct ie_softc *sc;
	caddr_t ptr;
{
	volatile struct ie_mcast_cmd *cmd = (void *) ptr;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	(sc->memcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
	    sc->mcast_count * sizeof *sc->mcast_addrs);

	cmd->ie_mcast_bytes =
	    SWAP(sc->mcast_count * ETHER_ADDR_LEN);	/* grrr... */

	sc->scb->ie_command_list = MK_16(MEM, cmd);
	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
		printf("%s: multicast address setup command failed\n",
		    sc->sc_dev.dv_xname);
		return 0;
	}
	return 1;
}

/*
 * This routine inits the ie.
 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
 * starting the receiver unit, and clearing interrupts.
 *
 * THIS ROUTINE MUST BE CALLED AT splimp() OR HIGHER.
 */
int
ieinit(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	caddr_t ptr;
	int n;

	ptr = sc->buf_area;

	/*
	 * Send the configure command first.
	 */
	{
		volatile struct ie_config_cmd *cmd = (void *) ptr;

		ie_setup_config(cmd, sc->promisc);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		scb->ie_command_list = MK_16(MEM, cmd);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: configure command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}
	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		volatile struct ie_iasetup_cmd *cmd = (void *) ptr;

		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		(sc->memcopy)(sc->sc_arpcom.ac_enaddr,
		    (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);

		scb->ie_command_list = MK_16(MEM, cmd);
		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: individual address setup command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	run_tdr(sc, (void *) ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	setup_bufs(sc);

	/*
	 * This must be coordinated with iestart() and ietint().
	 */
	sc->xmit_cmds[0]->ie_xmit_status = IE_STAT_COMPL;

	sc->sc_arpcom.ac_if.if_flags |= IFF_RUNNING;
	/* tell higher levels that we are here */

	start_receiver(sc);
	if (sc->run_586)
		(sc->run_586) (sc);

	return 0;
}

static void
iestop(sc)
	struct ie_softc *sc;
{

	command_and_wait(sc, IE_RU_DISABLE, 0, 0);
}

int
ieioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ie_softc *sc = iecd.cd_devs[ifp->if_unit];
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			/*
			 * See if another station has *our* IP address.
			 * i.e.: There is an address conflict!  If a
			 * conflict exists, a message is sent to the
			 * console.
			 */
			sc->sc_arpcom.ac_ipaddr = IA_SIN(ifa)->sin_addr;
			arpwhohas(&sc->sc_arpcom, &IA_SIN(ifa)->sin_addr);
			break;
#endif
#ifdef NS
		/* XXX - This code is probably wrong. */
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *) (sc->sc_arpcom.ac_enaddr);
			else
				bcopy(ina->x_host.c_host,
				    sc->sc_arpcom.ac_enaddr,
				    sizeof(sc->sc_arpcom.ac_enaddr));
			/* Set new address. */
			ieinit(sc);
			break;
		    }
#endif /* NS */
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);

		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return error;
}

static void
mc_reset(sc)
	struct ie_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Step through the list of addresses.
	 */
	sc->mcast_count = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm) {
		if (sc->mcast_count >= MAXMCAST ||
		    bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			sc->sc_arpcom.ac_if.if_flags |= IFF_ALLMULTI;
			ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, (void *)0);
			goto setflag;
		}
		bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
		sc->mcast_count++;
		ETHER_NEXT_MULTI(step, enm);
	}
setflag:
	sc->want_mcsetup = 1;
}

#ifdef IEDEBUG
void
print_rbd(rbd)
	volatile struct ie_recv_buf_desc *rbd;
{

	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
	    "length %04x, mbz %04x\n", (u_long)rbd, rbd->ie_rbd_actual,
	    rbd->ie_rbd_next, rbd->ie_rbd_buffer, rbd->ie_rbd_length,
	    rbd->mbz);
}
#endif