/*	$NetBSD: if_ie.c,v 1.56 2015/05/20 09:17:17 ozaki-r Exp $	*/

/*-
 * Copyright (c) 1993, 1994, 1995 Charles M. Hannum.
 * Copyright (c) 1992, 1993, University of Vermont and State
 *  Agricultural College.
 * Copyright (c) 1992, 1993, Garrett A. Wollman.
 *
 * Portions:
 * Copyright (c) 1994, 1995, Rafal K. Boni
 * Copyright (c) 1990, 1991, William F. Jolitz
 * Copyright (c) 1990, The Regents of the University of California
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum, by the
 *	University of Vermont and State Agricultural College and Garrett A.
 *	Wollman, by William F. Jolitz, and by the University of California,
 *	Berkeley, Lawrence Berkeley Laboratory, and its contributors.
 * 4. Neither the names of the Universities nor the names of the authors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel 82586 Ethernet chip
 * Register, bit, and structure definitions.
 *
 * Original StarLAN driver written by Garrett Wollman with reference to the
 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
 *
 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
 *
 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
 *
 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
 *
 * Converted to SUN ie driver by Charles D. Cranor,
 * October 1994, January 1995.
 * This sun version based on i386 version 1.30.
 * [ see sys/dev/isa/if_ie.c ]
 */

/*
 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
 * sun-4/200's, and VME-based suns.  The byte order is all wrong for a
 * SUN, making life difficult.  Programming this chip is mostly the same,
 * but certain details differ from system to system.  This driver is
 * written so that different "ie" interfaces can be controlled by the same
 * driver.
 */

/*
Mode of operation:

We run the 82586 in a standard Ethernet mode.  We keep NFRAMES
received frame descriptors around for the receiver to use, and
NRXBUF associated receive buffer descriptors, both in a circular
list.  Whenever a frame is received, we rotate both lists as
necessary.  (The 586 treats both lists as a simple queue.)  We also
keep a transmit command around so that packets can be sent off
quickly.

We configure the adapter in AL-LOC = 1 mode, which means that the
Ethernet/802.3 MAC header is placed at the beginning of the receive
buffer rather than being split off into various fields in the RFD.
This also means that we must include this header in the transmit
buffer as well.

By convention, all transmit commands, and only transmit commands,
shall have the I (IE_CMD_INTR) bit set in the command.  This way,
when an interrupt arrives at ie_intr(), it is immediately possible
to tell what precisely caused it.  ANY OTHER command-sending
routines should run at splnet(), and should post an acknowledgement
to every interrupt they generate.
*/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ie.c,v 1.56 2015/05/20 09:17:17 ozaki-r Exp $");

#include "opt_inet.h"
#include "opt_ns.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/pmap.h>

/*
 * ugly byte-order hack for SUNs
 */

#define	XSWAP(y)	( (((y) & 0xff00) >> 8) | (((y) & 0xff) << 8) )
#define	SWAP(x)		((u_short)(XSWAP((u_short)(x))))

#include "i82586.h"
#include "if_iereg.h"
#include "if_ievar.h"

/* #define	IEDEBUG	XXX */

/*
 * IED: ie debug flags
 */

#define	IED_RINT	0x01
#define	IED_TINT	0x02
#define	IED_RNR		0x04
#define	IED_CNA		0x08
#define	IED_READFRAME	0x10
#define	IED_ENQ		0x20
#define	IED_XMIT	0x40
#define	IED_ALL		0x7f

#ifdef	IEDEBUG
#define	inline	/* not */
void print_rbd(volatile struct ie_recv_buf_desc *);
int in_ierint = 0;
int in_ietint = 0;
int ie_debug_flags = 0;
#endif

/* XXX - Skip TDR for now - it always complains... */
int ie_run_tdr = 0;

static void iewatchdog(struct ifnet *);
static int ieinit(struct ie_softc *);
static int ieioctl(struct ifnet *, u_long, void *);
static void iestart(struct ifnet *);
static void iereset(struct ie_softc *);
static int ie_setupram(struct ie_softc *);

static int cmd_and_wait(struct ie_softc *, int, void *, int);

static void ie_drop_packet_buffer(struct ie_softc *);
static void ie_readframe(struct ie_softc *, int);
static inline void ie_setup_config(struct ie_config_cmd *, int, int);

static void ierint(struct ie_softc *);
static void iestop(struct ie_softc *);
static void ietint(struct ie_softc *);
static void iexmit(struct ie_softc *);

static int mc_setup(struct ie_softc *, void *);
static void mc_reset(struct ie_softc *);
static void run_tdr(struct ie_softc *, struct ie_tdr_cmd *);
static void iememinit(struct ie_softc *);

static inline uint8_t *Align(char *);
static inline u_int Swap32(u_int);
static inline u_int vtop24(struct ie_softc *, void *);
static inline uint16_t vtop16sw(struct ie_softc *, void *);

static inline void ie_ack(struct ie_softc *, u_int);
static inline u_short ether_cmp(u_char *, uint8_t *);
static inline int check_eh(struct ie_softc *, struct ether_header *, int *);
static inline int ie_buflen(struct ie_softc *, int);
static inline int ie_packet_len(struct ie_softc *);
static inline struct mbuf * ieget(struct ie_softc *, int *);


/*
 * Here are a few useful functions.  We could have done these as macros,
 * but since we have the inline facility, it makes sense to use that
 * instead.
 */

/* KVA to 24 bit device address */
static inline u_int
vtop24(struct ie_softc *sc, void *ptr)
{
	u_int pa;

	pa = (vaddr_t)ptr - (vaddr_t)sc->sc_iobase;
#ifdef	IEDEBUG
	if (pa & ~0xffFFff)
		panic("ie:vtop24");
#endif
	return pa;
}

/* KVA to 16 bit offset, swapped */
static inline u_short
vtop16sw(struct ie_softc *sc, void *ptr)
{
	u_int pa;

	pa = (vaddr_t)ptr - (vaddr_t)sc->sc_maddr;
#ifdef	IEDEBUG
	if (pa & ~0xFFff)
		panic("ie:vtop16");
#endif

	return SWAP(pa);
}

static inline u_int
Swap32(u_int x)
{
	u_int y;

	y = x & 0xFF;
	y <<= 8; x >>= 8;
	y |= x & 0xFF;
	y <<= 8; x >>= 8;
	y |= x & 0xFF;
	y <<= 8; x >>= 8;
	y |= x & 0xFF;

	return y;
}

static inline uint8_t *
Align(char *ptr)
{
	u_long l = (u_long)ptr;

	l = (l + 3) & ~3L;
	return (uint8_t *)l;
}


static inline void
ie_ack(struct ie_softc *sc, u_int mask)
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;

	cmd_and_wait(sc, scb->ie_status & mask, 0, 0);
}


/*
 * Taken almost exactly from Bill's if_is.c,
 * then modified beyond recognition...
 */
void
ie_attach(struct ie_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	/* MD code has done its part before calling this. */
	printf(": macaddr %s\n", ether_sprintf(sc->sc_addr));

	/*
	 * Compute number of transmit and receive buffers.
	 * Tx buffers take 1536 bytes, and are fixed in number.
	 * Rx buffers are 512 bytes each, variable number.
	 * Need at least 1 frame for each 3 rx buffers.
	 * The ratio 3 bufs : 2 frames is a compromise.
	 */
	sc->ntxbuf = NTXBUF;	/* XXX - Fix me... */
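	/*
	 * For example, a 32K board yields 16 * 4 = 64 receive frames and
	 * 16 * 6 = 96 receive buffers, preserving the 3 bufs : 2 frames
	 * ratio noted above.  (Illustration only; the switch below is
	 * authoritative.)
	 */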
	switch (sc->sc_msize) {
	case 16384:
		sc->nframes = 8 * 4;
		sc->nrxbuf = 8 * 6;
		break;
	case 32768:
		sc->nframes = 16 * 4;
		sc->nrxbuf = 16 * 6;
		break;
	case 65536:
		sc->nframes = 32 * 4;
		sc->nrxbuf = 32 * 6;
		break;
	default:
		sc->nframes = 0;
	}
	if (sc->nframes > MXFRAMES)
		sc->nframes = MXFRAMES;
	if (sc->nrxbuf > MXRXBUF)
		sc->nrxbuf = MXRXBUF;

#ifdef	IEDEBUG
	aprint_debug_dev(sc->sc_dev,
	    "%dK memory, %d tx frames, %d rx frames, %d rx bufs\n",
	    (sc->sc_msize >> 10), sc->ntxbuf, sc->nframes, sc->nrxbuf);
#endif

	if ((sc->nframes <= 0) || (sc->nrxbuf <= 0))
		panic("%s: weird memory size", __func__);

	/*
	 * Setup RAM for transmit/receive
	 */
	if (ie_setupram(sc) == 0) {
		aprint_error(": RAM CONFIG FAILED!\n");
		/* XXX should reclaim resources? */
		return;
	}

	/*
	 * Initialize and attach S/W interface
	 */
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_start = iestart;
	ifp->if_ioctl = ieioctl;
	ifp->if_watchdog = iewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_addr);
}

/*
 * Setup IE's ram space.
 */
static int
ie_setupram(struct ie_softc *sc)
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int off;

	/*
	 * Allocate from end of buffer space for
	 * ISCP, SCB, and other small stuff.
	 */
	off = sc->buf_area_sz;
	off &= ~3;

	/* SCP (address already chosen). */
	scp = sc->scp;
	(sc->sc_memset)(__UNVOLATILE(scp), 0, sizeof(*scp));

	/* ISCP */
	off -= sizeof(*iscp);
	iscp = (volatile void *)(sc->buf_area + off);
	(sc->sc_memset)(__UNVOLATILE(iscp), 0, sizeof(*iscp));
	sc->iscp = iscp;

	/* SCB */
	off -= sizeof(*scb);
	scb = (volatile void *)(sc->buf_area + off);
	(sc->sc_memset)(__UNVOLATILE(scb), 0, sizeof(*scb));
	sc->scb = scb;

	/* Remainder is for buffers, etc. */
	sc->buf_area_sz = off;

	/*
	 * Now fill in the structures we just allocated.
	 */

	/* SCP: main thing is 24-bit ptr to ISCP */
	scp->ie_bus_use = 0;	/* 16-bit */
	scp->ie_iscp_ptr = Swap32(vtop24(sc, __UNVOLATILE(iscp)));

	/* ISCP */
	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = vtop16sw(sc, __UNVOLATILE(scb));
	iscp->ie_base = Swap32(vtop24(sc, sc->sc_maddr));

	/* SCB */
	scb->ie_command_list = SWAP(0xffff);
	scb->ie_recv_list = SWAP(0xffff);

	/* Other stuff is done in ieinit() */
	(sc->reset_586)(sc);
	(sc->chan_attn)(sc);

	delay(100);		/* wait a while... */

	if (iscp->ie_busy) {
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);

	return 1;
}

/*
 * Device timeout/watchdog routine.  Entered if the device neglects to
 * generate an interrupt after a transmit has been started on it.
 */
static void
iewatchdog(struct ifnet *ifp)
{
	struct ie_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++ifp->if_oerrors;
	iereset(sc);
}

/*
 * What to do upon receipt of an interrupt.
 */
int
ie_intr(void *arg)
{
	struct ie_softc *sc = arg;
	uint16_t status;
	int loopcnt;

	/*
	 * check for parity error
	 */
	if (sc->hard_type == IE_VME) {
		volatile struct ievme *iev =
		    (volatile struct ievme *)sc->sc_reg;

		if (iev->status & IEVME_PERR) {
			printf("%s: parity error (ctrl 0x%x @ 0x%02x%04x)\n",
			    device_xname(sc->sc_dev), iev->pectrl,
			    iev->pectrl & IEVME_HADDR, iev->peaddr);
			iev->pectrl = iev->pectrl | IEVME_PARACK;
		}
	}

	status = sc->scb->ie_status;
	if ((status & IE_ST_WHENCE) == 0)
		return 0;

	loopcnt = sc->nframes;
loop:
	/* Ack interrupts FIRST in case we receive more during the ISR. */
	ie_ack(sc, IE_ST_WHENCE & status);

	if (status & (IE_ST_RECV | IE_ST_RNR)) {
#ifdef IEDEBUG
		in_ierint++;
		if (sc->sc_debug & IED_RINT)
			printf("%s: rint\n", device_xname(sc->sc_dev));
#endif
		ierint(sc);
#ifdef IEDEBUG
		in_ierint--;
#endif
	}

	if (status & IE_ST_DONE) {
#ifdef IEDEBUG
		in_ietint++;
		if (sc->sc_debug & IED_TINT)
			printf("%s: tint\n", device_xname(sc->sc_dev));
#endif
		ietint(sc);
#ifdef IEDEBUG
		in_ietint--;
#endif
	}

	/*
	 * Receiver not ready (RNR) just means it has
	 * run out of resources (buffers or frames).
	 * One can easily cause this with (e.g.) spray.
	 * This is not a serious error, so be silent.
	 */
	if (status & IE_ST_RNR) {
#ifdef IEDEBUG
		printf("%s: receiver not ready\n", device_xname(sc->sc_dev));
#endif
		sc->sc_if.if_ierrors++;
		iereset(sc);
	}

#ifdef IEDEBUG
	if ((status & IE_ST_ALLDONE) && (sc->sc_debug & IED_CNA))
		printf("%s: cna\n", device_xname(sc->sc_dev));
#endif

	status = sc->scb->ie_status;
	if (status & IE_ST_WHENCE) {
		/* It still wants service... */
		if (--loopcnt > 0)
			goto loop;
		/* ... but we've been here long enough. */
		log(LOG_ERR, "%s: interrupt stuck?\n",
		    device_xname(sc->sc_dev));
		iereset(sc);
	}
	return 1;
}

/*
 * Process a received-frame interrupt.
 */
void
ierint(struct ie_softc *sc)
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i, status;
	static int timesthru = 1024;

	i = sc->rfhead;
	for (;;) {
		status = sc->rframes[i]->ie_fd_status;

		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
			if (!--timesthru) {
				sc->sc_if.if_ierrors +=
				    SWAP(scb->ie_err_crc) +
				    SWAP(scb->ie_err_align) +
				    SWAP(scb->ie_err_resource) +
				    SWAP(scb->ie_err_overrun);
				scb->ie_err_crc = 0;
				scb->ie_err_align = 0;
				scb->ie_err_resource = 0;
				scb->ie_err_overrun = 0;
				timesthru = 1024;
			}
			ie_readframe(sc, i);
		} else {
			if ((status & IE_FD_RNR) != 0 &&
			    (scb->ie_status & IE_RU_READY) == 0) {
				sc->rframes[0]->ie_fd_buf_desc = vtop16sw(sc,
				    __UNVOLATILE(sc->rbuffs[0]));
				scb->ie_recv_list = vtop16sw(sc,
				    __UNVOLATILE(sc->rframes[0]));
				cmd_and_wait(sc, IE_RU_START, 0, 0);
			}
			break;
		}
		i = (i + 1) % sc->nframes;
	}
}

/*
 * Process a command-complete interrupt.  These are only generated by the
 * transmission of frames.  This routine is deceptively simple, since most
 * of the real work is done by iestart().
 */
void
ietint(struct ie_softc *sc)
{
	struct ifnet *ifp;
	int status;

	ifp = &sc->sc_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	status = sc->xmit_cmds[sc->xctail]->ie_xmit_status;

	if (!(status & IE_STAT_COMPL) || (status & IE_STAT_BUSY))
		printf("%s: command still busy!\n", __func__);

	if (status & IE_STAT_OK) {
		ifp->if_opackets++;
		ifp->if_collisions +=
		    SWAP(status & IE_XS_MAXCOLL);
	} else {
		ifp->if_oerrors++;
		/*
		 * XXX
		 * Check SQE and DEFERRED?
		 * What if more than one bit is set?
		 */
		if (status & IE_STAT_ABORT)
			printf("%s: send aborted\n", device_xname(sc->sc_dev));
		if (status & IE_XS_LATECOLL)
			printf("%s: late collision\n",
			    device_xname(sc->sc_dev));
		if (status & IE_XS_NOCARRIER)
			printf("%s: no carrier\n", device_xname(sc->sc_dev));
		if (status & IE_XS_LOSTCTS)
			printf("%s: lost CTS\n", device_xname(sc->sc_dev));
		if (status & IE_XS_UNDERRUN)
			printf("%s: DMA underrun\n", device_xname(sc->sc_dev));
		if (status & IE_XS_EXCMAX) {
			/* Do not print this one (too noisy). */
			ifp->if_collisions += 16;
		}
	}

	/*
	 * If multicast addresses were added or deleted while we
	 * were transmitting, mc_reset() set the want_mcsetup flag
	 * indicating that we should do it.
	 */
	if (sc->want_mcsetup) {
		mc_setup(sc, (void *)sc->xmit_cbuffs[sc->xctail]);
		sc->want_mcsetup = 0;
	}

	/* Done with the buffer. */
	sc->xmit_busy--;
	sc->xctail = (sc->xctail + 1) % NTXBUF;

	/* Start the next packet, if any, transmitting. */
	if (sc->xmit_busy > 0)
		iexmit(sc);

	iestart(ifp);
}

/*
 * Compare two Ether/802 addresses for equality, inlined and
 * unrolled for speed.  I'd love to have an inline assembler
 * version of this...  XXX: Who wanted that? mycroft?
 * I wrote one, but the following is just as efficient.
 * This expands to 10 short m68k instructions! -gwr
 * Note: use this like memcmp()
 */
static inline uint16_t
ether_cmp(uint8_t *one, uint8_t *two)
{
	uint16_t *a = (uint16_t *)one;
	uint16_t *b = (uint16_t *)two;
	uint16_t diff;

	diff = *a++ - *b++;
	diff |= *a++ - *b++;
	diff |= *a++ - *b++;

	return diff;
}
#define	ether_equal !ether_cmp

/*
 * Check for a valid address.  to_bpf is filled in with one of the following:
 *	0 -> BPF doesn't get this packet
 *	1 -> BPF does get this packet
 *	2 -> BPF does get this packet, but we don't
 * Return value is true if the packet is for us, and false otherwise.
 *
 * This routine is a mess, but it's also critical that it be as fast
 * as possible.  It could be made cleaner if we can assume that the
 * only client which will fiddle with IFF_PROMISC is BPF.  This is
 * probably a good assumption, but we do not make it here.  (Yet.)
 */
static inline int
check_eh(struct ie_softc *sc, struct ether_header *eh, int *to_bpf)
{
	struct ifnet *ifp;

	ifp = &sc->sc_if;
	*to_bpf = (ifp->if_bpf != 0);

	/*
	 * This is all handled at a higher level now.
	 */
	return 1;
}

/*
 * We want to isolate the bits that have meaning...  This assumes that
 * IE_RBUF_SIZE is an even power of two.  If somehow the act_len exceeds
 * the size of the buffer, then we are screwed anyway.
 */
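/*
 * For example, assuming IE_RBUF_SIZE is 512 (0x200), the receive buffer
 * size described in ie_attach() above, the mask below works out to
 * 0x3ff: it keeps the actual byte count (0..512) and discards the
 * status bits that the chip stores in the rest of the ie_rbd_actual
 * word (see the IE_RBD_USED/IE_RBD_LAST tests elsewhere in this file).
 */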
static inline int
ie_buflen(struct ie_softc *sc, int head)
{
	int len;

	len = SWAP(sc->rbuffs[head]->ie_rbd_actual);
	len &= (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1));
	return len;
}

static inline int
ie_packet_len(struct ie_softc *sc)
{
	int i;
	int head = sc->rbhead;
	int acc = 0;

	do {
		if ((sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)
		    == 0) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR,
			    "%s: receive descriptors out of sync at %d\n",
			    device_xname(sc->sc_dev), sc->rbhead);
			iereset(sc);
			return -1;
		}

		i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;

		acc += ie_buflen(sc, head);
		head = (head + 1) % sc->nrxbuf;
	} while (i == 0);

	return acc;
}

/*
 * Set up all necessary artifacts for an XMIT command, and then pass the XMIT
 * command to the chip to be executed.  On the way, if we have a BPF listener
 * also give him a copy.
 */
static void
iexmit(struct ie_softc *sc)
{
	struct ifnet *ifp;

	ifp = &sc->sc_if;

#ifdef IEDEBUG
	if (sc->sc_debug & IED_XMIT)
		printf("%s: xmit buffer %d\n", device_xname(sc->sc_dev),
		    sc->xctail);
#endif

	/*
	 * If BPF is listening on this interface, let it see the packet before
	 * we push it on the wire.
	 */
	bpf_tap(ifp, sc->xmit_cbuffs[sc->xctail],
	    SWAP(sc->xmit_buffs[sc->xctail]->ie_xmit_flags));

	sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= IE_XMIT_LAST;
	sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
	sc->xmit_buffs[sc->xctail]->ie_xmit_buf =
	    Swap32(vtop24(sc, sc->xmit_cbuffs[sc->xctail]));

	sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
	sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
	    IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST;

	sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
	sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
	    vtop16sw(sc, __UNVOLATILE(sc->xmit_buffs[sc->xctail]));

	sc->scb->ie_command_list =
	    vtop16sw(sc, __UNVOLATILE(sc->xmit_cmds[sc->xctail]));
	cmd_and_wait(sc, IE_CU_START, 0, 0);

	ifp->if_timer = 5;
}

/*
 * Read data off the interface, and turn it into an mbuf chain.
 *
 * This code is DRAMATICALLY different from the previous version; this
 * version tries to allocate the entire mbuf chain up front, given the
 * length of the data available.  This enables us to allocate mbuf
 * clusters in many situations where before we would have had a long
 * chain of partially-full mbufs.  This should help to speed up the
 * operation considerably.  (Provided that it works, of course.)
 */
static inline struct mbuf *
ieget(struct ie_softc *sc, int *to_bpf)
{
	struct mbuf *top, **mp, *m;
	int len, totlen, resid;
	int thisrboff, thismboff;
	int head;
	struct ether_header eh;

	totlen = ie_packet_len(sc);
	if (totlen <= 0)
		return 0;

	head = sc->rbhead;

	/*
	 * Snarf the Ethernet header.
	 */
	(sc->sc_memcpy)((void *)&eh, (void *)sc->cbuffs[head],
	    sizeof(struct ether_header));

	/*
	 * As quickly as possible, check if this packet is for us.
	 * If not, don't waste a single cycle copying the rest of the
	 * packet in.
	 * This is only a consideration when FILTER is defined; i.e., when
	 * we are either running BPF or doing multicasting.
	 */
	if (check_eh(sc, &eh, to_bpf) == 0) {
		/* just this case, it's not an error */
		sc->sc_if.if_ierrors--;
		return 0;
	}

	resid = totlen;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return 0;

	m->m_pkthdr.rcvif = &sc->sc_if;
	m->m_pkthdr.len = totlen;
	len = MHLEN;
	top = 0;
	mp = &top;

	/*
	 * This loop goes through and allocates mbufs for all the data we will
	 * be copying in.  It does not actually do the copying yet.
	 */
	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return 0;
			}
			len = MLEN;
		}
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}

		if (mp == &top) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);

		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	m = top;
	thismboff = 0;

	/*
	 * Copy the Ethernet header into the mbuf chain.
	 */
	memcpy(mtod(m, void *), &eh, sizeof(struct ether_header));
	thismboff = sizeof(struct ether_header);
	thisrboff = sizeof(struct ether_header);
	resid -= sizeof(struct ether_header);

	/*
	 * Now we take the mbuf chain (hopefully only one mbuf most of the
	 * time) and stuff the data into it.  There are no possible failures
	 * at or after this point.
	 */
	while (resid > 0) {
		int thisrblen = ie_buflen(sc, head) - thisrboff;
		int thismblen = m->m_len - thismboff;

		len = min(thisrblen, thismblen);
		(sc->sc_memcpy)(mtod(m, char *) + thismboff,
		    (void *)(sc->cbuffs[head] + thisrboff),
		    (u_int)len);
		resid -= len;

		if (len == thismblen) {
			m = m->m_next;
			thismboff = 0;
		} else
			thismboff += len;

		if (len == thisrblen) {
			head = (head + 1) % sc->nrxbuf;
			thisrboff = 0;
		} else
			thisrboff += len;
	}

	/*
	 * Unless something changed strangely while we were doing the copy,
	 * we have now copied everything in from the shared memory.
	 * This means that we are done.
	 */
	return top;
}

/*
 * Read frame NUM from unit UNIT (pre-cached as IE).
 *
 * This routine reads the RFD at NUM, and copies in the buffers from
 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
 * doesn't start complaining.  Trailers are DROPPED---there's no point
 * in wasting time on confusing code to deal with them.  Hopefully,
 * this machine will never ARP for trailers anyway.
 */
static void
ie_readframe(struct ie_softc *sc, int num)
{
	int status;
	struct mbuf *m = 0;
	int bpf_gets_it = 0;

	status = sc->rframes[num]->ie_fd_status;

	/* Advance the RFD list, since we're done with this descriptor. */
	sc->rframes[num]->ie_fd_status = SWAP(0);
	sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
	sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
	sc->rftail = (sc->rftail + 1) % sc->nframes;
	sc->rfhead = (sc->rfhead + 1) % sc->nframes;

	if (status & IE_FD_OK) {
		m = ieget(sc, &bpf_gets_it);
		ie_drop_packet_buffer(sc);
	}
	if (m == 0) {
		sc->sc_if.if_ierrors++;
		return;
	}

#ifdef IEDEBUG
	if (sc->sc_debug & IED_READFRAME) {
		struct ether_header *eh = mtod(m, struct ether_header *);

		printf("%s: frame from ether %s type 0x%x\n",
		    device_xname(sc->sc_dev),
		    ether_sprintf(eh->ether_shost), (u_int)eh->ether_type);
	}
#endif

	/*
	 * Check for a BPF filter; if so, hand it up.
	 * Note that we have to stick an extra mbuf up front, because
	 * bpf_mtap expects to have the ether header at the front.
	 * It doesn't matter that this results in an ill-formatted mbuf chain,
	 * since BPF just looks at the data.  (It doesn't try to free the mbuf,
	 * tho' it will make a copy for tcpdump.)
	 */
	if (bpf_gets_it) {
		/* Pass it up. */
		bpf_mtap(&sc->sc_if, m);

		/*
		 * A signal passed up from the filtering code indicating that
		 * the packet is intended for BPF but not for the protocol
		 * machinery.  We can save a few cycles by not handing it off
		 * to them.
		 */
		if (bpf_gets_it == 2) {
			m_freem(m);
			return;
		}
	}

	/*
	 * In here there used to be code to check destination addresses upon
	 * receipt of a packet.  We have deleted that code, and replaced it
	 * with code to check the address much earlier in the cycle, before
	 * copying the data in; this saves us valuable cycles when operating
	 * as a multicast router or when using BPF.
	 */

	/*
	 * Finally pass this packet up to higher layers.
	 */
	(*sc->sc_if.if_input)(&sc->sc_if, m);
	sc->sc_if.if_ipackets++;
}

static void
ie_drop_packet_buffer(struct ie_softc *sc)
{
	int i;

	do {
		/*
		 * This means we are somehow out of sync.  So, we reset the
		 * adapter.
		 */
		if ((sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)
		    == 0) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR,
			    "%s: receive descriptors out of sync at %d\n",
			    device_xname(sc->sc_dev), sc->rbhead);
			iereset(sc);
			return;
		}

		i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;

		sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
		sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	} while (i == 0);
}

/*
 * Start transmission on an interface.
 */
static void
iestart(struct ifnet *ifp)
{
	struct ie_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	uint8_t *buffer;
	uint16_t len;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		if (sc->xmit_busy == sc->ntxbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0)
			break;

		/* We need to use m->m_pkthdr.len, so require the header */
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("%s: no header mbuf", __func__);

		/* Tap off here if there is a BPF listener. */
		bpf_mtap(ifp, m0);

#ifdef IEDEBUG
		if (sc->sc_debug & IED_ENQ)
			printf("%s: fill buffer %d\n", device_xname(sc->sc_dev),
			    sc->xchead);
#endif

		buffer = sc->xmit_cbuffs[sc->xchead];
		for (m = m0; m != 0; m = m->m_next) {
			(sc->sc_memcpy)(buffer, mtod(m, void *), m->m_len);
			buffer += m->m_len;
		}
		if (m0->m_pkthdr.len < ETHER_MIN_LEN - ETHER_CRC_LEN) {
			sc->sc_memset(buffer, 0,
			    ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len);
			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
		} else
			len = m0->m_pkthdr.len;

		m_freem(m0);
		sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);

		/* Start the first packet transmitting. */
		if (sc->xmit_busy == 0)
			iexmit(sc);

		sc->xchead = (sc->xchead + 1) % sc->ntxbuf;
		sc->xmit_busy++;
	}
}

static void
iereset(struct ie_softc *sc)
{
	int s;

	s = splnet();

	/* No message here.  The caller does that. */
	iestop(sc);

	/*
	 * Stop i82586 dead in its tracks.
	 */
	if (cmd_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
		printf("%s: abort commands timed out\n",
		    device_xname(sc->sc_dev));

	if (cmd_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
		printf("%s: disable commands timed out\n",
		    device_xname(sc->sc_dev));

	ieinit(sc);

	splx(s);
}

/*
 * Send a command to the controller and wait for it to either
 * complete or be accepted, depending on the command.  If the
 * command pointer is null, then pretend that the command is
 * not an action command.  If the command pointer is not null,
 * and the command is an action command, wait for
 *	((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
 * to become true.
 */
static int
cmd_and_wait(struct ie_softc *sc, int cmd, void *pcmd, int mask)
{
	volatile struct ie_cmd_common *cc = pcmd;
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int tmo;

	scb->ie_command = (uint16_t)cmd;
	(sc->chan_attn)(sc);

	/* Wait for the command to be accepted by the CU. */
	tmo = 10;
	while (scb->ie_command && --tmo)
		delay(10);
	if (scb->ie_command) {
#ifdef IEDEBUG
		printf("%s: cmd_and_wait, CU stuck (1)\n",
		    device_xname(sc->sc_dev));
#endif
		return -1;	/* timed out */
	}

	/*
	 * If asked, also wait for it to finish.
	 */
	if (IE_ACTION_COMMAND(cmd) && pcmd) {

		/*
		 * According to the packet driver, the minimum timeout should
		 * be .369 seconds, which we round up to .4.
		 */
		tmo = 36900;

		/*
		 * Now spin-lock waiting for status.  This is not a very nice
		 * thing to do, but I haven't figured out how, or indeed if, we
		 * can put the process waiting for action to sleep.  (We may
		 * be getting called through some other timeout running in the
		 * kernel.)
		 */
		while (((cc->ie_cmd_status & mask) == 0) && --tmo)
			delay(10);

		if ((cc->ie_cmd_status & mask) == 0) {
#ifdef IEDEBUG
			printf("%s: cmd_and_wait, CU stuck (2)\n",
			    device_xname(sc->sc_dev));
#endif
			return -1;	/* timed out */
		}
	}
	return 0;
}

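/*
 * A sketch of the typical action-command sequence built on top of
 * cmd_and_wait(); run_tdr(), mc_setup() and ieinit() below all follow
 * this pattern (IE_CMD_xxx stands for whichever command is issued):
 *
 *	cmd->com.ie_cmd_status = SWAP(0);
 *	cmd->com.ie_cmd_cmd = IE_CMD_xxx | IE_CMD_LAST;
 *	cmd->com.ie_cmd_link = SWAP(0xffff);
 *	sc->scb->ie_command_list = vtop16sw(sc, cmd);
 *	if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
 *	    (cmd->com.ie_cmd_status & IE_STAT_OK) == 0)
 *		... report failure ...
 */
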
/*
 * Run the time-domain reflectometer.
 */
static void
run_tdr(struct ie_softc *sc, struct ie_tdr_cmd *cmd)
{
	int result;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	sc->scb->ie_command_list = vtop16sw(sc, cmd);
	cmd->ie_tdr_time = SWAP(0);

	if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    (cmd->com.ie_cmd_status & IE_STAT_OK) == 0)
		result = 0x10000;	/* impossible value */
	else
		result = cmd->ie_tdr_time;

	ie_ack(sc, IE_ST_WHENCE);

	if (result & IE_TDR_SUCCESS)
		return;

	if (result & 0x10000) {
		printf("%s: TDR command failed\n", device_xname(sc->sc_dev));
	} else if (result & IE_TDR_XCVR) {
		printf("%s: transceiver problem\n", device_xname(sc->sc_dev));
	} else if (result & IE_TDR_OPEN) {
		printf("%s: TDR detected an open %d clocks away\n",
		    device_xname(sc->sc_dev), SWAP(result & IE_TDR_TIME));
	} else if (result & IE_TDR_SHORT) {
		printf("%s: TDR detected a short %d clocks away\n",
		    device_xname(sc->sc_dev), SWAP(result & IE_TDR_TIME));
	} else {
		printf("%s: TDR returned unknown status 0x%x\n",
		    device_xname(sc->sc_dev), result);
	}
}

/*
 * iememinit: set up the buffers
 *
 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers.  the chip indexes its control data
 * structures with 16 bit offsets, and it indexes actual buffers with
 * 24 bit addresses.  so we should allocate control buffers first so that
 * we don't overflow the 16 bit offset field.  The number of transmit
 * buffers is fixed at compile time.
 *
 * note: this function was written to be easy to understand, rather than
 * highly efficient (it isn't in the critical path).
 *
 * The memory layout is: tbufs, rbufs, (gap), control blocks
 *	[tbuf0, tbuf1] [rbuf0,...rbufN] gap [rframes] [tframes]
 * XXX - This needs review...
 */
static void
iememinit(struct ie_softc *sc)
{
	uint8_t *ptr;
	int i;
	uint16_t nxt;

	/* First, zero all the memory. */
	ptr = sc->buf_area;
	(sc->sc_memset)(ptr, 0, sc->buf_area_sz);

	/* Allocate tx/rx buffers. */
	for (i = 0; i < NTXBUF; i++) {
		sc->xmit_cbuffs[i] = ptr;
		ptr += IE_TBUF_SIZE;
	}
	for (i = 0; i < sc->nrxbuf; i++) {
		sc->cbuffs[i] = ptr;
		ptr += IE_RBUF_SIZE;
	}

	/* Small pad (Don't trust the chip...) */
	ptr += 16;

	/* Allocate and fill in xmit buffer descriptors. */
	for (i = 0; i < NTXBUF; i++) {
		sc->xmit_buffs[i] = (volatile void *)ptr;
		ptr = Align(ptr + sizeof(*sc->xmit_buffs[i]));
		sc->xmit_buffs[i]->ie_xmit_buf =
		    Swap32(vtop24(sc, sc->xmit_cbuffs[i]));
		sc->xmit_buffs[i]->ie_xmit_next = SWAP(0xffff);
	}

	/* Allocate and fill in recv buffer descriptors. */
	for (i = 0; i < sc->nrxbuf; i++) {
		sc->rbuffs[i] = (volatile void *)ptr;
		ptr = Align(ptr + sizeof(*sc->rbuffs[i]));
		sc->rbuffs[i]->ie_rbd_buffer =
		    Swap32(vtop24(sc, sc->cbuffs[i]));
		sc->rbuffs[i]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
	}

	/* link together recv bufs and set EOL on last */
	i = sc->nrxbuf - 1;
	sc->rbuffs[i]->ie_rbd_length |= IE_RBD_LAST;
	nxt = vtop16sw(sc, __UNVOLATILE(sc->rbuffs[0]));
	do {
		sc->rbuffs[i]->ie_rbd_next = nxt;
		nxt = vtop16sw(sc, __UNVOLATILE(sc->rbuffs[i]));
	} while (--i >= 0);

	/* Allocate transmit commands. */
	for (i = 0; i < NTXBUF; i++) {
		sc->xmit_cmds[i] = (volatile void *)ptr;
		ptr = Align(ptr + sizeof(*sc->xmit_cmds[i]));
		sc->xmit_cmds[i]->com.ie_cmd_link = SWAP(0xffff);
	}

	/* Allocate receive frames. */
	for (i = 0; i < sc->nframes; i++) {
		sc->rframes[i] = (volatile void *)ptr;
		ptr = Align(ptr + sizeof(*sc->rframes[i]));
	}

	/* Link together recv frames and set EOL on last */
	i = sc->nframes - 1;
	sc->rframes[i]->ie_fd_last |= IE_FD_LAST;
	nxt = vtop16sw(sc, __UNVOLATILE(sc->rframes[0]));
	do {
		sc->rframes[i]->ie_fd_next = nxt;
		nxt = vtop16sw(sc, __UNVOLATILE(sc->rframes[i]));
	} while (--i >= 0);


	/* Pointers to last packet sent and next available transmit buffer. */
	sc->xchead = sc->xctail = 0;

	/* Clear transmit-busy flag. */
	sc->xmit_busy = 0;

	/*
	 * Set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used.  Link the
	 * recv frames and buffers into the scb.
	 */
	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list =
	    vtop16sw(sc, __UNVOLATILE(sc->rframes[0]));
	sc->rframes[0]->ie_fd_buf_desc =
	    vtop16sw(sc, __UNVOLATILE(sc->rbuffs[0]));

	i = (ptr - sc->buf_area);
#ifdef IEDEBUG
	printf("IE_DEBUG: used %d of %d bytes\n", i, sc->buf_area_sz);
#endif
	if (i > sc->buf_area_sz)
		panic("ie: iememinit, out of space");
}

/*
 * Run the multicast setup command.
 * Called at splnet().
 */
static int
mc_setup(struct ie_softc *sc, void *ptr)
{
	struct ie_mcast_cmd *cmd = ptr;	/* XXX - Was volatile */

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	(sc->sc_memcpy)((void *)cmd->ie_mcast_addrs,
	    (void *)sc->mcast_addrs,
	    sc->mcast_count * sizeof *sc->mcast_addrs);

	cmd->ie_mcast_bytes =
	    SWAP(sc->mcast_count * ETHER_ADDR_LEN);	/* grrr... */

	sc->scb->ie_command_list = vtop16sw(sc, cmd);
	if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    (cmd->com.ie_cmd_status & IE_STAT_OK) == 0) {
		printf("%s: multicast address setup command failed\n",
		    device_xname(sc->sc_dev));
		return 0;
	}
	return 1;
}

static inline void
ie_setup_config(struct ie_config_cmd *cmd, int promiscuous, int manchester)
{

	/*
	 * these are all char's so no need to byte-swap
	 */
	cmd->ie_config_count = 0x0c;
	cmd->ie_fifo = 8;
	cmd->ie_save_bad = 0x40;
	cmd->ie_addr_len = 0x2e;
	cmd->ie_priority = 0;
	cmd->ie_ifs = 0x60;
	cmd->ie_slot_low = 0;
	cmd->ie_slot_high = 0xf2;
	cmd->ie_promisc = promiscuous | manchester << 2;
	cmd->ie_crs_cdt = 0;
	cmd->ie_min_len = 64;
	cmd->ie_junk = 0xff;
}

/*
 * This routine inits the ie.
 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
 * starting the receiver unit, and clearing interrupts.
 *
 * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
 */
static int
ieinit(struct ie_softc *sc)
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	void *ptr;
	struct ifnet *ifp;

	ifp = &sc->sc_if;
	ptr = sc->buf_area;	/* XXX - Use scb instead? */

	/*
	 * Send the configure command first.
	 */
	{
		struct ie_config_cmd *cmd = ptr;	/* XXX - Was volatile */

		scb->ie_command_list = vtop16sw(sc, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		ie_setup_config(cmd, (sc->promisc != 0), 0);

		if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    (cmd->com.ie_cmd_status & IE_STAT_OK) == 0) {
			printf("%s: configure command failed\n",
			    device_xname(sc->sc_dev));
			return 0;
		}
	}

	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		struct ie_iasetup_cmd *cmd = ptr;	/* XXX - Was volatile */

		scb->ie_command_list = vtop16sw(sc, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		(sc->sc_memcpy)((void *)&cmd->ie_address,
		    CLLADDR(ifp->if_sadl), sizeof(cmd->ie_address));

		if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    (cmd->com.ie_cmd_status & IE_STAT_OK) == 0) {
			printf("%s: individual address setup command failed\n",
			    device_xname(sc->sc_dev));
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	if (ie_run_tdr)
		run_tdr(sc, ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	iememinit(sc);

	/* tell higher levels that we are here */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->scb->ie_recv_list =
	    vtop16sw(sc, __UNVOLATILE(sc->rframes[0]));
	cmd_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	if (sc->run_586)
		(sc->run_586)(sc);

	return 0;
}

static void
iestop(struct ie_softc *sc)
{

	cmd_and_wait(sc, IE_RU_DISABLE, 0, 0);
}

static int
ieioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ie_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
			break;
		default:
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
			break;
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = ie_debug_flags;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	splx(s);
	return error;
}

static void
mc_reset(struct ie_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ifnet *ifp;

	ifp = &sc->sc_if;

	/*
	 * Step through the list of addresses.
	 */
	sc->mcast_count = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm) {
		if (sc->mcast_count >= MAXMCAST ||
		    ether_cmp(enm->enm_addrlo, enm->enm_addrhi) != 0) {
			ifp->if_flags |= IFF_ALLMULTI;
			ieioctl(ifp, SIOCSIFFLAGS, NULL);
			goto setflag;
		}
		memcpy(&sc->mcast_addrs[sc->mcast_count], enm->enm_addrlo,
		    ETHER_ADDR_LEN);
		sc->mcast_count++;
		ETHER_NEXT_MULTI(step, enm);
	}
 setflag:
	sc->want_mcsetup = 1;
}

#ifdef IEDEBUG
void
print_rbd(volatile struct ie_recv_buf_desc *rbd)
{

	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
	    "length %04x, mbz %04x\n", (u_long)rbd, rbd->ie_rbd_actual,
	    rbd->ie_rbd_next, rbd->ie_rbd_buffer, rbd->ie_rbd_length,
	    rbd->mbz);
}
#endif