/*	$NetBSD: hd64570.c,v 1.19 2001/08/14 11:55:38 mrg Exp $	*/

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on both transmit and receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using dma.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */

#include "bpfilter.h"
#include "opt_inet.h"
#include "opt_iso.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX		0x0001
#define SCA_DEBUG_TX		0x0002
#define SCA_DEBUG_CISCO		0x0004
#define SCA_DEBUG_DMA		0x0008
#define SCA_DEBUG_RXPKT		0x0010
#define SCA_DEBUG_TXPKT		0x0020
#define SCA_DEBUG_INTR		0x0040
#define SCA_DEBUG_CLOCK		0x0080

#if 0
#define SCA_DEBUG_LEVEL		(0xFFFF)
#else
#define SCA_DEBUG_LEVEL		0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
	if ((l) & sca_debug) \
		printf x; \
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ		/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output __P((struct ifnet *, struct mbuf *, struct sockaddr *,
			   struct rtentry *));
static int sca_ioctl __P((struct ifnet *, u_long, caddr_t));
static void sca_start __P((struct ifnet *));
static void sca_watchdog __P((struct ifnet *));

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, caddr_t, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	\
	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
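
/*
 * Note on the accessors that follow: the chip's descriptor and buffer
 * memory is reached in one of two ways.  When sc_usedma is set, the
 * memory is ordinary host memory in a bus_dma region and descriptors
 * may be dereferenced directly.  Otherwise the memory sits on the card
 * behind a small paged window, so every access goes through
 * bus_space_*() with sca_page_addr() masking the address down to an
 * offset within the currently selected page (see the scu_set_page()
 * and scu_page_on()/scu_page_off() callbacks used below).
 */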

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or fewer than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = sc->sc_parent->dv_unit * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	sprintf(ifp->if_xname, "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_HDLC, HDLC_HDRLEN);
#endif

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		    ifp->if_xname, sc->sc_parent->dv_xname, port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *  1 <= TD <= 256		TD is inc of 1
	 *  2 <= TD <= 512		TD is inc of 2
	 *  4 <= TD <= 1024		TD is inc of 4
	 *  ...
	 *  512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */

	/* assume system clock is 9.8304 MHz or 9830400 Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif
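
/*
 * A worked example of the search in sca_msci_get_baud_rate_values()
 * above (illustrative only; the routine itself is compiled out).
 * With the assumed 9830400 Hz crystal, clock starts at 4915200 Hz;
 * ask for hz = 9600:
 *
 *	4915200 / 9600 = 512 > 256, so halve: clock = 2457600, div = 1
 *	2457600 / 9600 = 256, and 2457600 / 256 == 9600, so the loop stops
 *
 * leaving tmc = 256 (stored as 0, since 0 means 256) and div = 1,
 * i.e. (4915200 / 256) / 2^1 = 9600 Hz exactly.
 */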

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
	    (SCA_MD0_CRC_1
	     | SCA_MD0_CRC_CCITT
	     | SCA_MD0_CRC_ENABLE
	     | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
	    (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
	    (SCA_CTL_IDLC_PATTERN
	     | SCA_CTL_UDRNC_AFTER_FCS
	     | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns:
	 * for any fifo level less than or equal to TRC0, txrdy is activated,
	 * which will start the dmac transfer to the fifo; once the fifo
	 * holds TRC1 + 1 bytes or more, txrdy is cleared, which will stop
	 * dma.
	 *
	 * Thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  For cards with on-board ram this
	 * has no effect on system performance.  For cards that dma
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize, BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;			/* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
	    (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
	    (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;			/* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
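
/*
 * A sketch of the rx ring as sca_dmac_rxinit() below programs it, for a
 * hypothetical 4-descriptor ring (illustrative only):
 *
 *	desc[0] <- CDA (sp_rxstart = 0): next descriptor the chip fills
 *	desc[1]
 *	desc[2]
 *	desc[3] <- EDA (sp_rxend = 3): overwrite position, the chip
 *	           stops before advancing into this descriptor
 *	    `--> chains back to desc[0]
 *
 * Since CDA may never equal EDA while running, one descriptor always
 * separates them, which is the "single buffer gap" the comment inside
 * the function refers to.
 */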

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);		/* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
	    (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(ifp, m, dst, rt0)
	struct ifnet *ifp;
	struct mbuf *m;
	struct sockaddr *dst;
	struct rtentry *rt0;
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == 0)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		    ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}
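
/*
 * For reference, a sketch of the Cisco serial encapsulation built by
 * sca_output() above, inferred from the fields it fills in and from
 * conventional Cisco HDLC framing (the real struct hdlc_header is
 * defined in a header file; treat the widths here as an assumption):
 *
 *	h_addr	(1 byte)   CISCO_UNICAST or CISCO_MULTICAST
 *	h_resv	(1 byte)   control byte, always written as 0
 *	h_proto	(2 bytes)  payload protocol, in network byte order
 *
 * HDLC_HDRLEN, used for if_hdrlen and the bpf attachment, covers this
 * prefix.
 */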

static int
sca_ioctl(ifp, cmd, addr)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t addr;
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)addr;
	ifa = (struct ifaddr *)addr;
	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == 0) {
			error = EAFNOSUPPORT;		/* XXX */
			break;
		}
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = EINVAL;
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(ifp)
	struct ifnet *ifp;
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

 txloop:
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IF_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

#if NBPFILTER > 0
	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, mb_head);
#endif

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
		     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
			    isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
			    (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
			    dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				    scp->sp_if.if_xname,
				    dmac_read_2(scp, SCA_CDAL1),
				    dmac_read_2(scp, SCA_EDAL1),
				    dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
			    | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				    scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :) */
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	sc = scp->sca;
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}
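
/*
 * A worked instance of the index arithmetic in sca_frame_avail() above,
 * with illustrative numbers and assuming sizeof(sca_desc_t) == 8: if
 * the low 16 bits of sp_rxdesc_p are 0x1000 and the chip reports
 * CDAL0 == 0x1050, then desc_p = 0x50 = 80 and cdaidx = 80 / 8 = 10,
 * so descriptors sp_rxstart through 9 hold completed data that the
 * loop may examine.
 */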

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = (time.tv_sec - boottime.tv_sec) * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

#if NBPFILTER > 0
	if (scp->sp_if.if_bpf)
		bpf_mtap(scp->sp_if.if_bpf, m);
#endif

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
		    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("short CISCO packet %d, wanted %d\n",
			    len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received KA, mseq %d,"
			     " yseq %d, rel 0x%04x, t0"
			     " %04x, t1 %04x\n",
			     ntohl(cisco->par1), ntohl(cisco->par2),
			     ntohs(cisco->rel), ntohs(cisco->time0),
			     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Unknown CISCO keepalive protocol 0x%04x\n",
			     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
		    ("Unknown/unexpected protocol 0x%04x\n",
		     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	    desc,
	    sca_desc_read_chainp(sc, desc),
	    sca_desc_read_bufp(sc, desc),
	    sca_desc_read_stat(sc, desc),
	    (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		     : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
		     : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	scp->cka_lasttx = time.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	struct sca_softc *sc;
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	sc = scp->sca;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
	    (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, caddr_t p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, caddr_t), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}
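
/*
 * A worked pass through the arithmetic in sca_get_base_clock() below,
 * using illustrative numbers: with a 9830400 Hz base clock the timer
 * ticks at 9830400 / 8 = 1228800 Hz, so a 750000 us measurement window
 * accumulates 921600 ticks, seen as cnt = 14 rollovers plus a residual
 * subcnt = 0x1000.  Then (14 << 16) | 0x1000 = 921600, times 8 gives
 * 7372800 base-clock cycles, scaling by 750000 / tv_usec corrects the
 * window to exactly 3/4 s, and 7372800 * 4 / 3 = 9830400 Hz, which the
 * final rounding to the nearest 200 leaves unchanged.
 */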

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", sc->sc_parent->dv_xname,
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf("  tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}