/* $NetBSD: hd64570.c,v 1.42 2010/01/19 22:06:24 pooka Exp $ */

/*
 * Copyright (c) 1999 Christian E. Hopps
 * Copyright (c) 1998 Vixie Enterprises
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Vixie Enterprises nor the names
 *    of its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND
 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This software has been written for Vixie Enterprises by Michael Graff
 * <explorer@flame.org>.  To learn more about Vixie Enterprises, see
 * ``http://www.vix.com''.
 */

/*
 * TODO:
 *
 *	o  teach the receive logic about errors, and about long frames that
 *	   span more than one input buffer.  (Right now, receive/transmit is
 *	   limited to one descriptor's buffer space, which is MTU + 4 bytes.
 *	   This is currently 1504, which is large enough to hold the HDLC
 *	   header and the packet itself.  Packets which are too long are
 *	   silently dropped on transmit and silently dropped on receive.)
 *	o  write code to handle the msci interrupts, needed only for CD
 *	   and CTS changes.
 *	o  consider switching back to a "queue tx with DMA active" model which
 *	   should help sustain outgoing traffic
 *	o  through clever use of bus_dma*() functions, it should be possible
 *	   to map the mbuf's data area directly into a descriptor transmit
 *	   buffer, removing the need to allocate extra memory.  If, however,
 *	   we run out of descriptors for this, we will need to then allocate
 *	   one large mbuf, copy the fragmented chain into it, and put it onto
 *	   a single descriptor.
 *	o  use bus_dmamap_sync() with the right offset and lengths, rather
 *	   than cheating and always sync'ing the whole region.
 *
 *	o  perhaps allow rx and tx to be in more than one page
 *	   if not using DMA.  currently the assumption is that
 *	   rx uses a page and tx uses a page.
 */
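
/*
 * Illustrative sketch (not part of the driver) of the bus_dma*() idea
 * from the TODO above: the mbuf chain could be loaded directly with
 * bus_dmamap_load_mbuf() and the resulting segments written into the
 * transmit descriptors instead of being copied into a private buffer.
 * The map, descriptor cursor, and error handling below are
 * hypothetical.
 *
 *	error = bus_dmamap_load_mbuf(sc->scu_dmat, map, m, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		for (i = 0; i < map->dm_nsegs; i++) {
 *			sca_desc_write_bufp(sc, desc, map->dm_segs[i].ds_addr);
 *			sca_desc_write_buflen(sc, desc, map->dm_segs[i].ds_len);
 *			desc++;
 *		}
 *	}
 */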

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.42 2010/01/19 22:06:24 pooka Exp $");

#include "opt_inet.h"
#include "opt_iso.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#endif
#endif

#ifdef ISO
#include <net/if_llc.h>
#include <netiso/iso.h>
#include <netiso/iso_var.h>
#endif

#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/hd64570reg.h>
#include <dev/ic/hd64570var.h>

#define SCA_DEBUG_RX	0x0001
#define SCA_DEBUG_TX	0x0002
#define SCA_DEBUG_CISCO	0x0004
#define SCA_DEBUG_DMA	0x0008
#define SCA_DEBUG_RXPKT	0x0010
#define SCA_DEBUG_TXPKT	0x0020
#define SCA_DEBUG_INTR	0x0040
#define SCA_DEBUG_CLOCK	0x0080

#if 0
#define SCA_DEBUG_LEVEL	( 0xFFFF )
#else
#define SCA_DEBUG_LEVEL	0
#endif

u_int32_t sca_debug = SCA_DEBUG_LEVEL;

#if SCA_DEBUG_LEVEL > 0
#define SCA_DPRINTF(l, x) do { \
		if ((l) & sca_debug) \
			printf x; \
	} while (0)
#else
#define SCA_DPRINTF(l, x)
#endif

#if 0
#define SCA_USE_FASTQ	/* use a split queue, one for fast traffic */
#endif

static inline void msci_write_1(sca_port_t *, u_int, u_int8_t);
static inline u_int8_t msci_read_1(sca_port_t *, u_int);

static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t);
static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t);
static inline u_int8_t dmac_read_1(sca_port_t *, u_int);
static inline u_int16_t dmac_read_2(sca_port_t *, u_int);

static void sca_msci_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_init(struct sca_softc *, sca_port_t *);
static void sca_dmac_rxinit(sca_port_t *);

static int sca_dmac_intr(sca_port_t *, u_int8_t);
static int sca_msci_intr(sca_port_t *, u_int8_t);

static void sca_get_packets(sca_port_t *);
static int sca_frame_avail(sca_port_t *);
static void sca_frame_process(sca_port_t *);
static void sca_frame_read_done(sca_port_t *);

static void sca_port_starttx(sca_port_t *);

static void sca_port_up(sca_port_t *);
static void sca_port_down(sca_port_t *);

static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
		      struct rtentry *);
static int sca_ioctl(struct ifnet *, u_long, void *);
static void sca_start(struct ifnet *);
static void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)
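
/*
 * Example (illustrative; the window size is an assumption): with a
 * 16KB on-board memory window, scu_pagemask would be 0x3fff, so
 * sca_page_addr() reduces a descriptor or buffer address to its
 * offset within the currently selected page.
 */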

static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}
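
/*
 * Note on the descriptor accessors above and below: when the card DMAs
 * to and from system memory (sc_usedma), descriptors live in host
 * memory and are read and written directly through the structure;
 * otherwise they live in on-board memory and every access goes through
 * bus_space at the field's offset within the currently selected page.
 */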

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can't handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if (sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);

	bpf_ops->bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN, &ifp->if_bpf);

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets 'tmc' for the required freq 'hz'
 */
static u_int8_t
sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp)
{
	u_int32_t tmc, div;
	u_int32_t clock;

	/* clock hz = (chipclock / tmc) / 2^(div); */
	/*
	 * TD == tmc * 2^(n)
	 *
	 * note:
	 *   1 <= TD <= 256		TD is inc of 1
	 *   2 <= TD <= 512		TD is inc of 2
	 *   4 <= TD <= 1024		TD is inc of 4
	 *   ...
	 *   512 <= TD <= 256*512	TD is inc of 512
	 *
	 * so note that there are overlaps.  We lose precision
	 * as div increases, so we wish to minimize div.
	 *
	 * basically we want to do
	 *
	 * tmc = chip / hz, but have tmc <= 256
	 */
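
	/*
	 * Worked example (illustrative): for hz = 9600 with the
	 * 4915200 Hz base assumed below, tmc starts at 512, so the
	 * clock is halved once (div = 1), giving tmc = 256, which is
	 * stored as 0 ("0 == 256").  The generated rate is then
	 * 4915200 / (256 * 2^1) = 9600 Hz exactly.
	 */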

	/* assume system clock is 9.8304MHz or 9830400Hz */
	clock = 9830400 >> 1;

	/* round down */
	div = 0;
	while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) {
		clock >>= 1;
		div++;
	}
	if (clock / tmc > hz)
		tmc++;
	if (!tmc)
		tmc = 1;

	if (div > SCA_RXS_DIV_512) {
		/* set to maximums */
		div = SCA_RXS_DIV_512;
		tmc = 0;
	}

	*tmcp = (tmc & 0xFF);	/* 0 == 256 */
	return (div & 0xFF);
}
#endif

/*
 * initialize the port's MSCI
 */
static void
sca_msci_init(struct sca_softc *sc, sca_port_t *scp)
{
	/* reset the channel */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET);

	msci_write_1(scp, SCA_MD00,
		     (  SCA_MD0_CRC_1
		      | SCA_MD0_CRC_CCITT
		      | SCA_MD0_CRC_ENABLE
		      | SCA_MD0_MODE_HDLC));
#if 0
	/* immediately send receive reset so the above takes */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif

	msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK);
	msci_write_1(scp, SCA_MD20,
		     (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ));

	/* be safe and do it again */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);

	/* setup underrun and idle control, and initial RTS state */
	msci_write_1(scp, SCA_CTL0,
		     (SCA_CTL_IDLC_PATTERN
		      | SCA_CTL_UDRNC_AFTER_FCS
		      | SCA_CTL_RTS_LOW));

	/* reset the transmitter */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);

	/*
	 * set the clock sources
	 */
	msci_write_1(scp, SCA_RXS0, scp->sp_rxs);
	msci_write_1(scp, SCA_TXS0, scp->sp_txs);
	msci_write_1(scp, SCA_TMC0, scp->sp_tmc);

	/* set external clock generate as requested */
	sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock);

	/*
	 * XXX don't pay attention to CTS or CD changes right now.  I can't
	 * simulate one, and the transmitter will try to transmit even if
	 * CD isn't there anyway, so nothing bad SHOULD happen.
	 */
#if 0
	msci_write_1(scp, SCA_IE00, 0);
	msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */
#else
	/* this would deliver transmitter underrun to ST1/ISR1 */
	msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN);
	msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT);
#endif
	msci_write_1(scp, SCA_IE20, 0);

	msci_write_1(scp, SCA_FIE0, 0);

	msci_write_1(scp, SCA_SA00, 0);
	msci_write_1(scp, SCA_SA10, 0);

	msci_write_1(scp, SCA_IDL0, 0x7e);

	msci_write_1(scp, SCA_RRC0, 0x0e);
	/* msci_write_1(scp, SCA_TRC00, 0x10); */
	/*
	 * The correct values here are important for avoiding underruns:
	 * for any fifo fill level less than or equal to TRC0, txrdy is
	 * activated, which starts the dmac transfer into the fifo; once
	 * the fill level reaches TRC1 + 1, txrdy is cleared, which stops
	 * the DMA.
	 *
	 * Thus if we are using a very fast clock that empties the fifo
	 * quickly, delays in the dmac starting to fill the fifo can
	 * lead to underruns, so we want a fairly full fifo to still
	 * cause the dmac to start.  For cards with on-board ram this
	 * has no effect on system performance.  For cards that DMA
	 * to/from system memory it will cause more, shorter,
	 * bus accesses rather than fewer longer ones.
	 */
	msci_write_1(scp, SCA_TRC00, 0x00);
	msci_write_1(scp, SCA_TRC10, 0x1f);
}
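
/*
 * An observation on the values written above (the 32-byte fifo depth
 * is an assumption): TRC0 = 0x00 means DMA into the fifo starts only
 * once the fifo has drained completely, and TRC1 = 0x1f stops the DMA
 * when it is full.  On a transmit underrun, sca_msci_intr() below
 * raises TRC0 so that refills begin earlier.
 */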

/*
 * Take the memory for the port and construct two circular linked lists of
 * descriptors (one tx, one rx) and set the pointers in these descriptors
 * to point to the buffer space for this port.
 */
static void
sca_dmac_init(struct sca_softc *sc, sca_port_t *scp)
{
	sca_desc_t *desc;
	u_int32_t desc_p;
	u_int32_t buf_p;
	int i;

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize, BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX assumes that all tx desc and bufs in same page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	desc = scp->sp_txdesc;
	desc_p = scp->sp_txdesc_p;
	buf_p = scp->sp_txbuf_p;
	scp->sp_txcur = 0;
	scp->sp_txinuse = 0;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000))
		panic("sca: tx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000))
		panic("sca: tx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_ntxdesc ; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		sca_desc_write_buflen(sc, desc, SCA_BSIZE);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}
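
	/*
	 * Illustrative picture of the result (assuming 4 descriptors):
	 *
	 *	desc[0] -> desc[1] -> desc[2] -> desc[3] --+
	 *	   ^                                       |
	 *	   +---------------------------------------+
	 *
	 * The loop above fills in the forward links; the write below
	 * closes the ring.
	 */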

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff);

	/*
	 * Now, initialize the transmit DMA logic
	 *
	 * CPB == chain pointer base address
	 */
	dmac_write_1(scp, SCA_DSR1, 0);
	dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT);
	dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF);
	/* XXX1
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	 */
	dmac_write_1(scp, SCA_DIR1,
		     (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_CPB1,
		     (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16));

	/*
	 * now, do the same thing for receive descriptors
	 *
	 * XXX assumes that all rx desc and bufs in same page
	 */
	if (!sc->sc_usedma)
		sc->scu_set_page(sc, scp->sp_rxdesc_p);

	desc = scp->sp_rxdesc;
	desc_p = scp->sp_rxdesc_p;
	buf_p = scp->sp_rxbuf_p;

#ifdef DEBUG
	/* make sure that we won't wrap */
	if ((desc_p & 0xffff0000) !=
	    ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000))
		panic("sca: rx descriptors cross architectural boundary");
	if ((buf_p & 0xff000000) !=
	    ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000))
		panic("sca: rx buffers cross architectural boundary");
#endif

	for (i = 0 ; i < scp->sp_nrxdesc; i++) {
		/*
		 * desc_p points to the physical address of the NEXT desc
		 */
		desc_p += sizeof(sca_desc_t);

		sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff);
		sca_desc_write_bufp(sc, desc, buf_p);
		/* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */
		sca_desc_write_buflen(sc, desc, 0);
		sca_desc_write_stat(sc, desc, 0);

		desc++;  /* point to the next descriptor */
		buf_p += SCA_BSIZE;
	}

	/*
	 * "heal" the circular list by making the last entry point to the
	 * first.
	 */
	sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff);

	sca_dmac_rxinit(scp);

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
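
/*
 * A note on the rx ring geometry (an observation on the code below):
 * sca_dmac_rxinit() parks the error/end descriptor address (EDA) one
 * descriptor behind the current descriptor address (CDA), so the chip
 * always stops one buffer short of overwriting a frame the host has
 * not read yet.
 */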

/*
 * reset and reinitialize the receive DMA logic
 */
static void
sca_dmac_rxinit(sca_port_t *scp)
{
	/*
	 * ... and the receive DMA logic ...
	 */
	dmac_write_1(scp, SCA_DSR0, 0);  /* disable DMA */
	dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT);

	dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF);
	dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE);

	/* reset descriptors to initial state */
	scp->sp_rxstart = 0;
	scp->sp_rxend = scp->sp_nrxdesc - 1;

	/*
	 * CPB == chain pointer base
	 * CDA == current descriptor address
	 * EDA == error descriptor address (overwrite position)
	 * because cda can't be eda when starting we always
	 * have a single buffer gap between cda and eda
	 */
	dmac_write_1(scp, SCA_CPB0,
	    (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16));
	dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff));
	dmac_write_2(scp, SCA_EDAL0, (u_int16_t)
	    (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend)));

	/*
	 * enable receiver DMA
	 */
	dmac_write_1(scp, SCA_DIR0,
		     (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF));
	dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE);
}

/*
 * Queue the packet for our start routine to transmit
 */
static int
sca_output(
    struct ifnet *ifp,
    struct mbuf *m,
    const struct sockaddr *dst,
    struct rtentry *rt0)
{
#ifdef ISO
	struct hdlc_llc_header *llc;
#endif
	struct hdlc_header *hdlc;
	struct ifqueue *ifq = NULL;
	int s, error, len;
	short mflags;
	ALTQ_DECL(struct altq_pktattr pktattr;)

	error = 0;

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		error = ENETDOWN;
		goto bad;
	}

	/*
	 * If the queueing discipline needs packet classification,
	 * do it before prepending link headers.
	 */
	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

	/*
	 * determine address family, and priority for this packet
	 */
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef SCA_USE_FASTQ
		if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY)
		    == IPTOS_LOWDELAY)
			ifq = &((sca_port_t *)ifp->if_softc)->fastq;
#endif
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IP);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Add cisco serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6);
		break;
#endif
#ifdef ISO
	case AF_ISO:
		/*
		 * Add cisco llc serial line header. If there is no
		 * space in the first mbuf, allocate another.
		 */
		M_PREPEND(m, sizeof(struct hdlc_llc_header), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		hdlc = mtod(m, struct hdlc_header *);
		llc = mtod(m, struct hdlc_llc_header *);
		llc->hl_dsap = llc->hl_ssap = LLC_ISO_LSAP;
		llc->hl_ffb = 0;
		break;
#endif
	default:
		printf("%s: address family %d unsupported\n",
		       ifp->if_xname, dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/* finish */
	if ((m->m_flags & (M_BCAST | M_MCAST)) != 0)
		hdlc->h_addr = CISCO_MULTICAST;
	else
		hdlc->h_addr = CISCO_UNICAST;
	hdlc->h_resv = 0;

	/*
	 * queue the packet.  If interactive, use the fast queue.
	 */
	mflags = m->m_flags;
	len = m->m_pkthdr.len;
	s = splnet();
	if (ifq != NULL) {
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			m_freem(m);
			error = ENOBUFS;
		} else
			IF_ENQUEUE(ifq, m);
	} else
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
	if (error != 0) {
		splx(s);
		ifp->if_oerrors++;
		ifp->if_collisions++;
		return (error);
	}
	ifp->if_obytes += len;
	if (mflags & M_MCAST)
		ifp->if_omcasts++;

	sca_start(ifp);
	splx(s);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}

static int
sca_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifreq *ifr;
	struct ifaddr *ifa;
	int error;
	int s;

	s = splnet();

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
#endif
#ifdef INET6
		case AF_INET6:
#endif
#if defined(INET) || defined(INET6)
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFDSTADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			break;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			break;
#endif
		error = EAFNOSUPPORT;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX need multicast group management code */
		if (ifr == NULL) {
			error = EAFNOSUPPORT;	/* XXX */
			break;
		}
		switch (ifreq_getaddr(cmd, ifr)->sa_family) {
#ifdef INET
		case AF_INET:
			break;
#endif
#ifdef INET6
		case AF_INET6:
			break;
#endif
		default:
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifr->ifr_flags & IFF_UP) {
			ifp->if_flags |= IFF_UP;
			sca_port_up(ifp->if_softc);
		} else {
			ifp->if_flags &= ~IFF_UP;
			sca_port_down(ifp->if_softc);
		}

		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
	}

	splx(s);
	return error;
}

/*
 * start packet transmission on the interface
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_start(struct ifnet *ifp)
{
	sca_port_t *scp = ifp->if_softc;
	struct sca_softc *sc = scp->sca;
	struct mbuf *m, *mb_head;
	sca_desc_t *desc;
	u_int8_t *buf, stat;
	u_int32_t buf_p;
	int nexttx;
	int trigger_xmit;
	u_int len;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n"));

	/*
	 * can't queue when we are full or transmitter is busy
	 */
#ifdef oldcode
	if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1))
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#else
	if (scp->sp_txinuse
	    || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE))
		return;
#endif
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse));

	/*
	 * XXX assume that all tx desc and bufs in same page
	 */
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_txdesc_p);
	}

	trigger_xmit = 0;

txloop:
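	/*
	 * Dequeue in priority order: link-level (keepalive) packets
	 * first, then the interactive fast queue when that is enabled,
	 * and finally the normal send queue.
	 */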
	IF_DEQUEUE(&scp->linkq, mb_head);
	if (mb_head == NULL)
#ifdef SCA_USE_FASTQ
		IF_DEQUEUE(&scp->fastq, mb_head);
	if (mb_head == NULL)
#endif
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
	if (mb_head == NULL)
		goto start_xmit;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n"));
#ifdef oldcode
	if (scp->txinuse != 0) {
		/* Kill EOT interrupts on the previous descriptor. */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to copy mbuf of len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	if (ifp->if_bpf)
		bpf_ops->bpf_mtap(ifp->if_bpf, mb_head);

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?!  - multi may be
	 * untested.  sp_ntxdesc used to be hard coded to 2 with a claim
	 * of a too-hard-to-find bug.
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

static void
sca_watchdog(struct ifnet *ifp)
{
}

int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;  /* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
					     (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.  It means that all data queued
			 * was transmitted, and we mark ourself as
			 * not in use and stop the watchdog timer.
			 */
			if (dsr & SCA_DSR_EOT) {
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("Transmit completed. cda %x eda %x dsr %x\n",
				     dmac_read_2(scp, SCA_CDAL1),
				     dmac_read_2(scp, SCA_EDAL1),
				     dsr));

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;

				/*
				 * check for more packets
				 */
				sca_start(&scp->sp_if);
			}
		}
	}
	/*
	 * receive channel check
	 */
	if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n",
		    (scp == &scp->sca->sc_ports[0] ? 0 : 1)));

		dsr = 1;
		while (dsr != 0) {
			ret++;

			dsr = dmac_read_1(scp, SCA_DSR0);
			dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= (SCA_DSR_EOM | SCA_DSR_COF
				| SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * End of frame
			 */
			if (dsr & SCA_DSR_EOM) {
				SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n"));

				sca_get_packets(scp);
			}

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: RXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}

			/*
			 * check for end of transfer, which means we
			 * ran out of descriptors to receive into.
			 * This means the line is much faster than
			 * we can handle.
			 */
			if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) {
				printf("%s: RXDMA buffer overflow\n",
				       scp->sp_if.if_xname);

				sca_dmac_rxinit(scp);
			}
		}
	}

	return ret;
}

static int
sca_msci_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t st1, trc0;

	/* get and clear the specific interrupt -- should act on it :)*/
	if ((st1 = msci_read_1(scp, SCA_ST10))) {
		/* clear the interrupt */
		msci_write_1(scp, SCA_ST10, st1);

		if (st1 & SCA_ST1_UDRN) {
			/* underrun -- try to increase ready control */
			trc0 = msci_read_1(scp, SCA_TRC00);
			if (trc0 == 0x1f)
				printf("TX: underrun - fifo depth maxed\n");
			else {
				if ((trc0 += 2) > 0x1f)
					trc0 = 0x1f;
				SCA_DPRINTF(SCA_DEBUG_TX,
				    ("TX: udrn - incr fifo to %d\n", trc0));
				msci_write_1(scp, SCA_TRC00, trc0);
			}
		}
	}
	return (0);
}

static void
sca_get_packets(sca_port_t *scp)
{
	struct sca_softc *sc;

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n"));

	sc = scp->sca;
	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		/*
		 * XXX this code is unable to deal with rx stuff
		 * in more than 1 page
		 */
		sc->scu_page_on(sc);
		sc->scu_set_page(sc, scp->sp_rxdesc_p);
	}

	/* process as many frames as are available */
	while (sca_frame_avail(scp)) {
		sca_frame_process(scp);
		sca_frame_read_done(scp);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam,
		    0, sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	else
		sc->scu_page_off(sc);
}
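
/*
 * Receive path overview (a summary of the code above and below):
 * sca_dmac_intr() sees an EOM interrupt and calls sca_get_packets(),
 * which loops sca_frame_avail() / sca_frame_process() /
 * sca_frame_read_done() until the ring holds no complete frame.
 */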

/*
 * Starting with the first descriptor we wanted to read into, up to but
 * not including the current SCA read descriptor, look for a packet.
 *
 * must be called at splnet()
 */
static int
sca_frame_avail(sca_port_t *scp)
{
	u_int16_t cda;
	u_int32_t desc_p;	/* physical address (lower 16 bits) */
	sca_desc_t *desc;
	u_int8_t rxstat;
	int cdaidx, toolong;

	/*
	 * Read the current descriptor from the SCA.
	 */
	cda = dmac_read_2(scp, SCA_CDAL0);

	/*
	 * calculate the index of the current descriptor
	 */
	desc_p = (scp->sp_rxdesc_p & 0xFFFF);
	desc_p = cda - desc_p;
	cdaidx = desc_p / sizeof(sca_desc_t);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n",
	    cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart));

	/* note confusion */
	if (cdaidx >= scp->sp_nrxdesc)
		panic("current descriptor index out of range");

	/* see if we have a valid frame available */
	toolong = 0;
	for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) {
		/*
		 * We might have a valid descriptor.  Set up a pointer
		 * to the kva address for it so we can more easily examine
		 * the contents.
		 */
		desc = &scp->sp_rxdesc[scp->sp_rxstart];
		rxstat = sca_desc_read_stat(scp->sca, desc);

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n",
		    scp->sp_port, scp->sp_rxstart, rxstat));

		SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n",
		    scp->sp_port, sca_desc_read_buflen(scp->sca, desc)));

		/*
		 * check for errors
		 */
		if (rxstat & SCA_DESC_ERRORS) {
			/*
			 * consider an error condition the end
			 * of a frame
			 */
			scp->sp_if.if_ierrors++;
			toolong = 0;
			continue;
		}

		/*
		 * if we aren't skipping overlong frames
		 * we are done, otherwise reset and look for
		 * another good frame
		 */
		if (rxstat & SCA_DESC_EOM) {
			if (!toolong)
				return (1);
			toolong = 0;
		} else if (!toolong) {
			/*
			 * we currently don't deal with frames
			 * larger than a single buffer (fixed MTU)
			 */
			scp->sp_if.if_ierrors++;
			toolong = 1;
		}
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n",
		    scp->sp_rxstart));
	}

	SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n"));
	return 0;
}

/*
 * Pass the packet up to the kernel if it is a packet we want to pay
 * attention to.
 *
 * MUST BE CALLED AT splnet()
 */
static void
sca_frame_process(sca_port_t *scp)
{
	struct ifqueue *ifq;
	struct hdlc_header *hdlc;
	struct cisco_pkt *cisco;
	sca_desc_t *desc;
	struct mbuf *m;
	u_int8_t *bufp;
	u_int16_t len;
	u_int32_t t;

	t = time_uptime * 1000;
	desc = &scp->sp_rxdesc[scp->sp_rxstart];
	bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart;
	len = sca_desc_read_buflen(scp->sca, desc);

	SCA_DPRINTF(SCA_DEBUG_RX,
	    ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc,
	    (bus_addr_t)bufp, len));

#if SCA_DEBUG_LEVEL > 0
	if (sca_debug & SCA_DEBUG_RXPKT)
		sca_frame_print(scp, desc, bufp);
#endif
	/*
	 * skip packets that are too short
	 */
	if (len < sizeof(struct hdlc_header)) {
		scp->sp_if.if_ierrors++;
		return;
	}

	m = sca_mbuf_alloc(scp->sca, bufp, len);
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n"));
		return;
	}

	/*
	 * read and then strip off the HDLC information
	 */
	m = m_pullup(m, sizeof(struct hdlc_header));
	if (m == NULL) {
		SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
		return;
	}

	if (scp->sp_if.if_bpf)
		bpf_ops->bpf_mtap(scp->sp_if.if_bpf, m);

	scp->sp_if.if_ipackets++;

	hdlc = mtod(m, struct hdlc_header *);
	switch (ntohs(hdlc->h_proto)) {
#ifdef INET
	case HDLC_PROTOCOL_IP:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ipintrq;
		schednetisr(NETISR_IP);
		break;
#endif	/* INET */
#ifdef INET6
	case HDLC_PROTOCOL_IPV6:
		SCA_DPRINTF(SCA_DEBUG_RX, ("Received IPv6 packet\n"));
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_header);
		m->m_data += sizeof(struct hdlc_header);
		m->m_len -= sizeof(struct hdlc_header);
		ifq = &ip6intrq;
		schednetisr(NETISR_IPV6);
		break;
#endif	/* INET6 */
#ifdef ISO
	case HDLC_PROTOCOL_ISO:
		if (m->m_pkthdr.len < sizeof(struct hdlc_llc_header))
			goto dropit;
		m->m_pkthdr.rcvif = &scp->sp_if;
		m->m_pkthdr.len -= sizeof(struct hdlc_llc_header);
		m->m_data += sizeof(struct hdlc_llc_header);
		m->m_len -= sizeof(struct hdlc_llc_header);
		ifq = &clnlintrq;
		schednetisr(NETISR_ISO);
		break;
#endif	/* ISO */
	case CISCO_KEEPALIVE:
		SCA_DPRINTF(SCA_DEBUG_CISCO,
			    ("Received CISCO keepalive packet\n"));

		if (len < CISCO_PKT_LEN) {
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("short CISCO packet %d, wanted %d\n",
				     len, CISCO_PKT_LEN));
			scp->sp_if.if_ierrors++;
			goto dropit;
		}

		m = m_pullup(m, sizeof(struct cisco_pkt));
		if (m == NULL) {
			SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n"));
			return;
		}

		cisco = (struct cisco_pkt *)
		    (mtod(m, u_int8_t *) + HDLC_HDRLEN);
		m->m_pkthdr.rcvif = &scp->sp_if;

		switch (ntohl(cisco->type)) {
		case CISCO_ADDR_REQ:
			printf("Got CISCO addr_req, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_ADDR_REPLY:
			printf("Got CISCO addr_reply, ignoring\n");
			scp->sp_if.if_ierrors++;
			goto dropit;

		case CISCO_KEEPALIVE_REQ:

			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Received KA, mseq %d,"
				     " yseq %d, rel 0x%04x, t0"
				     " %04x, t1 %04x\n",
				     ntohl(cisco->par1), ntohl(cisco->par2),
				     ntohs(cisco->rel), ntohs(cisco->time0),
				     ntohs(cisco->time1)));

			scp->cka_lastrx = ntohl(cisco->par1);
			scp->cka_lasttx++;

			/*
			 * schedule the transmit right here.
			 */
			cisco->par2 = cisco->par1;
			cisco->par1 = htonl(scp->cka_lasttx);
			cisco->time0 = htons((u_int16_t)(t >> 16));
			cisco->time1 = htons((u_int16_t)(t & 0x0000ffff));

			ifq = &scp->linkq;
			if (IF_QFULL(ifq)) {
				IF_DROP(ifq);
				goto dropit;
			}
			IF_ENQUEUE(ifq, m);

			sca_start(&scp->sp_if);

			/* since start may have reset this fix */
			if (!scp->sca->sc_usedma) {
				scp->sca->scu_set_page(scp->sca,
				    scp->sp_rxdesc_p);
				scp->sca->scu_page_on(scp->sca);
			}
			return;
		default:
			SCA_DPRINTF(SCA_DEBUG_CISCO,
				    ("Unknown CISCO keepalive protocol 0x%04x\n",
				     ntohl(cisco->type)));

			scp->sp_if.if_noproto++;
			goto dropit;
		}
		return;
	default:
		SCA_DPRINTF(SCA_DEBUG_RX,
			    ("Unknown/unexpected ethertype 0x%04x\n",
			     ntohs(hdlc->h_proto)));
		scp->sp_if.if_noproto++;
		goto dropit;
	}

	/* queue the packet */
	if (!IF_QFULL(ifq)) {
		IF_ENQUEUE(ifq, m);
	} else {
		IF_DROP(ifq);
		scp->sp_if.if_iqdrops++;
		goto dropit;
	}
	return;
dropit:
	if (m)
		m_freem(m);
	return;
}
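
/*
 * Keepalive handling in brief (a summary of the CISCO_KEEPALIVE_REQ
 * case above): the received mbuf is turned around in place -- the
 * peer's sequence number is echoed back (par1 -> par2), our own
 * sequence number (cka_lasttx) is advanced and inserted, the uptime
 * is stamped into time0/time1, and the packet is queued on linkq so
 * sca_start() sends it ahead of normal traffic.
 */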

#if SCA_DEBUG_LEVEL > 0
/*
 * do a hex dump of the packet received into descriptor "desc" with
 * data buffer "p"
 */
static void
sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p)
{
	int i;
	int nothing_yet = 1;
	struct sca_softc *sc;
	u_int len;

	sc = scp->sca;
	printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n",
	       desc,
	       sca_desc_read_chainp(sc, desc),
	       sca_desc_read_bufp(sc, desc),
	       sca_desc_read_stat(sc, desc),
	       (len = sca_desc_read_buflen(sc, desc)));

	for (i = 0 ; i < len && i < 256 ; i++) {
		if (nothing_yet == 1 &&
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		       (sc->sc_usedma ? *p
		       : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		       sca_page_addr(sc, p))));
		p++;
	}

	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indices are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}
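
/*
 * Interrupt enable layout (an observation on the masks used above and
 * below): port 0 owns the low nibble of IER0/IER1 and port 1 the high
 * nibble, so 0x0f/0xf0 enable or disable all four msci and all four
 * dmac interrupt sources for the corresponding port at once.
 */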

/*
 * set a port to the "down" state
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
		     (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}
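
/*
 * How the transmit kick works (a summary of sca_port_starttx() above):
 * CDAL1 is pointed at the first descriptor and EDAL1 one descriptor
 * past the last one in use; once DMA is enabled the chip walks the
 * chain and halts when the current descriptor address reaches the
 * end address.
 */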

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	       sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			       scp->sp_if.if_xname);
	}
}