1 /* $NetBSD: hd64570.c,v 1.47 2014/06/05 23:48:16 rmind Exp $ */ 2 3 /* 4 * Copyright (c) 1999 Christian E. Hopps 5 * Copyright (c) 1998 Vixie Enterprises 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Neither the name of Vixie Enterprises nor the names 18 * of its contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND 22 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR 26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * This software has been written for Vixie Enterprises by Michael Graff 36 * <explorer@flame.org>. To learn more about Vixie Enterprises, see 37 * ``http://www.vix.com''. 38 */ 39 40 /* 41 * TODO: 42 * 43 * o teach the receive logic about errors, and about long frames that 44 * span more than one input buffer. (Right now, receive/transmit is 45 * limited to one descriptor's buffer space, which is MTU + 4 bytes. 46 * This is currently 1504, which is large enough to hold the HDLC 47 * header and the packet itself. Packets which are too long are 48 * silently dropped on transmit and silently dropped on receive. 49 * o write code to handle the msci interrupts, needed only for CD 50 * and CTS changes. 51 * o consider switching back to a "queue tx with DMA active" model which 52 * should help sustain outgoing traffic 53 * o through clever use of bus_dma*() functions, it should be possible 54 * to map the mbuf's data area directly into a descriptor transmit 55 * buffer, removing the need to allocate extra memory. If, however, 56 * we run out of descriptors for this, we will need to then allocate 57 * one large mbuf, copy the fragmented chain into it, and put it onto 58 * a single descriptor. 59 * o use bus_dmamap_sync() with the right offset and lengths, rather 60 * than cheating and always sync'ing the whole region. 61 * 62 * o perhaps allow rx and tx to be in more than one page 63 * if not using DMA. currently the assumption is that 64 * rx uses a page and tx uses a page. 65 */ 66 67 #include <sys/cdefs.h> 68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.47 2014/06/05 23:48:16 rmind Exp $"); 69 70 #include "opt_inet.h" 71 72 #include <sys/param.h> 73 #include <sys/systm.h> 74 #include <sys/device.h> 75 #include <sys/mbuf.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/kernel.h> 79 80 #include <net/if.h> 81 #include <net/if_types.h> 82 #include <net/netisr.h> 83 84 #if defined(INET) || defined(INET6) 85 #include <netinet/in.h> 86 #include <netinet/in_systm.h> 87 #include <netinet/in_var.h> 88 #include <netinet/ip.h> 89 #ifdef INET6 90 #include <netinet6/in6_var.h> 91 #endif 92 #endif 93 94 #include <net/bpf.h> 95 96 #include <sys/cpu.h> 97 #include <sys/bus.h> 98 #include <sys/intr.h> 99 100 #include <dev/pci/pcivar.h> 101 #include <dev/pci/pcireg.h> 102 #include <dev/pci/pcidevs.h> 103 104 #include <dev/ic/hd64570reg.h> 105 #include <dev/ic/hd64570var.h> 106 107 #define SCA_DEBUG_RX 0x0001 108 #define SCA_DEBUG_TX 0x0002 109 #define SCA_DEBUG_CISCO 0x0004 110 #define SCA_DEBUG_DMA 0x0008 111 #define SCA_DEBUG_RXPKT 0x0010 112 #define SCA_DEBUG_TXPKT 0x0020 113 #define SCA_DEBUG_INTR 0x0040 114 #define SCA_DEBUG_CLOCK 0x0080 115 116 #if 0 117 #define SCA_DEBUG_LEVEL ( 0xFFFF ) 118 #else 119 #define SCA_DEBUG_LEVEL 0 120 #endif 121 122 u_int32_t sca_debug = SCA_DEBUG_LEVEL; 123 124 #if SCA_DEBUG_LEVEL > 0 125 #define SCA_DPRINTF(l, x) do { \ 126 if ((l) & sca_debug) \ 127 printf x;\ 128 } while (0) 129 #else 130 #define SCA_DPRINTF(l, x) 131 #endif 132 133 #if 0 134 #define SCA_USE_FASTQ /* use a split queue, one for fast traffic */ 135 #endif 136 137 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t); 138 static inline u_int8_t msci_read_1(sca_port_t *, u_int); 139 140 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t); 141 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t); 142 static inline u_int8_t dmac_read_1(sca_port_t *, u_int); 143 static inline u_int16_t dmac_read_2(sca_port_t *, u_int); 144 145 static void sca_msci_init(struct sca_softc *, sca_port_t *); 146 static void sca_dmac_init(struct sca_softc *, sca_port_t *); 147 static void sca_dmac_rxinit(sca_port_t *); 148 149 static int sca_dmac_intr(sca_port_t *, u_int8_t); 150 static int sca_msci_intr(sca_port_t *, u_int8_t); 151 152 static void sca_get_packets(sca_port_t *); 153 static int sca_frame_avail(sca_port_t *); 154 static void sca_frame_process(sca_port_t *); 155 static void sca_frame_read_done(sca_port_t *); 156 157 static void sca_port_starttx(sca_port_t *); 158 159 static void sca_port_up(sca_port_t *); 160 static void sca_port_down(sca_port_t *); 161 162 static int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *, 163 struct rtentry *); 164 static int sca_ioctl(struct ifnet *, u_long, void *); 165 static void sca_start(struct ifnet *); 166 static void sca_watchdog(struct ifnet *); 167 168 static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int); 169 170 #if SCA_DEBUG_LEVEL > 0 171 static void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *); 172 #endif 173 174 175 #define sca_read_1(sc, reg) (sc)->sc_read_1(sc, reg) 176 #define sca_read_2(sc, reg) (sc)->sc_read_2(sc, reg) 177 #define sca_write_1(sc, reg, val) (sc)->sc_write_1(sc, reg, val) 178 #define sca_write_2(sc, reg, val) (sc)->sc_write_2(sc, reg, val) 179 180 #define sca_page_addr(sc, addr) ((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask) 181 182 static inline void 183 msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val) 184 { 185 sca_write_1(scp->sca, scp->msci_off + reg, val); 186 } 187 188 static inline u_int8_t 189 msci_read_1(sca_port_t *scp, u_int reg) 190 { 191 return sca_read_1(scp->sca, scp->msci_off + reg); 192 } 193 194 static inline void 195 dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val) 196 { 197 sca_write_1(scp->sca, scp->dmac_off + reg, val); 198 } 199 200 static inline void 201 dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val) 202 { 203 sca_write_2(scp->sca, scp->dmac_off + reg, val); 204 } 205 206 static inline u_int8_t 207 dmac_read_1(sca_port_t *scp, u_int reg) 208 { 209 return sca_read_1(scp->sca, scp->dmac_off + reg); 210 } 211 212 static inline u_int16_t 213 dmac_read_2(sca_port_t *scp, u_int reg) 214 { 215 return sca_read_2(scp->sca, scp->dmac_off + reg); 216 } 217 218 #if SCA_DEBUG_LEVEL > 0 219 /* 220 * read the chain pointer 221 */ 222 static inline u_int16_t 223 sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp) 224 { 225 if (sc->sc_usedma) 226 return ((dp)->sd_chainp); 227 return (bus_space_read_2(sc->scu_memt, sc->scu_memh, 228 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp))); 229 } 230 #endif 231 232 /* 233 * write the chain pointer 234 */ 235 static inline void 236 sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp) 237 { 238 if (sc->sc_usedma) 239 (dp)->sd_chainp = cp; 240 else 241 bus_space_write_2(sc->scu_memt, sc->scu_memh, 242 sca_page_addr(sc, dp) 243 + offsetof(struct sca_desc, sd_chainp), cp); 244 } 245 246 #if SCA_DEBUG_LEVEL > 0 247 /* 248 * read the buffer pointer 249 */ 250 static inline u_int32_t 251 sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp) 252 { 253 u_int32_t address; 254 255 if (sc->sc_usedma) 256 address = dp->sd_bufp | dp->sd_hbufp << 16; 257 else { 258 address = bus_space_read_2(sc->scu_memt, sc->scu_memh, 259 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp)); 260 address |= bus_space_read_1(sc->scu_memt, sc->scu_memh, 261 sca_page_addr(sc, dp) 262 + offsetof(struct sca_desc, sd_hbufp)) << 16; 263 } 264 return (address); 265 } 266 #endif 267 268 /* 269 * write the buffer pointer 270 */ 271 static inline void 272 sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp) 273 { 274 if (sc->sc_usedma) { 275 dp->sd_bufp = bufp & 0xFFFF; 276 dp->sd_hbufp = (bufp & 0x00FF0000) >> 16; 277 } else { 278 bus_space_write_2(sc->scu_memt, sc->scu_memh, 279 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp), 280 bufp & 0xFFFF); 281 bus_space_write_1(sc->scu_memt, sc->scu_memh, 282 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp), 283 (bufp & 0x00FF0000) >> 16); 284 } 285 } 286 287 /* 288 * read the buffer length 289 */ 290 static inline u_int16_t 291 sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp) 292 { 293 if (sc->sc_usedma) 294 return ((dp)->sd_buflen); 295 return (bus_space_read_2(sc->scu_memt, sc->scu_memh, 296 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen))); 297 } 298 299 /* 300 * write the buffer length 301 */ 302 static inline void 303 sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len) 304 { 305 if (sc->sc_usedma) 306 (dp)->sd_buflen = len; 307 else 308 bus_space_write_2(sc->scu_memt, sc->scu_memh, 309 sca_page_addr(sc, dp) 310 + offsetof(struct sca_desc, sd_buflen), len); 311 } 312 313 /* 314 * read the descriptor status 315 */ 316 static inline u_int8_t 317 sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp) 318 { 319 if (sc->sc_usedma) 320 return ((dp)->sd_stat); 321 return (bus_space_read_1(sc->scu_memt, sc->scu_memh, 322 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat))); 323 } 324 325 /* 326 * write the descriptor status 327 */ 328 static inline void 329 sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat) 330 { 331 if (sc->sc_usedma) 332 (dp)->sd_stat = stat; 333 else 334 bus_space_write_1(sc->scu_memt, sc->scu_memh, 335 sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat), 336 stat); 337 } 338 339 void 340 sca_init(struct sca_softc *sc) 341 { 342 /* 343 * Do a little sanity check: check number of ports. 344 */ 345 if (sc->sc_numports < 1 || sc->sc_numports > 2) 346 panic("sca can\'t handle more than 2 or less than 1 ports"); 347 348 /* 349 * disable DMA and MSCI interrupts 350 */ 351 sca_write_1(sc, SCA_DMER, 0); 352 sca_write_1(sc, SCA_IER0, 0); 353 sca_write_1(sc, SCA_IER1, 0); 354 sca_write_1(sc, SCA_IER2, 0); 355 356 /* 357 * configure interrupt system 358 */ 359 sca_write_1(sc, SCA_ITCR, 360 SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR); 361 #if 0 362 /* these are for the intrerrupt ack cycle which we don't use */ 363 sca_write_1(sc, SCA_IVR, 0x40); 364 sca_write_1(sc, SCA_IMVR, 0x40); 365 #endif 366 367 /* 368 * set wait control register to zero wait states 369 */ 370 sca_write_1(sc, SCA_PABR0, 0); 371 sca_write_1(sc, SCA_PABR1, 0); 372 sca_write_1(sc, SCA_WCRL, 0); 373 sca_write_1(sc, SCA_WCRM, 0); 374 sca_write_1(sc, SCA_WCRH, 0); 375 376 /* 377 * disable DMA and reset status 378 */ 379 sca_write_1(sc, SCA_PCR, SCA_PCR_PR2); 380 381 /* 382 * disable transmit DMA for all channels 383 */ 384 sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0); 385 sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT); 386 sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0); 387 sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT); 388 sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0); 389 sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT); 390 sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0); 391 sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT); 392 393 /* 394 * enable DMA based on channel enable flags for each channel 395 */ 396 sca_write_1(sc, SCA_DMER, SCA_DMER_EN); 397 398 /* 399 * Should check to see if the chip is responding, but for now 400 * assume it is. 401 */ 402 } 403 404 /* 405 * initialize the port and attach it to the networking layer 406 */ 407 void 408 sca_port_attach(struct sca_softc *sc, u_int port) 409 { 410 struct timeval now; 411 sca_port_t *scp = &sc->sc_ports[port]; 412 struct ifnet *ifp; 413 static u_int ntwo_unit = 0; 414 415 scp->sca = sc; /* point back to the parent */ 416 417 scp->sp_port = port; 418 419 if (port == 0) { 420 scp->msci_off = SCA_MSCI_OFF_0; 421 scp->dmac_off = SCA_DMAC_OFF_0; 422 if(sc->sc_parent != NULL) 423 ntwo_unit = device_unit(sc->sc_parent) * 2 + 0; 424 else 425 ntwo_unit = 0; /* XXX */ 426 } else { 427 scp->msci_off = SCA_MSCI_OFF_1; 428 scp->dmac_off = SCA_DMAC_OFF_1; 429 if(sc->sc_parent != NULL) 430 ntwo_unit = device_unit(sc->sc_parent) * 2 + 1; 431 else 432 ntwo_unit = 1; /* XXX */ 433 } 434 435 sca_msci_init(sc, scp); 436 sca_dmac_init(sc, scp); 437 438 /* 439 * attach to the network layer 440 */ 441 ifp = &scp->sp_if; 442 snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit); 443 ifp->if_softc = scp; 444 ifp->if_mtu = SCA_MTU; 445 ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; 446 ifp->if_type = IFT_PTPSERIAL; 447 ifp->if_hdrlen = HDLC_HDRLEN; 448 ifp->if_ioctl = sca_ioctl; 449 ifp->if_output = sca_output; 450 ifp->if_watchdog = sca_watchdog; 451 ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; 452 scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */ 453 #ifdef SCA_USE_FASTQ 454 scp->fastq.ifq_maxlen = IFQ_MAXLEN; 455 #endif 456 IFQ_SET_READY(&ifp->if_snd); 457 if_attach(ifp); 458 if_alloc_sadl(ifp); 459 bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN); 460 461 if (sc->sc_parent == NULL) 462 printf("%s: port %d\n", ifp->if_xname, port); 463 else 464 printf("%s at %s port %d\n", 465 ifp->if_xname, device_xname(sc->sc_parent), port); 466 467 /* 468 * reset the last seen times on the cisco keepalive protocol 469 */ 470 getmicrotime(&now); 471 scp->cka_lasttx = now.tv_usec; 472 scp->cka_lastrx = 0; 473 } 474 475 #if 0 476 /* 477 * returns log2(div), sets 'tmc' for the required freq 'hz' 478 */ 479 static u_int8_t 480 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp) 481 { 482 u_int32_t tmc, div; 483 u_int32_t clock; 484 485 /* clock hz = (chipclock / tmc) / 2^(div); */ 486 /* 487 * TD == tmc * 2^(n) 488 * 489 * note: 490 * 1 <= TD <= 256 TD is inc of 1 491 * 2 <= TD <= 512 TD is inc of 2 492 * 4 <= TD <= 1024 TD is inc of 4 493 * ... 494 * 512 <= TD <= 256*512 TD is inc of 512 495 * 496 * so note there are overlaps. We lose prec 497 * as div increases so we wish to minize div. 498 * 499 * basically we want to do 500 * 501 * tmc = chip / hz, but have tmc <= 256 502 */ 503 504 /* assume system clock is 9.8304MHz or 9830400Hz */ 505 clock = clock = 9830400 >> 1; 506 507 /* round down */ 508 div = 0; 509 while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) { 510 clock >>= 1; 511 div++; 512 } 513 if (clock / tmc > hz) 514 tmc++; 515 if (!tmc) 516 tmc = 1; 517 518 if (div > SCA_RXS_DIV_512) { 519 /* set to maximums */ 520 div = SCA_RXS_DIV_512; 521 tmc = 0; 522 } 523 524 *tmcp = (tmc & 0xFF); /* 0 == 256 */ 525 return (div & 0xFF); 526 } 527 #endif 528 529 /* 530 * initialize the port's MSCI 531 */ 532 static void 533 sca_msci_init(struct sca_softc *sc, sca_port_t *scp) 534 { 535 /* reset the channel */ 536 msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET); 537 538 msci_write_1(scp, SCA_MD00, 539 ( SCA_MD0_CRC_1 540 | SCA_MD0_CRC_CCITT 541 | SCA_MD0_CRC_ENABLE 542 | SCA_MD0_MODE_HDLC)); 543 #if 0 544 /* immediately send receive reset so the above takes */ 545 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET); 546 #endif 547 548 msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK); 549 msci_write_1(scp, SCA_MD20, 550 (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ)); 551 552 /* be safe and do it again */ 553 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET); 554 555 /* setup underrun and idle control, and initial RTS state */ 556 msci_write_1(scp, SCA_CTL0, 557 (SCA_CTL_IDLC_PATTERN 558 | SCA_CTL_UDRNC_AFTER_FCS 559 | SCA_CTL_RTS_LOW)); 560 561 /* reset the transmitter */ 562 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET); 563 564 /* 565 * set the clock sources 566 */ 567 msci_write_1(scp, SCA_RXS0, scp->sp_rxs); 568 msci_write_1(scp, SCA_TXS0, scp->sp_txs); 569 msci_write_1(scp, SCA_TMC0, scp->sp_tmc); 570 571 /* set external clock generate as requested */ 572 sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock); 573 574 /* 575 * XXX don't pay attention to CTS or CD changes right now. I can't 576 * simulate one, and the transmitter will try to transmit even if 577 * CD isn't there anyway, so nothing bad SHOULD happen. 578 */ 579 #if 0 580 msci_write_1(scp, SCA_IE00, 0); 581 msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */ 582 #else 583 /* this would deliver transmitter underrun to ST1/ISR1 */ 584 msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN); 585 msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT); 586 #endif 587 msci_write_1(scp, SCA_IE20, 0); 588 589 msci_write_1(scp, SCA_FIE0, 0); 590 591 msci_write_1(scp, SCA_SA00, 0); 592 msci_write_1(scp, SCA_SA10, 0); 593 594 msci_write_1(scp, SCA_IDL0, 0x7e); 595 596 msci_write_1(scp, SCA_RRC0, 0x0e); 597 /* msci_write_1(scp, SCA_TRC00, 0x10); */ 598 /* 599 * the correct values here are important for avoiding underruns 600 * for any value less than or equal to TRC0 txrdy is activated 601 * which will start the dmac transfer to the fifo. 602 * for buffer size >= TRC1 + 1 txrdy is cleared which will stop DMA. 603 * 604 * thus if we are using a very fast clock that empties the fifo 605 * quickly, delays in the dmac starting to fill the fifo can 606 * lead to underruns so we want a fairly full fifo to still 607 * cause the dmac to start. for cards with on board ram this 608 * has no effect on system performance. For cards that DMA 609 * to/from system memory it will cause more, shorter, 610 * bus accesses rather than fewer longer ones. 611 */ 612 msci_write_1(scp, SCA_TRC00, 0x00); 613 msci_write_1(scp, SCA_TRC10, 0x1f); 614 } 615 616 /* 617 * Take the memory for the port and construct two circular linked lists of 618 * descriptors (one tx, one rx) and set the pointers in these descriptors 619 * to point to the buffer space for this port. 620 */ 621 static void 622 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp) 623 { 624 sca_desc_t *desc; 625 u_int32_t desc_p; 626 u_int32_t buf_p; 627 int i; 628 629 if (sc->sc_usedma) 630 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize, 631 BUS_DMASYNC_PREWRITE); 632 else { 633 /* 634 * XXX assumes that all tx desc and bufs in same page 635 */ 636 sc->scu_page_on(sc); 637 sc->scu_set_page(sc, scp->sp_txdesc_p); 638 } 639 640 desc = scp->sp_txdesc; 641 desc_p = scp->sp_txdesc_p; 642 buf_p = scp->sp_txbuf_p; 643 scp->sp_txcur = 0; 644 scp->sp_txinuse = 0; 645 646 #ifdef DEBUG 647 /* make sure that we won't wrap */ 648 if ((desc_p & 0xffff0000) != 649 ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000)) 650 panic("sca: tx descriptors cross architecural boundary"); 651 if ((buf_p & 0xff000000) != 652 ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000)) 653 panic("sca: tx buffers cross architecural boundary"); 654 #endif 655 656 for (i = 0 ; i < scp->sp_ntxdesc ; i++) { 657 /* 658 * desc_p points to the physcial address of the NEXT desc 659 */ 660 desc_p += sizeof(sca_desc_t); 661 662 sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff); 663 sca_desc_write_bufp(sc, desc, buf_p); 664 sca_desc_write_buflen(sc, desc, SCA_BSIZE); 665 sca_desc_write_stat(sc, desc, 0); 666 667 desc++; /* point to the next descriptor */ 668 buf_p += SCA_BSIZE; 669 } 670 671 /* 672 * "heal" the circular list by making the last entry point to the 673 * first. 674 */ 675 sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff); 676 677 /* 678 * Now, initialize the transmit DMA logic 679 * 680 * CPB == chain pointer base address 681 */ 682 dmac_write_1(scp, SCA_DSR1, 0); 683 dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT); 684 dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF); 685 /* XXX1 686 dmac_write_1(scp, SCA_DIR1, 687 (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF)); 688 */ 689 dmac_write_1(scp, SCA_DIR1, 690 (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF)); 691 dmac_write_1(scp, SCA_CPB1, 692 (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16)); 693 694 /* 695 * now, do the same thing for receive descriptors 696 * 697 * XXX assumes that all rx desc and bufs in same page 698 */ 699 if (!sc->sc_usedma) 700 sc->scu_set_page(sc, scp->sp_rxdesc_p); 701 702 desc = scp->sp_rxdesc; 703 desc_p = scp->sp_rxdesc_p; 704 buf_p = scp->sp_rxbuf_p; 705 706 #ifdef DEBUG 707 /* make sure that we won't wrap */ 708 if ((desc_p & 0xffff0000) != 709 ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000)) 710 panic("sca: rx descriptors cross architecural boundary"); 711 if ((buf_p & 0xff000000) != 712 ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000)) 713 panic("sca: rx buffers cross architecural boundary"); 714 #endif 715 716 for (i = 0 ; i < scp->sp_nrxdesc; i++) { 717 /* 718 * desc_p points to the physcial address of the NEXT desc 719 */ 720 desc_p += sizeof(sca_desc_t); 721 722 sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff); 723 sca_desc_write_bufp(sc, desc, buf_p); 724 /* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */ 725 sca_desc_write_buflen(sc, desc, 0); 726 sca_desc_write_stat(sc, desc, 0); 727 728 desc++; /* point to the next descriptor */ 729 buf_p += SCA_BSIZE; 730 } 731 732 /* 733 * "heal" the circular list by making the last entry point to the 734 * first. 735 */ 736 sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff); 737 738 sca_dmac_rxinit(scp); 739 740 if (sc->sc_usedma) 741 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 742 0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE); 743 else 744 sc->scu_page_off(sc); 745 } 746 747 /* 748 * reset and reinitialize the receive DMA logic 749 */ 750 static void 751 sca_dmac_rxinit(sca_port_t *scp) 752 { 753 /* 754 * ... and the receive DMA logic ... 755 */ 756 dmac_write_1(scp, SCA_DSR0, 0); /* disable DMA */ 757 dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT); 758 759 dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF); 760 dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE); 761 762 /* reset descriptors to initial state */ 763 scp->sp_rxstart = 0; 764 scp->sp_rxend = scp->sp_nrxdesc - 1; 765 766 /* 767 * CPB == chain pointer base 768 * CDA == current descriptor address 769 * EDA == error descriptor address (overwrite position) 770 * because cda can't be eda when starting we always 771 * have a single buffer gap between cda and eda 772 */ 773 dmac_write_1(scp, SCA_CPB0, 774 (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16)); 775 dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff)); 776 dmac_write_2(scp, SCA_EDAL0, (u_int16_t) 777 (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend))); 778 779 /* 780 * enable receiver DMA 781 */ 782 dmac_write_1(scp, SCA_DIR0, 783 (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF)); 784 dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE); 785 } 786 787 /* 788 * Queue the packet for our start routine to transmit 789 */ 790 static int 791 sca_output( 792 struct ifnet *ifp, 793 struct mbuf *m, 794 const struct sockaddr *dst, 795 struct rtentry *rt0) 796 { 797 struct hdlc_header *hdlc; 798 struct ifqueue *ifq = NULL; 799 int s, error, len; 800 short mflags; 801 ALTQ_DECL(struct altq_pktattr pktattr;) 802 803 error = 0; 804 805 if ((ifp->if_flags & IFF_UP) != IFF_UP) { 806 error = ENETDOWN; 807 goto bad; 808 } 809 810 /* 811 * If the queueing discipline needs packet classification, 812 * do it before prepending link headers. 813 */ 814 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr); 815 816 /* 817 * determine address family, and priority for this packet 818 */ 819 switch (dst->sa_family) { 820 #ifdef INET 821 case AF_INET: 822 #ifdef SCA_USE_FASTQ 823 if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY) 824 == IPTOS_LOWDELAY) 825 ifq = &((sca_port_t *)ifp->if_softc)->fastq; 826 #endif 827 /* 828 * Add cisco serial line header. If there is no 829 * space in the first mbuf, allocate another. 830 */ 831 M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT); 832 if (m == 0) 833 return (ENOBUFS); 834 hdlc = mtod(m, struct hdlc_header *); 835 hdlc->h_proto = htons(HDLC_PROTOCOL_IP); 836 break; 837 #endif 838 #ifdef INET6 839 case AF_INET6: 840 /* 841 * Add cisco serial line header. If there is no 842 * space in the first mbuf, allocate another. 843 */ 844 M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT); 845 if (m == 0) 846 return (ENOBUFS); 847 hdlc = mtod(m, struct hdlc_header *); 848 hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6); 849 break; 850 #endif 851 default: 852 printf("%s: address family %d unsupported\n", 853 ifp->if_xname, dst->sa_family); 854 error = EAFNOSUPPORT; 855 goto bad; 856 } 857 858 /* finish */ 859 if ((m->m_flags & (M_BCAST | M_MCAST)) != 0) 860 hdlc->h_addr = CISCO_MULTICAST; 861 else 862 hdlc->h_addr = CISCO_UNICAST; 863 hdlc->h_resv = 0; 864 865 /* 866 * queue the packet. If interactive, use the fast queue. 867 */ 868 mflags = m->m_flags; 869 len = m->m_pkthdr.len; 870 s = splnet(); 871 if (ifq != NULL) { 872 if (IF_QFULL(ifq)) { 873 IF_DROP(ifq); 874 m_freem(m); 875 error = ENOBUFS; 876 } else 877 IF_ENQUEUE(ifq, m); 878 } else 879 IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error); 880 if (error != 0) { 881 splx(s); 882 ifp->if_oerrors++; 883 ifp->if_collisions++; 884 return (error); 885 } 886 ifp->if_obytes += len; 887 if (mflags & M_MCAST) 888 ifp->if_omcasts++; 889 890 sca_start(ifp); 891 splx(s); 892 893 return (error); 894 895 bad: 896 if (m) 897 m_freem(m); 898 return (error); 899 } 900 901 static int 902 sca_ioctl(struct ifnet *ifp, u_long cmd, void *data) 903 { 904 struct ifreq *ifr; 905 struct ifaddr *ifa; 906 int error; 907 int s; 908 909 s = splnet(); 910 911 ifr = (struct ifreq *)data; 912 ifa = (struct ifaddr *)data; 913 error = 0; 914 915 switch (cmd) { 916 case SIOCINITIFADDR: 917 switch(ifa->ifa_addr->sa_family) { 918 #ifdef INET 919 case AF_INET: 920 #endif 921 #ifdef INET6 922 case AF_INET6: 923 #endif 924 #if defined(INET) || defined(INET6) 925 ifp->if_flags |= IFF_UP; 926 sca_port_up(ifp->if_softc); 927 break; 928 #endif 929 default: 930 error = EAFNOSUPPORT; 931 break; 932 } 933 break; 934 935 case SIOCSIFDSTADDR: 936 #ifdef INET 937 if (ifa->ifa_addr->sa_family == AF_INET) 938 break; 939 #endif 940 #ifdef INET6 941 if (ifa->ifa_addr->sa_family == AF_INET6) 942 break; 943 #endif 944 error = EAFNOSUPPORT; 945 break; 946 947 case SIOCADDMULTI: 948 case SIOCDELMULTI: 949 /* XXX need multicast group management code */ 950 if (ifr == 0) { 951 error = EAFNOSUPPORT; /* XXX */ 952 break; 953 } 954 switch (ifreq_getaddr(cmd, ifr)->sa_family) { 955 #ifdef INET 956 case AF_INET: 957 break; 958 #endif 959 #ifdef INET6 960 case AF_INET6: 961 break; 962 #endif 963 default: 964 error = EAFNOSUPPORT; 965 break; 966 } 967 break; 968 969 case SIOCSIFFLAGS: 970 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 971 break; 972 if (ifr->ifr_flags & IFF_UP) { 973 ifp->if_flags |= IFF_UP; 974 sca_port_up(ifp->if_softc); 975 } else { 976 ifp->if_flags &= ~IFF_UP; 977 sca_port_down(ifp->if_softc); 978 } 979 980 break; 981 982 default: 983 error = ifioctl_common(ifp, cmd, data); 984 } 985 986 splx(s); 987 return error; 988 } 989 990 /* 991 * start packet transmission on the interface 992 * 993 * MUST BE CALLED AT splnet() 994 */ 995 static void 996 sca_start(struct ifnet *ifp) 997 { 998 sca_port_t *scp = ifp->if_softc; 999 struct sca_softc *sc = scp->sca; 1000 struct mbuf *m, *mb_head; 1001 sca_desc_t *desc; 1002 u_int8_t *buf, stat; 1003 u_int32_t buf_p; 1004 int nexttx; 1005 int trigger_xmit; 1006 u_int len; 1007 1008 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n")); 1009 1010 /* 1011 * can't queue when we are full or transmitter is busy 1012 */ 1013 #ifdef oldcode 1014 if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1)) 1015 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE)) 1016 return; 1017 #else 1018 if (scp->sp_txinuse 1019 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE)) 1020 return; 1021 #endif 1022 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse)); 1023 1024 /* 1025 * XXX assume that all tx desc and bufs in same page 1026 */ 1027 if (sc->sc_usedma) 1028 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 1029 0, sc->scu_allocsize, 1030 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1031 else { 1032 sc->scu_page_on(sc); 1033 sc->scu_set_page(sc, scp->sp_txdesc_p); 1034 } 1035 1036 trigger_xmit = 0; 1037 1038 txloop: 1039 IF_DEQUEUE(&scp->linkq, mb_head); 1040 if (mb_head == NULL) 1041 #ifdef SCA_USE_FASTQ 1042 IF_DEQUEUE(&scp->fastq, mb_head); 1043 if (mb_head == NULL) 1044 #endif 1045 IFQ_DEQUEUE(&ifp->if_snd, mb_head); 1046 if (mb_head == NULL) 1047 goto start_xmit; 1048 1049 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n")); 1050 #ifdef oldcode 1051 if (scp->txinuse != 0) { 1052 /* Kill EOT interrupts on the previous descriptor. */ 1053 desc = &scp->sp_txdesc[scp->txcur]; 1054 stat = sca_desc_read_stat(sc, desc); 1055 sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT); 1056 1057 /* Figure out what the next free descriptor is. */ 1058 nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc; 1059 } else 1060 nexttx = 0; 1061 #endif /* oldcode */ 1062 1063 if (scp->sp_txinuse) 1064 nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc; 1065 else 1066 nexttx = 0; 1067 1068 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx)); 1069 1070 buf = scp->sp_txbuf + SCA_BSIZE * nexttx; 1071 buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx; 1072 1073 /* XXX hoping we can delay the desc write till after we don't drop. */ 1074 desc = &scp->sp_txdesc[nexttx]; 1075 1076 /* XXX isn't this set already?? */ 1077 sca_desc_write_bufp(sc, desc, buf_p); 1078 len = 0; 1079 1080 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p)); 1081 1082 #if 0 /* uncomment this for a core in cc1 */ 1083 X 1084 #endif 1085 /* 1086 * Run through the chain, copying data into the descriptor as we 1087 * go. If it won't fit in one transmission block, drop the packet. 1088 * No, this isn't nice, but most of the time it _will_ fit. 1089 */ 1090 for (m = mb_head ; m != NULL ; m = m->m_next) { 1091 if (m->m_len != 0) { 1092 len += m->m_len; 1093 if (len > SCA_BSIZE) { 1094 m_freem(mb_head); 1095 goto txloop; 1096 } 1097 SCA_DPRINTF(SCA_DEBUG_TX, 1098 ("TX: about to mbuf len %d\n", m->m_len)); 1099 1100 if (sc->sc_usedma) 1101 memcpy(buf, mtod(m, u_int8_t *), m->m_len); 1102 else 1103 bus_space_write_region_1(sc->scu_memt, 1104 sc->scu_memh, sca_page_addr(sc, buf_p), 1105 mtod(m, u_int8_t *), m->m_len); 1106 buf += m->m_len; 1107 buf_p += m->m_len; 1108 } 1109 } 1110 1111 /* set the buffer, the length, and mark end of frame and end of xfer */ 1112 sca_desc_write_buflen(sc, desc, len); 1113 sca_desc_write_stat(sc, desc, SCA_DESC_EOM); 1114 1115 ifp->if_opackets++; 1116 1117 /* 1118 * Pass packet to bpf if there is a listener. 1119 */ 1120 bpf_mtap(ifp, mb_head); 1121 1122 m_freem(mb_head); 1123 1124 scp->sp_txcur = nexttx; 1125 scp->sp_txinuse++; 1126 trigger_xmit = 1; 1127 1128 SCA_DPRINTF(SCA_DEBUG_TX, 1129 ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur)); 1130 1131 /* 1132 * XXX so didn't this used to limit us to 1?! - multi may be untested 1133 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard 1134 * to find bug 1135 */ 1136 #ifdef oldcode 1137 if (scp->sp_txinuse < (scp->sp_ntxdesc - 1)) 1138 #endif 1139 if (scp->sp_txinuse < scp->sp_ntxdesc) 1140 goto txloop; 1141 1142 start_xmit: 1143 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit)); 1144 1145 if (trigger_xmit != 0) { 1146 /* set EOT on final descriptor */ 1147 desc = &scp->sp_txdesc[scp->sp_txcur]; 1148 stat = sca_desc_read_stat(sc, desc); 1149 sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT); 1150 } 1151 1152 if (sc->sc_usedma) 1153 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, 1154 sc->scu_allocsize, 1155 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1156 1157 if (trigger_xmit != 0) 1158 sca_port_starttx(scp); 1159 1160 if (!sc->sc_usedma) 1161 sc->scu_page_off(sc); 1162 } 1163 1164 static void 1165 sca_watchdog(struct ifnet *ifp) 1166 { 1167 } 1168 1169 int 1170 sca_hardintr(struct sca_softc *sc) 1171 { 1172 u_int8_t isr0, isr1, isr2; 1173 int ret; 1174 1175 ret = 0; /* non-zero means we processed at least one interrupt */ 1176 1177 SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n")); 1178 1179 while (1) { 1180 /* 1181 * read SCA interrupts 1182 */ 1183 isr0 = sca_read_1(sc, SCA_ISR0); 1184 isr1 = sca_read_1(sc, SCA_ISR1); 1185 isr2 = sca_read_1(sc, SCA_ISR2); 1186 1187 if (isr0 == 0 && isr1 == 0 && isr2 == 0) 1188 break; 1189 1190 SCA_DPRINTF(SCA_DEBUG_INTR, 1191 ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n", 1192 isr0, isr1, isr2)); 1193 1194 /* 1195 * check DMAC interrupt 1196 */ 1197 if (isr1 & 0x0f) 1198 ret += sca_dmac_intr(&sc->sc_ports[0], 1199 isr1 & 0x0f); 1200 1201 if (isr1 & 0xf0) 1202 ret += sca_dmac_intr(&sc->sc_ports[1], 1203 (isr1 & 0xf0) >> 4); 1204 1205 /* 1206 * mcsi intterupts 1207 */ 1208 if (isr0 & 0x0f) 1209 ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f); 1210 1211 if (isr0 & 0xf0) 1212 ret += sca_msci_intr(&sc->sc_ports[1], 1213 (isr0 & 0xf0) >> 4); 1214 1215 #if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */ 1216 if (isr2) 1217 ret += sca_timer_intr(sc, isr2); 1218 #endif 1219 } 1220 1221 return (ret); 1222 } 1223 1224 static int 1225 sca_dmac_intr(sca_port_t *scp, u_int8_t isr) 1226 { 1227 u_int8_t dsr; 1228 int ret; 1229 1230 ret = 0; 1231 1232 /* 1233 * Check transmit channel 1234 */ 1235 if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) { 1236 SCA_DPRINTF(SCA_DEBUG_INTR, 1237 ("TX INTERRUPT port %d\n", scp->sp_port)); 1238 1239 dsr = 1; 1240 while (dsr != 0) { 1241 ret++; 1242 /* 1243 * reset interrupt 1244 */ 1245 dsr = dmac_read_1(scp, SCA_DSR1); 1246 dmac_write_1(scp, SCA_DSR1, 1247 dsr | SCA_DSR_DEWD); 1248 1249 /* 1250 * filter out the bits we don't care about 1251 */ 1252 dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT); 1253 if (dsr == 0) 1254 break; 1255 1256 /* 1257 * check for counter overflow 1258 */ 1259 if (dsr & SCA_DSR_COF) { 1260 printf("%s: TXDMA counter overflow\n", 1261 scp->sp_if.if_xname); 1262 1263 scp->sp_if.if_flags &= ~IFF_OACTIVE; 1264 scp->sp_txcur = 0; 1265 scp->sp_txinuse = 0; 1266 } 1267 1268 /* 1269 * check for buffer overflow 1270 */ 1271 if (dsr & SCA_DSR_BOF) { 1272 printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n", 1273 scp->sp_if.if_xname, 1274 dmac_read_2(scp, SCA_CDAL1), 1275 dmac_read_2(scp, SCA_EDAL1), 1276 dmac_read_1(scp, SCA_CPB1)); 1277 1278 /* 1279 * Yikes. Arrange for a full 1280 * transmitter restart. 1281 */ 1282 scp->sp_if.if_flags &= ~IFF_OACTIVE; 1283 scp->sp_txcur = 0; 1284 scp->sp_txinuse = 0; 1285 } 1286 1287 /* 1288 * check for end of transfer, which is not 1289 * an error. It means that all data queued 1290 * was transmitted, and we mark ourself as 1291 * not in use and stop the watchdog timer. 1292 */ 1293 if (dsr & SCA_DSR_EOT) { 1294 SCA_DPRINTF(SCA_DEBUG_TX, 1295 ("Transmit completed. cda %x eda %x dsr %x\n", 1296 dmac_read_2(scp, SCA_CDAL1), 1297 dmac_read_2(scp, SCA_EDAL1), 1298 dsr)); 1299 1300 scp->sp_if.if_flags &= ~IFF_OACTIVE; 1301 scp->sp_txcur = 0; 1302 scp->sp_txinuse = 0; 1303 1304 /* 1305 * check for more packets 1306 */ 1307 sca_start(&scp->sp_if); 1308 } 1309 } 1310 } 1311 /* 1312 * receive channel check 1313 */ 1314 if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) { 1315 SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n", 1316 (scp == &scp->sca->sc_ports[0] ? 0 : 1))); 1317 1318 dsr = 1; 1319 while (dsr != 0) { 1320 ret++; 1321 1322 dsr = dmac_read_1(scp, SCA_DSR0); 1323 dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD); 1324 1325 /* 1326 * filter out the bits we don't care about 1327 */ 1328 dsr &= (SCA_DSR_EOM | SCA_DSR_COF 1329 | SCA_DSR_BOF | SCA_DSR_EOT); 1330 if (dsr == 0) 1331 break; 1332 1333 /* 1334 * End of frame 1335 */ 1336 if (dsr & SCA_DSR_EOM) { 1337 SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n")); 1338 1339 sca_get_packets(scp); 1340 } 1341 1342 /* 1343 * check for counter overflow 1344 */ 1345 if (dsr & SCA_DSR_COF) { 1346 printf("%s: RXDMA counter overflow\n", 1347 scp->sp_if.if_xname); 1348 1349 sca_dmac_rxinit(scp); 1350 } 1351 1352 /* 1353 * check for end of transfer, which means we 1354 * ran out of descriptors to receive into. 1355 * This means the line is much faster than 1356 * we can handle. 1357 */ 1358 if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) { 1359 printf("%s: RXDMA buffer overflow\n", 1360 scp->sp_if.if_xname); 1361 1362 sca_dmac_rxinit(scp); 1363 } 1364 } 1365 } 1366 1367 return ret; 1368 } 1369 1370 static int 1371 sca_msci_intr(sca_port_t *scp, u_int8_t isr) 1372 { 1373 u_int8_t st1, trc0; 1374 1375 /* get and clear the specific interrupt -- should act on it :)*/ 1376 if ((st1 = msci_read_1(scp, SCA_ST10))) { 1377 /* clear the interrupt */ 1378 msci_write_1(scp, SCA_ST10, st1); 1379 1380 if (st1 & SCA_ST1_UDRN) { 1381 /* underrun -- try to increase ready control */ 1382 trc0 = msci_read_1(scp, SCA_TRC00); 1383 if (trc0 == 0x1f) 1384 printf("TX: underrun - fifo depth maxed\n"); 1385 else { 1386 if ((trc0 += 2) > 0x1f) 1387 trc0 = 0x1f; 1388 SCA_DPRINTF(SCA_DEBUG_TX, 1389 ("TX: udrn - incr fifo to %d\n", trc0)); 1390 msci_write_1(scp, SCA_TRC00, trc0); 1391 } 1392 } 1393 } 1394 return (0); 1395 } 1396 1397 static void 1398 sca_get_packets(sca_port_t *scp) 1399 { 1400 struct sca_softc *sc; 1401 1402 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n")); 1403 1404 sc = scp->sca; 1405 if (sc->sc_usedma) 1406 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 1407 0, sc->scu_allocsize, 1408 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1409 else { 1410 /* 1411 * XXX this code is unable to deal with rx stuff 1412 * in more than 1 page 1413 */ 1414 sc->scu_page_on(sc); 1415 sc->scu_set_page(sc, scp->sp_rxdesc_p); 1416 } 1417 1418 /* process as many frames as are available */ 1419 while (sca_frame_avail(scp)) { 1420 sca_frame_process(scp); 1421 sca_frame_read_done(scp); 1422 } 1423 1424 if (sc->sc_usedma) 1425 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 1426 0, sc->scu_allocsize, 1427 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1428 else 1429 sc->scu_page_off(sc); 1430 } 1431 1432 /* 1433 * Starting with the first descriptor we wanted to read into, up to but 1434 * not including the current SCA read descriptor, look for a packet. 1435 * 1436 * must be called at splnet() 1437 */ 1438 static int 1439 sca_frame_avail(sca_port_t *scp) 1440 { 1441 u_int16_t cda; 1442 u_int32_t desc_p; /* physical address (lower 16 bits) */ 1443 sca_desc_t *desc; 1444 u_int8_t rxstat; 1445 int cdaidx, toolong; 1446 1447 /* 1448 * Read the current descriptor from the SCA. 1449 */ 1450 cda = dmac_read_2(scp, SCA_CDAL0); 1451 1452 /* 1453 * calculate the index of the current descriptor 1454 */ 1455 desc_p = (scp->sp_rxdesc_p & 0xFFFF); 1456 desc_p = cda - desc_p; 1457 cdaidx = desc_p / sizeof(sca_desc_t); 1458 1459 SCA_DPRINTF(SCA_DEBUG_RX, 1460 ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n", 1461 cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart)); 1462 1463 /* note confusion */ 1464 if (cdaidx >= scp->sp_nrxdesc) 1465 panic("current descriptor index out of range"); 1466 1467 /* see if we have a valid frame available */ 1468 toolong = 0; 1469 for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) { 1470 /* 1471 * We might have a valid descriptor. Set up a pointer 1472 * to the kva address for it so we can more easily examine 1473 * the contents. 1474 */ 1475 desc = &scp->sp_rxdesc[scp->sp_rxstart]; 1476 rxstat = sca_desc_read_stat(scp->sca, desc); 1477 1478 SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n", 1479 scp->sp_port, scp->sp_rxstart, rxstat)); 1480 1481 SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n", 1482 scp->sp_port, sca_desc_read_buflen(scp->sca, desc))); 1483 1484 /* 1485 * check for errors 1486 */ 1487 if (rxstat & SCA_DESC_ERRORS) { 1488 /* 1489 * consider an error condition the end 1490 * of a frame 1491 */ 1492 scp->sp_if.if_ierrors++; 1493 toolong = 0; 1494 continue; 1495 } 1496 1497 /* 1498 * if we aren't skipping overlong frames 1499 * we are done, otherwise reset and look for 1500 * another good frame 1501 */ 1502 if (rxstat & SCA_DESC_EOM) { 1503 if (!toolong) 1504 return (1); 1505 toolong = 0; 1506 } else if (!toolong) { 1507 /* 1508 * we currently don't deal with frames 1509 * larger than a single buffer (fixed MTU) 1510 */ 1511 scp->sp_if.if_ierrors++; 1512 toolong = 1; 1513 } 1514 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n", 1515 scp->sp_rxstart)); 1516 } 1517 1518 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n")); 1519 return 0; 1520 } 1521 1522 /* 1523 * Pass the packet up to the kernel if it is a packet we want to pay 1524 * attention to. 1525 * 1526 * MUST BE CALLED AT splnet() 1527 */ 1528 static void 1529 sca_frame_process(sca_port_t *scp) 1530 { 1531 pktqueue_t *pktq = NULL; 1532 struct ifqueue *ifq = NULL; 1533 struct hdlc_header *hdlc; 1534 struct cisco_pkt *cisco; 1535 sca_desc_t *desc; 1536 struct mbuf *m; 1537 u_int8_t *bufp; 1538 u_int16_t len; 1539 u_int32_t t; 1540 int isr = 0; 1541 1542 t = time_uptime * 1000; 1543 desc = &scp->sp_rxdesc[scp->sp_rxstart]; 1544 bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart; 1545 len = sca_desc_read_buflen(scp->sca, desc); 1546 1547 SCA_DPRINTF(SCA_DEBUG_RX, 1548 ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc, 1549 (bus_addr_t)bufp, len)); 1550 1551 #if SCA_DEBUG_LEVEL > 0 1552 if (sca_debug & SCA_DEBUG_RXPKT) 1553 sca_frame_print(scp, desc, bufp); 1554 #endif 1555 /* 1556 * skip packets that are too short 1557 */ 1558 if (len < sizeof(struct hdlc_header)) { 1559 scp->sp_if.if_ierrors++; 1560 return; 1561 } 1562 1563 m = sca_mbuf_alloc(scp->sca, bufp, len); 1564 if (m == NULL) { 1565 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n")); 1566 return; 1567 } 1568 1569 /* 1570 * read and then strip off the HDLC information 1571 */ 1572 m = m_pullup(m, sizeof(struct hdlc_header)); 1573 if (m == NULL) { 1574 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n")); 1575 return; 1576 } 1577 1578 bpf_mtap(&scp->sp_if, m); 1579 1580 scp->sp_if.if_ipackets++; 1581 1582 hdlc = mtod(m, struct hdlc_header *); 1583 switch (ntohs(hdlc->h_proto)) { 1584 #ifdef INET 1585 case HDLC_PROTOCOL_IP: 1586 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n")); 1587 m->m_pkthdr.rcvif = &scp->sp_if; 1588 m->m_pkthdr.len -= sizeof(struct hdlc_header); 1589 m->m_data += sizeof(struct hdlc_header); 1590 m->m_len -= sizeof(struct hdlc_header); 1591 pktq = ip_pktq; 1592 break; 1593 #endif /* INET */ 1594 #ifdef INET6 1595 case HDLC_PROTOCOL_IPV6: 1596 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n")); 1597 m->m_pkthdr.rcvif = &scp->sp_if; 1598 m->m_pkthdr.len -= sizeof(struct hdlc_header); 1599 m->m_data += sizeof(struct hdlc_header); 1600 m->m_len -= sizeof(struct hdlc_header); 1601 pktq = ip6_pktq; 1602 break; 1603 #endif /* INET6 */ 1604 case CISCO_KEEPALIVE: 1605 SCA_DPRINTF(SCA_DEBUG_CISCO, 1606 ("Received CISCO keepalive packet\n")); 1607 1608 if (len < CISCO_PKT_LEN) { 1609 SCA_DPRINTF(SCA_DEBUG_CISCO, 1610 ("short CISCO packet %d, wanted %d\n", 1611 len, CISCO_PKT_LEN)); 1612 scp->sp_if.if_ierrors++; 1613 goto dropit; 1614 } 1615 1616 m = m_pullup(m, sizeof(struct cisco_pkt)); 1617 if (m == NULL) { 1618 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n")); 1619 return; 1620 } 1621 1622 cisco = (struct cisco_pkt *) 1623 (mtod(m, u_int8_t *) + HDLC_HDRLEN); 1624 m->m_pkthdr.rcvif = &scp->sp_if; 1625 1626 switch (ntohl(cisco->type)) { 1627 case CISCO_ADDR_REQ: 1628 printf("Got CISCO addr_req, ignoring\n"); 1629 scp->sp_if.if_ierrors++; 1630 goto dropit; 1631 1632 case CISCO_ADDR_REPLY: 1633 printf("Got CISCO addr_reply, ignoring\n"); 1634 scp->sp_if.if_ierrors++; 1635 goto dropit; 1636 1637 case CISCO_KEEPALIVE_REQ: 1638 1639 SCA_DPRINTF(SCA_DEBUG_CISCO, 1640 ("Received KA, mseq %d," 1641 " yseq %d, rel 0x%04x, t0" 1642 " %04x, t1 %04x\n", 1643 ntohl(cisco->par1), ntohl(cisco->par2), 1644 ntohs(cisco->rel), ntohs(cisco->time0), 1645 ntohs(cisco->time1))); 1646 1647 scp->cka_lastrx = ntohl(cisco->par1); 1648 scp->cka_lasttx++; 1649 1650 /* 1651 * schedule the transmit right here. 1652 */ 1653 cisco->par2 = cisco->par1; 1654 cisco->par1 = htonl(scp->cka_lasttx); 1655 cisco->time0 = htons((u_int16_t)(t >> 16)); 1656 cisco->time1 = htons((u_int16_t)(t & 0x0000ffff)); 1657 1658 ifq = &scp->linkq; 1659 if (IF_QFULL(ifq)) { 1660 IF_DROP(ifq); 1661 goto dropit; 1662 } 1663 IF_ENQUEUE(ifq, m); 1664 1665 sca_start(&scp->sp_if); 1666 1667 /* since start may have reset this fix */ 1668 if (!scp->sca->sc_usedma) { 1669 scp->sca->scu_set_page(scp->sca, 1670 scp->sp_rxdesc_p); 1671 scp->sca->scu_page_on(scp->sca); 1672 } 1673 return; 1674 default: 1675 SCA_DPRINTF(SCA_DEBUG_CISCO, 1676 ("Unknown CISCO keepalive protocol 0x%04x\n", 1677 ntohl(cisco->type))); 1678 1679 scp->sp_if.if_noproto++; 1680 goto dropit; 1681 } 1682 return; 1683 default: 1684 SCA_DPRINTF(SCA_DEBUG_RX, 1685 ("Unknown/unexpected ethertype 0x%04x\n", 1686 ntohs(hdlc->h_proto))); 1687 scp->sp_if.if_noproto++; 1688 goto dropit; 1689 } 1690 1691 /* Queue the packet */ 1692 if (__predict_true(pktq)) { 1693 if (__predict_false(!pktq_enqueue(pktq, m, 0))) { 1694 scp->sp_if.if_iqdrops++; 1695 goto dropit; 1696 } 1697 return; 1698 } 1699 if (!IF_QFULL(ifq)) { 1700 IF_ENQUEUE(ifq, m); 1701 schednetisr(isr); 1702 } else { 1703 IF_DROP(ifq); 1704 scp->sp_if.if_iqdrops++; 1705 goto dropit; 1706 } 1707 return; 1708 dropit: 1709 if (m) 1710 m_freem(m); 1711 return; 1712 } 1713 1714 #if SCA_DEBUG_LEVEL > 0 1715 /* 1716 * do a hex dump of the packet received into descriptor "desc" with 1717 * data buffer "p" 1718 */ 1719 static void 1720 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p) 1721 { 1722 int i; 1723 int nothing_yet = 1; 1724 struct sca_softc *sc; 1725 u_int len; 1726 1727 sc = scp->sca; 1728 printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n", 1729 desc, 1730 sca_desc_read_chainp(sc, desc), 1731 sca_desc_read_bufp(sc, desc), 1732 sca_desc_read_stat(sc, desc), 1733 (len = sca_desc_read_buflen(sc, desc))); 1734 1735 for (i = 0 ; i < len && i < 256; i++) { 1736 if (nothing_yet == 1 && 1737 (sc->sc_usedma ? *p 1738 : bus_space_read_1(sc->scu_memt, sc->scu_memh, 1739 sca_page_addr(sc, p))) == 0) { 1740 p++; 1741 continue; 1742 } 1743 nothing_yet = 0; 1744 if (i % 16 == 0) 1745 printf("\n"); 1746 printf("%02x ", 1747 (sc->sc_usedma ? *p 1748 : bus_space_read_1(sc->scu_memt, sc->scu_memh, 1749 sca_page_addr(sc, p)))); 1750 p++; 1751 } 1752 1753 if (i % 16 != 1) 1754 printf("\n"); 1755 } 1756 #endif 1757 1758 /* 1759 * adjust things because we have just read the current starting 1760 * frame 1761 * 1762 * must be called at splnet() 1763 */ 1764 static void 1765 sca_frame_read_done(sca_port_t *scp) 1766 { 1767 u_int16_t edesc_p; 1768 1769 /* update where our indicies are */ 1770 scp->sp_rxend = scp->sp_rxstart; 1771 scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc; 1772 1773 /* update the error [end] descriptor */ 1774 edesc_p = (u_int16_t)scp->sp_rxdesc_p + 1775 (sizeof(sca_desc_t) * scp->sp_rxend); 1776 dmac_write_2(scp, SCA_EDAL0, edesc_p); 1777 } 1778 1779 /* 1780 * set a port to the "up" state 1781 */ 1782 static void 1783 sca_port_up(sca_port_t *scp) 1784 { 1785 struct sca_softc *sc = scp->sca; 1786 struct timeval now; 1787 #if 0 1788 u_int8_t ier0, ier1; 1789 #endif 1790 1791 /* 1792 * reset things 1793 */ 1794 #if 0 1795 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET); 1796 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET); 1797 #endif 1798 /* 1799 * clear in-use flag 1800 */ 1801 scp->sp_if.if_flags &= ~IFF_OACTIVE; 1802 scp->sp_if.if_flags |= IFF_RUNNING; 1803 1804 /* 1805 * raise DTR 1806 */ 1807 sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1); 1808 1809 /* 1810 * raise RTS 1811 */ 1812 msci_write_1(scp, SCA_CTL0, 1813 (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK) 1814 | SCA_CTL_RTS_HIGH); 1815 1816 #if 0 1817 /* 1818 * enable interrupts (no timer IER2) 1819 */ 1820 ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0 1821 | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0; 1822 ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B 1823 | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B; 1824 if (scp->sp_port == 1) { 1825 ier0 <<= 4; 1826 ier1 <<= 4; 1827 } 1828 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0); 1829 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1); 1830 #else 1831 if (scp->sp_port == 0) { 1832 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f); 1833 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f); 1834 } else { 1835 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0); 1836 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0); 1837 } 1838 #endif 1839 1840 /* 1841 * enable transmit and receive 1842 */ 1843 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE); 1844 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE); 1845 1846 /* 1847 * reset internal state 1848 */ 1849 scp->sp_txinuse = 0; 1850 scp->sp_txcur = 0; 1851 getmicrotime(&now); 1852 scp->cka_lasttx = now.tv_usec; 1853 scp->cka_lastrx = 0; 1854 } 1855 1856 /* 1857 * set a port to the "down" state 1858 */ 1859 static void 1860 sca_port_down(sca_port_t *scp) 1861 { 1862 struct sca_softc *sc = scp->sca; 1863 #if 0 1864 u_int8_t ier0, ier1; 1865 #endif 1866 1867 /* 1868 * lower DTR 1869 */ 1870 sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0); 1871 1872 /* 1873 * lower RTS 1874 */ 1875 msci_write_1(scp, SCA_CTL0, 1876 (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK) 1877 | SCA_CTL_RTS_LOW); 1878 1879 /* 1880 * disable interrupts 1881 */ 1882 #if 0 1883 ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0 1884 | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0; 1885 ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B 1886 | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B; 1887 if (scp->sp_port == 1) { 1888 ier0 <<= 4; 1889 ier1 <<= 4; 1890 } 1891 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0); 1892 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1); 1893 #else 1894 if (scp->sp_port == 0) { 1895 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0); 1896 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0); 1897 } else { 1898 sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f); 1899 sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f); 1900 } 1901 #endif 1902 1903 /* 1904 * disable transmit and receive 1905 */ 1906 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE); 1907 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE); 1908 1909 /* 1910 * no, we're not in use anymore 1911 */ 1912 scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING); 1913 } 1914 1915 /* 1916 * disable all DMA and interrupts for all ports at once. 1917 */ 1918 void 1919 sca_shutdown(struct sca_softc *sca) 1920 { 1921 /* 1922 * disable DMA and interrupts 1923 */ 1924 sca_write_1(sca, SCA_DMER, 0); 1925 sca_write_1(sca, SCA_IER0, 0); 1926 sca_write_1(sca, SCA_IER1, 0); 1927 } 1928 1929 /* 1930 * If there are packets to transmit, start the transmit DMA logic. 1931 */ 1932 static void 1933 sca_port_starttx(sca_port_t *scp) 1934 { 1935 u_int32_t startdesc_p, enddesc_p; 1936 int enddesc; 1937 1938 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n")); 1939 1940 if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE) 1941 || scp->sp_txinuse == 0) 1942 return; 1943 1944 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n")); 1945 1946 scp->sp_if.if_flags |= IFF_OACTIVE; 1947 1948 /* 1949 * We have something to do, since we have at least one packet 1950 * waiting, and we are not already marked as active. 1951 */ 1952 enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc; 1953 startdesc_p = scp->sp_txdesc_p; 1954 enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc; 1955 1956 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n", 1957 startdesc_p, enddesc_p)); 1958 1959 dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff)); 1960 dmac_write_2(scp, SCA_CDAL1, 1961 (u_int16_t)(startdesc_p & 0x0000ffff)); 1962 1963 /* 1964 * enable the DMA 1965 */ 1966 dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE); 1967 } 1968 1969 /* 1970 * allocate an mbuf at least long enough to hold "len" bytes. 1971 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf, 1972 * otherwise let the caller handle copying the data in. 1973 */ 1974 static struct mbuf * 1975 sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len) 1976 { 1977 struct mbuf *m; 1978 1979 /* 1980 * allocate an mbuf and copy the important bits of data 1981 * into it. If the packet won't fit in the header, 1982 * allocate a cluster for it and store it there. 1983 */ 1984 MGETHDR(m, M_DONTWAIT, MT_DATA); 1985 if (m == NULL) 1986 return NULL; 1987 if (len > MHLEN) { 1988 if (len > MCLBYTES) { 1989 m_freem(m); 1990 return NULL; 1991 } 1992 MCLGET(m, M_DONTWAIT); 1993 if ((m->m_flags & M_EXT) == 0) { 1994 m_freem(m); 1995 return NULL; 1996 } 1997 } 1998 if (p != NULL) { 1999 /* XXX do we need to sync here? */ 2000 if (sc->sc_usedma) 2001 memcpy(mtod(m, void *), p, len); 2002 else 2003 bus_space_read_region_1(sc->scu_memt, sc->scu_memh, 2004 sca_page_addr(sc, p), mtod(m, u_int8_t *), len); 2005 } 2006 m->m_len = len; 2007 m->m_pkthdr.len = len; 2008 2009 return (m); 2010 } 2011 2012 /* 2013 * get the base clock 2014 */ 2015 void 2016 sca_get_base_clock(struct sca_softc *sc) 2017 { 2018 struct timeval btv, ctv, dtv; 2019 u_int64_t bcnt; 2020 u_int32_t cnt; 2021 u_int16_t subcnt; 2022 2023 /* disable the timer, set prescale to 0 */ 2024 sca_write_1(sc, SCA_TCSR0, 0); 2025 sca_write_1(sc, SCA_TEPR0, 0); 2026 2027 /* reset the counter */ 2028 (void)sca_read_1(sc, SCA_TCSR0); 2029 subcnt = sca_read_2(sc, SCA_TCNTL0); 2030 2031 /* count to max */ 2032 sca_write_2(sc, SCA_TCONRL0, 0xffff); 2033 2034 cnt = 0; 2035 microtime(&btv); 2036 /* start the timer -- no interrupt enable */ 2037 sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME); 2038 for (;;) { 2039 microtime(&ctv); 2040 2041 /* end around 3/4 of a second */ 2042 timersub(&ctv, &btv, &dtv); 2043 if (dtv.tv_usec >= 750000) 2044 break; 2045 2046 /* spin */ 2047 while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF)) 2048 ; 2049 /* reset the timer */ 2050 (void)sca_read_2(sc, SCA_TCNTL0); 2051 cnt++; 2052 } 2053 2054 /* stop the timer */ 2055 sca_write_1(sc, SCA_TCSR0, 0); 2056 2057 subcnt = sca_read_2(sc, SCA_TCNTL0); 2058 /* add the slop in and get the total timer ticks */ 2059 cnt = (cnt << 16) | subcnt; 2060 2061 /* cnt is 1/8 the actual time */ 2062 bcnt = cnt * 8; 2063 /* make it proportional to 3/4 of a second */ 2064 bcnt *= (u_int64_t)750000; 2065 bcnt /= (u_int64_t)dtv.tv_usec; 2066 cnt = bcnt; 2067 2068 /* make it Hz */ 2069 cnt *= 4; 2070 cnt /= 3; 2071 2072 SCA_DPRINTF(SCA_DEBUG_CLOCK, 2073 ("sca: unadjusted base %lu Hz\n", (u_long)cnt)); 2074 2075 /* 2076 * round to the nearest 200 -- this allows for +-3 ticks error 2077 */ 2078 sc->sc_baseclock = ((cnt + 100) / 200) * 200; 2079 } 2080 2081 /* 2082 * print the information about the clock on the ports 2083 */ 2084 void 2085 sca_print_clock_info(struct sca_softc *sc) 2086 { 2087 struct sca_port *scp; 2088 u_int32_t mhz, div; 2089 int i; 2090 2091 printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent), 2092 sc->sc_baseclock); 2093 2094 /* print the information about the port clock selection */ 2095 for (i = 0; i < sc->sc_numports; i++) { 2096 scp = &sc->sc_ports[i]; 2097 mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256); 2098 div = scp->sp_rxs & SCA_RXS_DIV_MASK; 2099 2100 printf("%s: rx clock: ", scp->sp_if.if_xname); 2101 switch (scp->sp_rxs & SCA_RXS_CLK_MASK) { 2102 case SCA_RXS_CLK_LINE: 2103 printf("line"); 2104 break; 2105 case SCA_RXS_CLK_LINE_SN: 2106 printf("line with noise suppression"); 2107 break; 2108 case SCA_RXS_CLK_INTERNAL: 2109 printf("internal %d Hz", (mhz >> div)); 2110 break; 2111 case SCA_RXS_CLK_ADPLL_OUT: 2112 printf("adpll using internal %d Hz", (mhz >> div)); 2113 break; 2114 case SCA_RXS_CLK_ADPLL_IN: 2115 printf("adpll using line clock"); 2116 break; 2117 } 2118 printf(" tx clock: "); 2119 div = scp->sp_txs & SCA_TXS_DIV_MASK; 2120 switch (scp->sp_txs & SCA_TXS_CLK_MASK) { 2121 case SCA_TXS_CLK_LINE: 2122 printf("line\n"); 2123 break; 2124 case SCA_TXS_CLK_INTERNAL: 2125 printf("internal %d Hz\n", (mhz >> div)); 2126 break; 2127 case SCA_TXS_CLK_RXCLK: 2128 printf("rxclock\n"); 2129 break; 2130 } 2131 if (scp->sp_eclock) 2132 printf("%s: outputting line clock\n", 2133 scp->sp_if.if_xname); 2134 } 2135 } 2136 2137