1 /* $NetBSD: hd64570.c,v 1.44 2013/03/01 18:25:55 joerg Exp $ */ 2 3 /* 4 * Copyright (c) 1999 Christian E. Hopps 5 * Copyright (c) 1998 Vixie Enterprises 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. Neither the name of Vixie Enterprises nor the names 18 * of its contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY VIXIE ENTERPRISES AND 22 * CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 * DISCLAIMED. IN NO EVENT SHALL VIXIE ENTERPRISES OR 26 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * This software has been written for Vixie Enterprises by Michael Graff 36 * <explorer@flame.org>. To learn more about Vixie Enterprises, see 37 * ``http://www.vix.com''. 
38 */ 39 40 /* 41 * TODO: 42 * 43 * o teach the receive logic about errors, and about long frames that 44 * span more than one input buffer. (Right now, receive/transmit is 45 * limited to one descriptor's buffer space, which is MTU + 4 bytes. 46 * This is currently 1504, which is large enough to hold the HDLC 47 * header and the packet itself. Packets which are too long are 48 * silently dropped on transmit and silently dropped on receive. 49 * o write code to handle the msci interrupts, needed only for CD 50 * and CTS changes. 51 * o consider switching back to a "queue tx with DMA active" model which 52 * should help sustain outgoing traffic 53 * o through clever use of bus_dma*() functions, it should be possible 54 * to map the mbuf's data area directly into a descriptor transmit 55 * buffer, removing the need to allocate extra memory. If, however, 56 * we run out of descriptors for this, we will need to then allocate 57 * one large mbuf, copy the fragmented chain into it, and put it onto 58 * a single descriptor. 59 * o use bus_dmamap_sync() with the right offset and lengths, rather 60 * than cheating and always sync'ing the whole region. 61 * 62 * o perhaps allow rx and tx to be in more than one page 63 * if not using DMA. currently the assumption is that 64 * rx uses a page and tx uses a page. 
65 */ 66 67 #include <sys/cdefs.h> 68 __KERNEL_RCSID(0, "$NetBSD: hd64570.c,v 1.44 2013/03/01 18:25:55 joerg Exp $"); 69 70 #include "opt_inet.h" 71 72 #include <sys/param.h> 73 #include <sys/systm.h> 74 #include <sys/device.h> 75 #include <sys/mbuf.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/kernel.h> 79 80 #include <net/if.h> 81 #include <net/if_types.h> 82 #include <net/netisr.h> 83 84 #if defined(INET) || defined(INET6) 85 #include <netinet/in.h> 86 #include <netinet/in_systm.h> 87 #include <netinet/in_var.h> 88 #include <netinet/ip.h> 89 #ifdef INET6 90 #include <netinet6/in6_var.h> 91 #endif 92 #endif 93 94 #include <net/bpf.h> 95 96 #include <sys/cpu.h> 97 #include <sys/bus.h> 98 #include <sys/intr.h> 99 100 #include <dev/pci/pcivar.h> 101 #include <dev/pci/pcireg.h> 102 #include <dev/pci/pcidevs.h> 103 104 #include <dev/ic/hd64570reg.h> 105 #include <dev/ic/hd64570var.h> 106 107 #define SCA_DEBUG_RX 0x0001 108 #define SCA_DEBUG_TX 0x0002 109 #define SCA_DEBUG_CISCO 0x0004 110 #define SCA_DEBUG_DMA 0x0008 111 #define SCA_DEBUG_RXPKT 0x0010 112 #define SCA_DEBUG_TXPKT 0x0020 113 #define SCA_DEBUG_INTR 0x0040 114 #define SCA_DEBUG_CLOCK 0x0080 115 116 #if 0 117 #define SCA_DEBUG_LEVEL ( 0xFFFF ) 118 #else 119 #define SCA_DEBUG_LEVEL 0 120 #endif 121 122 u_int32_t sca_debug = SCA_DEBUG_LEVEL; 123 124 #if SCA_DEBUG_LEVEL > 0 125 #define SCA_DPRINTF(l, x) do { \ 126 if ((l) & sca_debug) \ 127 printf x;\ 128 } while (0) 129 #else 130 #define SCA_DPRINTF(l, x) 131 #endif 132 133 #if 0 134 #define SCA_USE_FASTQ /* use a split queue, one for fast traffic */ 135 #endif 136 137 static inline void msci_write_1(sca_port_t *, u_int, u_int8_t); 138 static inline u_int8_t msci_read_1(sca_port_t *, u_int); 139 140 static inline void dmac_write_1(sca_port_t *, u_int, u_int8_t); 141 static inline void dmac_write_2(sca_port_t *, u_int, u_int16_t); 142 static inline u_int8_t dmac_read_1(sca_port_t *, u_int); 143 static inline u_int16_t 
dmac_read_2(sca_port_t *, u_int);

/* per-port initialization helpers */
static	void sca_msci_init(struct sca_softc *, sca_port_t *);
static	void sca_dmac_init(struct sca_softc *, sca_port_t *);
static	void sca_dmac_rxinit(sca_port_t *);

/* interrupt dispatch helpers */
static	int sca_dmac_intr(sca_port_t *, u_int8_t);
static	int sca_msci_intr(sca_port_t *, u_int8_t);

/* receive-path helpers */
static	void sca_get_packets(sca_port_t *);
static	int sca_frame_avail(sca_port_t *);
static	void sca_frame_process(sca_port_t *);
static	void sca_frame_read_done(sca_port_t *);

static	void sca_port_starttx(sca_port_t *);

static	void sca_port_up(sca_port_t *);
static	void sca_port_down(sca_port_t *);

/* ifnet entry points */
static	int sca_output(struct ifnet *, struct mbuf *, const struct sockaddr *,
			   struct rtentry *);
static	int sca_ioctl(struct ifnet *, u_long, void *);
static	void sca_start(struct ifnet *);
static	void sca_watchdog(struct ifnet *);

static struct mbuf *sca_mbuf_alloc(struct sca_softc *, void *, u_int);

#if SCA_DEBUG_LEVEL > 0
static	void sca_frame_print(sca_port_t *, sca_desc_t *, u_int8_t *);
#endif


/*
 * Indirect chip register access: the SCA may be reached through
 * different bus attachments, so all accesses go through per-softc
 * function pointers.
 */
#define	sca_read_1(sc, reg)		(sc)->sc_read_1(sc, reg)
#define	sca_read_2(sc, reg)		(sc)->sc_read_2(sc, reg)
#define	sca_write_1(sc, reg, val)	(sc)->sc_write_1(sc, reg, val)
#define	sca_write_2(sc, reg, val)	(sc)->sc_write_2(sc, reg, val)

/* offset of an address within the currently mapped window/page */
#define	sca_page_addr(sc, addr)	((bus_addr_t)(u_long)(addr) & (sc)->scu_pagemask)

/*
 * write a byte to a per-port MSCI register
 */
static inline void
msci_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->msci_off + reg, val);
}

/*
 * read a byte from a per-port MSCI register
 */
static inline u_int8_t
msci_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->msci_off + reg);
}

/*
 * write a byte to a per-port DMAC register
 */
static inline void
dmac_write_1(sca_port_t *scp, u_int reg, u_int8_t val)
{
	sca_write_1(scp->sca, scp->dmac_off + reg, val);
}

/*
 * write a 16-bit word to a per-port DMAC register
 */
static inline void
dmac_write_2(sca_port_t *scp, u_int reg, u_int16_t val)
{
	sca_write_2(scp->sca, scp->dmac_off + reg, val);
}

/*
 * read a byte from a per-port DMAC register
 */
static inline u_int8_t
dmac_read_1(sca_port_t *scp, u_int reg)
{
	return sca_read_1(scp->sca, scp->dmac_off + reg);
}

/*
 * read a 16-bit word from a per-port DMAC register
 */
static inline u_int16_t
dmac_read_2(sca_port_t *scp, u_int reg)
{
	return sca_read_2(scp->sca, scp->dmac_off + reg);
}

/*
 * read the chain pointer
 * (descriptors live either in DMA-able host memory or in paged
 * on-board memory reached with bus_space; all sca_desc_* accessors
 * pick the right method via sc_usedma)
 */
static inline u_int16_t
sca_desc_read_chainp(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_chainp);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_chainp)));
}

/*
 * write the chain pointer
 */
static inline void
sca_desc_write_chainp(struct sca_softc *sc, struct sca_desc *dp, u_int16_t cp)
{
	if (sc->sc_usedma)
		(dp)->sd_chainp = cp;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_chainp), cp);
}

/*
 * read the buffer pointer
 * (24-bit address split across sd_bufp (low 16) and sd_hbufp (high 8))
 */
static inline u_int32_t
sca_desc_read_bufp(struct sca_softc *sc, struct sca_desc *dp)
{
	u_int32_t address;

	if (sc->sc_usedma)
		address = dp->sd_bufp | dp->sd_hbufp << 16;
	else {
		address = bus_space_read_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp));
		address |= bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_hbufp)) << 16;
	}
	return (address);
}

/*
 * write the buffer pointer
 */
static inline void
sca_desc_write_bufp(struct sca_softc *sc, struct sca_desc *dp, u_int32_t bufp)
{
	if (sc->sc_usedma) {
		dp->sd_bufp = bufp & 0xFFFF;
		dp->sd_hbufp = (bufp & 0x00FF0000) >> 16;
	} else {
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_bufp),
		    bufp & 0xFFFF);
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_hbufp),
		    (bufp & 0x00FF0000) >> 16);
	}
}

/*
 * read the buffer length
 */
static inline u_int16_t
sca_desc_read_buflen(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_buflen);
	return (bus_space_read_2(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_buflen)));
}

/*
 * write the buffer length
 */
static inline void
sca_desc_write_buflen(struct sca_softc *sc, struct sca_desc *dp, u_int16_t len)
{
	if (sc->sc_usedma)
		(dp)->sd_buflen = len;
	else
		bus_space_write_2(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp)
		    + offsetof(struct sca_desc, sd_buflen), len);
}

/*
 * read the descriptor status
 */
static inline u_int8_t
sca_desc_read_stat(struct sca_softc *sc, struct sca_desc *dp)
{
	if (sc->sc_usedma)
		return ((dp)->sd_stat);
	return (bus_space_read_1(sc->scu_memt, sc->scu_memh,
	    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat)));
}

/*
 * write the descriptor status
 */
static inline void
sca_desc_write_stat(struct sca_softc *sc, struct sca_desc *dp, u_int8_t stat)
{
	if (sc->sc_usedma)
		(dp)->sd_stat = stat;
	else
		bus_space_write_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, dp) + offsetof(struct sca_desc, sd_stat),
		    stat);
}

/*
 * Chip-global initialization: sanity-check the port count, quiesce all
 * interrupt sources and DMA channels, program the interrupt/wait-state
 * control registers, then re-enable the DMA master enable bit.
 */
void
sca_init(struct sca_softc *sc)
{
	/*
	 * Do a little sanity check:  check number of ports.
	 */
	if (sc->sc_numports < 1 || sc->sc_numports > 2)
		panic("sca can\'t handle more than 2 or less than 1 ports");

	/*
	 * disable DMA and MSCI interrupts
	 */
	sca_write_1(sc, SCA_DMER, 0);
	sca_write_1(sc, SCA_IER0, 0);
	sca_write_1(sc, SCA_IER1, 0);
	sca_write_1(sc, SCA_IER2, 0);

	/*
	 * configure interrupt system
	 */
	sca_write_1(sc, SCA_ITCR,
	    SCA_ITCR_INTR_PRI_MSCI | SCA_ITCR_ACK_NONE | SCA_ITCR_VOUT_IVR);
#if 0
	/* these are for the interrupt ack cycle which we don't use */
	sca_write_1(sc, SCA_IVR, 0x40);
	sca_write_1(sc, SCA_IMVR, 0x40);
#endif

	/*
	 * set wait control register to zero wait states
	 */
	sca_write_1(sc, SCA_PABR0, 0);
	sca_write_1(sc, SCA_PABR1, 0);
	sca_write_1(sc, SCA_WCRL, 0);
	sca_write_1(sc, SCA_WCRM, 0);
	sca_write_1(sc, SCA_WCRH, 0);

	/*
	 * disable DMA and reset status
	 */
	sca_write_1(sc, SCA_PCR, SCA_PCR_PR2);

	/*
	 * disable transmit DMA for all channels
	 */
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_0, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_0, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR0 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR0 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);
	sca_write_1(sc, SCA_DSR1 + SCA_DMAC_OFF_1, 0);
	sca_write_1(sc, SCA_DCR1 + SCA_DMAC_OFF_1, SCA_DCR_ABRT);

	/*
	 * enable DMA based on channel enable flags for each channel
	 */
	sca_write_1(sc, SCA_DMER, SCA_DMER_EN);

	/*
	 * Should check to see if the chip is responding, but for now
	 * assume it is.
	 */
}

/*
 * initialize the port and attach it to the networking layer
 */
void
sca_port_attach(struct sca_softc *sc, u_int port)
{
	struct timeval now;
	sca_port_t *scp = &sc->sc_ports[port];
	struct ifnet *ifp;
	static u_int ntwo_unit = 0;

	scp->sca = sc;  /* point back to the parent */

	scp->sp_port = port;

	/* select the register windows for this port; unit number is
	 * derived from the parent device when one exists */
	if (port == 0) {
		scp->msci_off = SCA_MSCI_OFF_0;
		scp->dmac_off = SCA_DMAC_OFF_0;
		if(sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 0;
		else
			ntwo_unit = 0;	/* XXX */
	} else {
		scp->msci_off = SCA_MSCI_OFF_1;
		scp->dmac_off = SCA_DMAC_OFF_1;
		if(sc->sc_parent != NULL)
			ntwo_unit = device_unit(sc->sc_parent) * 2 + 1;
		else
			ntwo_unit = 1;	/* XXX */
	}

	sca_msci_init(sc, scp);
	sca_dmac_init(sc, scp);

	/*
	 * attach to the network layer
	 */
	ifp = &scp->sp_if;
	snprintf(ifp->if_xname, sizeof(ifp->if_xname), "ntwo%d", ntwo_unit);
	ifp->if_softc = scp;
	ifp->if_mtu = SCA_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PTPSERIAL;
	ifp->if_hdrlen = HDLC_HDRLEN;
	ifp->if_ioctl = sca_ioctl;
	ifp->if_output = sca_output;
	ifp->if_watchdog = sca_watchdog;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	scp->linkq.ifq_maxlen = 5; /* if we exceed this we are hosed already */
#ifdef SCA_USE_FASTQ
	scp->fastq.ifq_maxlen = IFQ_MAXLEN;
#endif
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_alloc_sadl(ifp);
	bpf_attach(ifp, DLT_HDLC, HDLC_HDRLEN);

	if (sc->sc_parent == NULL)
		printf("%s: port %d\n", ifp->if_xname, port);
	else
		printf("%s at %s port %d\n",
		       ifp->if_xname, device_xname(sc->sc_parent), port);

	/*
	 * reset the last seen times on the cisco keepalive protocol
	 */
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

#if 0
/*
 * returns log2(div), sets
'tmc' for the required freq 'hz' 474 */ 475 static u_int8_t 476 sca_msci_get_baud_rate_values(u_int32_t hz, u_int8_t *tmcp) 477 { 478 u_int32_t tmc, div; 479 u_int32_t clock; 480 481 /* clock hz = (chipclock / tmc) / 2^(div); */ 482 /* 483 * TD == tmc * 2^(n) 484 * 485 * note: 486 * 1 <= TD <= 256 TD is inc of 1 487 * 2 <= TD <= 512 TD is inc of 2 488 * 4 <= TD <= 1024 TD is inc of 4 489 * ... 490 * 512 <= TD <= 256*512 TD is inc of 512 491 * 492 * so note there are overlaps. We lose prec 493 * as div increases so we wish to minize div. 494 * 495 * basically we want to do 496 * 497 * tmc = chip / hz, but have tmc <= 256 498 */ 499 500 /* assume system clock is 9.8304MHz or 9830400Hz */ 501 clock = clock = 9830400 >> 1; 502 503 /* round down */ 504 div = 0; 505 while ((tmc = clock / hz) > 256 || (tmc == 256 && (clock / tmc) > hz)) { 506 clock >>= 1; 507 div++; 508 } 509 if (clock / tmc > hz) 510 tmc++; 511 if (!tmc) 512 tmc = 1; 513 514 if (div > SCA_RXS_DIV_512) { 515 /* set to maximums */ 516 div = SCA_RXS_DIV_512; 517 tmc = 0; 518 } 519 520 *tmcp = (tmc & 0xFF); /* 0 == 256 */ 521 return (div & 0xFF); 522 } 523 #endif 524 525 /* 526 * initialize the port's MSCI 527 */ 528 static void 529 sca_msci_init(struct sca_softc *sc, sca_port_t *scp) 530 { 531 /* reset the channel */ 532 msci_write_1(scp, SCA_CMD0, SCA_CMD_RESET); 533 534 msci_write_1(scp, SCA_MD00, 535 ( SCA_MD0_CRC_1 536 | SCA_MD0_CRC_CCITT 537 | SCA_MD0_CRC_ENABLE 538 | SCA_MD0_MODE_HDLC)); 539 #if 0 540 /* immediately send receive reset so the above takes */ 541 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET); 542 #endif 543 544 msci_write_1(scp, SCA_MD10, SCA_MD1_NOADDRCHK); 545 msci_write_1(scp, SCA_MD20, 546 (SCA_MD2_DUPLEX | SCA_MD2_ADPLLx8 | SCA_MD2_NRZ)); 547 548 /* be safe and do it again */ 549 msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET); 550 551 /* setup underrun and idle control, and initial RTS state */ 552 msci_write_1(scp, SCA_CTL0, 553 (SCA_CTL_IDLC_PATTERN 554 | SCA_CTL_UDRNC_AFTER_FCS 555 
| SCA_CTL_RTS_LOW)); 556 557 /* reset the transmitter */ 558 msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET); 559 560 /* 561 * set the clock sources 562 */ 563 msci_write_1(scp, SCA_RXS0, scp->sp_rxs); 564 msci_write_1(scp, SCA_TXS0, scp->sp_txs); 565 msci_write_1(scp, SCA_TMC0, scp->sp_tmc); 566 567 /* set external clock generate as requested */ 568 sc->sc_clock_callback(sc->sc_aux, scp->sp_port, scp->sp_eclock); 569 570 /* 571 * XXX don't pay attention to CTS or CD changes right now. I can't 572 * simulate one, and the transmitter will try to transmit even if 573 * CD isn't there anyway, so nothing bad SHOULD happen. 574 */ 575 #if 0 576 msci_write_1(scp, SCA_IE00, 0); 577 msci_write_1(scp, SCA_IE10, 0); /* 0x0c == CD and CTS changes only */ 578 #else 579 /* this would deliver transmitter underrun to ST1/ISR1 */ 580 msci_write_1(scp, SCA_IE10, SCA_ST1_UDRN); 581 msci_write_1(scp, SCA_IE00, SCA_ST0_TXINT); 582 #endif 583 msci_write_1(scp, SCA_IE20, 0); 584 585 msci_write_1(scp, SCA_FIE0, 0); 586 587 msci_write_1(scp, SCA_SA00, 0); 588 msci_write_1(scp, SCA_SA10, 0); 589 590 msci_write_1(scp, SCA_IDL0, 0x7e); 591 592 msci_write_1(scp, SCA_RRC0, 0x0e); 593 /* msci_write_1(scp, SCA_TRC00, 0x10); */ 594 /* 595 * the correct values here are important for avoiding underruns 596 * for any value less than or equal to TRC0 txrdy is activated 597 * which will start the dmac transfer to the fifo. 598 * for buffer size >= TRC1 + 1 txrdy is cleared which will stop DMA. 599 * 600 * thus if we are using a very fast clock that empties the fifo 601 * quickly, delays in the dmac starting to fill the fifo can 602 * lead to underruns so we want a fairly full fifo to still 603 * cause the dmac to start. for cards with on board ram this 604 * has no effect on system performance. For cards that DMA 605 * to/from system memory it will cause more, shorter, 606 * bus accesses rather than fewer longer ones. 
607 */ 608 msci_write_1(scp, SCA_TRC00, 0x00); 609 msci_write_1(scp, SCA_TRC10, 0x1f); 610 } 611 612 /* 613 * Take the memory for the port and construct two circular linked lists of 614 * descriptors (one tx, one rx) and set the pointers in these descriptors 615 * to point to the buffer space for this port. 616 */ 617 static void 618 sca_dmac_init(struct sca_softc *sc, sca_port_t *scp) 619 { 620 sca_desc_t *desc; 621 u_int32_t desc_p; 622 u_int32_t buf_p; 623 int i; 624 625 if (sc->sc_usedma) 626 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0, sc->scu_allocsize, 627 BUS_DMASYNC_PREWRITE); 628 else { 629 /* 630 * XXX assumes that all tx desc and bufs in same page 631 */ 632 sc->scu_page_on(sc); 633 sc->scu_set_page(sc, scp->sp_txdesc_p); 634 } 635 636 desc = scp->sp_txdesc; 637 desc_p = scp->sp_txdesc_p; 638 buf_p = scp->sp_txbuf_p; 639 scp->sp_txcur = 0; 640 scp->sp_txinuse = 0; 641 642 #ifdef DEBUG 643 /* make sure that we won't wrap */ 644 if ((desc_p & 0xffff0000) != 645 ((desc_p + sizeof(*desc) * scp->sp_ntxdesc) & 0xffff0000)) 646 panic("sca: tx descriptors cross architecural boundary"); 647 if ((buf_p & 0xff000000) != 648 ((buf_p + SCA_BSIZE * scp->sp_ntxdesc) & 0xff000000)) 649 panic("sca: tx buffers cross architecural boundary"); 650 #endif 651 652 for (i = 0 ; i < scp->sp_ntxdesc ; i++) { 653 /* 654 * desc_p points to the physcial address of the NEXT desc 655 */ 656 desc_p += sizeof(sca_desc_t); 657 658 sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff); 659 sca_desc_write_bufp(sc, desc, buf_p); 660 sca_desc_write_buflen(sc, desc, SCA_BSIZE); 661 sca_desc_write_stat(sc, desc, 0); 662 663 desc++; /* point to the next descriptor */ 664 buf_p += SCA_BSIZE; 665 } 666 667 /* 668 * "heal" the circular list by making the last entry point to the 669 * first. 
670 */ 671 sca_desc_write_chainp(sc, desc - 1, scp->sp_txdesc_p & 0x0000ffff); 672 673 /* 674 * Now, initialize the transmit DMA logic 675 * 676 * CPB == chain pointer base address 677 */ 678 dmac_write_1(scp, SCA_DSR1, 0); 679 dmac_write_1(scp, SCA_DCR1, SCA_DCR_ABRT); 680 dmac_write_1(scp, SCA_DMR1, SCA_DMR_TMOD | SCA_DMR_NF); 681 /* XXX1 682 dmac_write_1(scp, SCA_DIR1, 683 (SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF)); 684 */ 685 dmac_write_1(scp, SCA_DIR1, 686 (SCA_DIR_EOM | SCA_DIR_EOT | SCA_DIR_BOF | SCA_DIR_COF)); 687 dmac_write_1(scp, SCA_CPB1, 688 (u_int8_t)((scp->sp_txdesc_p & 0x00ff0000) >> 16)); 689 690 /* 691 * now, do the same thing for receive descriptors 692 * 693 * XXX assumes that all rx desc and bufs in same page 694 */ 695 if (!sc->sc_usedma) 696 sc->scu_set_page(sc, scp->sp_rxdesc_p); 697 698 desc = scp->sp_rxdesc; 699 desc_p = scp->sp_rxdesc_p; 700 buf_p = scp->sp_rxbuf_p; 701 702 #ifdef DEBUG 703 /* make sure that we won't wrap */ 704 if ((desc_p & 0xffff0000) != 705 ((desc_p + sizeof(*desc) * scp->sp_nrxdesc) & 0xffff0000)) 706 panic("sca: rx descriptors cross architecural boundary"); 707 if ((buf_p & 0xff000000) != 708 ((buf_p + SCA_BSIZE * scp->sp_nrxdesc) & 0xff000000)) 709 panic("sca: rx buffers cross architecural boundary"); 710 #endif 711 712 for (i = 0 ; i < scp->sp_nrxdesc; i++) { 713 /* 714 * desc_p points to the physcial address of the NEXT desc 715 */ 716 desc_p += sizeof(sca_desc_t); 717 718 sca_desc_write_chainp(sc, desc, desc_p & 0x0000ffff); 719 sca_desc_write_bufp(sc, desc, buf_p); 720 /* sca_desc_write_buflen(sc, desc, SCA_BSIZE); */ 721 sca_desc_write_buflen(sc, desc, 0); 722 sca_desc_write_stat(sc, desc, 0); 723 724 desc++; /* point to the next descriptor */ 725 buf_p += SCA_BSIZE; 726 } 727 728 /* 729 * "heal" the circular list by making the last entry point to the 730 * first. 
731 */ 732 sca_desc_write_chainp(sc, desc - 1, scp->sp_rxdesc_p & 0x0000ffff); 733 734 sca_dmac_rxinit(scp); 735 736 if (sc->sc_usedma) 737 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 738 0, sc->scu_allocsize, BUS_DMASYNC_POSTWRITE); 739 else 740 sc->scu_page_off(sc); 741 } 742 743 /* 744 * reset and reinitialize the receive DMA logic 745 */ 746 static void 747 sca_dmac_rxinit(sca_port_t *scp) 748 { 749 /* 750 * ... and the receive DMA logic ... 751 */ 752 dmac_write_1(scp, SCA_DSR0, 0); /* disable DMA */ 753 dmac_write_1(scp, SCA_DCR0, SCA_DCR_ABRT); 754 755 dmac_write_1(scp, SCA_DMR0, SCA_DMR_TMOD | SCA_DMR_NF); 756 dmac_write_2(scp, SCA_BFLL0, SCA_BSIZE); 757 758 /* reset descriptors to initial state */ 759 scp->sp_rxstart = 0; 760 scp->sp_rxend = scp->sp_nrxdesc - 1; 761 762 /* 763 * CPB == chain pointer base 764 * CDA == current descriptor address 765 * EDA == error descriptor address (overwrite position) 766 * because cda can't be eda when starting we always 767 * have a single buffer gap between cda and eda 768 */ 769 dmac_write_1(scp, SCA_CPB0, 770 (u_int8_t)((scp->sp_rxdesc_p & 0x00ff0000) >> 16)); 771 dmac_write_2(scp, SCA_CDAL0, (u_int16_t)(scp->sp_rxdesc_p & 0xffff)); 772 dmac_write_2(scp, SCA_EDAL0, (u_int16_t) 773 (scp->sp_rxdesc_p + (sizeof(sca_desc_t) * scp->sp_rxend))); 774 775 /* 776 * enable receiver DMA 777 */ 778 dmac_write_1(scp, SCA_DIR0, 779 (SCA_DIR_EOT | SCA_DIR_EOM | SCA_DIR_BOF | SCA_DIR_COF)); 780 dmac_write_1(scp, SCA_DSR0, SCA_DSR_DE); 781 } 782 783 /* 784 * Queue the packet for our start routine to transmit 785 */ 786 static int 787 sca_output( 788 struct ifnet *ifp, 789 struct mbuf *m, 790 const struct sockaddr *dst, 791 struct rtentry *rt0) 792 { 793 struct hdlc_header *hdlc; 794 struct ifqueue *ifq = NULL; 795 int s, error, len; 796 short mflags; 797 ALTQ_DECL(struct altq_pktattr pktattr;) 798 799 error = 0; 800 801 if ((ifp->if_flags & IFF_UP) != IFF_UP) { 802 error = ENETDOWN; 803 goto bad; 804 } 805 806 /* 807 * If the queueing 
discipline needs packet classification, 808 * do it before prepending link headers. 809 */ 810 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr); 811 812 /* 813 * determine address family, and priority for this packet 814 */ 815 switch (dst->sa_family) { 816 #ifdef INET 817 case AF_INET: 818 #ifdef SCA_USE_FASTQ 819 if ((mtod(m, struct ip *)->ip_tos & IPTOS_LOWDELAY) 820 == IPTOS_LOWDELAY) 821 ifq = &((sca_port_t *)ifp->if_softc)->fastq; 822 #endif 823 /* 824 * Add cisco serial line header. If there is no 825 * space in the first mbuf, allocate another. 826 */ 827 M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT); 828 if (m == 0) 829 return (ENOBUFS); 830 hdlc = mtod(m, struct hdlc_header *); 831 hdlc->h_proto = htons(HDLC_PROTOCOL_IP); 832 break; 833 #endif 834 #ifdef INET6 835 case AF_INET6: 836 /* 837 * Add cisco serial line header. If there is no 838 * space in the first mbuf, allocate another. 839 */ 840 M_PREPEND(m, sizeof(struct hdlc_header), M_DONTWAIT); 841 if (m == 0) 842 return (ENOBUFS); 843 hdlc = mtod(m, struct hdlc_header *); 844 hdlc->h_proto = htons(HDLC_PROTOCOL_IPV6); 845 break; 846 #endif 847 default: 848 printf("%s: address family %d unsupported\n", 849 ifp->if_xname, dst->sa_family); 850 error = EAFNOSUPPORT; 851 goto bad; 852 } 853 854 /* finish */ 855 if ((m->m_flags & (M_BCAST | M_MCAST)) != 0) 856 hdlc->h_addr = CISCO_MULTICAST; 857 else 858 hdlc->h_addr = CISCO_UNICAST; 859 hdlc->h_resv = 0; 860 861 /* 862 * queue the packet. If interactive, use the fast queue. 
863 */ 864 mflags = m->m_flags; 865 len = m->m_pkthdr.len; 866 s = splnet(); 867 if (ifq != NULL) { 868 if (IF_QFULL(ifq)) { 869 IF_DROP(ifq); 870 m_freem(m); 871 error = ENOBUFS; 872 } else 873 IF_ENQUEUE(ifq, m); 874 } else 875 IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error); 876 if (error != 0) { 877 splx(s); 878 ifp->if_oerrors++; 879 ifp->if_collisions++; 880 return (error); 881 } 882 ifp->if_obytes += len; 883 if (mflags & M_MCAST) 884 ifp->if_omcasts++; 885 886 sca_start(ifp); 887 splx(s); 888 889 return (error); 890 891 bad: 892 if (m) 893 m_freem(m); 894 return (error); 895 } 896 897 static int 898 sca_ioctl(struct ifnet *ifp, u_long cmd, void *data) 899 { 900 struct ifreq *ifr; 901 struct ifaddr *ifa; 902 int error; 903 int s; 904 905 s = splnet(); 906 907 ifr = (struct ifreq *)data; 908 ifa = (struct ifaddr *)data; 909 error = 0; 910 911 switch (cmd) { 912 case SIOCINITIFADDR: 913 switch(ifa->ifa_addr->sa_family) { 914 #ifdef INET 915 case AF_INET: 916 #endif 917 #ifdef INET6 918 case AF_INET6: 919 #endif 920 #if defined(INET) || defined(INET6) 921 ifp->if_flags |= IFF_UP; 922 sca_port_up(ifp->if_softc); 923 break; 924 #endif 925 default: 926 error = EAFNOSUPPORT; 927 break; 928 } 929 break; 930 931 case SIOCSIFDSTADDR: 932 #ifdef INET 933 if (ifa->ifa_addr->sa_family == AF_INET) 934 break; 935 #endif 936 #ifdef INET6 937 if (ifa->ifa_addr->sa_family == AF_INET6) 938 break; 939 #endif 940 error = EAFNOSUPPORT; 941 break; 942 943 case SIOCADDMULTI: 944 case SIOCDELMULTI: 945 /* XXX need multicast group management code */ 946 if (ifr == 0) { 947 error = EAFNOSUPPORT; /* XXX */ 948 break; 949 } 950 switch (ifreq_getaddr(cmd, ifr)->sa_family) { 951 #ifdef INET 952 case AF_INET: 953 break; 954 #endif 955 #ifdef INET6 956 case AF_INET6: 957 break; 958 #endif 959 default: 960 error = EAFNOSUPPORT; 961 break; 962 } 963 break; 964 965 case SIOCSIFFLAGS: 966 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 967 break; 968 if (ifr->ifr_flags & IFF_UP) { 969 
ifp->if_flags |= IFF_UP; 970 sca_port_up(ifp->if_softc); 971 } else { 972 ifp->if_flags &= ~IFF_UP; 973 sca_port_down(ifp->if_softc); 974 } 975 976 break; 977 978 default: 979 error = ifioctl_common(ifp, cmd, data); 980 } 981 982 splx(s); 983 return error; 984 } 985 986 /* 987 * start packet transmission on the interface 988 * 989 * MUST BE CALLED AT splnet() 990 */ 991 static void 992 sca_start(struct ifnet *ifp) 993 { 994 sca_port_t *scp = ifp->if_softc; 995 struct sca_softc *sc = scp->sca; 996 struct mbuf *m, *mb_head; 997 sca_desc_t *desc; 998 u_int8_t *buf, stat; 999 u_int32_t buf_p; 1000 int nexttx; 1001 int trigger_xmit; 1002 u_int len; 1003 1004 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: enter start\n")); 1005 1006 /* 1007 * can't queue when we are full or transmitter is busy 1008 */ 1009 #ifdef oldcode 1010 if ((scp->sp_txinuse >= (scp->sp_ntxdesc - 1)) 1011 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE)) 1012 return; 1013 #else 1014 if (scp->sp_txinuse 1015 || ((ifp->if_flags & IFF_OACTIVE) == IFF_OACTIVE)) 1016 return; 1017 #endif 1018 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: txinuse %d\n", scp->sp_txinuse)); 1019 1020 /* 1021 * XXX assume that all tx desc and bufs in same page 1022 */ 1023 if (sc->sc_usedma) 1024 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 1025 0, sc->scu_allocsize, 1026 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1027 else { 1028 sc->scu_page_on(sc); 1029 sc->scu_set_page(sc, scp->sp_txdesc_p); 1030 } 1031 1032 trigger_xmit = 0; 1033 1034 txloop: 1035 IF_DEQUEUE(&scp->linkq, mb_head); 1036 if (mb_head == NULL) 1037 #ifdef SCA_USE_FASTQ 1038 IF_DEQUEUE(&scp->fastq, mb_head); 1039 if (mb_head == NULL) 1040 #endif 1041 IFQ_DEQUEUE(&ifp->if_snd, mb_head); 1042 if (mb_head == NULL) 1043 goto start_xmit; 1044 1045 SCA_DPRINTF(SCA_DEBUG_TX, ("TX: got mbuf\n")); 1046 #ifdef oldcode 1047 if (scp->txinuse != 0) { 1048 /* Kill EOT interrupts on the previous descriptor. 
		 */
		desc = &scp->sp_txdesc[scp->txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat & ~SCA_DESC_EOT);

		/* Figure out what the next free descriptor is. */
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	} else
		nexttx = 0;
#endif	/* oldcode */

	if (scp->sp_txinuse)
		nexttx = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	else
		nexttx = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: nexttx %d\n", nexttx));

	buf = scp->sp_txbuf + SCA_BSIZE * nexttx;
	buf_p = scp->sp_txbuf_p + SCA_BSIZE * nexttx;

	/* XXX hoping we can delay the desc write till after we don't drop. */
	desc = &scp->sp_txdesc[nexttx];

	/* XXX isn't this set already?? */
	sca_desc_write_bufp(sc, desc, buf_p);
	len = 0;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: buf %x buf_p %x\n", (u_int)buf, buf_p));

#if 0	/* uncomment this for a core in cc1 */
X
#endif
	/*
	 * Run through the chain, copying data into the descriptor as we
	 * go.  If it won't fit in one transmission block, drop the packet.
	 * No, this isn't nice, but most of the time it _will_ fit.
	 */
	for (m = mb_head ; m != NULL ; m = m->m_next) {
		if (m->m_len != 0) {
			len += m->m_len;
			if (len > SCA_BSIZE) {
				m_freem(mb_head);
				goto txloop;
			}
			SCA_DPRINTF(SCA_DEBUG_TX,
			    ("TX: about to mbuf len %d\n", m->m_len));

			if (sc->sc_usedma)
				memcpy(buf, mtod(m, u_int8_t *), m->m_len);
			else
				bus_space_write_region_1(sc->scu_memt,
				    sc->scu_memh, sca_page_addr(sc, buf_p),
				    mtod(m, u_int8_t *), m->m_len);
			buf += m->m_len;
			buf_p += m->m_len;
		}
	}

	/* set the buffer, the length, and mark end of frame and end of xfer */
	sca_desc_write_buflen(sc, desc, len);
	sca_desc_write_stat(sc, desc, SCA_DESC_EOM);

	ifp->if_opackets++;

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	bpf_mtap(ifp, mb_head);

	m_freem(mb_head);

	scp->sp_txcur = nexttx;
	scp->sp_txinuse++;
	trigger_xmit = 1;

	SCA_DPRINTF(SCA_DEBUG_TX,
	    ("TX: inuse %d index %d\n", scp->sp_txinuse, scp->sp_txcur));

	/*
	 * XXX so didn't this used to limit us to 1?! - multi may be untested
	 * sp_ntxdesc used to be hard coded to 2 with claim of a too hard
	 * to find bug
	 */
#ifdef oldcode
	if (scp->sp_txinuse < (scp->sp_ntxdesc - 1))
#endif
	if (scp->sp_txinuse < scp->sp_ntxdesc)
		goto txloop;

 start_xmit:
	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: trigger_xmit %d\n", trigger_xmit));

	if (trigger_xmit != 0) {
		/* set EOT on final descriptor */
		desc = &scp->sp_txdesc[scp->sp_txcur];
		stat = sca_desc_read_stat(sc, desc);
		sca_desc_write_stat(sc, desc, stat | SCA_DESC_EOT);
	}

	if (sc->sc_usedma)
		bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 0,
		    sc->scu_allocsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (trigger_xmit != 0)
		sca_port_starttx(scp);

	if (!sc->sc_usedma)
		sc->scu_page_off(sc);
}

/*
 * interface watchdog hook; intentionally a no-op here.
 */
static void
sca_watchdog(struct ifnet *ifp)
{
}

/*
 * Hardware interrupt handler.  Loops reading the three interrupt
 * status registers until all sources are quiet, dispatching DMAC and
 * MSCI conditions to the per-port handlers.  Returns non-zero if at
 * least one interrupt was processed.
 */
int
sca_hardintr(struct sca_softc *sc)
{
	u_int8_t isr0, isr1, isr2;
	int ret;

	ret = 0;	/* non-zero means we processed at least one interrupt */

	SCA_DPRINTF(SCA_DEBUG_INTR, ("sca_hardintr entered\n"));

	while (1) {
		/*
		 * read SCA interrupts
		 */
		isr0 = sca_read_1(sc, SCA_ISR0);
		isr1 = sca_read_1(sc, SCA_ISR1);
		isr2 = sca_read_1(sc, SCA_ISR2);

		if (isr0 == 0 && isr1 == 0 && isr2 == 0)
			break;

		SCA_DPRINTF(SCA_DEBUG_INTR,
			    ("isr0 = %02x, isr1 = %02x, isr2 = %02x\n",
			     isr0, isr1, isr2));

		/*
		 * check DMAC interrupt
		 * (low nibble == port 0, high nibble == port 1)
		 */
		if (isr1 & 0x0f)
			ret += sca_dmac_intr(&sc->sc_ports[0],
					     isr1 & 0x0f);

		if (isr1 & 0xf0)
			ret += sca_dmac_intr(&sc->sc_ports[1],
					     (isr1 & 0xf0) >> 4);

		/*
		 * msci interrupts
		 */
		if (isr0 & 0x0f)
			ret += sca_msci_intr(&sc->sc_ports[0], isr0 & 0x0f);

		if (isr0 & 0xf0)
			ret += sca_msci_intr(&sc->sc_ports[1],
			    (isr0 & 0xf0) >> 4);

#if 0 /* We don't GET timer interrupts, we have them disabled (msci IE20) */
		if (isr2)
			ret += sca_timer_intr(sc, isr2);
#endif
	}

	return (ret);
}

/*
 * Service a DMAC interrupt for one port: acknowledge and decode the
 * DMA status register in a loop, handling counter overflow, buffer
 * overflow and end-of-transfer for the transmit channel, then the
 * receive channel.
 */
static int
sca_dmac_intr(sca_port_t *scp, u_int8_t isr)
{
	u_int8_t dsr;
	int ret;

	ret = 0;

	/*
	 * Check transmit channel
	 */
	if (isr & (SCA_ISR1_DMAC_TX0A | SCA_ISR1_DMAC_TX0B)) {
		SCA_DPRINTF(SCA_DEBUG_INTR,
		    ("TX INTERRUPT port %d\n", scp->sp_port));

		dsr = 1;
		while (dsr != 0) {
			ret++;
			/*
			 * reset interrupt
			 */
			dsr = dmac_read_1(scp, SCA_DSR1);
			dmac_write_1(scp, SCA_DSR1,
				     dsr | SCA_DSR_DEWD);

			/*
			 * filter out the bits we don't care about
			 */
			dsr &= ( SCA_DSR_COF | SCA_DSR_BOF | SCA_DSR_EOT);
			if (dsr == 0)
				break;

			/*
			 * check for counter overflow
			 */
			if (dsr & SCA_DSR_COF) {
				printf("%s: TXDMA counter overflow\n",
				       scp->sp_if.if_xname);

				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for buffer overflow
			 */
			if (dsr & SCA_DSR_BOF) {
				printf("%s: TXDMA buffer overflow, cda 0x%04x, eda 0x%04x, cpb 0x%02x\n",
				       scp->sp_if.if_xname,
				       dmac_read_2(scp, SCA_CDAL1),
				       dmac_read_2(scp, SCA_EDAL1),
				       dmac_read_1(scp, SCA_CPB1));

				/*
				 * Yikes.  Arrange for a full
				 * transmitter restart.
				 */
				scp->sp_if.if_flags &= ~IFF_OACTIVE;
				scp->sp_txcur = 0;
				scp->sp_txinuse = 0;
			}

			/*
			 * check for end of transfer, which is not
			 * an error.
It means that all data queued 1286 * was transmitted, and we mark ourself as 1287 * not in use and stop the watchdog timer. 1288 */ 1289 if (dsr & SCA_DSR_EOT) { 1290 SCA_DPRINTF(SCA_DEBUG_TX, 1291 ("Transmit completed. cda %x eda %x dsr %x\n", 1292 dmac_read_2(scp, SCA_CDAL1), 1293 dmac_read_2(scp, SCA_EDAL1), 1294 dsr)); 1295 1296 scp->sp_if.if_flags &= ~IFF_OACTIVE; 1297 scp->sp_txcur = 0; 1298 scp->sp_txinuse = 0; 1299 1300 /* 1301 * check for more packets 1302 */ 1303 sca_start(&scp->sp_if); 1304 } 1305 } 1306 } 1307 /* 1308 * receive channel check 1309 */ 1310 if (isr & (SCA_ISR1_DMAC_RX0A | SCA_ISR1_DMAC_RX0B)) { 1311 SCA_DPRINTF(SCA_DEBUG_INTR, ("RX INTERRUPT port %d\n", 1312 (scp == &scp->sca->sc_ports[0] ? 0 : 1))); 1313 1314 dsr = 1; 1315 while (dsr != 0) { 1316 ret++; 1317 1318 dsr = dmac_read_1(scp, SCA_DSR0); 1319 dmac_write_1(scp, SCA_DSR0, dsr | SCA_DSR_DEWD); 1320 1321 /* 1322 * filter out the bits we don't care about 1323 */ 1324 dsr &= (SCA_DSR_EOM | SCA_DSR_COF 1325 | SCA_DSR_BOF | SCA_DSR_EOT); 1326 if (dsr == 0) 1327 break; 1328 1329 /* 1330 * End of frame 1331 */ 1332 if (dsr & SCA_DSR_EOM) { 1333 SCA_DPRINTF(SCA_DEBUG_RX, ("Got a frame!\n")); 1334 1335 sca_get_packets(scp); 1336 } 1337 1338 /* 1339 * check for counter overflow 1340 */ 1341 if (dsr & SCA_DSR_COF) { 1342 printf("%s: RXDMA counter overflow\n", 1343 scp->sp_if.if_xname); 1344 1345 sca_dmac_rxinit(scp); 1346 } 1347 1348 /* 1349 * check for end of transfer, which means we 1350 * ran out of descriptors to receive into. 1351 * This means the line is much faster than 1352 * we can handle. 
1353 */ 1354 if (dsr & (SCA_DSR_BOF | SCA_DSR_EOT)) { 1355 printf("%s: RXDMA buffer overflow\n", 1356 scp->sp_if.if_xname); 1357 1358 sca_dmac_rxinit(scp); 1359 } 1360 } 1361 } 1362 1363 return ret; 1364 } 1365 1366 static int 1367 sca_msci_intr(sca_port_t *scp, u_int8_t isr) 1368 { 1369 u_int8_t st1, trc0; 1370 1371 /* get and clear the specific interrupt -- should act on it :)*/ 1372 if ((st1 = msci_read_1(scp, SCA_ST10))) { 1373 /* clear the interrupt */ 1374 msci_write_1(scp, SCA_ST10, st1); 1375 1376 if (st1 & SCA_ST1_UDRN) { 1377 /* underrun -- try to increase ready control */ 1378 trc0 = msci_read_1(scp, SCA_TRC00); 1379 if (trc0 == 0x1f) 1380 printf("TX: underrun - fifo depth maxed\n"); 1381 else { 1382 if ((trc0 += 2) > 0x1f) 1383 trc0 = 0x1f; 1384 SCA_DPRINTF(SCA_DEBUG_TX, 1385 ("TX: udrn - incr fifo to %d\n", trc0)); 1386 msci_write_1(scp, SCA_TRC00, trc0); 1387 } 1388 } 1389 } 1390 return (0); 1391 } 1392 1393 static void 1394 sca_get_packets(sca_port_t *scp) 1395 { 1396 struct sca_softc *sc; 1397 1398 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: sca_get_packets\n")); 1399 1400 sc = scp->sca; 1401 if (sc->sc_usedma) 1402 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 1403 0, sc->scu_allocsize, 1404 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1405 else { 1406 /* 1407 * XXX this code is unable to deal with rx stuff 1408 * in more than 1 page 1409 */ 1410 sc->scu_page_on(sc); 1411 sc->scu_set_page(sc, scp->sp_rxdesc_p); 1412 } 1413 1414 /* process as many frames as are available */ 1415 while (sca_frame_avail(scp)) { 1416 sca_frame_process(scp); 1417 sca_frame_read_done(scp); 1418 } 1419 1420 if (sc->sc_usedma) 1421 bus_dmamap_sync(sc->scu_dmat, sc->scu_dmam, 1422 0, sc->scu_allocsize, 1423 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1424 else 1425 sc->scu_page_off(sc); 1426 } 1427 1428 /* 1429 * Starting with the first descriptor we wanted to read into, up to but 1430 * not including the current SCA read descriptor, look for a packet. 
1431 * 1432 * must be called at splnet() 1433 */ 1434 static int 1435 sca_frame_avail(sca_port_t *scp) 1436 { 1437 u_int16_t cda; 1438 u_int32_t desc_p; /* physical address (lower 16 bits) */ 1439 sca_desc_t *desc; 1440 u_int8_t rxstat; 1441 int cdaidx, toolong; 1442 1443 /* 1444 * Read the current descriptor from the SCA. 1445 */ 1446 cda = dmac_read_2(scp, SCA_CDAL0); 1447 1448 /* 1449 * calculate the index of the current descriptor 1450 */ 1451 desc_p = (scp->sp_rxdesc_p & 0xFFFF); 1452 desc_p = cda - desc_p; 1453 cdaidx = desc_p / sizeof(sca_desc_t); 1454 1455 SCA_DPRINTF(SCA_DEBUG_RX, 1456 ("RX: cda %x desc_p %x cdaidx %u, nrxdesc %d rxstart %d\n", 1457 cda, desc_p, cdaidx, scp->sp_nrxdesc, scp->sp_rxstart)); 1458 1459 /* note confusion */ 1460 if (cdaidx >= scp->sp_nrxdesc) 1461 panic("current descriptor index out of range"); 1462 1463 /* see if we have a valid frame available */ 1464 toolong = 0; 1465 for (; scp->sp_rxstart != cdaidx; sca_frame_read_done(scp)) { 1466 /* 1467 * We might have a valid descriptor. Set up a pointer 1468 * to the kva address for it so we can more easily examine 1469 * the contents. 
1470 */ 1471 desc = &scp->sp_rxdesc[scp->sp_rxstart]; 1472 rxstat = sca_desc_read_stat(scp->sca, desc); 1473 1474 SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: idx %d rxstat %x\n", 1475 scp->sp_port, scp->sp_rxstart, rxstat)); 1476 1477 SCA_DPRINTF(SCA_DEBUG_RX, ("port %d RX: buflen %d\n", 1478 scp->sp_port, sca_desc_read_buflen(scp->sca, desc))); 1479 1480 /* 1481 * check for errors 1482 */ 1483 if (rxstat & SCA_DESC_ERRORS) { 1484 /* 1485 * consider an error condition the end 1486 * of a frame 1487 */ 1488 scp->sp_if.if_ierrors++; 1489 toolong = 0; 1490 continue; 1491 } 1492 1493 /* 1494 * if we aren't skipping overlong frames 1495 * we are done, otherwise reset and look for 1496 * another good frame 1497 */ 1498 if (rxstat & SCA_DESC_EOM) { 1499 if (!toolong) 1500 return (1); 1501 toolong = 0; 1502 } else if (!toolong) { 1503 /* 1504 * we currently don't deal with frames 1505 * larger than a single buffer (fixed MTU) 1506 */ 1507 scp->sp_if.if_ierrors++; 1508 toolong = 1; 1509 } 1510 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: idx %d no EOM\n", 1511 scp->sp_rxstart)); 1512 } 1513 1514 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: returning none\n")); 1515 return 0; 1516 } 1517 1518 /* 1519 * Pass the packet up to the kernel if it is a packet we want to pay 1520 * attention to. 
1521 * 1522 * MUST BE CALLED AT splnet() 1523 */ 1524 static void 1525 sca_frame_process(sca_port_t *scp) 1526 { 1527 struct ifqueue *ifq; 1528 struct hdlc_header *hdlc; 1529 struct cisco_pkt *cisco; 1530 sca_desc_t *desc; 1531 struct mbuf *m; 1532 u_int8_t *bufp; 1533 u_int16_t len; 1534 u_int32_t t; 1535 1536 t = time_uptime * 1000; 1537 desc = &scp->sp_rxdesc[scp->sp_rxstart]; 1538 bufp = scp->sp_rxbuf + SCA_BSIZE * scp->sp_rxstart; 1539 len = sca_desc_read_buflen(scp->sca, desc); 1540 1541 SCA_DPRINTF(SCA_DEBUG_RX, 1542 ("RX: desc %lx bufp %lx len %d\n", (bus_addr_t)desc, 1543 (bus_addr_t)bufp, len)); 1544 1545 #if SCA_DEBUG_LEVEL > 0 1546 if (sca_debug & SCA_DEBUG_RXPKT) 1547 sca_frame_print(scp, desc, bufp); 1548 #endif 1549 /* 1550 * skip packets that are too short 1551 */ 1552 if (len < sizeof(struct hdlc_header)) { 1553 scp->sp_if.if_ierrors++; 1554 return; 1555 } 1556 1557 m = sca_mbuf_alloc(scp->sca, bufp, len); 1558 if (m == NULL) { 1559 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no mbuf!\n")); 1560 return; 1561 } 1562 1563 /* 1564 * read and then strip off the HDLC information 1565 */ 1566 m = m_pullup(m, sizeof(struct hdlc_header)); 1567 if (m == NULL) { 1568 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n")); 1569 return; 1570 } 1571 1572 bpf_mtap(&scp->sp_if, m); 1573 1574 scp->sp_if.if_ipackets++; 1575 1576 hdlc = mtod(m, struct hdlc_header *); 1577 switch (ntohs(hdlc->h_proto)) { 1578 #ifdef INET 1579 case HDLC_PROTOCOL_IP: 1580 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n")); 1581 m->m_pkthdr.rcvif = &scp->sp_if; 1582 m->m_pkthdr.len -= sizeof(struct hdlc_header); 1583 m->m_data += sizeof(struct hdlc_header); 1584 m->m_len -= sizeof(struct hdlc_header); 1585 ifq = &ipintrq; 1586 schednetisr(NETISR_IP); 1587 break; 1588 #endif /* INET */ 1589 #ifdef INET6 1590 case HDLC_PROTOCOL_IPV6: 1591 SCA_DPRINTF(SCA_DEBUG_RX, ("Received IP packet\n")); 1592 m->m_pkthdr.rcvif = &scp->sp_if; 1593 m->m_pkthdr.len -= sizeof(struct hdlc_header); 1594 m->m_data += 
sizeof(struct hdlc_header); 1595 m->m_len -= sizeof(struct hdlc_header); 1596 ifq = &ip6intrq; 1597 schednetisr(NETISR_IPV6); 1598 break; 1599 #endif /* INET6 */ 1600 case CISCO_KEEPALIVE: 1601 SCA_DPRINTF(SCA_DEBUG_CISCO, 1602 ("Received CISCO keepalive packet\n")); 1603 1604 if (len < CISCO_PKT_LEN) { 1605 SCA_DPRINTF(SCA_DEBUG_CISCO, 1606 ("short CISCO packet %d, wanted %d\n", 1607 len, CISCO_PKT_LEN)); 1608 scp->sp_if.if_ierrors++; 1609 goto dropit; 1610 } 1611 1612 m = m_pullup(m, sizeof(struct cisco_pkt)); 1613 if (m == NULL) { 1614 SCA_DPRINTF(SCA_DEBUG_RX, ("RX: no m_pullup!\n")); 1615 return; 1616 } 1617 1618 cisco = (struct cisco_pkt *) 1619 (mtod(m, u_int8_t *) + HDLC_HDRLEN); 1620 m->m_pkthdr.rcvif = &scp->sp_if; 1621 1622 switch (ntohl(cisco->type)) { 1623 case CISCO_ADDR_REQ: 1624 printf("Got CISCO addr_req, ignoring\n"); 1625 scp->sp_if.if_ierrors++; 1626 goto dropit; 1627 1628 case CISCO_ADDR_REPLY: 1629 printf("Got CISCO addr_reply, ignoring\n"); 1630 scp->sp_if.if_ierrors++; 1631 goto dropit; 1632 1633 case CISCO_KEEPALIVE_REQ: 1634 1635 SCA_DPRINTF(SCA_DEBUG_CISCO, 1636 ("Received KA, mseq %d," 1637 " yseq %d, rel 0x%04x, t0" 1638 " %04x, t1 %04x\n", 1639 ntohl(cisco->par1), ntohl(cisco->par2), 1640 ntohs(cisco->rel), ntohs(cisco->time0), 1641 ntohs(cisco->time1))); 1642 1643 scp->cka_lastrx = ntohl(cisco->par1); 1644 scp->cka_lasttx++; 1645 1646 /* 1647 * schedule the transmit right here. 
1648 */ 1649 cisco->par2 = cisco->par1; 1650 cisco->par1 = htonl(scp->cka_lasttx); 1651 cisco->time0 = htons((u_int16_t)(t >> 16)); 1652 cisco->time1 = htons((u_int16_t)(t & 0x0000ffff)); 1653 1654 ifq = &scp->linkq; 1655 if (IF_QFULL(ifq)) { 1656 IF_DROP(ifq); 1657 goto dropit; 1658 } 1659 IF_ENQUEUE(ifq, m); 1660 1661 sca_start(&scp->sp_if); 1662 1663 /* since start may have reset this fix */ 1664 if (!scp->sca->sc_usedma) { 1665 scp->sca->scu_set_page(scp->sca, 1666 scp->sp_rxdesc_p); 1667 scp->sca->scu_page_on(scp->sca); 1668 } 1669 return; 1670 default: 1671 SCA_DPRINTF(SCA_DEBUG_CISCO, 1672 ("Unknown CISCO keepalive protocol 0x%04x\n", 1673 ntohl(cisco->type))); 1674 1675 scp->sp_if.if_noproto++; 1676 goto dropit; 1677 } 1678 return; 1679 default: 1680 SCA_DPRINTF(SCA_DEBUG_RX, 1681 ("Unknown/unexpected ethertype 0x%04x\n", 1682 ntohs(hdlc->h_proto))); 1683 scp->sp_if.if_noproto++; 1684 goto dropit; 1685 } 1686 1687 /* queue the packet */ 1688 if (!IF_QFULL(ifq)) { 1689 IF_ENQUEUE(ifq, m); 1690 } else { 1691 IF_DROP(ifq); 1692 scp->sp_if.if_iqdrops++; 1693 goto dropit; 1694 } 1695 return; 1696 dropit: 1697 if (m) 1698 m_freem(m); 1699 return; 1700 } 1701 1702 #if SCA_DEBUG_LEVEL > 0 1703 /* 1704 * do a hex dump of the packet received into descriptor "desc" with 1705 * data buffer "p" 1706 */ 1707 static void 1708 sca_frame_print(sca_port_t *scp, sca_desc_t *desc, u_int8_t *p) 1709 { 1710 int i; 1711 int nothing_yet = 1; 1712 struct sca_softc *sc; 1713 u_int len; 1714 1715 sc = scp->sca; 1716 printf("desc va %p: chainp 0x%x bufp 0x%0x stat 0x%0x len %d\n", 1717 desc, 1718 sca_desc_read_chainp(sc, desc), 1719 sca_desc_read_bufp(sc, desc), 1720 sca_desc_read_stat(sc, desc), 1721 (len = sca_desc_read_buflen(sc, desc))); 1722 1723 for (i = 0 ; i < len && i < 256; i++) { 1724 if (nothing_yet == 1 && 1725 (sc->sc_usedma ? 
		    *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))) == 0) {
			p++;
			continue;
		}
		nothing_yet = 0;
		if (i % 16 == 0)
			printf("\n");
		printf("%02x ",
		    (sc->sc_usedma ? *p
		    : bus_space_read_1(sc->scu_memt, sc->scu_memh,
		    sca_page_addr(sc, p))));
		p++;
	}

	/*
	 * NOTE(review): "!= 1" looks like an off-by-one; the trailing
	 * newline seems wanted whenever i % 16 != 0.  Debug-only code,
	 * so left as-is -- confirm before changing.
	 */
	if (i % 16 != 1)
		printf("\n");
}
#endif

/*
 * adjust things because we have just read the current starting
 * frame
 *
 * must be called at splnet()
 */
static void
sca_frame_read_done(sca_port_t *scp)
{
	u_int16_t edesc_p;

	/* update where our indicies are */
	scp->sp_rxend = scp->sp_rxstart;
	scp->sp_rxstart = (scp->sp_rxstart + 1) % scp->sp_nrxdesc;

	/* update the error [end] descriptor */
	edesc_p = (u_int16_t)scp->sp_rxdesc_p +
	    (sizeof(sca_desc_t) * scp->sp_rxend);
	dmac_write_2(scp, SCA_EDAL0, edesc_p);
}

/*
 * set a port to the "up" state: raise DTR and RTS, enable the port's
 * interrupt nibbles in IER0/IER1, enable the MSCI transmitter and
 * receiver, and reset the transmit-ring/keepalive state.
 */
static void
sca_port_up(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
	struct timeval now;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * reset things
	 */
#if 0
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXRESET);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXRESET);
#endif
	/*
	 * clear in-use flag
	 */
	scp->sp_if.if_flags &= ~IFF_OACTIVE;
	scp->sp_if.if_flags |= IFF_RUNNING;

	/*
	 * raise DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 1);

	/*
	 * raise RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_HIGH);

#if 0
	/*
	 * enable interrupts (no timer IER2)
	 */
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | ier1);
#else
	/* low nibble is port 0, high nibble is port 1 */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0x0f);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) | 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) | 0xf0);
	}
#endif

	/*
	 * enable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXENABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXENABLE);

	/*
	 * reset internal state
	 */
	scp->sp_txinuse = 0;
	scp->sp_txcur = 0;
	getmicrotime(&now);
	scp->cka_lasttx = now.tv_usec;
	scp->cka_lastrx = 0;
}

/*
 * set a port to the "down" state: drop DTR and RTS, mask the port's
 * interrupt nibbles, disable the MSCI transmitter and receiver, and
 * clear the interface running/active flags.
 */
static void
sca_port_down(sca_port_t *scp)
{
	struct sca_softc *sc = scp->sca;
#if 0
	u_int8_t ier0, ier1;
#endif

	/*
	 * lower DTR
	 */
	sc->sc_dtr_callback(sc->sc_aux, scp->sp_port, 0);

	/*
	 * lower RTS
	 */
	msci_write_1(scp, SCA_CTL0,
	    (msci_read_1(scp, SCA_CTL0) & ~SCA_CTL_RTS_MASK)
	    | SCA_CTL_RTS_LOW);

	/*
	 * disable interrupts
	 */
#if 0
	ier0 = SCA_IER0_MSCI_RXRDY0 | SCA_IER0_MSCI_TXRDY0
	    | SCA_IER0_MSCI_RXINT0 | SCA_IER0_MSCI_TXINT0;
	ier1 = SCA_IER1_DMAC_RX0A | SCA_IER1_DMAC_RX0B
	    | SCA_IER1_DMAC_TX0A | SCA_IER1_DMAC_TX0B;
	if (scp->sp_port == 1) {
		ier0 <<= 4;
		ier1 <<= 4;
	}
	sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & ~ier0);
	sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & ~ier1);
#else
	/* keep the other port's nibble, clear ours */
	if (scp->sp_port == 0) {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0xf0);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0xf0);
	} else {
		sca_write_1(sc, SCA_IER0, sca_read_1(sc, SCA_IER0) & 0x0f);
		sca_write_1(sc, SCA_IER1, sca_read_1(sc, SCA_IER1) & 0x0f);
	}
#endif

	/*
	 * disable transmit and receive
	 */
	msci_write_1(scp, SCA_CMD0, SCA_CMD_RXDISABLE);
	msci_write_1(scp, SCA_CMD0, SCA_CMD_TXDISABLE);

	/*
	 * no, we're not in use anymore
	 */
	scp->sp_if.if_flags &= ~(IFF_OACTIVE|IFF_RUNNING);
}

/*
 * disable all DMA and interrupts for all ports at once.
 */
void
sca_shutdown(struct sca_softc *sca)
{
	/*
	 * disable DMA and interrupts
	 */
	sca_write_1(sca, SCA_DMER, 0);
	sca_write_1(sca, SCA_IER0, 0);
	sca_write_1(sca, SCA_IER1, 0);
}

/*
 * If there are packets to transmit, start the transmit DMA logic.
 * Programs CDAL1/EDAL1 to cover the queued descriptors and sets
 * SCA_DSR_DE to kick the DMAC.  No-op when already active or when
 * nothing is queued.
 */
static void
sca_port_starttx(sca_port_t *scp)
{
	u_int32_t startdesc_p, enddesc_p;
	int enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: starttx\n"));

	if (((scp->sp_if.if_flags & IFF_OACTIVE) == IFF_OACTIVE)
	    || scp->sp_txinuse == 0)
		return;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: setting oactive\n"));

	scp->sp_if.if_flags |= IFF_OACTIVE;

	/*
	 * We have something to do, since we have at least one packet
	 * waiting, and we are not already marked as active.
	 */
	enddesc = (scp->sp_txcur + 1) % scp->sp_ntxdesc;
	startdesc_p = scp->sp_txdesc_p;
	enddesc_p = scp->sp_txdesc_p + sizeof(sca_desc_t) * enddesc;

	SCA_DPRINTF(SCA_DEBUG_TX, ("TX: start %x end %x\n",
	    startdesc_p, enddesc_p));

	dmac_write_2(scp, SCA_EDAL1, (u_int16_t)(enddesc_p & 0x0000ffff));
	dmac_write_2(scp, SCA_CDAL1,
	    (u_int16_t)(startdesc_p & 0x0000ffff));

	/*
	 * enable the DMA
	 */
	dmac_write_1(scp, SCA_DSR1, SCA_DSR_DE);
}

/*
 * allocate an mbuf at least long enough to hold "len" bytes.
 * If "p" is non-NULL, copy "len" bytes from it into the new mbuf,
 * otherwise let the caller handle copying the data in.
 *
 * Returns NULL if len exceeds MCLBYTES or allocation fails; the
 * caller owns (and must free) the returned mbuf.
 */
static struct mbuf *
sca_mbuf_alloc(struct sca_softc *sc, void *p, u_int len)
{
	struct mbuf *m;

	/*
	 * allocate an mbuf and copy the important bits of data
	 * into it.  If the packet won't fit in the header,
	 * allocate a cluster for it and store it there.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	if (len > MHLEN) {
		if (len > MCLBYTES) {
			m_freem(m);
			return NULL;
		}
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}
	if (p != NULL) {
		/* XXX do we need to sync here? */
		if (sc->sc_usedma)
			memcpy(mtod(m, void *), p, len);
		else
			/* non-DMA: data lives in the mapped window */
			bus_space_read_region_1(sc->scu_memt, sc->scu_memh,
			    sca_page_addr(sc, p), mtod(m, u_int8_t *), len);
	}
	m->m_len = len;
	m->m_pkthdr.len = len;

	return (m);
}

/*
 * get the base clock
 *
 * Calibrates the SCA base clock by free-running timer 0 against the
 * system clock (microtime) for ~3/4 second, then converts the tick
 * count to Hz and stores it, rounded to the nearest 200 Hz, in
 * sc->sc_baseclock.
 */
void
sca_get_base_clock(struct sca_softc *sc)
{
	struct timeval btv, ctv, dtv;
	u_int64_t bcnt;
	u_int32_t cnt;
	u_int16_t subcnt;

	/* disable the timer, set prescale to 0 */
	sca_write_1(sc, SCA_TCSR0, 0);
	sca_write_1(sc, SCA_TEPR0, 0);

	/* reset the counter */
	(void)sca_read_1(sc, SCA_TCSR0);
	subcnt = sca_read_2(sc, SCA_TCNTL0);

	/* count to max */
	sca_write_2(sc, SCA_TCONRL0, 0xffff);

	cnt = 0;
	microtime(&btv);
	/* start the timer -- no interrupt enable */
	sca_write_1(sc, SCA_TCSR0, SCA_TCSR_TME);
	for (;;) {
		microtime(&ctv);

		/* end around 3/4 of a second */
		timersub(&ctv, &btv, &dtv);
		if (dtv.tv_usec >= 750000)
			break;

		/* spin until the timer hits its compare match */
		while (!(sca_read_1(sc, SCA_TCSR0) & SCA_TCSR_CMF))
			;
		/* reset the timer */
		(void)sca_read_2(sc, SCA_TCNTL0);
		cnt++;
	}

	/* stop the timer */
	sca_write_1(sc, SCA_TCSR0, 0);

	subcnt = sca_read_2(sc, SCA_TCNTL0);
	/* add the slop in and get the total timer ticks */
	cnt = (cnt << 16) | subcnt;

	/* cnt is 1/8 the actual time */
	bcnt = cnt * 8;
	/* make it proportional to 3/4 of a second */
	bcnt *= (u_int64_t)750000;
	bcnt /= (u_int64_t)dtv.tv_usec;
	cnt = bcnt;

	/* make it Hz (the measured 3/4-second interval scaled by 4/3) */
	cnt *= 4;
	cnt /= 3;

	SCA_DPRINTF(SCA_DEBUG_CLOCK,
	    ("sca: unadjusted base %lu Hz\n", (u_long)cnt));

	/*
	 * round to the nearest 200 -- this allows for +-3 ticks error
	 */
	sc->sc_baseclock = ((cnt + 100) / 200) * 200;
}

/*
 * print the information about the clock on the ports: for each port,
 * the receive and transmit clock sources (and the derived internal
 * rate where applicable), plus whether the port drives the line clock.
 */
void
sca_print_clock_info(struct sca_softc *sc)
{
	struct sca_port *scp;
	u_int32_t mhz, div;
	int i;

	printf("%s: base clock %d Hz\n", device_xname(sc->sc_parent),
	    sc->sc_baseclock);

	/* print the information about the port clock selection */
	for (i = 0; i < sc->sc_numports; i++) {
		scp = &sc->sc_ports[i];
		/* sp_tmc == 0 means the hardware divisor of 256 */
		mhz = sc->sc_baseclock / (scp->sp_tmc ? scp->sp_tmc : 256);
		div = scp->sp_rxs & SCA_RXS_DIV_MASK;

		printf("%s: rx clock: ", scp->sp_if.if_xname);
		switch (scp->sp_rxs & SCA_RXS_CLK_MASK) {
		case SCA_RXS_CLK_LINE:
			printf("line");
			break;
		case SCA_RXS_CLK_LINE_SN:
			printf("line with noise suppression");
			break;
		case SCA_RXS_CLK_INTERNAL:
			printf("internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_OUT:
			printf("adpll using internal %d Hz", (mhz >> div));
			break;
		case SCA_RXS_CLK_ADPLL_IN:
			printf("adpll using line clock");
			break;
		}
		printf(" tx clock: ");
		div = scp->sp_txs & SCA_TXS_DIV_MASK;
		switch (scp->sp_txs & SCA_TXS_CLK_MASK) {
		case SCA_TXS_CLK_LINE:
			printf("line\n");
			break;
		case SCA_TXS_CLK_INTERNAL:
			printf("internal %d Hz\n", (mhz >> div));
			break;
		case SCA_TXS_CLK_RXCLK:
			printf("rxclock\n");
			break;
		}
		if (scp->sp_eclock)
			printf("%s: outputting line clock\n",
			    scp->sp_if.if_xname);
	}
}