1 /* $OpenBSD: if_myx.c,v 1.111 2020/07/17 03:37:36 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 2007 Reyk Floeter <reyk@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 /* 20 * Driver for the Myricom Myri-10G Lanai-Z8E Ethernet chipsets. 21 */ 22 23 #include "bpfilter.h" 24 #include "kstat.h" 25 26 #include <sys/param.h> 27 #include <sys/systm.h> 28 #include <sys/sockio.h> 29 #include <sys/mbuf.h> 30 #include <sys/kernel.h> 31 #include <sys/socket.h> 32 #include <sys/malloc.h> 33 #include <sys/pool.h> 34 #include <sys/timeout.h> 35 #include <sys/device.h> 36 #include <sys/proc.h> 37 #include <sys/queue.h> 38 #include <sys/rwlock.h> 39 #include <sys/kstat.h> 40 41 #include <machine/bus.h> 42 #include <machine/intr.h> 43 44 #include <net/if.h> 45 #include <net/if_dl.h> 46 #include <net/if_media.h> 47 48 #if NBPFILTER > 0 49 #include <net/bpf.h> 50 #endif 51 52 #include <netinet/in.h> 53 #include <netinet/if_ether.h> 54 55 #include <dev/pci/pcireg.h> 56 #include <dev/pci/pcivar.h> 57 #include <dev/pci/pcidevs.h> 58 59 #include <dev/pci/if_myxreg.h> 60 61 #ifdef MYX_DEBUG 62 #define MYXDBG_INIT (1<<0) /* chipset initialization */ 63 #define MYXDBG_CMD (2<<0) /* commands */ 64 #define MYXDBG_INTR (3<<0) /* interrupts */ 65 #define MYXDBG_ALL 0xffff /* enable all 
debugging messages */ 66 int myx_debug = MYXDBG_ALL; 67 #define DPRINTF(_lvl, _arg...) do { \ 68 if (myx_debug & (_lvl)) \ 69 printf(_arg); \ 70 } while (0) 71 #else 72 #define DPRINTF(_lvl, arg...) 73 #endif 74 75 #define DEVNAME(_s) ((_s)->sc_dev.dv_xname) 76 77 struct myx_dmamem { 78 bus_dmamap_t mxm_map; 79 bus_dma_segment_t mxm_seg; 80 int mxm_nsegs; 81 size_t mxm_size; 82 caddr_t mxm_kva; 83 }; 84 85 struct pool *myx_mcl_pool; 86 87 struct myx_slot { 88 bus_dmamap_t ms_map; 89 struct mbuf *ms_m; 90 }; 91 92 struct myx_rx_ring { 93 struct myx_softc *mrr_softc; 94 struct timeout mrr_refill; 95 struct if_rxring mrr_rxr; 96 struct myx_slot *mrr_slots; 97 u_int32_t mrr_offset; 98 u_int mrr_running; 99 u_int mrr_prod; 100 u_int mrr_cons; 101 struct mbuf *(*mrr_mclget)(void); 102 }; 103 104 enum myx_state { 105 MYX_S_OFF = 0, 106 MYX_S_RUNNING, 107 MYX_S_DOWN 108 }; 109 110 struct myx_softc { 111 struct device sc_dev; 112 struct arpcom sc_ac; 113 114 pci_chipset_tag_t sc_pc; 115 pci_intr_handle_t sc_ih; 116 pcitag_t sc_tag; 117 118 bus_dma_tag_t sc_dmat; 119 bus_space_tag_t sc_memt; 120 bus_space_handle_t sc_memh; 121 bus_size_t sc_mems; 122 123 struct myx_dmamem sc_zerodma; 124 struct myx_dmamem sc_cmddma; 125 struct myx_dmamem sc_paddma; 126 127 struct myx_dmamem sc_sts_dma; 128 volatile struct myx_status *sc_sts; 129 130 int sc_intx; 131 void *sc_irqh; 132 u_int32_t sc_irqcoaloff; 133 u_int32_t sc_irqclaimoff; 134 u_int32_t sc_irqdeassertoff; 135 136 struct myx_dmamem sc_intrq_dma; 137 struct myx_intrq_desc *sc_intrq; 138 u_int sc_intrq_count; 139 u_int sc_intrq_idx; 140 141 u_int sc_rx_ring_count; 142 #define MYX_RXSMALL 0 143 #define MYX_RXBIG 1 144 struct myx_rx_ring sc_rx_ring[2]; 145 146 bus_size_t sc_tx_boundary; 147 u_int sc_tx_ring_count; 148 u_int32_t sc_tx_ring_offset; 149 u_int sc_tx_nsegs; 150 u_int32_t sc_tx_count; /* shadows ms_txdonecnt */ 151 u_int sc_tx_ring_prod; 152 u_int sc_tx_ring_cons; 153 154 u_int sc_tx_prod; 155 u_int sc_tx_cons; 156 
struct myx_slot *sc_tx_slots; 157 158 struct ifmedia sc_media; 159 160 volatile enum myx_state sc_state; 161 volatile u_int8_t sc_linkdown; 162 163 struct rwlock sc_sff_lock; 164 165 #if NKSTAT > 0 166 struct mutex sc_kstat_mtx; 167 struct timeout sc_kstat_tmo; 168 struct kstat *sc_kstat; 169 #endif 170 }; 171 172 #define MYX_RXSMALL_SIZE MCLBYTES 173 #define MYX_RXBIG_SIZE (MYX_MTU - \ 174 (ETHER_ALIGN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)) 175 176 int myx_match(struct device *, void *, void *); 177 void myx_attach(struct device *, struct device *, void *); 178 int myx_pcie_dc(struct myx_softc *, struct pci_attach_args *); 179 int myx_query(struct myx_softc *sc, char *, size_t); 180 u_int myx_ether_aton(char *, u_int8_t *, u_int); 181 void myx_attachhook(struct device *); 182 int myx_loadfirmware(struct myx_softc *, const char *); 183 int myx_probe_firmware(struct myx_softc *); 184 185 void myx_read(struct myx_softc *, bus_size_t, void *, bus_size_t); 186 void myx_write(struct myx_softc *, bus_size_t, void *, bus_size_t); 187 188 #if defined(__LP64__) 189 #define _myx_bus_space_write bus_space_write_raw_region_8 190 typedef u_int64_t myx_bus_t; 191 #else 192 #define _myx_bus_space_write bus_space_write_raw_region_4 193 typedef u_int32_t myx_bus_t; 194 #endif 195 #define myx_bus_space_write(_sc, _o, _a, _l) \ 196 _myx_bus_space_write((_sc)->sc_memt, (_sc)->sc_memh, (_o), (_a), (_l)) 197 198 int myx_cmd(struct myx_softc *, u_int32_t, struct myx_cmd *, u_int32_t *); 199 int myx_boot(struct myx_softc *, u_int32_t); 200 201 int myx_rdma(struct myx_softc *, u_int); 202 int myx_dmamem_alloc(struct myx_softc *, struct myx_dmamem *, 203 bus_size_t, u_int align); 204 void myx_dmamem_free(struct myx_softc *, struct myx_dmamem *); 205 int myx_media_change(struct ifnet *); 206 void myx_media_status(struct ifnet *, struct ifmediareq *); 207 void myx_link_state(struct myx_softc *, u_int32_t); 208 void myx_watchdog(struct ifnet *); 209 int myx_ioctl(struct ifnet *, u_long, 
caddr_t); 210 int myx_rxrinfo(struct myx_softc *, struct if_rxrinfo *); 211 void myx_up(struct myx_softc *); 212 void myx_iff(struct myx_softc *); 213 void myx_down(struct myx_softc *); 214 int myx_get_sffpage(struct myx_softc *, struct if_sffpage *); 215 216 void myx_start(struct ifqueue *); 217 void myx_write_txd_tail(struct myx_softc *, struct myx_slot *, u_int8_t, 218 u_int32_t, u_int); 219 int myx_load_mbuf(struct myx_softc *, struct myx_slot *, struct mbuf *); 220 int myx_setlladdr(struct myx_softc *, u_int32_t, u_int8_t *); 221 int myx_intr(void *); 222 void myx_rxeof(struct myx_softc *); 223 void myx_txeof(struct myx_softc *, u_int32_t); 224 225 int myx_buf_fill(struct myx_softc *, struct myx_slot *, 226 struct mbuf *(*)(void)); 227 struct mbuf * myx_mcl_small(void); 228 struct mbuf * myx_mcl_big(void); 229 230 int myx_rx_init(struct myx_softc *, int, bus_size_t); 231 int myx_rx_fill(struct myx_softc *, struct myx_rx_ring *); 232 void myx_rx_empty(struct myx_softc *, struct myx_rx_ring *); 233 void myx_rx_free(struct myx_softc *, struct myx_rx_ring *); 234 235 int myx_tx_init(struct myx_softc *, bus_size_t); 236 void myx_tx_empty(struct myx_softc *); 237 void myx_tx_free(struct myx_softc *); 238 239 void myx_refill(void *); 240 241 #if NKSTAT > 0 242 void myx_kstat_attach(struct myx_softc *); 243 void myx_kstat_start(struct myx_softc *); 244 void myx_kstat_stop(struct myx_softc *); 245 #endif 246 247 struct cfdriver myx_cd = { 248 NULL, "myx", DV_IFNET 249 }; 250 struct cfattach myx_ca = { 251 sizeof(struct myx_softc), myx_match, myx_attach 252 }; 253 254 const struct pci_matchid myx_devices[] = { 255 { PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E }, 256 { PCI_VENDOR_MYRICOM, PCI_PRODUCT_MYRICOM_Z8E_9 } 257 }; 258 259 int 260 myx_match(struct device *parent, void *match, void *aux) 261 { 262 return (pci_matchbyid(aux, myx_devices, nitems(myx_devices))); 263 } 264 265 void 266 myx_attach(struct device *parent, struct device *self, void *aux) 267 { 268 struct 
myx_softc *sc = (struct myx_softc *)self; 269 struct pci_attach_args *pa = aux; 270 char part[32]; 271 pcireg_t memtype; 272 273 sc->sc_pc = pa->pa_pc; 274 sc->sc_tag = pa->pa_tag; 275 sc->sc_dmat = pa->pa_dmat; 276 277 sc->sc_rx_ring[MYX_RXSMALL].mrr_softc = sc; 278 sc->sc_rx_ring[MYX_RXSMALL].mrr_mclget = myx_mcl_small; 279 timeout_set(&sc->sc_rx_ring[MYX_RXSMALL].mrr_refill, myx_refill, 280 &sc->sc_rx_ring[MYX_RXSMALL]); 281 sc->sc_rx_ring[MYX_RXBIG].mrr_softc = sc; 282 sc->sc_rx_ring[MYX_RXBIG].mrr_mclget = myx_mcl_big; 283 timeout_set(&sc->sc_rx_ring[MYX_RXBIG].mrr_refill, myx_refill, 284 &sc->sc_rx_ring[MYX_RXBIG]); 285 286 /* Map the PCI memory space */ 287 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, MYXBAR0); 288 if (pci_mapreg_map(pa, MYXBAR0, memtype, BUS_SPACE_MAP_PREFETCHABLE, 289 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) { 290 printf(": unable to map register memory\n"); 291 return; 292 } 293 294 /* Get board details (mac/part) */ 295 memset(part, 0, sizeof(part)); 296 if (myx_query(sc, part, sizeof(part)) != 0) 297 goto unmap; 298 299 /* Map the interrupt */ 300 if (pci_intr_map_msi(pa, &sc->sc_ih) != 0) { 301 if (pci_intr_map(pa, &sc->sc_ih) != 0) { 302 printf(": unable to map interrupt\n"); 303 goto unmap; 304 } 305 sc->sc_intx = 1; 306 } 307 308 printf(": %s, model %s, address %s\n", 309 pci_intr_string(pa->pa_pc, sc->sc_ih), 310 part[0] == '\0' ? 
"(unknown)" : part, 311 ether_sprintf(sc->sc_ac.ac_enaddr)); 312 313 if (myx_pcie_dc(sc, pa) != 0) 314 printf("%s: unable to configure PCI Express\n", DEVNAME(sc)); 315 316 config_mountroot(self, myx_attachhook); 317 318 return; 319 320 unmap: 321 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 322 sc->sc_mems = 0; 323 } 324 325 int 326 myx_pcie_dc(struct myx_softc *sc, struct pci_attach_args *pa) 327 { 328 pcireg_t dcsr; 329 pcireg_t mask = PCI_PCIE_DCSR_MPS | PCI_PCIE_DCSR_ERO; 330 pcireg_t dc = ((fls(4096) - 8) << 12) | PCI_PCIE_DCSR_ERO; 331 int reg; 332 333 if (pci_get_capability(sc->sc_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 334 ®, NULL) == 0) 335 return (-1); 336 337 reg += PCI_PCIE_DCSR; 338 dcsr = pci_conf_read(sc->sc_pc, pa->pa_tag, reg); 339 if ((dcsr & mask) != dc) { 340 CLR(dcsr, mask); 341 SET(dcsr, dc); 342 pci_conf_write(sc->sc_pc, pa->pa_tag, reg, dcsr); 343 } 344 345 return (0); 346 } 347 348 u_int 349 myx_ether_aton(char *mac, u_int8_t *lladdr, u_int maxlen) 350 { 351 u_int i, j; 352 u_int8_t digit; 353 354 memset(lladdr, 0, ETHER_ADDR_LEN); 355 for (i = j = 0; mac[i] != '\0' && i < maxlen; i++) { 356 if (mac[i] >= '0' && mac[i] <= '9') 357 digit = mac[i] - '0'; 358 else if (mac[i] >= 'A' && mac[i] <= 'F') 359 digit = mac[i] - 'A' + 10; 360 else if (mac[i] >= 'a' && mac[i] <= 'f') 361 digit = mac[i] - 'a' + 10; 362 else 363 continue; 364 if ((j & 1) == 0) 365 digit <<= 4; 366 lladdr[j++/2] |= digit; 367 } 368 369 return (i); 370 } 371 372 int 373 myx_query(struct myx_softc *sc, char *part, size_t partlen) 374 { 375 struct myx_gen_hdr hdr; 376 u_int32_t offset; 377 u_int8_t strings[MYX_STRING_SPECS_SIZE]; 378 u_int i, len, maxlen; 379 380 myx_read(sc, MYX_HEADER_POS, &offset, sizeof(offset)); 381 offset = betoh32(offset); 382 if (offset + sizeof(hdr) > sc->sc_mems) { 383 printf(": header is outside register window\n"); 384 return (1); 385 } 386 387 myx_read(sc, offset, &hdr, sizeof(hdr)); 388 offset = betoh32(hdr.fw_specs); 389 len = 
min(betoh32(hdr.fw_specs_len), sizeof(strings)); 390 391 bus_space_read_region_1(sc->sc_memt, sc->sc_memh, offset, strings, len); 392 393 for (i = 0; i < len; i++) { 394 maxlen = len - i; 395 if (strings[i] == '\0') 396 break; 397 if (maxlen > 4 && memcmp("MAC=", &strings[i], 4) == 0) { 398 i += 4; 399 i += myx_ether_aton(&strings[i], 400 sc->sc_ac.ac_enaddr, maxlen); 401 } else if (maxlen > 3 && memcmp("PC=", &strings[i], 3) == 0) { 402 i += 3; 403 i += strlcpy(part, &strings[i], min(maxlen, partlen)); 404 } 405 for (; i < len; i++) { 406 if (strings[i] == '\0') 407 break; 408 } 409 } 410 411 return (0); 412 } 413 414 int 415 myx_loadfirmware(struct myx_softc *sc, const char *filename) 416 { 417 struct myx_gen_hdr hdr; 418 u_int8_t *fw; 419 size_t fwlen; 420 u_int32_t offset; 421 u_int i, ret = 1; 422 423 if (loadfirmware(filename, &fw, &fwlen) != 0) { 424 printf("%s: could not load firmware %s\n", DEVNAME(sc), 425 filename); 426 return (1); 427 } 428 if (fwlen > MYX_SRAM_SIZE || fwlen < MYXFW_MIN_LEN) { 429 printf("%s: invalid firmware %s size\n", DEVNAME(sc), filename); 430 goto err; 431 } 432 433 memcpy(&offset, fw + MYX_HEADER_POS, sizeof(offset)); 434 offset = betoh32(offset); 435 if ((offset + sizeof(hdr)) > fwlen) { 436 printf("%s: invalid firmware %s\n", DEVNAME(sc), filename); 437 goto err; 438 } 439 440 memcpy(&hdr, fw + offset, sizeof(hdr)); 441 DPRINTF(MYXDBG_INIT, "%s: " 442 "fw hdr off %u, length %u, type 0x%x, version %s\n", 443 DEVNAME(sc), offset, betoh32(hdr.fw_hdrlength), 444 betoh32(hdr.fw_type), hdr.fw_version); 445 446 if (betoh32(hdr.fw_type) != MYXFW_TYPE_ETH || 447 memcmp(MYXFW_VER, hdr.fw_version, strlen(MYXFW_VER)) != 0) { 448 printf("%s: invalid firmware type 0x%x version %s\n", 449 DEVNAME(sc), betoh32(hdr.fw_type), hdr.fw_version); 450 goto err; 451 } 452 453 /* Write the firmware to the card's SRAM */ 454 for (i = 0; i < fwlen; i += 256) 455 myx_write(sc, i + MYX_FW, fw + i, min(256, fwlen - i)); 456 457 if (myx_boot(sc, fwlen) != 0) 
{ 458 printf("%s: failed to boot %s\n", DEVNAME(sc), filename); 459 goto err; 460 } 461 462 ret = 0; 463 464 err: 465 free(fw, M_DEVBUF, fwlen); 466 return (ret); 467 } 468 469 void 470 myx_attachhook(struct device *self) 471 { 472 struct myx_softc *sc = (struct myx_softc *)self; 473 struct ifnet *ifp = &sc->sc_ac.ac_if; 474 struct myx_cmd mc; 475 476 /* this is sort of racy */ 477 if (myx_mcl_pool == NULL) { 478 myx_mcl_pool = malloc(sizeof(*myx_mcl_pool), M_DEVBUF, 479 M_WAITOK); 480 481 m_pool_init(myx_mcl_pool, MYX_RXBIG_SIZE, MYX_BOUNDARY, 482 "myxmcl"); 483 pool_cache_init(myx_mcl_pool); 484 } 485 486 /* Allocate command DMA memory */ 487 if (myx_dmamem_alloc(sc, &sc->sc_cmddma, MYXALIGN_CMD, 488 MYXALIGN_CMD) != 0) { 489 printf("%s: failed to allocate command DMA memory\n", 490 DEVNAME(sc)); 491 return; 492 } 493 494 /* Try the firmware stored on disk */ 495 if (myx_loadfirmware(sc, MYXFW_ALIGNED) != 0) { 496 /* error printed by myx_loadfirmware */ 497 goto freecmd; 498 } 499 500 memset(&mc, 0, sizeof(mc)); 501 502 if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) { 503 printf("%s: failed to reset the device\n", DEVNAME(sc)); 504 goto freecmd; 505 } 506 507 sc->sc_tx_boundary = 4096; 508 509 if (myx_probe_firmware(sc) != 0) { 510 printf("%s: error while selecting firmware\n", DEVNAME(sc)); 511 goto freecmd; 512 } 513 514 sc->sc_irqh = pci_intr_establish(sc->sc_pc, sc->sc_ih, 515 IPL_NET | IPL_MPSAFE, myx_intr, sc, DEVNAME(sc)); 516 if (sc->sc_irqh == NULL) { 517 printf("%s: unable to establish interrupt\n", DEVNAME(sc)); 518 goto freecmd; 519 } 520 521 #if NKSTAT > 0 522 myx_kstat_attach(sc); 523 #endif 524 525 ifp->if_softc = sc; 526 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 527 ifp->if_xflags = IFXF_MPSAFE; 528 ifp->if_ioctl = myx_ioctl; 529 ifp->if_qstart = myx_start; 530 ifp->if_watchdog = myx_watchdog; 531 ifp->if_hardmtu = MYX_RXBIG_SIZE; 532 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ); 533 ifq_set_maxlen(&ifp->if_snd, 1); 534 535 
ifp->if_capabilities = IFCAP_VLAN_MTU; 536 #if 0 537 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 538 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 539 IFCAP_CSUM_UDPv4; 540 #endif 541 542 ifmedia_init(&sc->sc_media, 0, myx_media_change, myx_media_status); 543 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 544 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 545 546 if_attach(ifp); 547 ether_ifattach(ifp); 548 549 return; 550 551 freecmd: 552 myx_dmamem_free(sc, &sc->sc_cmddma); 553 } 554 555 int 556 myx_probe_firmware(struct myx_softc *sc) 557 { 558 struct myx_dmamem test; 559 bus_dmamap_t map; 560 struct myx_cmd mc; 561 pcireg_t csr; 562 int offset; 563 int width = 0; 564 565 if (pci_get_capability(sc->sc_pc, sc->sc_tag, PCI_CAP_PCIEXPRESS, 566 &offset, NULL)) { 567 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 568 offset + PCI_PCIE_LCSR); 569 width = (csr >> 20) & 0x3f; 570 571 if (width <= 4) { 572 /* 573 * if the link width is 4 or less we can use the 574 * aligned firmware. 
575 */ 576 return (0); 577 } 578 } 579 580 if (myx_dmamem_alloc(sc, &test, 4096, 4096) != 0) 581 return (1); 582 map = test.mxm_map; 583 584 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 585 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 586 587 memset(&mc, 0, sizeof(mc)); 588 mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 589 mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 590 mc.mc_data2 = htobe32(4096 * 0x10000); 591 if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) { 592 printf("%s: DMA read test failed\n", DEVNAME(sc)); 593 goto fail; 594 } 595 596 memset(&mc, 0, sizeof(mc)); 597 mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 598 mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 599 mc.mc_data2 = htobe32(4096 * 0x1); 600 if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) { 601 printf("%s: DMA write test failed\n", DEVNAME(sc)); 602 goto fail; 603 } 604 605 memset(&mc, 0, sizeof(mc)); 606 mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 607 mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 608 mc.mc_data2 = htobe32(4096 * 0x10001); 609 if (myx_cmd(sc, MYXCMD_UNALIGNED_DMA_TEST, &mc, NULL) != 0) { 610 printf("%s: DMA read/write test failed\n", DEVNAME(sc)); 611 goto fail; 612 } 613 614 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 615 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 616 myx_dmamem_free(sc, &test); 617 return (0); 618 619 fail: 620 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 621 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 622 myx_dmamem_free(sc, &test); 623 624 if (myx_loadfirmware(sc, MYXFW_UNALIGNED) != 0) { 625 printf("%s: unable to load %s\n", DEVNAME(sc), 626 MYXFW_UNALIGNED); 627 return (1); 628 } 629 630 sc->sc_tx_boundary = 2048; 631 632 printf("%s: using unaligned firmware\n", DEVNAME(sc)); 633 return (0); 634 } 635 636 void 637 myx_read(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len) 638 { 639 
bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len, 640 BUS_SPACE_BARRIER_READ); 641 bus_space_read_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len); 642 } 643 644 void 645 myx_write(struct myx_softc *sc, bus_size_t off, void *ptr, bus_size_t len) 646 { 647 bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh, off, ptr, len); 648 bus_space_barrier(sc->sc_memt, sc->sc_memh, off, len, 649 BUS_SPACE_BARRIER_WRITE); 650 } 651 652 int 653 myx_dmamem_alloc(struct myx_softc *sc, struct myx_dmamem *mxm, 654 bus_size_t size, u_int align) 655 { 656 mxm->mxm_size = size; 657 658 if (bus_dmamap_create(sc->sc_dmat, mxm->mxm_size, 1, 659 mxm->mxm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 660 &mxm->mxm_map) != 0) 661 return (1); 662 if (bus_dmamem_alloc(sc->sc_dmat, mxm->mxm_size, 663 align, 0, &mxm->mxm_seg, 1, &mxm->mxm_nsegs, 664 BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0) 665 goto destroy; 666 if (bus_dmamem_map(sc->sc_dmat, &mxm->mxm_seg, mxm->mxm_nsegs, 667 mxm->mxm_size, &mxm->mxm_kva, BUS_DMA_WAITOK) != 0) 668 goto free; 669 if (bus_dmamap_load(sc->sc_dmat, mxm->mxm_map, mxm->mxm_kva, 670 mxm->mxm_size, NULL, BUS_DMA_WAITOK) != 0) 671 goto unmap; 672 673 return (0); 674 unmap: 675 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size); 676 free: 677 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1); 678 destroy: 679 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map); 680 return (1); 681 } 682 683 void 684 myx_dmamem_free(struct myx_softc *sc, struct myx_dmamem *mxm) 685 { 686 bus_dmamap_unload(sc->sc_dmat, mxm->mxm_map); 687 bus_dmamem_unmap(sc->sc_dmat, mxm->mxm_kva, mxm->mxm_size); 688 bus_dmamem_free(sc->sc_dmat, &mxm->mxm_seg, 1); 689 bus_dmamap_destroy(sc->sc_dmat, mxm->mxm_map); 690 } 691 692 int 693 myx_cmd(struct myx_softc *sc, u_int32_t cmd, struct myx_cmd *mc, u_int32_t *r) 694 { 695 bus_dmamap_t map = sc->sc_cmddma.mxm_map; 696 struct myx_response *mr; 697 u_int i; 698 u_int32_t result, data; 699 700 mc->mc_cmd = htobe32(cmd); 701 mc->mc_addr_high = 
htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 702 mc->mc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 703 704 mr = (struct myx_response *)sc->sc_cmddma.mxm_kva; 705 mr->mr_result = 0xffffffff; 706 707 /* Send command */ 708 myx_write(sc, MYX_CMD, (u_int8_t *)mc, sizeof(struct myx_cmd)); 709 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 710 BUS_DMASYNC_PREREAD); 711 712 for (i = 0; i < 20; i++) { 713 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 714 BUS_DMASYNC_POSTREAD); 715 result = betoh32(mr->mr_result); 716 data = betoh32(mr->mr_data); 717 718 if (result != 0xffffffff) 719 break; 720 721 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 722 BUS_DMASYNC_PREREAD); 723 delay(1000); 724 } 725 726 DPRINTF(MYXDBG_CMD, "%s(%s): cmd %u completed, i %d, " 727 "result 0x%x, data 0x%x (%u)\n", DEVNAME(sc), __func__, 728 cmd, i, result, data, data); 729 730 if (result == MYXCMD_OK) { 731 if (r != NULL) 732 *r = data; 733 } 734 735 return (result); 736 } 737 738 int 739 myx_boot(struct myx_softc *sc, u_int32_t length) 740 { 741 struct myx_bootcmd bc; 742 bus_dmamap_t map = sc->sc_cmddma.mxm_map; 743 u_int32_t *status; 744 u_int i, ret = 1; 745 746 memset(&bc, 0, sizeof(bc)); 747 bc.bc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 748 bc.bc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 749 bc.bc_result = 0xffffffff; 750 bc.bc_offset = htobe32(MYX_FW_BOOT); 751 bc.bc_length = htobe32(length - 8); 752 bc.bc_copyto = htobe32(8); 753 bc.bc_jumpto = htobe32(0); 754 755 status = (u_int32_t *)sc->sc_cmddma.mxm_kva; 756 *status = 0; 757 758 /* Send command */ 759 myx_write(sc, MYX_BOOT, &bc, sizeof(bc)); 760 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 761 BUS_DMASYNC_PREREAD); 762 763 for (i = 0; i < 200; i++) { 764 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 765 BUS_DMASYNC_POSTREAD); 766 if (*status == 0xffffffff) { 767 ret = 0; 768 break; 769 } 770 771 bus_dmamap_sync(sc->sc_dmat, map, 0, 
map->dm_mapsize, 772 BUS_DMASYNC_PREREAD); 773 delay(1000); 774 } 775 776 DPRINTF(MYXDBG_CMD, "%s: boot completed, i %d, result %d\n", 777 DEVNAME(sc), i, ret); 778 779 return (ret); 780 } 781 782 int 783 myx_rdma(struct myx_softc *sc, u_int do_enable) 784 { 785 struct myx_rdmacmd rc; 786 bus_dmamap_t map = sc->sc_cmddma.mxm_map; 787 bus_dmamap_t pad = sc->sc_paddma.mxm_map; 788 u_int32_t *status; 789 int ret = 1; 790 u_int i; 791 792 /* 793 * It is required to setup a _dummy_ RDMA address. It also makes 794 * some PCI-E chipsets resend dropped messages. 795 */ 796 rc.rc_addr_high = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 797 rc.rc_addr_low = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 798 rc.rc_result = 0xffffffff; 799 rc.rc_rdma_high = htobe32(MYX_ADDRHIGH(pad->dm_segs[0].ds_addr)); 800 rc.rc_rdma_low = htobe32(MYX_ADDRLOW(pad->dm_segs[0].ds_addr)); 801 rc.rc_enable = htobe32(do_enable); 802 803 status = (u_int32_t *)sc->sc_cmddma.mxm_kva; 804 *status = 0; 805 806 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 807 BUS_DMASYNC_PREREAD); 808 809 /* Send command */ 810 myx_write(sc, MYX_RDMA, &rc, sizeof(rc)); 811 812 for (i = 0; i < 20; i++) { 813 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 814 BUS_DMASYNC_POSTREAD); 815 816 if (*status == 0xffffffff) { 817 ret = 0; 818 break; 819 } 820 821 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 822 BUS_DMASYNC_PREREAD); 823 delay(1000); 824 } 825 826 DPRINTF(MYXDBG_CMD, "%s(%s): dummy RDMA %s, i %d, result 0x%x\n", 827 DEVNAME(sc), __func__, 828 do_enable ? 
"enabled" : "disabled", i, betoh32(*status)); 829 830 return (ret); 831 } 832 833 int 834 myx_media_change(struct ifnet *ifp) 835 { 836 /* ignore */ 837 return (0); 838 } 839 840 void 841 myx_media_status(struct ifnet *ifp, struct ifmediareq *imr) 842 { 843 struct myx_softc *sc = (struct myx_softc *)ifp->if_softc; 844 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 845 u_int32_t sts; 846 847 imr->ifm_active = IFM_ETHER | IFM_AUTO; 848 if (!ISSET(ifp->if_flags, IFF_RUNNING)) { 849 imr->ifm_status = 0; 850 return; 851 } 852 853 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 854 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 855 sts = sc->sc_sts->ms_linkstate; 856 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 857 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 858 859 myx_link_state(sc, sts); 860 861 imr->ifm_status = IFM_AVALID; 862 if (!LINK_STATE_IS_UP(ifp->if_link_state)) 863 return; 864 865 imr->ifm_active |= IFM_FDX | IFM_FLOW | 866 IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE; 867 imr->ifm_status |= IFM_ACTIVE; 868 } 869 870 void 871 myx_link_state(struct myx_softc *sc, u_int32_t sts) 872 { 873 struct ifnet *ifp = &sc->sc_ac.ac_if; 874 int link_state = LINK_STATE_DOWN; 875 876 if (betoh32(sts) == MYXSTS_LINKUP) 877 link_state = LINK_STATE_FULL_DUPLEX; 878 if (ifp->if_link_state != link_state) { 879 ifp->if_link_state = link_state; 880 if_link_state_change(ifp); 881 ifp->if_baudrate = LINK_STATE_IS_UP(ifp->if_link_state) ? 
882 IF_Gbps(10) : 0; 883 } 884 } 885 886 void 887 myx_watchdog(struct ifnet *ifp) 888 { 889 return; 890 } 891 892 int 893 myx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 894 { 895 struct myx_softc *sc = (struct myx_softc *)ifp->if_softc; 896 struct ifreq *ifr = (struct ifreq *)data; 897 int s, error = 0; 898 899 s = splnet(); 900 901 switch (cmd) { 902 case SIOCSIFADDR: 903 ifp->if_flags |= IFF_UP; 904 /* FALLTHROUGH */ 905 906 case SIOCSIFFLAGS: 907 if (ISSET(ifp->if_flags, IFF_UP)) { 908 if (ISSET(ifp->if_flags, IFF_RUNNING)) 909 error = ENETRESET; 910 else 911 myx_up(sc); 912 } else { 913 if (ISSET(ifp->if_flags, IFF_RUNNING)) 914 myx_down(sc); 915 } 916 break; 917 918 case SIOCGIFMEDIA: 919 case SIOCSIFMEDIA: 920 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 921 break; 922 923 case SIOCGIFRXR: 924 error = myx_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data); 925 break; 926 927 case SIOCGIFSFFPAGE: 928 error = rw_enter(&sc->sc_sff_lock, RW_WRITE|RW_INTR); 929 if (error != 0) 930 break; 931 932 error = myx_get_sffpage(sc, (struct if_sffpage *)data); 933 rw_exit(&sc->sc_sff_lock); 934 break; 935 936 default: 937 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); 938 } 939 940 if (error == ENETRESET) { 941 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == 942 (IFF_UP | IFF_RUNNING)) 943 myx_iff(sc); 944 error = 0; 945 } 946 947 splx(s); 948 return (error); 949 } 950 951 int 952 myx_rxrinfo(struct myx_softc *sc, struct if_rxrinfo *ifri) 953 { 954 struct if_rxring_info ifr[2]; 955 956 memset(ifr, 0, sizeof(ifr)); 957 958 ifr[0].ifr_size = MYX_RXSMALL_SIZE; 959 ifr[0].ifr_info = sc->sc_rx_ring[0].mrr_rxr; 960 strlcpy(ifr[0].ifr_name, "small", sizeof(ifr[0].ifr_name)); 961 962 ifr[1].ifr_size = MYX_RXBIG_SIZE; 963 ifr[1].ifr_info = sc->sc_rx_ring[1].mrr_rxr; 964 strlcpy(ifr[1].ifr_name, "large", sizeof(ifr[1].ifr_name)); 965 966 return (if_rxr_info_ioctl(ifri, nitems(ifr), ifr)); 967 } 968 969 static int 970 myx_i2c_byte(struct myx_softc *sc, uint8_t addr, 
uint8_t off, uint8_t *byte) 971 { 972 struct myx_cmd mc; 973 int result; 974 uint32_t r; 975 unsigned int ms; 976 977 memset(&mc, 0, sizeof(mc)); 978 mc.mc_data0 = htobe32(0); /* get 1 byte */ 979 mc.mc_data1 = htobe32((addr << 8) | off); 980 result = myx_cmd(sc, MYXCMD_I2C_READ, &mc, NULL); 981 if (result != 0) 982 return (EIO); 983 984 for (ms = 0; ms < 50; ms++) { 985 memset(&mc, 0, sizeof(mc)); 986 mc.mc_data0 = htobe32(off); 987 result = myx_cmd(sc, MYXCMD_I2C_BYTE, &mc, &r); 988 switch (result) { 989 case MYXCMD_OK: 990 *byte = r; 991 return (0); 992 case MYXCMD_ERR_BUSY: 993 break; 994 default: 995 return (EIO); 996 } 997 998 delay(1000); 999 } 1000 1001 return (EBUSY); 1002 } 1003 1004 int 1005 myx_get_sffpage(struct myx_softc *sc, struct if_sffpage *sff) 1006 { 1007 unsigned int i; 1008 int result; 1009 1010 if (sff->sff_addr == IFSFF_ADDR_EEPROM) { 1011 uint8_t page; 1012 1013 result = myx_i2c_byte(sc, IFSFF_ADDR_EEPROM, 127, &page); 1014 if (result != 0) 1015 return (result); 1016 1017 if (page != sff->sff_page) 1018 return (ENXIO); 1019 } 1020 1021 for (i = 0; i < sizeof(sff->sff_data); i++) { 1022 result = myx_i2c_byte(sc, sff->sff_addr, 1023 i, &sff->sff_data[i]); 1024 if (result != 0) 1025 return (result); 1026 } 1027 1028 return (0); 1029 } 1030 1031 void 1032 myx_up(struct myx_softc *sc) 1033 { 1034 struct ifnet *ifp = &sc->sc_ac.ac_if; 1035 struct myx_cmd mc; 1036 bus_dmamap_t map; 1037 size_t size; 1038 u_int maxpkt; 1039 u_int32_t r; 1040 1041 memset(&mc, 0, sizeof(mc)); 1042 if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) { 1043 printf("%s: failed to reset the device\n", DEVNAME(sc)); 1044 return; 1045 } 1046 1047 if (myx_dmamem_alloc(sc, &sc->sc_zerodma, 1048 64, MYXALIGN_CMD) != 0) { 1049 printf("%s: failed to allocate zero pad memory\n", 1050 DEVNAME(sc)); 1051 return; 1052 } 1053 memset(sc->sc_zerodma.mxm_kva, 0, 64); 1054 bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0, 1055 sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD); 
1056 1057 if (myx_dmamem_alloc(sc, &sc->sc_paddma, 1058 MYXALIGN_CMD, MYXALIGN_CMD) != 0) { 1059 printf("%s: failed to allocate pad DMA memory\n", 1060 DEVNAME(sc)); 1061 goto free_zero; 1062 } 1063 bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0, 1064 sc->sc_paddma.mxm_map->dm_mapsize, 1065 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1066 1067 if (myx_rdma(sc, MYXRDMA_ON) != 0) { 1068 printf("%s: failed to enable dummy RDMA\n", DEVNAME(sc)); 1069 goto free_pad; 1070 } 1071 1072 if (myx_cmd(sc, MYXCMD_GET_RXRINGSZ, &mc, &r) != 0) { 1073 printf("%s: unable to get rx ring size\n", DEVNAME(sc)); 1074 goto free_pad; 1075 } 1076 sc->sc_rx_ring_count = r / sizeof(struct myx_rx_desc); 1077 1078 memset(&mc, 0, sizeof(mc)); 1079 if (myx_cmd(sc, MYXCMD_GET_TXRINGSZ, &mc, &r) != 0) { 1080 printf("%s: unable to get tx ring size\n", DEVNAME(sc)); 1081 goto free_pad; 1082 } 1083 sc->sc_tx_ring_prod = 0; 1084 sc->sc_tx_ring_cons = 0; 1085 sc->sc_tx_ring_count = r / sizeof(struct myx_tx_desc); 1086 sc->sc_tx_nsegs = min(16, sc->sc_tx_ring_count / 4); /* magic */ 1087 sc->sc_tx_count = 0; 1088 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count - 1); 1089 1090 /* Allocate Interrupt Queue */ 1091 1092 sc->sc_intrq_count = sc->sc_rx_ring_count * 2; 1093 sc->sc_intrq_idx = 0; 1094 1095 size = sc->sc_intrq_count * sizeof(struct myx_intrq_desc); 1096 if (myx_dmamem_alloc(sc, &sc->sc_intrq_dma, 1097 size, MYXALIGN_DATA) != 0) { 1098 goto free_pad; 1099 } 1100 sc->sc_intrq = (struct myx_intrq_desc *)sc->sc_intrq_dma.mxm_kva; 1101 map = sc->sc_intrq_dma.mxm_map; 1102 memset(sc->sc_intrq, 0, size); 1103 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1104 BUS_DMASYNC_PREREAD); 1105 1106 memset(&mc, 0, sizeof(mc)); 1107 mc.mc_data0 = htobe32(size); 1108 if (myx_cmd(sc, MYXCMD_SET_INTRQSZ, &mc, NULL) != 0) { 1109 printf("%s: failed to set intrq size\n", DEVNAME(sc)); 1110 goto free_intrq; 1111 } 1112 1113 memset(&mc, 0, sizeof(mc)); 1114 mc.mc_data0 = 
htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 1115 mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 1116 if (myx_cmd(sc, MYXCMD_SET_INTRQDMA, &mc, NULL) != 0) { 1117 printf("%s: failed to set intrq address\n", DEVNAME(sc)); 1118 goto free_intrq; 1119 } 1120 1121 /* 1122 * get interrupt offsets 1123 */ 1124 1125 memset(&mc, 0, sizeof(mc)); 1126 if (myx_cmd(sc, MYXCMD_GET_INTRACKOFF, &mc, 1127 &sc->sc_irqclaimoff) != 0) { 1128 printf("%s: failed to get IRQ ack offset\n", DEVNAME(sc)); 1129 goto free_intrq; 1130 } 1131 1132 memset(&mc, 0, sizeof(mc)); 1133 if (myx_cmd(sc, MYXCMD_GET_INTRDEASSERTOFF, &mc, 1134 &sc->sc_irqdeassertoff) != 0) { 1135 printf("%s: failed to get IRQ deassert offset\n", DEVNAME(sc)); 1136 goto free_intrq; 1137 } 1138 1139 memset(&mc, 0, sizeof(mc)); 1140 if (myx_cmd(sc, MYXCMD_GET_INTRCOALDELAYOFF, &mc, 1141 &sc->sc_irqcoaloff) != 0) { 1142 printf("%s: failed to get IRQ coal offset\n", DEVNAME(sc)); 1143 goto free_intrq; 1144 } 1145 1146 /* Set an appropriate interrupt coalescing period */ 1147 r = htobe32(MYX_IRQCOALDELAY); 1148 myx_write(sc, sc->sc_irqcoaloff, &r, sizeof(r)); 1149 1150 if (myx_setlladdr(sc, MYXCMD_SET_LLADDR, LLADDR(ifp->if_sadl)) != 0) { 1151 printf("%s: failed to configure lladdr\n", DEVNAME(sc)); 1152 goto free_intrq; 1153 } 1154 1155 memset(&mc, 0, sizeof(mc)); 1156 if (myx_cmd(sc, MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) { 1157 printf("%s: failed to disable promisc mode\n", DEVNAME(sc)); 1158 goto free_intrq; 1159 } 1160 1161 memset(&mc, 0, sizeof(mc)); 1162 if (myx_cmd(sc, MYXCMD_FC_DEFAULT, &mc, NULL) != 0) { 1163 printf("%s: failed to configure flow control\n", DEVNAME(sc)); 1164 goto free_intrq; 1165 } 1166 1167 memset(&mc, 0, sizeof(mc)); 1168 if (myx_cmd(sc, MYXCMD_GET_TXRINGOFF, &mc, 1169 &sc->sc_tx_ring_offset) != 0) { 1170 printf("%s: unable to get tx ring offset\n", DEVNAME(sc)); 1171 goto free_intrq; 1172 } 1173 1174 memset(&mc, 0, sizeof(mc)); 1175 if (myx_cmd(sc, MYXCMD_GET_RXSMALLRINGOFF, &mc, 
1176 &sc->sc_rx_ring[MYX_RXSMALL].mrr_offset) != 0) { 1177 printf("%s: unable to get small rx ring offset\n", DEVNAME(sc)); 1178 goto free_intrq; 1179 } 1180 1181 memset(&mc, 0, sizeof(mc)); 1182 if (myx_cmd(sc, MYXCMD_GET_RXBIGRINGOFF, &mc, 1183 &sc->sc_rx_ring[MYX_RXBIG].mrr_offset) != 0) { 1184 printf("%s: unable to get big rx ring offset\n", DEVNAME(sc)); 1185 goto free_intrq; 1186 } 1187 1188 /* Allocate Interrupt Data */ 1189 if (myx_dmamem_alloc(sc, &sc->sc_sts_dma, 1190 sizeof(struct myx_status), MYXALIGN_DATA) != 0) { 1191 printf("%s: failed to allocate status DMA memory\n", 1192 DEVNAME(sc)); 1193 goto free_intrq; 1194 } 1195 sc->sc_sts = (struct myx_status *)sc->sc_sts_dma.mxm_kva; 1196 map = sc->sc_sts_dma.mxm_map; 1197 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1198 BUS_DMASYNC_PREREAD); 1199 1200 memset(&mc, 0, sizeof(mc)); 1201 mc.mc_data0 = htobe32(MYX_ADDRLOW(map->dm_segs[0].ds_addr)); 1202 mc.mc_data1 = htobe32(MYX_ADDRHIGH(map->dm_segs[0].ds_addr)); 1203 mc.mc_data2 = htobe32(sizeof(struct myx_status)); 1204 if (myx_cmd(sc, MYXCMD_SET_STATSDMA, &mc, NULL) != 0) { 1205 printf("%s: failed to set status DMA offset\n", DEVNAME(sc)); 1206 goto free_sts; 1207 } 1208 1209 maxpkt = ifp->if_hardmtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1210 1211 memset(&mc, 0, sizeof(mc)); 1212 mc.mc_data0 = htobe32(maxpkt); 1213 if (myx_cmd(sc, MYXCMD_SET_MTU, &mc, NULL) != 0) { 1214 printf("%s: failed to set MTU size %d\n", DEVNAME(sc), maxpkt); 1215 goto free_sts; 1216 } 1217 1218 if (myx_tx_init(sc, maxpkt) != 0) 1219 goto free_sts; 1220 1221 if (myx_rx_init(sc, MYX_RXSMALL, MCLBYTES) != 0) 1222 goto free_tx_ring; 1223 1224 if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXSMALL]) != 0) 1225 goto free_rx_ring_small; 1226 1227 if (myx_rx_init(sc, MYX_RXBIG, MYX_RXBIG_SIZE) != 0) 1228 goto empty_rx_ring_small; 1229 1230 if (myx_rx_fill(sc, &sc->sc_rx_ring[MYX_RXBIG]) != 0) 1231 goto free_rx_ring_big; 1232 1233 memset(&mc, 0, sizeof(mc)); 1234 mc.mc_data0 = 
htobe32(MYX_RXSMALL_SIZE - ETHER_ALIGN); 1235 if (myx_cmd(sc, MYXCMD_SET_SMALLBUFSZ, &mc, NULL) != 0) { 1236 printf("%s: failed to set small buf size\n", DEVNAME(sc)); 1237 goto empty_rx_ring_big; 1238 } 1239 1240 memset(&mc, 0, sizeof(mc)); 1241 mc.mc_data0 = htobe32(16384); 1242 if (myx_cmd(sc, MYXCMD_SET_BIGBUFSZ, &mc, NULL) != 0) { 1243 printf("%s: failed to set big buf size\n", DEVNAME(sc)); 1244 goto empty_rx_ring_big; 1245 } 1246 1247 sc->sc_state = MYX_S_RUNNING; 1248 1249 if (myx_cmd(sc, MYXCMD_SET_IFUP, &mc, NULL) != 0) { 1250 printf("%s: failed to start the device\n", DEVNAME(sc)); 1251 goto empty_rx_ring_big; 1252 } 1253 1254 myx_iff(sc); 1255 SET(ifp->if_flags, IFF_RUNNING); 1256 ifq_restart(&ifp->if_snd); 1257 1258 #if NKSTAT > 0 1259 timeout_add_sec(&sc->sc_kstat_tmo, 1); 1260 #endif 1261 1262 return; 1263 1264 empty_rx_ring_big: 1265 myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXBIG]); 1266 free_rx_ring_big: 1267 myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXBIG]); 1268 empty_rx_ring_small: 1269 myx_rx_empty(sc, &sc->sc_rx_ring[MYX_RXSMALL]); 1270 free_rx_ring_small: 1271 myx_rx_free(sc, &sc->sc_rx_ring[MYX_RXSMALL]); 1272 free_tx_ring: 1273 myx_tx_free(sc); 1274 free_sts: 1275 bus_dmamap_sync(sc->sc_dmat, sc->sc_sts_dma.mxm_map, 0, 1276 sc->sc_sts_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1277 myx_dmamem_free(sc, &sc->sc_sts_dma); 1278 free_intrq: 1279 bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0, 1280 sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1281 myx_dmamem_free(sc, &sc->sc_intrq_dma); 1282 free_pad: 1283 bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0, 1284 sc->sc_paddma.mxm_map->dm_mapsize, 1285 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1286 myx_dmamem_free(sc, &sc->sc_paddma); 1287 1288 memset(&mc, 0, sizeof(mc)); 1289 if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) { 1290 printf("%s: failed to reset the device\n", DEVNAME(sc)); 1291 } 1292 free_zero: 1293 bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0, 
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}

/*
 * Program a link-level address into the NIC using the given firmware
 * command (e.g. MYXCMD_SET_LLADDR).  The six address bytes are packed
 * big-endian into the two 32-bit command data words: addr[0..3] in
 * data0, addr[4..5] in the low half of data1.
 *
 * Returns 0 on success, -1 if the firmware command failed.
 */
int
myx_setlladdr(struct myx_softc *sc, u_int32_t cmd, u_int8_t *addr)
{
	struct myx_cmd mc;

	memset(&mc, 0, sizeof(mc));
	mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
	    addr[2] << 8 | addr[3]);
	mc.mc_data1 = htobe32(addr[4] << 8 | addr[5]);

	if (myx_cmd(sc, cmd, &mc, NULL) != 0) {
		printf("%s: failed to set the lladdr\n", DEVNAME(sc));
		return (-1);
	}
	return (0);
}

/*
 * Sync the chip's receive filter with the interface state: promisc
 * mode, ALLMULTI, and the multicast group list.  The firmware is put
 * in ALLMULTI first so no packets are lost while the group list is
 * torn down and rebuilt; ALLMULTI is only cleared again once every
 * group has been re-joined.
 *
 * NOTE(review): mc is passed uninitialized to the first three
 * commands (no memset) — presumably these commands ignore the data
 * words; confirm against myx_cmd().
 */
void
myx_iff(struct myx_softc *sc)
{
	struct myx_cmd mc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *addr;

	CLR(ifp->if_flags, IFF_ALLMULTI);

	if (myx_cmd(sc, ISSET(ifp->if_flags, IFF_PROMISC) ?
	    MYXCMD_SET_PROMISC : MYXCMD_UNSET_PROMISC, &mc, NULL) != 0) {
		printf("%s: failed to configure promisc mode\n", DEVNAME(sc));
		return;
	}

	if (myx_cmd(sc, MYXCMD_SET_ALLMULTI, &mc, NULL) != 0) {
		printf("%s: failed to enable ALLMULTI\n", DEVNAME(sc));
		return;
	}

	if (myx_cmd(sc, MYXCMD_UNSET_MCAST, &mc, NULL) != 0) {
		printf("%s: failed to leave all mcast groups \n", DEVNAME(sc));
		return;
	}

	/* promisc or a multicast range: fall back to hardware ALLMULTI */
	if (ISSET(ifp->if_flags, IFF_PROMISC) ||
	    sc->sc_ac.ac_multirangecnt > 0) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		return;
	}

	ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
	while (enm != NULL) {
		addr = enm->enm_addrlo;

		memset(&mc, 0, sizeof(mc));
		mc.mc_data0 = htobe32(addr[0] << 24 | addr[1] << 16 |
		    addr[2] << 8 | addr[3]);
		/*
		 * NOTE(review): group addresses pack addr[4..5] into the
		 * HIGH half of data1, unlike myx_setlladdr() — presumably
		 * the firmware expects this alignment for mcast groups.
		 */
		mc.mc_data1 = htobe32(addr[4] << 24 | addr[5] << 16);
		if (myx_cmd(sc, MYXCMD_SET_MCASTGROUP, &mc, NULL) != 0) {
			printf("%s: failed to join mcast group\n", DEVNAME(sc));
			return;
		}

		ETHER_NEXT_MULTI(step, enm);
	}

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_UNSET_ALLMULTI, &mc, NULL) != 0) {
		printf("%s: failed to disable ALLMULTI\n", DEVNAME(sc));
		return;
	}
}

/*
 * Bring the interface down.
 *
 * Sets sc_state to MYX_S_DOWN and issues MYXCMD_SET_IFDOWN, then
 * sleeps until myx_intr() observes the firmware's final status update
 * (ms_linkdown change) and moves sc_state to MYX_S_OFF.  Only then is
 * it safe to reset the chip and tear down the rings and DMA memory.
 */
void
myx_down(struct myx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	volatile struct myx_status *sts = sc->sc_sts;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	struct sleep_state sls;
	struct myx_cmd mc;
	int s;
	int ring;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* snapshot ms_linkdown so myx_intr() can detect the final change */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	sc->sc_linkdown = sts->ms_linkdown;
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_state = MYX_S_DOWN;
	membar_producer();

	memset(&mc, 0, sizeof(mc));
	(void)myx_cmd(sc, MYXCMD_SET_IFDOWN, &mc, NULL);

	/* wait for myx_intr() to move us to MYX_S_OFF */
	while (sc->sc_state != MYX_S_OFF) {
		sleep_setup(&sls, sts, PWAIT, "myxdown");
		membar_consumer();
		sleep_finish(&sls, sc->sc_state != MYX_S_OFF);
	}

	s = splnet();
	if (ifp->if_link_state != LINK_STATE_UNKNOWN) {
		ifp->if_link_state = LINK_STATE_UNKNOWN;
		ifp->if_baudrate = 0;
		if_link_state_change(ifp);
	}
	splx(s);

	memset(&mc, 0, sizeof(mc));
	if (myx_cmd(sc, MYXCMD_RESET, &mc, NULL) != 0) {
		printf("%s: failed to reset the device\n", DEVNAME(sc));
	}

	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	for (ring = 0; ring < 2; ring++) {
		struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];

		timeout_del(&mrr->mrr_refill);
		myx_rx_empty(sc, mrr);
		myx_rx_free(sc, mrr);
	}

	myx_tx_empty(sc);
	myx_tx_free(sc);

#if NKSTAT > 0
	myx_kstat_stop(sc);
	sc->sc_sts = NULL;
#endif

	/* the sleep shizz above already synced this dmamem */
	myx_dmamem_free(sc, &sc->sc_sts_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_intrq_dma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_paddma.mxm_map, 0,
	    sc->sc_paddma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_paddma);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_zerodma.mxm_map, 0,
	    sc->sc_zerodma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	myx_dmamem_free(sc, &sc->sc_zerodma);
}

/*
 * Write the tx descriptors for segments 1..n-1 of a loaded dmamap
 * into the NIC's ring at "offset", starting at ring index "idx"
 * (segment 0 is written separately by the caller).  Frames shorter
 * than 60 bytes get one extra descriptor pointing at the zero-filled
 * pad buffer (sc_zerodma) to bring them up to the minimum length.
 */
void
myx_write_txd_tail(struct myx_softc *sc, struct myx_slot *ms, u_int8_t flags,
    u_int32_t offset, u_int idx)
{
	struct myx_tx_desc txd;
	bus_dmamap_t zmap = sc->sc_zerodma.mxm_map;
	bus_dmamap_t map = ms->ms_map;
	int i;

	for (i = 1; i < map->dm_nsegs; i++) {
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[i].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[i].ds_len);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}

	/* pad runt frames */
	if (map->dm_mapsize < 60) {
		/* i == dm_nsegs here, so the pad lands after the last seg */
		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(zmap->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(60 - map->dm_mapsize);
		txd.tx_flags = flags;

		myx_bus_space_write(sc,
		    offset + sizeof(txd) * ((idx + i) % sc->sc_tx_ring_count),
		    &txd, sizeof(txd));
	}
}

/*
 * ifq start routine.  Works in two passes:
 *
 * 1. Dequeue mbufs and load them into tx slot dmamaps until the ring
 *    is out of space (tracked in descriptor units: nsegs plus one for
 *    a possible runt pad).
 * 2. Walk the loaded slots and write their descriptors to the NIC,
 *    SECOND packet onwards first; the FIRST packet's head descriptor
 *    is posted last, and its final bus word is written only after a
 *    write barrier, so the firmware never sees a partially written
 *    head descriptor.
 *
 * sc_tx_ring_prod/sc_tx_prod are only committed at the end.
 */
void
myx_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct myx_tx_desc txd;
	struct myx_softc *sc = ifp->if_softc;
	struct myx_slot *ms;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int32_t offset = sc->sc_tx_ring_offset;
	u_int idx, cons, prod;
	u_int free, used;
	u_int8_t flags;

	idx = sc->sc_tx_ring_prod;

	/* figure out space */
	free = sc->sc_tx_ring_cons;
	if (free <= idx)
		free += sc->sc_tx_ring_count;
	free -= idx;

	cons = prod = sc->sc_tx_prod;

	used = 0;

	for (;;) {
		/* worst case descriptor usage for one more packet */
		if (used + sc->sc_tx_nsegs + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		ms = &sc->sc_tx_slots[prod];

		if (myx_load_mbuf(sc, ms, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		map = ms->ms_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* nsegs descriptors, plus one if it needs runt padding */
		used += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		if (++prod >= sc->sc_tx_ring_count)
			prod = 0;
	}

	if (cons == prod)
		return;

	ms = &sc->sc_tx_slots[cons];

	/* post packets after the first one */
	for (;;) {
		idx += ms->ms_map->dm_nsegs +
		    (ms->ms_map->dm_mapsize < 60 ? 1 : 0);
		if (idx >= sc->sc_tx_ring_count)
			idx -= sc->sc_tx_ring_count;

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;

		if (cons == prod)
			break;

		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		flags = MYXTXD_FLAGS_NO_TSO;
		if (map->dm_mapsize < 1520)
			flags |= MYXTXD_FLAGS_SMALL;

		memset(&txd, 0, sizeof(txd));
		txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
		txd.tx_length = htobe16(map->dm_segs[0].ds_len);
		txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
		txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;
		myx_bus_space_write(sc,
		    offset + sizeof(txd) * idx, &txd, sizeof(txd));

		myx_write_txd_tail(sc, ms, flags, offset, idx);
	}

	/* go back and post first packet */
	ms = &sc->sc_tx_slots[sc->sc_tx_prod];
	map = ms->ms_map;

	flags = MYXTXD_FLAGS_NO_TSO;
	if (map->dm_mapsize < 1520)
		flags |= MYXTXD_FLAGS_SMALL;

	memset(&txd, 0, sizeof(txd));
	txd.tx_addr = htobe64(map->dm_segs[0].ds_addr);
	txd.tx_length = htobe16(map->dm_segs[0].ds_len);
	txd.tx_nsegs = map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);
	txd.tx_flags = flags | MYXTXD_FLAGS_FIRST;

	/* make sure the first descriptor is seen after the others */
	myx_write_txd_tail(sc, ms, flags, offset, sc->sc_tx_ring_prod);

	/* head descriptor minus its last bus word... */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, &txd,
	    sizeof(txd) - sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh, offset,
	    sizeof(txd) * sc->sc_tx_ring_count, BUS_SPACE_BARRIER_WRITE);

	/* ...then the last word, which makes the descriptor visible */
	myx_bus_space_write(sc,
	    offset + sizeof(txd) * (sc->sc_tx_ring_prod + 1) -
	    sizeof(myx_bus_t),
	    (u_int8_t *)&txd + sizeof(txd) - sizeof(myx_bus_t),
	    sizeof(myx_bus_t));

	bus_space_barrier(sc->sc_memt, sc->sc_memh,
	    offset + sizeof(txd) * sc->sc_tx_ring_prod, sizeof(txd),
	    BUS_SPACE_BARRIER_WRITE);

	/* commit */
	sc->sc_tx_ring_prod = idx;
	sc->sc_tx_prod = prod;
}

/*
 * Load an mbuf chain into the slot's dmamap.  On EFBIG the chain is
 * defragged once and the load retried.  Returns 0 on success (the
 * slot takes ownership of m), 1 on failure (caller frees m).
 */
int
myx_load_mbuf(struct myx_softc *sc, struct myx_slot *ms, struct mbuf *m)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	bus_dmamap_t dmap = ms->ms_map;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(dmat, dmap, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	ms->ms_m = m;
	return (0);
}

/*
 * Interrupt handler.  The firmware DMAs a struct myx_status into
 * host memory; ms_isvalid flags a fresh update.  The handler:
 *  - bails if the interface is off, or if the status is not valid;
 *  - deasserts the INTx line if not using MSI;
 *  - clears ms_isvalid and re-reads ms_txdonecnt until the firmware
 *    stops re-raising it, to get a stable tx completion count;
 *  - runs tx/rx completion, claims the irq slots, and handles link
 *    state plus the myx_down() handshake (MYX_S_DOWN -> MYX_S_OFF).
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
myx_intr(void *arg)
{
	struct myx_softc *sc = (struct myx_softc *)arg;
	volatile struct myx_status *sts = sc->sc_sts;
	enum myx_state state;
	bus_dmamap_t map = sc->sc_sts_dma.mxm_map;
	u_int32_t data;
	u_int8_t valid = 0;

	state = sc->sc_state;
	if (state == MYX_S_OFF)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	valid = sts->ms_isvalid;
	if (valid == 0x0) {
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		return (0);
	}

	if (sc->sc_intx) {
		/* legacy INTx: explicitly deassert the line */
		data = htobe32(0);
		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqdeassertoff, &data, sizeof(data));
	}
	sts->ms_isvalid = 0;

	/* spin until the firmware stops updating the status block */
	do {
		data = sts->ms_txdonecnt;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	} while (sts->ms_isvalid);

	data = betoh32(data);
	if (data != sc->sc_tx_count)
		myx_txeof(sc, data);

	data = htobe32(3);
	if (valid & 0x1) {
		myx_rxeof(sc);

		bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
		    sc->sc_irqclaimoff, &data, sizeof(data));
	}
	bus_space_write_raw_region_4(sc->sc_memt, sc->sc_memh,
	    sc->sc_irqclaimoff + sizeof(data), &data, sizeof(data));

	if (sts->ms_statusupdated) {
		if (state == MYX_S_DOWN &&
		    sc->sc_linkdown != sts->ms_linkdown) {
			/* myx_down() is waiting for this transition */
			sc->sc_state = MYX_S_OFF;
			membar_producer();
			wakeup(sts);
		} else {
			data = sts->ms_linkstate;
			if (data != 0xffffffff) {
				KERNEL_LOCK();
				myx_link_state(sc, data);
				KERNEL_UNLOCK();
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (1);
}

/*
 * Timeout to retry filling an rx ring that ran completely empty
 * (prod == cons); reschedules itself until a fill succeeds.
 */
void
myx_refill(void *xmrr)
{
	struct myx_rx_ring *mrr = xmrr;
	struct myx_softc *sc = mrr->mrr_softc;

	myx_rx_fill(sc, mrr);

	if (mrr->mrr_prod == mrr->mrr_cons)
		timeout_add(&mrr->mrr_refill, 1);
}

/*
 * Reclaim transmitted slots.  Advances sc_tx_count (the shadow of the
 * firmware's ms_txdonecnt) until it matches done_count, unloading and
 * freeing each completed slot's mbuf, and tracking the matching ring
 * index advance (nsegs + runt pad per packet).  Restarts the ifq if
 * it was flow-controlled.
 */
void
myx_txeof(struct myx_softc *sc, u_int32_t done_count)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct myx_slot *ms;
	bus_dmamap_t map;
	u_int idx, cons;

	idx = sc->sc_tx_ring_cons;
	cons = sc->sc_tx_cons;

	do {
		ms = &sc->sc_tx_slots[cons];
		map = ms->ms_map;

		/* same descriptor accounting as myx_start() */
		idx += map->dm_nsegs + (map->dm_mapsize < 60 ? 1 : 0);

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(ms->ms_m);

		if (++cons >= sc->sc_tx_ring_count)
			cons = 0;
	} while (++sc->sc_tx_count != done_count);

	if (idx >= sc->sc_tx_ring_count)
		idx -= sc->sc_tx_ring_count;

	sc->sc_tx_ring_cons = idx;
	sc->sc_tx_cons = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

/*
 * Drain the receive interrupt queue.  Each non-zero iq_length entry
 * is one received frame; the length decides which ring (small/big)
 * the buffer came from, since the firmware consumes each ring in
 * order.  Frames are collected onto an mbuf_list, handed to the
 * stack in one go, and the consumed slots are refilled.
 */
void
myx_rxeof(struct myx_softc *sc)
{
	static const struct myx_intrq_desc zerodesc = { 0, 0 };
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct myx_rx_ring *mrr;
	struct myx_slot *ms;
	struct mbuf *m;
	int ring;
	u_int rxfree[2] = { 0 , 0 };
	u_int len;
	int livelocked;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	while ((len = betoh16(sc->sc_intrq[sc->sc_intrq_idx].iq_length)) != 0) {
		sc->sc_intrq[sc->sc_intrq_idx] = zerodesc;

		if (++sc->sc_intrq_idx >= sc->sc_intrq_count)
			sc->sc_intrq_idx = 0;

		ring = (len <=
		    (MYX_RXSMALL_SIZE - ETHER_ALIGN)) ?
		    MYX_RXSMALL : MYX_RXBIG;

		mrr = &sc->sc_rx_ring[ring];
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);

		m = ms->ms_m;
		/* the payload was DMAd ETHER_ALIGN bytes into the buffer */
		m->m_data += ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		rxfree[ring]++;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_intrq_dma.mxm_map, 0,
	    sc->sc_intrq_dma.mxm_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	for (ring = MYX_RXSMALL; ring <= MYX_RXBIG; ring++) {
		if (rxfree[ring] == 0)
			continue;

		mrr = &sc->sc_rx_ring[ring];

		if (livelocked)
			if_rxr_livelocked(&mrr->mrr_rxr);

		if_rxr_put(&mrr->mrr_rxr, rxfree[ring]);
		myx_rx_fill(sc, mrr);
		/* ring went completely empty: retry from a timeout */
		if (mrr->mrr_prod == mrr->mrr_cons)
			timeout_add(&mrr->mrr_refill, 0);
	}
}

/*
 * Fill up to "slots" rx slots with fresh mbufs and post their
 * descriptors.  The FIRST slot's descriptor is deliberately written
 * last, after a write barrier, so the firmware only sees the new run
 * of buffers once they are all valid.  Returns the number of slots
 * that could not be filled (e.g. mbuf allocation failure).
 */
static int
myx_rx_fill_slots(struct myx_softc *sc, struct myx_rx_ring *mrr, u_int slots)
{
	struct myx_rx_desc rxd;
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	u_int p, first, fills;

	first = p = mrr->mrr_prod;
	if (myx_buf_fill(sc, &mrr->mrr_slots[first], mrr->mrr_mclget) != 0)
		return (slots);

	if (++p >= sc->sc_rx_ring_count)
		p = 0;

	for (fills = 1; fills < slots; fills++) {
		ms = &mrr->mrr_slots[p];

		if (myx_buf_fill(sc, ms, mrr->mrr_mclget) != 0)
			break;

		rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
		myx_bus_space_write(sc, offset + p * sizeof(rxd),
		    &rxd, sizeof(rxd));

		if (++p >= sc->sc_rx_ring_count)
			p = 0;
	}

	mrr->mrr_prod = p;

	/* make sure the first descriptor is seen after the others */
	if (fills > 1) {
		bus_space_barrier(sc->sc_memt, sc->sc_memh,
		    offset, sizeof(rxd) * sc->sc_rx_ring_count,
		    BUS_SPACE_BARRIER_WRITE);
	}

	ms = &mrr->mrr_slots[first];
	rxd.rx_addr = htobe64(ms->ms_map->dm_segs[0].ds_addr);
	myx_bus_space_write(sc, offset + first * sizeof(rxd),
	    &rxd, sizeof(rxd));

	return (slots - fills);
}

/*
 * Allocate the slot array and per-slot dmamaps for one rx ring, and
 * poison the NIC's descriptors (all 0xff).  Returns 0 or an errno,
 * unwinding on failure.
 */
int
myx_rx_init(struct myx_softc *sc, int ring, bus_size_t size)
{
	struct myx_rx_desc rxd;
	struct myx_rx_ring *mrr = &sc->sc_rx_ring[ring];
	struct myx_slot *ms;
	u_int32_t offset = mrr->mrr_offset;
	int rv;
	int i;

	mrr->mrr_slots = mallocarray(sizeof(*ms), sc->sc_rx_ring_count,
	    M_DEVBUF, M_WAITOK);
	if (mrr->mrr_slots == NULL)
		return (ENOMEM);

	memset(&rxd, 0xff, sizeof(rxd));
	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map);
		if (rv != 0)
			goto destroy;

		myx_bus_space_write(sc, offset + i * sizeof(rxd),
		    &rxd, sizeof(rxd));
	}

	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
	mrr->mrr_prod = mrr->mrr_cons = 0;

	return (0);

destroy:
	while (i-- > 0) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}
	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
	return (rv);
}

/*
 * Top up an rx ring, bounded by the if_rxr accounting.  Slots that
 * could not be filled are returned to the rxr.  Returns 1 if the rxr
 * granted no slots, 0 otherwise.
 */
int
myx_rx_fill(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	u_int slots;

	slots = if_rxr_get(&mrr->mrr_rxr, sc->sc_rx_ring_count);
	if (slots == 0)
		return (1);

	slots = myx_rx_fill_slots(sc, mrr, slots);
	if (slots > 0)
		if_rxr_put(&mrr->mrr_rxr, slots);

	return (0);
}

/*
 * Release every filled slot (cons..prod) of an rx ring: sync, unload
 * and free the mbufs, then reset the rxr accounting.
 */
void
myx_rx_empty(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	struct myx_slot *ms;

	while (mrr->mrr_cons != mrr->mrr_prod) {
		ms = &mrr->mrr_slots[mrr->mrr_cons];

		if (++mrr->mrr_cons >= sc->sc_rx_ring_count)
			mrr->mrr_cons = 0;

		bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
		    ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ms->ms_map);
		m_freem(ms->ms_m);
	}

	if_rxr_init(&mrr->mrr_rxr, 2, sc->sc_rx_ring_count - 2);
}

/*
 * Destroy an rx ring's dmamaps and free the slot array.  The ring
 * must already be empty (see myx_rx_empty()).
 */
void
myx_rx_free(struct myx_softc *sc, struct myx_rx_ring *mrr)
{
	struct myx_slot *ms;
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		ms = &mrr->mrr_slots[i];
		bus_dmamap_destroy(sc->sc_dmat, ms->ms_map);
	}

	free(mrr->mrr_slots, M_DEVBUF, sizeof(*ms) * sc->sc_rx_ring_count);
}

/*
 * mrr_mclget backend for the small rx ring: a cluster mbuf of
 * MYX_RXSMALL_SIZE, or NULL on allocation failure.
 */
struct mbuf *
myx_mcl_small(void)
{
	struct mbuf *m;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MYX_RXSMALL_SIZE);
	if (m == NULL)
		return (NULL);

	m->m_len = m->m_pkthdr.len = MYX_RXSMALL_SIZE;

	return (m);
}

/*
 * mrr_mclget backend for the big rx ring: an mbuf header with a
 * MYX_RXBIG_SIZE external cluster from myx_mcl_pool, or NULL.
 */
struct mbuf *
myx_mcl_big(void)
{
	struct mbuf *m;
	void *mcl;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	mcl = pool_get(myx_mcl_pool, PR_NOWAIT);
	if (mcl == NULL) {
		m_free(m);
		return (NULL);
	}

	MEXTADD(m, mcl, MYX_RXBIG_SIZE, M_EXTWR, MEXTFREE_POOL, myx_mcl_pool);
	m->m_len = m->m_pkthdr.len = MYX_RXBIG_SIZE;

	return (m);
}

/*
 * Fill one rx slot: allocate an mbuf via the ring's mclget callback,
 * load it into the slot's dmamap and presync it for device reads.
 * Returns 0 on success or an errno (the mbuf is freed on failure).
 */
int
myx_buf_fill(struct myx_softc *sc, struct myx_slot *ms,
    struct mbuf *(*mclget)(void))
{
	struct mbuf *m;
	int rv;

	m = (*mclget)();
	if (m == NULL)
		return (ENOMEM);

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, ms->ms_map, m, BUS_DMA_NOWAIT);
	if (rv != 0) {
		m_freem(m);
		return (rv);
	}

	bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0,
	    ms->ms_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	ms->ms_m = m;

	return (0);
}

2026 int 2027 myx_tx_init(struct myx_softc *sc, bus_size_t size) 2028 { 2029 struct myx_slot *ms; 2030 int rv; 2031 int i; 2032 2033 sc->sc_tx_slots = mallocarray(sizeof(*ms), sc->sc_tx_ring_count, 2034 M_DEVBUF, M_WAITOK); 2035 if (sc->sc_tx_slots == NULL) 2036 return (ENOMEM); 2037 2038 for (i = 0; i < sc->sc_tx_ring_count; i++) { 2039 ms = &sc->sc_tx_slots[i]; 2040 rv = bus_dmamap_create(sc->sc_dmat, size, sc->sc_tx_nsegs, 2041 sc->sc_tx_boundary, sc->sc_tx_boundary, 2042 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &ms->ms_map); 2043 if (rv != 0) 2044 goto destroy; 2045 } 2046 2047 sc->sc_tx_prod = sc->sc_tx_cons = 0; 2048 2049 return (0); 2050 2051 destroy: 2052 while (i-- > 0) { 2053 ms = &sc->sc_tx_slots[i]; 2054 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map); 2055 } 2056 free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count); 2057 return (rv); 2058 } 2059 2060 void 2061 myx_tx_empty(struct myx_softc *sc) 2062 { 2063 struct myx_slot *ms; 2064 u_int cons = sc->sc_tx_cons; 2065 u_int prod = sc->sc_tx_prod; 2066 2067 while (cons != prod) { 2068 ms = &sc->sc_tx_slots[cons]; 2069 2070 bus_dmamap_sync(sc->sc_dmat, ms->ms_map, 0, 2071 ms->ms_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2072 bus_dmamap_unload(sc->sc_dmat, ms->ms_map); 2073 m_freem(ms->ms_m); 2074 2075 if (++cons >= sc->sc_tx_ring_count) 2076 cons = 0; 2077 } 2078 2079 sc->sc_tx_cons = cons; 2080 } 2081 2082 void 2083 myx_tx_free(struct myx_softc *sc) 2084 { 2085 struct myx_slot *ms; 2086 int i; 2087 2088 for (i = 0; i < sc->sc_tx_ring_count; i++) { 2089 ms = &sc->sc_tx_slots[i]; 2090 bus_dmamap_destroy(sc->sc_dmat, ms->ms_map); 2091 } 2092 2093 free(sc->sc_tx_slots, M_DEVBUF, sizeof(*ms) * sc->sc_tx_ring_count); 2094 } 2095 2096 #if NKSTAT > 0 2097 enum myx_counters { 2098 myx_stat_dropped_pause, 2099 myx_stat_dropped_ucast_filtered, 2100 myx_stat_dropped_bad_crc32, 2101 myx_stat_dropped_bad_phy, 2102 myx_stat_dropped_mcast_filtered, 2103 myx_stat_send_done, 2104 myx_stat_dropped_link_overflow, 2105 
myx_stat_dropped_link, 2106 myx_stat_dropped_runt, 2107 myx_stat_dropped_overrun, 2108 myx_stat_dropped_no_small_bufs, 2109 myx_stat_dropped_no_large_bufs, 2110 2111 myx_ncounters, 2112 }; 2113 2114 struct myx_counter { 2115 const char *mc_name; 2116 unsigned int mc_offset; 2117 }; 2118 2119 #define MYX_C_OFF(_f) offsetof(struct myx_status, _f) 2120 2121 static const struct myx_counter myx_counters[myx_ncounters] = { 2122 { "pause drops", MYX_C_OFF(ms_dropped_pause), }, 2123 { "ucast filtered", MYX_C_OFF(ms_dropped_unicast), }, 2124 { "bad crc32", MYX_C_OFF(ms_dropped_pause), }, 2125 { "bad phy", MYX_C_OFF(ms_dropped_phyerr), }, 2126 { "mcast filtered", MYX_C_OFF(ms_dropped_mcast), }, 2127 { "tx done", MYX_C_OFF(ms_txdonecnt), }, 2128 { "rx discards", MYX_C_OFF(ms_dropped_linkoverflow), }, 2129 { "rx errors", MYX_C_OFF(ms_dropped_linkerror), }, 2130 { "rx undersize", MYX_C_OFF(ms_dropped_runt), }, 2131 { "rx oversize", MYX_C_OFF(ms_dropped_overrun), }, 2132 { "small discards", MYX_C_OFF(ms_dropped_smallbufunderrun), }, 2133 { "large discards", MYX_C_OFF(ms_dropped_bigbufunderrun), }, 2134 }; 2135 2136 struct myx_kstats { 2137 struct kstat_kv mk_counters[myx_ncounters]; 2138 struct kstat_kv mk_rdma_tags_available; 2139 }; 2140 2141 struct myx_kstat_cache { 2142 uint32_t mkc_counters[myx_ncounters]; 2143 }; 2144 2145 struct myx_kstat_state { 2146 struct myx_kstat_cache mks_caches[2]; 2147 unsigned int mks_gen; 2148 }; 2149 2150 int 2151 myx_kstat_read(struct kstat *ks) 2152 { 2153 struct myx_softc *sc = ks->ks_softc; 2154 struct myx_kstats *mk = ks->ks_data; 2155 struct myx_kstat_state *mks = ks->ks_ptr; 2156 unsigned int gen = (mks->mks_gen++ & 1); 2157 struct myx_kstat_cache *omkc = &mks->mks_caches[gen]; 2158 struct myx_kstat_cache *nmkc = &mks->mks_caches[!gen]; 2159 unsigned int i = 0; 2160 2161 volatile struct myx_status *sts = sc->sc_sts; 2162 bus_dmamap_t map = sc->sc_sts_dma.mxm_map; 2163 2164 if (sc->sc_sts == NULL) 2165 return (0); /* counters are valid, 
just not updated */ 2166 2167 getnanouptime(&ks->ks_updated); 2168 2169 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2170 BUS_DMASYNC_POSTREAD); 2171 for (i = 0; i < myx_ncounters; i++) { 2172 const struct myx_counter *mc = &myx_counters[i]; 2173 nmkc->mkc_counters[i] = 2174 bemtoh32((uint32_t *)((uint8_t *)sts + mc->mc_offset)); 2175 } 2176 2177 kstat_kv_u32(&mk->mk_rdma_tags_available) = 2178 bemtoh32(&sts->ms_rdmatags_available); 2179 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2180 BUS_DMASYNC_PREREAD); 2181 2182 for (i = 0; i < myx_ncounters; i++) { 2183 kstat_kv_u64(&mk->mk_counters[i]) += 2184 nmkc->mkc_counters[i] - omkc->mkc_counters[i]; 2185 } 2186 2187 return (0); 2188 } 2189 2190 void 2191 myx_kstat_tick(void *arg) 2192 { 2193 struct myx_softc *sc = arg; 2194 2195 if (!ISSET(sc->sc_ac.ac_if.if_flags, IFF_RUNNING)) 2196 return; 2197 2198 timeout_add_sec(&sc->sc_kstat_tmo, 4); 2199 2200 if (!mtx_enter_try(&sc->sc_kstat_mtx)) 2201 return; 2202 2203 myx_kstat_read(sc->sc_kstat); 2204 2205 mtx_leave(&sc->sc_kstat_mtx); 2206 } 2207 2208 void 2209 myx_kstat_start(struct myx_softc *sc) 2210 { 2211 if (sc->sc_kstat == NULL) 2212 return; 2213 2214 myx_kstat_tick(sc); 2215 } 2216 2217 void 2218 myx_kstat_stop(struct myx_softc *sc) 2219 { 2220 struct myx_kstat_state *mks; 2221 2222 if (sc->sc_kstat == NULL) 2223 return; 2224 2225 timeout_del_barrier(&sc->sc_kstat_tmo); 2226 2227 mks = sc->sc_kstat->ks_ptr; 2228 2229 mtx_enter(&sc->sc_kstat_mtx); 2230 memset(mks, 0, sizeof(*mks)); 2231 mtx_leave(&sc->sc_kstat_mtx); 2232 } 2233 2234 void 2235 myx_kstat_attach(struct myx_softc *sc) 2236 { 2237 struct kstat *ks; 2238 struct myx_kstats *mk; 2239 struct myx_kstat_state *mks; 2240 unsigned int i; 2241 2242 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK); 2243 timeout_set(&sc->sc_kstat_tmo, myx_kstat_tick, sc); 2244 2245 ks = kstat_create(DEVNAME(sc), 0, "myx-stats", 0, KSTAT_T_KV, 0); 2246 if (ks == NULL) 2247 return; 2248 2249 mk = malloc(sizeof(*mk), 
M_DEVBUF, M_WAITOK|M_ZERO); 2250 for (i = 0; i < myx_ncounters; i++) { 2251 const struct myx_counter *mc = &myx_counters[i]; 2252 2253 kstat_kv_unit_init(&mk->mk_counters[i], mc->mc_name, 2254 KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); 2255 } 2256 kstat_kv_init(&mk->mk_rdma_tags_available, "rdma tags free", 2257 KSTAT_KV_T_UINT32); 2258 2259 mks = malloc(sizeof(*mks), M_DEVBUF, M_WAITOK|M_ZERO); 2260 /* these start at 0 */ 2261 2262 kstat_set_mutex(ks, &sc->sc_kstat_mtx); 2263 ks->ks_data = mk; 2264 ks->ks_datalen = sizeof(*mk); 2265 ks->ks_read = myx_kstat_read; 2266 ks->ks_ptr = mks; 2267 2268 ks->ks_softc = sc; 2269 sc->sc_kstat = ks; 2270 kstat_install(ks); 2271 } 2272 #endif /* NKSTAT > 0 */ 2273