1 /*- 2 * Copyright (c) 2016, Vincenzo Maffione 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 /* Driver for ptnet paravirtualized network device. */ 30 31 #include <sys/cdefs.h> 32 33 #include <sys/types.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/taskqueue.h> 46 #include <sys/smp.h> 47 #include <sys/time.h> 48 #include <machine/smp.h> 49 50 #include <vm/uma.h> 51 #include <vm/vm.h> 52 #include <vm/pmap.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_arp.h> 58 #include <net/if_dl.h> 59 #include <net/if_types.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 #include <net/bpf.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/ip6.h> 68 #include <netinet6/ip6_var.h> 69 #include <netinet/udp.h> 70 #include <netinet/tcp.h> 71 #include <netinet/sctp.h> 72 73 #include <machine/bus.h> 74 #include <machine/resource.h> 75 #include <sys/bus.h> 76 #include <sys/rman.h> 77 78 #include <dev/pci/pcivar.h> 79 #include <dev/pci/pcireg.h> 80 81 #include "opt_inet.h" 82 #include "opt_inet6.h" 83 84 #include <sys/selinfo.h> 85 #include <net/netmap.h> 86 #include <dev/netmap/netmap_kern.h> 87 #include <net/netmap_virt.h> 88 #include <dev/netmap/netmap_mem2.h> 89 #include <dev/virtio/network/virtio_net.h> 90 91 #ifndef PTNET_CSB_ALLOC 92 #error "No support for on-device CSB" 93 #endif 94 95 #ifndef INET 96 #error "INET not defined, cannot support offloadings" 97 #endif 98 99 #if __FreeBSD_version >= 1100000 100 static uint64_t ptnet_get_counter(if_t, ift_counter); 101 #else 102 typedef struct ifnet *if_t; 103 #define if_getsoftc(_ifp) (_ifp)->if_softc 104 #endif 105 106 //#define PTNETMAP_STATS 107 //#define DEBUG 108 #ifdef DEBUG 109 #define DBG(x) x 110 #else /* !DEBUG */ 111 #define DBG(x) 112 #endif /* !DEBUG */ 113 114 extern int ptnet_vnet_hdr; /* Tunable parameter */ 115 116 struct ptnet_softc; 117 118 struct ptnet_queue_stats { 
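	/* Per-queue counters; ptnet_get_counter() (when built on FreeBSD
	 * 11 or later) sums them over the queues to answer
	 * if_get_counter() queries. */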
119 uint64_t packets; /* if_[io]packets */ 120 uint64_t bytes; /* if_[io]bytes */ 121 uint64_t errors; /* if_[io]errors */ 122 uint64_t iqdrops; /* if_iqdrops */ 123 uint64_t mcasts; /* if_[io]mcasts */ 124 #ifdef PTNETMAP_STATS 125 uint64_t intrs; 126 uint64_t kicks; 127 #endif /* PTNETMAP_STATS */ 128 }; 129 130 struct ptnet_queue { 131 struct ptnet_softc *sc; 132 struct resource *irq; 133 void *cookie; 134 int kring_id; 135 struct ptnet_ring *ptring; 136 unsigned int kick; 137 struct mtx lock; 138 struct buf_ring *bufring; /* for TX queues */ 139 struct ptnet_queue_stats stats; 140 #ifdef PTNETMAP_STATS 141 struct ptnet_queue_stats last_stats; 142 #endif /* PTNETMAP_STATS */ 143 struct taskqueue *taskq; 144 struct task task; 145 char lock_name[16]; 146 }; 147 148 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) 149 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) 150 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) 151 152 struct ptnet_softc { 153 device_t dev; 154 if_t ifp; 155 struct ifmedia media; 156 struct mtx lock; 157 char lock_name[16]; 158 char hwaddr[ETHER_ADDR_LEN]; 159 160 /* Mirror of PTFEAT register. */ 161 uint32_t ptfeatures; 162 unsigned int vnet_hdr_len; 163 164 /* PCI BARs support. */ 165 struct resource *iomem; 166 struct resource *msix_mem; 167 168 unsigned int num_rings; 169 unsigned int num_tx_rings; 170 struct ptnet_queue *queues; 171 struct ptnet_queue *rxqueues; 172 struct ptnet_csb *csb; 173 174 unsigned int min_tx_space; 175 176 struct netmap_pt_guest_adapter *ptna; 177 178 struct callout tick; 179 #ifdef PTNETMAP_STATS 180 struct timeval last_ts; 181 #endif /* PTNETMAP_STATS */ 182 }; 183 184 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) 185 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 186 187 static int ptnet_probe(device_t); 188 static int ptnet_attach(device_t); 189 static int ptnet_detach(device_t); 190 static int ptnet_suspend(device_t); 191 static int ptnet_resume(device_t); 192 static int ptnet_shutdown(device_t); 193 194 static void ptnet_init(void *opaque); 195 static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); 196 static int ptnet_init_locked(struct ptnet_softc *sc); 197 static int ptnet_stop(struct ptnet_softc *sc); 198 static int ptnet_transmit(if_t ifp, struct mbuf *m); 199 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, 200 unsigned int budget, 201 bool may_resched); 202 static void ptnet_qflush(if_t ifp); 203 static void ptnet_tx_task(void *context, int pending); 204 205 static int ptnet_media_change(if_t ifp); 206 static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); 207 #ifdef PTNETMAP_STATS 208 static void ptnet_tick(void *opaque); 209 #endif 210 211 static int ptnet_irqs_init(struct ptnet_softc *sc); 212 static void ptnet_irqs_fini(struct ptnet_softc *sc); 213 214 static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); 215 static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, 216 unsigned *txd, unsigned *rxr, unsigned *rxd); 217 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); 218 static int ptnet_nm_register(struct netmap_adapter *na, int onoff); 219 static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); 220 static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); 221 222 static void ptnet_tx_intr(void *opaque); 223 static void ptnet_rx_intr(void *opaque); 224 225 static unsigned ptnet_rx_discard(struct netmap_kring *kring, 226 unsigned int head); 227 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, 228 bool 
			may_resched);
static void	ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif

static device_method_t ptnet_methods[] = {
	DEVMETHOD(device_probe,		ptnet_probe),
	DEVMETHOD(device_attach,	ptnet_attach),
	DEVMETHOD(device_detach,	ptnet_detach),
	DEVMETHOD(device_suspend,	ptnet_suspend),
	DEVMETHOD(device_resume,	ptnet_resume),
	DEVMETHOD(device_shutdown,	ptnet_shutdown),
	DEVMETHOD_END
};

static driver_t ptnet_driver = {
	"ptnet",
	ptnet_methods,
	sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 2);

static int
ptnet_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
	    pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
		return (ENXIO);
	}

	device_set_desc(dev, "ptnet network adapter");

	return (BUS_PROBE_DEFAULT);
}

static inline void ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
	pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
	bus_write_4(pq->sc->iomem, pq->kick, 0);
}

#define PTNET_BUF_RING_SIZE	4096
#define PTNET_RX_BUDGET		512
#define PTNET_RX_BATCH		1
#define PTNET_TX_BUDGET		512
#define PTNET_TX_BATCH		64
#define PTNET_HDR_SIZE		sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE	65536

#define PTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
				 CSUM_SCTP_IPV6)
#define PTNET_ALL_OFFLOAD	(CSUM_TSO | PTNET_CSUM_OFFLOAD |\
				 PTNET_CSUM_OFFLOAD_IPV6)

static int
ptnet_attach(device_t dev)
{
	uint32_t ptfeatures = PTNETMAP_F_BASE;
	unsigned int num_rx_rings, num_tx_rings;
	struct netmap_adapter na_arg;
	unsigned int nifp_offset;
	struct ptnet_softc *sc;
	if_t ifp;
	uint32_t macreg;
	int err, rid;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Setup PCI resources. */
	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
					   RF_ACTIVE);
	if (sc->iomem == NULL) {
		device_printf(dev, "Failed to map I/O BAR\n");
		return (ENXIO);
	}

	/* Check if we are supported by the hypervisor. If not,
	 * bail out immediately. */
	if (ptnet_vnet_hdr) {
		ptfeatures |= PTNETMAP_F_VNET_HDR;
	}
	bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
	ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
	if (!(ptfeatures & PTNETMAP_F_BASE)) {
		device_printf(dev, "Hypervisor does not support netmap "
				   "passthrough\n");
		err = ENXIO;
		goto err_path;
	}
	sc->ptfeatures = ptfeatures;

	/* Allocate CSB and carry out CSB allocation protocol (CSBBAH first,
	 * then CSBBAL).
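	 * Writing the high word first and the low word last presumably lets
	 * the device latch the complete 64-bit CSB physical address only
	 * once CSBBAL arrives, avoiding a torn address.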
*/ 335 sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF, 336 M_NOWAIT | M_ZERO); 337 if (sc->csb == NULL) { 338 device_printf(dev, "Failed to allocate CSB\n"); 339 err = ENOMEM; 340 goto err_path; 341 } 342 343 { 344 vm_paddr_t paddr = vtophys(sc->csb); 345 346 bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 347 (paddr >> 32) & 0xffffffff); 348 bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff); 349 } 350 351 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 352 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 353 sc->num_rings = num_tx_rings + num_rx_rings; 354 sc->num_tx_rings = num_tx_rings; 355 356 /* Allocate and initialize per-queue data structures. */ 357 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, 358 M_DEVBUF, M_NOWAIT | M_ZERO); 359 if (sc->queues == NULL) { 360 err = ENOMEM; 361 goto err_path; 362 } 363 sc->rxqueues = sc->queues + num_tx_rings; 364 365 for (i = 0; i < sc->num_rings; i++) { 366 struct ptnet_queue *pq = sc->queues + i; 367 368 pq->sc = sc; 369 pq->kring_id = i; 370 pq->kick = PTNET_IO_KICK_BASE + 4 * i; 371 pq->ptring = sc->csb->rings + i; 372 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", 373 device_get_nameunit(dev), i); 374 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); 375 if (i >= num_tx_rings) { 376 /* RX queue: fix kring_id. */ 377 pq->kring_id -= num_tx_rings; 378 } else { 379 /* TX queue: allocate buf_ring. */ 380 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, 381 M_DEVBUF, M_NOWAIT, &pq->lock); 382 if (pq->bufring == NULL) { 383 err = ENOMEM; 384 goto err_path; 385 } 386 } 387 } 388 389 sc->min_tx_space = 64; /* Safe initial value. */ 390 391 err = ptnet_irqs_init(sc); 392 if (err) { 393 goto err_path; 394 } 395 396 /* Setup Ethernet interface. */ 397 sc->ifp = ifp = if_alloc(IFT_ETHER); 398 if (ifp == NULL) { 399 device_printf(dev, "Failed to allocate ifnet\n"); 400 err = ENOMEM; 401 goto err_path; 402 } 403 404 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 405 ifp->if_baudrate = IF_Gbps(10); 406 ifp->if_softc = sc; 407 ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; 408 ifp->if_init = ptnet_init; 409 ifp->if_ioctl = ptnet_ioctl; 410 #if __FreeBSD_version >= 1100000 411 ifp->if_get_counter = ptnet_get_counter; 412 #endif 413 ifp->if_transmit = ptnet_transmit; 414 ifp->if_qflush = ptnet_qflush; 415 416 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, 417 ptnet_media_status); 418 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); 419 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); 420 421 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); 422 sc->hwaddr[0] = (macreg >> 8) & 0xff; 423 sc->hwaddr[1] = macreg & 0xff; 424 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); 425 sc->hwaddr[2] = (macreg >> 24) & 0xff; 426 sc->hwaddr[3] = (macreg >> 16) & 0xff; 427 sc->hwaddr[4] = (macreg >> 8) & 0xff; 428 sc->hwaddr[5] = macreg & 0xff; 429 430 ether_ifattach(ifp, sc->hwaddr); 431 432 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 433 ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; 434 435 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { 436 /* Similarly to what the vtnet driver does, we can emulate 437 * VLAN offloadings by inserting and removing the 802.1Q 438 * header during transmit and receive. We are then able 439 * to do checksum offloading of VLAN frames. 
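		 * (The tag is re-inserted with ether_vlanencap() in
		 * ptnet_transmit() and stripped by ptnet_vlan_tag_remove()
		 * on the receive path.)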
*/ 440 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 441 | IFCAP_VLAN_HWCSUM 442 | IFCAP_TSO | IFCAP_LRO 443 | IFCAP_VLAN_HWTSO 444 | IFCAP_VLAN_HWTAGGING; 445 } 446 447 ifp->if_capenable = ifp->if_capabilities; 448 #ifdef DEVICE_POLLING 449 /* Don't enable polling by default. */ 450 ifp->if_capabilities |= IFCAP_POLLING; 451 #endif 452 snprintf(sc->lock_name, sizeof(sc->lock_name), 453 "%s", device_get_nameunit(dev)); 454 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); 455 callout_init_mtx(&sc->tick, &sc->lock, 0); 456 457 /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ 458 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); 459 memset(&na_arg, 0, sizeof(na_arg)); 460 na_arg.ifp = ifp; 461 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 462 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 463 na_arg.num_tx_rings = num_tx_rings; 464 na_arg.num_rx_rings = num_rx_rings; 465 na_arg.nm_config = ptnet_nm_config; 466 na_arg.nm_krings_create = ptnet_nm_krings_create; 467 na_arg.nm_krings_delete = ptnet_nm_krings_delete; 468 na_arg.nm_dtor = ptnet_nm_dtor; 469 na_arg.nm_register = ptnet_nm_register; 470 na_arg.nm_txsync = ptnet_nm_txsync; 471 na_arg.nm_rxsync = ptnet_nm_rxsync; 472 473 netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset, ptnet_nm_ptctl); 474 475 /* Now a netmap adapter for this ifp has been allocated, and it 476 * can be accessed through NA(ifp). We also have to initialize the CSB 477 * pointer. */ 478 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); 479 480 /* If virtio-net header was negotiated, set the virt_hdr_len field in 481 * the netmap adapter, to inform users that this netmap adapter requires 482 * the application to deal with the headers. */ 483 ptnet_update_vnet_hdr(sc); 484 485 device_printf(dev, "%s() completed\n", __func__); 486 487 return (0); 488 489 err_path: 490 ptnet_detach(dev); 491 return err; 492 } 493 494 static int 495 ptnet_detach(device_t dev) 496 { 497 struct ptnet_softc *sc = device_get_softc(dev); 498 int i; 499 500 #ifdef DEVICE_POLLING 501 if (sc->ifp->if_capenable & IFCAP_POLLING) { 502 ether_poll_deregister(sc->ifp); 503 } 504 #endif 505 callout_drain(&sc->tick); 506 507 if (sc->queues) { 508 /* Drain taskqueues before calling if_detach. */ 509 for (i = 0; i < sc->num_rings; i++) { 510 struct ptnet_queue *pq = sc->queues + i; 511 512 if (pq->taskq) { 513 taskqueue_drain(pq->taskq, &pq->task); 514 } 515 } 516 } 517 518 if (sc->ifp) { 519 ether_ifdetach(sc->ifp); 520 521 /* Uninitialize netmap adapters for this device. 
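		 * netmap_detach() tears down the adapter that
		 * netmap_pt_guest_attach() set up in ptnet_attach().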
*/ 522 netmap_detach(sc->ifp); 523 524 ifmedia_removeall(&sc->media); 525 if_free(sc->ifp); 526 sc->ifp = NULL; 527 } 528 529 ptnet_irqs_fini(sc); 530 531 if (sc->csb) { 532 bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0); 533 bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0); 534 free(sc->csb, M_DEVBUF); 535 sc->csb = NULL; 536 } 537 538 if (sc->queues) { 539 for (i = 0; i < sc->num_rings; i++) { 540 struct ptnet_queue *pq = sc->queues + i; 541 542 if (mtx_initialized(&pq->lock)) { 543 mtx_destroy(&pq->lock); 544 } 545 if (pq->bufring != NULL) { 546 buf_ring_free(pq->bufring, M_DEVBUF); 547 } 548 } 549 free(sc->queues, M_DEVBUF); 550 sc->queues = NULL; 551 } 552 553 if (sc->iomem) { 554 bus_release_resource(dev, SYS_RES_IOPORT, 555 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); 556 sc->iomem = NULL; 557 } 558 559 mtx_destroy(&sc->lock); 560 561 device_printf(dev, "%s() completed\n", __func__); 562 563 return (0); 564 } 565 566 static int 567 ptnet_suspend(device_t dev) 568 { 569 struct ptnet_softc *sc; 570 571 sc = device_get_softc(dev); 572 (void)sc; 573 574 return (0); 575 } 576 577 static int 578 ptnet_resume(device_t dev) 579 { 580 struct ptnet_softc *sc; 581 582 sc = device_get_softc(dev); 583 (void)sc; 584 585 return (0); 586 } 587 588 static int 589 ptnet_shutdown(device_t dev) 590 { 591 /* 592 * Suspend already does all of what we need to 593 * do here; we just never expect to be resumed. 594 */ 595 return (ptnet_suspend(dev)); 596 } 597 598 static int 599 ptnet_irqs_init(struct ptnet_softc *sc) 600 { 601 int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); 602 int nvecs = sc->num_rings; 603 device_t dev = sc->dev; 604 int err = ENOSPC; 605 int cpu_cur; 606 int i; 607 608 if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { 609 device_printf(dev, "Could not find MSI-X capability\n"); 610 return (ENXIO); 611 } 612 613 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 614 &rid, RF_ACTIVE); 615 if (sc->msix_mem == NULL) { 616 device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); 617 return (ENXIO); 618 } 619 620 if (pci_msix_count(dev) < nvecs) { 621 device_printf(dev, "Not enough MSI-X vectors\n"); 622 goto err_path; 623 } 624 625 err = pci_alloc_msix(dev, &nvecs); 626 if (err) { 627 device_printf(dev, "Failed to allocate MSI-X vectors\n"); 628 goto err_path; 629 } 630 631 for (i = 0; i < nvecs; i++) { 632 struct ptnet_queue *pq = sc->queues + i; 633 634 rid = i + 1; 635 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 636 RF_ACTIVE); 637 if (pq->irq == NULL) { 638 device_printf(dev, "Failed to allocate interrupt " 639 "for queue #%d\n", i); 640 err = ENOSPC; 641 goto err_path; 642 } 643 } 644 645 cpu_cur = CPU_FIRST(); 646 for (i = 0; i < nvecs; i++) { 647 struct ptnet_queue *pq = sc->queues + i; 648 void (*handler)(void *) = ptnet_tx_intr; 649 650 if (i >= sc->num_tx_rings) { 651 handler = ptnet_rx_intr; 652 } 653 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, 654 NULL /* intr_filter */, handler, 655 pq, &pq->cookie); 656 if (err) { 657 device_printf(dev, "Failed to register intr handler " 658 "for queue #%d\n", i); 659 goto err_path; 660 } 661 662 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); 663 #if 0 664 bus_bind_intr(sc->dev, pq->irq, cpu_cur); 665 #endif 666 cpu_cur = CPU_NEXT(cpu_cur); 667 } 668 669 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); 670 671 cpu_cur = CPU_FIRST(); 672 for (i = 0; i < nvecs; i++) { 673 struct ptnet_queue *pq = sc->queues + i; 674 static void (*handler)(void *context, int pending); 675 676 handler = (i < sc->num_tx_rings) 
			  ? ptnet_tx_task : ptnet_rx_task;

		TASK_INIT(&pq->task, 0, handler, pq);
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
					taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
					device_get_nameunit(sc->dev), cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}

	return 0;
err_path:
	ptnet_irqs_fini(sc);
	return err;
}

static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
	device_t dev = sc->dev;
	int i;

	for (i = 0; i < sc->num_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (pq->taskq) {
			taskqueue_free(pq->taskq);
			pq->taskq = NULL;
		}

		if (pq->cookie) {
			bus_teardown_intr(dev, pq->irq, pq->cookie);
			pq->cookie = NULL;
		}

		if (pq->irq) {
			bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
			pq->irq = NULL;
		}
	}

	if (sc->msix_mem) {
		pci_release_msi(dev);

		bus_release_resource(dev, SYS_RES_MEMORY,
				     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
				     sc->msix_mem);
		sc->msix_mem = NULL;
	}
}

static void
ptnet_init(void *opaque)
{
	struct ptnet_softc *sc = opaque;

	PTNET_CORE_LOCK(sc);
	ptnet_init_locked(sc);
	PTNET_CORE_UNLOCK(sc);
}

static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	device_t dev = sc->dev;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, err = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
		PTNET_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/* Network stack wants the iff to be up. */
			err = ptnet_init_locked(sc);
		} else {
			/* Network stack wants the iff to be down. */
			err = ptnet_stop(sc);
		}
		/* We don't need to do anything to support IFF_PROMISC,
		 * since that is managed by the backend port. */
		PTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFCAP:
		device_printf(dev, "SIOCSIFCAP %x %x\n",
			      ifr->ifr_reqcap, ifp->if_capenable);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			struct ptnet_queue *pq;
			int i;

			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				err = ether_poll_register(ptnet_poll, ifp);
				if (err) {
					break;
				}
				/* Stop queues and sync with taskqueues. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					/* Make sure the worker sees the
					 * IFF_DRV_RUNNING down. */
					PTNET_Q_LOCK(pq);
					pq->ptring->guest_need_kick = 0;
					PTNET_Q_UNLOCK(pq);
					/* Wait for rescheduling to finish. */
					if (pq->taskq) {
						taskqueue_drain(pq->taskq,
								&pq->task);
					}
				}
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
			} else {
				err = ether_poll_deregister(ifp);
				for (i = 0; i < sc->num_rings; i++) {
					pq = sc->queues + i;
					PTNET_Q_LOCK(pq);
					pq->ptring->guest_need_kick = 1;
					PTNET_Q_UNLOCK(pq);
				}
			}
		}
#endif /* DEVICE_POLLING */
		ifp->if_capenable = ifr->ifr_reqcap;
		break;

	case SIOCSIFMTU:
		/* We support any reasonable MTU.
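		 * The upper bound is PTNET_MAX_PKT_SIZE (64KB), which is
		 * also what sizes the per-packet TX slot budget
		 * (sc->min_tx_space).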
*/ 807 if (ifr->ifr_mtu < ETHERMIN || 808 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { 809 err = EINVAL; 810 } else { 811 PTNET_CORE_LOCK(sc); 812 ifp->if_mtu = ifr->ifr_mtu; 813 PTNET_CORE_UNLOCK(sc); 814 } 815 break; 816 817 case SIOCSIFMEDIA: 818 case SIOCGIFMEDIA: 819 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 820 break; 821 822 default: 823 err = ether_ioctl(ifp, cmd, data); 824 break; 825 } 826 827 return err; 828 } 829 830 static int 831 ptnet_init_locked(struct ptnet_softc *sc) 832 { 833 if_t ifp = sc->ifp; 834 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 835 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 836 unsigned int nm_buf_size; 837 int ret; 838 839 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 840 return 0; /* nothing to do */ 841 } 842 843 device_printf(sc->dev, "%s\n", __func__); 844 845 /* Translate offload capabilities according to if_capenable. */ 846 ifp->if_hwassist = 0; 847 if (ifp->if_capenable & IFCAP_TXCSUM) 848 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; 849 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) 850 ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; 851 if (ifp->if_capenable & IFCAP_TSO4) 852 ifp->if_hwassist |= CSUM_IP_TSO; 853 if (ifp->if_capenable & IFCAP_TSO6) 854 ifp->if_hwassist |= CSUM_IP6_TSO; 855 856 /* 857 * Prepare the interface for netmap mode access. 858 */ 859 netmap_update_config(na_dr); 860 861 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); 862 if (ret) { 863 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); 864 return ret; 865 } 866 867 if (sc->ptna->backend_regifs == 0) { 868 ret = ptnet_nm_krings_create(na_nm); 869 if (ret) { 870 device_printf(sc->dev, "ptnet_nm_krings_create() " 871 "failed\n"); 872 goto err_mem_finalize; 873 } 874 875 ret = netmap_mem_rings_create(na_dr); 876 if (ret) { 877 device_printf(sc->dev, "netmap_mem_rings_create() " 878 "failed\n"); 879 goto err_rings_create; 880 } 881 882 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); 883 if (ret) { 884 device_printf(sc->dev, "netmap_mem_get_lut() " 885 "failed\n"); 886 goto err_get_lut; 887 } 888 } 889 890 ret = ptnet_nm_register(na_dr, 1 /* on */); 891 if (ret) { 892 goto err_register; 893 } 894 895 nm_buf_size = NETMAP_BUF_SIZE(na_dr); 896 897 KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); 898 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; 899 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, 900 sc->min_tx_space); 901 #ifdef PTNETMAP_STATS 902 callout_reset(&sc->tick, hz, ptnet_tick, sc); 903 #endif 904 905 ifp->if_drv_flags |= IFF_DRV_RUNNING; 906 907 return 0; 908 909 err_register: 910 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); 911 err_get_lut: 912 netmap_mem_rings_delete(na_dr); 913 err_rings_create: 914 ptnet_nm_krings_delete(na_nm); 915 err_mem_finalize: 916 netmap_mem_deref(na_dr->nm_mem, na_dr); 917 918 return ret; 919 } 920 921 /* To be called under core lock. */ 922 static int 923 ptnet_stop(struct ptnet_softc *sc) 924 { 925 if_t ifp = sc->ifp; 926 struct netmap_adapter *na_dr = &sc->ptna->dr.up; 927 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; 928 int i; 929 930 device_printf(sc->dev, "%s\n", __func__); 931 932 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 933 return 0; /* nothing to do */ 934 } 935 936 /* Clear the driver-ready flag, and synchronize with all the queues, 937 * so that after this loop we are sure nobody is working anymore with 938 * the device. This scheme is taken from the vtnet driver. 
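	 * Locking and unlocking each queue after clearing IFF_DRV_RUNNING
	 * acts as a barrier: any worker that saw the flag set has already
	 * dropped its queue lock, and will notice the flag on its next pass.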
*/ 939 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 940 callout_stop(&sc->tick); 941 for (i = 0; i < sc->num_rings; i++) { 942 PTNET_Q_LOCK(sc->queues + i); 943 PTNET_Q_UNLOCK(sc->queues + i); 944 } 945 946 ptnet_nm_register(na_dr, 0 /* off */); 947 948 if (sc->ptna->backend_regifs == 0) { 949 netmap_mem_rings_delete(na_dr); 950 ptnet_nm_krings_delete(na_nm); 951 } 952 netmap_mem_deref(na_dr->nm_mem, na_dr); 953 954 return 0; 955 } 956 957 static void 958 ptnet_qflush(if_t ifp) 959 { 960 struct ptnet_softc *sc = if_getsoftc(ifp); 961 int i; 962 963 /* Flush all the bufrings and do the interface flush. */ 964 for (i = 0; i < sc->num_rings; i++) { 965 struct ptnet_queue *pq = sc->queues + i; 966 struct mbuf *m; 967 968 PTNET_Q_LOCK(pq); 969 if (pq->bufring) { 970 while ((m = buf_ring_dequeue_sc(pq->bufring))) { 971 m_freem(m); 972 } 973 } 974 PTNET_Q_UNLOCK(pq); 975 } 976 977 if_qflush(ifp); 978 } 979 980 static int 981 ptnet_media_change(if_t ifp) 982 { 983 struct ptnet_softc *sc = if_getsoftc(ifp); 984 struct ifmedia *ifm = &sc->media; 985 986 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 987 return EINVAL; 988 } 989 990 return 0; 991 } 992 993 #if __FreeBSD_version >= 1100000 994 static uint64_t 995 ptnet_get_counter(if_t ifp, ift_counter cnt) 996 { 997 struct ptnet_softc *sc = if_getsoftc(ifp); 998 struct ptnet_queue_stats stats[2]; 999 int i; 1000 1001 /* Accumulate statistics over the queues. */ 1002 memset(stats, 0, sizeof(stats)); 1003 for (i = 0; i < sc->num_rings; i++) { 1004 struct ptnet_queue *pq = sc->queues + i; 1005 int idx = (i < sc->num_tx_rings) ? 0 : 1; 1006 1007 stats[idx].packets += pq->stats.packets; 1008 stats[idx].bytes += pq->stats.bytes; 1009 stats[idx].errors += pq->stats.errors; 1010 stats[idx].iqdrops += pq->stats.iqdrops; 1011 stats[idx].mcasts += pq->stats.mcasts; 1012 } 1013 1014 switch (cnt) { 1015 case IFCOUNTER_IPACKETS: 1016 return (stats[1].packets); 1017 case IFCOUNTER_IQDROPS: 1018 return (stats[1].iqdrops); 1019 case IFCOUNTER_IERRORS: 1020 return (stats[1].errors); 1021 case IFCOUNTER_OPACKETS: 1022 return (stats[0].packets); 1023 case IFCOUNTER_OBYTES: 1024 return (stats[0].bytes); 1025 case IFCOUNTER_OMCASTS: 1026 return (stats[0].mcasts); 1027 default: 1028 return (if_get_counter_default(ifp, cnt)); 1029 } 1030 } 1031 #endif 1032 1033 1034 #ifdef PTNETMAP_STATS 1035 /* Called under core lock. */ 1036 static void 1037 ptnet_tick(void *opaque) 1038 { 1039 struct ptnet_softc *sc = opaque; 1040 int i; 1041 1042 for (i = 0; i < sc->num_rings; i++) { 1043 struct ptnet_queue *pq = sc->queues + i; 1044 struct ptnet_queue_stats cur = pq->stats; 1045 struct timeval now; 1046 unsigned int delta; 1047 1048 microtime(&now); 1049 delta = now.tv_usec - sc->last_ts.tv_usec + 1050 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; 1051 delta /= 1000; /* in milliseconds */ 1052 1053 if (delta == 0) 1054 continue; 1055 1056 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " 1057 "intr %lu\n", i, delta, 1058 (cur.packets - pq->last_stats.packets), 1059 (cur.kicks - pq->last_stats.kicks), 1060 (cur.intrs - pq->last_stats.intrs)); 1061 pq->last_stats = cur; 1062 } 1063 microtime(&sc->last_ts); 1064 callout_schedule(&sc->tick, hz); 1065 } 1066 #endif /* PTNETMAP_STATS */ 1067 1068 static void 1069 ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) 1070 { 1071 /* We are always active, as the backend netmap port is 1072 * always open in netmap mode. 
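	 * Accordingly, we report the link as unconditionally up, at the
	 * nominal 10G full-duplex media registered at attach time.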
*/ 1073 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1074 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; 1075 } 1076 1077 static uint32_t 1078 ptnet_nm_ptctl(if_t ifp, uint32_t cmd) 1079 { 1080 struct ptnet_softc *sc = if_getsoftc(ifp); 1081 int ret; 1082 1083 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); 1084 ret = bus_read_4(sc->iomem, PTNET_IO_PTSTS); 1085 device_printf(sc->dev, "PTCTL %u, ret %u\n", cmd, ret); 1086 1087 return ret; 1088 } 1089 1090 static int 1091 ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, 1092 unsigned *rxr, unsigned *rxd) 1093 { 1094 struct ptnet_softc *sc = if_getsoftc(na->ifp); 1095 1096 *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); 1097 *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); 1098 *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); 1099 *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); 1100 1101 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n", 1102 *txr, *rxr, *txd, *rxd); 1103 1104 return 0; 1105 } 1106 1107 static void 1108 ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) 1109 { 1110 int i; 1111 1112 /* Sync krings from the host, reading from 1113 * CSB. */ 1114 for (i = 0; i < sc->num_rings; i++) { 1115 struct ptnet_ring *ptring = sc->queues[i].ptring; 1116 struct netmap_kring *kring; 1117 1118 if (i < na->num_tx_rings) { 1119 kring = na->tx_rings + i; 1120 } else { 1121 kring = na->rx_rings + i - na->num_tx_rings; 1122 } 1123 kring->rhead = kring->ring->head = ptring->head; 1124 kring->rcur = kring->ring->cur = ptring->cur; 1125 kring->nr_hwcur = ptring->hwcur; 1126 kring->nr_hwtail = kring->rtail = 1127 kring->ring->tail = ptring->hwtail; 1128 1129 ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, 1130 ptring->hwcur, ptring->head, ptring->cur, 1131 ptring->hwtail); 1132 ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", 1133 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, 1134 kring->ring->head, kring->ring->cur, kring->nr_hwtail, 1135 kring->rtail, kring->ring->tail); 1136 } 1137 } 1138 1139 static void 1140 ptnet_update_vnet_hdr(struct ptnet_softc *sc) 1141 { 1142 sc->vnet_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; 1143 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; 1144 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, sc->vnet_hdr_len); 1145 } 1146 1147 static int 1148 ptnet_nm_register(struct netmap_adapter *na, int onoff) 1149 { 1150 /* device-specific */ 1151 if_t ifp = na->ifp; 1152 struct ptnet_softc *sc = if_getsoftc(ifp); 1153 int native = (na == &sc->ptna->hwup.up); 1154 struct ptnet_queue *pq; 1155 enum txrx t; 1156 int ret = 0; 1157 int i; 1158 1159 if (!onoff) { 1160 sc->ptna->backend_regifs--; 1161 } 1162 1163 /* If this is the last netmap client, guest interrupt enable flags may 1164 * be in arbitrary state. Since these flags are going to be used also 1165 * by the netdevice driver, we have to make sure to start with 1166 * notifications enabled. Also, schedule NAPI to flush pending packets 1167 * in the RX rings, since we will not receive further interrupts 1168 * until these will be processed. */ 1169 if (native && !onoff && na->active_fds == 0) { 1170 D("Exit netmap mode, re-enable interrupts"); 1171 for (i = 0; i < sc->num_rings; i++) { 1172 pq = sc->queues + i; 1173 pq->ptring->guest_need_kick = 1; 1174 } 1175 } 1176 1177 if (onoff) { 1178 if (sc->ptna->backend_regifs == 0) { 1179 /* Initialize notification enable fields in the CSB. 
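			 * host_need_kick tells us whether the host wants to
			 * be kicked after we post new slots (checked in
			 * ptnet_ring_update()), while guest_need_kick is how
			 * we ask the host for interrupts. TX notifications
			 * start disabled; RX ones start enabled unless
			 * polling is configured.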
			 */
			for (i = 0; i < sc->num_rings; i++) {
				pq = sc->queues + i;
				pq->ptring->host_need_kick = 1;
				pq->ptring->guest_need_kick =
					(!(ifp->if_capenable & IFCAP_POLLING)
						&& i >= sc->num_tx_rings);
			}

			/* Set the virtio-net header length. */
			ptnet_update_vnet_hdr(sc);

			/* Make sure the host adapter passed through is ready
			 * for txsync/rxsync. */
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_REGIF);
			if (ret) {
				return ret;
			}
		}

		/* Sync from CSB must be done after REGIF PTCTL. Skip this
		 * step only if this is a netmap client and it is not the
		 * first one. */
		if ((!native && sc->ptna->backend_regifs == 0) ||
				(native && na->active_fds == 0)) {
			ptnet_sync_from_csb(sc, na);
		}

		/* If not native, don't call nm_set_native_flags, since we
		 * don't want to replace the if_transmit method, nor set
		 * NAF_NETMAP_ON. */
		if (native) {
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = &NMR(na, t)[i];

					if (nm_kring_pending_on(kring)) {
						kring->nr_mode = NKR_NETMAP_ON;
					}
				}
			}
			nm_set_native_flags(na);
		}

	} else {
		if (native) {
			nm_clear_native_flags(na);
			for_rx_tx(t) {
				for (i = 0; i <= nma_get_nrings(na, t); i++) {
					struct netmap_kring *kring = &NMR(na, t)[i];

					if (nm_kring_pending_off(kring)) {
						kring->nr_mode = NKR_NETMAP_OFF;
					}
				}
			}
		}

		/* Sync from CSB must be done before UNREGIF PTCTL, on the last
		 * netmap client. */
		if (native && na->active_fds == 0) {
			ptnet_sync_from_csb(sc, na);
		}

		if (sc->ptna->backend_regifs == 0) {
			ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_UNREGIF);
		}
	}

	if (onoff) {
		sc->ptna->backend_regifs++;
	}

	return ret;
}

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
	if (notify) {
		ptnet_kick(pq);
	}

	return 0;
}

static void
ptnet_tx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;

	DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
	pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

	if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
		return;
	}

	/* Schedule the taskqueue to flush the pending transmission requests.
	 * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
	 * at least when using MSI-X interrupts. The if_em driver, instead,
	 * schedules the taskqueue when using legacy interrupts.
*/ 1303 taskqueue_enqueue(pq->taskq, &pq->task); 1304 } 1305 1306 static void 1307 ptnet_rx_intr(void *opaque) 1308 { 1309 struct ptnet_queue *pq = opaque; 1310 struct ptnet_softc *sc = pq->sc; 1311 unsigned int unused; 1312 1313 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); 1314 #ifdef PTNETMAP_STATS 1315 pq->stats.intrs ++; 1316 #endif /* PTNETMAP_STATS */ 1317 1318 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { 1319 return; 1320 } 1321 1322 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, 1323 * receive-side processing is executed directly in the interrupt 1324 * service routine. Alternatively, we may schedule the taskqueue. */ 1325 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); 1326 } 1327 1328 /* The following offloadings-related functions are taken from the vtnet 1329 * driver, but the same functionality is required for the ptnet driver. 1330 * As a temporary solution, I copied this code from vtnet and I started 1331 * to generalize it (taking away driver-specific statistic accounting), 1332 * making as little modifications as possible. 1333 * In the future we need to share these functions between vtnet and ptnet. 1334 */ 1335 static int 1336 ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) 1337 { 1338 struct ether_vlan_header *evh; 1339 int offset; 1340 1341 evh = mtod(m, struct ether_vlan_header *); 1342 if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1343 /* BMV: We should handle nested VLAN tags too. */ 1344 *etype = ntohs(evh->evl_proto); 1345 offset = sizeof(struct ether_vlan_header); 1346 } else { 1347 *etype = ntohs(evh->evl_encap_proto); 1348 offset = sizeof(struct ether_header); 1349 } 1350 1351 switch (*etype) { 1352 #if defined(INET) 1353 case ETHERTYPE_IP: { 1354 struct ip *ip, iphdr; 1355 if (__predict_false(m->m_len < offset + sizeof(struct ip))) { 1356 m_copydata(m, offset, sizeof(struct ip), 1357 (caddr_t) &iphdr); 1358 ip = &iphdr; 1359 } else 1360 ip = (struct ip *)(m->m_data + offset); 1361 *proto = ip->ip_p; 1362 *start = offset + (ip->ip_hl << 2); 1363 break; 1364 } 1365 #endif 1366 #if defined(INET6) 1367 case ETHERTYPE_IPV6: 1368 *proto = -1; 1369 *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); 1370 /* Assert the network stack sent us a valid packet. */ 1371 KASSERT(*start > offset, 1372 ("%s: mbuf %p start %d offset %d proto %d", __func__, m, 1373 *start, offset, *proto)); 1374 break; 1375 #endif 1376 default: 1377 /* Here we should increment the tx_csum_bad_ethtype counter. */ 1378 return (EINVAL); 1379 } 1380 1381 return (0); 1382 } 1383 1384 static int 1385 ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, 1386 int offset, bool allow_ecn, struct virtio_net_hdr *hdr) 1387 { 1388 static struct timeval lastecn; 1389 static int curecn; 1390 struct tcphdr *tcp, tcphdr; 1391 1392 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { 1393 m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); 1394 tcp = &tcphdr; 1395 } else 1396 tcp = (struct tcphdr *)(m->m_data + offset); 1397 1398 hdr->hdr_len = offset + (tcp->th_off << 2); 1399 hdr->gso_size = m->m_pkthdr.tso_segsz; 1400 hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : 1401 VIRTIO_NET_HDR_GSO_TCPV6; 1402 1403 if (tcp->th_flags & TH_CWR) { 1404 /* 1405 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, 1406 * ECN support is not on a per-interface basis, but globally via 1407 * the net.inet.tcp.ecn.enable sysctl knob. The default is off. 
		 */
		if (!allow_ecn) {
			if (ppsratecheck(&lastecn, &curecn, 1))
				if_printf(ifp,
				    "TSO with ECN not negotiated with host\n");
			return (ENOTSUP);
		}
		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	}

	/* Here we should increment tx_tso counter. */

	return (0);
}

static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
		 struct virtio_net_hdr *hdr)
{
	int flags, etype, csum_start, proto, error;

	flags = m->m_pkthdr.csum_flags;

	error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
	if (error)
		goto drop;

	if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
	    (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
		/*
		 * We could compare the IP protocol vs the CSUM_ flag too,
		 * but that really should not be necessary.
		 */
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;
		/* Here we should increment the tx_csum counter. */
	}

	if (flags & CSUM_TSO) {
		if (__predict_false(proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf.
			 * Here we should increment the tx_tso_not_tcp
			 * counter. */
			goto drop;
		}

		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
		    ("%s: mbuf %p TSO without checksum offload %#x",
		    __func__, m, flags));

		error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
					     allow_ecn, hdr);
		if (error)
			goto drop;
	}

	return (m);

drop:
	m_freem(m);
	return (NULL);
}

static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evh;

	evh = mtod(m, struct ether_vlan_header *);
	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
			struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
	int offset = hdr->csum_start + hdr->csum_offset;
#endif

	/* Only do a basic sanity check on the offset. */
	switch (eth_type) {
#if defined(INET)
	case ETHERTYPE_IP:
		if (__predict_false(offset < ip_start + sizeof(struct ip)))
			return (1);
		break;
#endif
#if defined(INET6)
	case ETHERTYPE_IPV6:
		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
			return (1);
		break;
#endif
	default:
		/* Here we should increment the rx_csum_bad_ethtype counter. */
		return (1);
	}

	/*
	 * Use the offset to determine the appropriate CSUM_* flags. This is
	 * a bit dirty, but we can get by with it since the checksum offsets
	 * happen to be different. We assume the host does not do IPv4
	 * header checksum offloading.
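	 * (uh_sum sits at offset 6 within the UDP header, th_sum at offset
	 * 16 within the TCP header, and the SCTP checksum at offset 8, so
	 * the three cases below cannot be confused with each other.)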
1523 */ 1524 switch (hdr->csum_offset) { 1525 case offsetof(struct udphdr, uh_sum): 1526 case offsetof(struct tcphdr, th_sum): 1527 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1528 m->m_pkthdr.csum_data = 0xFFFF; 1529 break; 1530 case offsetof(struct sctphdr, checksum): 1531 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1532 break; 1533 default: 1534 /* Here we should increment the rx_csum_bad_offset counter. */ 1535 return (1); 1536 } 1537 1538 return (0); 1539 } 1540 1541 static int 1542 ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, 1543 struct virtio_net_hdr *hdr) 1544 { 1545 int offset, proto; 1546 1547 switch (eth_type) { 1548 #if defined(INET) 1549 case ETHERTYPE_IP: { 1550 struct ip *ip; 1551 if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) 1552 return (1); 1553 ip = (struct ip *)(m->m_data + ip_start); 1554 proto = ip->ip_p; 1555 offset = ip_start + (ip->ip_hl << 2); 1556 break; 1557 } 1558 #endif 1559 #if defined(INET6) 1560 case ETHERTYPE_IPV6: 1561 if (__predict_false(m->m_len < ip_start + 1562 sizeof(struct ip6_hdr))) 1563 return (1); 1564 offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); 1565 if (__predict_false(offset < 0)) 1566 return (1); 1567 break; 1568 #endif 1569 default: 1570 /* Here we should increment the rx_csum_bad_ethtype counter. */ 1571 return (1); 1572 } 1573 1574 switch (proto) { 1575 case IPPROTO_TCP: 1576 if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) 1577 return (1); 1578 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1579 m->m_pkthdr.csum_data = 0xFFFF; 1580 break; 1581 case IPPROTO_UDP: 1582 if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) 1583 return (1); 1584 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1585 m->m_pkthdr.csum_data = 0xFFFF; 1586 break; 1587 case IPPROTO_SCTP: 1588 if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) 1589 return (1); 1590 m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; 1591 break; 1592 default: 1593 /* 1594 * For the remaining protocols, FreeBSD does not support 1595 * checksum offloading, so the checksum will be recomputed. 1596 */ 1597 #if 0 1598 if_printf(ifp, "cksum offload of unsupported " 1599 "protocol eth_type=%#x proto=%d csum_start=%d " 1600 "csum_offset=%d\n", __func__, eth_type, proto, 1601 hdr->csum_start, hdr->csum_offset); 1602 #endif 1603 break; 1604 } 1605 1606 return (0); 1607 } 1608 1609 /* 1610 * Set the appropriate CSUM_* flags. Unfortunately, the information 1611 * provided is not directly useful to us. The VirtIO header gives the 1612 * offset of the checksum, which is all Linux needs, but this is not 1613 * how FreeBSD does things. We are forced to peek inside the packet 1614 * a bit. 1615 * 1616 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD 1617 * could accept the offsets and let the stack figure it out. 1618 */ 1619 static int 1620 ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) 1621 { 1622 struct ether_header *eh; 1623 struct ether_vlan_header *evh; 1624 uint16_t eth_type; 1625 int offset, error; 1626 1627 eh = mtod(m, struct ether_header *); 1628 eth_type = ntohs(eh->ether_type); 1629 if (eth_type == ETHERTYPE_VLAN) { 1630 /* BMV: We should handle nested VLAN tags too. 
*/ 1631 evh = mtod(m, struct ether_vlan_header *); 1632 eth_type = ntohs(evh->evl_proto); 1633 offset = sizeof(struct ether_vlan_header); 1634 } else 1635 offset = sizeof(struct ether_header); 1636 1637 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) 1638 error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); 1639 else 1640 error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); 1641 1642 return (error); 1643 } 1644 /* End of offloading-related functions to be shared with vtnet. */ 1645 1646 static inline void 1647 ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring) 1648 { 1649 struct netmap_ring *ring = kring->ring; 1650 1651 /* Update hwcur and hwtail as known by the host. */ 1652 ptnetmap_guest_read_kring_csb(ptring, kring); 1653 1654 /* nm_sync_finalize */ 1655 ring->tail = kring->rtail = kring->nr_hwtail; 1656 } 1657 1658 static void 1659 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, 1660 unsigned int head, unsigned int sync_flags) 1661 { 1662 struct netmap_ring *ring = kring->ring; 1663 struct ptnet_ring *ptring = pq->ptring; 1664 1665 /* Some packets have been pushed to the netmap ring. We have 1666 * to tell the host to process the new packets, updating cur 1667 * and head in the CSB. */ 1668 ring->head = ring->cur = head; 1669 1670 /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ 1671 kring->rcur = kring->rhead = head; 1672 1673 ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead); 1674 1675 /* Kick the host if needed. */ 1676 if (NM_ACCESS_ONCE(ptring->host_need_kick)) { 1677 ptring->sync_flags = sync_flags; 1678 ptnet_kick(pq); 1679 } 1680 } 1681 1682 #define PTNET_TX_NOSPACE(_h, _k, _min) \ 1683 ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ 1684 (_k)->rtail - (_h)) < (_min) 1685 1686 /* This function may be called by the network stack, or by 1687 * by the taskqueue thread. */ 1688 static int 1689 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, 1690 bool may_resched) 1691 { 1692 struct ptnet_softc *sc = pq->sc; 1693 bool have_vnet_hdr = sc->vnet_hdr_len; 1694 struct netmap_adapter *na = &sc->ptna->dr.up; 1695 if_t ifp = sc->ifp; 1696 unsigned int batch_count = 0; 1697 struct ptnet_ring *ptring; 1698 struct netmap_kring *kring; 1699 struct netmap_ring *ring; 1700 struct netmap_slot *slot; 1701 unsigned int count = 0; 1702 unsigned int minspace; 1703 unsigned int head; 1704 unsigned int lim; 1705 struct mbuf *mhead; 1706 struct mbuf *mf; 1707 int nmbuf_bytes; 1708 uint8_t *nmbuf; 1709 1710 if (!PTNET_Q_TRYLOCK(pq)) { 1711 /* We failed to acquire the lock, schedule the taskqueue. */ 1712 RD(1, "Deferring TX work"); 1713 if (may_resched) { 1714 taskqueue_enqueue(pq->taskq, &pq->task); 1715 } 1716 1717 return 0; 1718 } 1719 1720 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 1721 PTNET_Q_UNLOCK(pq); 1722 RD(1, "Interface is down"); 1723 return ENETDOWN; 1724 } 1725 1726 ptring = pq->ptring; 1727 kring = na->tx_rings + pq->kring_id; 1728 ring = kring->ring; 1729 lim = kring->nkr_num_slots - 1; 1730 head = ring->head; 1731 minspace = sc->min_tx_space; 1732 1733 while (count < budget) { 1734 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1735 /* We ran out of slot, let's see if the host has 1736 * freed up some, by reading hwcur and hwtail from 1737 * the CSB. */ 1738 ptnet_sync_tail(ptring, kring); 1739 1740 if (PTNET_TX_NOSPACE(head, kring, minspace)) { 1741 /* Still no slots available. 
Reactivate the 1742 * interrupts so that we can be notified 1743 * when some free slots are made available by 1744 * the host. */ 1745 ptring->guest_need_kick = 1; 1746 1747 /* Double-check. */ 1748 ptnet_sync_tail(ptring, kring); 1749 if (likely(PTNET_TX_NOSPACE(head, kring, 1750 minspace))) { 1751 break; 1752 } 1753 1754 RD(1, "Found more slots by doublecheck"); 1755 /* More slots were freed before reactivating 1756 * the interrupts. */ 1757 ptring->guest_need_kick = 0; 1758 } 1759 } 1760 1761 mhead = drbr_peek(ifp, pq->bufring); 1762 if (!mhead) { 1763 break; 1764 } 1765 1766 /* Initialize transmission state variables. */ 1767 slot = ring->slot + head; 1768 nmbuf = NMB(na, slot); 1769 nmbuf_bytes = 0; 1770 1771 /* If needed, prepare the virtio-net header at the beginning 1772 * of the first slot. */ 1773 if (have_vnet_hdr) { 1774 struct virtio_net_hdr *vh = 1775 (struct virtio_net_hdr *)nmbuf; 1776 1777 /* For performance, we could replace this memset() with 1778 * two 8-bytes-wide writes. */ 1779 memset(nmbuf, 0, PTNET_HDR_SIZE); 1780 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { 1781 mhead = ptnet_tx_offload(ifp, mhead, false, 1782 vh); 1783 if (unlikely(!mhead)) { 1784 /* Packet dropped because errors 1785 * occurred while preparing the vnet 1786 * header. Let's go ahead with the next 1787 * packet. */ 1788 pq->stats.errors ++; 1789 drbr_advance(ifp, pq->bufring); 1790 continue; 1791 } 1792 } 1793 ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " 1794 "csum_start %u csum_ofs %u hdr_len = %u " 1795 "gso_size %u gso_type %x", __func__, 1796 mhead->m_pkthdr.csum_flags, vh->flags, 1797 vh->csum_start, vh->csum_offset, vh->hdr_len, 1798 vh->gso_size, vh->gso_type); 1799 1800 nmbuf += PTNET_HDR_SIZE; 1801 nmbuf_bytes += PTNET_HDR_SIZE; 1802 } 1803 1804 for (mf = mhead; mf; mf = mf->m_next) { 1805 uint8_t *mdata = mf->m_data; 1806 int mlen = mf->m_len; 1807 1808 for (;;) { 1809 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; 1810 1811 if (mlen < copy) { 1812 copy = mlen; 1813 } 1814 memcpy(nmbuf, mdata, copy); 1815 1816 mdata += copy; 1817 mlen -= copy; 1818 nmbuf += copy; 1819 nmbuf_bytes += copy; 1820 1821 if (!mlen) { 1822 break; 1823 } 1824 1825 slot->len = nmbuf_bytes; 1826 slot->flags = NS_MOREFRAG; 1827 1828 head = nm_next(head, lim); 1829 KASSERT(head != ring->tail, 1830 ("Unexpectedly run out of TX space")); 1831 slot = ring->slot + head; 1832 nmbuf = NMB(na, slot); 1833 nmbuf_bytes = 0; 1834 } 1835 } 1836 1837 /* Complete last slot and update head. */ 1838 slot->len = nmbuf_bytes; 1839 slot->flags = 0; 1840 head = nm_next(head, lim); 1841 1842 /* Consume the packet just processed. */ 1843 drbr_advance(ifp, pq->bufring); 1844 1845 /* Copy the packet to listeners. 
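		 * (i.e. any BPF taps, such as tcpdump, attached to the
		 * interface).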
*/ 1846 ETHER_BPF_MTAP(ifp, mhead); 1847 1848 pq->stats.packets ++; 1849 pq->stats.bytes += mhead->m_pkthdr.len; 1850 if (mhead->m_flags & M_MCAST) { 1851 pq->stats.mcasts ++; 1852 } 1853 1854 m_freem(mhead); 1855 1856 count ++; 1857 if (++batch_count == PTNET_TX_BATCH) { 1858 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1859 batch_count = 0; 1860 } 1861 } 1862 1863 if (batch_count) { 1864 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); 1865 } 1866 1867 if (count >= budget && may_resched) { 1868 DBG(RD(1, "out of budget: resched, %d mbufs pending\n", 1869 drbr_inuse(ifp, pq->bufring))); 1870 taskqueue_enqueue(pq->taskq, &pq->task); 1871 } 1872 1873 PTNET_Q_UNLOCK(pq); 1874 1875 return count; 1876 } 1877 1878 static int 1879 ptnet_transmit(if_t ifp, struct mbuf *m) 1880 { 1881 struct ptnet_softc *sc = if_getsoftc(ifp); 1882 struct ptnet_queue *pq; 1883 unsigned int queue_idx; 1884 int err; 1885 1886 DBG(device_printf(sc->dev, "transmit %p\n", m)); 1887 1888 /* Insert 802.1Q header if needed. */ 1889 if (m->m_flags & M_VLANTAG) { 1890 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1891 if (m == NULL) { 1892 return ENOBUFS; 1893 } 1894 m->m_flags &= ~M_VLANTAG; 1895 } 1896 1897 /* Get the flow-id if available. */ 1898 queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? 1899 m->m_pkthdr.flowid : curcpu; 1900 1901 if (unlikely(queue_idx >= sc->num_tx_rings)) { 1902 queue_idx %= sc->num_tx_rings; 1903 } 1904 1905 pq = sc->queues + queue_idx; 1906 1907 err = drbr_enqueue(ifp, pq->bufring, m); 1908 if (err) { 1909 /* ENOBUFS when the bufring is full */ 1910 RD(1, "%s: drbr_enqueue() failed %d\n", 1911 __func__, err); 1912 pq->stats.errors ++; 1913 return err; 1914 } 1915 1916 if (ifp->if_capenable & IFCAP_POLLING) { 1917 /* If polling is on, the transmit queues will be 1918 * drained by the poller. */ 1919 return 0; 1920 } 1921 1922 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); 1923 1924 return (err < 0) ? 
err : 0; 1925 } 1926 1927 static unsigned int 1928 ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) 1929 { 1930 struct netmap_ring *ring = kring->ring; 1931 struct netmap_slot *slot = ring->slot + head; 1932 1933 for (;;) { 1934 head = nm_next(head, kring->nkr_num_slots - 1); 1935 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { 1936 break; 1937 } 1938 slot = ring->slot + head; 1939 } 1940 1941 return head; 1942 } 1943 1944 static inline struct mbuf * 1945 ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) 1946 { 1947 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; 1948 1949 do { 1950 unsigned int copy; 1951 1952 if (mtail->m_len == MCLBYTES) { 1953 struct mbuf *mf; 1954 1955 mf = m_getcl(M_NOWAIT, MT_DATA, 0); 1956 if (unlikely(!mf)) { 1957 return NULL; 1958 } 1959 1960 mtail->m_next = mf; 1961 mtail = mf; 1962 mdata = mtod(mtail, uint8_t *); 1963 mtail->m_len = 0; 1964 } 1965 1966 copy = MCLBYTES - mtail->m_len; 1967 if (nmbuf_len < copy) { 1968 copy = nmbuf_len; 1969 } 1970 1971 memcpy(mdata, nmbuf, copy); 1972 1973 nmbuf += copy; 1974 nmbuf_len -= copy; 1975 mdata += copy; 1976 mtail->m_len += copy; 1977 } while (nmbuf_len); 1978 1979 return mtail; 1980 } 1981 1982 static int 1983 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) 1984 { 1985 struct ptnet_softc *sc = pq->sc; 1986 bool have_vnet_hdr = sc->vnet_hdr_len; 1987 struct ptnet_ring *ptring = pq->ptring; 1988 struct netmap_adapter *na = &sc->ptna->dr.up; 1989 struct netmap_kring *kring = na->rx_rings + pq->kring_id; 1990 struct netmap_ring *ring = kring->ring; 1991 unsigned int const lim = kring->nkr_num_slots - 1; 1992 unsigned int head = ring->head; 1993 unsigned int batch_count = 0; 1994 if_t ifp = sc->ifp; 1995 unsigned int count = 0; 1996 1997 PTNET_Q_LOCK(pq); 1998 1999 if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { 2000 goto unlock; 2001 } 2002 2003 kring->nr_kflags &= ~NKR_PENDINTR; 2004 2005 while (count < budget) { 2006 unsigned int prev_head = head; 2007 struct mbuf *mhead, *mtail; 2008 struct virtio_net_hdr *vh; 2009 struct netmap_slot *slot; 2010 unsigned int nmbuf_len; 2011 uint8_t *nmbuf; 2012 host_sync: 2013 if (head == ring->tail) { 2014 /* We ran out of slot, let's see if the host has 2015 * added some, by reading hwcur and hwtail from 2016 * the CSB. */ 2017 ptnet_sync_tail(ptring, kring); 2018 2019 if (head == ring->tail) { 2020 /* Still no slots available. Reactivate 2021 * interrupts as they were disabled by the 2022 * host thread right before issuing the 2023 * last interrupt. */ 2024 ptring->guest_need_kick = 1; 2025 2026 /* Double-check. */ 2027 ptnet_sync_tail(ptring, kring); 2028 if (likely(head == ring->tail)) { 2029 break; 2030 } 2031 ptring->guest_need_kick = 0; 2032 } 2033 } 2034 2035 /* Initialize ring state variables, possibly grabbing the 2036 * virtio-net header. */ 2037 slot = ring->slot + head; 2038 nmbuf = NMB(na, slot); 2039 nmbuf_len = slot->len; 2040 2041 vh = (struct virtio_net_hdr *)nmbuf; 2042 if (have_vnet_hdr) { 2043 if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { 2044 /* There is no good reason why host should 2045 * put the header in multiple netmap slots. 2046 * If this is the case, discard. 
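				 * ptnet_rx_discard() below skips past the
				 * remaining fragments of the offending frame.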
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
	struct ptnet_softc *sc = pq->sc;
	bool have_vnet_hdr = sc->vnet_hdr_len;
	struct ptnet_ring *ptring = pq->ptring;
	struct netmap_adapter *na = &sc->ptna->dr.up;
	struct netmap_kring *kring = na->rx_rings + pq->kring_id;
	struct netmap_ring *ring = kring->ring;
	unsigned int const lim = kring->nkr_num_slots - 1;
	unsigned int head = ring->head;
	unsigned int batch_count = 0;
	if_t ifp = sc->ifp;
	unsigned int count = 0;

	PTNET_Q_LOCK(pq);

	if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		goto unlock;
	}

	kring->nr_kflags &= ~NKR_PENDINTR;

	while (count < budget) {
		unsigned int prev_head = head;
		struct mbuf *mhead, *mtail;
		struct virtio_net_hdr *vh;
		struct netmap_slot *slot;
		unsigned int nmbuf_len;
		uint8_t *nmbuf;
host_sync:
		if (head == ring->tail) {
			/* We ran out of slots, let's see if the host has
			 * added some, by reading hwcur and hwtail from
			 * the CSB. */
			ptnet_sync_tail(ptring, kring);

			if (head == ring->tail) {
				/* Still no slots available. Reactivate
				 * interrupts as they were disabled by the
				 * host thread right before issuing the
				 * last interrupt. */
				ptring->guest_need_kick = 1;

				/* Double-check. */
				ptnet_sync_tail(ptring, kring);
				if (likely(head == ring->tail)) {
					break;
				}
				ptring->guest_need_kick = 0;
			}
		}

		/* Initialize ring state variables, possibly grabbing the
		 * virtio-net header. */
		slot = ring->slot + head;
		nmbuf = NMB(na, slot);
		nmbuf_len = slot->len;

		vh = (struct virtio_net_hdr *)nmbuf;
		if (have_vnet_hdr) {
			if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
				/* There is no good reason why the host
				 * should put the header in multiple netmap
				 * slots. If this is the case, discard. */
				RD(1, "Fragmented vnet-hdr: dropping");
				head = ptnet_rx_discard(kring, head);
				pq->stats.iqdrops++;
				goto skip;
			}
			ND(1, "%s: vnet hdr: flags %x csum_start %u "
			      "csum_ofs %u hdr_len = %u gso_size %u "
			      "gso_type %x", __func__, vh->flags,
			      vh->csum_start, vh->csum_offset, vh->hdr_len,
			      vh->gso_size, vh->gso_type);
			nmbuf += PTNET_HDR_SIZE;
			nmbuf_len -= PTNET_HDR_SIZE;
		}

		/* Allocate the head of a new mbuf chain.
		 * We use m_getcl() to allocate an mbuf with standard cluster
		 * size (MCLBYTES). In the future we could use m_getjcl()
		 * to choose different sizes. */
		mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (unlikely(mhead == NULL)) {
			device_printf(sc->dev, "%s: failed to allocate mbuf "
				      "head\n", __func__);
			pq->stats.errors++;
			break;
		}

		/* Initialize the mbuf state variables. */
		mhead->m_pkthdr.len = nmbuf_len;
		mtail->m_len = 0;

		/* Scan all the netmap slots containing the current packet. */
		for (;;) {
			DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
					  "len %u, flags %u\n", __func__,
					  head, ring->tail, slot->len,
					  slot->flags));

			mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
			if (unlikely(!mtail)) {
				/* Ouch. We ran out of memory while processing
				 * a packet. We have to restore the previous
				 * head position, free the mbuf chain, and
				 * schedule the taskqueue to give the packet
				 * another chance. */
				device_printf(sc->dev, "%s: failed to allocate"
					      " mbuf frag, reset head %u --> %u\n",
					      __func__, head, prev_head);
				head = prev_head;
				m_freem(mhead);
				pq->stats.errors++;
				if (may_resched) {
					taskqueue_enqueue(pq->taskq,
							  &pq->task);
				}
				goto escape;
			}

			/* We have to increment head irrespective of the
			 * NS_MOREFRAG being set or not. */
			head = nm_next(head, lim);

			if (!(slot->flags & NS_MOREFRAG)) {
				break;
			}

			if (unlikely(head == ring->tail)) {
				/* The very last slot prepared by the host has
				 * the NS_MOREFRAG set. Drop it and continue
				 * the outer cycle (to do the double-check). */
				RD(1, "Incomplete packet: dropping");
				m_freem(mhead);
				pq->stats.iqdrops++;
				goto host_sync;
			}

			slot = ring->slot + head;
			nmbuf = NMB(na, slot);
			nmbuf_len = slot->len;
			mhead->m_pkthdr.len += nmbuf_len;
		}

		mhead->m_pkthdr.rcvif = ifp;
		mhead->m_pkthdr.csum_flags = 0;

		/* Store the queue idx in the packet header. */
		mhead->m_pkthdr.flowid = pq->kring_id;
		M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			struct ether_header *eh;

			eh = mtod(mhead, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				ptnet_vlan_tag_remove(mhead);
				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
			}
		}
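		/* Honour the checksum hints carried by the virtio-net
		 * header: packets marked NEEDS_CSUM or DATA_VALID are run
		 * through ptnet_rx_csum(), and are dropped if that fails. */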
		if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
					| VIRTIO_NET_HDR_F_DATA_VALID))) {
			if (unlikely(ptnet_rx_csum(mhead, vh))) {
				m_freem(mhead);
				RD(1, "Csum offload error: dropping");
				pq->stats.iqdrops++;
				goto skip;
			}
		}

		pq->stats.packets++;
		pq->stats.bytes += mhead->m_pkthdr.len;

		PTNET_Q_UNLOCK(pq);
		(*ifp->if_input)(ifp, mhead);
		PTNET_Q_LOCK(pq);

		if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
			/* The interface has gone down while we didn't
			 * have the lock. Stop any processing and exit. */
			goto unlock;
		}
skip:
		count++;
		if (++batch_count == PTNET_RX_BATCH) {
			/* Some packets have been pushed to the network
			 * stack. We need to update the CSB to tell the host
			 * about the new ring->cur and ring->head (RX buffer
			 * refill). */
			ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
			batch_count = 0;
		}
	}
escape:
	if (batch_count) {
		ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
	}

	if (count >= budget && may_resched) {
		/* If we ran out of budget or the double-check found new
		 * slots to process, schedule the taskqueue. */
		DBG(RD(1, "out of budget: resched h %u t %u\n",
		       head, ring->tail));
		taskqueue_enqueue(pq->taskq, &pq->task);
	}
unlock:
	PTNET_Q_UNLOCK(pq);

	return count;
}

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
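/*
 * DEVICE_POLLING support. ptnet_poll() below gives each queue an even share
 * of the global budget on the first pass; once 'borrow' is set, later passes
 * keep scanning with whatever budget is left, stopping when the budget is
 * exhausted or a full scan makes no progress.
 */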
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY differently,
 * since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	unsigned int queue_budget;
	unsigned int count = 0;
	bool borrow = false;
	int i;

	KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
	queue_budget = MAX(budget / sc->num_rings, 1);
	RD(1, "Per-queue budget is %d", queue_budget);

	while (budget) {
		unsigned int rcnt = 0;

		for (i = 0; i < sc->num_rings; i++) {
			struct ptnet_queue *pq = sc->queues + i;

			if (borrow) {
				queue_budget = MIN(queue_budget, budget);
				if (queue_budget == 0) {
					break;
				}
			}

			if (i < sc->num_tx_rings) {
				rcnt += ptnet_drain_transmit_queue(pq,
						queue_budget, false);
			} else {
				rcnt += ptnet_rx_eof(pq, queue_budget,
						     false);
			}
		}

		if (!rcnt) {
			/* A scan of the queues gave no result; we can
			 * stop here. */
			break;
		}

		if (rcnt > budget) {
			/* This may happen when initial budget < sc->num_rings,
			 * since one packet budget is given to each queue
			 * anyway. Just pretend we didn't eat "so much". */
			rcnt = budget;
		}
		count += rcnt;
		budget -= rcnt;
		borrow = true;
	}

	return count;
}
#endif /* DEVICE_POLLING */