/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *    We preallocate a block of TX mbufs (roughly as many as
 *    tx descriptors; the number is not critical) to speed up
 *    operation during transmissions. The refcount on most of
 *    these buffers is artificially bumped up so we can recycle
 *    them more easily. Also, the destructor is intercepted
 *    so we can use it as an interrupt notification to wake up
 *    processes blocked on a poll().
 *
 *    For each receive ring we allocate one "struct mbq"
 *    (an mbuf tailq plus a spinlock). We intercept packets
 *    (through if_input) on the receive path and put them in
 *    the mbq, from which the netmap receive routines can grab them.
 *
 * TX:
 *    In the generic_netmap_txsync() routine, netmap buffers are copied
 *    (or linked, in the future) to the preallocated mbufs
 *    and pushed to the transmit queue. Some of these mbufs
 *    (those with NS_REPORT, or otherwise every half ring)
 *    have refcount = 1, the others have refcount = 2.
 *    When the destructor is invoked, we take that as
 *    a notification that all mbufs up to that one in
 *    the specific ring have been completed, and generate
 *    the equivalent of a transmit interrupt.
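 *
 *    For example, with a hypothetical 1024-slot tx ring, roughly one
 *    mbuf every half ring keeps refcount 1: when the driver frees it,
 *    the intercepted destructor runs and we treat that as the
 *    "transmit interrupt" for all the preceding slots in that ring.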
 *
 * RX:
 *    Packets intercepted on the receive path are queued, by the
 *    handler installed with netmap_catch_rx(), into the per-ring
 *    "struct mbq"; generic_netmap_rxsync() then dequeues them and
 *    copies their payload into the netmap receive buffers.
 *    Notifications to the poller threads may be rate-limited by the
 *    rx interrupt mitigation timer.
 *
 */

/* __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $"); */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/socket.h> /* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>    /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include "netmap_kern.h"
#include "netmap_mem2.h"

#define rtnl_lock()     D("rtnl_lock called")
#define rtnl_unlock()   D("rtnl_unlock called")
#define MBUF_TXQ(m)     ((m)->m_pkthdr.hash)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)  do {        \
    (m)->m_ext.ext_free = (void *)fn;           \
    /* (m)->m_ext.ext_type = EXT_EXTREF; */     \
} while (0)


#define GET_MBUF_REFCNT(m)  ((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)

/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
    unsigned long txpkt;
    unsigned long txsync;
    unsigned long txirq;
    unsigned long rxpkt;
    unsigned long rxirq;
    unsigned long rxsync;
};

struct rate_context {
    unsigned refcount;
    struct timer_list timer;
    struct rate_stats new;
    struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
    printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
    struct rate_context * ctx = (struct rate_context *)arg;
    struct rate_stats cur = ctx->new;
    int r;

    RATE_PRINTK(txpkt);
    RATE_PRINTK(txsync);
    RATE_PRINTK(txirq);
    RATE_PRINTK(rxpkt);
    RATE_PRINTK(rxsync);
    RATE_PRINTK(rxirq);
    printk("\n");

    ctx->old = cur;
    r = mod_timer(&ctx->timer, jiffies +
            msecs_to_jiffies(RATE_PERIOD * 1000));
    if (unlikely(r))
        D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE    netmap_buf_size /* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
    if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
        return 0;

    return netmap_common_irq(ifp, q, work_done);
}
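
/*
 * A note on the per-ring structures set up by generic_netmap_register()
 * below: each tx ring gets a tx_pool[] array with one preallocated mbuf
 * per tx descriptor, and each rx ring gets its own "struct mbq"
 * (rx_queue). A NULL entry in tx_pool[] means that the corresponding
 * mbuf has been handed to the driver with its destructor armed (see
 * generic_set_tx_event()) and has not been replenished yet; rx_queue is
 * bounded by generic_rx_handler(), which drops packets once more than
 * 1024 mbufs are pending.
 */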
/* Enable/disable netmap mode for a generic network interface. */
int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
    struct ifnet *ifp;
    struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
    struct mbuf *m;
    int error;
    int i, r;

    if (!na)
        return EINVAL;
    ifp = na->ifp;

#ifdef REG_RESET
    error = ifp->netdev_ops->ndo_stop(ifp);
    if (error) {
        return error;
    }
#endif /* REG_RESET */

    if (enable) { /* Enable netmap mode. */
        /* Initialize the rx queue, as generic_rx_handler() can
         * be called as soon as netmap_catch_rx() returns.
         */
        for (r = 0; r < na->num_rx_rings; r++) {
            mbq_safe_init(&na->rx_rings[r].rx_queue);
            na->rx_rings[r].nr_ntc = 0;
        }

        /* Init the mitigation timer. */
        netmap_mitigation_init(gna);

        /*
         * Preallocate packet buffers for the tx rings.
         */
        for (r = 0; r < na->num_tx_rings; r++) {
            na->tx_rings[r].nr_ntc = 0;
            na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
            if (!na->tx_rings[r].tx_pool) {
                D("tx_pool allocation failed");
                error = ENOMEM;
                goto free_tx_pool;
            }
            for (i = 0; i < na->num_tx_desc; i++) {
                m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                if (!m) {
                    D("tx_pool[%d] allocation failed", i);
                    error = ENOMEM;
                    goto free_mbufs;
                }
                na->tx_rings[r].tx_pool[i] = m;
            }
        }
        rtnl_lock();
        /* Prepare to intercept incoming traffic. */
        error = netmap_catch_rx(na, 1);
        if (error) {
            D("netdev_rx_handler_register() failed");
            goto register_handler;
        }
        ifp->if_capenable |= IFCAP_NETMAP;

        /* Make netmap control the packet steering. */
        netmap_catch_packet_steering(gna, 1);

        rtnl_unlock();

#ifdef RATE
        if (rate_ctx.refcount == 0) {
            D("setup_timer()");
            memset(&rate_ctx, 0, sizeof(rate_ctx));
            setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
            if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
                D("Error: mod_timer()");
            }
        }
        rate_ctx.refcount++;
#endif /* RATE */

    } else { /* Disable netmap mode. */
        rtnl_lock();

        ifp->if_capenable &= ~IFCAP_NETMAP;

        /* Release packet steering control. */
        netmap_catch_packet_steering(gna, 0);

        /* Do not intercept packets on the rx path. */
        netmap_catch_rx(na, 0);

        rtnl_unlock();

        /* Free the mbufs going to the netmap rings. */
        for (r = 0; r < na->num_rx_rings; r++) {
            mbq_safe_purge(&na->rx_rings[r].rx_queue);
            mbq_safe_destroy(&na->rx_rings[r].rx_queue);
        }

        netmap_mitigation_cleanup(gna);

        for (r = 0; r < na->num_tx_rings; r++) {
            for (i = 0; i < na->num_tx_desc; i++) {
                m_freem(na->tx_rings[r].tx_pool[i]);
            }
            kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
        }

#ifdef RATE
        if (--rate_ctx.refcount == 0) {
            D("del_timer()");
            del_timer(&rate_ctx.timer);
        }
#endif
    }

#ifdef REG_RESET
    error = ifp->netdev_ops->ndo_open(ifp);
    if (error) {
        goto alloc_tx_pool;
    }
#endif

    return 0;

register_handler:
    rtnl_unlock();
free_tx_pool:
    r--;
    i = na->num_tx_desc;    /* Useless, but just to stay safe. */
free_mbufs:
    i--;
    for (; r >= 0; r--) {
        for (; i >= 0; i--) {
            m_freem(na->tx_rings[r].tx_pool[i]);
        }
        kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
        i = na->num_tx_desc - 1;
    }

    return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
    if (netmap_verbose)
        D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
    netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#if 0
    m->m_ext.ext_type = EXT_PACKET;
#endif
    m->m_ext.ext_free = NULL;
#if 0
    if (*(m->m_ext.ref_cnt) == 0)
        *(m->m_ext.ref_cnt) = 1;
    uma_zfree(zone_pack, m);
#endif
    IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
    u_int num_slots = kring->nkr_num_slots;
    u_int ntc = kring->nr_ntc;
    u_int hwcur = kring->nr_hwcur;
    u_int n = 0;
    struct mbuf **tx_pool = kring->tx_pool;

    while (ntc != hwcur) { /* buffers not completed */
        struct mbuf *m = tx_pool[ntc];

        if (unlikely(m == NULL)) {
            /* try to replenish the entry */
            tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
            if (unlikely(m == NULL)) {
                D("mbuf allocation failed, XXX error");
                // XXX how do we proceed ? break ?
                return -ENOMEM;
            }
#if 0
        } else if (GET_MBUF_REFCNT(m) != 1) {
            break; /* This mbuf is still busy: its refcnt is 2. */
#endif
        }
        if (unlikely(++ntc == num_slots)) {
            ntc = 0;
        }
        n++;
    }
    kring->nr_ntc = ntc;
    kring->nr_hwavail += n;
    ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

    return n;
}


/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
    u_int n = kring->nkr_num_slots;
    u_int ntc = kring->nr_ntc;
    u_int e;

    if (hwcur >= ntc) {
        e = (hwcur + ntc) / 2;
    } else { /* wrap around */
        e = (hwcur + n + ntc) / 2;
        if (e >= n) {
            e -= n;
        }
    }

    if (unlikely(e >= n)) {
        D("This cannot happen");
        e = 0;
    }

    return e;
}
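
/*
 * A concrete illustration of the computation above (hypothetical
 * numbers): with nkr_num_slots = 512, nr_ntc = 100 and hwcur = 400,
 * e = (400 + 100) / 2 = 250, halfway through the pending region.
 * In the wrap-around case, e.g. nr_ntc = 400 and hwcur = 100,
 * e = (100 + 512 + 400) / 2 = 506, which again lies between nr_ntc
 * and hwcur modulo the ring size.
 */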
/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
    struct mbuf *m;
    u_int e;

    if (kring->nr_ntc == hwcur) {
        return;
    }
    e = generic_tx_event_middle(kring, hwcur);

    m = kring->tx_pool[e];
    if (m == NULL) {
        /* This can happen if there is already an event on the netmap
           slot 'e': there is nothing to do. */
        return;
    }
    ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
    kring->tx_pool[e] = NULL;
    SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

    // XXX wmb() ?
    /* Decrement the refcount and free it if we have the last one. */
    m_freem(m);
    smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but through dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
    struct ifnet *ifp = na->ifp;
    struct netmap_kring *kring = &na->tx_rings[ring_nr];
    struct netmap_ring *ring = kring->ring;
    u_int j, k, num_slots = kring->nkr_num_slots;
    int new_slots, ntx;

    IFRATE(rate_ctx.new.txsync++);

    // TODO: handle the case of mbuf allocation failure
    /* first, reclaim completed buffers */
    generic_netmap_tx_clean(kring);

    /* Take a copy of ring->cur now, and never read it again. */
    k = ring->cur;
    if (unlikely(k >= num_slots)) {
        return netmap_ring_reinit(kring);
    }

    rmb();
    j = kring->nr_hwcur;
    /*
     * 'new_slots' counts how many new slots have been added:
     * everything from hwcur to cur, excluding reserved ones, if any.
     * nr_hwreserved starts from hwcur and counts how many slots were
     * not sent to the NIC in the previous round.
     */
    new_slots = k - j - kring->nr_hwreserved;
    if (new_slots < 0) {
        new_slots += num_slots;
    }
    ntx = 0;
    if (j != k) {
        /* Process new packets to send:
         * j is the current index in the netmap ring.
         */
        while (j != k) {
            struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
            void *addr = NMB(slot);
            u_int len = slot->len;
            struct mbuf *m;
            int tx_ret;

            if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
                return netmap_ring_reinit(kring);
            }
            /* Take an mbuf from the tx pool and copy in the user packet. */
            m = kring->tx_pool[j];
            if (unlikely(!m)) {
                RD(5, "This should never happen");
                kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
                if (unlikely(m == NULL)) {
                    D("mbuf allocation failed");
                    break;
                }
            }
            /* XXX we should ask for notifications when NS_REPORT is set,
             * or roughly every half frame. We can optimize this
             * by lazily requesting notifications only when a
             * transmission fails. Probably the best way is to
             * break on failures and set notifications when
             * ring->avail == 0 || j != k
             */
            tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
            if (unlikely(tx_ret)) {
                RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
                        tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
                /*
                 * No room for this mbuf in the device driver.
                 * Request a notification FOR A PREVIOUS MBUF,
                 * then call generic_netmap_tx_clean(kring) to do the
                 * double check and see if we can free more buffers.
                 * If there is space continue, else break.
                 * NOTE: the double check is necessary if the problem
                 * occurs in the txsync call after selrecord().
                 * Also, we need some way to tell the caller that not
                 * all buffers were queued onto the device (this was
                 * not a problem with the native netmap driver, where
                 * space is preallocated). The bridge has a similar
                 * problem and we solve it there by dropping the
                 * excess packets.
                 */
                generic_set_tx_event(kring, j);
                if (generic_netmap_tx_clean(kring)) { /* space now available */
                    continue;
                } else {
                    break;
                }
            }
            slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
            if (unlikely(++j == num_slots))
                j = 0;
            ntx++;
        }

        /* Update hwcur to the next slot to transmit. */
        kring->nr_hwcur = j;

        /*
         * Report all new slots as unavailable, even those not sent.
         * We account for them with hwreserved, so that
         * nr_hwreserved == cur - nr_hwcur (modulo the ring size).
         */
        kring->nr_hwavail -= new_slots;
        kring->nr_hwreserved = k - j;
        if (kring->nr_hwreserved < 0) {
            kring->nr_hwreserved += num_slots;
        }

        IFRATE(rate_ctx.new.txpkt += ntx);

        if (!kring->nr_hwavail) {
            /* No more available slots? Set a notification event
             * on a netmap slot that will be cleaned in the future.
             * No doublecheck is performed, since txsync() will be
             * called twice by netmap_poll().
             */
            generic_set_tx_event(kring, j);
        }
        ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
    }

    /* Synchronize the user's view to the kernel view. */
    ring->avail = kring->nr_hwavail;
    ring->reserved = kring->nr_hwreserved;

    return 0;
}
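
/*
 * A worked example of the accounting above (hypothetical numbers):
 * with num_slots = 256, nr_hwcur = 10, ring->cur = 50 and
 * nr_hwreserved = 0, new_slots = 40. If the driver accepts only the
 * first 30 mbufs and then reports a failure, j stops at 40, so
 * nr_hwcur becomes 40, nr_hwavail is decreased by all 40 new slots,
 * and nr_hwreserved = 50 - 40 = 10: those are the slots that were not
 * pushed to the NIC and will be retried on the next txsync().
 */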
/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
    struct netmap_adapter *na = NA(ifp);
    struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
    u_int work_done;
    u_int rr = 0; // receive ring number

    ND("called");
    /* limit the size of the queue */
    if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
        m_freem(m);
    } else {
        mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
    }

    if (netmap_generic_mit < 32768) {
        /* no rx mitigation, pass notification up */
        netmap_generic_irq(na->ifp, rr, &work_done);
        IFRATE(rate_ctx.new.rxirq++);
    } else {
        /* same as send combining: filter the notification if there is a
         * pending timer, otherwise pass it up and start a timer.
         */
        if (likely(netmap_mitigation_active(gna))) {
            /* Record that there is some pending work. */
            gna->mit_pending = 1;
        } else {
            netmap_generic_irq(na->ifp, rr, &work_done);
            IFRATE(rate_ctx.new.rxirq++);
            netmap_mitigation_start(gna);
        }
    }
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content into the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
    struct netmap_kring *kring = &na->rx_rings[ring_nr];
    struct netmap_ring *ring = kring->ring;
    u_int j, n, lim = kring->nkr_num_slots - 1;
    int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
    u_int k, resvd = ring->reserved;

    if (ring->cur > lim)
        return netmap_ring_reinit(kring);

    /* Import newly received packets into the netmap ring. */
    if (netmap_no_pendintr || force_update) {
        uint16_t slot_flags = kring->nkr_slot_flags;
        struct mbuf *m;

        n = 0;
        j = kring->nr_ntc; /* first empty slot in the receive ring */
        /* extract buffers from the rx queue, stop at most one
         * slot before nr_hwcur (index k)
         */
        k = (kring->nr_hwcur) ? kring->nr_hwcur - 1 : lim;
        while (j != k) {
            int len;
            void *addr = NMB(&ring->slot[j]);

            if (addr == netmap_buffer_base) { /* Bad buffer */
                return netmap_ring_reinit(kring);
            }
            /*
             * Call the locked version of the function.
             * XXX Ideally we could grab a batch of mbufs at once,
             * by changing rx_queue into a ring.
             */
            m = mbq_safe_dequeue(&kring->rx_queue);
            if (!m)
                break;
            len = MBUF_LEN(m);
            m_copydata(m, 0, len, addr);
            ring->slot[j].len = len;
            ring->slot[j].flags = slot_flags;
            m_freem(m);
            if (unlikely(j++ == lim))
                j = 0;
            n++;
        }
        if (n) {
            kring->nr_ntc = j;
            kring->nr_hwavail += n;
            IFRATE(rate_ctx.new.rxpkt += n);
        }
        kring->nr_kflags &= ~NKR_PENDINTR;
    }

    // XXX should we invert the order ?
    /* Skip past packets that userspace has released */
    j = kring->nr_hwcur;
    k = ring->cur;
    if (resvd > 0) {
        if (resvd + ring->avail >= lim + 1) {
            D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
            ring->reserved = resvd = 0; // XXX panic...
        }
        k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
    }
    if (j != k) {
        /* Userspace has released some packets. */
        for (n = 0; j != k; n++) {
            struct netmap_slot *slot = &ring->slot[j];

            slot->flags &= ~NS_BUF_CHANGED;
            if (unlikely(j++ == lim))
                j = 0;
        }
        kring->nr_hwavail -= n;
        kring->nr_hwcur = k;
    }
    /* Tell userspace that there are new packets. */
    ring->avail = kring->nr_hwavail - resvd;
    IFRATE(rate_ctx.new.rxsync++);

    return 0;
}
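
/*
 * A worked example of the second part of rxsync above (hypothetical
 * numbers): with nkr_num_slots = 256 (lim = 255), nr_hwcur = 10,
 * ring->cur = 30 and ring->reserved = 5, the slots still held by
 * userspace are excluded, so k = 30 - 5 = 25; slots 10..24 are
 * considered released, nr_hwavail drops by 15 and nr_hwcur moves to
 * 25. Finally ring->avail = nr_hwavail - resvd, so the 5 reserved
 * slots are not offered to userspace again.
 */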
static void
generic_netmap_dtor(struct netmap_adapter *na)
{
    struct ifnet *ifp = na->ifp;
    struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
    struct netmap_adapter *prev_na = gna->prev;

    if (prev_na != NULL) {
        D("Released generic NA %p", gna);
#if 0
        if_rele(na->ifp);
#endif
        netmap_adapter_put(prev_na);
    }
    if (ifp != NULL) {
        WNA(ifp) = prev_na;
        D("Restored native NA %p", prev_na);
        na->ifp = NULL;
    }
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
    struct netmap_adapter *na;
    struct netmap_generic_adapter *gna;
    int retval;
    u_int num_tx_desc, num_rx_desc;

    num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

    generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
    ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

    gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
    if (gna == NULL) {
        D("no memory on attach, give up");
        return ENOMEM;
    }
    na = (struct netmap_adapter *)gna;
    na->ifp = ifp;
    na->num_tx_desc = num_tx_desc;
    na->num_rx_desc = num_rx_desc;
    na->nm_register = &generic_netmap_register;
    na->nm_txsync = &generic_netmap_txsync;
    na->nm_rxsync = &generic_netmap_rxsync;
    na->nm_dtor = &generic_netmap_dtor;
    /* when using generic, IFCAP_NETMAP is set so we force
     * NAF_SKIP_INTR to use the regular interrupt handler
     */
    na->na_flags = NAF_SKIP_INTR;

    ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
            ifp->num_tx_queues, ifp->real_num_tx_queues,
            ifp->tx_queue_len);
    ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
            ifp->num_rx_queues, ifp->real_num_rx_queues);

    generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

    retval = netmap_attach_common(na);
    if (retval) {
        kfree(gna, M_DEVBUF);
    }

    return retval;
}