/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them in
 *	the mbq, from which netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	incoming packets are intercepted by generic_rx_handler(),
 *	queued on the per-ring "struct mbq", and later copied into
 *	the netmap slots by generic_netmap_rxsync().
 *
 */

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define rtnl_lock()	D("rtnl_lock called");
#define rtnl_unlock()	D("rtnl_unlock called");
#define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR|M_NOFREE)

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	(m)->m_ext.ext_type = EXT_EXTREF;		\
} while (0)


#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)
#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

//#define RATE  /* Enables communication statistics. */

//#define REG_RESET

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>



/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size	/* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return 0;

	return netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface. */
int generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	int i, r;

	if (!na)
		return EINVAL;
	ifp = na->ifp;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
			na->rx_rings[r].nr_ntc = 0;
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
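		 * Each mbuf is allocated with M_NOFREE (see netmap_get_mbuf())
		 * and an artificially bumped refcount, so a driver free only
		 * drops the count and the same buffer can be recycled on
		 * subsequent transmissions.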
		 */
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].nr_ntc = 0;
			na->tx_rings[r].tx_pool = malloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pool;
			}
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_mbufs;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_packet_steering(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_packet_steering(gna, 0);

		/* Do not intercept packets on the rx path.
		 */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			free(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pool:
	r--;
	i = na->num_tx_desc;  /* Useless, but just to stay safe. */
free_mbufs:
	i--;
	for (; r>=0; r--) {
		for (; i>=0; i--) {
			m_freem(na->tx_rings[r].tx_pool[i]);
		}
		free(na->tx_rings[r].tx_pool, M_DEVBUF);
		i = na->num_tx_desc - 1;
	}

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
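 * On FreeBSD the mbuf is restored to EXT_PACKET and returned to
 * zone_pack below; generic_netmap_tx_clean() will later allocate
 * a replacement for the tx_pool slot this mbuf came from.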
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#ifdef __FreeBSD__
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
#endif /* __FreeBSD__ */
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int num_slots = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (ntc != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[ntc];

		if (unlikely(m == NULL)) {
			/* try to replenish the entry */
			tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
		}
		if (unlikely(++ntc == num_slots)) {
			ntc = 0;
		}
		n++;
	}
	kring->nr_ntc = ntc;
	kring->nr_hwavail += n;
	ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

	return n;
}


/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
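 * Example (illustrative numbers): with nkr_num_slots = 8,
 * nr_ntc = 6 and hwcur = 2 the pending region wraps around,
 * so e = (2 + 8 + 6) / 2 = 8, which folds back to slot 0.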
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (kring->nr_ntc == hwcur) {
		return;
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': there is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit() ).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
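 * Both cases are hidden behind the OS-specific helper
 * generic_xmit_frame() used in the loop below.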
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, num_slots = kring->nkr_num_slots;
	int new_slots, ntx;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure
	/* first, reclaim completed buffers */
	generic_netmap_tx_clean(kring);

	/* Take a copy of ring->cur now, and never read it again. */
	k = ring->cur;
	if (unlikely(k >= num_slots)) {
		return netmap_ring_reinit(kring);
	}

	rmb();
	j = kring->nr_hwcur;
	/*
	 * 'new_slots' counts how many new slots have been added:
	 * everything from hwcur to cur, excluding reserved ones, if any.
	 * nr_hwreserved starts from hwcur and counts how many slots were
	 * not sent to the NIC from the previous round.
	 */
	new_slots = k - j - kring->nr_hwreserved;
	if (new_slots < 0) {
		new_slots += num_slots;
	}
	ntx = 0;
	if (j != k) {
		/* Process new packets to send:
		 * j is the current index in the netmap ring.
		 */
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
			void *addr = NMB(slot);
			u_int len = slot->len;
			struct mbuf *m;
			int tx_ret;

			if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
				return netmap_ring_reinit(kring);
			}
			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[j];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask notifications when NS_REPORT is set,
			 * or roughly every half frame.
			 * We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->avail == 0 || j != k
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
						tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with the native netmap driver where
				 * space is preallocated). The bridge has a similar
				 * problem and we solve it there by dropping the
				 * excess packets.
				 */
				generic_set_tx_event(kring, j);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			if (unlikely(++j == num_slots))
				j = 0;
			ntx++;
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = j;

		/*
		 * Report all new slots as unavailable, even those not sent.
		 * We account for them with hwreserved, so that
		 * nr_hwreserved =:= cur - nr_hwcur
		 */
		kring->nr_hwavail -= new_slots;
		kring->nr_hwreserved = k - j;
		if (kring->nr_hwreserved < 0) {
			kring->nr_hwreserved += num_slots;
		}

		IFRATE(rate_ctx.new.txpkt += ntx);

		if (!kring->nr_hwavail) {
			/* No more available slots? Set a notification event
			 * on a netmap slot that will be cleaned in the future.
			 * No doublecheck is performed, since txsync() will be
			 * called twice by netmap_poll().
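			 * The second pass will run generic_netmap_tx_clean()
			 * again and pick up anything completed in the meantime.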
			 */
			generic_set_tx_event(kring, j);
		}
		ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
	}

	/* Synchronize the user's view to the kernel view. */
	ring->avail = kring->nr_hwavail;
	ring->reserved = kring->nr_hwreserved;

	return 0;
}

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen from the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	ND("called");
	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* as with send combining: filter the notification if there is
		 * a pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
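 * (the payload is copied out with m_copydata() and the mbuf freed).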
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	u_int k, resvd = ring->reserved;

	if (ring->cur > lim)
		return netmap_ring_reinit(kring);

	/* Import newly received packets into the netmap ring. */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		struct mbuf *m;

		n = 0;
		j = kring->nr_ntc; /* first empty slot in the receive ring */
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (index k)
		 */
		k = (kring->nr_hwcur) ? kring->nr_hwcur-1 : lim;
		while (j != k) {
			int len;
			void *addr = NMB(&ring->slot[j]);

			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once,
			 * by changing rx_queue into a ring.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[j].len = len;
			ring->slot[j].flags = slot_flags;
			m_freem(m);
			if (unlikely(j++ == lim))
				j = 0;
			n++;
		}
		if (n) {
			kring->nr_ntc = j;
			kring->nr_hwavail += n;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/* Skip past packets that userspace has released */
	j = kring->nr_hwcur;
	k = ring->cur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) {
		/* Userspace has released some packets. */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];

			slot->flags &= ~NS_BUF_CHANGED;
			if (unlikely(j++ == lim))
				j = 0;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* Tell userspace that there are new packets. */
	ring->avail = kring->nr_hwavail - resvd;
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
		if_rele(na->ifp);
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
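 * (On Linux, for example, the ring sizes can be read back via
 * ethtool's get_ringparam, which is what generic_find_num_desc()
 * is expected to use.)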
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

	gna = malloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, IFCAP_NETMAP is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		free(gna, M_DEVBUF);
	}

	return retval;
}
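
/*
 * Sketch of the overall call flow for the emulated mode (a summary
 * of this file, for illustration; not an exhaustive list):
 *
 *	NIOCREGIF on a device without native support
 *	  -> generic_netmap_attach()		build the emulated adapter
 *	  -> generic_netmap_register(na, 1)	allocate tx_pool, hook rx path
 *	  -> generic_netmap_txsync() /
 *	     generic_netmap_rxsync()		per-ring data path
 *	  -> generic_netmap_register(na, 0)	teardown
 *	  -> generic_netmap_dtor()		restore the native adapter
 */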