/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input)
 *	on the receive path and put them in the mbq from which
 *	netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	in the generic_rxsync() routine, mbufs previously queued by
 *	generic_rx_handler() are dequeued and their payload is copied
 *	into the netmap receive ring.
 */
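
/*
 * Usage sketch (illustrative only, not part of this module): from
 * userspace the emulated adapter looks exactly like a native one.
 * The snippet below shows the standard netmap API sequence
 * (NIOCREGIF + mmap + poll) that ends up exercising this file when
 * the underlying driver has no native support; the interface name
 * "em0" is a placeholder.
 *
 *	struct nmreq nmr;
 *	struct netmap_if *nifp;
 *	struct netmap_ring *txring;
 *	void *mem;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	bzero(&nmr, sizeof(nmr));
 *	nmr.nr_version = NETMAP_API;
 *	strlcpy(nmr.nr_name, "em0", sizeof(nmr.nr_name));
 *	ioctl(fd, NIOCREGIF, &nmr);	// falls back to this module
 *	mem = mmap(0, nmr.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	nifp = NETMAP_IF(mem, nmr.nr_offset);
 *	txring = NETMAP_TXRING(nifp, 0);
 *	// fill txring->slot[], advance txring->cur, then:
 *	ioctl(fd, NIOCTXSYNC, NULL);	// calls generic_netmap_txsync()
 */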

/* __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $"); */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>    /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>

#include "netmap_kern.h"
#include "netmap_mem2.h"

#define rtnl_lock()	D("rtnl_lock called");
#define rtnl_unlock()	D("rtnl_unlock called");
#define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR|M_NOFREE)

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {	\
	(m)->m_ext.ext_free = (void *)fn;	\
	(m)->m_ext.ext_type = EXT_EXTREF;	\
} while (0)

#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)
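
/*
 * Informal sketch of the recycling trick described above (follows
 * the code later in this file, not normative): most pool mbufs keep
 * a refcount of 2 (pool + driver), so the driver's m_freem() only
 * drops the count and generic_netmap_tx_clean() can recycle the
 * buffer. On a slot chosen as an "event" slot,
 * generic_set_tx_event() does roughly:
 *
 *	m = kring->tx_pool[e];		// refcnt 2: pool + driver
 *	kring->tx_pool[e] = NULL;	// give up pool ownership
 *	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
 *	m_freem(m);			// refcnt 1: driver only
 *	// driver completion -> m_freem() -> destructor -> tx "irq"
 */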

/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size	/* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return 0;

	return netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface. */
int generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct ifnet *ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct mbuf *m;
	int error;
	int i, r;

	if (!na)
		return EINVAL;
	ifp = na->ifp;	/* only dereference na after the NULL check */

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
			na->rx_rings[r].nr_ntc = 0;
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].nr_ntc = 0;
			na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pool;
			}
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_mbufs;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_packet_steering(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_packet_steering(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pool:
	r--;
	i = na->num_tx_desc;	/* Useless, but just to stay safe. */
free_mbufs:
	i--;
	for (; r>=0; r--) {
		for (; i>=0; i--) {
			m_freem(na->tx_rings[r].tx_pool[i]);
		}
		kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		i = na->num_tx_desc - 1;
	}

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int num_slots = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (ntc != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[ntc];

		if (unlikely(m == NULL)) {
			/* try to replenish the entry */
			tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
		}
		if (unlikely(++ntc == num_slots)) {
			ntc = 0;
		}
		n++;
	}
	kring->nr_ntc = ntc;
	kring->nr_hwavail += n;
	ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

	return n;
}


/*
 * We have pending packets in the driver between nr_ntc and j.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}
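
/*
 * Worked example (illustrative only): with nkr_num_slots = 512,
 * nr_ntc = 500 and hwcur = 20 the pending region wraps around, so
 * e = (20 + 512 + 500) / 2 = 516, folded back to 516 - 512 = 4:
 * the slot halfway through the 32 pending ones. Without wrap,
 * nr_ntc = 100 and hwcur = 200 give e = (200 + 100) / 2 = 150.
 */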

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (kring->nr_ntc == hwcur) {
		return;
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': There is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, num_slots = kring->nkr_num_slots;
	int new_slots, ntx;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure
	/* first, reclaim completed buffers */
	generic_netmap_tx_clean(kring);

	/* Take a copy of ring->cur now, and never read it again. */
	k = ring->cur;
	if (unlikely(k >= num_slots)) {
		return netmap_ring_reinit(kring);
	}

	rmb();
	j = kring->nr_hwcur;
	/*
	 * 'new_slots' counts how many new slots have been added:
	 * everything from hwcur to cur, excluding reserved ones, if any.
	 * nr_hwreserved starts from hwcur and counts how many slots were
	 * not sent to the NIC from the previous round.
	 */
	new_slots = k - j - kring->nr_hwreserved;
	if (new_slots < 0) {
		new_slots += num_slots;
	}
	ntx = 0;
	if (j != k) {
		/* Process new packets to send:
		 * j is the current index in the netmap ring.
		 */
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
			void *addr = NMB(slot);
			u_int len = slot->len;
			struct mbuf *m;
			int tx_ret;

			if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
				return netmap_ring_reinit(kring);
			}
			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[j];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->avail == 0 || j != k
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
						tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with native netmap driver where space
				 * is preallocated). The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, j);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			if (unlikely(++j == num_slots))
				j = 0;
			ntx++;
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = j;

		/*
		 * Report all new slots as unavailable, even those not sent.
		 * We account for them with hwreserved, so that
		 * nr_hwreserved =:= cur - nr_hwcur
		 */
		kring->nr_hwavail -= new_slots;
		kring->nr_hwreserved = k - j;
		if (kring->nr_hwreserved < 0) {
			kring->nr_hwreserved += num_slots;
		}

		IFRATE(rate_ctx.new.txpkt += ntx);

		if (!kring->nr_hwavail) {
			/* No more available slots? Set a notification event
			 * on a netmap slot that will be cleaned in the future.
			 * No doublecheck is performed, since txsync() will be
			 * called twice by netmap_poll().
			 */
			generic_set_tx_event(kring, j);
		}
		ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
	}

	/* Synchronize the user's view to the kernel view. */
	ring->avail = kring->nr_hwavail;
	ring->reserved = kring->nr_hwreserved;

	return 0;
}
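
/*
 * Illustrative example of the accounting above (numbers made up):
 * with num_slots = 256, nr_hwcur = 10, cur = 18 and no previous
 * reserve, the loop tries to push slots 10..17 (new_slots = 8). If
 * the driver rejects the mbuf at slot 15 and nothing can be cleaned,
 * the loop breaks with j = 15, so nr_hwcur becomes 15, nr_hwavail
 * drops by 8 and nr_hwreserved = 18 - 15 = 3: the three unsent
 * slots stay accounted as reserved until the next round.
 */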

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen and diverted to netmap instead
 * of reaching the host network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	ND("called");
	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining: filter the notification if there is
		 * a pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	u_int k, resvd = ring->reserved;

	if (ring->cur > lim)
		return netmap_ring_reinit(kring);

	/* Import newly received packets into the netmap ring. */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		struct mbuf *m;

		n = 0;
		j = kring->nr_ntc; /* first empty slot in the receive ring */
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (index k)
		 */
		k = (kring->nr_hwcur) ? kring->nr_hwcur-1 : lim;
		while (j != k) {
			int len;
			void *addr = NMB(&ring->slot[j]);

			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once,
			 * by changing rx_queue into a ring.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[j].len = len;
			ring->slot[j].flags = slot_flags;
			m_freem(m);
			if (unlikely(j++ == lim))
				j = 0;
			n++;
		}
		if (n) {
			kring->nr_ntc = j;
			kring->nr_hwavail += n;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/* Skip past packets that userspace has released */
	j = kring->nr_hwcur;
	k = ring->cur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		/* step back over the reserved slots, wrapping if needed */
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) {
		/* Userspace has released some packets. */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];

			slot->flags &= ~NS_BUF_CHANGED;
			if (unlikely(j++ == lim))
				j = 0;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* Tell userspace that there are new packets. */
	ring->avail = kring->nr_hwavail - resvd;
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}

/*
 * Destructor callback for the generic adapter: restore the previous
 * (native or empty) adapter pointer on the interface and drop the
 * references taken when the generic adapter was created.
 */
static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
		if_rele(na->ifp);
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

	gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, IFCAP_NETMAP is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		kfree(gna, M_DEVBUF);
	}

	return retval;
}
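
/*
 * Attach-path sketch (illustrative, with assumptions): the netmap
 * core is expected to fall back to this module roughly as follows
 * when an interface has no native adapter. NA() comes from
 * netmap_kern.h; the exact call site and capability check may differ.
 *
 *	struct ifnet *ifp = ...;	// resolved from the request name
 *	if (NA(ifp) == NULL) {		// no native netmap support
 *		error = generic_netmap_attach(ifp);
 *		if (error)
 *			return error;
 *	}
 *	na = NA(ifp);			// now points to the generic adapter
 */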