1fb578518SFranco Fichtner /* 2fb578518SFranco Fichtner * Copyright (C) 2013 Universita` di Pisa. All rights reserved. 3fb578518SFranco Fichtner * 4fb578518SFranco Fichtner * Redistribution and use in source and binary forms, with or without 5fb578518SFranco Fichtner * modification, are permitted provided that the following conditions 6fb578518SFranco Fichtner * are met: 7fb578518SFranco Fichtner * 1. Redistributions of source code must retain the above copyright 8fb578518SFranco Fichtner * notice, this list of conditions and the following disclaimer. 9fb578518SFranco Fichtner * 2. Redistributions in binary form must reproduce the above copyright 10fb578518SFranco Fichtner * notice, this list of conditions and the following disclaimer in the 11fb578518SFranco Fichtner * documentation and/or other materials provided with the distribution. 12fb578518SFranco Fichtner * 13fb578518SFranco Fichtner * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14fb578518SFranco Fichtner * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15fb578518SFranco Fichtner * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16fb578518SFranco Fichtner * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17fb578518SFranco Fichtner * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18fb578518SFranco Fichtner * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19fb578518SFranco Fichtner * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20fb578518SFranco Fichtner * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21fb578518SFranco Fichtner * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22fb578518SFranco Fichtner * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23fb578518SFranco Fichtner * SUCH DAMAGE. 
24fb578518SFranco Fichtner */ 25fb578518SFranco Fichtner 26fb578518SFranco Fichtner /* 27fb578518SFranco Fichtner * This module implements netmap support on top of standard, 28fb578518SFranco Fichtner * unmodified device drivers. 29fb578518SFranco Fichtner * 30fb578518SFranco Fichtner * A NIOCREGIF request is handled here if the device does not 31fb578518SFranco Fichtner * have native support. TX and RX rings are emulated as follows: 32fb578518SFranco Fichtner * 33fb578518SFranco Fichtner * NIOCREGIF 34fb578518SFranco Fichtner * We preallocate a block of TX mbufs (roughly as many as 35fb578518SFranco Fichtner * tx descriptors; the number is not critical) to speed up 36fb578518SFranco Fichtner * operation during transmissions. The refcount on most of 37fb578518SFranco Fichtner * these buffers is artificially bumped up so we can recycle 38fb578518SFranco Fichtner * them more easily. Also, the destructor is intercepted 39fb578518SFranco Fichtner * so we use it as an interrupt notification to wake up 40fb578518SFranco Fichtner * processes blocked on a poll(). 41fb578518SFranco Fichtner * 42fb578518SFranco Fichtner * For each receive ring we allocate one "struct mbq" 43fb578518SFranco Fichtner * (an mbuf tailq plus a spinlock). We intercept packets 44fb578518SFranco Fichtner * (through if_input) 45fb578518SFranco Fichtner * on the receive path and put them in the mbq from which 46fb578518SFranco Fichtner * netmap receive routines can grab them. 47fb578518SFranco Fichtner * 48fb578518SFranco Fichtner * TX: 49fb578518SFranco Fichtner * in the generic_txsync() routine, netmap buffers are copied 50fb578518SFranco Fichtner * (or linked, in a future) to the preallocated mbufs 51fb578518SFranco Fichtner * and pushed to the transmit queue. Some of these mbufs 52fb578518SFranco Fichtner * (those with NS_REPORT, or otherwise every half ring) 53fb578518SFranco Fichtner * have the refcount=1, others have refcount=2. 
54fb578518SFranco Fichtner * When the destructor is invoked, we take that as 55fb578518SFranco Fichtner * a notification that all mbufs up to that one in 56fb578518SFranco Fichtner * the specific ring have been completed, and generate 57fb578518SFranco Fichtner * the equivalent of a transmit interrupt. 58fb578518SFranco Fichtner * 59fb578518SFranco Fichtner * RX: 60fb578518SFranco Fichtner * 61fb578518SFranco Fichtner */ 62fb578518SFranco Fichtner 63fb578518SFranco Fichtner 64*785c7ee6SFranco Fichtner #include <sys/cdefs.h> /* prerequisite */ 65*785c7ee6SFranco Fichtner __FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $"); 66*785c7ee6SFranco Fichtner 67fb578518SFranco Fichtner #include <sys/types.h> 68fb578518SFranco Fichtner #include <sys/errno.h> 69fb578518SFranco Fichtner #include <sys/malloc.h> 70fb578518SFranco Fichtner #include <sys/lock.h> /* PROT_EXEC */ 71fb578518SFranco Fichtner #include <sys/socket.h> /* sockaddrs */ 7213431b3eSFranco Fichtner #include <sys/event.h> 73fb578518SFranco Fichtner #include <net/if.h> 74fb578518SFranco Fichtner #include <net/if_var.h> 75ed9bd855SFranco Fichtner #include <sys/bus.h> /* bus_dmamap_* in netmap_kern.h */ 76fb578518SFranco Fichtner 77fb578518SFranco Fichtner // XXX temporary - D() defined here 78fb578518SFranco Fichtner #include <net/netmap.h> 79b3f97fadSFranco Fichtner #include <net/netmap/netmap_kern.h> 80b3f97fadSFranco Fichtner #include <net/netmap/netmap_mem2.h> 81fb578518SFranco Fichtner 82fb578518SFranco Fichtner #define rtnl_lock() D("rtnl_lock called"); 83fb578518SFranco Fichtner #define rtnl_unlock() D("rtnl_lock called"); 84bf9f7c16SFranco Fichtner #define MBUF_TXQ(m) ((m)->m_pkthdr.hash) 85fb578518SFranco Fichtner #define smp_mb() 86fb578518SFranco Fichtner 87fb578518SFranco Fichtner /* 88fb578518SFranco Fichtner * mbuf wrappers 89fb578518SFranco Fichtner */ 90fb578518SFranco Fichtner 91fb578518SFranco Fichtner /* 92fb578518SFranco Fichtner * we allocate an EXT_PACKET 
93fb578518SFranco Fichtner */ 94bf9f7c16SFranco Fichtner #define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR) 95fb578518SFranco Fichtner 96fb578518SFranco Fichtner /* mbuf destructor, also need to change the type to EXT_EXTREF, 97fb578518SFranco Fichtner * add an M_NOFREE flag, and then clear the flag and 98fb578518SFranco Fichtner * chain into uma_zfree(zone_pack, mf) 99fb578518SFranco Fichtner * (or reinstall the buffer ?) 100fb578518SFranco Fichtner */ 101fb578518SFranco Fichtner #define SET_MBUF_DESTRUCTOR(m, fn) do { \ 102fb578518SFranco Fichtner (m)->m_ext.ext_free = (void *)fn; \ 103bf9f7c16SFranco Fichtner /* (m)->m_ext.ext_type = EXT_EXTREF; */ \ 104fb578518SFranco Fichtner } while (0) 105fb578518SFranco Fichtner 106fb578518SFranco Fichtner 107fb578518SFranco Fichtner #define GET_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1) 108fb578518SFranco Fichtner 109fb578518SFranco Fichtner /* ======================== usage stats =========================== */ 110fb578518SFranco Fichtner 111fb578518SFranco Fichtner #ifdef RATE 112fb578518SFranco Fichtner #define IFRATE(x) x 113fb578518SFranco Fichtner struct rate_stats { 114fb578518SFranco Fichtner unsigned long txpkt; 115fb578518SFranco Fichtner unsigned long txsync; 116fb578518SFranco Fichtner unsigned long txirq; 117fb578518SFranco Fichtner unsigned long rxpkt; 118fb578518SFranco Fichtner unsigned long rxirq; 119fb578518SFranco Fichtner unsigned long rxsync; 120fb578518SFranco Fichtner }; 121fb578518SFranco Fichtner 122fb578518SFranco Fichtner struct rate_context { 123fb578518SFranco Fichtner unsigned refcount; 124fb578518SFranco Fichtner struct timer_list timer; 125fb578518SFranco Fichtner struct rate_stats new; 126fb578518SFranco Fichtner struct rate_stats old; 127fb578518SFranco Fichtner }; 128fb578518SFranco Fichtner 129fb578518SFranco Fichtner #define RATE_PRINTK(_NAME_) \ 130fb578518SFranco Fichtner printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD); 
#define RATE_PERIOD 2
/* Timer callback: print the per-second event rates and re-arm the timer. */
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

/* Single shared profiling context, refcounted across registered interfaces. */
static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size	/* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Differently from netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 * Returns 0 when the interface is not in netmap mode.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return 0;

	return netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface.
 *
 * On enable: init the per-ring rx queues and mitigation timer,
 * preallocate one mbuf per tx descriptor, then hook the rx path
 * and packet steering. On disable, undo all of the above.
 * Returns 0 on success or an errno value on failure.
 */
int generic_netmap_register(struct netmap_adapter *na, int enable)
{
	/* NOTE(review): na is dereferenced here before the !na check below,
	 * so that check is ineffective — confirm callers never pass NULL. */
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct mbuf *m;
	int error;
	int i, r;

	if (!na)
		return EINVAL;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
			na->rx_rings[r].nr_ntc = 0;
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].nr_ntc = 0;
			na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pool;
			}
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_mbufs;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_packet_steering(gna, 1);

		rtnl_unlock();

#ifdef RATE
		/* First user starts the periodic stats timer. */
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_packet_steering(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		/* Return the preallocated tx mbufs and their pool arrays. */
		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		/* Last user stops the stats timer. */
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

	/* Error unwind: free whatever tx pools/mbufs were allocated so far.
	 * 'r' is the ring that failed, 'i' the first unallocated mbuf in it;
	 * the decrements below position them on the last valid entries.
	 */
register_handler:
	rtnl_unlock();
free_tx_pool:
	r--;
	i = na->num_tx_desc; /* Useless, but just to stay safe. */
free_mbufs:
	i--;
	for (; r>=0; r--) {
		for (; i>=0; i--) {
			m_freem(na->tx_rings[r].tx_pool[i]);
		}
		kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		i = na->num_tx_desc - 1;
	}

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 * We use it as the equivalent of a tx interrupt: wake up the
 * pollers for the queue this mbuf belonged to.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#if 0
	m->m_ext.ext_type = EXT_PACKET;
#endif
	/* Detach ourselves so the mbuf can be freed normally from now on. */
	m->m_ext.ext_free = NULL;
#if 0
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
#endif
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 * Returns the number of slots reclaimed, or -ENOMEM if an empty pool
 * entry could not be replenished.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int num_slots = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (ntc != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[ntc];

		if (unlikely(m == NULL)) {
			/* try to replenish the entry */
			tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
#if 0
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
#endif
		}
		/* NOTE(review): with the refcount check above disabled,
		 * every slot up to hwcur is treated as completed. */
		if (unlikely(++ntc == num_slots)) {
			ntc = 0;
		}
		n++;
	}
	kring->nr_ntc = ntc;
	kring->nr_hwavail += n;
	ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

	return n;
}


/*
 * We have pending packets in the driver between nr_ntc and j.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (kring->nr_ntc == hwcur) {
		return;
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': There is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	/* Remove the mbuf from the pool so tx_clean replenishes the slot,
	 * and arm our destructor on it to get the completion callback. */
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount an free it if we have the last one. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit() ).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, num_slots = kring->nkr_num_slots;
	int new_slots, ntx;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure
	/* first, reclaim completed buffers */
	generic_netmap_tx_clean(kring);

	/* Take a copy of ring->cur now, and never read it again. */
	k = ring->cur;
	if (unlikely(k >= num_slots)) {
		return netmap_ring_reinit(kring);
	}

	rmb();
	j = kring->nr_hwcur;
	/*
	 * 'new_slots' counts how many new slots have been added:
	 * everything from hwcur to cur, excluding reserved ones, if any.
	 * nr_hwreserved start from hwcur and counts how many slots were
	 * not sent to the NIC from the previous round.
	 */
	new_slots = k - j - kring->nr_hwreserved;
	if (new_slots < 0) {
		new_slots += num_slots;
	}
	ntx = 0;
	if (j != k) {
		/* Process new packets to send:
		 * j is the current index in the netmap ring.
		 */
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
			void *addr = NMB(slot);
			u_int len = slot->len;
			struct mbuf *m;
			int tx_ret;

			if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
				return netmap_ring_reinit(kring);
			}
			/* Take a mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[j];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->avail == 0 || j != k
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
						tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with native netmap driver where space
				 * is preallocated). The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, j);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			if (unlikely(++j == num_slots))
				j = 0;
			ntx++;
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = j;

		/*
		 * Report all new slots as unavailable, even those not sent.
		 * We account for them with with hwreserved, so that
		 * nr_hwreserved =:= cur - nr_hwcur
		 */
		kring->nr_hwavail -= new_slots;
		kring->nr_hwreserved = k - j;
		if (kring->nr_hwreserved < 0) {
			kring->nr_hwreserved += num_slots;
		}

		IFRATE(rate_ctx.new.txpkt += ntx);

		if (!kring->nr_hwavail) {
			/* No more available slots? Set a notification event
			 * on a netmap slot that will be cleaned in the future.
			 * No doublecheck is performed, since txsync() will be
			 * called twice by netmap_poll().
			 */
			generic_set_tx_event(kring, j);
		}
		ND("tx #%d, hwavail = %d", n, kring->nr_hwavail);
	}

	/* Synchronize the user's view to the kernel view. */
	ring->avail = kring->nr_hwavail;
	ring->reserved = kring->nr_hwreserved;

	return 0;
}

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen to the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	ND("called");
	/* limit the size of the queue: beyond 1024 pending mbufs
	 * the packet is dropped rather than queued. */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining, filter notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_netmap_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous,
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	u_int k, resvd = ring->reserved;

	if (ring->cur > lim)
		return netmap_ring_reinit(kring);

	/* Import newly received packets into the netmap ring.
*/ 649fb578518SFranco Fichtner if (netmap_no_pendintr || force_update) { 650fb578518SFranco Fichtner uint16_t slot_flags = kring->nkr_slot_flags; 651fb578518SFranco Fichtner struct mbuf *m; 652fb578518SFranco Fichtner 653fb578518SFranco Fichtner n = 0; 654fb578518SFranco Fichtner j = kring->nr_ntc; /* first empty slot in the receive ring */ 655fb578518SFranco Fichtner /* extract buffers from the rx queue, stop at most one 656fb578518SFranco Fichtner * slot before nr_hwcur (index k) 657fb578518SFranco Fichtner */ 658fb578518SFranco Fichtner k = (kring->nr_hwcur) ? kring->nr_hwcur-1 : lim; 659fb578518SFranco Fichtner while (j != k) { 660fb578518SFranco Fichtner int len; 661fb578518SFranco Fichtner void *addr = NMB(&ring->slot[j]); 662fb578518SFranco Fichtner 663fb578518SFranco Fichtner if (addr == netmap_buffer_base) { /* Bad buffer */ 664fb578518SFranco Fichtner return netmap_ring_reinit(kring); 665fb578518SFranco Fichtner } 666fb578518SFranco Fichtner /* 667fb578518SFranco Fichtner * Call the locked version of the function. 668fb578518SFranco Fichtner * XXX Ideally we could grab a batch of mbufs at once, 669fb578518SFranco Fichtner * by changing rx_queue into a ring. 
670fb578518SFranco Fichtner */ 671fb578518SFranco Fichtner m = mbq_safe_dequeue(&kring->rx_queue); 672fb578518SFranco Fichtner if (!m) 673fb578518SFranco Fichtner break; 674fb578518SFranco Fichtner len = MBUF_LEN(m); 675fb578518SFranco Fichtner m_copydata(m, 0, len, addr); 676fb578518SFranco Fichtner ring->slot[j].len = len; 677fb578518SFranco Fichtner ring->slot[j].flags = slot_flags; 678fb578518SFranco Fichtner m_freem(m); 679fb578518SFranco Fichtner if (unlikely(j++ == lim)) 680fb578518SFranco Fichtner j = 0; 681fb578518SFranco Fichtner n++; 682fb578518SFranco Fichtner } 683fb578518SFranco Fichtner if (n) { 684fb578518SFranco Fichtner kring->nr_ntc = j; 685fb578518SFranco Fichtner kring->nr_hwavail += n; 686fb578518SFranco Fichtner IFRATE(rate_ctx.new.rxpkt += n); 687fb578518SFranco Fichtner } 688fb578518SFranco Fichtner kring->nr_kflags &= ~NKR_PENDINTR; 689fb578518SFranco Fichtner } 690fb578518SFranco Fichtner 691fb578518SFranco Fichtner // XXX should we invert the order ? 692fb578518SFranco Fichtner /* Skip past packets that userspace has released */ 693fb578518SFranco Fichtner j = kring->nr_hwcur; 694fb578518SFranco Fichtner k = ring->cur; 695fb578518SFranco Fichtner if (resvd > 0) { 696fb578518SFranco Fichtner if (resvd + ring->avail >= lim + 1) { 697fb578518SFranco Fichtner D("XXX invalid reserve/avail %d %d", resvd, ring->avail); 698fb578518SFranco Fichtner ring->reserved = resvd = 0; // XXX panic... 699fb578518SFranco Fichtner } 700fb578518SFranco Fichtner k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd; 701fb578518SFranco Fichtner } 702fb578518SFranco Fichtner if (j != k) { 703fb578518SFranco Fichtner /* Userspace has released some packets. 
*/ 704fb578518SFranco Fichtner for (n = 0; j != k; n++) { 705fb578518SFranco Fichtner struct netmap_slot *slot = &ring->slot[j]; 706fb578518SFranco Fichtner 707fb578518SFranco Fichtner slot->flags &= ~NS_BUF_CHANGED; 708fb578518SFranco Fichtner if (unlikely(j++ == lim)) 709fb578518SFranco Fichtner j = 0; 710fb578518SFranco Fichtner } 711fb578518SFranco Fichtner kring->nr_hwavail -= n; 712fb578518SFranco Fichtner kring->nr_hwcur = k; 713fb578518SFranco Fichtner } 714fb578518SFranco Fichtner /* Tell userspace that there are new packets. */ 715fb578518SFranco Fichtner ring->avail = kring->nr_hwavail - resvd; 716fb578518SFranco Fichtner IFRATE(rate_ctx.new.rxsync++); 717fb578518SFranco Fichtner 718fb578518SFranco Fichtner return 0; 719fb578518SFranco Fichtner } 720fb578518SFranco Fichtner 721fb578518SFranco Fichtner static void 722fb578518SFranco Fichtner generic_netmap_dtor(struct netmap_adapter *na) 723fb578518SFranco Fichtner { 724fb578518SFranco Fichtner struct ifnet *ifp = na->ifp; 725fb578518SFranco Fichtner struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na; 726fb578518SFranco Fichtner struct netmap_adapter *prev_na = gna->prev; 727fb578518SFranco Fichtner 728fb578518SFranco Fichtner if (prev_na != NULL) { 729fb578518SFranco Fichtner D("Released generic NA %p", gna); 730bf9f7c16SFranco Fichtner #if 0 731fb578518SFranco Fichtner if_rele(na->ifp); 732bf9f7c16SFranco Fichtner #endif 733fb578518SFranco Fichtner netmap_adapter_put(prev_na); 734fb578518SFranco Fichtner } 735fb578518SFranco Fichtner if (ifp != NULL) { 736fb578518SFranco Fichtner WNA(ifp) = prev_na; 737fb578518SFranco Fichtner D("Restored native NA %p", prev_na); 738fb578518SFranco Fichtner na->ifp = NULL; 739fb578518SFranco Fichtner } 740fb578518SFranco Fichtner } 741fb578518SFranco Fichtner 742fb578518SFranco Fichtner /* 743fb578518SFranco Fichtner * generic_netmap_attach() makes it possible to use netmap on 744fb578518SFranco Fichtner * a device without native netmap support. 
745fb578518SFranco Fichtner * This is less performant than native support but potentially 746fb578518SFranco Fichtner * faster than raw sockets or similar schemes. 747fb578518SFranco Fichtner * 748fb578518SFranco Fichtner * In this "emulated" mode, netmap rings do not necessarily 749fb578518SFranco Fichtner * have the same size as those in the NIC. We use a default 750fb578518SFranco Fichtner * value and possibly override it if the OS has ways to fetch the 751fb578518SFranco Fichtner * actual configuration. 752fb578518SFranco Fichtner */ 753fb578518SFranco Fichtner int 754fb578518SFranco Fichtner generic_netmap_attach(struct ifnet *ifp) 755fb578518SFranco Fichtner { 756fb578518SFranco Fichtner struct netmap_adapter *na; 757fb578518SFranco Fichtner struct netmap_generic_adapter *gna; 758fb578518SFranco Fichtner int retval; 759fb578518SFranco Fichtner u_int num_tx_desc, num_rx_desc; 760fb578518SFranco Fichtner 761fb578518SFranco Fichtner num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */ 762fb578518SFranco Fichtner 763fb578518SFranco Fichtner generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); 764fb578518SFranco Fichtner ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc); 765fb578518SFranco Fichtner 766ed9bd855SFranco Fichtner gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO); 767fb578518SFranco Fichtner if (gna == NULL) { 768fb578518SFranco Fichtner D("no memory on attach, give up"); 769fb578518SFranco Fichtner return ENOMEM; 770fb578518SFranco Fichtner } 771fb578518SFranco Fichtner na = (struct netmap_adapter *)gna; 772fb578518SFranco Fichtner na->ifp = ifp; 773fb578518SFranco Fichtner na->num_tx_desc = num_tx_desc; 774fb578518SFranco Fichtner na->num_rx_desc = num_rx_desc; 775fb578518SFranco Fichtner na->nm_register = &generic_netmap_register; 776fb578518SFranco Fichtner na->nm_txsync = &generic_netmap_txsync; 777fb578518SFranco Fichtner na->nm_rxsync = &generic_netmap_rxsync; 778fb578518SFranco Fichtner 
na->nm_dtor = &generic_netmap_dtor; 779fb578518SFranco Fichtner /* when using generic, IFCAP_NETMAP is set so we force 780fb578518SFranco Fichtner * NAF_SKIP_INTR to use the regular interrupt handler 781fb578518SFranco Fichtner */ 782fb578518SFranco Fichtner na->na_flags = NAF_SKIP_INTR; 783fb578518SFranco Fichtner 784fb578518SFranco Fichtner ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)", 785fb578518SFranco Fichtner ifp->num_tx_queues, ifp->real_num_tx_queues, 786fb578518SFranco Fichtner ifp->tx_queue_len); 787fb578518SFranco Fichtner ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)", 788fb578518SFranco Fichtner ifp->num_rx_queues, ifp->real_num_rx_queues); 789fb578518SFranco Fichtner 790fb578518SFranco Fichtner generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings); 791fb578518SFranco Fichtner 792fb578518SFranco Fichtner retval = netmap_attach_common(na); 793fb578518SFranco Fichtner if (retval) { 794ed9bd855SFranco Fichtner kfree(gna, M_DEVBUF); 795fb578518SFranco Fichtner } 796fb578518SFranco Fichtner 797fb578518SFranco Fichtner return retval; 798fb578518SFranco Fichtner } 799