/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input)
 *	on the receive path and put them in the mbq from which
 *	netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_netmap_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount set to 1, the others to 2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	in the generic_netmap_rxsync() routine, mbufs are dequeued
 *	from the per-ring mbq, their payload is copied into the
 *	netmap buffers, and the mbufs are freed.
 */
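
/*
 * Sketch of the two data paths described above, using the names
 * defined later in this file (flow only, not compiled code):
 *
 *   RX:  driver input -> generic_rx_handler(ifp, m)
 *          -> mbq_safe_enqueue(&na->rx_rings[r].rx_queue, m)
 *          -> netmap_generic_irq() wakes threads blocked in poll();
 *        poll()/NIOCRXSYNC -> generic_netmap_rxsync()
 *          -> mbq_safe_dequeue() + m_copydata() into netmap slots.
 *
 *   TX:  NIOCTXSYNC/poll() -> generic_netmap_txsync()
 *          -> copy slot contents into a kring->tx_pool[] mbuf
 *          -> generic_xmit_frame() hands it to the driver;
 *        driver frees the mbuf -> generic_mbuf_destructor()
 *          -> netmap_generic_irq(), the emulated tx interrupt.
 */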

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>	/* PROT_EXEC */
#include <sys/socket.h>	/* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>	/* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>

#define rtnl_lock()	D("rtnl_lock called")
#define rtnl_unlock()	D("rtnl_unlock called")
#define MBUF_TXQ(m)	((m)->m_pkthdr.hash)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len)	m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)

/* mbuf destructor, also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	/* (m)->m_ext.ext_type = EXT_EXTREF; */		\
} while (0)

#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)
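
/*
 * Minimal sketch (not compiled) of the recycling trick the wrappers
 * above are meant to support. It assumes the FreeBSD-style
 * m_ext.ref_cnt layout used by GET_MBUF_REFCNT(); note that parts of
 * this scheme are currently disabled (#if 0) in this port.
 */
#if 0
	/* allocation time: take an extra reference so the mbuf survives
	 * the driver's m_freem() and can be recycled */
	struct mbuf *m = netmap_get_mbuf(2048);	/* refcnt == 1 */
	*(m->m_ext.ref_cnt) = 2;

	/* when a completion event is wanted on this buffer: intercept
	 * the destructor and drop our reference; the driver's later
	 * free brings the refcount to zero and runs
	 * generic_mbuf_destructor(), defined below */
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	m_freem(m);
#endif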

/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD	2
static void rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size	/* Size of the mbufs in the Tx pool. */
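
/*
 * Worked example of the rate math above: with RATE_PERIOD == 2 the
 * callback fires every 2000 ms, so if new.txpkt grew from 10000 to
 * 14000 since the previous tick, RATE_PRINTK(txpkt) prints
 * "txpkt = 2000 Hz" ((14000 - 10000) / 2). The numbers are made up
 * for illustration.
 */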

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return 0;

	return netmap_common_irq(ifp, q, work_done);
}

/* Enable/disable netmap mode for a generic network interface. */
int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	int i, r;

	if (na == NULL)	/* check before dereferencing na below */
		return EINVAL;
	ifp = na->ifp;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
			na->rx_rings[r].nr_ntc = 0;
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].nr_ntc = 0;
			na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pool;
			}
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_mbufs;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_packet_steering(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_packet_steering(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pool:
	r--;
	i = na->num_tx_desc;	/* Useless, but just to stay safe. */
free_mbufs:
	i--;
	for (; r>=0; r--) {
		for (; i>=0; i--) {
			m_freem(na->tx_rings[r].tx_pool[i]);
		}
		kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		i = na->num_tx_desc - 1;
	}

	return error;
}
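
/*
 * For orientation, a minimal sketch of the classic userspace sequence
 * that ends up in generic_netmap_register() when the NIC has no
 * native netmap support. This is user code, not part of this module;
 * the interface name "em0" and the missing error handling are
 * illustrative only.
 */
#if 0
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <net/netmap_user.h>
	#include <fcntl.h>
	#include <string.h>

	struct nmreq req;
	struct netmap_if *nifp;
	void *mem;
	int fd = open("/dev/netmap", O_RDWR);

	bzero(&req, sizeof(req));
	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
	req.nr_version = NETMAP_API;
	ioctl(fd, NIOCREGIF, &req);	/* -> generic path if no native support */
	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	nifp = NETMAP_IF(mem, req.nr_offset);
#endif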

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#if 0
	m->m_ext.ext_type = EXT_PACKET;
#endif
	m->m_ext.ext_free = NULL;
#if 0
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
#endif
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1, modulo the ring size),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int num_slots = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (ntc != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[ntc];

		if (unlikely(m == NULL)) {
			/* try to replenish the entry */
			tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
#if 0
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
#endif
		}
		if (unlikely(++ntc == num_slots)) {
			ntc = 0;
		}
		n++;
	}
	kring->nr_ntc = ntc;
	kring->nr_hwavail += n;
	ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

	return n;
}
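
/*
 * Worked example for the cleanup above (made-up numbers): with
 * num_slots == 8, nr_ntc == 6 and nr_hwcur == 2, the loop visits
 * slots 6, 7, 0, 1 (wrapping at 8), so it reclaims n == 4 buffers,
 * leaves nr_ntc == 2 == nr_hwcur and adds 4 to nr_hwavail.
 */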

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}
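
/*
 * Worked cases for the midpoint above (illustrative numbers,
 * nkr_num_slots == 256):
 *   no wrap:  ntc == 10,  hwcur == 30  -> e = (30 + 10) / 2 = 20
 *   wrap:     ntc == 200, hwcur == 40  -> e = (40 + 256 + 200) / 2
 *                                           = 248 (< 256, no fixup)
 *   wrap+fix: ntc == 250, hwcur == 240 -> e = (240 + 256 + 250) / 2
 *                                           = 373 -> 373 - 256 = 117
 */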

/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (kring->nr_ntc == hwcur) {
		return;
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': There is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}
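
/*
 * Example of the event placement (made-up state): with nr_ntc == 0
 * and hwcur == 6 the midpoint is e == 3, so tx_pool[3] gets the
 * destructor installed and is m_freem()d here; once the driver drops
 * its own reference, generic_mbuf_destructor() fires and emulates a
 * tx interrupt. The NULL left in tx_pool[3] is what a later
 * generic_netmap_tx_clean() pass replenishes.
 */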

/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit() ).
 * On linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, num_slots = kring->nkr_num_slots;
	int new_slots, ntx;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure
	/* first, reclaim completed buffers */
	generic_netmap_tx_clean(kring);

	/* Take a copy of ring->cur now, and never read it again. */
	k = ring->cur;
	if (unlikely(k >= num_slots)) {
		return netmap_ring_reinit(kring);
	}

	rmb();
	j = kring->nr_hwcur;
	/*
	 * 'new_slots' counts how many new slots have been added:
	 * everything from hwcur to cur, excluding reserved ones, if any.
	 * nr_hwreserved starts from hwcur and counts how many slots were
	 * not sent to the NIC from the previous round.
	 */
	new_slots = k - j - kring->nr_hwreserved;
	if (new_slots < 0) {
		new_slots += num_slots;
	}
	ntx = 0;
	if (j != k) {
		/* Process new packets to send:
		 * j is the current index in the netmap ring.
		 */
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
			void *addr = NMB(slot);
			u_int len = slot->len;
			struct mbuf *m;
			int tx_ret;

			if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
				return netmap_ring_reinit(kring);
			}
			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[j];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->avail == 0 || j != k
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
						tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with native netmap driver where space
				 * is preallocated). The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, j);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			if (unlikely(++j == num_slots))
				j = 0;
			ntx++;
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = j;

		/*
		 * Report all new slots as unavailable, even those not sent.
		 * We account for them with hwreserved, so that
		 * nr_hwreserved =:= cur - nr_hwcur
		 */
		kring->nr_hwavail -= new_slots;
		kring->nr_hwreserved = k - j;
		if (kring->nr_hwreserved < 0) {
			kring->nr_hwreserved += num_slots;
		}

		IFRATE(rate_ctx.new.txpkt += ntx);

		if (!kring->nr_hwavail) {
			/* No more available slots? Set a notification event
			 * on a netmap slot that will be cleaned in the future.
			 * No doublecheck is performed, since txsync() will be
			 * called twice by netmap_poll().
			 */
			generic_set_tx_event(kring, j);
		}
		ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
	}

	/* Synchronize the user's view to the kernel view. */
	ring->avail = kring->nr_hwavail;
	ring->reserved = kring->nr_hwreserved;

	return 0;
}
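
/*
 * Companion sketch of the userspace TX side that drives the routine
 * above, using the classic netmap ring API visible in this file
 * (ring->cur / ring->avail). User code, illustrative only; fd and
 * nifp are assumed to come from the NIOCREGIF sequence sketched
 * earlier.
 */
#if 0
	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
	char frame[60] = { /* ... ethernet frame ... */ };

	while (txring->avail > 0) {
		struct netmap_slot *slot = &txring->slot[txring->cur];

		memcpy(NETMAP_BUF(txring, slot->buf_idx), frame, sizeof(frame));
		slot->len = sizeof(frame);
		txring->cur = NETMAP_RING_NEXT(txring, txring->cur);
		txring->avail--;
	}
	ioctl(fd, NIOCTXSYNC, NULL);	/* -> generic_netmap_txsync() */
#endif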

/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	ND("called");
	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining, filter notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}
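
/*
 * Mitigation timeline example (illustrative): a burst of mbufs
 * arrives while netmap_generic_mit >= 32768. The first one finds no
 * timer active, so it raises the notification and starts the timer;
 * the following ones only set gna->mit_pending. When the timer fires
 * (in the mitigation callback, implemented outside this file), the
 * pending flag is presumably checked and a single deferred
 * notification delivered, so the whole burst costs a couple of
 * wakeups instead of one per packet.
 */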

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	u_int k, resvd = ring->reserved;

	if (ring->cur > lim)
		return netmap_ring_reinit(kring);

	/* Import newly received packets into the netmap ring. */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		struct mbuf *m;

		n = 0;
		j = kring->nr_ntc; /* first empty slot in the receive ring */
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (index k)
		 */
		k = (kring->nr_hwcur) ? kring->nr_hwcur-1 : lim;
		while (j != k) {
			int len;
			void *addr = NMB(&ring->slot[j]);

			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once,
			 * by changing rx_queue into a ring.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[j].len = len;
			ring->slot[j].flags = slot_flags;
			m_freem(m);
			if (unlikely(j++ == lim))
				j = 0;
			n++;
		}
		if (n) {
			kring->nr_ntc = j;
			kring->nr_hwavail += n;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/* Skip past packets that userspace has released */
	j = kring->nr_hwcur;
	k = ring->cur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) {
		/* Userspace has released some packets. */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];

			slot->flags &= ~NS_BUF_CHANGED;
			if (unlikely(j++ == lim))
				j = 0;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* Tell userspace that there are new packets. */
	ring->avail = kring->nr_hwavail - resvd;
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}
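
/*
 * Matching userspace RX sketch (user code, illustrative only; fd and
 * nifp as in the NIOCREGIF example above):
 */
#if 0
	struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);

	ioctl(fd, NIOCRXSYNC, NULL);	/* -> generic_netmap_rxsync() */
	while (rxring->avail > 0) {
		struct netmap_slot *slot = &rxring->slot[rxring->cur];
		char *payload = NETMAP_BUF(rxring, slot->buf_idx);

		/* ... consume slot->len bytes at payload ... */
		rxring->cur = NETMAP_RING_NEXT(rxring, rxring->cur);
		rxring->avail--;
	}
#endif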

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
#if 0
		if_rele(na->ifp);
#endif
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

	gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, IFCAP_NETMAP is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		kfree(gna, M_DEVBUF);
	}

	return retval;
}