/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <tmmintrin.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

int __attribute__((cold))
virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
	struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	uint16_t desc_idx;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	dxp = &vq->vq_descx[desc_idx];
	dxp->cookie = (void *)cookie;
	vq->sw_ring[desc_idx] = cookie;

	start_dp = vq->vq_ring.desc;
	start_dp[desc_idx].addr =
		VIRTIO_MBUF_ADDR(cookie, vq) +
		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
	start_dp[desc_idx].len = cookie->buf_len -
		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;

	vq->vq_free_cnt--;
	vq->vq_avail_idx++;

	return 0;
}

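/*
 * Illustration only: the refill, rearm and receive routines in this file all
 * rely on a fixed avail ring layout in which avail->ring[i] permanently
 * points at desc[i], so the fast path never has to rewrite the avail ring.
 * A minimal sketch of how such a mapping could be established at queue setup
 * time is shown below for reference; the real initialization is done
 * elsewhere in the driver (at vring start), and this helper is not called
 * from this file.
 */
static inline void __attribute__((unused))
virtio_rxq_fixed_avail_map_sketch(struct virtqueue *vq)
{
	uint16_t i;

	/* Point every avail ring slot at the descriptor of the same index. */
	for (i = 0; i < vq->vq_nentries; i++)
		vq->vq_ring.avail->ring[i] = i;
}
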
static inline void
virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
{
	int i;
	uint16_t desc_idx;
	struct rte_mbuf **sw_ring;
	struct vring_desc *start_dp;
	int ret;
	struct virtqueue *vq = rxvq->vq;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	sw_ring = &vq->sw_ring[desc_idx];
	start_dp = &vq->vq_ring.desc[desc_idx];

	ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
		RTE_VIRTIO_VPMD_RX_REARM_THRESH);
	if (unlikely(ret)) {
		rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		return;
	}

	for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
		uintptr_t p;

		p = (uintptr_t)&sw_ring[i]->rearm_data;
		*(uint64_t *)p = rxvq->mbuf_initializer;

		start_dp[i].addr =
			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
		start_dp[i].len = sw_ring[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
	}

	vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq_update_avail_idx(vq);
}

/* virtio vPMD receive routine; only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX, one desc for each guest buffer.
 * This routine is based on the RX ring layout optimization: each entry in the
 * avail ring points to the desc with the same index in the desc ring, and
 * this mapping is never changed by the driver.
 *
 * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received;
	__m128i shuf_msk1, shuf_msk2, len_adjust;

	shuf_msk1 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,			/* vlan tci */
		5, 4,				/* dat len */
		0xFF, 0xFF, 5, 4,		/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF		/* packet type */
	);

	shuf_msk2 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,			/* vlan tci */
		13, 12,				/* dat len */
		0xFF, 0xFF, 13, 12,		/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF		/* packet type */
	);

	/* Subtract the header length.
	 * In which case do we need the header length in used->len?
	 */
	len_adjust = _mm_set_epi16(
		0, 0,
		0,
		(uint16_t)-vq->hw->vtnet_hdr_size,
		0, (uint16_t)-vq->hw->vtnet_hdr_size,
		0, 0);

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	_mm_prefetch((const void *)rused, _MM_HINT_T0);

	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		__m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

		mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
		desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
		_mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);

		mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
		desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
		_mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);

		mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
		desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
		_mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);

		mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
		desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
		_mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);

		pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
		pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
		pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
		pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		_mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
		pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
		pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
		pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		_mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
		pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
		pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
		pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		_mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
		pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
		pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
		pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		_mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);

		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}

	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}

#define VIRTIO_TX_FREE_THRESH 32
#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
static inline void
virtio_xmit_cleanup(struct virtqueue *vq)
{
	uint16_t i, desc_idx;
	uint32_t nb_free = 0;
	struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];

	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
		((vq->vq_nentries >> 1) - 1));
	m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
	m = __rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = __rte_pktmbuf_prefree_seg(m);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
						(void **)free,
						RTE_MIN(RTE_DIM(free),
						nb_free));
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free,
			RTE_MIN(RTE_DIM(free), nb_free));
	} else {
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = __rte_pktmbuf_prefree_seg(m);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
	vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
}

uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_desc *start_dp;
	uint16_t nb_tail, nb_commit;
	int i;
	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;

	nb_used = VIRTQUEUE_NUSED(vq);
	rte_compiler_barrier();

	if (nb_used >= VIRTIO_TX_FREE_THRESH)
		virtio_xmit_cleanup(vq);

	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
	start_dp = vq->vq_ring.desc;
	nb_tail = (uint16_t)(desc_idx_max + 1 - desc_idx);

	if (nb_commit >= nb_tail) {
		for (i = 0; i < nb_tail; i++)
			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
		}
		nb_commit -= nb_tail;
		desc_idx = 0;
	}
	for (i = 0; i < nb_commit; i++)
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
			VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
	}

	rte_compiler_barrier();

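	/*
	 * The simple TX path assumes each packet occupies two descriptors:
	 * a per-slot virtio-net header descriptor set up at queue
	 * initialization (outside this file) chained to the data descriptor
	 * filled in above. That is why vq_free_cnt drops by 2 * nb_pkts
	 * while vq_avail_idx advances by only one per packet. The compiler
	 * barrier above keeps the descriptor writes from being reordered
	 * past the avail index update below.
	 */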
	vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
	vq->vq_avail_idx += nb_pkts;
	vq->vq_ring.avail->idx = vq->vq_avail_idx;

	txvq->stats.packets += nb_pkts;

	if (likely(nb_pkts)) {
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	return nb_pkts;
}

int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;

	return 0;
}
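
/*
 * Illustration only: virtio_xmit_pkts_simple() above relies on a TX ring
 * that was pre-chained at setup time -- the second half of the descriptor
 * table holds the per-slot virtio-net headers, and each header descriptor
 * points at the data descriptor of the same slot in the first half, which
 * is why every packet costs two descriptors. A rough sketch of such an
 * initialization follows. It is not called from this file, and the
 * tx_hdr_mem/hdr_stride parameters are stand-ins for wherever the driver
 * actually keeps the per-slot headers.
 */
static inline void __attribute__((unused))
virtio_txq_prechain_sketch(struct virtqueue *vq, uint64_t tx_hdr_mem,
	uint64_t hdr_stride)
{
	uint16_t mid = vq->vq_nentries >> 1;
	uint16_t i;

	for (i = 0; i < mid; i++) {
		/* Avail slot i publishes the header descriptor of slot i. */
		vq->vq_ring.avail->ring[i] = i + mid;
		/* Header descriptor: fixed address and length, chained to
		 * the data descriptor of the same slot.
		 */
		vq->vq_ring.desc[i + mid].addr = tx_hdr_mem + i * hdr_stride;
		vq->vq_ring.desc[i + mid].len = vq->hw->vtnet_hdr_size;
		vq->vq_ring.desc[i + mid].flags = VRING_DESC_F_NEXT;
		vq->vq_ring.desc[i + mid].next = i;
		/* Data descriptor: addr/len are filled per packet by
		 * virtio_xmit_pkts_simple(); it terminates the chain.
		 */
		vq->vq_ring.desc[i].flags = 0;
	}
	/* The second half of the avail ring points at the same header
	 * descriptors, so any avail index maps to a valid chain.
	 */
	for (i = mid; i < vq->vq_nentries; i++)
		vq->vq_ring.avail->ring[i] = i;
}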