/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <tmmintrin.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

int __attribute__((cold))
virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
	struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	uint16_t desc_idx;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	dxp = &vq->vq_descx[desc_idx];
	dxp->cookie = (void *)cookie;
	vq->sw_ring[desc_idx] = cookie;

	start_dp = vq->vq_ring.desc;
	start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) -
		vq->hw->vtnet_hdr_size;
	start_dp[desc_idx].len = cookie->buf_len -
		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;

	vq->vq_free_cnt--;
	vq->vq_avail_idx++;

	return 0;
}

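/* Refill the RX ring with a burst of RTE_VIRTIO_VPMD_RX_REARM_THRESH mbufs.
 * Each new mbuf is reset from the precomputed rearm_data template
 * (rxvq->mbuf_initializer) and its descriptor is pointed at the fresh
 * buffer, preserving the fixed sw_ring/avail/descriptor index mapping
 * the vector RX path relies on.
 */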
static inline void
virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
{
	int i;
	uint16_t desc_idx;
	struct rte_mbuf **sw_ring;
	struct vring_desc *start_dp;
	int ret;
	struct virtqueue *vq = rxvq->vq;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	sw_ring = &vq->sw_ring[desc_idx];
	start_dp = &vq->vq_ring.desc[desc_idx];

	ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
		RTE_VIRTIO_VPMD_RX_REARM_THRESH);
	if (unlikely(ret)) {
		rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		return;
	}

	for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
		uintptr_t p;

		p = (uintptr_t)&sw_ring[i]->rearm_data;
		*(uint64_t *)p = rxvq->mbuf_initializer;

		start_dp[i].addr =
			MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) -
			vq->hw->vtnet_hdr_size;
		start_dp[i].len = sw_ring[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
	}

	vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq_update_avail_idx(vq);
}

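/* Note on the vector conversion in the routine below: each
 * struct vring_used_elem is 8 bytes ({ uint32 id; uint32 len; }), so one
 * 128-bit load covers two used-ring entries.  shuf_msk1 copies bytes 4-5
 * (element 0 length) and shuf_msk2 bytes 12-13 (element 1 length) into
 * the pkt_len and data_len slots of mbuf->rx_descriptor_fields1, while
 * the packet_type and vlan_tci bytes are zeroed; len_adjust then
 * subtracts the virtio net header size from both length fields.
 */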
/* virtio vPMD receive routine; only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX, with one descriptor per guest buffer.
 * It relies on the RX ring layout optimization: each entry in the avail ring
 * points to the descriptor with the same index in the desc ring, and this
 * mapping is never changed by the driver.
 *
 * If nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned.
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received;
	__m128i shuf_msk1, shuf_msk2, len_adjust;

	shuf_msk1 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		5, 4,			/* dat len */
		0xFF, 0xFF, 5, 4,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);

	shuf_msk2 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		13, 12,			/* dat len */
		0xFF, 0xFF, 13, 12,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);

	/* Subtract the virtio net header length; the length reported in
	 * used->len includes the header.
	 */
	len_adjust = _mm_set_epi16(
		0, 0,
		0,
		(uint16_t)-vq->hw->vtnet_hdr_size,
		0, (uint16_t)-vq->hw->vtnet_hdr_size,
		0, 0);

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	_mm_prefetch((const void *)rused, _MM_HINT_T0);

	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		__m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

		mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
		desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
		_mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);

		mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
		desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
		_mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);

		mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
		desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
		_mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);

		mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
		desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
		_mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);

		pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
		pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
		pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
		pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		_mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
		pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
		pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
		pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		_mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
		pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
		pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
		pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		_mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
		pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
		pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
		pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		_mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);

		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}

	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}

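/* Note on the simple TX path below: the queue-start code for this path
 * (outside this file) is assumed to pre-chain each header descriptor in
 * the upper half of the ring to the data descriptor with the same index
 * in the lower half, with the avail ring pointing at the header
 * descriptors.  Each packet therefore consumes two descriptors, which is
 * why vq_free_cnt moves in steps of two (the "<< 1" / ">> 1" arithmetic)
 * while only the data descriptors in the lower half of the ring are
 * rewritten per burst.
 */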
#define VIRTIO_TX_FREE_THRESH 32
#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
static inline void
virtio_xmit_cleanup(struct virtqueue *vq)
{
	uint16_t i, desc_idx;
	int nb_free = 0;
	struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];

	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
		((vq->vq_nentries >> 1) - 1));
	m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
	m = __rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = __rte_pktmbuf_prefree_seg(m);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool))
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
						(void **)free, nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = __rte_pktmbuf_prefree_seg(m);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
	vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
}

uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_desc *start_dp;
	uint16_t nb_tail, nb_commit;
	int i;
	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;

	nb_used = VIRTQUEUE_NUSED(vq);
	rte_compiler_barrier();

	if (nb_used >= VIRTIO_TX_FREE_THRESH)
		virtio_xmit_cleanup(vq);

	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
	start_dp = vq->vq_ring.desc;
	nb_tail = (uint16_t)(desc_idx_max + 1 - desc_idx);

	if (nb_commit >= nb_tail) {
		for (i = 0; i < nb_tail; i++)
			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
				MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
		}
		nb_commit -= nb_tail;
		desc_idx = 0;
	}
	for (i = 0; i < nb_commit; i++)
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
			MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
	}

	rte_compiler_barrier();

	vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
	vq->vq_avail_idx += nb_pkts;
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	txvq->stats.packets += nb_pkts;

	if (likely(nb_pkts)) {
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	return nb_pkts;
}

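/* Build the 64-bit rearm_data template (data_off, refcnt, nb_segs, port)
 * that virtio_rxq_rearm_vec() copies into each newly allocated mbuf, so
 * the hot RX path avoids per-field initialization.
 */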
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;

	return 0;
}