15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 25566a3e3SBruce Richardson * Copyright(c) 2010-2015 Intel Corporation 36c3169a3SBruce Richardson */ 46c3169a3SBruce Richardson #include <stdint.h> 5*b02b02b6SMaxime Coquelin #include <unistd.h> 66c3169a3SBruce Richardson 7*b02b02b6SMaxime Coquelin #include <rte_eal_paging.h> 8*b02b02b6SMaxime Coquelin #include <rte_malloc.h> 96c3169a3SBruce Richardson #include <rte_mbuf.h> 10*b02b02b6SMaxime Coquelin #include <rte_memzone.h> 116c3169a3SBruce Richardson 126c3169a3SBruce Richardson #include "virtqueue.h" 136c3169a3SBruce Richardson #include "virtio_logs.h" 14b5ba7ee4SMaxime Coquelin #include "virtio.h" 15bcf55c93STiwei Bie #include "virtio_rxtx_simple.h" 166c3169a3SBruce Richardson 176c3169a3SBruce Richardson /* 186c3169a3SBruce Richardson * Two types of mbuf to be cleaned: 196c3169a3SBruce Richardson * 1) mbuf that has been consumed by backend but not used by virtio. 207be78d02SJosh Soref * 2) mbuf that hasn't been consumed by backend. 
 */
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
{
	struct rte_mbuf *cookie;
	struct virtio_hw *hw;
	uint16_t start, end;
	int type, idx;

	if (vq == NULL)
		return NULL;

	hw = vq->hw;
	type = virtio_get_queue_type(hw, vq->vq_queue_index);
	/*
	 * [start, end) is the window of currently free descriptors
	 * (vq_avail_idx .. vq_avail_idx + vq_free_cnt, masked to ring size).
	 * For the vectorized split RX path this window is skipped below.
	 */
	start = vq->vq_avail_idx & (vq->vq_nentries - 1);
	end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);

	for (idx = 0; idx < vq->vq_nentries; idx++) {
		if (hw->use_vec_rx && !virtio_with_packed_queue(hw) &&
				type == VTNET_RQ) {
			/*
			 * Vectorized split RX keeps its mbufs in rxq.sw_ring
			 * rather than vq_descx. Skip slots inside the free
			 * window; the two tests handle the wrapped and
			 * non-wrapped cases of [start, end).
			 */
			if (start <= end && idx >= start && idx < end)
				continue;
			if (start > end && (idx >= start || idx < end))
				continue;
			cookie = vq->rxq.sw_ring[idx];
			if (cookie != NULL) {
				/* Hand ownership of the mbuf to the caller. */
				vq->rxq.sw_ring[idx] = NULL;
				return cookie;
			}
		} else {
			/* Default path: mbufs are tracked in vq_descx[]. */
			cookie = vq->vq_descx[idx].cookie;
			if (cookie != NULL) {
				vq->vq_descx[idx].cookie = NULL;
				return cookie;
			}
		}
	}

	/* No unused mbuf left; caller typically loops until NULL. */
	return NULL;
}

/*
 * Flush used descs (packed ring): walk used descriptors from
 * vq_used_cons_idx, free any attached mbuf and reclaim the slot.
 */
static void
virtqueue_rxvq_flush_packed(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	uint16_t i;

	struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
	int cnt = 0;

	i = vq->vq_used_cons_idx;
	/* cnt bounds the walk to at most one full ring of entries. */
	while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
		/* descs[i].id indexes the bookkeeping entry for this slot. */
		dxp = &vq->vq_descx[descs[i].id];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			/* Index wrapped: flip the used wrap counter (packed ring rule). */
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		i = vq->vq_used_cons_idx;
	}
}

/* Flush the elements in the used ring.
 */
static void
virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
	struct virtnet_rx *rxq = &vq->rxq;
	struct virtio_hw *hw = vq->hw;
	struct vring_used_elem *uep;
	struct vq_desc_extra *dxp;
	uint16_t used_idx, desc_idx;
	uint16_t nb_used, i;

	/* Snapshot of entries the device has placed in the used ring. */
	nb_used = virtqueue_nused(vq);

	for (i = 0; i < nb_used; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		uep = &vq->vq_split.ring.used->ring[used_idx];
		if (hw->use_vec_rx) {
			/*
			 * Vectorized RX keeps descriptor order, so the used
			 * index is the descriptor index and the mbuf lives in
			 * sw_ring; the slot is reclaimed by bumping free_cnt.
			 */
			desc_idx = used_idx;
			rte_pktmbuf_free(vq->rxq.sw_ring[desc_idx]);
			vq->vq_free_cnt++;
		} else if (hw->use_inorder_rx) {
			/* In-order: free one descriptor back in ring order. */
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_inorder(vq, desc_idx, 1);
		} else {
			/* Default: return the whole descriptor chain. */
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_chain(vq, desc_idx);
		}
		vq->vq_used_cons_idx++;
	}

	if (hw->use_vec_rx) {
		/*
		 * Refill the ring in rearm-sized batches and notify the
		 * device if it requested kicks.
		 */
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxq);
			if (virtqueue_kick_prepare(vq))
				virtqueue_notify(vq);
		}
	}
}

/* Flush the elements in the used ring: dispatch on ring layout. */
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
	struct virtio_hw *hw = vq->hw;

	if (virtio_with_packed_queue(hw))
		virtqueue_rxvq_flush_packed(vq);
	else
		virtqueue_rxvq_flush_split(vq);
}

/*
 * Initialize the indirect descriptor (packed layout) of TX slot 'idx' so it
 * points at that slot's virtio_net_hdr inside the per-queue header memzone.
 */
static void
virtqueue_txq_indirect_header_init_packed(struct virtqueue *vq, uint32_t idx)
{
	struct virtio_tx_region *txr;
	struct vring_packed_desc *desc;
	rte_iova_t hdr_mem;

	txr = vq->txq.hdr_mz->addr;
	hdr_mem = vq->txq.hdr_mem;
	desc = txr[idx].tx_packed_indir;

	vring_desc_init_indirect_packed(desc, RTE_DIM(txr[idx].tx_packed_indir));
	/* First indirect entry: the net header of this tx region slot. */
	desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
	desc->len = vq->hw->vtnet_hdr_size;
}

/*
 * Same as above for the split layout; VRING_DESC_F_NEXT chains the header
 * descriptor to the packet data descriptors that follow it.
 */
static void
virtqueue_txq_indirect_header_init_split(struct virtqueue *vq, uint32_t idx)
{
	struct virtio_tx_region *txr;
	struct vring_desc *desc;
	rte_iova_t hdr_mem;

	txr = vq->txq.hdr_mz->addr;
	hdr_mem = vq->txq.hdr_mem;
	desc = txr[idx].tx_indir;

	vring_desc_init_split(desc, RTE_DIM(txr[idx].tx_indir));
	desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
	desc->len = vq->hw->vtnet_hdr_size;
	desc->flags = VRING_DESC_F_NEXT;
}

/*
 * Pre-build the per-slot indirect header descriptors for a TX queue.
 * No-op unless VIRTIO_RING_F_INDIRECT_DESC was negotiated.
 */
void
virtqueue_txq_indirect_headers_init(struct virtqueue *vq)
{
	uint32_t i;

	if (!virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC))
		return;

	for (i = 0; i < vq->vq_nentries; i++)
		if (virtio_with_packed_queue(vq->hw))
			virtqueue_txq_indirect_header_init_packed(vq, i);
		else
			virtqueue_txq_indirect_header_init_split(vq, i);
}

/*
 * Reset a packed RX queue to its pristine post-allocation state:
 * rewind all indices, free any mbufs still attached, zero the ring
 * memory and re-initialize the descriptors. Returns 0.
 */
int
virtqueue_rxvq_reset_packed(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx;

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Packed ring starts with wrap counter 1 and AVAIL flag cached. */
	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;
	/* RX buffers are written by the device. */
	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;

	memset(vq->mz->addr, 0, vq->mz->len);

	/* Release mbufs that were still posted when the reset happened. */
	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vring_desc_init_packed(vq, size);

	virtqueue_disable_intr(vq);
	return 0;
}

/*
 * Reset a packed TX queue; same as the RX variant but also clears the
 * header memzone and rebuilds the indirect header descriptors.
 * Returns 0.
 */
int
virtqueue_txvq_reset_packed(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx;

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;

	memset(vq->mz->addr, 0, vq->mz->len);
	/* Also wipe the per-slot virtio_net_hdr area. */
	memset(vq->txq.hdr_mz->addr, 0, vq->txq.hdr_mz->len);

	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	virtqueue_txq_indirect_headers_init(vq);
	vring_desc_init_packed(vq, size);
	virtqueue_disable_intr(vq);

	return 0;
}


/*
 * One-time vring initialization for a freshly reserved ring memzone:
 * zero everything, reset all software indices and lay out the split or
 * packed vring structures inside vq_ring_virt_mem.
 */
static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	memset(ring_mem, 0, vq->vq_ring_size);

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
	if (virtio_with_packed_queue(vq->hw)) {
		vring_init_packed(&vq->vq_packed.ring, ring_mem,
				  VIRTIO_VRING_ALIGN, size);
		vring_desc_init_packed(vq, size);
	} else {
		struct vring *vr = &vq->vq_split.ring;

		vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
		vring_desc_init_split(vr->desc, size);
	}

	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);
}

/*
 * Reserve the auxiliary header memzone a queue needs, if any:
 * - TX: one struct virtio_tx_region per descriptor (net header plus
 *   pre-built indirect descriptor table);
 * - CQ: one page for control command, data and status;
 * - RX: nothing (returns 0 without allocating).
 * On success *hdr_mz/*hdr_mem in the queue are set; returns 0, or
 * -ENOMEM if no zone could be reserved or found.
 */
static int
virtio_alloc_queue_headers(struct virtqueue *vq, int numa_node, const char *name)
{
	char hdr_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone **hdr_mz;
	rte_iova_t *hdr_mem;
	ssize_t size;
	int queue_type;

	queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
	switch (queue_type) {
	case VTNET_TQ:
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 * and indirect ring elements
		 */
		size = vq->vq_nentries * sizeof(struct virtio_tx_region);
		hdr_mz = &vq->txq.hdr_mz;
		hdr_mem = &vq->txq.hdr_mem;
		break;
	case VTNET_CQ:
		/* Allocate a page for control vq command, data and status */
		size = rte_mem_page_size();
		hdr_mz = &vq->cq.hdr_mz;
		hdr_mem = &vq->cq.hdr_mem;
		break;
	case VTNET_RQ:
		/* fallthrough */
	default:
		return 0;
	}

	snprintf(hdr_name, sizeof(hdr_name), "%s_hdr", name);
	*hdr_mz = rte_memzone_reserve_aligned(hdr_name, size, numa_node,
			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
	if (*hdr_mz == NULL) {
		/*
		 * EEXIST means a zone with this name was already reserved
		 * (e.g. queue re-created); reuse it via lookup.
		 */
		if (rte_errno == EEXIST)
			*hdr_mz = rte_memzone_lookup(hdr_name);
		if (*hdr_mz == NULL)
			return -ENOMEM;
	}

	memset((*hdr_mz)->addr, 0, size);

	/* hdr_mem holds a VA when use_va is set, an IOVA otherwise. */
	if (vq->hw->use_va)
		*hdr_mem = (uintptr_t)(*hdr_mz)->addr;
	else
		*hdr_mem = (uintptr_t)(*hdr_mz)->iova;

	return 0;
}

/*
 * Counterpart of virtio_alloc_queue_headers(): release the header
 * memzone of a TX or control queue and clear the queue's references.
 * No-op for RX queues, which have no header zone.
 */
static void
virtio_free_queue_headers(struct virtqueue *vq)
{
	const struct rte_memzone **hdr_mz;
	rte_iova_t *hdr_mem;
	int queue_type;

	queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
	switch (queue_type) {
	case VTNET_TQ:
		hdr_mz = &vq->txq.hdr_mz;
		hdr_mem = &vq->txq.hdr_mem;
		break;
	case VTNET_CQ:
		hdr_mz = &vq->cq.hdr_mz;
		hdr_mem = &vq->cq.hdr_mem;
		break;
	case VTNET_RQ:
		/* fallthrough */
	default:
		return;
	}

	rte_memzone_free(*hdr_mz);
	*hdr_mz = NULL;
	*hdr_mem = 0;
}

/*
 * Allocate the software ring and the fake mbuf used by the vectorized
 * RX datapath. Returns 0 on success (including when use_vec_rx is off,
 * in which case nothing is allocated), -ENOMEM on allocation failure.
 */
static int
virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
{
	void *sw_ring;
	struct rte_mbuf *mbuf;
	size_t size;

	/* SW ring is only used with vectorized datapath */
	if (!vq->hw->use_vec_rx)
		return 0;

	/* Extra MAX_BURST slots let the vector path read past the end. */
	size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);

	sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!sw_ring) {
		PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
		return -ENOMEM;
	}

	mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
	if (!mbuf) {
		PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
		rte_free(sw_ring);
		return -ENOMEM;
	}

	vq->rxq.sw_ring = sw_ring;
	vq->rxq.fake_mbuf = mbuf;

	return 0;
}

/* Release the vectorized-RX software ring and fake mbuf (NULL-safe). */
static void
virtio_rxq_sw_ring_free(struct virtqueue *vq)
{
	rte_free(vq->rxq.fake_mbuf);
	vq->rxq.fake_mbuf = NULL;
	rte_free(vq->rxq.sw_ring);
	vq->rxq.sw_ring = NULL;
}

/*
 * Allocate and fully initialize a virtqueue of 'num' entries:
 * the virtqueue structure itself (with trailing vq_desc_extra array),
 * the IOVA-contiguous ring memzone, the per-type header memzone and,
 * for RX, the vectorized software ring. Returns the queue or NULL on
 * failure; partial allocations are unwound via the goto chain.
 */
struct virtqueue *
virtqueue_alloc(struct virtio_hw *hw, uint16_t index, uint16_t num, int type,
		int node, const char *name)
{
	struct virtqueue *vq;
	const struct rte_memzone *mz;
	unsigned int size;

	/* vq_desc_extra entries are allocated inline after the struct. */
	size = sizeof(*vq) + num * sizeof(struct vq_desc_extra);
	size = RTE_ALIGN_CEIL(size, RTE_CACHE_LINE_SIZE);

	vq = rte_zmalloc_socket(name, size, RTE_CACHE_LINE_SIZE, node);
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "can not allocate vq");
		return NULL;
	}

	vq->hw = hw;
	vq->vq_queue_index = index;
	vq->vq_nentries = num;
	if (virtio_with_packed_queue(hw)) {
		vq->vq_packed.used_wrap_counter = 1;
		vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
		vq->vq_packed.event_flags_shadow = 0;
		/* RX buffers are device-writable. */
		if (type == VTNET_RQ)
			vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
	}

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(hw, num, VIRTIO_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(name, vq->vq_ring_size, node,
			RTE_MEMZONE_IOVA_CONTIG, VIRTIO_VRING_ALIGN);
	if (mz == NULL) {
		/* Reuse a pre-existing zone of the same name on EEXIST. */
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(name);
		if (mz == NULL)
			goto free_vq;
	}

	memset(mz->addr, 0, mz->len);
	vq->mz = mz;
	vq->vq_ring_virt_mem = mz->addr;

	/*
	 * Datapath addressing mode: with use_va the ring address and mbuf
	 * offset are virtual, otherwise physical/IOVA.
	 */
	if (hw->use_va) {
		vq->vq_ring_mem = (uintptr_t)mz->addr;
		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
	} else {
		vq->vq_ring_mem = mz->iova;
		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);

	virtio_init_vring(vq);

	if (virtio_alloc_queue_headers(vq, node, name)) {
		PMD_INIT_LOG(ERR, "Failed to alloc queue headers");
		goto free_mz;
	}

	switch (type) {
	case VTNET_RQ:
		if (virtio_rxq_sw_ring_alloc(vq, node))
			goto free_hdr_mz;
		break;
	case VTNET_TQ:
		/* Headers exist now; pre-build the indirect descriptors. */
		virtqueue_txq_indirect_headers_init(vq);
		break;
	}

	return vq;

free_hdr_mz:
	virtio_free_queue_headers(vq);
free_mz:
	rte_memzone_free(mz);
free_vq:
	rte_free(vq);

	return NULL;
}

/*
 * Free a virtqueue and every resource virtqueue_alloc() attached to it:
 * the type-specific extras (sw ring or header zone), the ring memzone
 * and the queue structure itself.
 */
void
virtqueue_free(struct virtqueue *vq)
{
	int type;

	type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
	switch (type) {
	case VTNET_RQ:
		virtio_rxq_sw_ring_free(vq);
		break;
	case VTNET_TQ:
	case VTNET_CQ:
		virtio_free_queue_headers(vq);
		break;
	}

	rte_memzone_free(vq->mz);
	rte_free(vq);
}