/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright(c) 2022 Red Hat, Inc.
 */

#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_errno.h>

#include "virtio_cvq.h"
#include "virtqueue.h"

static struct virtio_pmd_ctrl *
virtio_send_command_packed(struct virtnet_ctl *cvq,
                           struct virtio_pmd_ctrl *ctrl,
                           int *dlen, int pkt_num)
{
        struct virtqueue *vq = virtnet_cq_to_vq(cvq);
        int head;
        struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
        struct virtio_pmd_ctrl *result;
        uint16_t flags;
        int sum = 0;
        int nb_descs = 0;
        int k;

        /*
         * Format is enforced in qemu code:
         * One TX packet for header;
         * At least one TX packet per argument;
         * One RX packet for ACK.
         */

        /* Device-readable descriptor for the request header. */
        head = vq->vq_avail_idx;
        flags = vq->vq_packed.cached_flags;
        desc[head].addr = cvq->hdr_mem;
        desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
        nb_descs++;
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
                vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
        }

        /* One device-readable descriptor per command argument. */
        for (k = 0; k < pkt_num; k++) {
                desc[vq->vq_avail_idx].addr = cvq->hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr)
                        + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
                desc[vq->vq_avail_idx].len = dlen[k];
                desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
                        vq->vq_packed.cached_flags;
                sum += dlen[k];
                vq->vq_free_cnt--;
                nb_descs++;
                if (++vq->vq_avail_idx >= vq->vq_nentries) {
                        vq->vq_avail_idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

        /* Device-writable descriptor for the status byte. */
        desc[vq->vq_avail_idx].addr = cvq->hdr_mem
                + sizeof(struct virtio_net_ctrl_hdr);
        desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
        desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
                vq->vq_packed.cached_flags;
        vq->vq_free_cnt--;
        nb_descs++;
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
                vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
        }

        /*
         * Make the head descriptor available last so the device never sees
         * a partially written chain.
         */
        virtqueue_store_flags_packed(&desc[head], VRING_DESC_F_NEXT | flags,
                        vq->hw->weak_barriers);

        virtio_wmb(vq->hw->weak_barriers);
        cvq->notify_queue(vq, cvq->notify_cookie);

        /* Wait for a used descriptor in the virtqueue.
         * desc_is_used has a load-acquire or rte_io_rmb inside.
         */
        while (!desc_is_used(&desc[head], vq))
                usleep(100);

        /* Now reclaim the used descriptors. */
        vq->vq_free_cnt += nb_descs;
        vq->vq_used_cons_idx += nb_descs;
        if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                vq->vq_used_cons_idx -= vq->vq_nentries;
                vq->vq_packed.used_wrap_counter ^= 1;
        }

        PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
                        "vq->vq_avail_idx=%d\n"
                        "vq->vq_used_cons_idx=%d\n"
                        "vq->vq_packed.cached_flags=0x%x\n"
                        "vq->vq_packed.used_wrap_counter=%d",
                        vq->vq_free_cnt,
                        vq->vq_avail_idx,
                        vq->vq_used_cons_idx,
                        vq->vq_packed.cached_flags,
                        vq->vq_packed.used_wrap_counter);

        result = cvq->hdr_mz->addr;
        return result;
}
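/*
 * Both send paths chain their descriptors over the same DMA-able bounce
 * buffer at cvq->hdr_mem. Its layout, as read off the offsets used above
 * and below (a sketch for reference; the authoritative definition is
 * struct virtio_pmd_ctrl in virtio_cvq.h):
 *
 *   hdr_mem + 0                     request header (device-readable)
 *   hdr_mem + sizeof(hdr)           status byte (device-writable)
 *   hdr_mem + sizeof(hdr)
 *           + sizeof(status)        argument payloads, dlen[0..pkt_num-1]
 *                                   bytes back to back (device-readable)
 */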
static struct virtio_pmd_ctrl *
virtio_send_command_split(struct virtnet_ctl *cvq,
                          struct virtio_pmd_ctrl *ctrl,
                          int *dlen, int pkt_num)
{
        struct virtio_pmd_ctrl *result;
        struct virtqueue *vq = virtnet_cq_to_vq(cvq);
        uint32_t head, i;
        int k, sum = 0;

        head = vq->vq_desc_head_idx;

        /*
         * Format is enforced in qemu code:
         * One TX packet for header;
         * At least one TX packet per argument;
         * One RX packet for ACK.
         */
        vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
        vq->vq_split.ring.desc[head].addr = cvq->hdr_mem;
        vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
        i = vq->vq_split.ring.desc[head].next;

        /* One device-readable descriptor per command argument. */
        for (k = 0; k < pkt_num; k++) {
                vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vq_split.ring.desc[i].addr = cvq->hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr)
                        + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
                vq->vq_split.ring.desc[i].len = dlen[k];
                sum += dlen[k];
                vq->vq_free_cnt--;
                i = vq->vq_split.ring.desc[i].next;
        }

        /* Device-writable status descriptor terminates the chain. */
        vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
        vq->vq_split.ring.desc[i].addr = cvq->hdr_mem
                + sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
        vq->vq_free_cnt--;

        vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;

        vq_update_avail_ring(vq, head);
        vq_update_avail_idx(vq);

        PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

        cvq->notify_queue(vq, cvq->notify_cookie);

        while (virtqueue_nused(vq) == 0)
                usleep(100);

        /* Walk the used ring and put each chain back on the free list. */
        while (virtqueue_nused(vq)) {
                uint32_t idx, desc_idx, used_idx;
                struct vring_used_elem *uep;

                used_idx = (uint32_t)(vq->vq_used_cons_idx
                                & (vq->vq_nentries - 1));
                uep = &vq->vq_split.ring.used->ring[used_idx];
                idx = (uint32_t)uep->id;
                desc_idx = idx;

                while (vq->vq_split.ring.desc[desc_idx].flags &
                                VRING_DESC_F_NEXT) {
                        desc_idx = vq->vq_split.ring.desc[desc_idx].next;
                        vq->vq_free_cnt++;
                }

                vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
                vq->vq_desc_head_idx = idx;

                vq->vq_used_cons_idx++;
                vq->vq_free_cnt++;
        }

        PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
                        vq->vq_free_cnt, vq->vq_desc_head_idx);

        result = cvq->hdr_mz->addr;
        return result;
}

int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *dlen, int pkt_num)
{
        virtio_net_ctrl_ack status = ~0;
        struct virtio_pmd_ctrl *result;
        struct virtqueue *vq;

        /* Preset the status to an invalid value; the device overwrites it. */
        ctrl->status = status;

        if (!cvq) {
                PMD_INIT_LOG(ERR, "Control queue is not supported.");
                return -1;
        }

        rte_spinlock_lock(&cvq->lock);
        vq = virtnet_cq_to_vq(cvq);

        PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
                        "vq->hw->cvq = %p vq = %p",
                        vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

        /* Need one descriptor for the header, one per argument, one for status. */
        if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
                rte_spinlock_unlock(&cvq->lock);
                return -1;
        }

        memcpy(cvq->hdr_mz->addr, ctrl, sizeof(struct virtio_pmd_ctrl));

        if (virtio_with_packed_queue(vq->hw))
                result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
        else
                result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);

        rte_spinlock_unlock(&cvq->lock);
        return result->status;
}
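/*
 * Illustrative usage sketch (not compiled; the helper below is hypothetical
 * and shown only to document the calling convention): one dlen[] entry per
 * argument, and the device-written status byte comes back as the return
 * value. VIRTIO_NET_CTRL_RX and VIRTIO_NET_CTRL_RX_PROMISC are the standard
 * virtio-net control constants.
 *
 * static int
 * example_set_promisc(struct virtnet_ctl *cvq, int enable)
 * {
 *         struct virtio_pmd_ctrl ctrl;
 *         int dlen[1];
 *         uint8_t on = !!enable;
 *
 *         ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
 *         ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
 *         memcpy(ctrl.data, &on, sizeof(on));
 *         dlen[0] = sizeof(on);
 *
 *         return virtio_send_command(cvq, &ctrl, dlen, 1);
 * }
 */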