/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <sys/queue.h>

#include <dev_driver.h>
#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_vdpa.h"
#include "vdpa_driver.h"
#include "vhost.h"

/** Doubly linked list of vDPA devices. */
TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);

static struct vdpa_device_list vdpa_device_list__ =
	TAILQ_HEAD_INITIALIZER(vdpa_device_list__);
static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;
static struct vdpa_device_list * const vdpa_device_list
	__rte_guarded_by(&vdpa_device_list_lock) = &vdpa_device_list__;

static struct rte_vdpa_device *
__vdpa_find_device_by_name(const char *name)
	__rte_exclusive_locks_required(&vdpa_device_list_lock)
{
	struct rte_vdpa_device *dev, *ret = NULL;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(dev, vdpa_device_list, next) {
		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
			ret = dev;
			break;
		}
	}

	return ret;
}

struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	dev = __vdpa_find_device_by_name(name);
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
	if (vdpa_dev == NULL)
		return NULL;

	return vdpa_dev->device;
}

struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	int ret = 0;

	if (ops == NULL)
		return NULL;

	/* Check mandatory ops are implemented */
	if (!ops->get_queue_num || !ops->get_features ||
			!ops->get_protocol_features || !ops->dev_conf ||
			!ops->dev_close || !ops->set_vring_state ||
			!ops->set_features) {
		VHOST_LOG_CONFIG(rte_dev->name, ERR,
				"Some mandatory vDPA ops aren't implemented\n");
		return NULL;
	}

	rte_spinlock_lock(&vdpa_device_list_lock);
	/* Check the device hasn't been registered already */
	dev = __vdpa_find_device_by_name(rte_dev->name);
	if (dev) {
		dev = NULL;
		goto out_unlock;
	}

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev)
		goto out_unlock;

	dev->device = rte_dev;
	dev->ops = ops;

	if (ops->get_dev_type) {
		ret = ops->get_dev_type(dev, &dev->type);
		if (ret) {
			VHOST_LOG_CONFIG(rte_dev->name, ERR,
					"Failed to get vdpa dev type.\n");
			/* Don't hand back a device that wasn't registered */
			rte_free(dev);
			dev = NULL;
			goto out_unlock;
		}
	} else {
		/* By default, assume the vDPA device is a net device */
		dev->type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;
	}

	TAILQ_INSERT_TAIL(vdpa_device_list, dev, next);
out_unlock:
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

int
rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *cur_dev, *tmp_dev;
	int ret = -1;

	rte_spinlock_lock(&vdpa_device_list_lock);
	RTE_TAILQ_FOREACH_SAFE(cur_dev, vdpa_device_list, next, tmp_dev) {
		if (dev != cur_dev)
			continue;

		TAILQ_REMOVE(vdpa_device_list, dev, next);
		rte_free(dev);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return ret;
}
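
/*
 * Illustrative only, not built as part of the library: a minimal sketch of
 * how a vDPA driver could register itself from its probe routine. The
 * "my_vdpa_*" callbacks and probe function are hypothetical placeholders;
 * only the ops checked as mandatory in rte_vdpa_register_device() above
 * need to be provided, the remaining ops may be left NULL.
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = {
 *		.get_queue_num = my_vdpa_get_queue_num,
 *		.get_features = my_vdpa_get_features,
 *		.get_protocol_features = my_vdpa_get_protocol_features,
 *		.dev_conf = my_vdpa_dev_conf,
 *		.dev_close = my_vdpa_dev_close,
 *		.set_vring_state = my_vdpa_set_vring_state,
 *		.set_features = my_vdpa_set_features,
 *	};
 *
 *	static int
 *	my_vdpa_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_vdpa_device *vdev;
 *
 *		vdev = rte_vdpa_register_device(rte_dev, &my_vdpa_ops);
 *		if (vdev == NULL)
 *			return -1;
 *		return 0;
 *	}
 *
 * The matching remove path would call rte_vdpa_unregister_device(vdev).
 */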

int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
	__rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/*
				 * The indirect desc table is not contiguous
				 * in process VA space, copy it to a buffer.
				 */
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writable buffers */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* used idx is the synchronization point for the split vring */
	__atomic_store_n(&vq->used->idx, idx_m, __ATOMIC_RELEASE);

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

int
rte_vdpa_get_queue_num(struct rte_vdpa_device *dev, uint32_t *queue_num)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_queue_num == NULL)
		return -1;

	return dev->ops->get_queue_num(dev, queue_num);
}

int
rte_vdpa_get_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_features == NULL)
		return -1;

	return dev->ops->get_features(dev, features);
}

int
rte_vdpa_get_protocol_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL ||
			dev->ops->get_protocol_features == NULL)
		return -1;

	return dev->ops->get_protocol_features(dev, features);
}
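
/*
 * Illustrative only, not built as part of the library: a minimal sketch of
 * how an application could look up a vDPA device and query its capabilities
 * before binding it to a vhost-user port. The device name used here is a
 * hypothetical PCI address and error handling is reduced to early returns.
 *
 *	struct rte_vdpa_device *vdev;
 *	uint32_t queue_num;
 *	uint64_t features, protocol_features;
 *
 *	vdev = rte_vdpa_find_device_by_name("0000:01:00.0");
 *	if (vdev == NULL)
 *		return -1;
 *
 *	if (rte_vdpa_get_queue_num(vdev, &queue_num) != 0 ||
 *			rte_vdpa_get_features(vdev, &features) != 0 ||
 *			rte_vdpa_get_protocol_features(vdev,
 *					&protocol_features) != 0)
 *		return -1;
 */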

int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	if (!dev)
		return -EINVAL;

	if (dev->ops->get_stats_names == NULL)
		return -ENOTSUP;

	return dev->ops->get_stats_names(dev, stats_names, size);
}

int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	if (!dev || !stats || !n)
		return -EINVAL;

	if (dev->ops->get_stats == NULL)
		return -ENOTSUP;

	return dev->ops->get_stats(dev, qid, stats, n);
}

int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
	if (!dev)
		return -EINVAL;

	if (dev->ops->reset_stats == NULL)
		return -ENOTSUP;

	return dev->ops->reset_stats(dev, qid);
}

static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	if (start == NULL)
		dev = TAILQ_FIRST(vdpa_device_list);
	else
		dev = TAILQ_NEXT(start, next);

	while (dev != NULL) {
		if (cmp(dev, rte_dev) == 0)
			break;

		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);
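
/*
 * Illustrative only, not built as part of the library: a sketch of the
 * two-step statistics query, assuming the driver reports the number of
 * statistics when stats_names is NULL (as the public API documents). The
 * queue id 0 and the goto-based cleanup are just for illustration.
 *
 *	int i, num;
 *	struct rte_vdpa_stat_name *names = NULL;
 *	struct rte_vdpa_stat *stats = NULL;
 *
 *	num = rte_vdpa_get_stats_names(vdev, NULL, 0);
 *	if (num <= 0)
 *		return num;
 *
 *	names = rte_malloc(NULL, sizeof(*names) * num, 0);
 *	stats = rte_malloc(NULL, sizeof(*stats) * num, 0);
 *	if (names == NULL || stats == NULL)
 *		goto out;
 *
 *	if (rte_vdpa_get_stats_names(vdev, names, num) < 0 ||
 *			rte_vdpa_get_stats(vdev, 0, stats, num) < 0)
 *		goto out;
 *
 *	for (i = 0; i < num; i++)
 *		printf("%s: %" PRIu64 "\n",
 *			names[stats[i].id].name, stats[i].value);
 * out:
 *	rte_free(names);
 *	rte_free(stats);
 */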