/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef SPDK_VHOST_INTERNAL_H
#define SPDK_VHOST_INTERNAL_H
#include <linux/virtio_config.h>

#include "spdk/stdinc.h"

#include <rte_vhost.h>

#include "spdk_internal/vhost_user.h"
#include "spdk/log.h"
#include "spdk/util.h"
#include "spdk/rpc.h"
#include "spdk/config.h"

#define SPDK_VHOST_MAX_VQUEUES	256
#define SPDK_VHOST_MAX_VQ_SIZE	1024

#define SPDK_VHOST_SCSI_CTRLR_MAX_DEVS 8

#define SPDK_VHOST_IOVS_MAX 129

#define SPDK_VHOST_VQ_MAX_SUBMISSIONS	32

/*
 * Rate at which stats are checked for interrupt coalescing.
 */
#define SPDK_VHOST_STATS_CHECK_INTERVAL_MS 10
/*
 * Default threshold at which interrupts start to be coalesced.
 */
#define SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD 60000

/*
 * Coalescing is not used by default. Setting this to a value > 0 here
 * or via RPC will enable coalescing.
 */
#define SPDK_VHOST_COALESCING_DELAY_BASE_US 0
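
/*
 * Note (illustrative arithmetic, not an additional tunable): with the defaults
 * above, the IOPS threshold translates to roughly
 * 60000 IOPS * 10 ms / 1000 = 600 requests observed within each stats-check
 * window before coalescing would take effect.
 */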

#define SPDK_VHOST_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \
	(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
	(1ULL << VIRTIO_F_VERSION_1) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \
	(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
	(1ULL << VIRTIO_F_RING_PACKED))

#define SPDK_VHOST_DISABLED_FEATURES ((1ULL << VIRTIO_RING_F_EVENT_IDX) | \
	(1ULL << VIRTIO_F_NOTIFY_ON_EMPTY))

#define VRING_DESC_F_AVAIL	(1ULL << VRING_PACKED_DESC_F_AVAIL)
#define VRING_DESC_F_USED	(1ULL << VRING_PACKED_DESC_F_USED)
#define VRING_DESC_F_AVAIL_USED	(VRING_DESC_F_AVAIL | VRING_DESC_F_USED)

typedef struct rte_vhost_resubmit_desc spdk_vhost_resubmit_desc;
typedef struct rte_vhost_resubmit_info spdk_vhost_resubmit_info;

struct spdk_vhost_virtqueue {
	struct rte_vhost_vring vring;
	struct rte_vhost_ring_inflight vring_inflight;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;

	struct {
		/* To mark a descriptor as available in packed ring
		 * Equal to avail_wrap_counter in spec.
		 */
		uint8_t avail_phase	: 1;
		/* To mark a descriptor as used in packed ring
		 * Equal to used_wrap_counter in spec.
		 */
		uint8_t used_phase	: 1;
		uint8_t padding		: 5;
		bool packed_ring	: 1;
	} packed;

	void *tasks;

	/* Request count from last stats check */
	uint32_t req_cnt;

	/* Request count from last event */
	uint16_t used_req_cnt;

	/* How long interrupt is delayed */
	uint32_t irq_delay_time;

	/* Next time when we need to send event */
	uint64_t next_event_time;

	/* Associated vhost_virtqueue in the virtio device's virtqueue list */
	uint32_t vring_idx;

	struct spdk_vhost_session *vsession;
} __attribute((aligned(SPDK_CACHE_LINE_SIZE)));

struct spdk_vhost_session {
	struct spdk_vhost_dev *vdev;

	/* rte_vhost connection ID. */
	int vid;

	/* Unique session ID. */
	uint64_t id;
	/* Unique session name. */
	char *name;

	bool initialized;
	bool started;
	bool needs_restart;
	bool forced_polling;

	struct rte_vhost_memory *mem;

	int task_cnt;

	uint16_t max_queues;

	uint64_t negotiated_features;

	/* Local copy of device coalescing settings. */
	uint32_t coalescing_delay_time_base;
	uint32_t coalescing_io_rate_threshold;

	/* Next time when stats for event coalescing will be checked. */
	uint64_t next_stats_check_time;

	/* Interval used for event coalescing checking. */
	uint64_t stats_check_interval;

	struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];

	TAILQ_ENTRY(spdk_vhost_session) tailq;
};
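
/*
 * Example (illustrative sketch, not part of the API): per-feature behavior is
 * typically gated on the session's negotiated_features through
 * vhost_dev_has_feature(), declared later in this header, e.g.:
 *
 *	if (vhost_dev_has_feature(vsession, VIRTIO_RING_F_EVENT_IDX)) {
 *		// honor the used-ring event index when deciding to signal
 *	}
 *	bool packed = vhost_dev_has_feature(vsession, VIRTIO_F_RING_PACKED);
 */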

struct spdk_vhost_dev {
	char *name;
	char *path;

	struct spdk_thread *thread;
	bool registered;

	uint64_t virtio_features;
	uint64_t disabled_features;
	uint64_t protocol_features;

	const struct spdk_vhost_dev_backend *backend;

	/* Saved original values used to setup coalescing to avoid integer
	 * rounding issues during save/load config.
	 */
	uint32_t coalescing_delay_us;
	uint32_t coalescing_iops_threshold;

	/* Current connections to the device */
	TAILQ_HEAD(, spdk_vhost_session) vsessions;

	/* Increment-only session counter */
	uint64_t vsessions_num;

	/* Number of started and actively polled sessions */
	uint32_t active_session_num;

	/* Number of pending asynchronous operations */
	uint32_t pending_async_op_num;

	TAILQ_ENTRY(spdk_vhost_dev) tailq;
};

/**
 * \param vdev vhost device.
 * \param vsession vhost session.
 * \param arg user-provided parameter.
 *
 * \return negative values will break the foreach call, meaning
 * the function won't be called again. Zero and positive return
 * codes don't have any effect.
 */
typedef int (*spdk_vhost_session_fn)(struct spdk_vhost_dev *vdev,
				     struct spdk_vhost_session *vsession,
				     void *arg);

/**
 * \param vdev vhost device.
 * \param arg user-provided parameter.
 */
typedef void (*spdk_vhost_dev_fn)(struct spdk_vhost_dev *vdev, void *arg);

struct spdk_vhost_dev_backend {
	/**
	 * Size of additional per-session context data
	 * allocated whenever a new client connects.
	 */
	size_t session_ctx_size;

	int (*start_session)(struct spdk_vhost_session *vsession);
	int (*stop_session)(struct spdk_vhost_session *vsession);

	int (*vhost_get_config)(struct spdk_vhost_dev *vdev, uint8_t *config, uint32_t len);
	int (*vhost_set_config)(struct spdk_vhost_dev *vdev, uint8_t *config,
				uint32_t offset, uint32_t size, uint32_t flags);

	void (*dump_info_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	void (*write_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
	int (*remove_device)(struct spdk_vhost_dev *vdev);
};

void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);

uint16_t vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
				 uint16_t reqs_len);

/**
 * Get a virtio split descriptor at given index in given virtqueue.
 * The descriptor will provide access to the entire descriptor
 * chain. The subsequent descriptors are accessible via
 * \c vhost_vring_desc_get_next.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param req_idx descriptor index
 * \param desc pointer to be set to the descriptor
 * \param desc_table descriptor table to be used with
 * \c vhost_vring_desc_get_next. This might be either
 * the default virtqueue descriptor table or a per-chain
 * indirect table.
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * If -1 is returned, the content of params is undefined.
 */
int vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
		      uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		      uint32_t *desc_table_size);
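
/*
 * Example (illustrative sketch; the surrounding request-handling code is
 * hypothetical): a split-ring request is typically translated into an iovec
 * array by fetching the head descriptor and then walking the chain:
 *
 *	struct vring_desc *desc, *desc_table;
 *	uint32_t desc_table_size;
 *	struct iovec iovs[SPDK_VHOST_IOVS_MAX];
 *	uint16_t iovcnt = 0;
 *
 *	if (vhost_vq_get_desc(vsession, vq, req_idx, &desc,
 *			      &desc_table, &desc_table_size) != 0) {
 *		return;	// invalid descriptor index
 *	}
 *	while (desc != NULL) {
 *		if (vhost_vring_desc_to_iov(vsession, iovs, &iovcnt, desc) != 0) {
 *			break;	// mapping failed or SPDK_VHOST_IOVS_MAX exceeded
 *		}
 *		if (vhost_vring_desc_get_next(&desc, desc_table, desc_table_size) != 0) {
 *			break;	// next index invalid
 *		}
 *		// loop exits normally when desc becomes NULL at the end of the chain
 *	}
 */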

/**
 * Get a virtio packed descriptor at given index in given virtqueue.
 * The descriptor will provide access to the entire descriptor
 * chain. The subsequent descriptors are accessible via
 * \c vhost_vring_packed_desc_get_next.
 * \param vsession vhost session
 * \param vq virtqueue
 * \param req_idx descriptor index
 * \param desc pointer to be set to the descriptor
 * \param desc_table descriptor table to be used with
 * \c vhost_vring_packed_desc_get_next. This might be either
 * \c NULL or a per-chain indirect table.
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid.
 * If -1 is returned, the content of params is undefined.
 */
int vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t req_idx, struct vring_packed_desc **desc,
			     struct vring_packed_desc **desc_table, uint32_t *desc_table_size);

/**
 * Send IRQ/call client (if pending) for \c vq.
 * \param vsession vhost session
 * \param vq virtqueue
 * \return
 *   0 - if no interrupt was signalled
 *   1 - if interrupt was signalled
 */
int vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);


/**
 * Send IRQs for all queues that need to be signaled.
 * \param vsession vhost session
 */
void vhost_session_used_signal(struct spdk_vhost_session *vsession);

/**
 * Send an IRQ for the given queue if it needs to be signaled.
 * \param virtqueue virtqueue
 */
void vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue);

void vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
				struct spdk_vhost_virtqueue *vq,
				uint16_t id, uint32_t len);

/**
 * Enqueue the entry to the used ring when the device completes the request.
 * \param vsession vhost session
 * \param virtqueue virtqueue
 * \param num_descs descriptor count, i.e. the number of buffers in the chain.
 * \param buffer_id descriptor buffer ID.
 * \param length device write length. Specify the length of the buffer that
 * has been initialized (written to) by the device.
 */
void vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
				  struct spdk_vhost_virtqueue *virtqueue,
				  uint16_t num_descs, uint16_t buffer_id,
				  uint32_t length);

/**
 * Get subsequent descriptor from given table.
 * \param desc current descriptor, will be set to the
 * next descriptor (NULL in case this is the last
 * descriptor in the chain or the next desc is invalid)
 * \param desc_table descriptor table
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid
 * The *desc* param will be set regardless of the
 * return value.
 */
int vhost_vring_desc_get_next(struct vring_desc **desc,
			      struct vring_desc *desc_table, uint32_t desc_table_size);
static inline bool
vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
{
	return !!(cur_desc->flags & VRING_DESC_F_WRITE);
}

int vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			    uint16_t *iov_index, const struct vring_desc *desc);

bool vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue);
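
/*
 * Example (illustrative sketch of the packed-ring polling flow; the local
 * variables shown are hypothetical):
 *
 *	while (vhost_vq_packed_ring_is_avail(vq)) {
 *		uint16_t req_idx = vq->last_avail_idx;
 *		uint16_t num_descs, buffer_id;
 *
 *		buffer_id = vhost_vring_packed_desc_get_buffer_id(vq, req_idx, &num_descs);
 *		// ... translate the descriptor chain and submit the I/O ...
 *		// once the request completes:
 *		vhost_vq_packed_ring_enqueue(vsession, vq, num_descs, buffer_id, used_len);
 *	}
 */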

/**
 * Get subsequent descriptor from vq or desc table.
 * \param desc current descriptor, will be set to the
 * next descriptor (NULL in case this is the last
 * descriptor in the chain or the next desc is invalid)
 * \param req_idx index of the current desc, will be set to the next
 * index. If desc_table != NULL, req_idx is an index into desc_table;
 * otherwise it is an index into the vring.
 * \param vq virtqueue
 * \param desc_table descriptor table
 * \param desc_table_size size of the *desc_table*
 * \return 0 on success, -1 if given index is invalid
 * The *desc* param will be set regardless of the
 * return value.
 */
int vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				     struct spdk_vhost_virtqueue *vq,
				     struct vring_packed_desc *desc_table,
				     uint32_t desc_table_size);

bool vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc);

int vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				   uint16_t *iov_index, const struct vring_packed_desc *desc);

uint16_t vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
		uint16_t *num_descs);

static inline bool __attribute__((always_inline))
vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
{
	return vsession->negotiated_features & (1ULL << feature_id);
}

int vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		       const struct spdk_vhost_dev_backend *backend);
int vhost_dev_unregister(struct spdk_vhost_dev *vdev);

void vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);

/*
 * Vhost callbacks for vhost_device_ops interface
 */

int vhost_new_connection_cb(int vid, const char *ifname);
int vhost_start_device_cb(int vid);
int vhost_stop_device_cb(int vid);
int vhost_destroy_connection_cb(int vid);

/*
 * Memory registration functions used in start/stop device callbacks
 */
void vhost_session_mem_register(struct rte_vhost_memory *mem);
void vhost_session_mem_unregister(struct rte_vhost_memory *mem);

/*
 * Call a function for each session of the provided vhost device.
 * The function will be called one-by-one on each session's thread.
 *
 * \param vdev vhost device
 * \param fn function to call on each session's thread
 * \param cpl_fn function to be called at the end of the iteration on
 * the vhost management thread.
 * Optional, can be NULL.
 * \param arg additional argument to both callbacks
 */
void vhost_dev_foreach_session(struct spdk_vhost_dev *vdev,
			       spdk_vhost_session_fn fn,
			       spdk_vhost_dev_fn cpl_fn,
			       void *arg);
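
/*
 * Example (illustrative sketch; the callbacks shown are hypothetical): a
 * device-wide change can be broadcast to every session, with an optional
 * completion callback invoked on the vhost management thread:
 *
 *	static int
 *	update_session(struct spdk_vhost_dev *vdev, struct spdk_vhost_session *vsession, void *arg)
 *	{
 *		// runs on the session's thread; return a negative value to stop iterating
 *		return 0;
 *	}
 *
 *	static void
 *	update_done(struct spdk_vhost_dev *vdev, void *arg)
 *	{
 *		// all sessions have been visited
 *	}
 *
 *	vhost_dev_foreach_session(vdev, update_session, update_done, ctx);
 */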

/**
 * Call a function on the session's lcore and block until either
 * vhost_session_start_done() or vhost_session_stop_done()
 * is called.
 *
 * This must be called under the global vhost mutex, which this function
 * will unlock for the time it's waiting. It's meant to be called only
 * from start/stop session callbacks.
 *
 * \param vsession vhost session
 * \param cb_fn the function to call. The void *arg parameter in cb_fn
 * is always NULL.
 * \param timeout_sec timeout in seconds. This function will still
 * block after the timeout expires, but will print the provided errmsg.
 * \param errmsg error message to print once the timeout expires
 * \return the code passed to vhost_session_start_done() or
 * vhost_session_stop_done().
 */
int vhost_session_send_event(struct spdk_vhost_session *vsession,
			     spdk_vhost_session_fn cb_fn, unsigned timeout_sec,
			     const char *errmsg);

/**
 * Finish a blocking vhost_session_send_event() call and finally
 * start the session. This must be called on the target lcore, which
 * will now receive all session-related messages (e.g. from
 * vhost_dev_foreach_session()).
 *
 * Must be called under the global vhost mutex.
 *
 * \param vsession vhost session
 * \param response return code
 */
void vhost_session_start_done(struct spdk_vhost_session *vsession, int response);

/**
 * Finish a blocking vhost_session_send_event() call and finally
 * stop the session. This must be called on the session's lcore which
 * used to receive all session-related messages (e.g. from
 * vhost_dev_foreach_session()). After this call, the session-
 * related messages will be once again processed by any arbitrary thread.
 *
 * Must be called under the global vhost mutex.
 *
 * \param vsession vhost session
 * \param response return code
 */
void vhost_session_stop_done(struct spdk_vhost_session *vsession, int response);

struct spdk_vhost_session *vhost_session_find_by_vid(int vid);
void vhost_session_install_rte_compat_hooks(struct spdk_vhost_session *vsession);
int vhost_register_unix_socket(const char *path, const char *ctrl_name,
			       uint64_t virtio_features, uint64_t disabled_features,
			       uint64_t protocol_features);
int vhost_driver_unregister(const char *path);
int vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
int vhost_get_negotiated_features(int vid, uint64_t *negotiated_features);

int remove_vhost_controller(struct spdk_vhost_dev *vdev);

#endif /* SPDK_VHOST_INTERNAL_H */