/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_VDPA_H_
#define RTE_PMD_MLX5_VDPA_H_

#include <linux/virtio_net.h>
#include <sys/queue.h>

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_vdpa.h>
#include <rte_vdpa_dev.h>
#include <rte_vhost.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_prm.h>

#define MLX5_VDPA_INTR_RETRIES 256
#define MLX5_VDPA_INTR_RETRIES_USEC 1000

#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif

#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 0u
#define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u

struct mlx5_vdpa_cq {
	uint16_t log_desc_n;
	uint32_t cq_ci:24;
	uint32_t arm_sn:2;
	uint32_t armed:1;
	int callfd;
	rte_spinlock_t sl;
	struct mlx5_devx_cq cq_obj;
	uint64_t errors;
};

struct mlx5_vdpa_event_qp {
	struct mlx5_vdpa_cq cq;
	struct mlx5_devx_obj *fw_qp;
	struct mlx5_devx_obj *sw_qp;
	struct mlx5dv_devx_umem *umem_obj;
	void *umem_buf;
	volatile uint32_t *db_rec;
};

struct mlx5_vdpa_query_mr {
	SLIST_ENTRY(mlx5_vdpa_query_mr) next;
	void *addr;
	uint64_t length;
	struct mlx5dv_devx_umem *umem;
	struct mlx5_devx_obj *mkey;
	int is_indirect;
};

enum {
	MLX5_VDPA_NOTIFIER_STATE_DISABLED,
	MLX5_VDPA_NOTIFIER_STATE_ENABLED,
	MLX5_VDPA_NOTIFIER_STATE_ERR
};

struct mlx5_vdpa_virtq {
	SLIST_ENTRY(mlx5_vdpa_virtq) next;
	uint8_t enable;
	uint16_t index;
	uint16_t vq_size;
	uint8_t notifier_state;
	bool stopped;
	uint32_t version;
	struct mlx5_vdpa_priv *priv;
	struct mlx5_devx_obj *virtq;
	struct mlx5_devx_obj *counters;
	struct mlx5_vdpa_event_qp eqp;
	struct {
		struct mlx5dv_devx_umem *obj;
		void *buf;
		uint32_t size;
	} umems[3];
	struct rte_intr_handle intr_handle;
	uint64_t err_time[3]; /* RDTSC time of recent errors. */
	uint32_t n_retry;
	struct mlx5_devx_virtio_q_couners_attr reset;
};

struct mlx5_vdpa_steer {
	struct mlx5_devx_obj *rqt;
	void *domain;
	void *tbl;
	struct {
		struct mlx5dv_flow_matcher *matcher;
		struct mlx5_devx_obj *tir;
		void *tir_action;
		void *flow;
	} rss[7];
};

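/*
 * Completion event processing modes: a timer thread whose delay adapts to
 * the traffic rate, a timer thread with a fixed delay, or pure
 * interrupt-driven processing. The mode is selected by the driver
 * "event_mode" devarg.
 */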
enum {
	MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
	MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
	MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
};

struct mlx5_vdpa_priv {
	TAILQ_ENTRY(mlx5_vdpa_priv) next;
	uint8_t configured;
	pthread_mutex_t vq_config_lock;
	uint64_t last_traffic_tic;
	pthread_t timer_tid;
	pthread_mutex_t timer_lock;
	pthread_cond_t timer_cond;
	volatile uint8_t timer_on;
	int event_mode;
	int event_core; /* Event thread cpu affinity core. */
	uint32_t event_us;
	uint32_t timer_delay_us;
	uint32_t no_traffic_time_s;
	uint8_t hw_latency_mode; /* Hardware CQ moderation mode. */
	uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
	uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
	struct rte_vdpa_device *vdev; /* vDPA device. */
	int vid; /* vhost device id. */
	struct ibv_context *ctx; /* Device context. */
	struct rte_pci_device *pci_dev;
	struct mlx5_hca_vdpa_attr caps;
	uint32_t pdn; /* Protection Domain number. */
	struct ibv_pd *pd;
	uint32_t gpa_mkey_index;
	struct ibv_mr *null_mr;
	struct rte_vhost_memory *vmem;
	struct mlx5dv_devx_event_channel *eventc;
	struct mlx5dv_devx_event_channel *err_chnl;
	struct mlx5dv_devx_uar *uar;
	struct rte_intr_handle intr_handle;
	struct rte_intr_handle err_intr_handle;
	struct mlx5_devx_obj *td;
	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
	uint16_t nr_virtqs;
	uint8_t num_lag_ports;
	uint8_t qp_ts_format;
	uint64_t features; /* Negotiated features. */
	uint16_t log_max_rqt_size;
	struct mlx5_vdpa_steer steer;
	struct mlx5dv_var *var;
	void *virtq_db_addr;
	SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
	struct mlx5_vdpa_virtq virtqs[];
};

enum {
	MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
	MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
	MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
	MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
	MLX5_VDPA_STATS_INVALID_BUFFER,
	MLX5_VDPA_STATS_COMPLETION_ERRORS,
	MLX5_VDPA_STATS_MAX
};

/*
 * Check whether the virtq is a receive queue.
 * According to the VIRTIO_NET specification, a virtqueue's index identifies
 * its type as follows:
 * 0		receiveq1
 * 1		transmitq1
 * ...
 * 2(N-1)	receiveqN
 * 2(N-1)+1	transmitqN
 * 2N		controlq
 */
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
		return 1;
	return 0;
}

/**
 * Release all the prepared memory regions and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);

/**
 * Register all the memory regions of the virtio device to the HW and allocate
 * all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);

/**
 * Create an event QP and all its related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] desc_n
 *   Number of descriptors.
 * @param[in] callfd
 *   The guest notification file descriptor.
 * @param[in/out] eqp
 *   Pointer to the event QP structure.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			      int callfd, struct mlx5_vdpa_event_qp *eqp);

/**
 * Destroy an event QP and all its related resources.
 *
 * @param[in/out] eqp
 *   Pointer to the event QP structure.
 */
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);

/**
 * Release all the global event resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);

/**
 * Setup CQE event.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset CQE event.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);

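/*
 * Illustrative sketch, not part of the driver API: one way a virtq setup
 * path can create the event QP of a single virtq from its vhost vring,
 * using the declarations above. The helper name is hypothetical and error
 * handling is trimmed for brevity.
 */
static inline int
mlx5_vdpa_example_virtq_eqp_create(struct mlx5_vdpa_priv *priv,
				   struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;

	if (rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq))
		return -1;
	/* Size the event QP by the vring length; arm it on the callfd. */
	return mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
					 &virtq->eqp);
}
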
/**
 * Setup error interrupt handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset error event handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);

/**
 * Release all the virtqs and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);

/**
 * Create all the HW virtq resources and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable a virtq.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 * @param[in] enable
 *   Set to enable, otherwise disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);

/**
 * Unset steering and release all its related resources - stop traffic.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);

/**
 * Update steering according to the RX queues state.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);

/**
 * Setup steering and all its related resources to enable RSS traffic from the
 * device to all the Rx host queues.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable live migration logging.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] enable
 *   Set for enable, unset for disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);

/**
 * Set dirty bitmap logging to allow live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] log_base
 *   Vhost log base.
 * @param[in] log_size
 *   Vhost log size.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
			       uint64_t log_size);

/**
 * Log all virtqs information for live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);

/**
 * Modify virtq state to ready or suspend.
 *
 * @param[in] virtq
 *   The vdpa driver private virtq structure.
 * @param[in] state
 *   Set for ready, otherwise suspend.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);

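/*
 * Illustrative sketch, not part of the driver API: a plausible call order to
 * start live-migration dirty-page logging, taking the log base and size from
 * the vhost library. The helper name is hypothetical.
 */
static inline int
mlx5_vdpa_example_lm_start(struct mlx5_vdpa_priv *priv)
{
	uint64_t log_base, log_size;

	if (rte_vhost_get_log_base(priv->vid, &log_base, &log_size))
		return -1;
	/* Set the dirty bitmap before turning modification logging on. */
	if (mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size))
		return -1;
	return mlx5_vdpa_logging_enable(priv, 1);
}
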
/**
 * Stop virtq before destroying it.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);

/**
 * Query virtq information.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);

/**
 * Get virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 * @param stats
 *   The virtq statistics array to fill.
 * @param n
 *   The number of elements in @p stats array.
 *
 * @return
 *   A negative value on error, otherwise the number of entries filled in the
 *   @p stats array.
 */
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n);

/**
 * Reset virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 *
 * @return
 *   A negative value on error, otherwise 0.
 */
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);

#endif /* RTE_PMD_MLX5_VDPA_H_ */