// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018 Mellanox Technologies, Ltd */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"


/**
 * Allocate flow counters via devx interface.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param bulk_n_128
 *   Bulk size in 128-counter units.
 *
 * @return
 *   Pointer to counter object on success, NULL otherwise and
 *   rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		rte_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}
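
/*
 * Usage sketch (illustrative only, not part of the driver): allocating a
 * bulk of 4 * 128 = 512 flow counters. "ctx" is assumed to be a device
 * context obtained from the mlx5 open_device() glue function.
 *
 *	struct mlx5_devx_obj *dcs;
 *
 *	dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 4);
 *	if (!dcs)
 *		return -rte_errno;
 *	// dcs->id holds the base counter ID of the bulk.
 */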

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_flow_counter_alloc().
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 when reading a single counter, otherwise the number of counters
 *   to read in batch mode.
 * @param[out] pkts
 *   The number of packets that matched the flow.
 * @param[out] bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey for the batch query.
 * @param addr
 *   The address in the mkey range for the batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 void *cmd_comp,
				 uint64_t async_id)
{
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}
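
/*
 * Usage sketch (illustrative only): synchronous read of the single counter
 * object "dcs" allocated above, without clearing it. Batch and asynchronous
 * modes are selected by a non-zero n_counters/mkey/addr or a non-NULL
 * cmd_comp respectively.
 *
 *	uint64_t pkts = 0, bytes = 0;
 *
 *	if (mlx5_devx_cmd_flow_counter_query(dcs, 0, 0, &pkts, &bytes,
 *					     0, NULL, NULL, 0))
 *		return -rte_errno;
 */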

/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);

		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	if (attr->relaxed_ordering == 1) {
		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
	}
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}

/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

/**
 * Destroy any object allocated by a Devx API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	rte_free(obj);
	return ret;
}
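
/*
 * Usage sketch (illustrative only): creating a direct (MTT, klm_num == 0)
 * mkey over a buffer registered as a DevX umem. "ctx", "buf", "size" and
 * "pdn" are assumed to be prepared by the caller.
 *
 *	struct mlx5dv_devx_umem *umem;
 *	struct mlx5_devx_mkey_attr mkey_attr = { 0 };
 *	struct mlx5_devx_obj *mkey;
 *
 *	umem = mlx5_glue->devx_umem_reg(ctx, buf, size,
 *					IBV_ACCESS_LOCAL_WRITE);
 *	if (!umem)
 *		return -errno;
 *	mkey_attr.addr = (uintptr_t)buf;
 *	mkey_attr.size = size;
 *	mkey_attr.umem_id = umem->umem_id;
 *	mkey_attr.pd = pdn;
 *	mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
 *	if (!mkey)
 *		return -rte_errno;
 *	// ... use mkey->id in commands, then release:
 *	mlx5_devx_cmd_destroy(mkey);
 *	mlx5_glue->devx_umem_dereg(umem);
 */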

/**
 * Query NIC vport context.
 * Fills the minimal inline attribute.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] vport
 *   vport index.
 * @param[out] attr
 *   Pointer to the device attributes structure to fill.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(void *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int status, syndrome, rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_nic_vport_context_out, out, status);
	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] vdpa_attr
 *   vDPA Attributes structure to fill.
 */
static void
mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
				  struct mlx5_hca_vdpa_attr *vdpa_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (rc || status) {
		DRV_LOG(DEBUG, "Failed to query devx VDPA capabilities,"
			" status %x, syndrome = %x", status, syndrome);
		vdpa_attr->valid = 0;
	} else {
		vdpa_attr->valid = 1;
		vdpa_attr->desc_tunnel_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 desc_tunnel_offload_type);
		vdpa_attr->eth_frame_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 eth_frame_offload_type);
		vdpa_attr->virtio_version_1_0 =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_version_1_0);
		vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv4);
		vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv6);
		vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      tx_csum);
		vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      rx_csum);
		vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
						 event_mode);
		vdpa_attr->virtio_queue_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_queue_type);
		vdpa_attr->log_doorbell_stride =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_stride);
		vdpa_attr->log_doorbell_bar_size =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_bar_size);
		vdpa_attr->doorbell_bar_offset =
			MLX5_GET64(virtio_emulation_cap, hcattr,
				   doorbell_bar_offset);
		vdpa_attr->max_num_virtio_queues =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 max_num_virtio_queues);
		vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_a);
		vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_b);
		vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_a);
		vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_b);
		vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_a);
		vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_b);
	}
}

/**
 * Query HCA attributes.
 * Using those attributes we can check at run time whether the device
 * has the required capabilities.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] attr
 *   Pointer to the device attributes structure to fill.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_hca_attr(void *ctx,
			     struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr;
	int status, syndrome, rc, i;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in), out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->flow_counter_bulk_alloc_bitmap =
			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
					    flow_counters_dump);
	attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
					  log_max_rqt_size);
	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
						log_max_hairpin_queues);
	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
						    log_max_hairpin_wq_data_sz);
	attr->log_max_hairpin_num_packets = MLX5_GET
		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
	attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
						relaxed_ordering_write);
	attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
					       relaxed_ordering_read);
	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
					  eth_net_offloads);
	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
					       flex_parser_protocols);
	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
							general_obj_types) &
				  MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
	if (attr->qos.sup) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);
		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
						 out, sizeof(out));
		if (rc)
			goto error;
		/* Refresh status/syndrome, left over from the 1st query. */
		status = MLX5_GET(query_hca_cap_out, out, status);
		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
		if (status) {
			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
				" status %x, syndrome = %x",
				status, syndrome);
			return -1;
		}
		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
		attr->qos.srtcm_sup =
				MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
		attr->qos.log_max_flow_meter =
				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
		attr->qos.flow_meter_reg_c_ids =
			MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
		attr->qos.flow_meter_reg_share =
			MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
	}
	if (attr->vdpa.valid)
		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
	if (!attr->eth_net_offloads)
		return 0;

	/* Query HCA offloads for Ethernet protocol. */
	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc) {
		attr->eth_net_offloads = 0;
		goto error;
	}
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		attr->eth_net_offloads = 0;
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_vlan_insert);
	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_cap);
	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
					hcattr, tunnel_lro_gre);
	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, tunnel_lro_vxlan);
	attr->lro_max_msg_sz_mode = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, lro_max_msg_sz_mode);
	for (i = 0; i < MLX5_LRO_NUM_SUPP_PERIODS; i++) {
		attr->lro_timer_supported_periods[i] =
			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_timer_supported_periods[i]);
	}
	attr->tunnel_stateless_geneve_rx =
			    MLX5_GET(per_protocol_networking_offload_caps,
				     hcattr, tunnel_stateless_geneve_rx);
	attr->geneve_max_opt_len =
		    MLX5_GET(per_protocol_networking_offload_caps,
			     hcattr, max_geneve_opt_len);
	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_inline_mode);
	attr->tunnel_stateless_gtp = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, tunnel_stateless_gtp);
	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return 0;
	if (attr->eth_virt) {
		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
		if (rc) {
			attr->eth_virt = 0;
			goto error;
		}
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
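
/*
 * Usage sketch (illustrative only): probing capabilities once at device
 * start and branching on the result. All fields used below are filled by
 * mlx5_devx_cmd_query_hca_attr().
 *
 *	struct mlx5_hca_attr hca_attr = { 0 };
 *	int ret;
 *
 *	ret = mlx5_devx_cmd_query_hca_attr(ctx, &hca_attr);
 *	if (ret)
 *		return ret;
 *	if (hca_attr.lro_cap)
 *		DRV_LOG(DEBUG, "LRO is supported.");
 *	if (!hca_attr.eswitch_manager)
 *		DRV_LOG(DEBUG, "Not an E-Switch manager.");
 */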

/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp().
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	int rc;
	void *tis_ctx;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
#else
	(void)qp;
	(void)tis_num;
	(void)tis_td;
	return -ENOTSUP;
#endif
}

/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create RQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rq_attr
 *   Pointer to create RQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(void *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!rq->obj) {
		DRV_LOG(ERR, "Failed to create RQ using DevX");
		rte_errno = errno;
		rte_free(rq);
		return NULL;
	}
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}
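
/*
 * Usage sketch (illustrative only): creating a cyclic memory RQ in RST
 * state. "cqn", "pdn", the doorbell/WQ umem IDs and sizes are assumed to be
 * prepared by the caller, and MLX5_WQ_TYPE_CYCLIC is assumed to be defined
 * in mlx5_prm.h. Only a few representative fields are shown.
 *
 *	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 *	struct mlx5_devx_obj *rq;
 *
 *	rq_attr.vsd = 1;
 *	rq_attr.cqn = cqn;
 *	rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
 *	rq_attr.wq_attr.pd = pdn;
 *	rq_attr.wq_attr.dbr_umem_id = dbr_umem_id;
 *	rq_attr.wq_attr.dbr_umem_valid = 1;
 *	rq_attr.wq_attr.wq_umem_id = wq_umem_id;
 *	rq_attr.wq_attr.wq_umem_valid = 1;
 *	rq = mlx5_devx_cmd_create_rq(ctx, &rq_attr, SOCKET_ID_ANY);
 *	if (!rq)
 *		return -rte_errno;
 */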

/**
 * Modify RQ using DevX API.
 *
 * @param[in] rq
 *   Pointer to RQ object structure.
 * @param [in] rq_attr
 *   Pointer to modify RQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			struct mlx5_devx_modify_rq_attr *rq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	int ret;

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
	MLX5_SET(modify_rq_in, in, rqn, rq->id);
	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
	}
	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
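
/*
 * Usage sketch (illustrative only): moving the RQ created above from RST
 * to RDY (state values from mlx5_prm.h).
 *
 *	struct mlx5_devx_modify_rq_attr mod = { 0 };
 *
 *	mod.rq_state = MLX5_RQC_STATE_RST;
 *	mod.state = MLX5_RQC_STATE_RDY;
 *	if (mlx5_devx_cmd_modify_rq(rq, &mod))
 *		return -rte_errno;
 */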

/**
 * Create TIR using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] tir_attr
 *   Pointer to TIR attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(void *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner, *rss_key;
	struct mlx5_devx_obj *tir = NULL;

	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		rte_free(tir);
		return NULL;
	}
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}
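
/*
 * Usage sketch (illustrative only): creating an indirect TIR hashing into
 * the RQT "rqt" with a Toeplitz key. MLX5_TIRC_DISP_TYPE_INDIRECT and
 * MLX5_RX_HASH_FN_TOEPLITZ are assumed to be defined in mlx5_prm.h, "td"
 * is a transport domain ID and "rss_key" holds MLX5_RSS_HASH_KEY_LEN bytes.
 *
 *	struct mlx5_devx_tir_attr tir_attr = { 0 };
 *	struct mlx5_devx_obj *tir;
 *
 *	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
 *	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
 *	tir_attr.indirect_table = rqt->id;
 *	tir_attr.transport_domain = td;
 *	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
 *	       MLX5_RSS_HASH_KEY_LEN);
 *	tir = mlx5_devx_cmd_create_tir(ctx, &tir_attr);
 *	if (!tir)
 *		return -rte_errno;
 */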

/**
 * Create RQT using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rqt(void *ctx,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t *in = NULL;
	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqt_ctx;
	struct mlx5_devx_obj *rqt = NULL;
	int i;

	in = rte_calloc(__func__, 1, inlen, 0);
	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT IN data");
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
	if (!rqt) {
		DRV_LOG(ERR, "Failed to allocate RQT data");
		rte_errno = ENOMEM;
		rte_free(in);
		return NULL;
	}
	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	rte_free(in);
	if (!rqt->obj) {
		DRV_LOG(ERR, "Failed to create RQT using DevX");
		rte_errno = errno;
		rte_free(rqt);
		return NULL;
	}
	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
	return rqt;
}

/**
 * Modify RQT using DevX API.
 *
 * @param[in] rqt
 *   Pointer to RQT DevX object structure.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
	void *rqt_ctx;
	int i;
	int ret;

	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
	rte_free(in);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQT using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}
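
/*
 * Usage sketch (illustrative only): building an RQT over "n" RQ IDs held in
 * "rq_ids". struct mlx5_devx_rqt_attr is assumed to end with a flexible
 * rq_list[] array, so the attribute structure is allocated with room for
 * the list.
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr;
 *	struct mlx5_devx_obj *rqt;
 *	uint32_t i;
 *
 *	rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
 *			      n * sizeof(uint32_t), 0);
 *	if (!rqt_attr)
 *		return -ENOMEM;
 *	rqt_attr->rqt_max_size = n;
 *	rqt_attr->rqt_actual_size = n;
 *	for (i = 0; i < n; i++)
 *		rqt_attr->rq_list[i] = rq_ids[i];
 *	rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 *	rte_free(rqt_attr);
 *	if (!rqt)
 *		return -rte_errno;
 */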

/**
 * Create SQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_sq(void *ctx,
			struct mlx5_devx_create_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
	void *sq_ctx;
	void *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *sq = NULL;

	sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
	if (!sq) {
		DRV_LOG(ERR, "Failed to allocate SQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
	MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
	MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
	MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
	MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
		 sq_attr->allow_multi_pkt_send_wqe);
	MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
		 sq_attr->min_wqe_inline_mode);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
	MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
	MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
	MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
	MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
	MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
		 sq_attr->packet_pacing_rate_limit_index);
	MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
	MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
	wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
	wq_attr = &sq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!sq->obj) {
		DRV_LOG(ERR, "Failed to create SQ using DevX");
		rte_errno = errno;
		rte_free(sq);
		return NULL;
	}
	sq->id = MLX5_GET(create_sq_out, out, sqn);
	return sq;
}

/**
 * Modify SQ using DevX API.
 *
 * @param[in] sq
 *   Pointer to SQ object structure.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			struct mlx5_devx_modify_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	void *sq_ctx;
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
	MLX5_SET(modify_sq_in, in, sqn, sq->id);
	sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
	ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify SQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
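
/*
 * Usage sketch (illustrative only): creating an SQ bound to TIS "tisn" and
 * CQ "cqn" and then moving it RST -> RDY. WQ memory setup is elided;
 * MLX5_SQC_STATE_* values are assumed to be defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
 *	struct mlx5_devx_modify_sq_attr mod = { 0 };
 *	struct mlx5_devx_obj *sq;
 *
 *	sq_attr.state = MLX5_SQC_STATE_RST;
 *	sq_attr.tis_lst_sz = 1;
 *	sq_attr.tis_num = tisn;
 *	sq_attr.cqn = cqn;
 *	sq = mlx5_devx_cmd_create_sq(ctx, &sq_attr);
 *	if (!sq)
 *		return -rte_errno;
 *	mod.sq_state = MLX5_SQC_STATE_RST;
 *	mod.state = MLX5_SQC_STATE_RDY;
 *	if (mlx5_devx_cmd_modify_sq(sq, &mod))
 *		return -rte_errno;
 */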

/**
 * Create TIS using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] tis_attr
 *   Pointer to TIS attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tis(void *ctx,
			 struct mlx5_devx_tis_attr *tis_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
	struct mlx5_devx_obj *tis = NULL;
	void *tis_ctx;

	tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
	if (!tis) {
		DRV_LOG(ERR, "Failed to allocate TIS object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
		 tis_attr->strict_lag_tx_port_affinity);
	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
	MLX5_SET(tisc, tis_ctx, transport_domain,
		 tis_attr->transport_domain);
	tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tis->obj) {
		DRV_LOG(ERR, "Failed to create TIS using DevX");
		rte_errno = errno;
		rte_free(tis);
		return NULL;
	}
	tis->id = MLX5_GET(create_tis_out, out, tisn);
	return tis;
}

/**
 * Create transport domain using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_td(void *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	struct mlx5_devx_obj *td = NULL;

	td = rte_calloc(__func__, 1, sizeof(*td), 0);
	if (!td) {
		DRV_LOG(ERR, "Failed to allocate TD object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!td->obj) {
		DRV_LOG(ERR, "Failed to create TD using DevX");
		rte_errno = errno;
		rte_free(td);
		return NULL;
	}
	td->id = MLX5_GET(alloc_transport_domain_out, out,
			  transport_domain);
	return td;
}

/**
 * Dump all flows to file.
 *
 * @param[in] fdb_domain
 *   FDB domain.
 * @param[in] rx_domain
 *   RX domain.
 * @param[in] tx_domain
 *   TX domain.
 * @param[out] file
 *   Pointer to file stream.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
			void *rx_domain __rte_unused,
			void *tx_domain __rte_unused, FILE *file __rte_unused)
{
	int ret = 0;

#ifdef HAVE_MLX5_DR_FLOW_DUMP
	if (fdb_domain) {
		ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
		if (ret)
			return ret;
	}
	MLX5_ASSERT(rx_domain);
	ret = mlx5_glue->dr_dump_domain(file, rx_domain);
	if (ret)
		return ret;
	MLX5_ASSERT(tx_domain);
	ret = mlx5_glue->dr_dump_domain(file, tx_domain);
#else
	ret = ENOTSUP;
#endif
	return -ret;
}
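
/*
 * Usage sketch (illustrative only): dumping the steering rules of all three
 * domains to a file for debugging. The domain pointers are assumed to come
 * from the DR (direct rules) layer.
 *
 *	FILE *f = fopen("/tmp/mlx5_flows.txt", "w");
 *	int ret;
 *
 *	if (!f)
 *		return -errno;
 *	ret = mlx5_devx_cmd_flow_dump(fdb_domain, rx_domain, tx_domain, f);
 *	fclose(f);
 *	return ret;
 */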

/**
 * Create CQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to CQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
	struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
						   0);
	void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	if (!cq_obj) {
		DRV_LOG(ERR, "Failed to allocate CQ object memory.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	if (attr->db_umem_valid) {
		MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
		MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
	} else {
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
	}
	MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
	MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
	MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
	MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
	MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
	if (attr->q_umem_valid) {
		MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
		MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
		MLX5_SET64(create_cq_in, in, cq_umem_offset,
			   attr->q_umem_offset);
	}
	cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!cq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
		rte_free(cq_obj);
		return NULL;
	}
	cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
	return cq_obj;
}
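
/*
 * Usage sketch (illustrative only): creating a 64-CQE CQ whose doorbell
 * record "db_rec" lives in regular memory. "eqn" is assumed to come from
 * mlx5_glue->devx_query_eqn() and "uar" from mlx5_glue->devx_alloc_uar();
 * those names are assumptions about the surrounding code.
 *
 *	struct mlx5_devx_cq_attr cq_attr = { 0 };
 *	struct mlx5_devx_obj *cq;
 *
 *	cq_attr.log_cq_size = 6;
 *	cq_attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
 *	cq_attr.uar_page_id = uar->page_id;
 *	cq_attr.eqn = eqn;
 *	cq_attr.db_addr = (uintptr_t)db_rec;
 *	cq = mlx5_devx_cmd_create_cq(ctx, &cq_attr);
 *	if (!cq)
 *		return -rte_errno;
 */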

/**
 * Create VIRTQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to VIRTQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(void *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						      sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
	MLX5_SET(virtio_q, virtctx, pd, attr->pd);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}

/**
 * Modify VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in] attr
 *   Pointer to modify virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	switch (attr->type) {
	case MLX5_VIRTQ_MODIFY_TYPE_STATE:
		MLX5_SET16(virtio_net_q, virtq, state, attr->state);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
			 attr->dirty_bitmap_mkey);
		MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
			   attr->dirty_bitmap_addr);
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
			 attr->dirty_bitmap_size);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
			 attr->dirty_bitmap_dump_enable);
		break;
	default:
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

/**
 * Query VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param[in,out] attr
 *   Pointer to virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			  struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
	void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
	void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
					      hw_available_index);
	attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
	return ret;
}
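
/*
 * Usage sketch (illustrative only): moving a virtq created by
 * mlx5_devx_cmd_create_virtq() to the RDY state. MLX5_VIRTQ_STATE_RDY is
 * assumed to be defined in mlx5_prm.h.
 *
 *	struct mlx5_devx_virtq_attr mod = { 0 };
 *
 *	mod.type = MLX5_VIRTQ_MODIFY_TYPE_STATE;
 *	mod.queue_index = queue_index;
 *	mod.state = MLX5_VIRTQ_STATE_RDY;
 *	if (mlx5_devx_cmd_modify_virtq(virtq, &mod))
 *		return -rte_errno;
 */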

/**
 * Create QP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to QP attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(void *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	if (attr->uar_index) {
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}

/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param [in] qp_st_mod_op
 *   The QP state modification operation.
 * @param [in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
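
/*
 * Usage sketch (illustrative only): driving a force loop-back QP created by
 * mlx5_devx_cmd_create_qp() through RST -> INIT -> RTR -> RTS. The QP is
 * connected to itself, so its own ID is passed as the remote QP ID.
 *
 *	if (mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP,
 *					  qp->id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP,
 *					  qp->id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP,
 *					  qp->id))
 *		return -rte_errno;
 */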

/**
 * Create virtio queue counters object using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtio_q_counters(void *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *counters_obj = rte_zmalloc(__func__,
						     sizeof(*counters_obj), 0);
	void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);

	if (!counters_obj) {
		DRV_LOG(ERR, "Failed to allocate virtio queue counters data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
	counters_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
						       out, sizeof(out));
	if (!counters_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create virtio queue counters Obj using"
			" DevX.");
		rte_free(counters_obj);
		return NULL;
	}
	counters_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return counters_obj;
}

/**
 * Query virtio queue counters using DevX API.
 *
 * @param[in] counters_obj
 *   Pointer to virtio queue counters object.
 * @param[out] attr
 *   Pointer to the counters attributes structure to fill.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *counters_obj,
			      struct mlx5_devx_virtio_q_couners_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0};
	void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr);
	void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out,
					       out, virtio_q_counters);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, counters_obj->id);
	ret = mlx5_glue->devx_obj_query(counters_obj->obj, in, sizeof(in), out,
					sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query virtio q counters using DevX.");
		rte_errno = errno;
		return -errno;
	}
	attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
					 received_desc);
	attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
					  completed_desc);
	attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters,
				    error_cqes);
	attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters,
					 bad_desc_errors);
	attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters,
					  exceed_max_chain);
	attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters,
					invalid_buffer);
	return ret;
}
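
/*
 * Usage sketch (illustrative only): creating a virtio queue counters object
 * and polling it. The attribute structure tag keeps the historic spelling
 * ("couners") used by the header.
 *
 *	struct mlx5_devx_virtio_q_couners_attr vstats = { 0 };
 *	struct mlx5_devx_obj *vcnt;
 *
 *	vcnt = mlx5_devx_cmd_create_virtio_q_counters(ctx);
 *	if (!vcnt)
 *		return -rte_errno;
 *	if (mlx5_devx_cmd_query_virtio_q_counters(vcnt, &vstats))
 *		return -rte_errno;
 *	DRV_LOG(DEBUG, "received_desc = %" PRIu64 ", error_cqes = %u.",
 *		vstats.received_desc, vstats.error_cqes);
 */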