// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018 Mellanox Technologies, Ltd */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"


/**
 * Allocate flow counters via devx interface.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param bulk_n_128
 *   Bulk counter numbers in 128 counters units.
 *
 * @return
 *   Pointer to counter object on success, NULL otherwise and
 *   rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		rte_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}
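
/*
 * Illustrative usage sketch (not compiled as part of this file): allocate a
 * bulk of 4 * 128 counters and release them when done. "ctx" stands for a
 * DevX context obtained from the mlx5 open_device() glue call.
 *
 *	struct mlx5_devx_obj *dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 4);
 *
 *	if (!dcs)
 *		return -rte_errno;
 *	...
 *	mlx5_devx_cmd_destroy(dcs);
 */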

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_flow_counter_alloc.
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 in case of a single counter to read, otherwise the number of counters
 *   to read in batch mode.
 * @param[out] pkts
 *   The number of packets that matched the flow.
 * @param[out] bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 void *cmd_comp,
				 uint64_t async_id)
{
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}
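
/*
 * Illustrative sketch of a synchronous single-counter read with clear,
 * assuming "dcs" was returned by mlx5_devx_cmd_flow_counter_alloc(). With
 * n_counters == 0 the mkey/address/completion arguments are unused, so
 * zeros and NULLs are passed for them.
 *
 *	uint64_t pkts = 0, bytes = 0;
 *
 *	if (mlx5_devx_cmd_flow_counter_query(dcs, 1, 0, &pkts, &bytes,
 *					     0, NULL, NULL, 0))
 *		return -rte_errno;
 */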

/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);
		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	if (attr->relaxed_ordering == 1) {
		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
	}
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}
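
/*
 * Illustrative sketch of a direct (MTT) mkey over a registered umem; "ctx",
 * "buf", "buf_size", "umem_id" and "pdn" are placeholders the caller is
 * assumed to have prepared. Leaving klm_num at 0 selects the direct branch
 * above.
 *
 *	struct mlx5_devx_mkey_attr mkey_attr = {
 *		.addr = (uintptr_t)buf,
 *		.size = buf_size,
 *		.umem_id = umem_id,
 *		.pd = pdn,
 *	};
 *	struct mlx5_devx_obj *mkey = mlx5_devx_cmd_mkey_create(ctx,
 *							       &mkey_attr);
 */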

/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

/**
 * Destroy any object allocated by a Devx API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	rte_free(obj);
	return ret;
}

/**
 * Query NIC vport context.
 * Fills minimal inline attribute.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] vport
 *   vport index.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(void *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int status, syndrome, rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_nic_vport_context_out, out, status);
	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
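
/*
 * Illustrative sketch of an asynchronous batch counter query checked with
 * mlx5_devx_get_out_command_status(). "cmd_comp" is assumed to be a DevX
 * command completion channel created by the caller, and "out" the response
 * buffer the caller retrieves from that channel once the command completes;
 * "mkey_id" and "buf" are the batch query mkey and memory placeholders.
 *
 *	(void)mlx5_devx_cmd_flow_counter_query(dcs, 0, n_counters, NULL, NULL,
 *					       mkey_id, buf, cmd_comp, 0x1);
 *	... wait for the completion on cmd_comp and fetch "out" ...
 *	if (mlx5_devx_get_out_command_status(out))
 *		DRV_LOG(ERR, "Asynchronous counter query failed.");
 */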

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] vdpa_attr
 *   vDPA Attributes structure to fill.
 */
static void
mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
				  struct mlx5_hca_vdpa_attr *vdpa_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (rc || status) {
		DRV_LOG(DEBUG, "Failed to query devx VDPA capabilities,"
			" status %x, syndrome = %x", status, syndrome);
		vdpa_attr->valid = 0;
	} else {
		vdpa_attr->valid = 1;
		vdpa_attr->desc_tunnel_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 desc_tunnel_offload_type);
		vdpa_attr->eth_frame_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 eth_frame_offload_type);
		vdpa_attr->virtio_version_1_0 =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_version_1_0);
		vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv4);
		vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv6);
		vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      tx_csum);
		vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      rx_csum);
		vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
						 event_mode);
		vdpa_attr->virtio_queue_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_queue_type);
		vdpa_attr->log_doorbell_stride =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_stride);
		vdpa_attr->log_doorbell_bar_size =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_bar_size);
		vdpa_attr->doorbell_bar_offset =
			MLX5_GET64(virtio_emulation_cap, hcattr,
				   doorbell_bar_offset);
		vdpa_attr->max_num_virtio_queues =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 max_num_virtio_queues);
		vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_a);
		vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_b);
		vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_a);
		vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_b);
		vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_a);
		vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_b);
	}
}

/**
 * Query HCA attributes.
 * Using those attributes we can check at run time if the device
 * has the required capabilities.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_hca_attr(void *ctx,
			     struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	void *hcattr;
	int status, syndrome, rc;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in), out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->flow_counter_bulk_alloc_bitmap =
			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
					    flow_counters_dump);
	attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
					  log_max_rqt_size);
	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
						log_max_hairpin_queues);
	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
						    log_max_hairpin_wq_data_sz);
	attr->log_max_hairpin_num_packets = MLX5_GET
		(cmd_hca_cap, hcattr, log_max_hairpin_num_packets);
	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
	attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
						relaxed_ordering_write);
	attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
					       relaxed_ordering_read);
	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
					  eth_net_offloads);
	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
					       flex_parser_protocols);
	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	if (attr->qos.sup) {
		MLX5_SET(query_hca_cap_in, in, op_mod,
			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
			 MLX5_HCA_CAP_OPMOD_GET_CUR);
		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
						 out, sizeof(out));
		if (rc)
			goto error;
		status = MLX5_GET(query_hca_cap_out, out, status);
		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
		if (status) {
			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
				" status %x, syndrome = %x",
				status, syndrome);
			return -1;
		}
		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
		attr->qos.srtcm_sup =
				MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
		attr->qos.log_max_flow_meter =
				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
		attr->qos.flow_meter_reg_c_ids =
			MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
		attr->qos.flow_meter_reg_share =
			MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
	}
	if (attr->vdpa.valid)
		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
	if (!attr->eth_net_offloads)
		return 0;

	/* Query HCA offloads for Ethernet protocol. */
	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
		 MLX5_HCA_CAP_OPMOD_GET_CUR);

	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc) {
		attr->eth_net_offloads = 0;
		goto error;
	}
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
			"status %x, syndrome = %x",
			status, syndrome);
		attr->eth_net_offloads = 0;
		return -1;
	}
	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_vlan_insert);
	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_cap);
	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
					hcattr, tunnel_lro_gre);
	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, tunnel_lro_vxlan);
	attr->lro_max_msg_sz_mode = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, lro_max_msg_sz_mode);
	for (int i = 0; i < MLX5_LRO_NUM_SUPP_PERIODS; i++) {
		attr->lro_timer_supported_periods[i] =
			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_timer_supported_periods[i]);
	}
	attr->tunnel_stateless_geneve_rx =
			    MLX5_GET(per_protocol_networking_offload_caps,
				     hcattr, tunnel_stateless_geneve_rx);
	attr->geneve_max_opt_len =
		    MLX5_GET(per_protocol_networking_offload_caps,
			     hcattr, max_geneve_opt_len);
	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_inline_mode);
	attr->tunnel_stateless_gtp = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, tunnel_stateless_gtp);
	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return 0;
	if (attr->eth_virt) {
		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
		if (rc) {
			attr->eth_virt = 0;
			goto error;
		}
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
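
/*
 * Illustrative sketch: query the HCA capabilities once at probe time and
 * gate optional features on the result. "priv" is a placeholder for the
 * caller's device private structure.
 *
 *	struct mlx5_hca_attr attr = { 0 };
 *
 *	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr))
 *		return -1;
 *	priv->counters_supported = !!attr.flow_counter_bulk_alloc_bitmap;
 *	priv->lro_supported = !!attr.lro_cap;
 */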

/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp().
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	int rc;
	void *tis_ctx;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
}
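
/*
 * Illustrative sketch, assuming "qp" is a verbs QP attached to the TIS of
 * interest and "tis_num" was saved at TIS creation time.
 *
 *	uint32_t td = 0;
 *
 *	if (mlx5_devx_cmd_qp_query_tis_td(qp, tis_num, &td))
 *		return -ENODEV;
 */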

/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create RQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rq_attr
 *   Pointer to create RQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(void *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!rq->obj) {
		DRV_LOG(ERR, "Failed to create RQ using DevX");
		rte_errno = errno;
		rte_free(rq);
		return NULL;
	}
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}
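
/*
 * Illustrative sketch of a hairpin RQ creation; only the fields the hairpin
 * case needs are filled, the rest of the attributes stay zero. The hairpin
 * sizes are placeholders taken from the caller's configuration.
 *
 *	struct mlx5_devx_create_rq_attr rq_attr = {
 *		.hairpin = 1,
 *		.state = MLX5_RQC_STATE_RST,
 *	};
 *
 *	rq_attr.wq_attr.log_hairpin_data_sz = log_data_sz;
 *	rq_attr.wq_attr.log_hairpin_num_packets = log_num_packets;
 *	rq = mlx5_devx_cmd_create_rq(ctx, &rq_attr, SOCKET_ID_ANY);
 */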

/**
 * Modify RQ using DevX API.
 *
 * @param[in] rq
 *   Pointer to RQ object structure.
 * @param [in] rq_attr
 *   Pointer to modify RQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			struct mlx5_devx_modify_rq_attr *rq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	int ret;

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
	MLX5_SET(modify_rq_in, in, rqn, rq->id);
	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
	}
	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
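
/*
 * Illustrative sketch: move an RQ from RST to RDY, the usual transition
 * right after creation.
 *
 *	struct mlx5_devx_modify_rq_attr rq_attr = {
 *		.rq_state = MLX5_RQC_STATE_RST,
 *		.state = MLX5_RQC_STATE_RDY,
 *	};
 *
 *	if (mlx5_devx_cmd_modify_rq(rq, &rq_attr))
 *		return -rte_errno;
 */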

/**
 * Create TIR using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] tir_attr
 *   Pointer to TIR attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(void *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner, *rss_key;
	struct mlx5_devx_obj *tir = NULL;

	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		rte_free(tir);
		return NULL;
	}
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}
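
/*
 * Illustrative sketch of an indirect-dispatch TIR over an RQT; the RSS key
 * and hash fields are placeholders the caller is assumed to have chosen,
 * "rqt" and "td" are objects created by the neighboring routines, and the
 * MLX5_TIRC_DISP_TYPE_INDIRECT value is assumed from mlx5_prm.h.
 *
 *	struct mlx5_devx_tir_attr tir_attr = {
 *		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
 *		.indirect_table = rqt->id,
 *		.transport_domain = td->id,
 *	};
 *
 *	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
 *	tir = mlx5_devx_cmd_create_tir(ctx, &tir_attr);
 */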

/**
 * Create RQT using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rqt(void *ctx,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t *in = NULL;
	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqt_ctx;
	struct mlx5_devx_obj *rqt = NULL;
	int i;

	in = rte_calloc(__func__, 1, inlen, 0);
	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT IN data");
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
	if (!rqt) {
		DRV_LOG(ERR, "Failed to allocate RQT data");
		rte_errno = ENOMEM;
		rte_free(in);
		return NULL;
	}
	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
	rte_free(in);
	if (!rqt->obj) {
		DRV_LOG(ERR, "Failed to create RQT using DevX");
		rte_errno = errno;
		rte_free(rqt);
		return NULL;
	}
	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
	return rqt;
}

/**
 * Modify RQT using DevX API.
 *
 * @param[in] rqt
 *   Pointer to RQT DevX object structure.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
			 struct mlx5_devx_rqt_attr *rqt_attr)
{
	uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
	uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
	void *rqt_ctx;
	int i;
	int ret;

	if (!in) {
		DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
	MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
	rte_free(in);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQT using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}
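
/*
 * Illustrative sketch of RQT creation over "n" RQ objects. Because rq_list
 * is a flexible array at the end of the attributes, the caller sizes and
 * allocates the structure dynamically in this sketch.
 *
 *	struct mlx5_devx_rqt_attr *rqt_attr =
 *		rte_calloc(__func__, 1, sizeof(*rqt_attr) +
 *			   n * sizeof(uint32_t), 0);
 *
 *	rqt_attr->rqt_max_size = n;
 *	rqt_attr->rqt_actual_size = n;
 *	for (i = 0; i < n; i++)
 *		rqt_attr->rq_list[i] = rqs[i]->id;
 *	rqt = mlx5_devx_cmd_create_rqt(ctx, rqt_attr);
 *	rte_free(rqt_attr);
 */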

/**
 * Create SQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_sq(void *ctx,
			struct mlx5_devx_create_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
	void *sq_ctx;
	void *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *sq = NULL;

	sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
	if (!sq) {
		DRV_LOG(ERR, "Failed to allocate SQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
	MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
	MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
	MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
	MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
		 sq_attr->allow_multi_pkt_send_wqe);
	MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
		 sq_attr->min_wqe_inline_mode);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
	MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
	MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
	MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
	MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
	MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
		 sq_attr->packet_pacing_rate_limit_index);
	MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
	MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
	wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
	wq_attr = &sq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!sq->obj) {
		DRV_LOG(ERR, "Failed to create SQ using DevX");
		rte_errno = errno;
		rte_free(sq);
		return NULL;
	}
	sq->id = MLX5_GET(create_sq_out, out, sqn);
	return sq;
}

/**
 * Modify SQ using DevX API.
 *
 * @param[in] sq
 *   Pointer to SQ object structure.
 * @param [in] sq_attr
 *   Pointer to SQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
			struct mlx5_devx_modify_sq_attr *sq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
	void *sq_ctx;
	int ret;

	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
	MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
	MLX5_SET(modify_sq_in, in, sqn, sq->id);
	sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
	MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
	ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify SQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
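
/*
 * Illustrative sketch: move an SQ from RST to RDY right after creation,
 * mirroring the RQ transition above.
 *
 *	struct mlx5_devx_modify_sq_attr sq_attr = {
 *		.sq_state = MLX5_SQC_STATE_RST,
 *		.state = MLX5_SQC_STATE_RDY,
 *	};
 *
 *	if (mlx5_devx_cmd_modify_sq(sq, &sq_attr))
 *		return -rte_errno;
 */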

/**
 * Create TIS using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] tis_attr
 *   Pointer to TIS attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tis(void *ctx,
			 struct mlx5_devx_tis_attr *tis_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
	struct mlx5_devx_obj *tis = NULL;
	void *tis_ctx;

	tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
	if (!tis) {
		DRV_LOG(ERR, "Failed to allocate TIS object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
		 tis_attr->strict_lag_tx_port_affinity);
	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
	MLX5_SET(tisc, tis_ctx, transport_domain,
		 tis_attr->transport_domain);
	tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tis->obj) {
		DRV_LOG(ERR, "Failed to create TIS using DevX");
		rte_errno = errno;
		rte_free(tis);
		return NULL;
	}
	tis->id = MLX5_GET(create_tis_out, out, tisn);
	return tis;
}

/**
 * Create transport domain using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_td(void *ctx)
{
	uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
	struct mlx5_devx_obj *td = NULL;

	td = rte_calloc(__func__, 1, sizeof(*td), 0);
	if (!td) {
		DRV_LOG(ERR, "Failed to allocate TD object");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!td->obj) {
		DRV_LOG(ERR, "Failed to create TD using DevX");
		rte_errno = errno;
		rte_free(td);
		return NULL;
	}
	td->id = MLX5_GET(alloc_transport_domain_out, out,
			  transport_domain);
	return td;
}

/**
 * Dump all flows to file.
 *
 * @param[in] fdb_domain
 *   FDB domain.
 * @param[in] rx_domain
 *   RX domain.
 * @param[in] tx_domain
 *   TX domain.
 * @param[out] file
 *   Pointer to file stream.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
			void *rx_domain __rte_unused,
			void *tx_domain __rte_unused, FILE *file __rte_unused)
{
	int ret = 0;

#ifdef HAVE_MLX5_DR_FLOW_DUMP
	if (fdb_domain) {
		ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
		if (ret)
			return ret;
	}
	MLX5_ASSERT(rx_domain);
	ret = mlx5_glue->dr_dump_domain(file, rx_domain);
	if (ret)
		return ret;
	MLX5_ASSERT(tx_domain);
	ret = mlx5_glue->dr_dump_domain(file, tx_domain);
#else
	ret = ENOTSUP;
#endif
	return -ret;
}
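
/*
 * Illustrative sketch of a flow dump triggered from user context; the
 * domain pointers are assumed to come from the caller's DR context and the
 * file path is a placeholder.
 *
 *	FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
 *
 *	if (f) {
 *		(void)mlx5_devx_cmd_flow_dump(fdb_domain, rx_domain,
 *					      tx_domain, f);
 *		fclose(f);
 *	}
 */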

/**
 * Create CQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to CQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
	struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
						   0);
	void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	if (!cq_obj) {
		DRV_LOG(ERR, "Failed to allocate CQ object memory.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	if (attr->db_umem_valid) {
		MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
		MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
	} else {
		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
	}
	MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
	MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
	MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
	MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
		 MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
	MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
	if (attr->q_umem_valid) {
		MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
		MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
		MLX5_SET64(create_cq_in, in, cq_umem_offset,
			   attr->q_umem_offset);
	}
	cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!cq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
		rte_free(cq_obj);
		return NULL;
	}
	cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
	return cq_obj;
}
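
/*
 * Illustrative sketch of CQ creation on top of a caller-registered umem;
 * "umem_id", "eqn", "uar_page" and "n_cqes" are placeholders the caller is
 * assumed to have obtained from the glue layer. log_page_size is given in
 * log2 bytes (12 for 4 KiB pages); MLX5_ADAPTER_PAGE_SHIFT is subtracted
 * internally by the routine above.
 *
 *	struct mlx5_devx_cq_attr cq_attr = {
 *		.q_umem_valid = 1,
 *		.q_umem_id = umem_id,
 *		.log_cq_size = rte_log2_u32(n_cqes),
 *		.log_page_size = 12,
 *		.uar_page_id = uar_page,
 *		.eqn = eqn,
 *	};
 *
 *	cq = mlx5_devx_cmd_create_cq(ctx, &cq_attr);
 */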

/**
 * Create VIRTQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to VIRTQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(void *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						      sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}
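
/*
 * Illustrative sketch of virtq creation for a vDPA queue; the guest ring
 * addresses, mkey and TIS are placeholders prepared by the caller, and the
 * umem fields are omitted for brevity.
 *
 *	struct mlx5_devx_virtq_attr attr = {
 *		.queue_index = idx,
 *		.q_size = 256,
 *		.tis_id = tis->id,
 *		.mkey = mkey->id,
 *		.desc_addr = desc_addr,
 *		.used_addr = used_addr,
 *		.available_addr = avail_addr,
 *	};
 *
 *	virtq = mlx5_devx_cmd_create_virtq(ctx, &attr);
 */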

/**
 * Modify VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in] attr
 *   Pointer to modify virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	switch (attr->type) {
	case MLX5_VIRTQ_MODIFY_TYPE_STATE:
		MLX5_SET16(virtio_net_q, virtq, state, attr->state);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
			 attr->dirty_bitmap_mkey);
		MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
			   attr->dirty_bitmap_addr);
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
			 attr->dirty_bitmap_size);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
			 attr->dirty_bitmap_dump_enable);
		break;
	default:
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}

/**
 * Query VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param [in, out] attr
 *   Pointer to virtq attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
			  struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
	void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
	void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
		rte_errno = errno;
		return -errno;
	}
	attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
					      hw_available_index);
	attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
	return ret;
}
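
/*
 * Illustrative sketch: suspend a virtq and read back its hardware ring
 * indexes, e.g. as part of a live-migration quiesce. The
 * MLX5_VIRTQ_STATE_SUSPEND value is assumed from mlx5_prm.h.
 *
 *	struct mlx5_devx_virtq_attr attr = {
 *		.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
 *		.queue_index = idx,
 *		.state = MLX5_VIRTQ_STATE_SUSPEND,
 *	};
 *
 *	if (mlx5_devx_cmd_modify_virtq(virtq, &attr) ||
 *	    mlx5_devx_cmd_query_virtq(virtq, &attr))
 *		return -rte_errno;
 */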

/**
 * Create QP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] attr
 *   Pointer to QP attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(void *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	if (attr->uar_index) {
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}
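
/*
 * Illustrative sketch of a regular QP creation; "pdn", "uar_page", "cq",
 * "umem_id" and "dbr_offset" are placeholders the caller is assumed to have
 * prepared. Leaving uar_index at 0 would take the firmware-managed special
 * QP branch above instead.
 *
 *	struct mlx5_devx_qp_attr qp_attr = {
 *		.pd = pdn,
 *		.uar_index = uar_page,
 *		.cqn = cq->id,
 *		.sq_size = 64,
 *		.rq_size = 0,
 *		.log_page_size = 12,
 *		.wq_umem_id = umem_id,
 *		.dbr_address = dbr_offset,
 *	};
 *
 *	qp = mlx5_devx_cmd_create_qp(ctx, &qp_attr);
 */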

/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param [in] qp_st_mod_op
 *   The QP state modification operation.
 * @param [in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
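
/*
 * Illustrative sketch of the loop-back QP bring-up this helper supports:
 * RST -> INIT -> RTR -> RTS, with the QP's own number as the remote side.
 *
 *	if (mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP,
 *					  qp->id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP,
 *					  qp->id) ||
 *	    mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP,
 *					  qp->id))
 *		return -rte_errno;
 */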