/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include "idpf_common_virtchnl.h"
#include "idpf_common_logs.h"

/* Drain the mailbox send queue: reclaim descriptors of previously sent
 * messages and release the ctlq message wrapper and DMA payload attached to
 * each.  Returns 0 on success (an empty queue is not an error) or the
 * idpf_ctlq_clean_sq() error code.
 */
static int
idpf_vc_clean(struct idpf_adapter *adapter)
{
	struct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
	uint16_t num_q_msg = IDPF_CTLQ_LEN;
	struct idpf_dma_mem *dma_mem;
	int err;
	uint32_t i;

	/* Poll up to 10 times, 20 ms apart, until at least one completed
	 * message can be reclaimed from the send queue.
	 */
	for (i = 0; i < 10; i++) {
		err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
		msleep(20);
		if (num_q_msg > 0)
			break;
	}
	if (err != 0)
		return err;

	/* Empty queue is not an error */
	for (i = 0; i < num_q_msg; i++) {
		/* Free the indirect DMA payload first, then the ctlq message
		 * wrapper itself.
		 */
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem != NULL) {
			idpf_free_dma_mem(&adapter->hw, dma_mem);
			rte_free(dma_mem);
		}
		rte_free(q_msg[i]);
	}

	return 0;
}

/* Send one virtchnl message (opcode 'op', payload 'msg' of 'msg_size' bytes)
 * to the CP over the mailbox send queue.  The payload is copied into a newly
 * allocated DMA buffer; on success, ownership of both the ctlq message and
 * the DMA buffer passes to the control queue (reclaimed later by
 * idpf_vc_clean()).  Returns 0 on success or a negative errno.
 */
static int
idpf_send_vc_msg(struct idpf_adapter *adapter, uint32_t op,
		 uint16_t msg_size, uint8_t *msg)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	/* Reclaim resources of previously completed messages first. */
	err = idpf_vc_clean(adapter);
	if (err != 0)
		goto err;

	ctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);
	if (ctlq_msg == NULL) {
		err = -ENOMEM;
		goto err;
	}

	dma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);
	if (dma_mem == NULL) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
	idpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
	if (dma_mem->va == NULL) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	memcpy(dma_mem->va, msg, msg_size);

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
	ctlq_msg->func_id = 0;
	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
	ctlq_msg->ctx.indirect.payload = dma_mem;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err != 0)
		goto send_error;

	return 0;

	/* Unwind in reverse order of acquisition. */
send_error:
	idpf_free_dma_mem(&adapter->hw, dma_mem);
dma_alloc_error:
	rte_free(dma_mem);
dma_mem_error:
	rte_free(ctlq_msg);
err:
	return err;
}

/* Receive one message from the CP over the mailbox receive queue and copy
 * up to 'buf_len' bytes of its payload into 'buf'.  Classifies the message:
 * IDPF_MSG_SYS for a VIRTCHNL2_OP_EVENT, IDPF_MSG_CMD for the reply to the
 * pending command, IDPF_MSG_ERR on receive error or opcode mismatch, and
 * IDPF_MSG_NON when nothing was received.  Latches the CP's return code into
 * adapter->cmd_retval.
 */
static enum idpf_vc_result
idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
		      uint8_t *buf)
{
	struct idpf_hw *hw = &adapter->hw;
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem = NULL;
	enum idpf_vc_result result = IDPF_MSG_NON;
	uint32_t opcode;
	uint16_t pending = 1;
	int ret;

	ret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);
	if (ret != 0) {
		DRV_LOG(DEBUG, "Can't read msg from AQ");
		/* -ENOMSG means "queue empty", which is not an error. */
		if (ret != -ENOMSG)
			result = IDPF_MSG_ERR;
		return result;
	}

	rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);

	opcode = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
	adapter->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);

	DRV_LOG(DEBUG, "CQ from CP carries opcode %u, retval %d",
		opcode, adapter->cmd_retval);

	if (opcode == VIRTCHNL2_OP_EVENT) {
		struct virtchnl2_event *ve = ctlq_msg.ctx.indirect.payload->va;

		result = IDPF_MSG_SYS;
		switch (ve->event) {
		case VIRTCHNL2_EVENT_LINK_CHANGE:
			/* TBD */
			break;
		default:
			DRV_LOG(ERR, "%s: Unknown event %d from CP",
				__func__, ve->event);
			break;
		}
	} else {
		/* async reply msg on command issued by pf previously */
		result = IDPF_MSG_CMD;
		if (opcode != adapter->pend_cmd) {
			DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
				adapter->pend_cmd, opcode);
			result = IDPF_MSG_ERR;
		}
	}

	/* Recycle the receive buffer back to the ARQ; only hand the payload
	 * buffer back when the message actually carried data.
	 */
	if (ctlq_msg.data_len != 0)
		dma_mem = ctlq_msg.ctx.indirect.payload;
	else
		pending = 0;

	ret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
	if (ret != 0 && dma_mem != NULL)
		idpf_free_dma_mem(hw, dma_mem);

	return result;
}

/* Mailbox polling parameters: up to MAX_TRY_TIMES attempts, ASQ_DELAY_MS
 * milliseconds apart.
 */
#define MAX_TRY_TIMES 200
#define ASQ_DELAY_MS 10

int
idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len, 163 uint8_t *buf) 164 { 165 int err = 0; 166 int i = 0; 167 int ret; 168 169 do { 170 ret = idpf_read_msg_from_cp(adapter, buf_len, buf); 171 if (ret == IDPF_MSG_CMD) 172 break; 173 rte_delay_ms(ASQ_DELAY_MS); 174 } while (i++ < MAX_TRY_TIMES); 175 if (i >= MAX_TRY_TIMES || 176 adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { 177 err = -EBUSY; 178 DRV_LOG(ERR, "No response or return failure (%d) for cmd %d", 179 adapter->cmd_retval, ops); 180 } 181 182 return err; 183 } 184 185 int 186 idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args) 187 { 188 int err = 0; 189 int i = 0; 190 int ret; 191 192 if (atomic_set_cmd(adapter, args->ops)) 193 return -EINVAL; 194 195 ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args); 196 if (ret != 0) { 197 DRV_LOG(ERR, "fail to send cmd %d", args->ops); 198 clear_cmd(adapter); 199 return ret; 200 } 201 202 switch (args->ops) { 203 case VIRTCHNL_OP_VERSION: 204 case VIRTCHNL2_OP_GET_CAPS: 205 case VIRTCHNL2_OP_GET_PTYPE_INFO: 206 /* for init virtchnl ops, need to poll the response */ 207 err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer); 208 clear_cmd(adapter); 209 break; 210 default: 211 /* For other virtchnl ops in running time, 212 * wait for the cmd done flag. 
213 */ 214 do { 215 if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN) 216 break; 217 rte_delay_ms(ASQ_DELAY_MS); 218 /* If don't read msg or read sys event, continue */ 219 } while (i++ < MAX_TRY_TIMES); 220 /* If there's no response is received, clear command */ 221 if (i >= MAX_TRY_TIMES || 222 adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { 223 err = -EBUSY; 224 DRV_LOG(ERR, "No response or return failure (%d) for cmd %d", 225 adapter->cmd_retval, args->ops); 226 clear_cmd(adapter); 227 } 228 break; 229 } 230 231 return err; 232 } 233 234 int 235 idpf_vc_api_version_check(struct idpf_adapter *adapter) 236 { 237 struct virtchnl2_version_info version, *pver; 238 struct idpf_cmd_info args; 239 int err; 240 241 memset(&version, 0, sizeof(struct virtchnl_version_info)); 242 version.major = VIRTCHNL2_VERSION_MAJOR_2; 243 version.minor = VIRTCHNL2_VERSION_MINOR_0; 244 245 args.ops = VIRTCHNL_OP_VERSION; 246 args.in_args = (uint8_t *)&version; 247 args.in_args_size = sizeof(version); 248 args.out_buffer = adapter->mbx_resp; 249 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 250 251 err = idpf_vc_cmd_execute(adapter, &args); 252 if (err != 0) { 253 DRV_LOG(ERR, 254 "Failed to execute command of VIRTCHNL_OP_VERSION"); 255 return err; 256 } 257 258 pver = (struct virtchnl2_version_info *)args.out_buffer; 259 adapter->virtchnl_version = *pver; 260 261 if (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 || 262 adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) { 263 DRV_LOG(ERR, "VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)", 264 adapter->virtchnl_version.major, 265 adapter->virtchnl_version.minor, 266 VIRTCHNL2_VERSION_MAJOR_2, 267 VIRTCHNL2_VERSION_MINOR_0); 268 return -EINVAL; 269 } 270 271 return 0; 272 } 273 274 int 275 idpf_vc_caps_get(struct idpf_adapter *adapter) 276 { 277 struct idpf_cmd_info args; 278 int err; 279 280 args.ops = VIRTCHNL2_OP_GET_CAPS; 281 args.in_args = (uint8_t *)&adapter->caps; 282 args.in_args_size = sizeof(struct 
virtchnl2_get_capabilities); 283 args.out_buffer = adapter->mbx_resp; 284 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 285 286 err = idpf_vc_cmd_execute(adapter, &args); 287 if (err != 0) { 288 DRV_LOG(ERR, 289 "Failed to execute command of VIRTCHNL2_OP_GET_CAPS"); 290 return err; 291 } 292 293 rte_memcpy(&adapter->caps, args.out_buffer, sizeof(struct virtchnl2_get_capabilities)); 294 295 return 0; 296 } 297 298 int 299 idpf_vc_vport_create(struct idpf_vport *vport, 300 struct virtchnl2_create_vport *create_vport_info) 301 { 302 struct idpf_adapter *adapter = vport->adapter; 303 struct virtchnl2_create_vport vport_msg; 304 struct idpf_cmd_info args; 305 int err = -1; 306 307 memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport)); 308 vport_msg.vport_type = create_vport_info->vport_type; 309 vport_msg.txq_model = create_vport_info->txq_model; 310 vport_msg.rxq_model = create_vport_info->rxq_model; 311 vport_msg.num_tx_q = create_vport_info->num_tx_q; 312 vport_msg.num_tx_complq = create_vport_info->num_tx_complq; 313 vport_msg.num_rx_q = create_vport_info->num_rx_q; 314 vport_msg.num_rx_bufq = create_vport_info->num_rx_bufq; 315 316 memset(&args, 0, sizeof(args)); 317 args.ops = VIRTCHNL2_OP_CREATE_VPORT; 318 args.in_args = (uint8_t *)&vport_msg; 319 args.in_args_size = sizeof(vport_msg); 320 args.out_buffer = adapter->mbx_resp; 321 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 322 323 err = idpf_vc_cmd_execute(adapter, &args); 324 if (err != 0) { 325 DRV_LOG(ERR, 326 "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT"); 327 return err; 328 } 329 330 rte_memcpy(&(vport->vport_info.info), args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE); 331 return 0; 332 } 333 334 int 335 idpf_vc_vport_destroy(struct idpf_vport *vport) 336 { 337 struct idpf_adapter *adapter = vport->adapter; 338 struct virtchnl2_vport vc_vport; 339 struct idpf_cmd_info args; 340 int err; 341 342 vc_vport.vport_id = vport->vport_id; 343 344 memset(&args, 0, sizeof(args)); 345 args.ops = 
VIRTCHNL2_OP_DESTROY_VPORT; 346 args.in_args = (uint8_t *)&vc_vport; 347 args.in_args_size = sizeof(vc_vport); 348 args.out_buffer = adapter->mbx_resp; 349 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 350 351 err = idpf_vc_cmd_execute(adapter, &args); 352 if (err != 0) 353 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT"); 354 355 return err; 356 } 357 358 int 359 idpf_vc_queue_grps_add(struct idpf_vport *vport, 360 struct virtchnl2_add_queue_groups *p2p_queue_grps_info, 361 uint8_t *p2p_queue_grps_out) 362 { 363 struct idpf_adapter *adapter = vport->adapter; 364 struct idpf_cmd_info args; 365 int size, qg_info_size; 366 int err = -1; 367 368 size = sizeof(*p2p_queue_grps_info) + 369 (p2p_queue_grps_info->qg_info.num_queue_groups - 1) * 370 sizeof(struct virtchnl2_queue_group_info); 371 372 memset(&args, 0, sizeof(args)); 373 args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS; 374 args.in_args = (uint8_t *)p2p_queue_grps_info; 375 args.in_args_size = size; 376 args.out_buffer = adapter->mbx_resp; 377 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 378 379 err = idpf_vc_cmd_execute(adapter, &args); 380 if (err != 0) { 381 DRV_LOG(ERR, 382 "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS"); 383 return err; 384 } 385 386 rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE); 387 return 0; 388 } 389 390 int idpf_vc_queue_grps_del(struct idpf_vport *vport, 391 uint16_t num_q_grps, 392 struct virtchnl2_queue_group_id *qg_ids) 393 { 394 struct idpf_adapter *adapter = vport->adapter; 395 struct virtchnl2_delete_queue_groups *vc_del_q_grps; 396 struct idpf_cmd_info args; 397 int size; 398 int err; 399 400 size = sizeof(*vc_del_q_grps) + 401 (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id); 402 vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0); 403 404 vc_del_q_grps->vport_id = vport->vport_id; 405 vc_del_q_grps->num_queue_groups = num_q_grps; 406 memcpy(vc_del_q_grps->qg_ids, qg_ids, 407 num_q_grps * sizeof(struct 
virtchnl2_queue_group_id)); 408 409 memset(&args, 0, sizeof(args)); 410 args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS; 411 args.in_args = (uint8_t *)vc_del_q_grps; 412 args.in_args_size = size; 413 args.out_buffer = adapter->mbx_resp; 414 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 415 416 err = idpf_vc_cmd_execute(adapter, &args); 417 if (err != 0) 418 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS"); 419 420 rte_free(vc_del_q_grps); 421 return err; 422 } 423 424 int 425 idpf_vc_rss_key_set(struct idpf_vport *vport) 426 { 427 struct idpf_adapter *adapter = vport->adapter; 428 struct virtchnl2_rss_key *rss_key; 429 struct idpf_cmd_info args; 430 int len, err; 431 432 len = sizeof(*rss_key) + sizeof(rss_key->key[0]) * 433 (vport->rss_key_size - 1); 434 rss_key = rte_zmalloc("rss_key", len, 0); 435 if (rss_key == NULL) 436 return -ENOMEM; 437 438 rss_key->vport_id = vport->vport_id; 439 rss_key->key_len = vport->rss_key_size; 440 rte_memcpy(rss_key->key, vport->rss_key, 441 sizeof(rss_key->key[0]) * vport->rss_key_size); 442 443 memset(&args, 0, sizeof(args)); 444 args.ops = VIRTCHNL2_OP_SET_RSS_KEY; 445 args.in_args = (uint8_t *)rss_key; 446 args.in_args_size = len; 447 args.out_buffer = adapter->mbx_resp; 448 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 449 450 err = idpf_vc_cmd_execute(adapter, &args); 451 if (err != 0) 452 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY"); 453 454 rte_free(rss_key); 455 return err; 456 } 457 458 int idpf_vc_rss_key_get(struct idpf_vport *vport) 459 { 460 struct idpf_adapter *adapter = vport->adapter; 461 struct virtchnl2_rss_key *rss_key_ret; 462 struct virtchnl2_rss_key rss_key; 463 struct idpf_cmd_info args; 464 int err; 465 466 memset(&rss_key, 0, sizeof(rss_key)); 467 rss_key.vport_id = vport->vport_id; 468 469 memset(&args, 0, sizeof(args)); 470 args.ops = VIRTCHNL2_OP_GET_RSS_KEY; 471 args.in_args = (uint8_t *)&rss_key; 472 args.in_args_size = sizeof(rss_key); 473 args.out_buffer = 
adapter->mbx_resp; 474 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 475 476 err = idpf_vc_cmd_execute(adapter, &args); 477 478 if (!err) { 479 rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer; 480 if (rss_key_ret->key_len != vport->rss_key_size) { 481 rte_free(vport->rss_key); 482 vport->rss_key = NULL; 483 vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN, 484 rss_key_ret->key_len); 485 vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0); 486 if (!vport->rss_key) { 487 vport->rss_key_size = 0; 488 DRV_LOG(ERR, "Failed to allocate RSS key"); 489 return -ENOMEM; 490 } 491 } 492 rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size); 493 } else { 494 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY"); 495 } 496 497 return err; 498 } 499 500 int 501 idpf_vc_rss_lut_set(struct idpf_vport *vport) 502 { 503 struct idpf_adapter *adapter = vport->adapter; 504 struct virtchnl2_rss_lut *rss_lut; 505 struct idpf_cmd_info args; 506 int len, err; 507 508 len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) * 509 (vport->rss_lut_size - 1); 510 rss_lut = rte_zmalloc("rss_lut", len, 0); 511 if (rss_lut == NULL) 512 return -ENOMEM; 513 514 rss_lut->vport_id = vport->vport_id; 515 rss_lut->lut_entries = vport->rss_lut_size; 516 rte_memcpy(rss_lut->lut, vport->rss_lut, 517 sizeof(rss_lut->lut[0]) * vport->rss_lut_size); 518 519 memset(&args, 0, sizeof(args)); 520 args.ops = VIRTCHNL2_OP_SET_RSS_LUT; 521 args.in_args = (uint8_t *)rss_lut; 522 args.in_args_size = len; 523 args.out_buffer = adapter->mbx_resp; 524 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 525 526 err = idpf_vc_cmd_execute(adapter, &args); 527 if (err != 0) 528 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT"); 529 530 rte_free(rss_lut); 531 return err; 532 } 533 534 int 535 idpf_vc_rss_lut_get(struct idpf_vport *vport) 536 { 537 struct idpf_adapter *adapter = vport->adapter; 538 struct virtchnl2_rss_lut *rss_lut_ret; 539 struct virtchnl2_rss_lut rss_lut; 540 struct 
idpf_cmd_info args; 541 int err; 542 543 memset(&rss_lut, 0, sizeof(rss_lut)); 544 rss_lut.vport_id = vport->vport_id; 545 546 memset(&args, 0, sizeof(args)); 547 args.ops = VIRTCHNL2_OP_GET_RSS_LUT; 548 args.in_args = (uint8_t *)&rss_lut; 549 args.in_args_size = sizeof(rss_lut); 550 args.out_buffer = adapter->mbx_resp; 551 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 552 553 err = idpf_vc_cmd_execute(adapter, &args); 554 555 if (!err) { 556 rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer; 557 if (rss_lut_ret->lut_entries != vport->rss_lut_size) { 558 rte_free(vport->rss_lut); 559 vport->rss_lut = NULL; 560 vport->rss_lut = rte_zmalloc("rss_lut", 561 sizeof(uint32_t) * rss_lut_ret->lut_entries, 0); 562 if (vport->rss_lut == NULL) { 563 DRV_LOG(ERR, "Failed to allocate RSS lut"); 564 return -ENOMEM; 565 } 566 } 567 rte_memcpy(vport->rss_lut, rss_lut_ret->lut, rss_lut_ret->lut_entries); 568 vport->rss_lut_size = rss_lut_ret->lut_entries; 569 } else { 570 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT"); 571 } 572 573 return err; 574 } 575 576 int 577 idpf_vc_rss_hash_get(struct idpf_vport *vport) 578 { 579 struct idpf_adapter *adapter = vport->adapter; 580 struct virtchnl2_rss_hash *rss_hash_ret; 581 struct virtchnl2_rss_hash rss_hash; 582 struct idpf_cmd_info args; 583 int err; 584 585 memset(&rss_hash, 0, sizeof(rss_hash)); 586 rss_hash.ptype_groups = vport->rss_hf; 587 rss_hash.vport_id = vport->vport_id; 588 589 memset(&args, 0, sizeof(args)); 590 args.ops = VIRTCHNL2_OP_GET_RSS_HASH; 591 args.in_args = (uint8_t *)&rss_hash; 592 args.in_args_size = sizeof(rss_hash); 593 args.out_buffer = adapter->mbx_resp; 594 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 595 596 err = idpf_vc_cmd_execute(adapter, &args); 597 598 if (!err) { 599 rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer; 600 vport->rss_hf = rss_hash_ret->ptype_groups; 601 } else { 602 DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH"); 603 } 604 605 return err; 606 
} 607 608 int 609 idpf_vc_rss_hash_set(struct idpf_vport *vport) 610 { 611 struct idpf_adapter *adapter = vport->adapter; 612 struct virtchnl2_rss_hash rss_hash; 613 struct idpf_cmd_info args; 614 int err; 615 616 memset(&rss_hash, 0, sizeof(rss_hash)); 617 rss_hash.ptype_groups = vport->rss_hf; 618 rss_hash.vport_id = vport->vport_id; 619 620 memset(&args, 0, sizeof(args)); 621 args.ops = VIRTCHNL2_OP_SET_RSS_HASH; 622 args.in_args = (uint8_t *)&rss_hash; 623 args.in_args_size = sizeof(rss_hash); 624 args.out_buffer = adapter->mbx_resp; 625 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 626 627 err = idpf_vc_cmd_execute(adapter, &args); 628 if (err != 0) 629 DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH"); 630 631 return err; 632 } 633 634 int 635 idpf_vc_irq_map_unmap_config(struct idpf_vport *vport, uint16_t nb_rxq, bool map) 636 { 637 struct idpf_adapter *adapter = vport->adapter; 638 struct virtchnl2_queue_vector_maps *map_info; 639 struct virtchnl2_queue_vector *vecmap; 640 struct idpf_cmd_info args; 641 int len, i, err = 0; 642 643 len = sizeof(struct virtchnl2_queue_vector_maps) + 644 (nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector); 645 646 map_info = rte_zmalloc("map_info", len, 0); 647 if (map_info == NULL) 648 return -ENOMEM; 649 650 map_info->vport_id = vport->vport_id; 651 map_info->num_qv_maps = nb_rxq; 652 for (i = 0; i < nb_rxq; i++) { 653 vecmap = &map_info->qv_maps[i]; 654 vecmap->queue_id = vport->qv_map[i].queue_id; 655 vecmap->vector_id = vport->qv_map[i].vector_id; 656 vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0; 657 vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX; 658 } 659 660 args.ops = map ? 
VIRTCHNL2_OP_MAP_QUEUE_VECTOR : 661 VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR; 662 args.in_args = (uint8_t *)map_info; 663 args.in_args_size = len; 664 args.out_buffer = adapter->mbx_resp; 665 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 666 err = idpf_vc_cmd_execute(adapter, &args); 667 if (err != 0) 668 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR", 669 map ? "MAP" : "UNMAP"); 670 671 rte_free(map_info); 672 return err; 673 } 674 675 int 676 idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors) 677 { 678 struct idpf_adapter *adapter = vport->adapter; 679 struct virtchnl2_alloc_vectors *alloc_vec; 680 struct idpf_cmd_info args; 681 int err, len; 682 683 len = sizeof(struct virtchnl2_alloc_vectors) + 684 (num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk); 685 alloc_vec = rte_zmalloc("alloc_vec", len, 0); 686 if (alloc_vec == NULL) 687 return -ENOMEM; 688 689 alloc_vec->num_vectors = num_vectors; 690 691 args.ops = VIRTCHNL2_OP_ALLOC_VECTORS; 692 args.in_args = (uint8_t *)alloc_vec; 693 args.in_args_size = len; 694 args.out_buffer = adapter->mbx_resp; 695 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 696 err = idpf_vc_cmd_execute(adapter, &args); 697 if (err != 0) 698 DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS"); 699 700 rte_memcpy(vport->recv_vectors, args.out_buffer, len); 701 rte_free(alloc_vec); 702 return err; 703 } 704 705 int 706 idpf_vc_vectors_dealloc(struct idpf_vport *vport) 707 { 708 struct idpf_adapter *adapter = vport->adapter; 709 struct virtchnl2_alloc_vectors *alloc_vec; 710 struct virtchnl2_vector_chunks *vcs; 711 struct idpf_cmd_info args; 712 int err, len; 713 714 alloc_vec = vport->recv_vectors; 715 vcs = &alloc_vec->vchunks; 716 717 len = sizeof(struct virtchnl2_vector_chunks) + 718 (vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk); 719 720 args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS; 721 args.in_args = (uint8_t *)vcs; 722 args.in_args_size = len; 723 args.out_buffer = 
adapter->mbx_resp; 724 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 725 err = idpf_vc_cmd_execute(adapter, &args); 726 if (err != 0) 727 DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS"); 728 729 return err; 730 } 731 732 int 733 idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid, 734 uint32_t type, bool on) 735 { 736 struct idpf_adapter *adapter = vport->adapter; 737 struct virtchnl2_del_ena_dis_queues *queue_select; 738 struct virtchnl2_queue_chunk *queue_chunk; 739 struct idpf_cmd_info args; 740 int err, len; 741 742 len = sizeof(struct virtchnl2_del_ena_dis_queues); 743 queue_select = rte_zmalloc("queue_select", len, 0); 744 if (queue_select == NULL) 745 return -ENOMEM; 746 747 queue_chunk = queue_select->chunks.chunks; 748 queue_select->chunks.num_chunks = 1; 749 queue_select->vport_id = vport->vport_id; 750 751 queue_chunk->type = type; 752 queue_chunk->start_queue_id = qid; 753 queue_chunk->num_queues = 1; 754 755 args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES : 756 VIRTCHNL2_OP_DISABLE_QUEUES; 757 args.in_args = (uint8_t *)queue_select; 758 args.in_args_size = len; 759 args.out_buffer = adapter->mbx_resp; 760 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 761 err = idpf_vc_cmd_execute(adapter, &args); 762 if (err != 0) 763 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES", 764 on ? "ENABLE" : "DISABLE"); 765 766 rte_free(queue_select); 767 return err; 768 } 769 770 int 771 idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid, 772 bool rx, bool on) 773 { 774 uint32_t type; 775 int err, queue_id; 776 777 /* switch txq/rxq */ 778 type = rx ? 
VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX; 779 780 if (type == VIRTCHNL2_QUEUE_TYPE_RX) 781 queue_id = vport->chunks_info.rx_start_qid + qid; 782 else 783 queue_id = vport->chunks_info.tx_start_qid + qid; 784 err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); 785 if (err != 0) 786 return err; 787 788 /* switch tx completion queue */ 789 if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 790 type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 791 queue_id = vport->chunks_info.tx_compl_start_qid + qid; 792 err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); 793 if (err != 0) 794 return err; 795 } 796 797 /* switch rx buffer queue */ 798 if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 799 type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 800 queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid; 801 err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); 802 if (err != 0) 803 return err; 804 queue_id++; 805 err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on); 806 if (err != 0) 807 return err; 808 } 809 810 return err; 811 } 812 813 #define IDPF_RXTX_QUEUE_CHUNKS_NUM 2 814 int 815 idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable) 816 { 817 struct idpf_adapter *adapter = vport->adapter; 818 struct virtchnl2_del_ena_dis_queues *queue_select; 819 struct virtchnl2_queue_chunk *queue_chunk; 820 uint32_t type; 821 struct idpf_cmd_info args; 822 uint16_t num_chunks; 823 int err, len; 824 825 num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM; 826 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) 827 num_chunks++; 828 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) 829 num_chunks++; 830 831 len = sizeof(struct virtchnl2_del_ena_dis_queues) + 832 sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1); 833 queue_select = rte_zmalloc("queue_select", len, 0); 834 if (queue_select == NULL) 835 return -ENOMEM; 836 837 queue_chunk = queue_select->chunks.chunks; 838 queue_select->chunks.num_chunks = num_chunks; 839 
queue_select->vport_id = vport->vport_id; 840 841 type = VIRTCHNL_QUEUE_TYPE_RX; 842 queue_chunk[type].type = type; 843 queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid; 844 queue_chunk[type].num_queues = vport->num_rx_q; 845 846 type = VIRTCHNL2_QUEUE_TYPE_TX; 847 queue_chunk[type].type = type; 848 queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid; 849 queue_chunk[type].num_queues = vport->num_tx_q; 850 851 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 852 type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 853 queue_chunk[type].type = type; 854 queue_chunk[type].start_queue_id = 855 vport->chunks_info.rx_buf_start_qid; 856 queue_chunk[type].num_queues = vport->num_rx_bufq; 857 } 858 859 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { 860 type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 861 queue_chunk[type].type = type; 862 queue_chunk[type].start_queue_id = 863 vport->chunks_info.tx_compl_start_qid; 864 queue_chunk[type].num_queues = vport->num_tx_complq; 865 } 866 867 args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES : 868 VIRTCHNL2_OP_DISABLE_QUEUES; 869 args.in_args = (uint8_t *)queue_select; 870 args.in_args_size = len; 871 args.out_buffer = adapter->mbx_resp; 872 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 873 err = idpf_vc_cmd_execute(adapter, &args); 874 if (err != 0) 875 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES", 876 enable ? "ENABLE" : "DISABLE"); 877 878 rte_free(queue_select); 879 return err; 880 } 881 882 int 883 idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable) 884 { 885 struct idpf_adapter *adapter = vport->adapter; 886 struct virtchnl2_vport vc_vport; 887 struct idpf_cmd_info args; 888 int err; 889 890 vc_vport.vport_id = vport->vport_id; 891 args.ops = enable ? 
VIRTCHNL2_OP_ENABLE_VPORT : 892 VIRTCHNL2_OP_DISABLE_VPORT; 893 args.in_args = (uint8_t *)&vc_vport; 894 args.in_args_size = sizeof(vc_vport); 895 args.out_buffer = adapter->mbx_resp; 896 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 897 898 err = idpf_vc_cmd_execute(adapter, &args); 899 if (err != 0) { 900 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT", 901 enable ? "ENABLE" : "DISABLE"); 902 } 903 904 return err; 905 } 906 907 int 908 idpf_vc_ptype_info_query(struct idpf_adapter *adapter, 909 struct virtchnl2_get_ptype_info *req_ptype_info, 910 struct virtchnl2_get_ptype_info *recv_ptype_info) 911 { 912 struct idpf_cmd_info args; 913 int err; 914 915 args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO; 916 args.in_args = (uint8_t *)req_ptype_info; 917 args.in_args_size = sizeof(struct virtchnl2_get_ptype_info); 918 args.out_buffer = adapter->mbx_resp; 919 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 920 921 err = idpf_vc_cmd_execute(adapter, &args); 922 if (err != 0) 923 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO"); 924 925 rte_memcpy(recv_ptype_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE); 926 return err; 927 } 928 929 int 930 idpf_vc_stats_query(struct idpf_vport *vport, 931 struct virtchnl2_vport_stats **pstats) 932 { 933 struct idpf_adapter *adapter = vport->adapter; 934 struct virtchnl2_vport_stats vport_stats; 935 struct idpf_cmd_info args; 936 int err; 937 938 vport_stats.vport_id = vport->vport_id; 939 args.ops = VIRTCHNL2_OP_GET_STATS; 940 args.in_args = (u8 *)&vport_stats; 941 args.in_args_size = sizeof(vport_stats); 942 args.out_buffer = adapter->mbx_resp; 943 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 944 945 err = idpf_vc_cmd_execute(adapter, &args); 946 if (err) { 947 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_STATS"); 948 *pstats = NULL; 949 return err; 950 } 951 *pstats = (struct virtchnl2_vport_stats *)args.out_buffer; 952 return 0; 953 } 954 955 #define IDPF_RX_BUF_STRIDE 64 956 int 957 
idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq) 958 { 959 struct idpf_adapter *adapter = vport->adapter; 960 struct virtchnl2_config_rx_queues *vc_rxqs = NULL; 961 struct virtchnl2_rxq_info *rxq_info; 962 struct idpf_cmd_info args; 963 uint16_t num_qs; 964 int size, err, i; 965 966 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) 967 num_qs = IDPF_RXQ_PER_GRP; 968 else 969 num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP; 970 971 size = sizeof(*vc_rxqs) + (num_qs - 1) * 972 sizeof(struct virtchnl2_rxq_info); 973 vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0); 974 if (vc_rxqs == NULL) { 975 DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues"); 976 err = -ENOMEM; 977 return err; 978 } 979 vc_rxqs->vport_id = vport->vport_id; 980 vc_rxqs->num_qinfo = num_qs; 981 if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { 982 rxq_info = &vc_rxqs->qinfo[0]; 983 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr; 984 rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX; 985 rxq_info->queue_id = rxq->queue_id; 986 rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE; 987 rxq_info->data_buffer_size = rxq->rx_buf_len; 988 rxq_info->max_pkt_size = vport->max_pkt_len; 989 990 rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M; 991 rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE; 992 993 rxq_info->ring_len = rxq->nb_rx_desc; 994 } else { 995 /* Rx queue */ 996 rxq_info = &vc_rxqs->qinfo[0]; 997 rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr; 998 rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX; 999 rxq_info->queue_id = rxq->queue_id; 1000 rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT; 1001 rxq_info->data_buffer_size = rxq->rx_buf_len; 1002 rxq_info->max_pkt_size = vport->max_pkt_len; 1003 1004 rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; 1005 rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE; 1006 1007 rxq_info->ring_len = rxq->nb_rx_desc; 1008 rxq_info->rx_bufq1_id = rxq->bufq1->queue_id; 1009 rxq_info->bufq2_ena = 1; 1010 rxq_info->rx_bufq2_id = 
rxq->bufq2->queue_id; 1011 rxq_info->rx_buffer_low_watermark = 64; 1012 1013 /* Buffer queue */ 1014 for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) { 1015 struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2; 1016 rxq_info = &vc_rxqs->qinfo[i]; 1017 rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr; 1018 rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; 1019 rxq_info->queue_id = bufq->queue_id; 1020 rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT; 1021 rxq_info->data_buffer_size = bufq->rx_buf_len; 1022 rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; 1023 rxq_info->ring_len = bufq->nb_rx_desc; 1024 1025 rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE; 1026 rxq_info->rx_buffer_low_watermark = 64; 1027 } 1028 } 1029 1030 memset(&args, 0, sizeof(args)); 1031 args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES; 1032 args.in_args = (uint8_t *)vc_rxqs; 1033 args.in_args_size = size; 1034 args.out_buffer = adapter->mbx_resp; 1035 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 1036 1037 err = idpf_vc_cmd_execute(adapter, &args); 1038 rte_free(vc_rxqs); 1039 if (err != 0) 1040 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES"); 1041 1042 return err; 1043 } 1044 1045 int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info, 1046 uint16_t num_qs) 1047 { 1048 struct idpf_adapter *adapter = vport->adapter; 1049 struct virtchnl2_config_rx_queues *vc_rxqs = NULL; 1050 struct idpf_cmd_info args; 1051 int size, err, i; 1052 1053 size = sizeof(*vc_rxqs) + (num_qs - 1) * 1054 sizeof(struct virtchnl2_rxq_info); 1055 vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0); 1056 if (vc_rxqs == NULL) { 1057 DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues"); 1058 err = -ENOMEM; 1059 return err; 1060 } 1061 vc_rxqs->vport_id = vport->vport_id; 1062 vc_rxqs->num_qinfo = num_qs; 1063 memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info)); 1064 1065 memset(&args, 0, sizeof(args)); 1066 args.ops = 
VIRTCHNL2_OP_CONFIG_RX_QUEUES; 1067 args.in_args = (uint8_t *)vc_rxqs; 1068 args.in_args_size = size; 1069 args.out_buffer = adapter->mbx_resp; 1070 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 1071 1072 err = idpf_vc_cmd_execute(adapter, &args); 1073 rte_free(vc_rxqs); 1074 if (err != 0) 1075 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES"); 1076 1077 return err; 1078 } 1079 1080 int 1081 idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq) 1082 { 1083 struct idpf_adapter *adapter = vport->adapter; 1084 struct virtchnl2_config_tx_queues *vc_txqs = NULL; 1085 struct virtchnl2_txq_info *txq_info; 1086 struct idpf_cmd_info args; 1087 uint16_t num_qs; 1088 int size, err; 1089 1090 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) 1091 num_qs = IDPF_TXQ_PER_GRP; 1092 else 1093 num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP; 1094 1095 size = sizeof(*vc_txqs) + (num_qs - 1) * 1096 sizeof(struct virtchnl2_txq_info); 1097 vc_txqs = rte_zmalloc("cfg_txqs", size, 0); 1098 if (vc_txqs == NULL) { 1099 DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues"); 1100 err = -ENOMEM; 1101 return err; 1102 } 1103 vc_txqs->vport_id = vport->vport_id; 1104 vc_txqs->num_qinfo = num_qs; 1105 1106 if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) { 1107 txq_info = &vc_txqs->qinfo[0]; 1108 txq_info->dma_ring_addr = txq->tx_ring_phys_addr; 1109 txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX; 1110 txq_info->queue_id = txq->queue_id; 1111 txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE; 1112 txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; 1113 txq_info->ring_len = txq->nb_tx_desc; 1114 } else { 1115 /* txq info */ 1116 txq_info = &vc_txqs->qinfo[0]; 1117 txq_info->dma_ring_addr = txq->tx_ring_phys_addr; 1118 txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX; 1119 txq_info->queue_id = txq->queue_id; 1120 txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT; 1121 txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; 1122 txq_info->ring_len = 
txq->nb_tx_desc; 1123 txq_info->tx_compl_queue_id = txq->complq->queue_id; 1124 txq_info->relative_queue_id = txq_info->queue_id; 1125 1126 /* tx completion queue info */ 1127 txq_info = &vc_txqs->qinfo[1]; 1128 txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr; 1129 txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; 1130 txq_info->queue_id = txq->complq->queue_id; 1131 txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT; 1132 txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; 1133 txq_info->ring_len = txq->complq->nb_tx_desc; 1134 } 1135 1136 memset(&args, 0, sizeof(args)); 1137 args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES; 1138 args.in_args = (uint8_t *)vc_txqs; 1139 args.in_args_size = size; 1140 args.out_buffer = adapter->mbx_resp; 1141 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 1142 1143 err = idpf_vc_cmd_execute(adapter, &args); 1144 rte_free(vc_txqs); 1145 if (err != 0) 1146 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES"); 1147 1148 return err; 1149 } 1150 1151 int 1152 idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info, 1153 uint16_t num_qs) 1154 { 1155 struct idpf_adapter *adapter = vport->adapter; 1156 struct virtchnl2_config_tx_queues *vc_txqs = NULL; 1157 struct idpf_cmd_info args; 1158 int size, err; 1159 1160 size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info); 1161 vc_txqs = rte_zmalloc("cfg_txqs", size, 0); 1162 if (vc_txqs == NULL) { 1163 DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues"); 1164 err = -ENOMEM; 1165 return err; 1166 } 1167 vc_txqs->vport_id = vport->vport_id; 1168 vc_txqs->num_qinfo = num_qs; 1169 memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info)); 1170 1171 memset(&args, 0, sizeof(args)); 1172 args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES; 1173 args.in_args = (uint8_t *)vc_txqs; 1174 args.in_args_size = size; 1175 args.out_buffer = adapter->mbx_resp; 1176 args.out_size = IDPF_DFLT_MBX_BUF_SIZE; 1177 1178 err = 
idpf_vc_cmd_execute(adapter, &args); 1179 rte_free(vc_txqs); 1180 if (err != 0) 1181 DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES"); 1182 1183 return err; 1184 } 1185 1186 int 1187 idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, 1188 struct idpf_ctlq_msg *q_msg) 1189 { 1190 return idpf_ctlq_recv(cq, num_q_msg, q_msg); 1191 } 1192 1193 int 1194 idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, 1195 u16 *buff_count, struct idpf_dma_mem **buffs) 1196 { 1197 return idpf_ctlq_post_rx_buffs(hw, cq, buff_count, buffs); 1198 } 1199