/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Intel Corporation
 */

#include "idpf_common_virtchnl.h"
#include "idpf_common_logs.h"

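/* Drain previously posted messages from the mailbox send queue (ASQ) and
 * release their control queue descriptors and DMA buffers, so that a new
 * message can be posted.
 */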
static int
idpf_vc_clean(struct idpf_adapter *adapter)
{
	struct idpf_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
	uint16_t num_q_msg = IDPF_CTLQ_LEN;
	struct idpf_dma_mem *dma_mem;
	int err;
	uint32_t i;

	for (i = 0; i < 10; i++) {
		err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
		msleep(20);
		if (num_q_msg > 0)
			break;
	}
	if (err != 0)
		return err;

	/* Empty queue is not an error */
	for (i = 0; i < num_q_msg; i++) {
		dma_mem = q_msg[i]->ctx.indirect.payload;
		if (dma_mem != NULL) {
			idpf_free_dma_mem(&adapter->hw, dma_mem);
			rte_free(dma_mem);
		}
		rte_free(q_msg[i]);
	}

	return 0;
}

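/* Post one virtchnl message with opcode 'op' to the CP through the mailbox
 * send queue.  The payload is copied into a newly allocated DMA buffer that
 * is attached to the control queue descriptor; on failure every intermediate
 * allocation is released again.
 */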
static int
idpf_send_vc_msg(struct idpf_adapter *adapter, uint32_t op,
		 uint16_t msg_size, uint8_t *msg)
{
	struct idpf_ctlq_msg *ctlq_msg;
	struct idpf_dma_mem *dma_mem;
	int err;

	err = idpf_vc_clean(adapter);
	if (err != 0)
		goto err;

	ctlq_msg = rte_zmalloc(NULL, sizeof(struct idpf_ctlq_msg), 0);
	if (ctlq_msg == NULL) {
		err = -ENOMEM;
		goto err;
	}

	dma_mem = rte_zmalloc(NULL, sizeof(struct idpf_dma_mem), 0);
	if (dma_mem == NULL) {
		err = -ENOMEM;
		goto dma_mem_error;
	}

	dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
	idpf_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
	if (dma_mem->va == NULL) {
		err = -ENOMEM;
		goto dma_alloc_error;
	}

	memcpy(dma_mem->va, msg, msg_size);

	ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_pf;
	ctlq_msg->func_id = 0;
	ctlq_msg->data_len = msg_size;
	ctlq_msg->cookie.mbx.chnl_opcode = op;
	ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
	ctlq_msg->ctx.indirect.payload = dma_mem;

	err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
	if (err != 0)
		goto send_error;

	return 0;

send_error:
	idpf_free_dma_mem(&adapter->hw, dma_mem);
dma_alloc_error:
	rte_free(dma_mem);
dma_mem_error:
	rte_free(ctlq_msg);
err:
	return err;
}

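/* Read one message from the mailbox receive queue (ARQ).  The payload is
 * copied into 'buf', the opcode/retval cookie is decoded, and the DMA buffer
 * is posted back to the receive queue.  The return value classifies the
 * message: command reply, system event, nothing pending, or error.
 */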
static enum idpf_vc_result
idpf_read_msg_from_cp(struct idpf_adapter *adapter, uint16_t buf_len,
		      uint8_t *buf)
{
	struct idpf_hw *hw = &adapter->hw;
	struct idpf_ctlq_msg ctlq_msg;
	struct idpf_dma_mem *dma_mem = NULL;
	enum idpf_vc_result result = IDPF_MSG_NON;
	uint32_t opcode;
	uint16_t pending = 1;
	int ret;

	ret = idpf_ctlq_recv(hw->arq, &pending, &ctlq_msg);
	if (ret != 0) {
		DRV_LOG(DEBUG, "Can't read msg from AQ");
		if (ret != -ENOMSG)
			result = IDPF_MSG_ERR;
		return result;
	}

	rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);

	opcode = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
	adapter->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);

	DRV_LOG(DEBUG, "CQ from CP carries opcode %u, retval %d",
		opcode, adapter->cmd_retval);

	if (opcode == VIRTCHNL2_OP_EVENT) {
		struct virtchnl2_event *ve = ctlq_msg.ctx.indirect.payload->va;

		result = IDPF_MSG_SYS;
		switch (ve->event) {
		case VIRTCHNL2_EVENT_LINK_CHANGE:
			/* TBD */
			break;
		default:
			DRV_LOG(ERR, "%s: Unknown event %d from CP",
				__func__, ve->event);
			break;
		}
	} else {
		/* Async reply to a command previously issued by the PF */
		result = IDPF_MSG_CMD;
		if (opcode != adapter->pend_cmd) {
			DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
				adapter->pend_cmd, opcode);
			result = IDPF_MSG_ERR;
		}
	}

	if (ctlq_msg.data_len != 0)
		dma_mem = ctlq_msg.ctx.indirect.payload;
	else
		pending = 0;

	ret = idpf_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
	if (ret != 0 && dma_mem != NULL)
		idpf_free_dma_mem(hw, dma_mem);

	return result;
}

#define MAX_TRY_TIMES 200
#define ASQ_DELAY_MS 10

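/* Poll the mailbox until the reply for command 'ops' arrives and copy the
 * response payload into 'buf'.  Gives up after MAX_TRY_TIMES polls of
 * ASQ_DELAY_MS each, i.e. roughly two seconds.
 */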
int
idpf_vc_one_msg_read(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
		     uint8_t *buf)
{
	int err = 0;
	int i = 0;
	int ret;

	do {
		ret = idpf_read_msg_from_cp(adapter, buf_len, buf);
		if (ret == IDPF_MSG_CMD)
			break;
		rte_delay_ms(ASQ_DELAY_MS);
	} while (i++ < MAX_TRY_TIMES);
	if (i >= MAX_TRY_TIMES ||
	    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
		err = -EBUSY;
		DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
			adapter->cmd_retval, ops);
	}

	return err;
}

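/* Send the virtchnl command described by 'args' and wait for its completion.
 * Init-time commands (VERSION, GET_CAPS, GET_PTYPE_INFO) poll the mailbox for
 * the reply directly; for all other commands this function only waits for the
 * pending-command flag to be cleared by whichever path processes the reply.
 */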
int
idpf_vc_cmd_execute(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
{
	int err = 0;
	int i = 0;
	int ret;

	if (atomic_set_cmd(adapter, args->ops))
		return -EINVAL;

	ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size, args->in_args);
	if (ret != 0) {
		DRV_LOG(ERR, "fail to send cmd %d", args->ops);
		clear_cmd(adapter);
		return ret;
	}

	switch (args->ops) {
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL2_OP_GET_CAPS:
	case VIRTCHNL2_OP_GET_PTYPE_INFO:
		/* for init virtchnl ops, need to poll the response */
		err = idpf_vc_one_msg_read(adapter, args->ops, args->out_size, args->out_buffer);
		clear_cmd(adapter);
		break;
	default:
		/* For other virtchnl ops at runtime,
		 * wait for the cmd done flag.
		 */
		do {
			if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)
				break;
			rte_delay_ms(ASQ_DELAY_MS);
			/* If no msg is read or only a sys event is read, continue */
		} while (i++ < MAX_TRY_TIMES);
		/* If no response is received, clear the command */
		if (i >= MAX_TRY_TIMES ||
		    adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
			err = -EBUSY;
			DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
				adapter->cmd_retval, args->ops);
			clear_cmd(adapter);
		}
		break;
	}

	return err;
}

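/* Negotiate the virtchnl2 API version with the CP and fail unless the CP
 * reports exactly major 2 / minor 0.
 */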
int
idpf_vc_api_version_check(struct idpf_adapter *adapter)
{
	struct virtchnl2_version_info version, *pver;
	struct idpf_cmd_info args;
	int err;

	memset(&version, 0, sizeof(version));
	version.major = VIRTCHNL2_VERSION_MAJOR_2;
	version.minor = VIRTCHNL2_VERSION_MINOR_0;

	args.ops = VIRTCHNL_OP_VERSION;
	args.in_args = (uint8_t *)&version;
	args.in_args_size = sizeof(version);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0) {
		DRV_LOG(ERR,
			"Failed to execute command of VIRTCHNL_OP_VERSION");
		return err;
	}

	pver = (struct virtchnl2_version_info *)args.out_buffer;
	adapter->virtchnl_version = *pver;

	if (adapter->virtchnl_version.major != VIRTCHNL2_VERSION_MAJOR_2 ||
	    adapter->virtchnl_version.minor != VIRTCHNL2_VERSION_MINOR_0) {
		DRV_LOG(ERR, "VIRTCHNL API version mismatch:(%u.%u)-(%u.%u)",
			adapter->virtchnl_version.major,
			adapter->virtchnl_version.minor,
			VIRTCHNL2_VERSION_MAJOR_2,
			VIRTCHNL2_VERSION_MINOR_0);
		return -EINVAL;
	}

	return 0;
}

int
idpf_vc_caps_get(struct idpf_adapter *adapter)
{
	struct idpf_cmd_info args;
	int err;

	args.ops = VIRTCHNL2_OP_GET_CAPS;
	args.in_args = (uint8_t *)&adapter->caps;
	args.in_args_size = sizeof(struct virtchnl2_get_capabilities);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0) {
		DRV_LOG(ERR,
			"Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
		return err;
	}

	rte_memcpy(&adapter->caps, args.out_buffer, sizeof(struct virtchnl2_get_capabilities));

	return 0;
}

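/* Create a vport with the queue model and queue counts taken from
 * 'create_vport_info'.  The CP's reply, which holds the final vport
 * configuration, is copied into vport->vport_info.
 */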
int
idpf_vc_vport_create(struct idpf_vport *vport,
		     struct virtchnl2_create_vport *create_vport_info)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_create_vport vport_msg;
	struct idpf_cmd_info args;
	int err = -1;

	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
	vport_msg.vport_type = create_vport_info->vport_type;
	vport_msg.txq_model = create_vport_info->txq_model;
	vport_msg.rxq_model = create_vport_info->rxq_model;
	vport_msg.num_tx_q = create_vport_info->num_tx_q;
	vport_msg.num_tx_complq = create_vport_info->num_tx_complq;
	vport_msg.num_rx_q = create_vport_info->num_rx_q;
	vport_msg.num_rx_bufq = create_vport_info->num_rx_bufq;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
	args.in_args = (uint8_t *)&vport_msg;
	args.in_args_size = sizeof(vport_msg);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0) {
		DRV_LOG(ERR,
			"Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
		return err;
	}

	rte_memcpy(&(vport->vport_info.info), args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
	return 0;
}

int
idpf_vc_vport_destroy(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_vport vc_vport;
	struct idpf_cmd_info args;
	int err;

	vc_vport.vport_id = vport->vport_id;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
	args.in_args = (uint8_t *)&vc_vport;
	args.in_args_size = sizeof(vc_vport);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");

	return err;
}

int
idpf_vc_queue_grps_add(struct idpf_vport *vport,
		       struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
		       uint8_t *p2p_queue_grps_out)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct idpf_cmd_info args;
	int size;
	int err = -1;

	size = sizeof(*p2p_queue_grps_info) +
	       (p2p_queue_grps_info->num_queue_groups - 1) *
	       sizeof(struct virtchnl2_queue_group_info);

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
	args.in_args = (uint8_t *)p2p_queue_grps_info;
	args.in_args_size = size;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0) {
		DRV_LOG(ERR,
			"Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
		return err;
	}

	rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
	return 0;
}

int idpf_vc_queue_grps_del(struct idpf_vport *vport,
			   uint16_t num_q_grps,
			   struct virtchnl2_queue_group_id *qg_ids)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_delete_queue_groups *vc_del_q_grps;
	struct idpf_cmd_info args;
	int size;
	int err;

	size = sizeof(*vc_del_q_grps) +
	       (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
	vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
	if (vc_del_q_grps == NULL)
		return -ENOMEM;

	vc_del_q_grps->vport_id = vport->vport_id;
	vc_del_q_grps->num_queue_groups = num_q_grps;
	memcpy(vc_del_q_grps->qg_ids, qg_ids,
	       num_q_grps * sizeof(struct virtchnl2_queue_group_id));

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
	args.in_args = (uint8_t *)vc_del_q_grps;
	args.in_args_size = size;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");

	rte_free(vc_del_q_grps);
	return err;
}

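/* Program the vport's RSS key.  The variable-length virtchnl2_rss_key
 * message is sized for vport->rss_key_size key bytes.
 */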
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_rss_key *rss_key;
	struct idpf_cmd_info args;
	int len, err;

	len = sizeof(*rss_key) + sizeof(rss_key->key[0]) *
	      (vport->rss_key_size - 1);
	rss_key = rte_zmalloc("rss_key", len, 0);
	if (rss_key == NULL)
		return -ENOMEM;

	rss_key->vport_id = vport->vport_id;
	rss_key->key_len = vport->rss_key_size;
	rte_memcpy(rss_key->key, vport->rss_key,
		   sizeof(rss_key->key[0]) * vport->rss_key_size);

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_SET_RSS_KEY;
	args.in_args = (uint8_t *)rss_key;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");

	rte_free(rss_key);
	return err;
}

int idpf_vc_rss_key_get(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_rss_key *rss_key_ret;
	struct virtchnl2_rss_key rss_key;
	struct idpf_cmd_info args;
	int err;

	memset(&rss_key, 0, sizeof(rss_key));
	rss_key.vport_id = vport->vport_id;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
	args.in_args = (uint8_t *)&rss_key;
	args.in_args_size = sizeof(rss_key);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);

	if (!err) {
		rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
		if (rss_key_ret->key_len != vport->rss_key_size) {
			rte_free(vport->rss_key);
			vport->rss_key = NULL;
			vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
						      rss_key_ret->key_len);
			vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
			if (!vport->rss_key) {
				vport->rss_key_size = 0;
				DRV_LOG(ERR, "Failed to allocate RSS key");
				return -ENOMEM;
			}
		}
		rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
	} else {
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
	}

	return err;
}

int
idpf_vc_rss_lut_set(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_rss_lut *rss_lut;
	struct idpf_cmd_info args;
	int len, err;

	len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) *
	      (vport->rss_lut_size - 1);
	rss_lut = rte_zmalloc("rss_lut", len, 0);
	if (rss_lut == NULL)
		return -ENOMEM;

	rss_lut->vport_id = vport->vport_id;
	rss_lut->lut_entries = vport->rss_lut_size;
	rte_memcpy(rss_lut->lut, vport->rss_lut,
		   sizeof(rss_lut->lut[0]) * vport->rss_lut_size);

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_SET_RSS_LUT;
	args.in_args = (uint8_t *)rss_lut;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");

	rte_free(rss_lut);
	return err;
}

int
idpf_vc_rss_lut_get(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_rss_lut *rss_lut_ret;
	struct virtchnl2_rss_lut rss_lut;
	struct idpf_cmd_info args;
	int err;

	memset(&rss_lut, 0, sizeof(rss_lut));
	rss_lut.vport_id = vport->vport_id;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
	args.in_args = (uint8_t *)&rss_lut;
	args.in_args_size = sizeof(rss_lut);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);

	if (!err) {
		rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
		if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
			rte_free(vport->rss_lut);
			vport->rss_lut = NULL;
			vport->rss_lut = rte_zmalloc("rss_lut",
						     sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
			if (vport->rss_lut == NULL) {
				DRV_LOG(ERR, "Failed to allocate RSS lut");
				return -ENOMEM;
			}
		}
		rte_memcpy(vport->rss_lut, rss_lut_ret->lut,
			   rss_lut_ret->lut_entries * sizeof(uint32_t));
		vport->rss_lut_size = rss_lut_ret->lut_entries;
	} else {
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
	}

	return err;
}

int
idpf_vc_rss_hash_get(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_rss_hash *rss_hash_ret;
	struct virtchnl2_rss_hash rss_hash;
	struct idpf_cmd_info args;
	int err;

	memset(&rss_hash, 0, sizeof(rss_hash));
	rss_hash.ptype_groups = vport->rss_hf;
	rss_hash.vport_id = vport->vport_id;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
	args.in_args = (uint8_t *)&rss_hash;
	args.in_args_size = sizeof(rss_hash);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);

	if (!err) {
		rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
		vport->rss_hf = rss_hash_ret->ptype_groups;
	} else {
		DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH");
	}

	return err;
}

int
idpf_vc_rss_hash_set(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_rss_hash rss_hash;
	struct idpf_cmd_info args;
	int err;

	memset(&rss_hash, 0, sizeof(rss_hash));
	rss_hash.ptype_groups = vport->rss_hf;
	rss_hash.vport_id = vport->vport_id;

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_SET_RSS_HASH;
	args.in_args = (uint8_t *)&rss_hash;
	args.in_args_size = sizeof(rss_hash);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");

	return err;
}

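/* Map (or unmap, when 'map' is false) the first 'nb_rxq' Rx queues to the
 * interrupt vectors recorded in vport->qv_map, using ITR index 0.
 */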
int
idpf_vc_irq_map_unmap_config(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector_maps *map_info;
	struct virtchnl2_queue_vector *vecmap;
	struct idpf_cmd_info args;
	int len, i, err = 0;

	len = sizeof(struct virtchnl2_queue_vector_maps) +
	      (nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);

	map_info = rte_zmalloc("map_info", len, 0);
	if (map_info == NULL)
		return -ENOMEM;

	map_info->vport_id = vport->vport_id;
	map_info->num_qv_maps = nb_rxq;
	for (i = 0; i < nb_rxq; i++) {
		vecmap = &map_info->qv_maps[i];
		vecmap->queue_id = vport->qv_map[i].queue_id;
		vecmap->vector_id = vport->qv_map[i].vector_id;
		vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
		vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
	}

	args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
	args.in_args = (uint8_t *)map_info;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
			map ? "MAP" : "UNMAP");

	rte_free(map_info);
	return err;
}

int
idpf_vc_vectors_alloc(struct idpf_vport *vport, uint16_t num_vectors)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_alloc_vectors *alloc_vec;
	struct idpf_cmd_info args;
	int err, len;

	len = sizeof(struct virtchnl2_alloc_vectors) +
	      (num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);
	alloc_vec = rte_zmalloc("alloc_vec", len, 0);
	if (alloc_vec == NULL)
		return -ENOMEM;

	alloc_vec->num_vectors = num_vectors;

	args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
	args.in_args = (uint8_t *)alloc_vec;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");

	rte_memcpy(vport->recv_vectors, args.out_buffer, len);
	rte_free(alloc_vec);
	return err;
}

int
idpf_vc_vectors_dealloc(struct idpf_vport *vport)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_alloc_vectors *alloc_vec;
	struct virtchnl2_vector_chunks *vcs;
	struct idpf_cmd_info args;
	int err, len;

	alloc_vec = vport->recv_vectors;
	vcs = &alloc_vec->vchunks;

	len = sizeof(struct virtchnl2_vector_chunks) +
	      (vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);

	args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
	args.in_args = (uint8_t *)vcs;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");

	return err;
}

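/* Enable or disable a single queue of the given virtchnl2 queue type by
 * sending a one-chunk ENABLE/DISABLE_QUEUES request.
 */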
int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
			  uint32_t type, bool on)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_del_ena_dis_queues *queue_select;
	struct virtchnl2_queue_chunk *queue_chunk;
	struct idpf_cmd_info args;
	int err, len;

	len = sizeof(struct virtchnl2_del_ena_dis_queues);
	queue_select = rte_zmalloc("queue_select", len, 0);
	if (queue_select == NULL)
		return -ENOMEM;

	queue_chunk = queue_select->chunks.chunks;
	queue_select->chunks.num_chunks = 1;
	queue_select->vport_id = vport->vport_id;

	queue_chunk->type = type;
	queue_chunk->start_queue_id = qid;
	queue_chunk->num_queues = 1;

	args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
		VIRTCHNL2_OP_DISABLE_QUEUES;
	args.in_args = (uint8_t *)queue_select;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
			on ? "ENABLE" : "DISABLE");

	rte_free(queue_select);
	return err;
}

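/* Enable or disable one logical Rx or Tx queue.  In split queue model the
 * paired Tx completion queue, or the two Rx buffer queues feeding the Rx
 * queue, are switched together with it.
 */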
int
idpf_vc_queue_switch(struct idpf_vport *vport, uint16_t qid,
		     bool rx, bool on, uint32_t type)
{
	int err, queue_id;

	if (rx)
		queue_id = vport->chunks_info.rx_start_qid + qid;
	else
		queue_id = vport->chunks_info.tx_start_qid + qid;
	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
	if (err != 0)
		return err;

	/* switch tx completion queue */
	if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
		queue_id = vport->chunks_info.tx_compl_start_qid + qid;
		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
		if (err != 0)
			return err;
	}

	/* switch rx buffer queue */
	if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
		queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
		if (err != 0)
			return err;
		queue_id++;
		err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
		if (err != 0)
			return err;
	}

	return err;
}

#define IDPF_RXTX_QUEUE_CHUNKS_NUM 2
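/* Enable or disable all of the vport's queues in a single request.  One chunk
 * is built per queue type and indexed by the virtchnl2 queue type value, so
 * the chunk count grows when the Tx or Rx side uses the split queue model.
 */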
int
idpf_vc_queues_ena_dis(struct idpf_vport *vport, bool enable)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_del_ena_dis_queues *queue_select;
	struct virtchnl2_queue_chunk *queue_chunk;
	uint32_t type;
	struct idpf_cmd_info args;
	uint16_t num_chunks;
	int err, len;

	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
		num_chunks++;
	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
		num_chunks++;

	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
	      sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
	queue_select = rte_zmalloc("queue_select", len, 0);
	if (queue_select == NULL)
		return -ENOMEM;

	queue_chunk = queue_select->chunks.chunks;
	queue_select->chunks.num_chunks = num_chunks;
	queue_select->vport_id = vport->vport_id;

	type = VIRTCHNL2_QUEUE_TYPE_RX;
	queue_chunk[type].type = type;
	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
	queue_chunk[type].num_queues = vport->num_rx_q;

	type = VIRTCHNL2_QUEUE_TYPE_TX;
	queue_chunk[type].type = type;
	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
	queue_chunk[type].num_queues = vport->num_tx_q;

	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
		queue_chunk[type].type = type;
		queue_chunk[type].start_queue_id =
			vport->chunks_info.rx_buf_start_qid;
		queue_chunk[type].num_queues = vport->num_rx_bufq;
	}

	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
		queue_chunk[type].type = type;
		queue_chunk[type].start_queue_id =
			vport->chunks_info.tx_compl_start_qid;
		queue_chunk[type].num_queues = vport->num_tx_complq;
	}

	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
		VIRTCHNL2_OP_DISABLE_QUEUES;
	args.in_args = (uint8_t *)queue_select;
	args.in_args_size = len;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
			enable ? "ENABLE" : "DISABLE");

	rte_free(queue_select);
	return err;
}

int
idpf_vc_vport_ena_dis(struct idpf_vport *vport, bool enable)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_vport vc_vport;
	struct idpf_cmd_info args;
	int err;

	vc_vport.vport_id = vport->vport_id;
	args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
		VIRTCHNL2_OP_DISABLE_VPORT;
	args.in_args = (uint8_t *)&vc_vport;
	args.in_args_size = sizeof(vc_vport);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0) {
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
			enable ? "ENABLE" : "DISABLE");
	}

	return err;
}

int
idpf_vc_ptype_info_query(struct idpf_adapter *adapter,
			 struct virtchnl2_get_ptype_info *req_ptype_info,
			 struct virtchnl2_get_ptype_info *recv_ptype_info)
{
	struct idpf_cmd_info args;
	int err;

	args.ops = VIRTCHNL2_OP_GET_PTYPE_INFO;
	args.in_args = (uint8_t *)req_ptype_info;
	args.in_args_size = sizeof(struct virtchnl2_get_ptype_info);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_PTYPE_INFO");

	rte_memcpy(recv_ptype_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
	return err;
}

int
idpf_vc_stats_query(struct idpf_vport *vport,
		    struct virtchnl2_vport_stats **pstats)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_vport_stats vport_stats;
	struct idpf_cmd_info args;
	int err;

	vport_stats.vport_id = vport->vport_id;
	args.ops = VIRTCHNL2_OP_GET_STATS;
	args.in_args = (u8 *)&vport_stats;
	args.in_args_size = sizeof(vport_stats);
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	if (err) {
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_STATS");
		*pstats = NULL;
		return err;
	}
	*pstats = (struct virtchnl2_vport_stats *)args.out_buffer;
	return 0;
}

#define IDPF_RX_BUF_STRIDE 64
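/* Configure one Rx queue group.  In single queue model only the Rx queue
 * itself is described; in split queue model the Rx queue and its two buffer
 * queues are carried in one CONFIG_RX_QUEUES message.
 */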
int
idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
	struct virtchnl2_rxq_info *rxq_info;
	struct idpf_cmd_info args;
	uint16_t num_qs;
	int size, err, i;

	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
		num_qs = IDPF_RXQ_PER_GRP;
	else
		num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;

	size = sizeof(*vc_rxqs) + (num_qs - 1) *
	       sizeof(struct virtchnl2_rxq_info);
	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
	if (vc_rxqs == NULL) {
		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
		err = -ENOMEM;
		return err;
	}
	vc_rxqs->vport_id = vport->vport_id;
	vc_rxqs->num_qinfo = num_qs;
	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
		rxq_info = &vc_rxqs->qinfo[0];
		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
		rxq_info->queue_id = rxq->queue_id;
		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
		rxq_info->data_buffer_size = rxq->rx_buf_len;
		rxq_info->max_pkt_size = vport->max_pkt_len;

		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;

		rxq_info->ring_len = rxq->nb_rx_desc;
	} else {
		/* Rx queue */
		rxq_info = &vc_rxqs->qinfo[0];
		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
		rxq_info->queue_id = rxq->queue_id;
		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
		rxq_info->data_buffer_size = rxq->rx_buf_len;
		rxq_info->max_pkt_size = vport->max_pkt_len;

		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;

		rxq_info->ring_len = rxq->nb_rx_desc;
		rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
		rxq_info->bufq2_ena = 1;
		rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
		rxq_info->rx_buffer_low_watermark = 64;

		/* Buffer queue */
		for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
			struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;

			rxq_info = &vc_rxqs->qinfo[i];
			rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
			rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
			rxq_info->queue_id = bufq->queue_id;
			rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
			rxq_info->data_buffer_size = bufq->rx_buf_len;
			rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
			rxq_info->ring_len = bufq->nb_rx_desc;

			rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
			rxq_info->rx_buffer_low_watermark = 64;
		}
	}

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
	args.in_args = (uint8_t *)vc_rxqs;
	args.in_args_size = size;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	rte_free(vc_rxqs);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");

	return err;
}

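/* Configure 'num_qs' Rx queues directly from caller-built virtchnl2_rxq_info
 * entries instead of deriving them from an idpf_rx_queue.
 */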
int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
			       uint16_t num_qs)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
	struct idpf_cmd_info args;
	int size, err;

	size = sizeof(*vc_rxqs) + (num_qs - 1) *
	       sizeof(struct virtchnl2_rxq_info);
	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
	if (vc_rxqs == NULL) {
		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
		err = -ENOMEM;
		return err;
	}
	vc_rxqs->vport_id = vport->vport_id;
	vc_rxqs->num_qinfo = num_qs;
	memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
	args.in_args = (uint8_t *)vc_rxqs;
	args.in_args_size = size;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	rte_free(vc_rxqs);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");

	return err;
}

int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
	struct virtchnl2_txq_info *txq_info;
	struct idpf_cmd_info args;
	uint16_t num_qs;
	int size, err;

	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
		num_qs = IDPF_TXQ_PER_GRP;
	else
		num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;

	size = sizeof(*vc_txqs) + (num_qs - 1) *
	       sizeof(struct virtchnl2_txq_info);
	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
	if (vc_txqs == NULL) {
		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
		err = -ENOMEM;
		return err;
	}
	vc_txqs->vport_id = vport->vport_id;
	vc_txqs->num_qinfo = num_qs;

	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
		txq_info = &vc_txqs->qinfo[0];
		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
		txq_info->queue_id = txq->queue_id;
		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
		txq_info->ring_len = txq->nb_tx_desc;
	} else {
		/* txq info */
		txq_info = &vc_txqs->qinfo[0];
		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
		txq_info->queue_id = txq->queue_id;
		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
		txq_info->ring_len = txq->nb_tx_desc;
		txq_info->tx_compl_queue_id = txq->complq->queue_id;
		txq_info->relative_queue_id = txq_info->queue_id;

		/* tx completion queue info */
		txq_info = &vc_txqs->qinfo[1];
		txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
		txq_info->queue_id = txq->complq->queue_id;
		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
		txq_info->ring_len = txq->complq->nb_tx_desc;
	}

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
	args.in_args = (uint8_t *)vc_txqs;
	args.in_args_size = size;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	rte_free(vc_txqs);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");

	return err;
}

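/* Configure 'num_qs' Tx queues directly from caller-built virtchnl2_txq_info
 * entries instead of deriving them from an idpf_tx_queue.
 */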
int
idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
			   uint16_t num_qs)
{
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
	struct idpf_cmd_info args;
	int size, err;

	size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
	if (vc_txqs == NULL) {
		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
		err = -ENOMEM;
		return err;
	}
	vc_txqs->vport_id = vport->vport_id;
	vc_txqs->num_qinfo = num_qs;
	memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));

	memset(&args, 0, sizeof(args));
	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
	args.in_args = (uint8_t *)vc_txqs;
	args.in_args_size = size;
	args.out_buffer = adapter->mbx_resp;
	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;

	err = idpf_vc_cmd_execute(adapter, &args);
	rte_free(vc_txqs);
	if (err != 0)
		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");

	return err;
}

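/* Thin exported wrappers around the control queue receive and Rx buffer
 * posting primitives.
 */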
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
		  struct idpf_ctlq_msg *q_msg)
{
	return idpf_ctlq_recv(cq, num_q_msg, q_msg);
}

int
idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			   u16 *buff_count, struct idpf_dma_mem **buffs)
{
	return idpf_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
}