/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include <rte_thash.h>

#include <cnxk_eswitch.h>
#include <cnxk_rep.h>
#define CNXK_NIX_DEF_SQ_COUNT 512

int
cnxk_eswitch_representor_id(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func,
			    uint16_t *rep_id)
{
	struct cnxk_esw_repr_hw_info *repr_info;
	int rc = 0;

	repr_info = cnxk_eswitch_representor_hw_info(eswitch_dev, hw_func);
	if (!repr_info) {
		plt_warn("Failed to get representor group for %x", hw_func);
		rc = -ENOENT;
		goto fail;
	}

	*rep_id = repr_info->rep_id;

	return 0;
fail:
	return rc;
}

struct cnxk_esw_repr_hw_info *
cnxk_eswitch_representor_hw_info(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func)
{
	struct cnxk_eswitch_devargs *esw_da;
	int i, j;

	if (!eswitch_dev)
		return NULL;

	/* Traverse the initialized representor list */
	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
		esw_da = &eswitch_dev->esw_da[i];
		for (j = 0; j < esw_da->nb_repr_ports; j++) {
			if (esw_da->repr_hw_info[j].hw_func == hw_func)
				return &esw_da->repr_hw_info[j];
		}
	}
	return NULL;
}

static int
eswitch_hw_rsrc_cleanup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_device *pci_dev)
{
	struct roc_nix *nix;
	int rc = 0;

	nix = &eswitch_dev->nix;

	roc_nix_unregister_queue_irqs(nix);
	roc_nix_tm_fini(nix);
	rc = roc_nix_lf_free(nix);
	if (rc) {
		plt_err("Failed to free nix lf, rc %d", rc);
		goto exit;
	}

	/* Check if this device is hosting common resource */
	nix = roc_idev_npa_nix_get();
	if (!nix || nix->pci_dev != pci_dev) {
		rc = 0;
		goto exit;
	}

	/* Try nix fini now */
	rc = roc_nix_dev_fini(nix);
	if (rc == -EAGAIN) {
		plt_info("Common resource in use by other devices %s", pci_dev->name);
		goto exit;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
		goto exit;
	}

	rte_free(eswitch_dev->txq);
	rte_free(eswitch_dev->rxq);
	rte_free(eswitch_dev->cxq);

exit:
	return rc;
}

static int
cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
{
	struct cnxk_eswitch_dev *eswitch_dev;
	int rc = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eswitch_dev = cnxk_eswitch_pmd_priv();
	if (!eswitch_dev) {
		rc = -EINVAL;
		goto exit;
	}

	/* Remove representor devices associated with PF */
	if (eswitch_dev->repr_cnt.nb_repr_created) {
		/* Exiting the rep msg ctrl thread */
		if (eswitch_dev->start_ctrl_msg_thrd) {
			uint32_t sunlen;
			struct sockaddr_un sun = {0};
			int sock_fd = 0;

			eswitch_dev->start_ctrl_msg_thrd = false;
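			/* With no client connected, the ctrl msg thread is
			 * presumably blocked waiting for a connection; make a
			 * dummy connection so it wakes up and observes the
			 * cleared start flag.
			 */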
			if (!eswitch_dev->client_connected) {
				plt_esw_dbg("Establishing connection for teardown");
				sock_fd = socket(AF_UNIX, SOCK_STREAM, 0);
				if (sock_fd == -1) {
					plt_err("Failed to open socket. err %d", -errno);
					return -errno;
				}
				sun.sun_family = AF_UNIX;
				sunlen = sizeof(struct sockaddr_un);
				strncpy(sun.sun_path, CNXK_ESWITCH_CTRL_MSG_SOCK_PATH,
					sizeof(sun.sun_path) - 1);

				if (connect(sock_fd, (struct sockaddr *)&sun, sunlen) < 0) {
					plt_err("Failed to connect socket: %s, err %d",
						CNXK_ESWITCH_CTRL_MSG_SOCK_PATH, errno);
					close(sock_fd);
					return -errno;
				}
			}
			rte_thread_join(eswitch_dev->rep_ctrl_msg_thread, NULL);
			if (!eswitch_dev->client_connected)
				close(sock_fd);
		}

		if (eswitch_dev->repte_msg_proc.start_thread) {
			eswitch_dev->repte_msg_proc.start_thread = false;
			pthread_cond_signal(&eswitch_dev->repte_msg_proc.repte_msg_cond);
			rte_thread_join(eswitch_dev->repte_msg_proc.repte_msg_thread, NULL);
			pthread_mutex_destroy(&eswitch_dev->repte_msg_proc.mutex);
			pthread_cond_destroy(&eswitch_dev->repte_msg_proc.repte_msg_cond);
		}

		/* Remove representor devices associated with PF */
		cnxk_rep_dev_remove(eswitch_dev);
	}

	/* Cleanup NPC rxtx flow rules */
	cnxk_eswitch_flow_rules_remove_list(eswitch_dev, &eswitch_dev->esw_flow_list,
					    eswitch_dev->npc.pf_func);

	/* Cleanup HW resources */
	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);

	rte_memzone_free(rte_memzone_lookup(CNXK_REP_ESWITCH_DEV_MZ));
exit:
	return rc;
}

int
cnxk_eswitch_nix_rsrc_start(struct cnxk_eswitch_dev *eswitch_dev)
{
	int rc;

	/* Install eswitch PF mcam rules */
	rc = cnxk_eswitch_pfvf_flow_rules_install(eswitch_dev, false);
	if (rc) {
		plt_err("Failed to install rxtx rules, rc %d", rc);
		goto done;
	}

	/* Configure TPID for Eswitch PF LFs */
	rc = roc_eswitch_nix_vlan_tpid_set(&eswitch_dev->nix, ROC_NIX_VLAN_TYPE_OUTER,
					   CNXK_ESWITCH_VLAN_TPID, false);
	if (rc) {
		plt_err("Failed to configure tpid, rc %d", rc);
		goto done;
	}

	/* Enable Rx in NPC */
	rc = roc_nix_npc_rx_ena_dis(&eswitch_dev->nix, true);
	if (rc) {
		plt_err("Failed to enable NPC rx %d", rc);
		goto done;
	}

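	/* Enable all NPC mcam entries associated with this eswitch */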
	rc = roc_npc_mcam_enable_all_entries(&eswitch_dev->npc, 1);
	if (rc) {
		plt_err("Failed to enable NPC entries %d", rc);
		goto done;
	}

done:
	return rc;
}

int
cnxk_eswitch_txq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
{
	struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
	int rc = -EINVAL;

	if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STARTED)
		return 0;

	if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_CONFIGURED) {
		plt_err("Eswitch txq %d not configured yet", qid);
		goto done;
	}

	rc = roc_nix_sq_ena_dis(sq, true);
	if (rc) {
		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
		goto done;
	}

	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STARTED;
done:
	return rc;
}

int
cnxk_eswitch_txq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
{
	struct roc_nix_sq *sq = &eswitch_dev->txq[qid].sqs;
	int rc = -EINVAL;

	if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STOPPED ||
	    eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
		return 0;

	if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED) {
		plt_err("Eswitch txq %d not started", qid);
		goto done;
	}

	rc = roc_nix_sq_ena_dis(sq, false);
	if (rc) {
		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid, rc);
		goto done;
	}

	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STOPPED;
done:
	return rc;
}

int
cnxk_eswitch_rxq_start(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
{
	struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
	int rc = -EINVAL;

	if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STARTED)
		return 0;

	if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_CONFIGURED) {
		plt_err("Eswitch rxq %d not configured yet", qid);
		goto done;
	}

	rc = roc_nix_rq_ena_dis(rq, true);
	if (rc) {
		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
		goto done;
	}

	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STARTED;
done:
	return rc;
}

int
cnxk_eswitch_rxq_stop(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
{
	struct roc_nix_rq *rq = &eswitch_dev->rxq[qid].rqs;
	int rc = -EINVAL;

	if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_STOPPED ||
	    eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
		return 0;

	if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_STARTED) {
		plt_err("Eswitch rxq %d not started", qid);
		goto done;
	}

	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc) {
		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
		goto done;
	}

	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_STOPPED;
done:
	return rc;
}

int
cnxk_eswitch_rxq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
{
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	int rc;

	if (eswitch_dev->rxq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
		return 0;

	/* Cleanup ROC RQ */
	rq = &eswitch_dev->rxq[qid].rqs;
	rc = roc_nix_rq_fini(rq);
	if (rc) {
		plt_err("Failed to cleanup rq, rc=%d", rc);
		goto fail;
	}

	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_RELEASED;

	/* Cleanup ROC CQ */
	cq = &eswitch_dev->cxq[qid].cqs;
	rc = roc_nix_cq_fini(cq);
	if (rc) {
		plt_err("Failed to cleanup cq, rc=%d", rc);
		goto fail;
	}

	eswitch_dev->cxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_RELEASED;
fail:
	return rc;
}

int
cnxk_eswitch_rxq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
		       const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	struct roc_nix *nix = &eswitch_dev->nix;
	struct rte_mempool *lpb_pool = mp;
	struct rte_mempool_ops *ops;
	const char *platform_ops;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t first_skip;
	int rc = -EINVAL;

	if (eswitch_dev->rxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_RELEASED ||
	    eswitch_dev->cxq[qid].state != CNXK_ESWITCH_QUEUE_STATE_RELEASED) {
		plt_err("Queue %d is in invalid state %d, cannot be setup", qid,
			eswitch_dev->rxq[qid].state);
		goto fail;
	}

	RTE_SET_USED(rx_conf);
	platform_ops = rte_mbuf_platform_mempool_ops();
	/* This driver needs cnxk_npa mempool ops to work */
	ops = rte_mempool_get_ops(lpb_pool->ops_index);
	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
		plt_err("mempool ops should be of cnxk_npa type");
		goto fail;
	}

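	/* For cnxk_npa pools the pool_id doubles as the NPA aura handle;
	 * zero indicates an unusable aura.
	 */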
	if (lpb_pool->pool_id == 0) {
		plt_err("Invalid pool_id");
		goto fail;
	}

	/* Setup ROC CQ */
	cq = &eswitch_dev->cxq[qid].cqs;
	memset(cq, 0, sizeof(struct roc_nix_cq));
	cq->qid = qid;
	cq->nb_desc = nb_desc;
	rc = roc_nix_cq_init(nix, cq);
	if (rc) {
		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
		goto fail;
	}
	eswitch_dev->cxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_CONFIGURED;

	/* Setup ROC RQ */
	rq = &eswitch_dev->rxq[qid].rqs;
	memset(rq, 0, sizeof(struct roc_nix_rq));
	rq->qid = qid;
	rq->cqid = cq->qid;
	rq->aura_handle = lpb_pool->pool_id;
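	/* Report the full 32-bit flow tag in the CQE; eswitch RQs do not
	 * deliver packets via SSO events.
	 */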
	rq->flow_tag_width = 32;
	rq->sso_ena = false;

	/* Calculate first mbuf skip */
	first_skip = sizeof(struct rte_mbuf);
	first_skip += RTE_PKTMBUF_HEADROOM;
	first_skip += rte_pktmbuf_priv_size(lpb_pool);
	rq->first_skip = first_skip;
	rq->later_skip = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(lpb_pool);
	rq->lpb_size = lpb_pool->elt_size;
	if (roc_errata_nix_no_meta_aura())
		rq->lpb_drop_ena = true;

	rc = roc_nix_rq_init(nix, rq, true);
	if (rc) {
		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
		goto cq_fini;
	}
	eswitch_dev->rxq[qid].state = CNXK_ESWITCH_QUEUE_STATE_CONFIGURED;

	return 0;
cq_fini:
	rc |= roc_nix_cq_fini(cq);
fail:
	return rc;
}

int
cnxk_eswitch_txq_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid)
{
	struct roc_nix_sq *sq;
	int rc = 0;

	if (eswitch_dev->txq[qid].state == CNXK_ESWITCH_QUEUE_STATE_RELEASED)
		return 0;

	/* Cleanup ROC SQ */
	sq = &eswitch_dev->txq[qid].sqs;
	rc = roc_nix_sq_fini(sq);
	if (rc) {
		plt_err("Failed to cleanup sq, rc=%d", rc);
		goto fail;
	}

	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_RELEASED;
fail:
	return rc;
}

int
cnxk_eswitch_txq_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t qid, uint16_t nb_desc,
		       const struct rte_eth_txconf *tx_conf)
{
	struct roc_nix_sq *sq;
	int rc = 0;

	if (eswitch_dev->txq[qid].state != CNXK_ESWITCH_QUEUE_STATE_RELEASED) {
		plt_err("Queue %d is in invalid state %d, cannot be setup", qid,
			eswitch_dev->txq[qid].state);
		rc = -EINVAL;
		goto fail;
	}
	RTE_SET_USED(tx_conf);
	/* Setup ROC SQ */
	sq = &eswitch_dev->txq[qid].sqs;
	memset(sq, 0, sizeof(struct roc_nix_sq));
	sq->qid = qid;
	sq->nb_desc = nb_desc;
	sq->max_sqe_sz = NIX_MAXSQESZ_W16;
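	/* For deep SQs, add flow-control hysteresis to reduce the rate of
	 * FC updates (threshold set by CNXK_NIX_DEF_SQ_COUNT above).
	 */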
	if (sq->nb_desc >= CNXK_NIX_DEF_SQ_COUNT)
		sq->fc_hyst_bits = 0x1;

	rc = roc_nix_sq_init(&eswitch_dev->nix, sq);
	if (rc) {
		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
		goto fail;
	}

	eswitch_dev->txq[qid].state = CNXK_ESWITCH_QUEUE_STATE_CONFIGURED;

fail:
	return rc;
}

static int
nix_lf_setup(struct cnxk_eswitch_dev *eswitch_dev)
{
	uint16_t nb_rxq, nb_txq, nb_cq;
	struct roc_nix_fc_cfg fc_cfg;
	struct roc_nix *nix;
	uint64_t rx_cfg;
	void *qs;
	int rc;

	/* Initialize base roc nix */
	nix = &eswitch_dev->nix;
	nix->pci_dev = eswitch_dev->pci_dev;
	nix->hw_vlan_ins = true;
	nix->reta_sz = ROC_NIX_RSS_RETA_SZ_256;
	rc = roc_nix_dev_init(nix);
	if (rc) {
		plt_err("Failed to init nix eswitch device, rc=%d(%s)", rc, roc_error_msg_get(rc));
		goto fail;
	}

	/* Get the representors count */
	rc = roc_nix_max_rep_count(&eswitch_dev->nix);
	if (rc) {
		plt_err("Failed to get rep cnt, rc=%d(%s)", rc, roc_error_msg_get(rc));
		goto free_cqs;
	}
	eswitch_dev->repr_cnt.max_repr = eswitch_dev->nix.rep_cnt;

	/* Allocate a NIX LF */
	nb_rxq = CNXK_ESWITCH_MAX_RXQ;
	nb_txq = CNXK_ESWITCH_MAX_TXQ;
	nb_cq = CNXK_ESWITCH_MAX_RXQ;
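	/* Disable alignment padding of received packet data */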
	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
	if (rc) {
		plt_err("lf alloc failed = %s(%d)", roc_error_msg_get(rc), rc);
		goto dev_fini;
	}

	if (nb_rxq) {
		/* Allocate memory for eswitch rq's and cq's */
		qs = plt_zmalloc(sizeof(struct cnxk_eswitch_rxq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc eswitch rxq");
			goto lf_free;
		}
		eswitch_dev->rxq = qs;
	}

	if (nb_txq) {
		/* Allocate memory for roc sq's */
		qs = plt_zmalloc(sizeof(struct cnxk_eswitch_txq) * nb_txq, 0);
		if (!qs) {
			plt_err("Failed to alloc eswitch txq");
			goto free_rqs;
		}
		eswitch_dev->txq = qs;
	}

	if (nb_cq) {
		qs = plt_zmalloc(sizeof(struct cnxk_eswitch_cxq) * nb_cq, 0);
		if (!qs) {
			plt_err("Failed to alloc eswitch cxq");
			goto free_sqs;
		}
		eswitch_dev->cxq = qs;
	}

	eswitch_dev->nb_rxq = nb_rxq;
	eswitch_dev->nb_txq = nb_txq;

	/* Re-enable NIX LF error interrupts */
	roc_nix_err_intr_ena_dis(nix, true);
	roc_nix_ras_intr_ena_dis(nix, true);

	rc = roc_nix_lso_fmt_setup(nix);
	if (rc) {
		plt_err("lso setup failed = %s(%d)", roc_error_msg_get(rc), rc);
		goto free_cqs;
	}

	rc = roc_nix_switch_hdr_set(nix, 0, 0, 0, 0);
	if (rc) {
		plt_err("switch hdr set failed = %s(%d)", roc_error_msg_get(rc), rc);
		goto free_cqs;
	}

	rc = roc_nix_tm_init(nix);
	if (rc) {
		plt_err("tm failed = %s(%d)", roc_error_msg_get(rc), rc);
		goto free_cqs;
	}

	/* Register queue IRQs */
	rc = roc_nix_register_queue_irqs(nix);
	if (rc) {
		plt_err("Failed to register queue interrupts rc=%d", rc);
		goto tm_fini;
	}

	/* Enable default tree */
	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
	if (rc) {
		plt_err("tm default hierarchy enable failed = %s(%d)", roc_error_msg_get(rc), rc);
		goto q_irq_fini;
	}

	memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
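	/* Keep Rx channel backpressure disabled on the eswitch */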
	fc_cfg.rxchan_cfg.enable = false;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc) {
		plt_err("Failed to setup flow control, rc=%d(%s)", rc, roc_error_msg_get(rc));
		goto q_irq_fini;
	}

	roc_nix_fc_mode_get(nix);

	return rc;
q_irq_fini:
	roc_nix_unregister_queue_irqs(nix);
tm_fini:
	roc_nix_tm_fini(nix);
free_cqs:
	rte_free(eswitch_dev->cxq);
free_sqs:
	rte_free(eswitch_dev->txq);
free_rqs:
	rte_free(eswitch_dev->rxq);
lf_free:
	roc_nix_lf_free(nix);
dev_fini:
	roc_nix_dev_fini(nix);
fail:
	return rc;
}

static int
eswitch_hw_rsrc_setup(struct cnxk_eswitch_dev *eswitch_dev, struct rte_pci_device *pci_dev)
{
	struct roc_nix *nix;
	int rc;

	nix = &eswitch_dev->nix;
	rc = nix_lf_setup(eswitch_dev);
	if (rc) {
		plt_err("Failed to setup hw rsrc, rc=%d(%s)", rc, roc_error_msg_get(rc));
		goto fail;
	}

	/* Initialize roc npc */
	eswitch_dev->npc.roc_nix = nix;
	eswitch_dev->npc.flow_max_priority = 3;
	eswitch_dev->npc.flow_prealloc_size = 1;
	rc = roc_npc_init(&eswitch_dev->npc);
	if (rc)
		goto rsrc_cleanup;

	/* List for eswitch default flows */
	TAILQ_INIT(&eswitch_dev->esw_flow_list);

	return rc;
rsrc_cleanup:
	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
fail:
	return rc;
}

int
cnxk_eswitch_representor_info_get(struct cnxk_eswitch_dev *eswitch_dev,
				  struct rte_eth_representor_info *info)
{
	struct cnxk_eswitch_devargs *esw_da;
	int rc = 0, n_entries, i, j = 0, k = 0;

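	/* Count the total representor ports across all devargs entries */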
	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
		for (j = 0; j < eswitch_dev->esw_da[i].nb_repr_ports; j++)
			k++;
	}
	n_entries = k;

	if (info == NULL)
		goto out;

	if ((uint32_t)n_entries > info->nb_ranges_alloc)
		n_entries = info->nb_ranges_alloc;

	k = 0;
	info->controller = 0;
	info->pf = 0;
	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
		esw_da = &eswitch_dev->esw_da[i];
		info->ranges[k].type = esw_da->da.type;
		switch (esw_da->da.type) {
		case RTE_ETH_REPRESENTOR_PF:
			info->ranges[k].controller = 0;
			info->ranges[k].pf = esw_da->repr_hw_info[0].pfvf;
			info->ranges[k].vf = 0;
			info->ranges[k].id_base = info->ranges[k].pf;
			info->ranges[k].id_end = info->ranges[k].pf;
			snprintf(info->ranges[k].name, sizeof(info->ranges[k].name), "pf%d",
				 info->ranges[k].pf);
			k++;
			break;
		case RTE_ETH_REPRESENTOR_VF:
			for (j = 0; j < esw_da->nb_repr_ports; j++) {
				info->ranges[k].controller = 0;
				info->ranges[k].pf = esw_da->da.ports[0];
				info->ranges[k].vf = esw_da->repr_hw_info[j].pfvf;
				info->ranges[k].id_base = esw_da->repr_hw_info[j].port_id;
				info->ranges[k].id_end = esw_da->repr_hw_info[j].port_id;
				snprintf(info->ranges[k].name, sizeof(info->ranges[k].name),
					 "pf%dvf%d", info->ranges[k].pf, info->ranges[k].vf);
				k++;
			}
			break;
		default:
			plt_err("Invalid type %d", esw_da->da.type);
			rc = -EINVAL;
			goto fail;
		}
	}
	info->nb_ranges = k;
fail:
	return rc;
out:
	return n_entries;
}

static int
cnxk_eswitch_dev_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct cnxk_eswitch_dev *eswitch_dev;
	const struct rte_memzone *mz = NULL;
	uint16_t num_reps;
	int rc = -ENOMEM;

	RTE_SET_USED(pci_drv);

	eswitch_dev = cnxk_eswitch_pmd_priv();
	if (!eswitch_dev) {
		rc = roc_plt_init();
		if (rc) {
			plt_err("Failed to initialize platform model, rc=%d", rc);
			return rc;
		}

		if (rte_eal_process_type() != RTE_PROC_PRIMARY)
			return 0;

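		/* Reserve a named memzone so the device state can later be
		 * looked up via cnxk_eswitch_pmd_priv().
		 */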
		mz = rte_memzone_reserve_aligned(CNXK_REP_ESWITCH_DEV_MZ, sizeof(*eswitch_dev),
						 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			plt_err("Failed to reserve a memzone");
			goto fail;
		}

		eswitch_dev = mz->addr;
		eswitch_dev->pci_dev = pci_dev;

		rc = eswitch_hw_rsrc_setup(eswitch_dev, pci_dev);
		if (rc) {
			plt_err("Failed to setup hw rsrc, rc=%d(%s)", rc, roc_error_msg_get(rc));
			goto free_mem;
		}
	}

	if (pci_dev->device.devargs) {
		rc = cnxk_eswitch_repr_devargs(pci_dev, eswitch_dev);
		if (rc)
			goto rsrc_cleanup;
	}

	if (eswitch_dev->repr_cnt.nb_repr_created > eswitch_dev->repr_cnt.max_repr) {
		plt_err("Representors to be created %d exceed max allowed %d",
			eswitch_dev->repr_cnt.nb_repr_created, eswitch_dev->repr_cnt.max_repr);
		rc = -EINVAL;
		goto rsrc_cleanup;
	}

	num_reps = eswitch_dev->repr_cnt.nb_repr_created;
	if (!num_reps) {
		plt_err("No representors enabled");
		rc = -EINVAL;
		goto fail;
	}

	plt_esw_dbg("Max no of reps %d reps to be created %d Eswitch pfunc %x",
		    eswitch_dev->repr_cnt.max_repr, eswitch_dev->repr_cnt.nb_repr_created,
		    roc_nix_get_pf_func(&eswitch_dev->nix));

	/* Probe representor ports */
	rc = cnxk_rep_dev_probe(pci_dev, eswitch_dev);
	if (rc) {
		plt_err("Failed to probe representor ports");
		goto rsrc_cleanup;
	}

	/* Spinlock for synchronization between representors traffic and control
	 * messages
	 */
	rte_spinlock_init(&eswitch_dev->rep_lock);

	return rc;
rsrc_cleanup:
	eswitch_hw_rsrc_cleanup(eswitch_dev, pci_dev);
free_mem:
	rte_memzone_free(mz);
fail:
	return rc;
}

static const struct rte_pci_id cnxk_eswitch_pci_map[] = {
	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_ESWITCH_PF)},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver cnxk_eswitch_pci = {
	.id_table = cnxk_eswitch_pci_map,
	.drv_flags =
		RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA | RTE_PCI_DRV_PROBE_AGAIN,
	.probe = cnxk_eswitch_dev_probe,
	.remove = cnxk_eswitch_dev_remove,
};

RTE_PMD_REGISTER_PCI(cnxk_eswitch, cnxk_eswitch_pci);
RTE_PMD_REGISTER_PCI_TABLE(cnxk_eswitch, cnxk_eswitch_pci_map);
RTE_PMD_REGISTER_KMOD_DEP(cnxk_eswitch, "vfio-pci");